diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 12d5ffd94..7aea1f81a 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -43,3 +43,47 @@ a6e5201ff3fad4c69bf24d17bace2ef744b9f51b f36bc497c8c8f89004f3f6879908d3f0b25123e1 # Remove some lint exclusions and fix the issues 5f78d1b82b2292d5ce0c99623ba0ec444b80d24c + +# 2025 +# Fix formatting +c490ac5810b70f3cf5fd8649669838e8fdb19f4d +# Importer restructure +9147577b2b19f43ca827e9650261a86fb0450cef +# Move functionality under MusicBrainz plugin +529aaac7dced71266c6d69866748a7d044ec20ff +# musicbrainz: reorder methods +5dc6f45110b99f0cc8dbb94251f9b1f6d69583fa +# Copy paste query, types from library to dbcore +1a045c91668c771686f4c871c84f1680af2e944b +# Library restructure (split library.py into multiple modules) +0ad4e19d4f870db757373f44d12ff3be2441363a +# Docs: fix linting issues +769dcdc88a1263638ae25944ba6b2be3e8933666 +# Reformat all docs using docstrfmt +ab5acaabb3cd24c482adb7fa4800c89fd6a2f08d +# Replace format calls with f-strings +4a361bd501e85de12c91c2474c423559ca672852 +# Replace percent formatting +9352a79e4108bd67f7e40b1e944c01e0a7353272 +# Replace string concatenation (' + ') +1c16b2b3087e9c3635d68d41c9541c4319d0bdbe +# Do not use backslashes to deal with long strings +2fccf64efe82851861e195b521b14680b480a42a +# Do not use explicit indices for logging args when not needed +d93ddf8dd43e4f9ed072a03829e287c78d2570a2 +# Moved dev docs +07549ed896d9649562d40b75cd30702e6fa6e975 +# Moved plugin docs Further Reading chapter +33f1a5d0bef8ca08be79ee7a0d02a018d502680d +# Moved art.py utility module from beets into beetsplug +28aee0fde463f1e18dfdba1994e2bdb80833722f +# Refactor `ui/commands.py` into multiple modules +59c93e70139f70e9fd1c6f3c1bceb005945bec33 +# Moved ui.commands._utils into ui.commands.utils +25ae330044abf04045e3f378f72bbaed739fb30d +# Refactor test_ui_command.py into multiple modules +a59e41a88365e414db3282658d2aa456e0b3468a +# pyupgrade Python 3.10 
+301637a1609831947cb5dd90270ed46c24b1ab1b +# Fix changelog formatting +658b184c59388635787b447983ecd3a575f4fe56 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..fe4ce3378 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,7 @@ +# assign the entire repo to the maintainers team +* @beetbox/maintainers + +# Specific ownerships: +/beets/metadata_plugins.py @semohr +/beetsplug/titlecase.py @henry-oberholtzer +/beetsplug/mbpseudo.py @asardaes diff --git a/.github/problem-matchers/sphinx-build.json b/.github/problem-matchers/sphinx-build.json new file mode 100644 index 000000000..aff752ae9 --- /dev/null +++ b/.github/problem-matchers/sphinx-build.json @@ -0,0 +1,16 @@ +{ + "problemMatcher": [ + { + "owner": "sphinx-build", + "severity": "error", + "pattern": [ + { + "regexp": "^(/[^:]+):((\\d+):)?(\\sWARNING:)?\\s*(.+)$", + "file": 1, + "line": 3, + "message": 5 + } + ] + } + ] +} diff --git a/.github/problem-matchers/sphinx-lint.json b/.github/problem-matchers/sphinx-lint.json new file mode 100644 index 000000000..44e93e886 --- /dev/null +++ b/.github/problem-matchers/sphinx-lint.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "sphinx-lint", + "severity": "error", + "pattern": [ + { + "regexp": "^([^:]+):(\\d+):\\s+(.*)\\s\\(([a-z-]+)\\)$", + "file": 1, + "line": 2, + "message": 3, + "code": 4 + } + ] + } + ] +} diff --git a/.github/sphinx-problem-matcher.json b/.github/sphinx-problem-matcher.json deleted file mode 100644 index 0bfcf0ef4..000000000 --- a/.github/sphinx-problem-matcher.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "problemMatcher": [ - { - "owner": "sphinx", - "pattern": [ - { - "regexp": "^([^:]+):(\\d+): (WARNING: )?(.+)$", - "file": 1, - "line": 2, - "message": 4 - } - ] - } - ] -} diff --git a/.github/workflows/changelog_reminder.yaml b/.github/workflows/changelog_reminder.yaml index da0f670a0..380d89996 100644 --- a/.github/workflows/changelog_reminder.yaml +++ 
b/.github/workflows/changelog_reminder.yaml @@ -1,6 +1,6 @@ name: Verify changelog updated -on: +on: pull_request_target: types: - opened @@ -10,24 +10,24 @@ jobs: check_changes: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Get all updated Python files id: changed-python-files - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v46 with: files: | **.py - name: Check for the changelog update id: changelog-update - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v46 with: files: docs/changelog.rst - + - name: Comment under the PR with a reminder if: steps.changed-python-files.outputs.any_changed == 'true' && steps.changelog-update.outputs.any_changed == 'false' uses: thollander/actions-comment-pull-request@v2 with: - message: 'Thank you for the PR! The changelog has not been updated, so here is a friendly reminder to check if you need to add an entry.' - GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' + message: 'Thank you for the PR! The changelog has not been updated, so here is a friendly reminder to check if you need to add an entry.' 
+ GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index ea79d59b2..bfd05c718 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -4,6 +4,12 @@ on: push: branches: - master + +concurrency: + # Cancel previous workflow run when a new commit is pushed to a feature branch + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + env: PY_COLORS: 1 @@ -14,17 +20,17 @@ jobs: fail-fast: false matrix: platform: [ubuntu-latest, windows-latest] - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] runs-on: ${{ matrix.platform }} env: - IS_MAIN_PYTHON: ${{ matrix.python-version == '3.9' && matrix.platform == 'ubuntu-latest' }} + IS_MAIN_PYTHON: ${{ matrix.python-version == '3.10' && matrix.platform == 'ubuntu-latest' }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 + uses: BrandonLWhite/pipx-install-action@v1.0.3 - name: Setup Python with poetry caching # poetry cache requires poetry to already be installed, weirdly - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} cache: poetry @@ -33,11 +39,19 @@ jobs: if: matrix.platform == 'ubuntu-latest' run: | sudo apt update - sudo apt install ffmpeg gobject-introspection libcairo2-dev libgirepository1.0-dev pandoc + sudo apt install --yes --no-install-recommends \ + ffmpeg \ + gobject-introspection \ + gstreamer1.0-plugins-base \ + python3-gst-1.0 \ + libcairo2-dev \ + libgirepository-2.0-dev \ + pandoc \ + imagemagick - name: Get changed lyrics files id: lyrics-update - uses: tj-actions/changed-files@v45 + uses: tj-actions/changed-files@v46 with: files: | beetsplug/lyrics.py @@ -52,7 +66,7 @@ jobs: - if: ${{ env.IS_MAIN_PYTHON != 'true' }} name: Test without coverage run: | - poetry install 
--extras=autobpm --extras=lyrics + poetry install --without=lint --extras=autobpm --extras=lyrics --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate poe test - if: ${{ env.IS_MAIN_PYTHON == 'true' }} @@ -60,10 +74,16 @@ jobs: env: LYRICS_UPDATED: ${{ steps.lyrics-update.outputs.any_changed }} run: | - poetry install --extras=autobpm --extras=lyrics --extras=docs --extras=replaygain --extras=reflink + poetry install --extras=autobpm --extras=lyrics --extras=docs --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate poe docs poe test-with-coverage + - if: ${{ !cancelled() }} + name: Upload test results to Codecov + uses: codecov/test-results-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + - if: ${{ env.IS_MAIN_PYTHON == 'true' }} name: Store the coverage report uses: actions/upload-artifact@v4 @@ -78,15 +98,15 @@ jobs: permissions: id-token: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Get the coverage report - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: coverage-report - name: Upload code coverage - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: files: ./coverage.xml use_oidc: ${{ !(github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork) }} diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 88945bb8e..375968571 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -3,16 +3,20 @@ on: workflow_dispatch: schedule: - cron: "0 0 * * SUN" # run every Sunday at midnight + +env: + PYTHON_VERSION: "3.10" + jobs: test_integration: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: 
BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: - python-version: 3.8 + python-version: ${{ env.PYTHON_VERSION }} cache: poetry - name: Install dependencies diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yaml similarity index 71% rename from .github/workflows/lint.yml rename to .github/workflows/lint.yaml index 9e2552ab1..bb54c8875 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yaml @@ -6,8 +6,13 @@ on: branches: - master +concurrency: + # Cancel previous workflow run when a new commit is pushed to a feature branch + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + env: - PYTHON_VERSION: 3.9 + PYTHON_VERSION: "3.10" jobs: changed-files: @@ -19,16 +24,16 @@ jobs: changed_doc_files: ${{ steps.changed-doc-files.outputs.all_changed_files }} changed_python_files: ${{ steps.changed-python-files.outputs.all_changed_files }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Get changed docs files id: changed-doc-files - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v46 with: files: | docs/** - name: Get changed python files id: raw-changed-python-files - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v46 with: files: | **.py @@ -51,10 +56,10 @@ jobs: name: Check formatting needs: changed-files steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry @@ -72,10 +77,10 @@ jobs: name: Check linting needs: changed-files steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: 
BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry @@ -92,10 +97,10 @@ jobs: name: Check types with mypy needs: changed-files steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry @@ -105,10 +110,9 @@ jobs: - name: Type check code uses: liskin/gh-problem-matcher-wrap@v3 - continue-on-error: true with: linters: mypy - run: poe check-types --show-column-numbers --no-error-summary ${{ needs.changed-files.outputs.changed_python_files }} + run: poe check-types --show-column-numbers --no-error-summary . docs: if: needs.changed-files.outputs.any_docs_changed == 'true' @@ -116,10 +120,10 @@ jobs: name: Check docs needs: changed-files steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry @@ -127,11 +131,16 @@ jobs: - name: Install dependencies run: poetry install --extras=docs - - name: Add Sphinx problem matcher - run: echo "::add-matcher::.github/sphinx-problem-matcher.json" + - name: Add Sphinx problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/sphinx-build.json" + echo "::add-matcher::.github/problem-matchers/sphinx-lint.json" + + - name: Check docs formatting + run: poe format-docs --check + + - name: Lint docs + run: poe lint-docs - name: Build docs - run: |- - poe docs |& tee /tmp/output - # fail the job if there are issues - grep -q " WARNING:" /tmp/output && exit 1 || exit 0 + run: poe docs -- -e 'SPHINXOPTS=--fail-on-warning 
--keep-going' diff --git a/.github/workflows/make_release.yaml b/.github/workflows/make_release.yaml index 248755703..571b50970 100644 --- a/.github/workflows/make_release.yaml +++ b/.github/workflows/make_release.yaml @@ -8,7 +8,7 @@ on: required: true env: - PYTHON_VERSION: 3.8 + PYTHON_VERSION: "3.10" NEW_VERSION: ${{ inputs.version }} NEW_TAG: v${{ inputs.version }} @@ -17,16 +17,16 @@ jobs: name: Bump version, commit and create tag runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry - name: Install dependencies - run: poetry install --only=release + run: poetry install --with=release --extras=docs - name: Bump project version run: poe bump "${{ env.NEW_VERSION }}" @@ -45,13 +45,13 @@ jobs: outputs: changelog: ${{ steps.generate_changelog.outputs.changelog }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ env.NEW_TAG }} - name: Install Python tools - uses: BrandonLWhite/pipx-install-action@v0.1.1 - - uses: actions/setup-python@v5 + uses: BrandonLWhite/pipx-install-action@v1.0.3 + - uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: poetry @@ -92,7 +92,7 @@ jobs: id-token: write steps: - name: Download all the dists - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: python-package-distributions path: dist/ @@ -107,7 +107,7 @@ jobs: CHANGELOG: ${{ needs.build.outputs.changelog }} steps: - name: Download all the dists - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: python-package-distributions path: dist/ diff --git a/.gitignore b/.gitignore index 90ef7387d..138965b22 100644 --- a/.gitignore +++ b/.gitignore @@ -94,3 +94,6 @@ ENV/ # 
pyright pyrightconfig.json + +# Pyrefly +pyrefly.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d773af3e1..d33ff2955 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,17 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.1 + - repo: local hooks: - - id: ruff-format + - id: format + name: Format Python files + entry: poe format + language: system + files: '.*.py' + pass_filenames: true + - id: format-docs + name: Format docs + entry: poe format-docs + language: system + files: '.*.rst' + pass_filenames: true diff --git a/CODE_OF_CONDUCT.rst b/CODE_OF_CONDUCT.rst index 63c3eb537..e4f744bdd 100644 --- a/CODE_OF_CONDUCT.rst +++ b/CODE_OF_CONDUCT.rst @@ -1,9 +1,8 @@ -#################################### Contributor Covenant Code of Conduct -#################################### +==================================== Our Pledge -========== +---------- We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body @@ -16,7 +15,7 @@ We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. Our Standards -============= +------------- Examples of behavior that contributes to a positive environment for our community include: @@ -41,7 +40,7 @@ Examples of unacceptable behavior include: professional setting Enforcement Responsibilities -============================ +---------------------------- Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in @@ -54,7 +53,7 @@ not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 
Scope -===== +----- This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. @@ -63,7 +62,7 @@ posting via an official social media account, or acting as an appointed representative at an online or offline event. Enforcement -=========== +----------- Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at here on Github. @@ -73,13 +72,13 @@ All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines -====================== +---------------------- Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction -------------- +~~~~~~~~~~~~~ **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. @@ -89,7 +88,7 @@ clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning ----------- +~~~~~~~~~~ **Community Impact**: A violation through a single incident or series of actions. @@ -102,7 +101,7 @@ like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban ----------------- +~~~~~~~~~~~~~~~~ **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. @@ -114,7 +113,7 @@ with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. Permanent Ban ----------------- +~~~~~~~~~~~~~~~~ **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an @@ -124,7 +123,7 @@ individual, or aggression toward or disparagement of classes of individuals. 
community. Attribution -=========== +----------- This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available `here @@ -136,4 +135,3 @@ enforcement ladder. For answers to common questions about this code of conduct, see the `FAQ `_. Translations are available at `translations `_. - diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 9010db2c3..c2cde4ed4 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,102 +1,106 @@ -############ Contributing -############ +============ .. contents:: :depth: 3 Thank you! -========== +---------- -First off, thank you for considering contributing to beets! It’s people -like you that make beets continue to succeed. +First off, thank you for considering contributing to beets! It’s people like you +that make beets continue to succeed. -These guidelines describe how you can help most effectively. By -following these guidelines, you can make life easier for the development -team as it indicates you respect the maintainers’ time; in return, the -maintainers will reciprocate by helping to address your issue, review -changes, and finalize pull requests. +These guidelines describe how you can help most effectively. By following these +guidelines, you can make life easier for the development team as it indicates +you respect the maintainers’ time; in return, the maintainers will reciprocate +by helping to address your issue, review changes, and finalize pull requests. Types of Contributions -====================== +---------------------- -We love to get contributions from our community—you! There are many ways -to contribute, whether you’re a programmer or not. +We love to get contributions from our community—you! There are many ways to +contribute, whether you’re a programmer or not. The first thing to do, regardless of how you'd like to contribute to the project, is to check out our :doc:`Code of Conduct ` and to keep that in mind while interacting with other contributors and users. 
Non-Programming ---------------- +~~~~~~~~~~~~~~~ -- Promote beets! Help get the word out by telling your friends, writing - a blog post, or discussing it on a forum you frequent. -- Improve the `documentation`_. It’s - incredibly easy to contribute here: just find a page you want to - modify and hit the “Edit on GitHub” button in the upper-right. You - can automatically send us a pull request for your changes. -- GUI design. For the time being, beets is a command-line-only affair. - But that’s mostly because we don’t have any great ideas for what a - good GUI should look like. If you have those great ideas, please get - in touch. -- Benchmarks. We’d like to have a consistent way of measuring speed - improvements in beets’ tagger and other functionality as well as a - way of comparing beets’ performance to other tools. You can help by - compiling a library of freely-licensed music files (preferably with - incorrect metadata) for testing and measurement. -- Think you have a nice config or cool use-case for beets? We’d love to - hear about it! Submit a post to our `discussion board - `__ - under the “Show and Tell” category for a chance to get featured in `the - docs `__. -- Consider helping out fellow users by by `responding to support requests - `__ . +- Promote beets! Help get the word out by telling your friends, writing a blog + post, or discussing it on a forum you frequent. +- Improve the documentation_. It’s incredibly easy to contribute here: just find + a page you want to modify and hit the “Edit on GitHub” button in the + upper-right. You can automatically send us a pull request for your changes. +- GUI design. For the time being, beets is a command-line-only affair. But + that’s mostly because we don’t have any great ideas for what a good GUI should + look like. If you have those great ideas, please get in touch. +- Benchmarks. 
We’d like to have a consistent way of measuring speed improvements + in beets’ tagger and other functionality as well as a way of comparing beets’ + performance to other tools. You can help by compiling a library of + freely-licensed music files (preferably with incorrect metadata) for testing + and measurement. +- Think you have a nice config or cool use-case for beets? We’d love to hear + about it! Submit a post to our `discussion board + `__ + under the “Show and Tell” category for a chance to get featured in `the docs + `__. +- Consider helping out fellow users by by `responding to support requests + `__ . Programming ------------ +~~~~~~~~~~~ -- As a programmer (even if you’re just a beginner!), you have a ton of - opportunities to get your feet wet with beets. -- For developing plugins, or hacking away at beets, there’s some good - information in the `“For Developers” section of the - docs `__. +- As a programmer (even if you’re just a beginner!), you have a ton of + opportunities to get your feet wet with beets. +- For developing plugins, or hacking away at beets, there’s some good + information in the `“For Developers” section of the docs + `__. .. _development-tools: Development Tools -^^^^^^^^^^^^^^^^^ ++++++++++++++++++ In order to develop beets, you will need a few tools installed: -- `poetry`_ for packaging, virtual environment and dependency management -- `poethepoet`_ to run tasks, such as linting, formatting, testing +- poetry_ for packaging, virtual environment and dependency management +- poethepoet_ to run tasks, such as linting, formatting, testing -Python community recommends using `pipx`_ to install stand-alone command-line -applications such as above. `pipx`_ installs each application in an isolated +Python community recommends using pipx_ to install stand-alone command-line +applications such as above. 
pipx_ installs each application in an isolated virtual environment, where its dependencies will not interfere with your system and other CLI tools. -If you do not have `pipx`_ installed in your system, follow `pipx-installation-instructions`_ or +If you do not have pipx_ installed in your system, follow `pipx installation +instructions `__ or .. code-block:: sh $ python3 -m pip install --user pipx -Install `poetry`_ and `poethepoet`_ using `pipx`_:: +Install poetry_ and poethepoet_ using pipx_: + +:: $ pipx install poetry poethepoet -.. _pipx: https://pipx.pypa.io/stable -.. _pipx-installation-instructions: https://pipx.pypa.io/stable/installation/ +.. admonition:: Check ``tool.pipx-install`` section in ``pyproject.toml`` to see supported versions + + .. code-block:: toml + + [tool.pipx-install] + poethepoet = ">=0.26" + poetry = "<2" .. _getting-the-source: Getting the Source -^^^^^^^^^^^^^^^^^^ +++++++++++++++++++ The easiest way to get started with the latest beets source is to clone the -repository and install ``beets`` in a local virtual environment using `poetry`_. +repository and install ``beets`` in a local virtual environment using poetry_. This can be done with: .. code-block:: bash @@ -106,26 +110,32 @@ This can be done with: $ poetry install This will install ``beets`` and all development dependencies into its own -virtual environment in your ``$POETRY_CACHE_DIR``. See ``poetry install ---help`` for installation options, including installing ``extra`` dependencies -for plugins. +virtual environment in your ``$POETRY_CACHE_DIR``. See ``poetry install --help`` +for installation options, including installing ``extra`` dependencies for +plugins. In order to run something within this virtual environment, start the command with ``poetry run`` to them, for example ``poetry run pytest``. On the other hand, it may get tedious to type ``poetry run`` before every -command. Instead, you can activate the virtual environment in your shell with:: +command. 
Instead, you can activate the virtual environment in your shell with: + +:: $ poetry shell -You should see ``(beets-py3.9)`` prefix in your shell prompt. Now you can run -commands directly, for example:: +You should see ``(beets-py3.10)`` prefix in your shell prompt. Now you can run +commands directly, for example: - $ (beets-py3.9) pytest +:: -Additionally, `poethepoet`_ task runner assists us with the most common + $ (beets-py3.10) pytest + +Additionally, poethepoet_ task runner assists us with the most common operations. Formatting, linting, testing are defined as ``poe`` tasks in -`pyproject.toml`_. Run:: +pyproject.toml_. Run: + +:: $ poe @@ -140,204 +150,180 @@ to see all available tasks. They can be used like this, for example $ poe test --lf # re-run failing tests (note the additional pytest option) $ poe check-types --pretty # check types with an extra option for mypy - Code Contribution Ideas -^^^^^^^^^^^^^^^^^^^^^^^ ++++++++++++++++++++++++ -- We maintain a set of `issues marked as - “bite-sized” `__. - These are issues that would serve as a good introduction to the - codebase. Claim one and start exploring! -- Like testing? Our `test - coverage `__ is somewhat - low. You can help out by finding low-coverage modules or checking out - other `testing-related - issues `__. -- There are several ways to improve the tests in general (see :ref:`testing` and some - places to think about performance optimization (see - `Optimization `__). -- Not all of our code is up to our coding conventions. In particular, - the `library API - documentation `__ - are currently quite sparse. You can help by adding to the docstrings - in the code and to the documentation pages themselves. beets follows - `PEP-257 `__ for - docstrings and in some places, we also sometimes use `ReST autodoc - syntax for - Sphinx `__ - to, for example, refer to a class name. - -.. _poethepoet: https://poethepoet.natn.io/index.html -.. 
_poetry: https://python-poetry.org/docs/ +- We maintain a set of `issues marked as “good first issue” + `__. These are + issues that would serve as a good introduction to the codebase. Claim one and + start exploring! +- Like testing? Our `test coverage `__ + is somewhat low. You can help out by finding low-coverage modules or checking + out other `testing-related issues + `__. +- There are several ways to improve the tests in general (see :ref:`testing` and + some places to think about performance optimization (see `Optimization + `__). +- Not all of our code is up to our coding conventions. In particular, the + `library API documentation + `__ are currently + quite sparse. You can help by adding to the docstrings in the code and to the + documentation pages themselves. beets follows `PEP-257 + `__ for docstrings and in some + places, we also sometimes use `ReST autodoc syntax for Sphinx + `__ to, + for example, refer to a class name. Your First Contribution -======================= +----------------------- -If this is your first time contributing to an open source project, -welcome! If you are confused at all about how to contribute or what to -contribute, take a look at `this great -tutorial `__, or stop by our -`discussion board `__ -if you have any questions. +If this is your first time contributing to an open source project, welcome! If +you are confused at all about how to contribute or what to contribute, take a +look at `this great tutorial `__, or stop by our +`discussion board`_ if you have any questions. -We maintain a list of issues we reserved for those new to open source -labeled `“first timers -only” `__. -Since the goal of these issues is to get users comfortable with -contributing to an open source project, please do not hesitate to ask -any questions. +We maintain a list of issues we reserved for those new to open source labeled +`first timers only`_. 
Since the goal of these issues is to get users comfortable +with contributing to an open source project, please do not hesitate to ask any +questions. + +.. _first timers only: https://github.com/beetbox/beets/issues?q=is%3Aopen+is%3Aissue+label%3A%22first+timers+only%22 How to Submit Your Work -======================= +----------------------- -Do you have a great bug fix, new feature, or documentation expansion -you’d like to contribute? Follow these steps to create a GitHub pull -request and your code will ship in no time. +Do you have a great bug fix, new feature, or documentation expansion you’d like +to contribute? Follow these steps to create a GitHub pull request and your code +will ship in no time. -1. Fork the beets repository and clone it (see above) to create a - workspace. +1. Fork the beets repository and clone it (see above) to create a workspace. 2. Install pre-commit, following the instructions `here `_. 3. Make your changes. -4. Add tests. If you’ve fixed a bug, write a test to ensure that you’ve - actually fixed it. If there’s a new feature or plugin, please - contribute tests that show that your code does what it says. -5. Add documentation. If you’ve added a new command flag, for example, - find the appropriate page under ``docs/`` where it needs to be - listed. -6. Add a changelog entry to ``docs/changelog.rst`` near the top of the - document. +4. Add tests. If you’ve fixed a bug, write a test to ensure that you’ve actually + fixed it. If there’s a new feature or plugin, please contribute tests that + show that your code does what it says. +5. Add documentation. If you’ve added a new command flag, for example, find the + appropriate page under ``docs/`` where it needs to be listed. +6. Add a changelog entry to ``docs/changelog.rst`` near the top of the document. 7. Run the tests and style checker, see :ref:`testing`. 8. Push to your fork and open a pull request! We’ll be in touch shortly. -9. 
If you add commits to a pull request, please add a comment or - re-request a review after you push them since GitHub doesn’t - automatically notify us when commits are added. +9. If you add commits to a pull request, please add a comment or re-request a + review after you push them since GitHub doesn’t automatically notify us when + commits are added. Remember, code contributions have four parts: the code, the tests, the documentation, and the changelog entry. Thank you for contributing! -The Code -======== +.. admonition:: Ownership -The documentation has a section on the -`library API `__ -that serves as an introduction to beets’ design. + If you are the owner of a plugin, please consider reviewing pull requests + that affect your plugin. If you are not the owner of a plugin, please + consider becoming one! You can do so by adding an entry to + ``.github/CODEOWNERS``. This way, you will automatically receive a review + request for pull requests that adjust the code that you own. If you have any + questions, please ask on our `discussion board`_. + +The Code +-------- + +The documentation has a section on the `library API +`__ that serves as an +introduction to beets’ design. Coding Conventions -================== +------------------ General -------- +~~~~~~~ + There are a few coding conventions we use in beets: -- Whenever you access the library database, do so through the provided - Library methods or via a Transaction object. Never call - ``lib.conn.*`` directly. For example, do this: +- Whenever you access the library database, do so through the provided Library + methods or via a Transaction object. Never call ``lib.conn.*`` directly. For + example, do this: - .. code-block:: python + .. 
code-block:: python - with g.lib.transaction() as tx: - rows = tx.query("SELECT DISTINCT '{0}' FROM '{1}' ORDER BY '{2}'" - .format(field, model._table, sort_field)) + with g.lib.transaction() as tx: + rows = tx.query("SELECT DISTINCT {field} FROM {model._table} ORDER BY {sort_field}") - To fetch Item objects from the database, use lib.items(…) and supply - a query as an argument. Resist the urge to write raw SQL for your - query. If you must use lower-level queries into the database, do - this: + To fetch Item objects from the database, use lib.items(…) and supply a query + as an argument. Resist the urge to write raw SQL for your query. If you must + use lower-level queries into the database, do this, for example: - .. code-block:: python + .. code-block:: python - with lib.transaction() as tx: - rows = tx.query("SELECT …") + with lib.transaction() as tx: + rows = tx.query("SELECT path FROM items WHERE album_id = ?", (album_id,)) - Transaction objects help control concurrent access to the database - and assist in debugging conflicting accesses. -- ``str.format()`` should be used instead of the ``%`` operator -- Never ``print`` informational messages; use the - `logging `__ module - instead. In particular, we have our own logging shim, so you’ll see - ``from beets import logging`` in most files. + Transaction objects help control concurrent access to the database and assist + in debugging conflicting accesses. - - The loggers use - `str.format `__-style - logging instead of ``%``-style, so you can type - ``log.debug("{0}", obj)`` to do your formatting. +- f-strings should be used instead of the ``%`` operator and ``str.format()`` + calls. +- Never ``print`` informational messages; use the `logging + `__ module instead. In + particular, we have our own logging shim, so you’ll see ``from beets import + logging`` in most files. -- Exception handlers must use ``except A as B:`` instead of - ``except A, B:``. 
+ - The loggers use `str.format + `__-style logging + instead of ``%``-style, so you can type ``log.debug("{}", obj)`` to do your + formatting. + +- Exception handlers must use ``except A as B:`` instead of ``except A, B:``. Style ------ +~~~~~ -We use `ruff`_ to format and lint the codebase. +We use `ruff `__ to format and lint the codebase. Run ``poe check-format`` and ``poe lint`` to check your code for style and linting errors. Running ``poe format`` will automatically format your code according to the specifications required by the project. -.. _ruff: https://docs.astral.sh/ruff/ - -Handling Paths --------------- - -A great deal of convention deals with the handling of **paths**. Paths -are stored internally—in the database, for instance—as byte strings -(i.e., ``bytes`` instead of ``str`` in Python 3). This is because POSIX -operating systems’ path names are only reliably usable as byte -strings—operating systems typically recommend but do not require that -filenames use a given encoding, so violations of any reported encoding -are inevitable. On Windows, the strings are always encoded with UTF-8; -on Unix, the encoding is controlled by the filesystem. Here are some -guidelines to follow: - -- If you have a Unicode path or you’re not sure whether something is - Unicode or not, pass it through ``bytestring_path`` function in the - ``beets.util`` module to convert it to bytes. -- Pass every path name through the ``syspath`` function (also in - ``beets.util``) before sending it to any *operating system* file - operation (``open``, for example). This is necessary to use long - filenames (which, maddeningly, must be Unicode) on Windows. This - allows us to consistently store bytes in the database but use the - native encoding rule on both POSIX and Windows. -- Similarly, the ``displayable_path`` utility function converts - bytestring paths to a Unicode string for displaying to the user. 
- Every time you want to print out a string to the terminal or log it - with the ``logging`` module, feed it through this function. +Similarly, run ``poe format-docs`` and ``poe lint-docs`` to ensure consistent +documentation formatting and check for any issues. Editor Settings ---------------- +~~~~~~~~~~~~~~~ -Personally, I work on beets with `vim`_. Here are -some ``.vimrc`` lines that might help with PEP 8-compliant Python -coding:: +Personally, I work on beets with vim_. Here are some ``.vimrc`` lines that might +help with PEP 8-compliant Python coding: + +:: filetype indent on autocmd FileType python setlocal shiftwidth=4 tabstop=4 softtabstop=4 expandtab shiftround autoindent -Consider installing `this alternative Python indentation -plugin `__. I also -like `neomake `__ with its flake8 -checker. +Consider installing `this alternative Python indentation plugin +`__. I also like `neomake +`__ with its flake8 checker. .. _testing: Testing -======= +------- Running the Tests ------------------ +~~~~~~~~~~~~~~~~~ -Use ``poe`` to run tests:: +Use ``poe`` to run tests: + +:: $ poe test [pytest options] -You can disable a hand-selected set of "slow" tests by setting the -environment variable ``SKIP_SLOW_TESTS``, for example:: +You can disable a hand-selected set of "slow" tests by setting the environment +variable ``SKIP_SLOW_TESTS``, for example: + +:: $ SKIP_SLOW_TESTS=1 poe test Coverage -^^^^^^^^ +++++++++ The ``test`` command does not include coverage as it slows down testing. In order to measure it, use the ``test-with-coverage`` task @@ -350,56 +336,71 @@ You are welcome to explore coverage by opening the HTML report in Note that for each covered line the report shows **which tests cover it** (expand the list on the right-hand side of the affected line). -You can find project coverage status on `Codecov`_. +You can find project coverage status on Codecov_. 
Red Flags -^^^^^^^^^ ++++++++++ -The `pytest-random`_ plugin makes it easy to randomize the order of -tests. ``poe test --random`` will occasionally turn up failing tests -that reveal ordering dependencies—which are bad news! +The pytest-random_ plugin makes it easy to randomize the order of tests. ``poe +test --random`` will occasionally turn up failing tests that reveal ordering +dependencies—which are bad news! Test Dependencies -^^^^^^^^^^^^^^^^^ ++++++++++++++++++ The tests have a few more dependencies than beets itself. (The additional dependencies consist of testing utilities and dependencies of non-default plugins exercised by the test suite.) The dependencies are listed under the -``tool.poetry.group.test.dependencies`` section in `pyproject.toml`_. +``tool.poetry.group.test.dependencies`` section in pyproject.toml_. Writing Tests -------------- +~~~~~~~~~~~~~ -Writing tests is done by adding or modifying files in folder `test`_. -Take a look at -`https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`_ -to get a basic view on how tests are written. Since we are currently migrating -the tests from `unittest`_ to `pytest`_, new tests should be written using -`pytest`_. Contributions migrating existing tests are welcome! +Writing tests is done by adding or modifying files in folder test_. Take a look +at `https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`_ to +get a basic view on how tests are written. Since we are currently migrating the +tests from unittest_ to pytest_, new tests should be written using pytest_. +Contributions migrating existing tests are welcome! -External API requests under test should be mocked with `requests-mock`_, -However, we still want to know whether external APIs are up and that they -return expected responses, therefore we test them weekly with our `integration -test`_ suite. 
+External API requests under test should be mocked with requests-mock_. However, +we still want to know whether external APIs are up and that they return expected +responses; therefore, we test them weekly with our `integration test`_ suite. In order to add such a test, mark your test with the ``integration_test`` marker .. code-block:: python - @pytest.mark.integration_test - def test_external_api_call(): - ... + @pytest.mark.integration_test + def test_external_api_call(): ... This way, the test will be run only in the integration test suite. -.. _Codecov: https://codecov.io/github/beetbox/beets -.. _pytest-random: https://github.com/klrmn/pytest-random -.. _pytest: https://docs.pytest.org/en/stable/ -.. _pyproject.toml: https://github.com/beetbox/beets/tree/master/pyproject.toml -.. _test: https://github.com/beetbox/beets/tree/master/test -.. _`https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224 -.. _unittest: https://docs.python.org/3/library/unittest.html -.. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22 -.. _requests-mock: https://requests-mock.readthedocs.io/en/latest/response.html +.. _codecov: https://codecov.io/github/beetbox/beets + +.. _discussion board: https://github.com/beetbox/beets/discussions + .. _documentation: https://beets.readthedocs.io/en/stable/ + +.. _https://github.com/beetbox/beets/blob/master/test/test_template.py#l224: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224 + +.. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22 + +.. _pipx: https://pipx.pypa.io/stable + +.. _poethepoet: https://poethepoet.natn.io/index.html + +.. _poetry: https://python-poetry.org/docs/ + +.. _pyproject.toml: https://github.com/beetbox/beets/tree/master/pyproject.toml + +.. _pytest: https://docs.pytest.org/en/stable/ + +.. 
_pytest-random: https://github.com/klrmn/pytest-random + +.. _requests-mock: https://requests-mock.readthedocs.io/en/latest/response.html + +.. _test: https://github.com/beetbox/beets/tree/master/test + +.. _unittest: https://docs.python.org/3/library/unittest.html + .. _vim: https://www.vim.org/ diff --git a/README.rst b/README.rst index c916b65de..9e42eec30 100644 --- a/README.rst +++ b/README.rst @@ -10,115 +10,132 @@ .. image:: https://repology.org/badge/tiny-repos/beets.svg :target: https://repology.org/project/beets/versions - beets ===== Beets is the media library management system for obsessive music geeks. -The purpose of beets is to get your music collection right once and for all. -It catalogs your collection, automatically improving its metadata as it goes. -It then provides a bouquet of tools for manipulating and accessing your music. +The purpose of beets is to get your music collection right once and for all. It +catalogs your collection, automatically improving its metadata as it goes. It +then provides a suite of tools for manipulating and accessing your music. -Here's an example of beets' brainy tag corrector doing its thing:: +Here's an example of beets' brainy tag corrector doing its thing: - $ beet import ~/music/ladytron - Tagging: - Ladytron - Witching Hour - (Similarity: 98.4%) - * Last One Standing -> The Last One Standing - * Beauty -> Beauty*2 - * White Light Generation -> Whitelightgenerator - * All the Way -> All the Way... +:: + + $ beet import ~/music/ladytron + Tagging: + Ladytron - Witching Hour + (Similarity: 98.4%) + * Last One Standing -> The Last One Standing + * Beauty -> Beauty*2 + * White Light Generation -> Whitelightgenerator + * All the Way -> All the Way... Because beets is designed as a library, it can do almost anything you can -imagine for your music collection. Via `plugins`_, beets becomes a panacea: +imagine for your music collection. 
Via plugins_, beets becomes a panacea: - Fetch or calculate all the metadata you could possibly need: `album art`_, - `lyrics`_, `genres`_, `tempos`_, `ReplayGain`_ levels, or `acoustic - fingerprints`_. -- Get metadata from `MusicBrainz`_, `Discogs`_, and `Beatport`_. Or guess - metadata using songs' filenames or their acoustic fingerprints. + lyrics_, genres_, tempos_, ReplayGain_ levels, or `acoustic fingerprints`_. +- Get metadata from MusicBrainz_, Discogs_, and Beatport_. Or guess metadata + using songs' filenames or their acoustic fingerprints. - `Transcode audio`_ to any format you like. -- Check your library for `duplicate tracks and albums`_ or for `albums that - are missing tracks`_. +- Check your library for `duplicate tracks and albums`_ or for `albums that are + missing tracks`_. - Clean up crufty tags left behind by other, less-awesome tools. - Embed and extract album art from files' metadata. - Browse your music library graphically through a Web browser and play it in any browser that supports `HTML5 Audio`_. - Analyze music files' metadata from the command line. -- Listen to your library with a music player that speaks the `MPD`_ protocol - and works with a staggering variety of interfaces. +- Listen to your library with a music player that speaks the MPD_ protocol and + works with a staggering variety of interfaces. -If beets doesn't do what you want yet, `writing your own plugin`_ is -shockingly simple if you know a little Python. +If beets doesn't do what you want yet, `writing your own plugin`_ is shockingly +simple if you know a little Python. + +.. _acoustic fingerprints: https://beets.readthedocs.org/page/plugins/chroma.html + +.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html + +.. _albums that are missing tracks: https://beets.readthedocs.org/page/plugins/missing.html + +.. _beatport: https://www.beatport.com + +.. _discogs: https://www.discogs.com/ + +.. 
_duplicate tracks and albums: https://beets.readthedocs.org/page/plugins/duplicates.html + +.. _genres: https://beets.readthedocs.org/page/plugins/lastgenre.html + +.. _html5 audio: https://html.spec.whatwg.org/multipage/media.html#the-audio-element + +.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html + +.. _mpd: https://www.musicpd.org/ + +.. _musicbrainz: https://musicbrainz.org/ + +.. _musicbrainz music collection: https://musicbrainz.org/doc/Collections/ .. _plugins: https://beets.readthedocs.org/page/plugins/ -.. _MPD: https://www.musicpd.org/ -.. _MusicBrainz music collection: https://musicbrainz.org/doc/Collections/ -.. _writing your own plugin: - https://beets.readthedocs.org/page/dev/plugins.html -.. _HTML5 Audio: - https://html.spec.whatwg.org/multipage/media.html#the-audio-element -.. _albums that are missing tracks: - https://beets.readthedocs.org/page/plugins/missing.html -.. _duplicate tracks and albums: - https://beets.readthedocs.org/page/plugins/duplicates.html -.. _Transcode audio: - https://beets.readthedocs.org/page/plugins/convert.html -.. _Discogs: https://www.discogs.com/ -.. _acoustic fingerprints: - https://beets.readthedocs.org/page/plugins/chroma.html -.. _ReplayGain: https://beets.readthedocs.org/page/plugins/replaygain.html + +.. _replaygain: https://beets.readthedocs.org/page/plugins/replaygain.html + .. _tempos: https://beets.readthedocs.org/page/plugins/acousticbrainz.html -.. _genres: https://beets.readthedocs.org/page/plugins/lastgenre.html -.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html -.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html -.. _MusicBrainz: https://musicbrainz.org/ -.. _Beatport: https://www.beatport.com + +.. _transcode audio: https://beets.readthedocs.org/page/plugins/convert.html + +.. 
_writing your own plugin: https://beets.readthedocs.org/page/dev/plugins/index.html Install ------- -You can install beets by typing ``pip install beets`` or directly from Github (see details `here`_). -Beets has also been packaged in the `software repositories`_ of several -distributions. Check out the `Getting Started`_ guide for more information. +You can install beets by typing ``pip install beets`` or directly from Github +(see details here_). Beets has also been packaged in the `software +repositories`_ of several distributions. Check out the `Getting Started`_ guide +for more information. + +.. _getting started: https://beets.readthedocs.org/page/guides/main.html .. _here: https://beets.readthedocs.io/en/latest/faq.html#run-the-latest-source-version-of-beets -.. _Getting Started: https://beets.readthedocs.org/page/guides/main.html + .. _software repositories: https://repology.org/project/beets/versions Contribute ---------- -Thank you for considering contributing to ``beets``! Whether you're a -programmer or not, you should be able to find all the info you need at -`CONTRIBUTING.rst`_. +Thank you for considering contributing to ``beets``! Whether you're a programmer +or not, you should be able to find all the info you need at CONTRIBUTING.rst_. -.. _CONTRIBUTING.rst: https://github.com/beetbox/beets/blob/master/CONTRIBUTING.rst +.. _contributing.rst: https://github.com/beetbox/beets/blob/master/CONTRIBUTING.rst Read More --------- -Learn more about beets at `its Web site`_. Follow `@b33ts`_ on Mastodon for -news and updates. +Learn more about beets at `its Web site`_. Follow `@b33ts`_ on Mastodon for news +and updates. -.. _its Web site: https://beets.io/ .. _@b33ts: https://fosstodon.org/@beets +.. _its web site: https://beets.io/ + Contact ------- -* Encountered a bug you'd like to report? Check out our `issue tracker`_! - * If your issue hasn't already been reported, please `open a new ticket`_ - and we'll be in touch with you shortly. 
- * If you'd like to vote on a feature/bug, simply give a :+1: on issues - you'd like to see prioritized over others. -* Need help/support, would like to start a discussion, have an idea for a new - feature, or would just like to introduce yourself to the team? Check out - `GitHub Discussions`_! -.. _GitHub Discussions: https://github.com/beetbox/beets/discussions +- Encountered a bug you'd like to report? Check out our `issue tracker`_! + + - If your issue hasn't already been reported, please `open a new ticket`_ and + we'll be in touch with you shortly. + - If you'd like to vote on a feature/bug, simply give a :+1: on issues you'd + like to see prioritized over others. + - Need help/support, would like to start a discussion, have an idea for a new + feature, or would just like to introduce yourself to the team? Check out + `GitHub Discussions`_! + +.. _github discussions: https://github.com/beetbox/beets/discussions + .. _issue tracker: https://github.com/beetbox/beets/issues + .. _open a new ticket: https://github.com/beetbox/beets/issues/new/choose Authors @@ -126,4 +143,4 @@ Authors Beets is by `Adrian Sampson`_ with a supporting cast of thousands. -.. _Adrian Sampson: https://www.cs.cornell.edu/~asampson/ +.. _adrian sampson: https://www.cs.cornell.edu/~asampson/ diff --git a/README_kr.rst b/README_kr.rst index c12fc8b71..803229425 100644 --- a/README_kr.rst +++ b/README_kr.rst @@ -1,108 +1,119 @@ -.. image:: https://img.shields.io/pypi/v/beets.svg - :target: https://pypi.python.org/pypi/beets - -.. image:: https://img.shields.io/codecov/c/github/beetbox/beets.svg - :target: https://codecov.io/github/beetbox/beets - -.. image:: https://travis-ci.org/beetbox/beets.svg?branch=master - :target: https://travis-ci.org/beetbox/beets - - -beets -===== - -Beets는 강박적인 음악을 듣는 사람들을 위한 미디어 라이브러리 관리 시스템이다. - -Beets의 목적은 음악들을 한번에 다 받는 것이다. -음악들을 카탈로그화 하고, 자동으로 메타 데이터를 개선한다. -그리고 음악에 접근하고 조작할 수 있는 도구들을 제공한다. - -다음은 Beets의 brainy tag corrector가 한 일의 예시이다. 
- - $ beet import ~/music/ladytron - Tagging: - Ladytron - Witching Hour - (Similarity: 98.4%) - * Last One Standing -> The Last One Standing - * Beauty -> Beauty*2 - * White Light Generation -> Whitelightgenerator - * All the Way -> All the Way... - -Beets는 라이브러리로 디자인 되었기 때문에, 당신이 음악들에 대해 상상하는 모든 것을 할 수 있다. -`plugins`_ 을 통해서 모든 것을 할 수 있는 것이다! - -- 필요하는 메타 데이터를 계산하거나 패치 할 때: `album art`_, - `lyrics`_, `genres`_, `tempos`_, `ReplayGain`_ levels, or `acoustic - fingerprints`_. -- `MusicBrainz`_, `Discogs`_,`Beatport`_로부터 메타데이터를 가져오거나, - 노래 제목이나 음향 특징으로 메타데이터를 추측한다 -- `Transcode audio`_ 당신이 좋아하는 어떤 포맷으로든 변경한다. -- 당신의 라이브러리에서 `duplicate tracks and albums`_ 이나 `albums that are missing tracks`_ 를 검사한다. -- 남이 남기거나, 좋지 않은 도구로 남긴 잡다한 태그들을 지운다. -- 파일의 메타데이터에서 앨범 아트를 삽입이나 추출한다. -- 당신의 음악들을 `HTML5 Audio`_ 를 지원하는 어떤 브라우저든 재생할 수 있고, - 웹 브라우저에 표시 할 수 있다. -- 명령어로부터 음악 파일의 메타데이터를 분석할 수 있다. -- `MPD`_ 프로토콜을 사용하여 음악 플레이어로 음악을 들으면, 엄청나게 다양한 인터페이스로 작동한다. - -만약 Beets에 당신이 원하는게 아직 없다면, -당신이 python을 안다면 `writing your own plugin`_ _은 놀라울정도로 간단하다. - -.. _plugins: https://beets.readthedocs.org/page/plugins/ -.. _MPD: https://www.musicpd.org/ -.. _MusicBrainz music collection: https://musicbrainz.org/doc/Collections/ -.. _writing your own plugin: - https://beets.readthedocs.org/page/dev/plugins.html -.. _HTML5 Audio: - https://html.spec.whatwg.org/multipage/media.html#the-audio-element -.. _albums that are missing tracks: - https://beets.readthedocs.org/page/plugins/missing.html -.. _duplicate tracks and albums: - https://beets.readthedocs.org/page/plugins/duplicates.html -.. _Transcode audio: - https://beets.readthedocs.org/page/plugins/convert.html -.. _Discogs: https://www.discogs.com/ -.. _acoustic fingerprints: - https://beets.readthedocs.org/page/plugins/chroma.html -.. _ReplayGain: https://beets.readthedocs.org/page/plugins/replaygain.html -.. _tempos: https://beets.readthedocs.org/page/plugins/acousticbrainz.html -.. 
_genres: https://beets.readthedocs.org/page/plugins/lastgenre.html -.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html -.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html -.. _MusicBrainz: https://musicbrainz.org/ -.. _Beatport: https://www.beatport.com - -설치 -------- - -당신은 ``pip install beets`` 을 사용해서 Beets를 설치할 수 있다. -그리고 `Getting Started`_ 가이드를 확인할 수 있다. - -.. _Getting Started: https://beets.readthedocs.org/page/guides/main.html - -컨트리뷰션 ----------- - -어떻게 도우려는지 알고싶다면 `Hacking`_ 위키페이지를 확인하라. -당신은 docs 안에 `For Developers`_ 에도 관심이 있을수 있다. - -.. _Hacking: https://github.com/beetbox/beets/wiki/Hacking -.. _For Developers: https://beets.readthedocs.io/en/stable/dev/ - -Read More ---------- - -`its Web site`_ 에서 Beets에 대해 조금 더 알아볼 수 있다. -트위터에서 `@b33ts`_ 를 팔로우하면 새 소식을 볼 수 있다. - -.. _its Web site: https://beets.io/ -.. _@b33ts: https://twitter.com/b33ts/ - -저자들 -------- - -`Adrian Sampson`_ 와 많은 사람들의 지지를 받아 Beets를 만들었다. -돕고 싶다면 `forum`_.를 방문하면 된다. - -.. _forum: https://github.com/beetbox/beets/discussions/ -.. _Adrian Sampson: https://www.cs.cornell.edu/~asampson/ +.. image:: https://img.shields.io/pypi/v/beets.svg + :target: https://pypi.python.org/pypi/beets + +.. image:: https://img.shields.io/codecov/c/github/beetbox/beets.svg + :target: https://codecov.io/github/beetbox/beets + +.. image:: https://travis-ci.org/beetbox/beets.svg?branch=master + :target: https://travis-ci.org/beetbox/beets + +beets +===== + +Beets는 강박적인 음악을 듣는 사람들을 위한 미디어 라이브러리 관리 시스템이다. + +Beets의 목적은 음악들을 한번에 다 받는 것이다. 음악들을 카탈로그화 하고, 자동으로 메타 데이터를 개선한다. 그리고 음악에 접근하고 조작할 +수 있는 도구들을 제공한다. + +다음은 Beets의 brainy tag corrector가 한 일의 예시이다. + +:: + + $ beet import ~/music/ladytron + Tagging: + Ladytron - Witching Hour + (Similarity: 98.4%) + * Last One Standing -> The Last One Standing + * Beauty -> Beauty*2 + * White Light Generation -> Whitelightgenerator + * All the Way -> All the Way... + +Beets는 라이브러리로 디자인 되었기 때문에, 당신이 음악들에 대해 상상하는 모든 것을 할 수 있다. 
plugins_ 을 통해서 모든 것을 할 +수 있는 것이다! + +- 필요하는 메타 데이터를 계산하거나 패치 할 때: `album art`_, lyrics_, genres_, tempos_, + ReplayGain_ levels, or `acoustic fingerprints`_. +- MusicBrainz_, Discogs_,`Beatport`_로부터 메타데이터를 가져오거나, 노래 제목이나 음향 특징으로 메타데이터를 + 추측한다 +- `Transcode audio`_ 당신이 좋아하는 어떤 포맷으로든 변경한다. +- 당신의 라이브러리에서 `duplicate tracks and albums`_ 이나 `albums that are missing + tracks`_ 를 검사한다. +- 남이 남기거나, 좋지 않은 도구로 남긴 잡다한 태그들을 지운다. +- 파일의 메타데이터에서 앨범 아트를 삽입이나 추출한다. +- 당신의 음악들을 `HTML5 Audio`_ 를 지원하는 어떤 브라우저든 재생할 수 있고, 웹 브라우저에 표시 할 수 있다. +- 명령어로부터 음악 파일의 메타데이터를 분석할 수 있다. +- MPD_ 프로토콜을 사용하여 음악 플레이어로 음악을 들으면, 엄청나게 다양한 인터페이스로 작동한다. + +만약 Beets에 당신이 원하는게 아직 없다면, 당신이 python을 안다면 `writing your own plugin`_ _은 놀라울정도로 +간단하다. + +.. _acoustic fingerprints: https://beets.readthedocs.org/page/plugins/chroma.html + +.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html + +.. _albums that are missing tracks: https://beets.readthedocs.org/page/plugins/missing.html + +.. _beatport: https://www.beatport.com + +.. _discogs: https://www.discogs.com/ + +.. _duplicate tracks and albums: https://beets.readthedocs.org/page/plugins/duplicates.html + +.. _genres: https://beets.readthedocs.org/page/plugins/lastgenre.html + +.. _html5 audio: https://html.spec.whatwg.org/multipage/media.html#the-audio-element + +.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html + +.. _mpd: https://www.musicpd.org/ + +.. _musicbrainz: https://musicbrainz.org/ + +.. _musicbrainz music collection: https://musicbrainz.org/doc/Collections/ + +.. _plugins: https://beets.readthedocs.org/page/plugins/ + +.. _replaygain: https://beets.readthedocs.org/page/plugins/replaygain.html + +.. _tempos: https://beets.readthedocs.org/page/plugins/acousticbrainz.html + +.. _transcode audio: https://beets.readthedocs.org/page/plugins/convert.html + +.. _writing your own plugin: https://beets.readthedocs.org/page/dev/plugins/index.html + +설치 +------- + +당신은 ``pip install beets`` 을 사용해서 Beets를 설치할 수 있다. 
그리고 `Getting Started`_ 가이드를 +확인할 수 있다. + +.. _getting started: https://beets.readthedocs.org/page/guides/main.html + +컨트리뷰션 +---------- + +어떻게 도우려는지 알고싶다면 Hacking_ 위키페이지를 확인하라. 당신은 docs 안에 `For Developers`_ 에도 관심이 있을수 +있다. + +.. _for developers: https://beets.readthedocs.io/en/stable/dev/ + +.. _hacking: https://github.com/beetbox/beets/wiki/Hacking + +Read More +--------- + +`its Web site`_ 에서 Beets에 대해 조금 더 알아볼 수 있다. 트위터에서 `@b33ts`_ 를 팔로우하면 새 소식을 볼 수 +있다. + +.. _@b33ts: https://twitter.com/b33ts/ + +.. _its web site: https://beets.io/ + +저자들 +------- + +`Adrian Sampson`_ 와 많은 사람들의 지지를 받아 Beets를 만들었다. 돕고 싶다면 forum_.를 방문하면 된다. + +.. _adrian sampson: https://www.cs.cornell.edu/~asampson/ + +.. _forum: https://github.com/beetbox/beets/discussions/ diff --git a/beets/__init__.py b/beets/__init__.py index 845d251ae..2c6069b29 100644 --- a/beets/__init__.py +++ b/beets/__init__.py @@ -17,10 +17,21 @@ from sys import stderr import confuse -__version__ = "2.2.0" +from .util.deprecation import deprecate_imports + +__version__ = "2.5.1" __author__ = "Adrian Sampson " +def __getattr__(name: str): + """Handle deprecated imports.""" + return deprecate_imports( + __name__, + {"art": "beetsplug._utils", "vfs": "beetsplug._utils"}, + name, + ) + + class IncludeLazyConfig(confuse.LazyConfig): """A version of Confuse's LazyConfig that also merges in data from YAML files specified in an `include` setting. 
@@ -35,7 +46,7 @@ class IncludeLazyConfig(confuse.LazyConfig): except confuse.NotFoundError: pass except confuse.ConfigReadError as err: - stderr.write("configuration `import` failed: {}".format(err.reason)) + stderr.write(f"configuration `import` failed: {err.reason}") config = IncludeLazyConfig("beets", __name__) diff --git a/beets/autotag/__init__.py b/beets/autotag/__init__.py index 42f957b0d..feeefbf28 100644 --- a/beets/autotag/__init__.py +++ b/beets/autotag/__init__.py @@ -14,36 +14,48 @@ """Facilities for automatically determining files' correct metadata.""" -from collections.abc import Mapping, Sequence -from typing import Union +from __future__ import annotations + +from importlib import import_module +from typing import TYPE_CHECKING from beets import config, logging -from beets.library import Album, Item, LibModel # Parts of external interface. from beets.util import unique_list +from beets.util.deprecation import deprecate_for_maintainers, deprecate_imports + +from .hooks import AlbumInfo, AlbumMatch, TrackInfo, TrackMatch +from .match import Proposal, Recommendation, tag_album, tag_item + +if TYPE_CHECKING: + from collections.abc import Sequence + + from beets.library import Album, Item, LibModel + + +def __getattr__(name: str): + if name == "current_metadata": + deprecate_for_maintainers( + f"'beets.autotag.{name}'", "'beets.util.get_most_common_tags'" + ) + return import_module("beets.util").get_most_common_tags + + return deprecate_imports( + __name__, {"Distance": "beets.autotag.distance"}, name + ) -from .hooks import AlbumInfo, AlbumMatch, Distance, TrackInfo, TrackMatch -from .match import ( - Proposal, - Recommendation, - current_metadata, - tag_album, - tag_item, -) __all__ = [ "AlbumInfo", "AlbumMatch", - "Distance", - "TrackInfo", - "TrackMatch", "Proposal", "Recommendation", + "TrackInfo", + "TrackMatch", "apply_album_metadata", "apply_item_metadata", "apply_metadata", - "current_metadata", "tag_album", "tag_item", ] @@ -99,8 +111,8 @@ 
SPECIAL_FIELDS = { def _apply_metadata( - info: Union[AlbumInfo, TrackInfo], - db_obj: Union[Album, Item], + info: AlbumInfo | TrackInfo, + db_obj: Album | Item, nullable_fields: Sequence[str] = [], ): """Set the db_obj's metadata to match the info.""" @@ -192,11 +204,11 @@ def apply_album_metadata(album_info: AlbumInfo, album: Album): correct_list_fields(album) -def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]): - """Set the items' metadata to match an AlbumInfo object using a - mapping from Items to TrackInfo objects. - """ - for item, track_info in mapping.items(): +def apply_metadata( + album_info: AlbumInfo, item_info_pairs: list[tuple[Item, TrackInfo]] +): + """Set items metadata to match corresponding tagged info.""" + for item, track_info in item_info_pairs: # Artist or artist credit. if config["artist_credit"]: item.artist = ( @@ -243,7 +255,7 @@ def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]): continue for suffix in "year", "month", "day": - key = prefix + suffix + key = f"{prefix}{suffix}" value = getattr(album_info, key) or 0 # If we don't even have a year, apply nothing. diff --git a/beets/autotag/distance.py b/beets/autotag/distance.py new file mode 100644 index 000000000..5e3f630e3 --- /dev/null +++ b/beets/autotag/distance.py @@ -0,0 +1,535 @@ +from __future__ import annotations + +import datetime +import re +from functools import cache, total_ordering +from typing import TYPE_CHECKING, Any + +from jellyfish import levenshtein_distance +from unidecode import unidecode + +from beets import config, metadata_plugins +from beets.util import as_string, cached_classproperty, get_most_common_tags + +if TYPE_CHECKING: + from collections.abc import Iterator, Sequence + + from beets.library import Item + + from .hooks import AlbumInfo, TrackInfo + +# Candidate distance scoring. + +# Artist signals that indicate "various artists". 
These are used at the +# album level to determine whether a given release is likely a VA +# release and also on the track level to to remove the penalty for +# differing artists. +VA_ARTISTS = ("", "various artists", "various", "va", "unknown") + +# Parameters for string distance function. +# Words that can be moved to the end of a string using a comma. +SD_END_WORDS = ["the", "a", "an"] +# Reduced weights for certain portions of the string. +SD_PATTERNS = [ + (r"^the ", 0.1), + (r"[\[\(]?(ep|single)[\]\)]?", 0.0), + (r"[\[\(]?(featuring|feat|ft)[\. :].+", 0.1), + (r"\(.*?\)", 0.3), + (r"\[.*?\]", 0.3), + (r"(, )?(pt\.|part) .+", 0.2), +] +# Replacements to use before testing distance. +SD_REPLACE = [ + (r"&", "and"), +] + + +def _string_dist_basic(str1: str, str2: str) -> float: + """Basic edit distance between two strings, ignoring + non-alphanumeric characters and case. Comparisons are based on a + transliteration/lowering to ASCII characters. Normalized by string + length. + """ + assert isinstance(str1, str) + assert isinstance(str2, str) + str1 = as_string(unidecode(str1)) + str2 = as_string(unidecode(str2)) + str1 = re.sub(r"[^a-z0-9]", "", str1.lower()) + str2 = re.sub(r"[^a-z0-9]", "", str2.lower()) + if not str1 and not str2: + return 0.0 + return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2))) + + +def string_dist(str1: str | None, str2: str | None) -> float: + """Gives an "intuitive" edit distance between two strings. This is + an edit distance, normalized by the string length, with a number of + tweaks that reflect intuition about text. + """ + if str1 is None and str2 is None: + return 0.0 + if str1 is None or str2 is None: + return 1.0 + + str1 = str1.lower() + str2 = str2.lower() + + # Don't penalize strings that move certain words to the end. For + # example, "the something" should be considered equal to + # "something, the". 
+ for word in SD_END_WORDS: + if str1.endswith(f", {word}"): + str1 = f"{word} {str1[: -len(word) - 2]}" + if str2.endswith(f", {word}"): + str2 = f"{word} {str2[: -len(word) - 2]}" + + # Perform a couple of basic normalizing substitutions. + for pat, repl in SD_REPLACE: + str1 = re.sub(pat, repl, str1) + str2 = re.sub(pat, repl, str2) + + # Change the weight for certain string portions matched by a set + # of regular expressions. We gradually change the strings and build + # up penalties associated with parts of the string that were + # deleted. + base_dist = _string_dist_basic(str1, str2) + penalty = 0.0 + for pat, weight in SD_PATTERNS: + # Get strings that drop the pattern. + case_str1 = re.sub(pat, "", str1) + case_str2 = re.sub(pat, "", str2) + + if case_str1 != str1 or case_str2 != str2: + # If the pattern was present (i.e., it is deleted in the + # the current case), recalculate the distances for the + # modified strings. + case_dist = _string_dist_basic(case_str1, case_str2) + case_delta = max(0.0, base_dist - case_dist) + if case_delta == 0.0: + continue + + # Shift our baseline strings down (to avoid rematching the + # same part of the string) and add a scaled distance + # amount to the penalties. + str1 = case_str1 + str2 = case_str2 + base_dist = case_dist + penalty += weight * case_delta + + return base_dist + penalty + + +@total_ordering +class Distance: + """Keeps track of multiple distance penalties. Provides a single + weighted distance for all penalties as well as a weighted distance + for each individual penalty. 
+ """ + + def __init__(self) -> None: + self._penalties: dict[str, list[float]] = {} + self.tracks: dict[TrackInfo, Distance] = {} + + @cached_classproperty + def _weights(cls) -> dict[str, float]: + """A dictionary from keys to floating-point weights.""" + weights_view = config["match"]["distance_weights"] + weights = {} + for key in weights_view.keys(): + weights[key] = weights_view[key].as_number() + return weights + + # Access the components and their aggregates. + + @property + def distance(self) -> float: + """Return a weighted and normalized distance across all + penalties. + """ + dist_max = self.max_distance + if dist_max: + return self.raw_distance / self.max_distance + return 0.0 + + @property + def max_distance(self) -> float: + """Return the maximum distance penalty (normalization factor).""" + dist_max = 0.0 + for key, penalty in self._penalties.items(): + dist_max += len(penalty) * self._weights[key] + return dist_max + + @property + def raw_distance(self) -> float: + """Return the raw (denormalized) distance.""" + dist_raw = 0.0 + for key, penalty in self._penalties.items(): + dist_raw += sum(penalty) * self._weights[key] + return dist_raw + + def items(self) -> list[tuple[str, float]]: + """Return a list of (key, dist) pairs, with `dist` being the + weighted distance, sorted from highest to lowest. Does not + include penalties with a zero value. + """ + list_ = [] + for key in self._penalties: + dist = self[key] + if dist: + list_.append((key, dist)) + # Convert distance into a negative float we can sort items in + # ascending order (for keys, when the penalty is equal) and + # still get the items with the biggest distance first. + return sorted( + list_, key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0]) + ) + + def __hash__(self) -> int: + return id(self) + + def __eq__(self, other) -> bool: + return self.distance == other + + # Behave like a float. 
+ + def __lt__(self, other) -> bool: + return self.distance < other + + def __float__(self) -> float: + return self.distance + + def __sub__(self, other) -> float: + return self.distance - other + + def __rsub__(self, other) -> float: + return other - self.distance + + def __str__(self) -> str: + return f"{self.distance:.2f}" + + # Behave like a dict. + + def __getitem__(self, key) -> float: + """Returns the weighted distance for a named penalty.""" + dist = sum(self._penalties[key]) * self._weights[key] + dist_max = self.max_distance + if dist_max: + return dist / dist_max + return 0.0 + + def __iter__(self) -> Iterator[tuple[str, float]]: + return iter(self.items()) + + def __len__(self) -> int: + return len(self.items()) + + def keys(self) -> list[str]: + return [key for key, _ in self.items()] + + def update(self, dist: Distance): + """Adds all the distance penalties from `dist`.""" + if not isinstance(dist, Distance): + raise ValueError( + f"`dist` must be a Distance object, not {type(dist)}" + ) + for key, penalties in dist._penalties.items(): + self._penalties.setdefault(key, []).extend(penalties) + + # Adding components. + + def _eq(self, value1: re.Pattern[str] | Any, value2: Any) -> bool: + """Returns True if `value1` is equal to `value2`. `value1` may + be a compiled regular expression, in which case it will be + matched against `value2`. + """ + if isinstance(value1, re.Pattern): + return bool(value1.match(value2)) + return value1 == value2 + + def add(self, key: str, dist: float): + """Adds a distance penalty. `key` must correspond with a + configured weight setting. `dist` must be a float between 0.0 + and 1.0, and will be added to any existing distance penalties + for the same key. + """ + if not 0.0 <= dist <= 1.0: + raise ValueError(f"`dist` must be between 0.0 and 1.0, not {dist}") + self._penalties.setdefault(key, []).append(dist) + + def add_equality( + self, + key: str, + value: Any, + options: list[Any] | tuple[Any, ...] 
| Any, + ): + """Adds a distance penalty of 1.0 if `value` doesn't match any + of the values in `options`. If an option is a compiled regular + expression, it will be considered equal if it matches against + `value`. + """ + if not isinstance(options, (list, tuple)): + options = [options] + for opt in options: + if self._eq(opt, value): + dist = 0.0 + break + else: + dist = 1.0 + self.add(key, dist) + + def add_expr(self, key: str, expr: bool): + """Adds a distance penalty of 1.0 if `expr` evaluates to True, + or 0.0. + """ + if expr: + self.add(key, 1.0) + else: + self.add(key, 0.0) + + def add_number(self, key: str, number1: int, number2: int): + """Adds a distance penalty of 1.0 for each number of difference + between `number1` and `number2`, or 0.0 when there is no + difference. Use this when there is no upper limit on the + difference between the two numbers. + """ + diff = abs(number1 - number2) + if diff: + for i in range(diff): + self.add(key, 1.0) + else: + self.add(key, 0.0) + + def add_priority( + self, + key: str, + value: Any, + options: list[Any] | tuple[Any, ...] | Any, + ): + """Adds a distance penalty that corresponds to the position at + which `value` appears in `options`. A distance penalty of 0.0 + for the first option, or 1.0 if there is no matching option. If + an option is a compiled regular expression, it will be + considered equal if it matches against `value`. + """ + if not isinstance(options, (list, tuple)): + options = [options] + unit = 1.0 / (len(options) or 1) + for i, opt in enumerate(options): + if self._eq(opt, value): + dist = i * unit + break + else: + dist = 1.0 + self.add(key, dist) + + def add_ratio( + self, + key: str, + number1: int | float, + number2: int | float, + ): + """Adds a distance penalty for `number1` as a ratio of `number2`. + `number1` is bound at 0 and `number2`. 
+ """ + number = float(max(min(number1, number2), 0)) + if number2: + dist = number / number2 + else: + dist = 0.0 + self.add(key, dist) + + def add_string(self, key: str, str1: str | None, str2: str | None): + """Adds a distance penalty based on the edit distance between + `str1` and `str2`. + """ + dist = string_dist(str1, str2) + self.add(key, dist) + + def add_data_source(self, before: str | None, after: str | None) -> None: + if before != after and ( + before or len(metadata_plugins.find_metadata_source_plugins()) > 1 + ): + self.add("data_source", metadata_plugins.get_penalty(after)) + + +@cache +def get_track_length_grace() -> float: + """Get cached grace period for track length matching.""" + return config["match"]["track_length_grace"].as_number() + + +@cache +def get_track_length_max() -> float: + """Get cached maximum track length for track length matching.""" + return config["match"]["track_length_max"].as_number() + + +def track_index_changed(item: Item, track_info: TrackInfo) -> bool: + """Returns True if the item and track info index is different. Tolerates + per disc and per release numbering. + """ + return item.track not in (track_info.medium_index, track_info.index) + + +def track_distance( + item: Item, + track_info: TrackInfo, + incl_artist: bool = False, +) -> Distance: + """Determines the significance of a track metadata change. Returns a + Distance object. `incl_artist` indicates that a distance component should + be included for the track artist (i.e., for various-artist releases). + + ``track_length_grace`` and ``track_length_max`` configuration options are + cached because this function is called many times during the matching + process and their access comes with a performance overhead. + """ + dist = Distance() + + # Length. + if info_length := track_info.length: + diff = abs(item.length - info_length) - get_track_length_grace() + dist.add_ratio("track_length", diff, get_track_length_max()) + + # Title. 
+ dist.add_string("track_title", item.title, track_info.title) + + # Artist. Only check if there is actually an artist in the track data. + if ( + incl_artist + and track_info.artist + and item.artist.lower() not in VA_ARTISTS + ): + dist.add_string("track_artist", item.artist, track_info.artist) + + # Track index. + if track_info.index and item.track: + dist.add_expr("track_index", track_index_changed(item, track_info)) + + # Track ID. + if item.mb_trackid: + dist.add_expr("track_id", item.mb_trackid != track_info.track_id) + + # Penalize mismatching disc numbers. + if track_info.medium and item.disc: + dist.add_expr("medium", item.disc != track_info.medium) + + dist.add_data_source(item.get("data_source"), track_info.data_source) + + return dist + + +def distance( + items: Sequence[Item], + album_info: AlbumInfo, + item_info_pairs: list[tuple[Item, TrackInfo]], +) -> Distance: + """Determines how "significant" an album metadata change would be. + Returns a Distance object. `album_info` is an AlbumInfo object + reflecting the album to be compared. `items` is a sequence of all + Item objects that will be matched (order is not important). + `mapping` is a dictionary mapping Items to TrackInfo objects; the + keys are a subset of `items` and the values are a subset of + `album_info.tracks`. + """ + likelies, _ = get_most_common_tags(items) + + dist = Distance() + + # Artist, if not various. + if not album_info.va: + dist.add_string("artist", likelies["artist"], album_info.artist) + + # Album. + dist.add_string("album", likelies["album"], album_info.album) + + preferred_config = config["match"]["preferred"] + # Current or preferred media. + if album_info.media: + # Preferred media options. + media_patterns: Sequence[str] = preferred_config["media"].as_str_seq() + options = [ + re.compile(rf"(\d+x)?({pat})", re.I) for pat in media_patterns + ] + if options: + dist.add_priority("media", album_info.media, options) + # Current media. 
+ elif likelies["media"]: + dist.add_equality("media", album_info.media, likelies["media"]) + + # Mediums. + if likelies["disctotal"] and album_info.mediums: + dist.add_number("mediums", likelies["disctotal"], album_info.mediums) + + # Prefer earliest release. + if album_info.year and preferred_config["original_year"]: + # Assume 1889 (earliest first gramophone discs) if we don't know the + # original year. + original = album_info.original_year or 1889 + diff = abs(album_info.year - original) + diff_max = abs(datetime.date.today().year - original) + dist.add_ratio("year", diff, diff_max) + # Year. + elif likelies["year"] and album_info.year: + if likelies["year"] in (album_info.year, album_info.original_year): + # No penalty for matching release or original year. + dist.add("year", 0.0) + elif album_info.original_year: + # Prefer matchest closest to the release year. + diff = abs(likelies["year"] - album_info.year) + diff_max = abs( + datetime.date.today().year - album_info.original_year + ) + dist.add_ratio("year", diff, diff_max) + else: + # Full penalty when there is no original year. + dist.add("year", 1.0) + + # Preferred countries. + country_patterns: Sequence[str] = preferred_config["countries"].as_str_seq() + options = [re.compile(pat, re.I) for pat in country_patterns] + if album_info.country and options: + dist.add_priority("country", album_info.country, options) + # Country. + elif likelies["country"] and album_info.country: + dist.add_string("country", likelies["country"], album_info.country) + + # Label. + if likelies["label"] and album_info.label: + dist.add_string("label", likelies["label"], album_info.label) + + # Catalog number. + if likelies["catalognum"] and album_info.catalognum: + dist.add_string( + "catalognum", likelies["catalognum"], album_info.catalognum + ) + + # Disambiguation. 
+ if likelies["albumdisambig"] and album_info.albumdisambig: + dist.add_string( + "albumdisambig", likelies["albumdisambig"], album_info.albumdisambig + ) + + # Album ID. + if likelies["mb_albumid"]: + dist.add_equality( + "album_id", likelies["mb_albumid"], album_info.album_id + ) + + # Tracks. + dist.tracks = {} + for item, track in item_info_pairs: + dist.tracks[track] = track_distance(item, track, album_info.va) + dist.add("tracks", dist.tracks[track].distance) + + # Missing tracks. + for _ in range(len(album_info.tracks) - len(item_info_pairs)): + dist.add("missing_tracks", 1.0) + + # Unmatched tracks. + for _ in range(len(items) - len(item_info_pairs)): + dist.add("unmatched_tracks", 1.0) + + dist.add_data_source(likelies["data_source"], album_info.data_source) + + return dist diff --git a/beets/autotag/hooks.py b/beets/autotag/hooks.py index 81cfd7bb2..82e685b7a 100644 --- a/beets/autotag/hooks.py +++ b/beets/autotag/hooks.py @@ -16,675 +16,246 @@ from __future__ import annotations -import re -from functools import total_ordering -from typing import TYPE_CHECKING, Any, Callable, NamedTuple, TypeVar, cast +from copy import deepcopy +from dataclasses import dataclass +from functools import cached_property +from typing import TYPE_CHECKING, Any, TypeVar -from jellyfish import levenshtein_distance -from unidecode import unidecode +from typing_extensions import Self -from beets import config, logging, plugins -from beets.autotag import mb -from beets.util import as_string, cached_classproperty +from beets.util import cached_classproperty if TYPE_CHECKING: - from collections.abc import Iterable, Iterator - from beets.library import Item -log = logging.getLogger("beets") + from .distance import Distance V = TypeVar("V") # Classes used to represent candidate options. class AttrDict(dict[str, V]): - """A dictionary that supports attribute ("dot") access, so `d.field` - is equivalent to `d['field']`. 
- """ + """Mapping enabling attribute-style access to stored metadata values.""" + + def copy(self) -> Self: + return deepcopy(self) def __getattr__(self, attr: str) -> V: if attr in self: return self[attr] - else: - raise AttributeError - def __setattr__(self, key: str, value: V): + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{attr}'" + ) + + def __setattr__(self, key: str, value: V) -> None: self.__setitem__(key, value) - def __hash__(self): + def __hash__(self) -> int: # type: ignore[override] return id(self) -class AlbumInfo(AttrDict): - """Describes a canonical release that may be used to match a release - in the library. Consists of these data members: +class Info(AttrDict[Any]): + """Container for metadata about a musical entity.""" - - ``album``: the release title - - ``album_id``: MusicBrainz ID; UUID fragment only - - ``artist``: name of the release's primary artist - - ``artist_id`` - - ``tracks``: list of TrackInfo objects making up the release + @cached_property + def name(self) -> str: + raise NotImplementedError - ``mediums`` along with the fields up through ``tracks`` are required. - The others are optional and may be None. 
+ def __init__( + self, + album: str | None = None, + artist_credit: str | None = None, + artist_id: str | None = None, + artist: str | None = None, + artists_credit: list[str] | None = None, + artists_ids: list[str] | None = None, + artists: list[str] | None = None, + artist_sort: str | None = None, + artists_sort: list[str] | None = None, + data_source: str | None = None, + data_url: str | None = None, + genre: str | None = None, + media: str | None = None, + **kwargs, + ) -> None: + self.album = album + self.artist = artist + self.artist_credit = artist_credit + self.artist_id = artist_id + self.artists = artists or [] + self.artists_credit = artists_credit or [] + self.artists_ids = artists_ids or [] + self.artist_sort = artist_sort + self.artists_sort = artists_sort or [] + self.data_source = data_source + self.data_url = data_url + self.genre = genre + self.media = media + self.update(kwargs) + + +class AlbumInfo(Info): + """Metadata snapshot representing a single album candidate. + + Aggregates track entries and album-wide context gathered from an external + provider. Used during matching to evaluate similarity against a group of + user items, and later to drive tagging decisions once selected. """ - # TYPING: are all of these correct? 
I've assumed optional strings + @cached_property + def name(self) -> str: + return self.album or "" + def __init__( self, tracks: list[TrackInfo], - album: str | None = None, + *, album_id: str | None = None, - artist: str | None = None, - artist_id: str | None = None, - artists: list[str] | None = None, - artists_ids: list[str] | None = None, - asin: str | None = None, + albumdisambig: str | None = None, + albumstatus: str | None = None, albumtype: str | None = None, albumtypes: list[str] | None = None, + asin: str | None = None, + barcode: str | None = None, + catalognum: str | None = None, + country: str | None = None, + day: int | None = None, + discogs_albumid: str | None = None, + discogs_artistid: str | None = None, + discogs_labelid: str | None = None, + label: str | None = None, + language: str | None = None, + mediums: int | None = None, + month: int | None = None, + original_day: int | None = None, + original_month: int | None = None, + original_year: int | None = None, + release_group_title: str | None = None, + releasegroup_id: str | None = None, + releasegroupdisambig: str | None = None, + script: str | None = None, + style: str | None = None, va: bool = False, year: int | None = None, - month: int | None = None, - day: int | None = None, - label: str | None = None, - barcode: str | None = None, - mediums: int | None = None, - artist_sort: str | None = None, - artists_sort: list[str] | None = None, - releasegroup_id: str | None = None, - release_group_title: str | None = None, - catalognum: str | None = None, - script: str | None = None, - language: str | None = None, - country: str | None = None, - style: str | None = None, - genre: str | None = None, - albumstatus: str | None = None, - media: str | None = None, - albumdisambig: str | None = None, - releasegroupdisambig: str | None = None, - artist_credit: str | None = None, - artists_credit: list[str] | None = None, - original_year: int | None = None, - original_month: int | None = None, - 
original_day: int | None = None, - data_source: str | None = None, - data_url: str | None = None, - discogs_albumid: str | None = None, - discogs_labelid: str | None = None, - discogs_artistid: str | None = None, **kwargs, - ): - self.album = album - self.album_id = album_id - self.artist = artist - self.artist_id = artist_id - self.artists = artists or [] - self.artists_ids = artists_ids or [] + ) -> None: self.tracks = tracks - self.asin = asin + self.album_id = album_id + self.albumdisambig = albumdisambig + self.albumstatus = albumstatus self.albumtype = albumtype self.albumtypes = albumtypes or [] + self.asin = asin + self.barcode = barcode + self.catalognum = catalognum + self.country = country + self.day = day + self.discogs_albumid = discogs_albumid + self.discogs_artistid = discogs_artistid + self.discogs_labelid = discogs_labelid + self.label = label + self.language = language + self.mediums = mediums + self.month = month + self.original_day = original_day + self.original_month = original_month + self.original_year = original_year + self.release_group_title = release_group_title + self.releasegroup_id = releasegroup_id + self.releasegroupdisambig = releasegroupdisambig + self.script = script + self.style = style self.va = va self.year = year - self.month = month - self.day = day - self.label = label - self.barcode = barcode - self.mediums = mediums - self.artist_sort = artist_sort - self.artists_sort = artists_sort or [] - self.releasegroup_id = releasegroup_id - self.release_group_title = release_group_title - self.catalognum = catalognum - self.script = script - self.language = language - self.country = country - self.style = style - self.genre = genre - self.albumstatus = albumstatus - self.media = media - self.albumdisambig = albumdisambig - self.releasegroupdisambig = releasegroupdisambig - self.artist_credit = artist_credit - self.artists_credit = artists_credit or [] - self.original_year = original_year - self.original_month = original_month - 
self.original_day = original_day - self.data_source = data_source - self.data_url = data_url - self.discogs_albumid = discogs_albumid - self.discogs_labelid = discogs_labelid - self.discogs_artistid = discogs_artistid - self.update(kwargs) - - def copy(self) -> AlbumInfo: - dupe = AlbumInfo([]) - dupe.update(self) - dupe.tracks = [track.copy() for track in self.tracks] - return dupe + super().__init__(**kwargs) -class TrackInfo(AttrDict): - """Describes a canonical track present on a release. Appears as part - of an AlbumInfo's ``tracks`` list. Consists of these data members: +class TrackInfo(Info): + """Metadata snapshot for a single track candidate. - - ``title``: name of the track - - ``track_id``: MusicBrainz ID; UUID fragment only - - Only ``title`` and ``track_id`` are required. The rest of the fields - may be None. The indices ``index``, ``medium``, and ``medium_index`` - are all 1-based. + Captures identifying details and creative credits used to compare against + a user's item. Instances often originate within an AlbumInfo but may also + stand alone for singleton matching. """ - # TYPING: are all of these correct? 
I've assumed optional strings + @cached_property + def name(self) -> str: + return self.title or "" + def __init__( self, - title: str | None = None, - track_id: str | None = None, - release_track_id: str | None = None, - artist: str | None = None, - artist_id: str | None = None, - artists: list[str] | None = None, - artists_ids: list[str] | None = None, - length: float | None = None, + *, + arranger: str | None = None, + bpm: str | None = None, + composer: str | None = None, + composer_sort: str | None = None, + disctitle: str | None = None, index: int | None = None, + initial_key: str | None = None, + length: float | None = None, + lyricist: str | None = None, + mb_workid: str | None = None, medium: int | None = None, medium_index: int | None = None, medium_total: int | None = None, - artist_sort: str | None = None, - artists_sort: list[str] | None = None, - disctitle: str | None = None, - artist_credit: str | None = None, - artists_credit: list[str] | None = None, - data_source: str | None = None, - data_url: str | None = None, - media: str | None = None, - lyricist: str | None = None, - composer: str | None = None, - composer_sort: str | None = None, - arranger: str | None = None, + release_track_id: str | None = None, + title: str | None = None, track_alt: str | None = None, + track_id: str | None = None, work: str | None = None, - mb_workid: str | None = None, work_disambig: str | None = None, - bpm: str | None = None, - initial_key: str | None = None, - genre: str | None = None, - album: str | None = None, **kwargs, - ): - self.title = title - self.track_id = track_id - self.release_track_id = release_track_id - self.artist = artist - self.artist_id = artist_id - self.artists = artists or [] - self.artists_ids = artists_ids or [] - self.length = length + ) -> None: + self.arranger = arranger + self.bpm = bpm + self.composer = composer + self.composer_sort = composer_sort + self.disctitle = disctitle self.index = index - self.media = media + self.initial_key 
= initial_key + self.length = length + self.lyricist = lyricist + self.mb_workid = mb_workid self.medium = medium self.medium_index = medium_index self.medium_total = medium_total - self.artist_sort = artist_sort - self.artists_sort = artists_sort or [] - self.disctitle = disctitle - self.artist_credit = artist_credit - self.artists_credit = artists_credit or [] - self.data_source = data_source - self.data_url = data_url - self.lyricist = lyricist - self.composer = composer - self.composer_sort = composer_sort - self.arranger = arranger + self.release_track_id = release_track_id + self.title = title self.track_alt = track_alt + self.track_id = track_id self.work = work - self.mb_workid = mb_workid self.work_disambig = work_disambig - self.bpm = bpm - self.initial_key = initial_key - self.genre = genre - self.album = album - self.update(kwargs) - - def copy(self) -> TrackInfo: - dupe = TrackInfo() - dupe.update(self) - return dupe - - -# Candidate distance scoring. - -# Parameters for string distance function. -# Words that can be moved to the end of a string using a comma. -SD_END_WORDS = ["the", "a", "an"] -# Reduced weights for certain portions of the string. -SD_PATTERNS = [ - (r"^the ", 0.1), - (r"[\[\(]?(ep|single)[\]\)]?", 0.0), - (r"[\[\(]?(featuring|feat|ft)[\. :].+", 0.1), - (r"\(.*?\)", 0.3), - (r"\[.*?\]", 0.3), - (r"(, )?(pt\.|part) .+", 0.2), -] -# Replacements to use before testing distance. -SD_REPLACE = [ - (r"&", "and"), -] - - -def _string_dist_basic(str1: str, str2: str) -> float: - """Basic edit distance between two strings, ignoring - non-alphanumeric characters and case. Comparisons are based on a - transliteration/lowering to ASCII characters. Normalized by string - length. 
- """ - assert isinstance(str1, str) - assert isinstance(str2, str) - str1 = as_string(unidecode(str1)) - str2 = as_string(unidecode(str2)) - str1 = re.sub(r"[^a-z0-9]", "", str1.lower()) - str2 = re.sub(r"[^a-z0-9]", "", str2.lower()) - if not str1 and not str2: - return 0.0 - return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2))) - - -def string_dist(str1: str | None, str2: str | None) -> float: - """Gives an "intuitive" edit distance between two strings. This is - an edit distance, normalized by the string length, with a number of - tweaks that reflect intuition about text. - """ - if str1 is None and str2 is None: - return 0.0 - if str1 is None or str2 is None: - return 1.0 - - str1 = str1.lower() - str2 = str2.lower() - - # Don't penalize strings that move certain words to the end. For - # example, "the something" should be considered equal to - # "something, the". - for word in SD_END_WORDS: - if str1.endswith(", %s" % word): - str1 = "{} {}".format(word, str1[: -len(word) - 2]) - if str2.endswith(", %s" % word): - str2 = "{} {}".format(word, str2[: -len(word) - 2]) - - # Perform a couple of basic normalizing substitutions. - for pat, repl in SD_REPLACE: - str1 = re.sub(pat, repl, str1) - str2 = re.sub(pat, repl, str2) - - # Change the weight for certain string portions matched by a set - # of regular expressions. We gradually change the strings and build - # up penalties associated with parts of the string that were - # deleted. - base_dist = _string_dist_basic(str1, str2) - penalty = 0.0 - for pat, weight in SD_PATTERNS: - # Get strings that drop the pattern. - case_str1 = re.sub(pat, "", str1) - case_str2 = re.sub(pat, "", str2) - - if case_str1 != str1 or case_str2 != str2: - # If the pattern was present (i.e., it is deleted in the - # the current case), recalculate the distances for the - # modified strings. 
- case_dist = _string_dist_basic(case_str1, case_str2) - case_delta = max(0.0, base_dist - case_dist) - if case_delta == 0.0: - continue - - # Shift our baseline strings down (to avoid rematching the - # same part of the string) and add a scaled distance - # amount to the penalties. - str1 = case_str1 - str2 = case_str2 - base_dist = case_dist - penalty += weight * case_delta - - return base_dist + penalty - - -@total_ordering -class Distance: - """Keeps track of multiple distance penalties. Provides a single - weighted distance for all penalties as well as a weighted distance - for each individual penalty. - """ - - def __init__(self): - self._penalties = {} - self.tracks: dict[TrackInfo, Distance] = {} - - @cached_classproperty - def _weights(cls) -> dict[str, float]: - """A dictionary from keys to floating-point weights.""" - weights_view = config["match"]["distance_weights"] - weights = {} - for key in weights_view.keys(): - weights[key] = weights_view[key].as_number() - return weights - - # Access the components and their aggregates. - - @property - def distance(self) -> float: - """Return a weighted and normalized distance across all - penalties. - """ - dist_max = self.max_distance - if dist_max: - return self.raw_distance / self.max_distance - return 0.0 - - @property - def max_distance(self) -> float: - """Return the maximum distance penalty (normalization factor).""" - dist_max = 0.0 - for key, penalty in self._penalties.items(): - dist_max += len(penalty) * self._weights[key] - return dist_max - - @property - def raw_distance(self) -> float: - """Return the raw (denormalized) distance.""" - dist_raw = 0.0 - for key, penalty in self._penalties.items(): - dist_raw += sum(penalty) * self._weights[key] - return dist_raw - - def items(self) -> list[tuple[str, float]]: - """Return a list of (key, dist) pairs, with `dist` being the - weighted distance, sorted from highest to lowest. Does not - include penalties with a zero value. 
- """ - list_ = [] - for key in self._penalties: - dist = self[key] - if dist: - list_.append((key, dist)) - # Convert distance into a negative float we can sort items in - # ascending order (for keys, when the penalty is equal) and - # still get the items with the biggest distance first. - return sorted( - list_, key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0]) - ) - - def __hash__(self) -> int: - return id(self) - - def __eq__(self, other) -> bool: - return self.distance == other - - # Behave like a float. - - def __lt__(self, other) -> bool: - return self.distance < other - - def __float__(self) -> float: - return self.distance - - def __sub__(self, other) -> float: - return self.distance - other - - def __rsub__(self, other) -> float: - return other - self.distance - - def __str__(self) -> str: - return f"{self.distance:.2f}" - - # Behave like a dict. - - def __getitem__(self, key) -> float: - """Returns the weighted distance for a named penalty.""" - dist = sum(self._penalties[key]) * self._weights[key] - dist_max = self.max_distance - if dist_max: - return dist / dist_max - return 0.0 - - def __iter__(self) -> Iterator[tuple[str, float]]: - return iter(self.items()) - - def __len__(self) -> int: - return len(self.items()) - - def keys(self) -> list[str]: - return [key for key, _ in self.items()] - - def update(self, dist: Distance): - """Adds all the distance penalties from `dist`.""" - if not isinstance(dist, Distance): - raise ValueError( - "`dist` must be a Distance object, not {}".format(type(dist)) - ) - for key, penalties in dist._penalties.items(): - self._penalties.setdefault(key, []).extend(penalties) - - # Adding components. - - def _eq(self, value1: re.Pattern[str] | Any, value2: Any) -> bool: - """Returns True if `value1` is equal to `value2`. `value1` may - be a compiled regular expression, in which case it will be - matched against `value2`. 
- """ - if isinstance(value1, re.Pattern): - value2 = cast(str, value2) - return bool(value1.match(value2)) - return value1 == value2 - - def add(self, key: str, dist: float): - """Adds a distance penalty. `key` must correspond with a - configured weight setting. `dist` must be a float between 0.0 - and 1.0, and will be added to any existing distance penalties - for the same key. - """ - if not 0.0 <= dist <= 1.0: - raise ValueError(f"`dist` must be between 0.0 and 1.0, not {dist}") - self._penalties.setdefault(key, []).append(dist) - - def add_equality( - self, - key: str, - value: Any, - options: list[Any] | tuple[Any, ...] | Any, - ): - """Adds a distance penalty of 1.0 if `value` doesn't match any - of the values in `options`. If an option is a compiled regular - expression, it will be considered equal if it matches against - `value`. - """ - if not isinstance(options, (list, tuple)): - options = [options] - for opt in options: - if self._eq(opt, value): - dist = 0.0 - break - else: - dist = 1.0 - self.add(key, dist) - - def add_expr(self, key: str, expr: bool): - """Adds a distance penalty of 1.0 if `expr` evaluates to True, - or 0.0. - """ - if expr: - self.add(key, 1.0) - else: - self.add(key, 0.0) - - def add_number(self, key: str, number1: int, number2: int): - """Adds a distance penalty of 1.0 for each number of difference - between `number1` and `number2`, or 0.0 when there is no - difference. Use this when there is no upper limit on the - difference between the two numbers. - """ - diff = abs(number1 - number2) - if diff: - for i in range(diff): - self.add(key, 1.0) - else: - self.add(key, 0.0) - - def add_priority( - self, - key: str, - value: Any, - options: list[Any] | tuple[Any, ...] | Any, - ): - """Adds a distance penalty that corresponds to the position at - which `value` appears in `options`. A distance penalty of 0.0 - for the first option, or 1.0 if there is no matching option. 
If - an option is a compiled regular expression, it will be - considered equal if it matches against `value`. - """ - if not isinstance(options, (list, tuple)): - options = [options] - unit = 1.0 / (len(options) or 1) - for i, opt in enumerate(options): - if self._eq(opt, value): - dist = i * unit - break - else: - dist = 1.0 - self.add(key, dist) - - def add_ratio( - self, - key: str, - number1: int | float, - number2: int | float, - ): - """Adds a distance penalty for `number1` as a ratio of `number2`. - `number1` is bound at 0 and `number2`. - """ - number = float(max(min(number1, number2), 0)) - if number2: - dist = number / number2 - else: - dist = 0.0 - self.add(key, dist) - - def add_string(self, key: str, str1: str | None, str2: str | None): - """Adds a distance penalty based on the edit distance between - `str1` and `str2`. - """ - dist = string_dist(str1, str2) - self.add(key, dist) + super().__init__(**kwargs) # Structures that compose all the information for a candidate match. - - -class AlbumMatch(NamedTuple): +@dataclass +class Match: distance: Distance + info: Info + + @cached_classproperty + def type(cls) -> str: + return cls.__name__.removesuffix("Match") # type: ignore[attr-defined] + + +@dataclass +class AlbumMatch(Match): info: AlbumInfo mapping: dict[Item, TrackInfo] extra_items: list[Item] extra_tracks: list[TrackInfo] + @property + def item_info_pairs(self) -> list[tuple[Item, TrackInfo]]: + return list(self.mapping.items()) -class TrackMatch(NamedTuple): - distance: Distance + @property + def items(self) -> list[Item]: + return [i for i, _ in self.item_info_pairs] + + +@dataclass +class TrackMatch(Match): info: TrackInfo - - -# Aggregation of sources. - - -def album_for_mbid(release_id: str) -> AlbumInfo | None: - """Get an AlbumInfo object for a MusicBrainz release ID. Return None - if the ID is not found. 
- """ - try: - if album := mb.album_for_id(release_id): - plugins.send("albuminfo_received", info=album) - return album - except mb.MusicBrainzAPIError as exc: - exc.log(log) - return None - - -def track_for_mbid(recording_id: str) -> TrackInfo | None: - """Get a TrackInfo object for a MusicBrainz recording ID. Return None - if the ID is not found. - """ - try: - if track := mb.track_for_id(recording_id): - plugins.send("trackinfo_received", info=track) - return track - except mb.MusicBrainzAPIError as exc: - exc.log(log) - return None - - -def album_for_id(_id: str) -> AlbumInfo | None: - """Get AlbumInfo object for the given ID string.""" - return album_for_mbid(_id) or plugins.album_for_id(_id) - - -def track_for_id(_id: str) -> TrackInfo | None: - """Get TrackInfo object for the given ID string.""" - return track_for_mbid(_id) or plugins.track_for_id(_id) - - -def invoke_mb(call_func: Callable, *args): - try: - return call_func(*args) - except mb.MusicBrainzAPIError as exc: - exc.log(log) - return () - - -@plugins.notify_info_yielded("albuminfo_received") -def album_candidates( - items: list[Item], - artist: str, - album: str, - va_likely: bool, - extra_tags: dict, -) -> Iterable[tuple]: - """Search for album matches. ``items`` is a list of Item objects - that make up the album. ``artist`` and ``album`` are the respective - names (strings), which may be derived from the item list or may be - entered by the user. ``va_likely`` is a boolean indicating whether - the album is likely to be a "various artists" release. ``extra_tags`` - is an optional dictionary of additional tags used to further - constrain the search. - """ - - if config["musicbrainz"]["enabled"]: - # Base candidates if we have album and artist to match. - if artist and album: - yield from invoke_mb( - mb.match_album, artist, album, len(items), extra_tags - ) - - # Also add VA matches from MusicBrainz where appropriate. 
- if va_likely and album: - yield from invoke_mb( - mb.match_album, None, album, len(items), extra_tags - ) - - # Candidates from plugins. - yield from plugins.candidates(items, artist, album, va_likely, extra_tags) - - -@plugins.notify_info_yielded("trackinfo_received") -def item_candidates(item: Item, artist: str, title: str) -> Iterable[tuple]: - """Search for item matches. ``item`` is the Item to be matched. - ``artist`` and ``title`` are strings and either reflect the item or - are specified by the user. - """ - - # MusicBrainz candidates. - if config["musicbrainz"]["enabled"] and artist and title: - yield from invoke_mb(mb.match_track, artist, title) - - # Plugin candidates. - yield from plugins.item_candidates(item, artist, title) diff --git a/beets/autotag/match.py b/beets/autotag/match.py index bc30ccea2..8adbaeda1 100644 --- a/beets/autotag/match.py +++ b/beets/autotag/match.py @@ -18,35 +18,22 @@ releases and tracks. from __future__ import annotations -import datetime -import re -from collections.abc import Iterable, Sequence from enum import IntEnum -from functools import cache -from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar, cast +from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar import lap import numpy as np -from beets import config, logging, plugins -from beets.autotag import ( - AlbumInfo, - AlbumMatch, - Distance, - TrackInfo, - TrackMatch, - hooks, -) -from beets.util import plurality +from beets import config, logging, metadata_plugins, plugins +from beets.autotag import AlbumInfo, AlbumMatch, TrackInfo, TrackMatch, hooks +from beets.util import get_most_common_tags + +from .distance import VA_ARTISTS, distance, track_distance if TYPE_CHECKING: - from beets.library import Item + from collections.abc import Iterable, Sequence -# Artist signals that indicate "various artists". 
These are used at the -# album level to determine whether a given release is likely a VA -# release and also on the track level to to remove the penalty for -# differing artists. -VA_ARTISTS = ("", "various artists", "various", "va", "unknown") + from beets.library import Item # Global logger. log = logging.getLogger("beets") @@ -79,48 +66,10 @@ class Proposal(NamedTuple): # Primary matching functionality. -def current_metadata( - items: Iterable[Item], -) -> tuple[dict[str, Any], dict[str, Any]]: - """Extract the likely current metadata for an album given a list of its - items. Return two dictionaries: - - The most common value for each field. - - Whether each field's value was unanimous (values are booleans). - """ - assert items # Must be nonempty. - - likelies = {} - consensus = {} - fields = [ - "artist", - "album", - "albumartist", - "year", - "disctotal", - "mb_albumid", - "label", - "barcode", - "catalognum", - "country", - "media", - "albumdisambig", - ] - for field in fields: - values = [item[field] for item in items if item] - likelies[field], freq = plurality(values) - consensus[field] = freq == len(values) - - # If there's an album artist consensus, use this for the artist. - if consensus["albumartist"] and likelies["albumartist"]: - likelies["artist"] = likelies["albumartist"] - - return likelies, consensus - - def assign_items( items: Sequence[Item], tracks: Sequence[TrackInfo], -) -> tuple[dict[Item, TrackInfo], list[Item], list[TrackInfo]]: +) -> tuple[list[tuple[Item, TrackInfo]], list[Item], list[TrackInfo]]: """Given a list of Items and a list of TrackInfo objects, find the best mapping between them. 
Returns a mapping from Items to TrackInfo objects, a set of extra Items, and a set of extra TrackInfo @@ -146,195 +95,11 @@ def assign_items( extra_items.sort(key=lambda i: (i.disc, i.track, i.title)) extra_tracks = list(set(tracks) - set(mapping.values())) extra_tracks.sort(key=lambda t: (t.index, t.title)) - return mapping, extra_items, extra_tracks + return list(mapping.items()), extra_items, extra_tracks -def track_index_changed(item: Item, track_info: TrackInfo) -> bool: - """Returns True if the item and track info index is different. Tolerates - per disc and per release numbering. - """ - return item.track not in (track_info.medium_index, track_info.index) - - -@cache -def get_track_length_grace() -> float: - """Get cached grace period for track length matching.""" - return config["match"]["track_length_grace"].as_number() - - -@cache -def get_track_length_max() -> float: - """Get cached maximum track length for track length matching.""" - return config["match"]["track_length_max"].as_number() - - -def track_distance( - item: Item, - track_info: TrackInfo, - incl_artist: bool = False, -) -> Distance: - """Determines the significance of a track metadata change. Returns a - Distance object. `incl_artist` indicates that a distance component should - be included for the track artist (i.e., for various-artist releases). - - ``track_length_grace`` and ``track_length_max`` configuration options are - cached because this function is called many times during the matching - process and their access comes with a performance overhead. - """ - dist = hooks.Distance() - - # Length. - if info_length := track_info.length: - diff = abs(item.length - info_length) - get_track_length_grace() - dist.add_ratio("track_length", diff, get_track_length_max()) - - # Title. - dist.add_string("track_title", item.title, track_info.title) - - # Artist. Only check if there is actually an artist in the track data. 
- if ( - incl_artist - and track_info.artist - and item.artist.lower() not in VA_ARTISTS - ): - dist.add_string("track_artist", item.artist, track_info.artist) - - # Track index. - if track_info.index and item.track: - dist.add_expr("track_index", track_index_changed(item, track_info)) - - # Track ID. - if item.mb_trackid: - dist.add_expr("track_id", item.mb_trackid != track_info.track_id) - - # Penalize mismatching disc numbers. - if track_info.medium and item.disc: - dist.add_expr("medium", item.disc != track_info.medium) - - # Plugins. - dist.update(plugins.track_distance(item, track_info)) - - return dist - - -def distance( - items: Sequence[Item], - album_info: AlbumInfo, - mapping: dict[Item, TrackInfo], -) -> Distance: - """Determines how "significant" an album metadata change would be. - Returns a Distance object. `album_info` is an AlbumInfo object - reflecting the album to be compared. `items` is a sequence of all - Item objects that will be matched (order is not important). - `mapping` is a dictionary mapping Items to TrackInfo objects; the - keys are a subset of `items` and the values are a subset of - `album_info.tracks`. - """ - likelies, _ = current_metadata(items) - - dist = hooks.Distance() - - # Artist, if not various. - if not album_info.va: - dist.add_string("artist", likelies["artist"], album_info.artist) - - # Album. - dist.add_string("album", likelies["album"], album_info.album) - - # Current or preferred media. - if album_info.media: - # Preferred media options. - patterns = config["match"]["preferred"]["media"].as_str_seq() - patterns = cast(Sequence[str], patterns) - options = [re.compile(r"(\d+x)?(%s)" % pat, re.I) for pat in patterns] - if options: - dist.add_priority("media", album_info.media, options) - # Current media. - elif likelies["media"]: - dist.add_equality("media", album_info.media, likelies["media"]) - - # Mediums. 
- if likelies["disctotal"] and album_info.mediums: - dist.add_number("mediums", likelies["disctotal"], album_info.mediums) - - # Prefer earliest release. - if album_info.year and config["match"]["preferred"]["original_year"]: - # Assume 1889 (earliest first gramophone discs) if we don't know the - # original year. - original = album_info.original_year or 1889 - diff = abs(album_info.year - original) - diff_max = abs(datetime.date.today().year - original) - dist.add_ratio("year", diff, diff_max) - # Year. - elif likelies["year"] and album_info.year: - if likelies["year"] in (album_info.year, album_info.original_year): - # No penalty for matching release or original year. - dist.add("year", 0.0) - elif album_info.original_year: - # Prefer matchest closest to the release year. - diff = abs(likelies["year"] - album_info.year) - diff_max = abs( - datetime.date.today().year - album_info.original_year - ) - dist.add_ratio("year", diff, diff_max) - else: - # Full penalty when there is no original year. - dist.add("year", 1.0) - - # Preferred countries. - patterns = config["match"]["preferred"]["countries"].as_str_seq() - patterns = cast(Sequence[str], patterns) - options = [re.compile(pat, re.I) for pat in patterns] - if album_info.country and options: - dist.add_priority("country", album_info.country, options) - # Country. - elif likelies["country"] and album_info.country: - dist.add_string("country", likelies["country"], album_info.country) - - # Label. - if likelies["label"] and album_info.label: - dist.add_string("label", likelies["label"], album_info.label) - - # Catalog number. - if likelies["catalognum"] and album_info.catalognum: - dist.add_string( - "catalognum", likelies["catalognum"], album_info.catalognum - ) - - # Disambiguation. - if likelies["albumdisambig"] and album_info.albumdisambig: - dist.add_string( - "albumdisambig", likelies["albumdisambig"], album_info.albumdisambig - ) - - # Album ID. 
- if likelies["mb_albumid"]: - dist.add_equality( - "album_id", likelies["mb_albumid"], album_info.album_id - ) - - # Tracks. - dist.tracks = {} - for item, track in mapping.items(): - dist.tracks[track] = track_distance(item, track, album_info.va) - dist.add("tracks", dist.tracks[track].distance) - - # Missing tracks. - for _ in range(len(album_info.tracks) - len(mapping)): - dist.add("missing_tracks", 1.0) - - # Unmatched tracks. - for _ in range(len(items) - len(mapping)): - dist.add("unmatched_tracks", 1.0) - - # Plugins. - dist.update(plugins.album_distance(items, album_info, mapping)) - - return dist - - -def match_by_id(items: Iterable[Item]): - """If the items are tagged with a MusicBrainz album ID, returns an +def match_by_id(items: Iterable[Item]) -> AlbumInfo | None: + """If the items are tagged with an external source ID, return an AlbumInfo object for the corresponding album. Otherwise, returns None. """ @@ -353,8 +118,8 @@ def match_by_id(items: Iterable[Item]): log.debug("No album ID consensus.") return None # If all album IDs are equal, look up the album. - log.debug("Searching for discovered album ID: {0}", first) - return hooks.album_for_mbid(first) + log.debug("Searching for discovered album ID: {}", first) + return metadata_plugins.album_for_id(first) def _recommendation( @@ -432,9 +197,7 @@ def _add_candidate( checking the track count, ordering the items, checking for duplicates, and calculating the distance. """ - log.debug( - "Candidate: {0} - {1} ({2})", info.artist, info.album, info.album_id - ) + log.debug("Candidate: {0.artist} - {0.album} ({0.album_id})", info) # Discard albums with zero tracks. if not info.tracks: @@ -447,37 +210,38 @@ def _add_candidate( return # Discard matches without required tags. 
- for req_tag in cast( - Sequence[str], config["match"]["required"].as_str_seq() - ): + required_tags: Sequence[str] = config["match"]["required"].as_str_seq() + for req_tag in required_tags: if getattr(info, req_tag) is None: - log.debug("Ignored. Missing required tag: {0}", req_tag) + log.debug("Ignored. Missing required tag: {}", req_tag) return # Find mapping between the items and the track info. - mapping, extra_items, extra_tracks = assign_items(items, info.tracks) + item_info_pairs, extra_items, extra_tracks = assign_items( + items, info.tracks + ) # Get the change distance. - dist = distance(items, info, mapping) + dist = distance(items, info, item_info_pairs) # Skip matches with ignored penalties. penalties = [key for key, _ in dist] - ignored = cast(Sequence[str], config["match"]["ignored"].as_str_seq()) - for penalty in ignored: + ignored_tags: Sequence[str] = config["match"]["ignored"].as_str_seq() + for penalty in ignored_tags: if penalty in penalties: - log.debug("Ignored. Penalty: {0}", penalty) + log.debug("Ignored. Penalty: {}", penalty) return - log.debug("Success. Distance: {0}", dist) + log.debug("Success. Distance: {}", dist) results[info.album_id] = hooks.AlbumMatch( - dist, info, mapping, extra_items, extra_tracks + dist, info, dict(item_info_pairs), extra_items, extra_tracks ) def tag_album( items, search_artist: str | None = None, - search_album: str | None = None, + search_name: str | None = None, search_ids: list[str] = [], ) -> tuple[str, str, Proposal]: """Return a tuple of the current artist name, the current album @@ -498,10 +262,10 @@ def tag_album( candidates. """ # Get current metadata. 
- likelies, consensus = current_metadata(items) - cur_artist = cast(str, likelies["artist"]) - cur_album = cast(str, likelies["album"]) - log.debug("Tagging {0} - {1}", cur_artist, cur_album) + likelies, consensus = get_most_common_tags(items) + cur_artist: str = likelies["artist"] + cur_album: str = likelies["album"] + log.debug("Tagging {} - {}", cur_artist, cur_album) # The output result, keys are the MB album ID. candidates: dict[Any, AlbumMatch] = {} @@ -509,18 +273,22 @@ def tag_album( # Search by explicit ID. if search_ids: for search_id in search_ids: - log.debug("Searching for album ID: {0}", search_id) - if info := hooks.album_for_id(search_id): + log.debug("Searching for album ID: {}", search_id) + if info := metadata_plugins.album_for_id(search_id): _add_candidate(items, candidates, info) + if opt_candidate := candidates.get(info.album_id): + plugins.send("album_matched", match=opt_candidate) # Use existing metadata or text search. else: # Try search based on current ID. - id_info = match_by_id(items) - if id_info: - _add_candidate(items, candidates, id_info) + if info := match_by_id(items): + _add_candidate(items, candidates, info) + for candidate in candidates.values(): + plugins.send("album_matched", match=candidate) + rec = _recommendation(list(candidates.values())) - log.debug("Album ID match recommendation is {0}", rec) + log.debug("Album ID match recommendation is {}", rec) if candidates and not config["import"]["timid"]: # If we have a very good MBID match, return immediately. # Otherwise, this match will compete against metadata-based @@ -534,16 +302,10 @@ def tag_album( ) # Search terms. - if not (search_artist and search_album): + if not (search_artist and search_name): # No explicit search terms -- use current metadata. 
- search_artist, search_album = cur_artist, cur_album - log.debug("Search terms: {0} - {1}", search_artist, search_album) - - extra_tags = None - if config["musicbrainz"]["extra_tags"]: - tag_list = config["musicbrainz"]["extra_tags"].get() - extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list} - log.debug("Additional search terms: {0}", extra_tags) + search_artist, search_name = cur_artist, cur_album + log.debug("Search terms: {} - {}", search_artist, search_name) # Is this album likely to be a "various artist" release? va_likely = ( @@ -551,15 +313,17 @@ def tag_album( or (search_artist.lower() in VA_ARTISTS) or any(item.comp for item in items) ) - log.debug("Album might be VA: {0}", va_likely) + log.debug("Album might be VA: {}", va_likely) # Get the results from the data sources. - for matched_candidate in hooks.album_candidates( - items, search_artist, search_album, va_likely, extra_tags + for matched_candidate in metadata_plugins.candidates( + items, search_artist, search_name, va_likely ): _add_candidate(items, candidates, matched_candidate) + if opt_candidate := candidates.get(matched_candidate.album_id): + plugins.send("album_matched", match=opt_candidate) - log.debug("Evaluating {0} candidates.", len(candidates)) + log.debug("Evaluating {} candidates.", len(candidates)) # Sort and get the recommendation. candidates_sorted = _sort_candidates(candidates.values()) rec = _recommendation(candidates_sorted) @@ -569,28 +333,27 @@ def tag_album( def tag_item( item, search_artist: str | None = None, - search_title: str | None = None, + search_name: str | None = None, search_ids: list[str] | None = None, ) -> Proposal: """Find metadata for a single track. Return a `Proposal` consisting of `TrackMatch` objects. - `search_artist` and `search_title` may be used - to override the current metadata for the purposes of the MusicBrainz - title. `search_ids` may be used for restricting the search to a list - of metadata backend IDs. 
+ `search_artist` and `search_title` may be used to override the item + metadata in the search query. `search_ids` may be used for restricting the + search to a list of metadata backend IDs. """ # Holds candidates found so far: keys are MBIDs; values are # (distance, TrackInfo) pairs. candidates = {} rec: Recommendation | None = None - # First, try matching by MusicBrainz ID. + # First, try matching by the external source ID. trackids = search_ids or [t for t in [item.mb_trackid] if t] if trackids: for trackid in trackids: - log.debug("Searching for track ID: {0}", trackid) - if info := hooks.track_for_id(trackid): + log.debug("Searching for track ID: {}", trackid) + if info := metadata_plugins.track_for_id(trackid): dist = track_distance(item, info, incl_artist=True) candidates[info.track_id] = hooks.TrackMatch(dist, info) # If this is a good match, then don't keep searching. @@ -611,17 +374,19 @@ def tag_item( return Proposal([], Recommendation.none) # Search terms. - if not (search_artist and search_title): - search_artist, search_title = item.artist, item.title - log.debug("Item search terms: {0} - {1}", search_artist, search_title) + search_artist = search_artist or item.artist + search_name = search_name or item.title + log.debug("Item search terms: {} - {}", search_artist, search_name) # Get and evaluate candidate metadata. - for track_info in hooks.item_candidates(item, search_artist, search_title): + for track_info in metadata_plugins.item_candidates( + item, search_artist, search_name + ): dist = track_distance(item, track_info, incl_artist=True) candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info) # Sort by distance and return with recommendation. 
- log.debug("Found {0} candidates.", len(candidates)) + log.debug("Found {} candidates.", len(candidates)) candidates_sorted = _sort_candidates(candidates.values()) rec = _recommendation(candidates_sorted) return Proposal(candidates_sorted, rec) diff --git a/beets/autotag/mb.py b/beets/autotag/mb.py deleted file mode 100644 index 6c2b604cd..000000000 --- a/beets/autotag/mb.py +++ /dev/null @@ -1,884 +0,0 @@ -# This file is part of beets. -# Copyright 2016, Adrian Sampson. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
- -"""Searches for albums in the MusicBrainz database.""" - -from __future__ import annotations - -import re -import traceback -from collections import Counter -from collections.abc import Iterator, Sequence -from itertools import product -from typing import Any, cast -from urllib.parse import urljoin - -import musicbrainzngs - -import beets -import beets.autotag.hooks -from beets import config, logging, plugins, util -from beets.plugins import MetadataSourcePlugin -from beets.util.id_extractors import ( - beatport_id_regex, - deezer_id_regex, - extract_discogs_id_regex, - spotify_id_regex, -) - -VARIOUS_ARTISTS_ID = "89ad4ac3-39f7-470e-963a-56509c546377" - -BASE_URL = "https://musicbrainz.org/" - -SKIPPED_TRACKS = ["[data track]"] - -FIELDS_TO_MB_KEYS = { - "catalognum": "catno", - "country": "country", - "label": "label", - "barcode": "barcode", - "media": "format", - "year": "date", -} - -musicbrainzngs.set_useragent("beets", beets.__version__, "https://beets.io/") - - -class MusicBrainzAPIError(util.HumanReadableError): - """An error while talking to MusicBrainz. The `query` field is the - parameter to the action and may have any type. 
- """ - - def __init__(self, reason, verb, query, tb=None): - self.query = query - if isinstance(reason, musicbrainzngs.WebServiceError): - reason = "MusicBrainz not reachable" - super().__init__(reason, verb, tb) - - def get_message(self): - return "{} in {} with query {}".format( - self._reasonstr(), self.verb, repr(self.query) - ) - - -log = logging.getLogger("beets") - -RELEASE_INCLUDES = list( - { - "artists", - "media", - "recordings", - "release-groups", - "labels", - "artist-credits", - "aliases", - "recording-level-rels", - "work-rels", - "work-level-rels", - "artist-rels", - "isrcs", - "url-rels", - "release-rels", - "tags", - } - & set(musicbrainzngs.VALID_INCLUDES["release"]) -) - -TRACK_INCLUDES = list( - { - "artists", - "aliases", - "isrcs", - "work-level-rels", - "artist-rels", - } - & set(musicbrainzngs.VALID_INCLUDES["recording"]) -) - -BROWSE_INCLUDES = [ - "artist-credits", - "work-rels", - "artist-rels", - "recording-rels", - "release-rels", -] -if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES["recording"]: - BROWSE_INCLUDES.append("work-level-rels") -BROWSE_CHUNKSIZE = 100 -BROWSE_MAXTRACKS = 500 - - -def track_url(trackid: str) -> str: - return urljoin(BASE_URL, "recording/" + trackid) - - -def album_url(albumid: str) -> str: - return urljoin(BASE_URL, "release/" + albumid) - - -def configure(): - """Set up the python-musicbrainz-ngs module according to settings - from the beets configuration. This should be called at startup. - """ - hostname = config["musicbrainz"]["host"].as_str() - https = config["musicbrainz"]["https"].get(bool) - # Only call set_hostname when a custom server is configured. 
Since - # musicbrainz-ngs connects to musicbrainz.org with HTTPS by default - if hostname != "musicbrainz.org": - musicbrainzngs.set_hostname(hostname, https) - musicbrainzngs.set_rate_limit( - config["musicbrainz"]["ratelimit_interval"].as_number(), - config["musicbrainz"]["ratelimit"].get(int), - ) - - -def _preferred_alias(aliases: list): - """Given an list of alias structures for an artist credit, select - and return the user's preferred alias alias or None if no matching - alias is found. - """ - if not aliases: - return - - # Only consider aliases that have locales set. - aliases = [a for a in aliases if "locale" in a] - - # Get any ignored alias types and lower case them to prevent case issues - ignored_alias_types = config["import"]["ignored_alias_types"].as_str_seq() - ignored_alias_types = [a.lower() for a in ignored_alias_types] - - # Search configured locales in order. - for locale in config["import"]["languages"].as_str_seq(): - # Find matching primary aliases for this locale that are not - # being ignored - matches = [] - for a in aliases: - if ( - a["locale"] == locale - and "primary" in a - and a.get("type", "").lower() not in ignored_alias_types - ): - matches.append(a) - - # Skip to the next locale if we have no matches - if not matches: - continue - - return matches[0] - - -def _preferred_release_event(release: dict[str, Any]) -> tuple[str, str]: - """Given a release, select and return the user's preferred release - event as a tuple of (country, release_date). Fall back to the - default release event if a preferred event is not found. 
- """ - countries = config["match"]["preferred"]["countries"].as_str_seq() - countries = cast(Sequence, countries) - - for country in countries: - for event in release.get("release-event-list", {}): - try: - if country in event["area"]["iso-3166-1-code-list"]: - return country, event["date"] - except KeyError: - pass - - return (cast(str, release.get("country")), cast(str, release.get("date"))) - - -def _multi_artist_credit( - credit: list[dict], include_join_phrase: bool -) -> tuple[list[str], list[str], list[str]]: - """Given a list representing an ``artist-credit`` block, accumulate - data into a triple of joined artist name lists: canonical, sort, and - credit. - """ - artist_parts = [] - artist_sort_parts = [] - artist_credit_parts = [] - for el in credit: - if isinstance(el, str): - # Join phrase. - if include_join_phrase: - artist_parts.append(el) - artist_credit_parts.append(el) - artist_sort_parts.append(el) - - else: - alias = _preferred_alias(el["artist"].get("alias-list", ())) - - # An artist. - if alias: - cur_artist_name = alias["alias"] - else: - cur_artist_name = el["artist"]["name"] - artist_parts.append(cur_artist_name) - - # Artist sort name. - if alias: - artist_sort_parts.append(alias["sort-name"]) - elif "sort-name" in el["artist"]: - artist_sort_parts.append(el["artist"]["sort-name"]) - else: - artist_sort_parts.append(cur_artist_name) - - # Artist credit. - if "name" in el: - artist_credit_parts.append(el["name"]) - else: - artist_credit_parts.append(cur_artist_name) - - return ( - artist_parts, - artist_sort_parts, - artist_credit_parts, - ) - - -def _flatten_artist_credit(credit: list[dict]) -> tuple[str, str, str]: - """Given a list representing an ``artist-credit`` block, flatten the - data into a triple of joined artist name strings: canonical, sort, and - credit. 
- """ - artist_parts, artist_sort_parts, artist_credit_parts = _multi_artist_credit( - credit, include_join_phrase=True - ) - return ( - "".join(artist_parts), - "".join(artist_sort_parts), - "".join(artist_credit_parts), - ) - - -def _artist_ids(credit: list[dict]) -> list[str]: - """ - Given a list representing an ``artist-credit``, - return a list of artist IDs - """ - artist_ids: list[str] = [] - for el in credit: - if isinstance(el, dict): - artist_ids.append(el["artist"]["id"]) - - return artist_ids - - -def _get_related_artist_names(relations, relation_type): - """Given a list representing the artist relationships extract the names of - the remixers and concatenate them. - """ - related_artists = [] - - for relation in relations: - if relation["type"] == relation_type: - related_artists.append(relation["artist"]["name"]) - - return ", ".join(related_artists) - - -def track_info( - recording: dict, - index: int | None = None, - medium: int | None = None, - medium_index: int | None = None, - medium_total: int | None = None, -) -> beets.autotag.hooks.TrackInfo: - """Translates a MusicBrainz recording result dictionary into a beets - ``TrackInfo`` object. Three parameters are optional and are used - only for tracks that appear on releases (non-singletons): ``index``, - the overall track number; ``medium``, the disc number; - ``medium_index``, the track's index on its medium; ``medium_total``, - the number of tracks on the medium. Each number is a 1-based index. - """ - info = beets.autotag.hooks.TrackInfo( - title=recording["title"], - track_id=recording["id"], - index=index, - medium=medium, - medium_index=medium_index, - medium_total=medium_total, - data_source="MusicBrainz", - data_url=track_url(recording["id"]), - ) - - if recording.get("artist-credit"): - # Get the artist names. 
- ( - info.artist, - info.artist_sort, - info.artist_credit, - ) = _flatten_artist_credit(recording["artist-credit"]) - - ( - info.artists, - info.artists_sort, - info.artists_credit, - ) = _multi_artist_credit( - recording["artist-credit"], include_join_phrase=False - ) - - info.artists_ids = _artist_ids(recording["artist-credit"]) - info.artist_id = info.artists_ids[0] - - if recording.get("artist-relation-list"): - info.remixer = _get_related_artist_names( - recording["artist-relation-list"], relation_type="remixer" - ) - - if recording.get("length"): - info.length = int(recording["length"]) / 1000.0 - - info.trackdisambig = recording.get("disambiguation") - - if recording.get("isrc-list"): - info.isrc = ";".join(recording["isrc-list"]) - - lyricist = [] - composer = [] - composer_sort = [] - for work_relation in recording.get("work-relation-list", ()): - if work_relation["type"] != "performance": - continue - info.work = work_relation["work"]["title"] - info.mb_workid = work_relation["work"]["id"] - if "disambiguation" in work_relation["work"]: - info.work_disambig = work_relation["work"]["disambiguation"] - - for artist_relation in work_relation["work"].get( - "artist-relation-list", () - ): - if "type" in artist_relation: - type = artist_relation["type"] - if type == "lyricist": - lyricist.append(artist_relation["artist"]["name"]) - elif type == "composer": - composer.append(artist_relation["artist"]["name"]) - composer_sort.append(artist_relation["artist"]["sort-name"]) - if lyricist: - info.lyricist = ", ".join(lyricist) - if composer: - info.composer = ", ".join(composer) - info.composer_sort = ", ".join(composer_sort) - - arranger = [] - for artist_relation in recording.get("artist-relation-list", ()): - if "type" in artist_relation: - type = artist_relation["type"] - if type == "arranger": - arranger.append(artist_relation["artist"]["name"]) - if arranger: - info.arranger = ", ".join(arranger) - - # Supplementary fields provided by plugins - 
extra_trackdatas = plugins.send("mb_track_extract", data=recording) - for extra_trackdata in extra_trackdatas: - info.update(extra_trackdata) - - return info - - -def _set_date_str( - info: beets.autotag.hooks.AlbumInfo, - date_str: str, - original: bool = False, -): - """Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo - object, set the object's release date fields appropriately. If - `original`, then set the original_year, etc., fields. - """ - if date_str: - date_parts = date_str.split("-") - for key in ("year", "month", "day"): - if date_parts: - date_part = date_parts.pop(0) - try: - date_num = int(date_part) - except ValueError: - continue - - if original: - key = "original_" + key - setattr(info, key, date_num) - - -def album_info(release: dict) -> beets.autotag.hooks.AlbumInfo: - """Takes a MusicBrainz release result dictionary and returns a beets - AlbumInfo object containing the interesting data about that release. - """ - # Get artist name using join phrases. - artist_name, artist_sort_name, artist_credit_name = _flatten_artist_credit( - release["artist-credit"] - ) - - ( - artists_names, - artists_sort_names, - artists_credit_names, - ) = _multi_artist_credit( - release["artist-credit"], include_join_phrase=False - ) - - ntracks = sum(len(m["track-list"]) for m in release["medium-list"]) - - # The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list' - # when the release has more than 500 tracks. So we use browse_recordings - # on chunks of tracks to recover the same information in this case. 
- if ntracks > BROWSE_MAXTRACKS: - log.debug("Album {} has too many tracks", release["id"]) - recording_list = [] - for i in range(0, ntracks, BROWSE_CHUNKSIZE): - log.debug("Retrieving tracks starting at {}", i) - recording_list.extend( - musicbrainzngs.browse_recordings( - release=release["id"], - limit=BROWSE_CHUNKSIZE, - includes=BROWSE_INCLUDES, - offset=i, - )["recording-list"] - ) - track_map = {r["id"]: r for r in recording_list} - for medium in release["medium-list"]: - for recording in medium["track-list"]: - recording_info = track_map[recording["recording"]["id"]] - recording["recording"] = recording_info - - # Basic info. - track_infos = [] - index = 0 - for medium in release["medium-list"]: - disctitle = medium.get("title") - format = medium.get("format") - - if format in config["match"]["ignored_media"].as_str_seq(): - continue - - all_tracks = medium["track-list"] - if ( - "data-track-list" in medium - and not config["match"]["ignore_data_tracks"] - ): - all_tracks += medium["data-track-list"] - track_count = len(all_tracks) - - if "pregap" in medium: - all_tracks.insert(0, medium["pregap"]) - - for track in all_tracks: - if ( - "title" in track["recording"] - and track["recording"]["title"] in SKIPPED_TRACKS - ): - continue - - if ( - "video" in track["recording"] - and track["recording"]["video"] == "true" - and config["match"]["ignore_video_tracks"] - ): - continue - - # Basic information from the recording. - index += 1 - ti = track_info( - track["recording"], - index, - int(medium["position"]), - int(track["position"]), - track_count, - ) - ti.release_track_id = track["id"] - ti.disctitle = disctitle - ti.media = format - ti.track_alt = track["number"] - - # Prefer track data, where present, over recording data. - if track.get("title"): - ti.title = track["title"] - if track.get("artist-credit"): - # Get the artist names. 
- ( - ti.artist, - ti.artist_sort, - ti.artist_credit, - ) = _flatten_artist_credit(track["artist-credit"]) - - ( - ti.artists, - ti.artists_sort, - ti.artists_credit, - ) = _multi_artist_credit( - track["artist-credit"], include_join_phrase=False - ) - - ti.artists_ids = _artist_ids(track["artist-credit"]) - ti.artist_id = ti.artists_ids[0] - if track.get("length"): - ti.length = int(track["length"]) / (1000.0) - - track_infos.append(ti) - - album_artist_ids = _artist_ids(release["artist-credit"]) - info = beets.autotag.hooks.AlbumInfo( - album=release["title"], - album_id=release["id"], - artist=artist_name, - artist_id=album_artist_ids[0], - artists=artists_names, - artists_ids=album_artist_ids, - tracks=track_infos, - mediums=len(release["medium-list"]), - artist_sort=artist_sort_name, - artists_sort=artists_sort_names, - artist_credit=artist_credit_name, - artists_credit=artists_credit_names, - data_source="MusicBrainz", - data_url=album_url(release["id"]), - barcode=release.get("barcode"), - ) - info.va = info.artist_id == VARIOUS_ARTISTS_ID - if info.va: - info.artist = config["va_name"].as_str() - info.asin = release.get("asin") - info.releasegroup_id = release["release-group"]["id"] - info.albumstatus = release.get("status") - - if release["release-group"].get("title"): - info.release_group_title = release["release-group"].get("title") - - # Get the disambiguation strings at the release and release group level. - if release["release-group"].get("disambiguation"): - info.releasegroupdisambig = release["release-group"].get( - "disambiguation" - ) - if release.get("disambiguation"): - info.albumdisambig = release.get("disambiguation") - - # Get the "classic" Release type. This data comes from a legacy API - # feature before MusicBrainz supported multiple release types. 
- if "type" in release["release-group"]: - reltype = release["release-group"]["type"] - if reltype: - info.albumtype = reltype.lower() - - # Set the new-style "primary" and "secondary" release types. - albumtypes = [] - if "primary-type" in release["release-group"]: - rel_primarytype = release["release-group"]["primary-type"] - if rel_primarytype: - albumtypes.append(rel_primarytype.lower()) - if "secondary-type-list" in release["release-group"]: - if release["release-group"]["secondary-type-list"]: - for sec_type in release["release-group"]["secondary-type-list"]: - albumtypes.append(sec_type.lower()) - info.albumtypes = albumtypes - - # Release events. - info.country, release_date = _preferred_release_event(release) - release_group_date = release["release-group"].get("first-release-date") - if not release_date: - # Fall back if release-specific date is not available. - release_date = release_group_date - _set_date_str(info, release_date, False) - _set_date_str(info, release_group_date, True) - - # Label name. - if release.get("label-info-list"): - label_info = release["label-info-list"][0] - if label_info.get("label"): - label = label_info["label"]["name"] - if label != "[no label]": - info.label = label - info.catalognum = label_info.get("catalog-number") - - # Text representation data. - if release.get("text-representation"): - rep = release["text-representation"] - info.script = rep.get("script") - info.language = rep.get("language") - - # Media (format). 
- if release["medium-list"]: - # If all media are the same, use that medium name - if len({m.get("format") for m in release["medium-list"]}) == 1: - info.media = release["medium-list"][0].get("format") - # Otherwise, let's just call it "Media" - else: - info.media = "Media" - - if config["musicbrainz"]["genres"]: - sources = [ - release["release-group"].get("tag-list", []), - release.get("tag-list", []), - ] - genres: Counter[str] = Counter() - for source in sources: - for genreitem in source: - genres[genreitem["name"]] += int(genreitem["count"]) - info.genre = "; ".join( - genre - for genre, _count in sorted(genres.items(), key=lambda g: -g[1]) - ) - - # We might find links to external sources (Discogs, Bandcamp, ...) - external_ids = config["musicbrainz"]["external_ids"].get() - wanted_sources = {site for site, wanted in external_ids.items() if wanted} - if wanted_sources and (url_rels := release.get("url-relation-list")): - urls = {} - - for source, url in product(wanted_sources, url_rels): - if f"{source}.com" in (target := url["target"]): - urls[source] = target - log.debug( - "Found link to {} release via MusicBrainz", - source.capitalize(), - ) - - if "discogs" in urls: - info.discogs_albumid = extract_discogs_id_regex(urls["discogs"]) - if "bandcamp" in urls: - info.bandcamp_album_id = urls["bandcamp"] - if "spotify" in urls: - info.spotify_album_id = MetadataSourcePlugin._get_id( - "album", urls["spotify"], spotify_id_regex - ) - if "deezer" in urls: - info.deezer_album_id = MetadataSourcePlugin._get_id( - "album", urls["deezer"], deezer_id_regex - ) - if "beatport" in urls: - info.beatport_album_id = MetadataSourcePlugin._get_id( - "album", urls["beatport"], beatport_id_regex - ) - if "tidal" in urls: - info.tidal_album_id = urls["tidal"].split("/")[-1] - - extra_albumdatas = plugins.send("mb_album_extract", data=release) - for extra_albumdata in extra_albumdatas: - info.update(extra_albumdata) - - return info - - -def match_album( - artist: str, - 
album: str, - tracks: int | None = None, - extra_tags: dict[str, Any] | None = None, -) -> Iterator[beets.autotag.hooks.AlbumInfo]: - """Searches for a single album ("release" in MusicBrainz parlance) - and returns an iterator over AlbumInfo objects. May raise a - MusicBrainzAPIError. - - The query consists of an artist name, an album name, and, - optionally, a number of tracks on the album and any other extra tags. - """ - # Build search criteria. - criteria = {"release": album.lower().strip()} - if artist is not None: - criteria["artist"] = artist.lower().strip() - else: - # Various Artists search. - criteria["arid"] = VARIOUS_ARTISTS_ID - if tracks is not None: - criteria["tracks"] = str(tracks) - - # Additional search cues from existing metadata. - if extra_tags: - for tag, value in extra_tags.items(): - key = FIELDS_TO_MB_KEYS[tag] - value = str(value).lower().strip() - if key == "catno": - value = value.replace(" ", "") - if value: - criteria[key] = value - - # Abort if we have no search terms. - if not any(criteria.values()): - return - - try: - log.debug("Searching for MusicBrainz releases with: {!r}", criteria) - res = musicbrainzngs.search_releases( - limit=config["musicbrainz"]["searchlimit"].get(int), **criteria - ) - except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError( - exc, "release search", criteria, traceback.format_exc() - ) - for release in res["release-list"]: - # The search result is missing some data (namely, the tracks), - # so we just use the ID and fetch the rest of the information. - albuminfo = album_for_id(release["id"]) - if albuminfo is not None: - yield albuminfo - - -def match_track( - artist: str, - title: str, -) -> Iterator[beets.autotag.hooks.TrackInfo]: - """Searches for a single track and returns an iterable of TrackInfo - objects. May raise a MusicBrainzAPIError. 
- """ - criteria = { - "artist": artist.lower().strip(), - "recording": title.lower().strip(), - } - - if not any(criteria.values()): - return - - try: - res = musicbrainzngs.search_recordings( - limit=config["musicbrainz"]["searchlimit"].get(int), **criteria - ) - except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError( - exc, "recording search", criteria, traceback.format_exc() - ) - for recording in res["recording-list"]: - yield track_info(recording) - - -def _parse_id(s: str) -> str | None: - """Search for a MusicBrainz ID in the given string and return it. If - no ID can be found, return None. - """ - # Find the first thing that looks like a UUID/MBID. - match = re.search("[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}", s) - if match is not None: - return match.group() if match else None - return None - - -def _is_translation(r): - _trans_key = "transl-tracklisting" - return r["type"] == _trans_key and r["direction"] == "backward" - - -def _find_actual_release_from_pseudo_release( - pseudo_rel: dict, -) -> dict | None: - try: - relations = pseudo_rel["release"]["release-relation-list"] - except KeyError: - return None - - # currently we only support trans(liter)ation's - translations = [r for r in relations if _is_translation(r)] - - if not translations: - return None - - actual_id = translations[0]["target"] - - return musicbrainzngs.get_release_by_id(actual_id, RELEASE_INCLUDES) - - -def _merge_pseudo_and_actual_album( - pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo -) -> beets.autotag.hooks.AlbumInfo | None: - """ - Merges a pseudo release with its actual release. - - This implementation is naive, it doesn't overwrite fields, - like status or ids. - - According to the ticket PICARD-145, the main release id should be used. - But the ticket has been in limbo since over a decade now. 
- It also suggests the introduction of the tag `musicbrainz_pseudoreleaseid`, - but as of this field can't be found in any official Picard docs, - hence why we did not implement that for now. - """ - merged = pseudo.copy() - from_actual = { - k: actual[k] - for k in [ - "media", - "mediums", - "country", - "catalognum", - "year", - "month", - "day", - "original_year", - "original_month", - "original_day", - "label", - "barcode", - "asin", - "style", - "genre", - ] - } - merged.update(from_actual) - return merged - - -def album_for_id(releaseid: str) -> beets.autotag.hooks.AlbumInfo | None: - """Fetches an album by its MusicBrainz ID and returns an AlbumInfo - object or None if the album is not found. May raise a - MusicBrainzAPIError. - """ - log.debug("Requesting MusicBrainz release {}", releaseid) - albumid = _parse_id(releaseid) - if not albumid: - log.debug("Invalid MBID ({0}).", releaseid) - return None - try: - res = musicbrainzngs.get_release_by_id(albumid, RELEASE_INCLUDES) - - # resolve linked release relations - actual_res = None - - if res["release"].get("status") == "Pseudo-Release": - actual_res = _find_actual_release_from_pseudo_release(res) - - except musicbrainzngs.ResponseError: - log.debug("Album ID match failed.") - return None - except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError( - exc, "get release by ID", albumid, traceback.format_exc() - ) - - # release is potentially a pseudo release - release = album_info(res["release"]) - - # should be None unless we're dealing with a pseudo release - if actual_res is not None: - actual_release = album_info(actual_res["release"]) - return _merge_pseudo_and_actual_album(release, actual_release) - else: - return release - - -def track_for_id(releaseid: str) -> beets.autotag.hooks.TrackInfo | None: - """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object - or None if no track is found. May raise a MusicBrainzAPIError. 
- """ - trackid = _parse_id(releaseid) - if not trackid: - log.debug("Invalid MBID ({0}).", releaseid) - return None - try: - res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES) - except musicbrainzngs.ResponseError: - log.debug("Track ID match failed.") - return None - except musicbrainzngs.MusicBrainzError as exc: - raise MusicBrainzAPIError( - exc, "get recording by ID", trackid, traceback.format_exc() - ) - return track_info(res["recording"]) diff --git a/beets/config_default.yaml b/beets/config_default.yaml index c5cebd441..c0bab8056 100644 --- a/beets/config_default.yaml +++ b/beets/config_default.yaml @@ -6,7 +6,8 @@ statefile: state.pickle # --------------- Plugins --------------- -plugins: [] +plugins: [musicbrainz] + pluginpath: [] # --------------- Import --------------- @@ -126,19 +127,12 @@ ui: action_default: ['bold', 'cyan'] action: ['bold', 'cyan'] # New Colors - text: ['normal'] text_faint: ['faint'] import_path: ['bold', 'blue'] import_path_items: ['bold', 'blue'] - added: ['green'] - removed: ['red'] changed: ['yellow'] - added_highlight: ['bold', 'green'] - removed_highlight: ['bold', 'red'] - changed_highlight: ['bold', 'yellow'] - text_diff_added: ['bold', 'red'] + text_diff_added: ['bold', 'green'] text_diff_removed: ['bold', 'red'] - text_diff_changed: ['bold', 'red'] action_description: ['white'] import: indentation: @@ -163,22 +157,6 @@ sort_case_insensitive: yes overwrite_null: album: [] track: [] -musicbrainz: - enabled: yes - host: musicbrainz.org - https: no - ratelimit: 1 - ratelimit_interval: 1.0 - searchlimit: 5 - extra_tags: [] - genres: no - external_ids: - discogs: no - bandcamp: no - spotify: no - deezer: no - beatport: no - tidal: no match: strong_rec_thresh: 0.04 @@ -188,7 +166,7 @@ match: missing_tracks: medium unmatched_tracks: medium distance_weights: - source: 2.0 + data_source: 2.0 artist: 3.0 album: 3.0 media: 1.0 diff --git a/beets/dbcore/db.py b/beets/dbcore/db.py index dd8401935..110cd70d0 100755 --- 
a/beets/dbcore/db.py +++ b/beets/dbcore/db.py @@ -17,17 +17,31 @@ from __future__ import annotations import contextlib +import functools import os import re import sqlite3 +import sys import threading import time from abc import ABC from collections import defaultdict -from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence -from sqlite3 import Connection -from typing import TYPE_CHECKING, Any, AnyStr, Callable, Generic, TypeVar, cast +from collections.abc import ( + Callable, + Generator, + Iterable, + Iterator, + Mapping, + Sequence, +) +from functools import cached_property +from sqlite3 import Connection, sqlite_version_info +from typing import TYPE_CHECKING, Any, AnyStr, Generic +from typing_extensions import ( + Self, + TypeVar, # default value support +) from unidecode import unidecode import beets @@ -49,10 +63,7 @@ if TYPE_CHECKING: from .query import SQLiteType - D = TypeVar("D", bound="Database", default=Any) -else: - D = TypeVar("D", bound="Database") - +D = TypeVar("D", bound="Database", default=Any) FlexAttrs = dict[str, str] @@ -66,6 +77,20 @@ class DBAccessError(Exception): """ +class DBCustomFunctionError(Exception): + """A sqlite function registered by beets failed.""" + + def __init__(self): + super().__init__( + "beets defined SQLite function failed; " + "see the other errors above for details" + ) + + +class NotFoundError(LookupError): + pass + + class FormattedMapping(Mapping[str, str]): """A `dict`-like formatted view of a model. @@ -80,6 +105,8 @@ class FormattedMapping(Mapping[str, str]): are replaced. 
""" + model: Model + ALL_KEYS = "*" def __init__( @@ -126,8 +153,8 @@ class FormattedMapping(Mapping[str, str]): value = value.decode("utf-8", "ignore") if self.for_path: - sep_repl = cast(str, beets.config["path_sep_replace"].as_str()) - sep_drive = cast(str, beets.config["drive_sep_replace"].as_str()) + sep_repl: str = beets.config["path_sep_replace"].as_str() + sep_drive: str = beets.config["drive_sep_replace"].as_str() if re.match(r"^\w:", value): value = re.sub(r"(?<=^\w):", sep_drive, value) @@ -289,19 +316,22 @@ class Model(ABC, Generic[D]): terms. """ - _types: dict[str, types.Type] = {} - """Optional Types for non-fixed (i.e., flexible and computed) fields. - """ + @cached_classproperty + def _types(cls) -> dict[str, types.Type]: + """Optional types for non-fixed (flexible and computed) fields.""" + return {} _sorts: dict[str, type[FieldSort]] = {} """Optional named sort criteria. The keys are strings and the values are subclasses of `Sort`. """ - _queries: dict[str, FieldQueryType] = {} - """Named queries that use a field-like `name:value` syntax but which - do not relate to any specific field. - """ + @cached_classproperty + def _queries(cls) -> dict[str, FieldQueryType]: + """Named queries that use a field-like `name:value` syntax but which + do not relate to any specific field. + """ + return {} _always_dirty = False """By default, fields only become "dirty" when their value actually @@ -340,6 +370,22 @@ class Model(ABC, Generic[D]): """Fields in the related table.""" return cls._relation._fields.keys() - cls.shared_db_fields + @cached_property + def db(self) -> D: + """Get the database associated with this object. + + This validates that the database is attached and the object has an id. 
+ """ + return self._check_db() + + def get_fresh_from_db(self) -> Self: + """Load this object from the database.""" + model_cls = self.__class__ + if obj := self.db._get(model_cls, self.id): + return obj + + raise NotFoundError(f"No matching {model_cls.__name__} found") from None + @classmethod def _getters(cls: type[Model]): """Return a mapping from field names to getter functions.""" @@ -389,9 +435,9 @@ class Model(ABC, Generic[D]): return obj def __repr__(self) -> str: - return "{}({})".format( - type(self).__name__, - ", ".join(f"{k}={v!r}" for k, v in dict(self).items()), + return ( + f"{type(self).__name__}" + f"({', '.join(f'{k}={v!r}' for k, v in dict(self).items())})" ) def clear_dirty(self): @@ -408,9 +454,9 @@ class Model(ABC, Generic[D]): exception is raised otherwise. """ if not self._db: - raise ValueError("{} has no database".format(type(self).__name__)) + raise ValueError(f"{type(self).__name__} has no database") if need_id and not self.id: - raise ValueError("{} has no id".format(type(self).__name__)) + raise ValueError(f"{type(self).__name__} has no id") return self._db @@ -579,7 +625,6 @@ class Model(ABC, Generic[D]): """ if fields is None: fields = self._fields - db = self._check_db() # Build assignments for query. assignments = [] @@ -587,16 +632,14 @@ class Model(ABC, Generic[D]): for key in fields: if key != "id" and key in self._dirty: self._dirty.remove(key) - assignments.append(key + "=?") + assignments.append(f"{key}=?") value = self._type(key).to_sql(self[key]) subvars.append(value) - with db.transaction() as tx: + with self.db.transaction() as tx: # Main table update. if assignments: - query = "UPDATE {} SET {} WHERE id=?".format( - self._table, ",".join(assignments) - ) + query = f"UPDATE {self._table} SET {','.join(assignments)} WHERE id=?" 
subvars.append(self.id) tx.mutate(query, subvars) @@ -604,10 +647,11 @@ class Model(ABC, Generic[D]): for key, value in self._values_flex.items(): if key in self._dirty: self._dirty.remove(key) + value = self._type(key).to_sql(value) tx.mutate( - "INSERT INTO {} " + f"INSERT INTO {self._flex_table} " "(entity_id, key, value) " - "VALUES (?, ?, ?);".format(self._flex_table), + "VALUES (?, ?, ?);", (self.id, key, value), ) @@ -626,21 +670,16 @@ class Model(ABC, Generic[D]): If check_revision is true, the database is only queried loaded when a transaction has been committed since the item was last loaded. """ - db = self._check_db() - if not self._dirty and db.revision == self._revision: + if not self._dirty and self.db.revision == self._revision: # Exit early return - stored_obj = db._get(type(self), self.id) - assert stored_obj is not None, f"object {self.id} not in DB" - self._values_fixed = LazyConvertDict(self) - self._values_flex = LazyConvertDict(self) - self.update(dict(stored_obj)) + + self.__dict__.update(self.get_fresh_from_db().__dict__) self.clear_dirty() def remove(self): """Remove the object's associated rows from the database.""" - db = self._check_db() - with db.transaction() as tx: + with self.db.transaction() as tx: tx.mutate(f"DELETE FROM {self._table} WHERE id=?", (self.id,)) tx.mutate( f"DELETE FROM {self._flex_table} WHERE entity_id=?", (self.id,) @@ -656,7 +695,7 @@ class Model(ABC, Generic[D]): """ if db: self._db = db - db = self._check_db(False) + db = self._check_db(need_id=False) with db.transaction() as tx: new_id = tx.mutate(f"INSERT INTO {self._table} DEFAULT VALUES") @@ -677,7 +716,7 @@ class Model(ABC, Generic[D]): self, included_keys: str = _formatter.ALL_KEYS, for_path: bool = False, - ): + ) -> FormattedMapping: """Get a mapping containing all values on this object formatted as human-readable unicode strings. 
""" @@ -721,9 +760,9 @@ class Model(ABC, Generic[D]): Remove the database connection as sqlite connections are not picklable. """ - state = self.__dict__.copy() - state["_db"] = None - return state + return { + k: v for k, v in self.__dict__.items() if k not in {"_db", "db"} + } # Database controller and supporting interfaces. @@ -928,10 +967,10 @@ class Transaction: def __exit__( self, - exc_type: type[Exception], - exc_value: Exception, - traceback: TracebackType, - ): + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> bool | None: """Complete a transaction. This must be the most recently entered but not yet exited transaction. If it is the last active transaction, the database updates are committed. @@ -947,6 +986,14 @@ class Transaction: self._mutated = False self.db._db_lock.release() + if ( + isinstance(exc_value, sqlite3.OperationalError) + and exc_value.args[0] == "user-defined function raised exception" + ): + raise DBCustomFunctionError() + + return None + def query( self, statement: str, subvals: Sequence[SQLiteType] = () ) -> list[sqlite3.Row]: @@ -1007,6 +1054,13 @@ class Database: "sqlite3 must be compiled with multi-threading support" ) + # Print tracebacks for exceptions in user defined functions + # See also `self.add_functions` and `DBCustomFunctionError`. + # + # `if`: use feature detection because PyPy doesn't support this. 
+ if hasattr(sqlite3, "enable_callback_tracebacks"): + sqlite3.enable_callback_tracebacks(True) + self.path = path self.timeout = timeout @@ -1102,9 +1156,16 @@ class Database: return bytestring - conn.create_function("regexp", 2, regexp) - conn.create_function("unidecode", 1, unidecode) - conn.create_function("bytelower", 1, bytelower) + create_function = conn.create_function + if sys.version_info >= (3, 8) and sqlite_version_info >= (3, 8, 3): + # Let sqlite make extra optimizations + create_function = functools.partial( + conn.create_function, deterministic=True + ) + + create_function("regexp", 2, regexp) + create_function("unidecode", 1, unidecode) + create_function("bytelower", 1, bytelower) def _close(self): """Close the all connections to the underlying SQLite database @@ -1158,7 +1219,7 @@ class Database: """ # Get current schema. with self.transaction() as tx: - rows = tx.query("PRAGMA table_info(%s)" % table) + rows = tx.query(f"PRAGMA table_info({table})") current_fields = {row[1] for row in rows} field_names = set(fields.keys()) @@ -1171,9 +1232,7 @@ class Database: columns = [] for name, typ in fields.items(): columns.append(f"{name} {typ.sql}") - setup_sql = "CREATE TABLE {} ({});\n".format( - table, ", ".join(columns) - ) + setup_sql = f"CREATE TABLE {table} ({', '.join(columns)});\n" else: # Table exists does not match the field set. @@ -1181,8 +1240,8 @@ class Database: for name, typ in fields.items(): if name in current_fields: continue - setup_sql += "ALTER TABLE {} ADD COLUMN {} {};\n".format( - table, name, typ.sql + setup_sql += ( + f"ALTER TABLE {table} ADD COLUMN {name} {typ.sql};\n" ) with self.transaction() as tx: @@ -1193,18 +1252,16 @@ class Database: for the given entity (if they don't exist). 
""" with self.transaction() as tx: - tx.script( - """ - CREATE TABLE IF NOT EXISTS {0} ( + tx.script(f""" + CREATE TABLE IF NOT EXISTS {flex_table} ( id INTEGER PRIMARY KEY, entity_id INTEGER, key TEXT, value TEXT, UNIQUE(entity_id, key) ON CONFLICT REPLACE); - CREATE INDEX IF NOT EXISTS {0}_by_entity - ON {0} (entity_id); - """.format(flex_table) - ) + CREATE INDEX IF NOT EXISTS {flex_table}_by_entity + ON {flex_table} (entity_id); + """) # Querying. @@ -1266,12 +1323,6 @@ class Database: sort if sort.is_slow() else None, # Slow sort component. ) - def _get( - self, - model_cls: type[AnyModel], - id, - ) -> AnyModel | None: - """Get a Model object by its id or None if the id does not - exist. - """ - return self._fetch(model_cls, MatchQuery("id", id)).get() + def _get(self, model_cls: type[AnyModel], id_: int) -> AnyModel | None: + """Get a Model object by its id or None if the id does not exist.""" + return self._fetch(model_cls, MatchQuery("id", id_)).get() diff --git a/beets/dbcore/query.py b/beets/dbcore/query.py index c7ca44452..dfeb42707 100644 --- a/beets/dbcore/query.py +++ b/beets/dbcore/query.py @@ -16,26 +16,34 @@ from __future__ import annotations +import os import re import unicodedata from abc import ABC, abstractmethod from collections.abc import Iterator, MutableSequence, Sequence from datetime import datetime, timedelta -from functools import reduce +from functools import cached_property, reduce from operator import mul, or_ from re import Pattern from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union from beets import util +from beets.util.units import raw_seconds_short if TYPE_CHECKING: - from beets.dbcore import Model - from beets.dbcore.db import AnyModel + from beets.dbcore.db import AnyModel, Model P = TypeVar("P", default=Any) else: P = TypeVar("P") +# To use the SQLite "blob" type, it doesn't suffice to provide a byte +# string; SQLite treats that as encoded text. 
Wrapping it in a +# `memoryview` tells it that we actually mean non-text data. +# needs to be defined in here due to circular import. +# TODO: remove it from this module and define it in dbcore/types.py instead +BLOB_TYPE = memoryview + class ParsingError(ValueError): """Abstract class for any unparsable user-requested album/query @@ -78,6 +86,7 @@ class Query(ABC): """Return a set with field names that this query operates on.""" return set() + @abstractmethod def clause(self) -> tuple[str | None, Sequence[Any]]: """Generate an SQLite expression implementing the query. @@ -88,14 +97,12 @@ class Query(ABC): The default implementation returns None, falling back to a slow query using `match()`. """ - return None, () @abstractmethod def match(self, obj: Model): """Check whether this query matches a given Model. Can be used to perform queries on arbitrary sets of Model. """ - ... def __and__(self, other: Query) -> AndQuery: return AndQuery([self, other]) @@ -145,7 +152,7 @@ class FieldQuery(Query, Generic[P]): self.fast = fast def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: - return self.field, () + raise NotImplementedError def clause(self) -> tuple[str | None, Sequence[SQLiteType]]: if self.fast: @@ -157,7 +164,7 @@ class FieldQuery(Query, Generic[P]): @classmethod def value_match(cls, pattern: P, value: Any): """Determine whether the value matches the pattern.""" - raise NotImplementedError() + raise NotImplementedError def match(self, obj: Model) -> bool: return self.value_match(self.pattern, obj.get(self.field_name)) @@ -183,7 +190,7 @@ class MatchQuery(FieldQuery[AnySQLiteType]): """A query that looks for exact matches in an Model field.""" def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: - return self.field + " = ?", [self.pattern] + return f"{self.field} = ?", [self.pattern] @classmethod def value_match(cls, pattern: AnySQLiteType, value: Any) -> bool: @@ -197,7 +204,7 @@ class NoneQuery(FieldQuery[None]): super().__init__(field, None, fast) 
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: - return self.field + " IS NULL", () + return f"{self.field} IS NULL", () def match(self, obj: Model) -> bool: return obj.get(self.field_name) is None @@ -227,7 +234,7 @@ class StringFieldQuery(FieldQuery[P]): """Determine whether the value matches the pattern. Both arguments are strings. Subclasses implement this method. """ - raise NotImplementedError() + raise NotImplementedError class StringQuery(StringFieldQuery[str]): @@ -239,7 +246,7 @@ class StringQuery(StringFieldQuery[str]): .replace("%", "\\%") .replace("_", "\\_") ) - clause = self.field + " like ? escape '\\'" + clause = f"{self.field} like ? escape '\\'" subvals = [search] return clause, subvals @@ -257,8 +264,8 @@ class SubstringQuery(StringFieldQuery[str]): .replace("%", "\\%") .replace("_", "\\_") ) - search = "%" + pattern + "%" - clause = self.field + " like ? escape '\\'" + search = f"%{pattern}%" + clause = f"{self.field} like ? escape '\\'" subvals = [search] return clause, subvals @@ -267,6 +274,91 @@ class SubstringQuery(StringFieldQuery[str]): return pattern.lower() in value.lower() +class PathQuery(FieldQuery[bytes]): + """A query that matches all items under a given path. + + Matching can either be case-insensitive or case-sensitive. By + default, the behavior depends on the OS: case-insensitive on Windows + and case-sensitive otherwise. + """ + + def __init__(self, field: str, pattern: bytes, fast: bool = True) -> None: + """Create a path query. + + `pattern` must be a path, either to a file or a directory. + """ + path = util.normpath(pattern) + + # Case sensitivity depends on the filesystem that the query path is located on. + self.case_sensitive = util.case_sensitive(path) + + # Use a normalized-case pattern for case-insensitive matches. + if not self.case_sensitive: + # We need to lowercase the entire path, not just the pattern. + # In particular, on Windows, the drive letter is otherwise not + # lowercased. 
+ # This also ensures that the `match()` method below and the SQL + # from `col_clause()` do the same thing. + path = path.lower() + + super().__init__(field, path, fast) + + @cached_property + def dir_path(self) -> bytes: + return os.path.join(self.pattern, b"") + + @staticmethod + def is_path_query(query_part: str) -> bool: + """Try to guess whether a unicode query part is a path query. + + The path query must + 1. precede the colon in the query, if a colon is present + 2. contain either ``os.sep`` or ``os.altsep`` (Windows) + 3. this path must exist on the filesystem. + """ + query_part = query_part.split(":")[0] + + return ( + # make sure the query part contains a path separator + bool(set(query_part) & {os.sep, os.altsep}) + and os.path.exists(util.normpath(query_part)) + ) + + def match(self, obj: Model) -> bool: + """Check whether a model object's path matches this query. + + Performs either an exact match against the pattern or checks if the path + starts with the given directory path. Case sensitivity depends on the object's + filesystem as determined during initialization. + """ + path = obj.path if self.case_sensitive else obj.path.lower() + return (path == self.pattern) or path.startswith(self.dir_path) + + def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: + """Generate an SQL clause that implements path matching in the database. + + Returns a tuple of SQL clause string and parameter values list that matches + paths either exactly or by directory prefix. Handles case sensitivity + appropriately using BYTELOWER for case-insensitive matches. + """ + if self.case_sensitive: + left, right = self.field, "?" + else: + left, right = f"BYTELOWER({self.field})", "BYTELOWER(?)" + + return f"({left} = {right}) || (substr({left}, 1, ?) 
= {right})", [ + BLOB_TYPE(self.pattern), + len(dir_blob := BLOB_TYPE(self.dir_path)), + dir_blob, + ] + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.field!r}, {self.pattern!r}, " + f"fast={self.fast}, case_sensitive={self.case_sensitive})" + ) + + class RegexpQuery(StringFieldQuery[Pattern[str]]): """A query that matches a regular expression in a specific Model field. @@ -320,39 +412,6 @@ class BooleanQuery(MatchQuery[int]): super().__init__(field_name, pattern_int, fast) -class BytesQuery(FieldQuery[bytes]): - """Match a raw bytes field (i.e., a path). This is a necessary hack - to work around the `sqlite3` module's desire to treat `bytes` and - `unicode` equivalently in Python 2. Always use this query instead of - `MatchQuery` when matching on BLOB values. - """ - - def __init__(self, field_name: str, pattern: bytes | str | memoryview): - # Use a buffer/memoryview representation of the pattern for SQLite - # matching. This instructs SQLite to treat the blob as binary - # rather than encoded Unicode. - if isinstance(pattern, (str, bytes)): - if isinstance(pattern, str): - bytes_pattern = pattern.encode("utf-8") - else: - bytes_pattern = pattern - self.buf_pattern = memoryview(bytes_pattern) - elif isinstance(pattern, memoryview): - self.buf_pattern = pattern - bytes_pattern = bytes(pattern) - else: - raise ValueError("pattern must be bytes, str, or memoryview") - - super().__init__(field_name, bytes_pattern) - - def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: - return self.field + " = ?", [self.buf_pattern] - - @classmethod - def value_match(cls, pattern: bytes, value: Any) -> bool: - return pattern == value - - class NumericQuery(FieldQuery[str]): """Matches numeric fields. A syntax using Ruby-style range ellipses (``..``) lets users specify one- or two-sided ranges. 
For example, @@ -412,11 +471,11 @@ class NumericQuery(FieldQuery[str]): def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: if self.point is not None: - return self.field + "=?", (self.point,) + return f"{self.field}=?", (self.point,) else: if self.rangemin is not None and self.rangemax is not None: return ( - "{0} >= ? AND {0} <= ?".format(self.field), + f"{self.field} >= ? AND {self.field} <= ?", (self.rangemin, self.rangemax), ) elif self.rangemin is not None: @@ -490,9 +549,9 @@ class CollectionQuery(Query): if not subq_clause: # Fall back to slow query. return None, () - clause_parts.append("(" + subq_clause + ")") + clause_parts.append(f"({subq_clause})") subvals += subq_subvals - clause = (" " + joiner + " ").join(clause_parts) + clause = f" {joiner} ".join(clause_parts) return clause, subvals def __repr__(self) -> str: @@ -631,9 +690,7 @@ class Period: ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"), # second ) relative_units = {"y": 365, "m": 30, "w": 7, "d": 1} - relative_re = ( - "(?P[+|-]?)(?P[0-9]+)" + "(?P[y|m|w|d])" - ) + relative_re = "(?P[+|-]?)(?P[0-9]+)(?P[y|m|w|d])" def __init__(self, date: datetime, precision: str): """Create a period with the given date (a `datetime` object) and @@ -741,9 +798,7 @@ class DateInterval: def __init__(self, start: datetime | None, end: datetime | None): if start is not None and end is not None and not start < end: - raise ValueError( - "start date {} is not before end date {}".format(start, end) - ) + raise ValueError(f"start date {start} is not before end date {end}") self.start = start self.end = end @@ -791,8 +846,6 @@ class DateQuery(FieldQuery[str]): date = datetime.fromtimestamp(timestamp) return self.interval.contains(date) - _clause_tmpl = "{0} {1} ?" 
- def col_clause(self) -> tuple[str, Sequence[SQLiteType]]: clause_parts = [] subvals = [] @@ -800,11 +853,11 @@ class DateQuery(FieldQuery[str]): # Convert the `datetime` objects to an integer number of seconds since # the (local) Unix epoch using `datetime.timestamp()`. if self.interval.start: - clause_parts.append(self._clause_tmpl.format(self.field, ">=")) + clause_parts.append(f"{self.field} >= ?") subvals.append(int(self.interval.start.timestamp())) if self.interval.end: - clause_parts.append(self._clause_tmpl.format(self.field, "<")) + clause_parts.append(f"{self.field} < ?") subvals.append(int(self.interval.end.timestamp())) if clause_parts: @@ -834,7 +887,7 @@ class DurationQuery(NumericQuery): if not s: return None try: - return util.raw_seconds_short(s) + return raw_seconds_short(s) except ValueError: try: return float(s) @@ -844,6 +897,24 @@ class DurationQuery(NumericQuery): ) +class SingletonQuery(FieldQuery[str]): + """This query is responsible for the 'singleton' lookup. + + It is based on the FieldQuery and constructs a SQL clause + 'album_id is NULL' which yields the same result as the previous filter + in Python but is more performant since it's done in SQL. + + Using util.str2bool ensures that lookups like singleton:true, singleton:1 + and singleton:false, singleton:0 are handled consistently. + """ + + def __new__(cls, field: str, value: str, *args, **kwargs): + query = NoneQuery("album_id") + if util.str2bool(value): + return query + return NotQuery(query) + + # Sorting. 
@@ -997,9 +1068,9 @@ class FixedFieldSort(FieldSort): if self.case_insensitive: field = ( "(CASE " - "WHEN TYPEOF({0})='text' THEN LOWER({0}) " - "WHEN TYPEOF({0})='blob' THEN LOWER({0}) " - "ELSE {0} END)".format(self.field) + f"WHEN TYPEOF({self.field})='text' THEN LOWER({self.field}) " + f"WHEN TYPEOF({self.field})='blob' THEN LOWER({self.field}) " + f"ELSE {self.field} END)" ) else: field = self.field diff --git a/beets/dbcore/types.py b/beets/dbcore/types.py index 2a64b2ed9..3b4badd33 100644 --- a/beets/dbcore/types.py +++ b/beets/dbcore/types.py @@ -16,19 +16,20 @@ from __future__ import annotations +import re +import time import typing from abc import ABC from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast -from beets.util import str2bool +import beets +from beets import util +from beets.util.units import human_seconds_short, raw_seconds_short -from .query import ( - BooleanQuery, - FieldQueryType, - NumericQuery, - SQLiteType, - SubstringQuery, -) +from . import query + +SQLiteType = query.SQLiteType +BLOB_TYPE = query.BLOB_TYPE class ModelType(typing.Protocol): @@ -61,7 +62,7 @@ class Type(ABC, Generic[T, N]): """The SQLite column type for the value. """ - query: FieldQueryType = SubstringQuery + query: query.FieldQueryType = query.SubstringQuery """The `Query` subclass to be used when querying the field. 
""" @@ -160,7 +161,7 @@ class BaseInteger(Type[int, N]): """A basic integer type.""" sql = "INTEGER" - query = NumericQuery + query = query.NumericQuery model_type = int def normalize(self, value: Any) -> int | N: @@ -193,7 +194,7 @@ class BasePaddedInt(BaseInteger[N]): self.digits = digits def format(self, value: int | N) -> str: - return "{0:0{1}d}".format(value or 0, self.digits) + return f"{value or 0:0{self.digits}d}" class PaddedInt(BasePaddedInt[int]): @@ -218,7 +219,7 @@ class ScaledInt(Integer): self.suffix = suffix def format(self, value: int) -> str: - return "{}{}".format((value or 0) // self.unit, self.suffix) + return f"{(value or 0) // self.unit}{self.suffix}" class Id(NullInteger): @@ -241,14 +242,14 @@ class BaseFloat(Type[float, N]): """ sql = "REAL" - query: FieldQueryType = NumericQuery + query: query.FieldQueryType = query.NumericQuery model_type = float def __init__(self, digits: int = 1): self.digits = digits def format(self, value: float | N) -> str: - return "{0:.{1}f}".format(value or 0, self.digits) + return f"{value or 0:.{self.digits}f}" class Float(BaseFloat[float]): @@ -271,7 +272,7 @@ class BaseString(Type[T, N]): """A Unicode string type.""" sql = "TEXT" - query = SubstringQuery + query = query.SubstringQuery def normalize(self, value: Any) -> T | N: if value is None: @@ -291,7 +292,7 @@ class DelimitedString(BaseString[list[str], list[str]]): containing delimiter-separated values. 
""" - model_type = list + model_type = list[str] def __init__(self, delimiter: str): self.delimiter = delimiter @@ -312,14 +313,145 @@ class Boolean(Type): """A boolean type.""" sql = "INTEGER" - query = BooleanQuery + query = query.BooleanQuery model_type = bool def format(self, value: bool) -> str: return str(bool(value)) def parse(self, string: str) -> bool: - return str2bool(string) + return util.str2bool(string) + + +class DateType(Float): + # TODO representation should be `datetime` object + # TODO distinguish between date and time types + query = query.DateQuery + + def format(self, value): + return time.strftime( + beets.config["time_format"].as_str(), time.localtime(value or 0) + ) + + def parse(self, string): + try: + # Try a formatted date string. + return time.mktime( + time.strptime(string, beets.config["time_format"].as_str()) + ) + except ValueError: + # Fall back to a plain timestamp number. + try: + return float(string) + except ValueError: + return self.null + + +class BasePathType(Type[bytes, N]): + """A dbcore type for filesystem paths. + + These are represented as `bytes` objects, in keeping with + the Unix filesystem abstraction. + """ + + sql = "BLOB" + query = query.PathQuery + model_type = bytes + + def parse(self, string: str) -> bytes: + return util.normpath(string) + + def normalize(self, value: Any) -> bytes | N: + if isinstance(value, str): + # Paths stored internally as encoded bytes. + return util.bytestring_path(value) + + elif isinstance(value, BLOB_TYPE): + # We unwrap buffers to bytes. 
+ return bytes(value) + + else: + return value + + def from_sql(self, sql_value): + return self.normalize(sql_value) + + def to_sql(self, value: bytes) -> BLOB_TYPE: + if isinstance(value, bytes): + value = BLOB_TYPE(value) + return value + + +class NullPathType(BasePathType[None]): + @property + def null(self) -> None: + return None + + def format(self, value: bytes | None) -> str: + return util.displayable_path(value or b"") + + +class PathType(BasePathType[bytes]): + @property + def null(self) -> bytes: + return b"" + + def format(self, value: bytes) -> str: + return util.displayable_path(value or b"") + + +class MusicalKey(String): + """String representing the musical key of a song. + + The standard format is C, Cm, C#, C#m, etc. + """ + + ENHARMONIC = { + r"db": "c#", + r"eb": "d#", + r"gb": "f#", + r"ab": "g#", + r"bb": "a#", + } + + null = None + + def parse(self, key): + key = key.lower() + for flat, sharp in self.ENHARMONIC.items(): + key = re.sub(flat, sharp, key) + key = re.sub(r"[\W\s]+minor", "m", key) + key = re.sub(r"[\W\s]+major", "", key) + return key.capitalize() + + def normalize(self, key): + if key is None: + return None + else: + return self.parse(key) + + +class DurationType(Float): + """Human-friendly (M:SS) representation of a time interval.""" + + query = query.DurationQuery + + def format(self, value): + if not beets.config["format_raw_length"].get(bool): + return human_seconds_short(value or 0.0) + else: + return value + + def parse(self, string): + try: + # Try to format back hh:ss to seconds. + return raw_seconds_short(string) + except ValueError: + # Fall back to a plain float. + try: + return float(string) + except ValueError: + return self.null # Shared instances of common types. 
@@ -331,6 +463,7 @@ FLOAT = Float() NULL_FLOAT = NullFloat() STRING = String() BOOLEAN = Boolean() +DATE = DateType() SEMICOLON_SPACE_DSV = DelimitedString(delimiter="; ") # Will set the proper null char in mediafile diff --git a/beetsplug/gmusic.py b/beets/importer/__init__.py similarity index 53% rename from beetsplug/gmusic.py rename to beets/importer/__init__.py index 5dda3a2e5..586b238e6 100644 --- a/beetsplug/gmusic.py +++ b/beets/importer/__init__.py @@ -1,4 +1,5 @@ # This file is part of beets. +# Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -11,17 +12,27 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -"""Deprecation warning for the removed gmusic plugin.""" +"""Provides the basic, interface-agnostic workflow for importing and +autotagging music files. +""" -from beets.plugins import BeetsPlugin +from .session import ImportAbortError, ImportSession +from .tasks import ( + Action, + ArchiveImportTask, + ImportTask, + SentinelImportTask, + SingletonImportTask, +) +# Note: Stages are not exposed to the public API -class Gmusic(BeetsPlugin): - def __init__(self): - super().__init__() - - self._log.warning( - "The 'gmusic' plugin has been removed following the" - " shutdown of Google Play Music. Remove the plugin" - " from your configuration to silence this warning." - ) +__all__ = [ + "ImportSession", + "ImportAbortError", + "Action", + "ImportTask", + "ArchiveImportTask", + "SentinelImportTask", + "SingletonImportTask", +] diff --git a/beets/importer/session.py b/beets/importer/session.py new file mode 100644 index 000000000..83c5ad4e3 --- /dev/null +++ b/beets/importer/session.py @@ -0,0 +1,308 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +from __future__ import annotations + +import os +import time +from typing import TYPE_CHECKING + +from beets import config, dbcore, library, logging, plugins, util +from beets.importer.tasks import Action +from beets.util import displayable_path, normpath, pipeline, syspath + +from . import stages as stagefuncs +from .state import ImportState + +if TYPE_CHECKING: + from collections.abc import Sequence + + from beets.util import PathBytes + + from .tasks import ImportTask + + +QUEUE_SIZE = 128 + +# Global logger. +log = logging.getLogger("beets") + + +class ImportAbortError(Exception): + """Raised when the user aborts the tagging operation.""" + + pass + + +class ImportSession: + """Controls an import action. Subclasses should implement methods to + communicate with the user or otherwise make decisions. + """ + + logger: logging.Logger + paths: list[PathBytes] + lib: library.Library + + _is_resuming: dict[bytes, bool] + _merged_items: set[PathBytes] + _merged_dirs: set[PathBytes] + + def __init__( + self, + lib: library.Library, + loghandler: logging.Handler | None, + paths: Sequence[PathBytes] | None, + query: dbcore.Query | None, + ): + """Create a session. + + Parameters + ---------- + lib : library.Library + The library instance to which items will be imported. + loghandler : logging.Handler or None + A logging handler to use for the session's logger. 
If None, a + NullHandler will be used. + paths : os.PathLike or None + The paths to be imported. + query : dbcore.Query or None + A query to filter items for import. + """ + self.lib = lib + self.logger = self._setup_logging(loghandler) + self.query = query + self._is_resuming = {} + self._merged_items = set() + self._merged_dirs = set() + + # Normalize the paths. + self.paths = list(map(normpath, paths or [])) + + def _setup_logging(self, loghandler: logging.Handler | None): + logger = logging.getLogger(__name__) + logger.propagate = False + if not loghandler: + loghandler = logging.NullHandler() + logger.handlers = [loghandler] + return logger + + def set_config(self, config): + """Set `config` property from global import config and make + implied changes. + """ + # FIXME: Maybe this function should not exist and should instead + # provide "decision wrappers" like "should_resume()", etc. + iconfig = dict(config) + self.config = iconfig + + # Incremental and progress are mutually exclusive. + if iconfig["incremental"]: + iconfig["resume"] = False + + # When based on a query instead of directories, never + # save progress or try to resume. + if self.query is not None: + iconfig["resume"] = False + iconfig["incremental"] = False + + if iconfig["reflink"]: + iconfig["reflink"] = iconfig["reflink"].as_choice( + ["auto", True, False] + ) + + # Copy, move, reflink, link, and hardlink are mutually exclusive. + if iconfig["move"]: + iconfig["copy"] = False + iconfig["link"] = False + iconfig["hardlink"] = False + iconfig["reflink"] = False + elif iconfig["link"]: + iconfig["copy"] = False + iconfig["move"] = False + iconfig["hardlink"] = False + iconfig["reflink"] = False + elif iconfig["hardlink"]: + iconfig["copy"] = False + iconfig["move"] = False + iconfig["link"] = False + iconfig["reflink"] = False + elif iconfig["reflink"]: + iconfig["copy"] = False + iconfig["move"] = False + iconfig["link"] = False + iconfig["hardlink"] = False + + # Only delete when copying. 
+ if not iconfig["copy"]: + iconfig["delete"] = False + + self.want_resume = config["resume"].as_choice([True, False, "ask"]) + + def tag_log(self, status, paths: Sequence[PathBytes]): + """Log a message about a given album to the importer log. The status + should reflect the reason the album couldn't be tagged. + """ + self.logger.info("{} {}", status, displayable_path(paths)) + + def log_choice(self, task: ImportTask, duplicate=False): + """Logs the task's current choice if it should be logged. If + ``duplicate``, then this is a secondary choice after a duplicate was + detected and a decision was made. + """ + paths = task.paths + if duplicate: + # Duplicate: log all three choices (skip, keep both, and trump). + if task.should_remove_duplicates: + self.tag_log("duplicate-replace", paths) + elif task.choice_flag in (Action.ASIS, Action.APPLY): + self.tag_log("duplicate-keep", paths) + elif task.choice_flag is Action.SKIP: + self.tag_log("duplicate-skip", paths) + else: + # Non-duplicate: log "skip" and "asis" choices. + if task.choice_flag is Action.ASIS: + self.tag_log("asis", paths) + elif task.choice_flag is Action.SKIP: + self.tag_log("skip", paths) + + def should_resume(self, path: PathBytes): + raise NotImplementedError + + def choose_match(self, task: ImportTask): + raise NotImplementedError + + def resolve_duplicate(self, task: ImportTask, found_duplicates): + raise NotImplementedError + + def choose_item(self, task: ImportTask): + raise NotImplementedError + + def run(self): + """Run the import task.""" + self.logger.info("import started {}", time.asctime()) + self.set_config(config["import"]) + + # Set up the pipeline. + if self.query is None: + stages = [stagefuncs.read_tasks(self)] + else: + stages = [stagefuncs.query_tasks(self)] + + # In pretend mode, just log what would otherwise be imported. 
+ if self.config["pretend"]: + stages += [stagefuncs.log_files(self)] + else: + if self.config["group_albums"] and not self.config["singletons"]: + # Split directory tasks into one task for each album. + stages += [stagefuncs.group_albums(self)] + + # These stages either talk to the user to get a decision or, + # in the case of a non-autotagged import, just choose to + # import everything as-is. In *both* cases, these stages + # also add the music to the library database, so later + # stages need to read and write data from there. + if self.config["autotag"]: + stages += [ + stagefuncs.lookup_candidates(self), + stagefuncs.user_query(self), + ] + else: + stages += [stagefuncs.import_asis(self)] + + # Plugin stages. + for stage_func in plugins.early_import_stages(): + stages.append(stagefuncs.plugin_stage(self, stage_func)) + for stage_func in plugins.import_stages(): + stages.append(stagefuncs.plugin_stage(self, stage_func)) + + stages += [stagefuncs.manipulate_files(self)] + + pl = pipeline.Pipeline(stages) + + # Run the pipeline. + plugins.send("import_begin", session=self) + try: + if config["threaded"]: + pl.run_parallel(QUEUE_SIZE) + else: + pl.run_sequential() + except ImportAbortError: + # User aborted operation. Silently stop. + pass + + # Incremental and resumed imports + + def already_imported(self, toppath: PathBytes, paths: Sequence[PathBytes]): + """Returns true if the files belonging to this task have already + been imported in a previous session. 
+ """ + if self.is_resuming(toppath) and all( + [ImportState().progress_has_element(toppath, p) for p in paths] + ): + return True + if self.config["incremental"] and tuple(paths) in self.history_dirs: + return True + + return False + + _history_dirs = None + + @property + def history_dirs(self) -> set[tuple[PathBytes, ...]]: + # FIXME: This could be simplified to a cached property + if self._history_dirs is None: + self._history_dirs = ImportState().taghistory + return self._history_dirs + + def already_merged(self, paths: Sequence[PathBytes]): + """Returns true if all the paths being imported were part of a merge + during previous tasks. + """ + for path in paths: + if path not in self._merged_items and path not in self._merged_dirs: + return False + return True + + def mark_merged(self, paths: Sequence[PathBytes]): + """Mark paths and directories as merged for future reimport tasks.""" + self._merged_items.update(paths) + dirs = { + os.path.dirname(path) if os.path.isfile(syspath(path)) else path + for path in paths + } + self._merged_dirs.update(dirs) + + def is_resuming(self, toppath: PathBytes): + """Return `True` if user wants to resume import of this path. + + You have to call `ask_resume` first to determine the return value. + """ + return self._is_resuming.get(toppath, False) + + def ask_resume(self, toppath: PathBytes): + """If import of `toppath` was aborted in an earlier session, ask + user if they want to resume the import. + + Determines the return value of `is_resuming(toppath)`. + """ + if self.want_resume and ImportState().progress_has(toppath): + # Either accept immediately or prompt for input to decide. + if self.want_resume is True or self.should_resume(toppath): + log.warning( + "Resuming interrupted import of {}", + util.displayable_path(toppath), + ) + self._is_resuming[toppath] = True + else: + # Clear progress; we're starting from the top. 
+ ImportState().progress_reset(toppath) diff --git a/beets/importer/stages.py b/beets/importer/stages.py new file mode 100644 index 000000000..5474053d0 --- /dev/null +++ b/beets/importer/stages.py @@ -0,0 +1,392 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import annotations + +import itertools +import logging +from typing import TYPE_CHECKING + +from beets import config, plugins +from beets.util import MoveOperation, displayable_path, pipeline + +from .tasks import ( + Action, + ImportTask, + ImportTaskFactory, + SentinelImportTask, + SingletonImportTask, +) + +if TYPE_CHECKING: + from collections.abc import Callable + + from beets import library + + from .session import ImportSession + +# Global logger. +log = logging.getLogger("beets") + +# ---------------------------- Producer functions ---------------------------- # +# Functions that are called first i.e. they generate import tasks + + +def read_tasks(session: ImportSession): + """A generator yielding all the albums (as ImportTask objects) found + in the user-specified list of paths. In the case of a singleton + import, yields single-item tasks instead. + """ + skipped = 0 + + for toppath in session.paths: + # Check whether we need to resume the import. + session.ask_resume(toppath) + + # Generate tasks. 
+ task_factory = ImportTaskFactory(toppath, session) + yield from task_factory.tasks() + skipped += task_factory.skipped + + if not task_factory.imported: + log.warning("No files imported from {}", displayable_path(toppath)) + + # Show skipped directories (due to incremental/resume). + if skipped: + log.info("Skipped {} paths.", skipped) + + +def query_tasks(session: ImportSession): + """A generator that works as a drop-in-replacement for read_tasks. + Instead of finding files from the filesystem, a query is used to + match items from the library. + """ + task: ImportTask + if session.config["singletons"]: + # Search for items. + for item in session.lib.items(session.query): + task = SingletonImportTask(None, item) + for task in task.handle_created(session): + yield task + + else: + # Search for albums. + for album in session.lib.albums(session.query): + log.debug( + "yielding album {0.id}: {0.albumartist} - {0.album}", album + ) + items = list(album.items()) + _freshen_items(items) + + task = ImportTask(None, [album.item_dir()], items) + for task in task.handle_created(session): + yield task + + +# ---------------------------------- Stages ---------------------------------- # +# Functions that process import tasks, may transform or filter them +# They are chained together in the pipeline e.g. stage2(stage1(task)) -> task + + +def group_albums(session: ImportSession): + """A pipeline stage that groups the items of each task into albums + using their metadata. + + Groups are identified using their artist and album fields. The + pipeline stage emits new album tasks for each discovered group. 
+ """ + + def group(item): + return (item.albumartist or item.artist, item.album) + + task = None + while True: + task = yield task + if task.skip: + continue + tasks = [] + sorted_items: list[library.Item] = sorted(task.items, key=group) + for _, items in itertools.groupby(sorted_items, group): + l_items = list(items) + task = ImportTask(task.toppath, [i.path for i in l_items], l_items) + tasks += task.handle_created(session) + tasks.append(SentinelImportTask(task.toppath, task.paths)) + + task = pipeline.multiple(tasks) + + +@pipeline.mutator_stage +def lookup_candidates(session: ImportSession, task: ImportTask): + """A coroutine for performing the initial MusicBrainz lookup for an + album. It accepts lists of Items and yields + (items, cur_artist, cur_album, candidates, rec) tuples. If no match + is found, all of the yielded parameters (except items) are None. + """ + if task.skip: + # FIXME This gets duplicated a lot. We need a better + # abstraction. + return + + plugins.send("import_task_start", session=session, task=task) + log.debug("Looking up: {}", displayable_path(task.paths)) + + # Restrict the initial lookup to IDs specified by the user via the -m + # option. Currently all the IDs are passed onto the tasks directly. + task.lookup_candidates(session.config["search_ids"].as_str_seq()) + + +@pipeline.stage +def user_query(session: ImportSession, task: ImportTask): + """A coroutine for interfacing with the user about the tagging + process. + + The coroutine accepts an ImportTask objects. It uses the + session's `choose_match` method to determine the `action` for + this task. Depending on the action additional stages are executed + and the processed task is yielded. + + It emits the ``import_task_choice`` event for plugins. Plugins have + access to the choice via the ``task.choice_flag`` property and may + choose to change it. 
+ """ + if task.skip: + return task + + if session.already_merged(task.paths): + return pipeline.BUBBLE + + # Ask the user for a choice. + task.choose_match(session) + plugins.send("import_task_choice", session=session, task=task) + + # As-tracks: transition to singleton workflow. + if task.choice_flag is Action.TRACKS: + # Set up a little pipeline for dealing with the singletons. + def emitter(task): + for item in task.items: + task = SingletonImportTask(task.toppath, item) + yield from task.handle_created(session) + yield SentinelImportTask(task.toppath, task.paths) + + return _extend_pipeline( + emitter(task), lookup_candidates(session), user_query(session) + ) + + # As albums: group items by albums and create task for each album + if task.choice_flag is Action.ALBUMS: + return _extend_pipeline( + [task], + group_albums(session), + lookup_candidates(session), + user_query(session), + ) + + _resolve_duplicates(session, task) + + if task.should_merge_duplicates: + # Create a new task for tagging the current items + # and duplicates together + duplicate_items = task.duplicate_items(session.lib) + + # Duplicates would be reimported so make them look "fresh" + _freshen_items(duplicate_items) + duplicate_paths = [item.path for item in duplicate_items] + + # Record merged paths in the session so they are not reimported + session.mark_merged(duplicate_paths) + + merged_task = ImportTask( + None, task.paths + duplicate_paths, task.items + duplicate_items + ) + + return _extend_pipeline( + [merged_task], lookup_candidates(session), user_query(session) + ) + + _apply_choice(session, task) + return task + + +@pipeline.mutator_stage +def import_asis(session: ImportSession, task: ImportTask): + """Select the `action.ASIS` choice for all tasks. + + This stage replaces the initial_lookup and user_query stages + when the importer is run without autotagging. 
+ """ + if task.skip: + return + + log.info("{}", displayable_path(task.paths)) + task.set_choice(Action.ASIS) + _apply_choice(session, task) + + +@pipeline.mutator_stage +def plugin_stage( + session: ImportSession, + func: Callable[[ImportSession, ImportTask], None], + task: ImportTask, +): + """A coroutine (pipeline stage) that calls the given function with + each non-skipped import task. These stages occur between applying + metadata changes and moving/copying/writing files. + """ + if task.skip: + return + + func(session, task) + + # Stage may modify DB, so re-load cached item data. + # FIXME Importer plugins should not modify the database but instead + # the albums and items attached to tasks. + task.reload() + + +@pipeline.stage +def log_files(session: ImportSession, task: ImportTask): + """A coroutine (pipeline stage) to log each file to be imported.""" + if isinstance(task, SingletonImportTask): + log.info("Singleton: {}", displayable_path(task.item["path"])) + elif task.items: + log.info("Album: {}", displayable_path(task.paths[0])) + for item in task.items: + log.info(" {}", displayable_path(item["path"])) + + +# --------------------------------- Consumer --------------------------------- # +# Anything that should be placed last in the pipeline +# In theory every stage could be a consumer, but in practice there are some +# functions which are typically placed last in the pipeline + + +@pipeline.stage +def manipulate_files(session: ImportSession, task: ImportTask): + """A coroutine (pipeline stage) that performs necessary file + manipulations *after* items have been added to the library and + finalizes each task. 
+ """ + if not task.skip: + if task.should_remove_duplicates: + task.remove_duplicates(session.lib) + + if session.config["move"]: + operation = MoveOperation.MOVE + elif session.config["copy"]: + operation = MoveOperation.COPY + elif session.config["link"]: + operation = MoveOperation.LINK + elif session.config["hardlink"]: + operation = MoveOperation.HARDLINK + elif session.config["reflink"] == "auto": + operation = MoveOperation.REFLINK_AUTO + elif session.config["reflink"]: + operation = MoveOperation.REFLINK + else: + operation = None + + task.manipulate_files( + session=session, + operation=operation, + write=session.config["write"], + ) + + # Progress, cleanup, and event. + task.finalize(session) + + +# ---------------------------- Utility functions ----------------------------- # +# Private functions only used in the stages above + + +def _apply_choice(session: ImportSession, task: ImportTask): + """Apply the task's choice to the Album or Item it contains and add + it to the library. + """ + if task.skip: + return + + # Change metadata. + if task.apply: + task.apply_metadata() + plugins.send("import_task_apply", session=session, task=task) + + task.add(session.lib) + + # If ``set_fields`` is set, set those fields to the + # configured values. + # NOTE: This cannot be done before the ``task.add()`` call above, + # because then the ``ImportTask`` won't have an `album` for which + # it can set the fields. + if config["import"]["set_fields"]: + task.set_fields(session.lib) + + +def _resolve_duplicates(session: ImportSession, task: ImportTask): + """Check if a task conflicts with items or albums already imported + and ask the session to resolve this. + """ + if task.choice_flag in (Action.ASIS, Action.APPLY, Action.RETAG): + found_duplicates = task.find_duplicates(session.lib) + if found_duplicates: + log.debug("found duplicates: {}", [o.id for o in found_duplicates]) + + # Get the default action to follow from config. 
+ duplicate_action = config["import"]["duplicate_action"].as_choice( + { + "skip": "s", + "keep": "k", + "remove": "r", + "merge": "m", + "ask": "a", + } + ) + log.debug("default action for duplicates: {}", duplicate_action) + + if duplicate_action == "s": + # Skip new. + task.set_choice(Action.SKIP) + elif duplicate_action == "k": + # Keep both. Do nothing; leave the choice intact. + pass + elif duplicate_action == "r": + # Remove old. + task.should_remove_duplicates = True + elif duplicate_action == "m": + # Merge duplicates together + task.should_merge_duplicates = True + else: + # No default action set; ask the session. + session.resolve_duplicate(task, found_duplicates) + + session.log_choice(task, True) + + +def _freshen_items(items): + # Clear IDs from re-tagged items so they appear "fresh" when + # we add them back to the library. + for item in items: + item.id = None + item.album_id = None + + +def _extend_pipeline(tasks, *stages): + # Return pipeline extension for stages with list of tasks + if isinstance(tasks, list): + task_iter = iter(tasks) + else: + task_iter = tasks + + ipl = pipeline.Pipeline([task_iter] + list(stages)) + return pipeline.multiple(ipl.pull()) diff --git a/beets/importer/state.py b/beets/importer/state.py new file mode 100644 index 000000000..fde26c606 --- /dev/null +++ b/beets/importer/state.py @@ -0,0 +1,142 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from __future__ import annotations + +import logging +import os +import pickle +from bisect import bisect_left, insort +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from beets import config + +if TYPE_CHECKING: + from beets.util import PathBytes + + +# Global logger. +log = logging.getLogger("beets") + + +@dataclass +class ImportState: + """Representing the progress of an import task. + + Opens the state file on creation of the class. If you want + to ensure the state is written to disk, you should use the + context manager protocol. + + Tagprogress allows long tagging tasks to be resumed when they pause. + + Taghistory is a utility for manipulating the "incremental" import log. + This keeps track of all directories that were ever imported, which + allows the importer to only import new stuff. 
+ + Usage + ----- + ``` + # Readonly + progress = ImportState().tagprogress + + # Read and write + with ImportState() as state: + state["key"] = "value" + ``` + """ + + tagprogress: dict[PathBytes, list[PathBytes]] + taghistory: set[tuple[PathBytes, ...]] + path: PathBytes + + def __init__(self, readonly=False, path: PathBytes | None = None): + self.path = path or os.fsencode(config["statefile"].as_filename()) + self.tagprogress = {} + self.taghistory = set() + self._open() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._save() + + def _open( + self, + ): + try: + with open(self.path, "rb") as f: + state = pickle.load(f) + # Read the states + self.tagprogress = state.get("tagprogress", {}) + self.taghistory = state.get("taghistory", set()) + except Exception as exc: + # The `pickle` module can emit all sorts of exceptions during + # unpickling, including ImportError. We use a catch-all + # exception to avoid enumerating them all (the docs don't even have a + # full list!). + log.debug("state file could not be read: {}", exc) + + def _save(self): + try: + with open(self.path, "wb") as f: + pickle.dump( + { + "tagprogress": self.tagprogress, + "taghistory": self.taghistory, + }, + f, + ) + except OSError as exc: + log.error("state file could not be written: {}", exc) + + # -------------------------------- Tagprogress ------------------------------- # + + def progress_add(self, toppath: PathBytes, *paths: PathBytes): + """Record that the files under all of the `paths` have been imported + under `toppath`. 
+ """ + with self as state: + imported = state.tagprogress.setdefault(toppath, []) + for path in paths: + if imported and imported[-1] <= path: + imported.append(path) + else: + insort(imported, path) + + def progress_has_element(self, toppath: PathBytes, path: PathBytes) -> bool: + """Return whether `path` has been imported in `toppath`.""" + imported = self.tagprogress.get(toppath, []) + i = bisect_left(imported, path) + return i != len(imported) and imported[i] == path + + def progress_has(self, toppath: PathBytes) -> bool: + """Return `True` if there exist paths that have already been + imported under `toppath`. + """ + return toppath in self.tagprogress + + def progress_reset(self, toppath: PathBytes | None): + """Reset the progress for `toppath`.""" + with self as state: + if toppath in state.tagprogress: + del state.tagprogress[toppath] + + # -------------------------------- Taghistory -------------------------------- # + + def history_add(self, paths: list[PathBytes]): + """Add the paths to the history.""" + with self as state: + state.taghistory.add(tuple(paths)) diff --git a/beets/importer.py b/beets/importer/tasks.py similarity index 53% rename from beets/importer.py rename to beets/importer/tasks.py index 2bdb16669..3a9c044b2 100644 --- a/beets/importer.py +++ b/beets/importer/tasks.py @@ -12,44 +12,35 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -"""Provides the basic, interface-agnostic workflow for importing and -autotagging music files. 
-""" - from __future__ import annotations -import itertools +import logging import os -import pickle import re import shutil import time -from bisect import bisect_left, insort from collections import defaultdict -from dataclasses import dataclass +from collections.abc import Callable, Iterable, Sequence from enum import Enum from tempfile import mkdtemp -from typing import Callable, Iterable, Sequence +from typing import TYPE_CHECKING, Any import mediafile -from beets import autotag, config, dbcore, library, logging, plugins, util -from beets.util import ( - MoveOperation, - ancestry, - displayable_path, - normpath, - pipeline, - sorted_walk, - syspath, -) +from beets import autotag, config, library, plugins, util +from beets.dbcore.query import PathQuery + +from .state import ImportState + +if TYPE_CHECKING: + from beets.autotag.match import Recommendation + + from .session import ImportSession + +# Global logger. +log = logging.getLogger("beets") -action = Enum("action", ["SKIP", "ASIS", "TRACKS", "APPLY", "ALBUMS", "RETAG"]) -# The RETAG action represents "don't apply any match, but do record -# new metadata". It's not reachable via the standard command prompt but -# can be used by plugins. -QUEUE_SIZE = 128 SINGLE_ARTIST_THRESH = 0.25 # Usually flexible attributes are preserved (i.e., not updated) during @@ -61,23 +52,20 @@ SINGLE_ARTIST_THRESH = 0.25 # def extend_reimport_fresh_fields_item(): # importer.REIMPORT_FRESH_FIELDS_ITEM.extend(['tidal_track_popularity'] # ) -REIMPORT_FRESH_FIELDS_ALBUM = [ +REIMPORT_FRESH_FIELDS_ITEM = [ "data_source", "bandcamp_album_id", "spotify_album_id", "deezer_album_id", "beatport_album_id", "tidal_album_id", + "data_url", ] -REIMPORT_FRESH_FIELDS_ITEM = list(REIMPORT_FRESH_FIELDS_ALBUM) +REIMPORT_FRESH_FIELDS_ALBUM = [*REIMPORT_FRESH_FIELDS_ITEM, "media"] # Global logger. 
log = logging.getLogger("beets") -# Here for now to allow for a easy replace later on -# once we can move to a PathLike -PathBytes = bytes - class ImportAbortError(Exception): """Raised when the user aborts the tagging operation.""" @@ -85,379 +73,18 @@ class ImportAbortError(Exception): pass -@dataclass -class ImportState: - """Representing the progress of an import task. +class Action(Enum): + """Enumeration of possible actions for an import task.""" - Opens the state file on creation of the class. If you want - to ensure the state is written to disk, you should use the - context manager protocol. - - Tagprogress allows long tagging tasks to be resumed when they pause. - - Taghistory is a utility for manipulating the "incremental" import log. - This keeps track of all directories that were ever imported, which - allows the importer to only import new stuff. - - Usage - ----- - ``` - # Readonly - progress = ImportState().tagprogress - - # Read and write - with ImportState() as state: - state["key"] = "value" - ``` - """ - - tagprogress: dict[PathBytes, list[PathBytes]] - taghistory: set[tuple[PathBytes, ...]] - path: PathBytes - - def __init__(self, readonly=False, path: PathBytes | None = None): - self.path = path or os.fsencode(config["statefile"].as_filename()) - self.tagprogress = {} - self.taghistory = set() - self._open() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._save() - - def _open( - self, - ): - try: - with open(self.path, "rb") as f: - state = pickle.load(f) - # Read the states - self.tagprogress = state.get("tagprogress", {}) - self.taghistory = state.get("taghistory", set()) - except Exception as exc: - # The `pickle` module can emit all sorts of exceptions during - # unpickling, including ImportError. We use a catch-all - # exception to avoid enumerating them all (the docs don't even have a - # full list!). 
- log.debug("state file could not be read: {0}", exc) - - def _save(self): - try: - with open(self.path, "wb") as f: - pickle.dump( - { - "tagprogress": self.tagprogress, - "taghistory": self.taghistory, - }, - f, - ) - except OSError as exc: - log.error("state file could not be written: {0}", exc) - - # -------------------------------- Tagprogress ------------------------------- # - - def progress_add(self, toppath: PathBytes, *paths: PathBytes): - """Record that the files under all of the `paths` have been imported - under `toppath`. - """ - with self as state: - imported = state.tagprogress.setdefault(toppath, []) - for path in paths: - if imported and imported[-1] <= path: - imported.append(path) - else: - insort(imported, path) - - def progress_has_element(self, toppath: PathBytes, path: PathBytes) -> bool: - """Return whether `path` has been imported in `toppath`.""" - imported = self.tagprogress.get(toppath, []) - i = bisect_left(imported, path) - return i != len(imported) and imported[i] == path - - def progress_has(self, toppath: PathBytes) -> bool: - """Return `True` if there exist paths that have already been - imported under `toppath`. - """ - return toppath in self.tagprogress - - def progress_reset(self, toppath: PathBytes | None): - """Reset the progress for `toppath`.""" - with self as state: - if toppath in state.tagprogress: - del state.tagprogress[toppath] - - # -------------------------------- Taghistory -------------------------------- # - - def history_add(self, paths: list[PathBytes]): - """Add the paths to the history.""" - with self as state: - state.taghistory.add(tuple(paths)) - - -class ImportSession: - """Controls an import action. Subclasses should implement methods to - communicate with the user or otherwise make decisions. 
- """ - - logger: logging.Logger - paths: list[PathBytes] - lib: library.Library - - _is_resuming: dict[bytes, bool] - _merged_items: set[PathBytes] - _merged_dirs: set[PathBytes] - - def __init__( - self, - lib: library.Library, - loghandler: logging.Handler | None, - paths: Sequence[PathBytes] | None, - query: dbcore.Query | None, - ): - """Create a session. - - Parameters - ---------- - lib : library.Library - The library instance to which items will be imported. - loghandler : logging.Handler or None - A logging handler to use for the session's logger. If None, a - NullHandler will be used. - paths : os.PathLike or None - The paths to be imported. - query : dbcore.Query or None - A query to filter items for import. - """ - self.lib = lib - self.logger = self._setup_logging(loghandler) - self.query = query - self._is_resuming = {} - self._merged_items = set() - self._merged_dirs = set() - - # Normalize the paths. - self.paths = list(map(normpath, paths or [])) - - def _setup_logging(self, loghandler: logging.Handler | None): - logger = logging.getLogger(__name__) - logger.propagate = False - if not loghandler: - loghandler = logging.NullHandler() - logger.handlers = [loghandler] - return logger - - def set_config(self, config): - """Set `config` property from global import config and make - implied changes. - """ - # FIXME: Maybe this function should not exist and should instead - # provide "decision wrappers" like "should_resume()", etc. - iconfig = dict(config) - self.config = iconfig - - # Incremental and progress are mutually exclusive. - if iconfig["incremental"]: - iconfig["resume"] = False - - # When based on a query instead of directories, never - # save progress or try to resume. - if self.query is not None: - iconfig["resume"] = False - iconfig["incremental"] = False - - if iconfig["reflink"]: - iconfig["reflink"] = iconfig["reflink"].as_choice( - ["auto", True, False] - ) - - # Copy, move, reflink, link, and hardlink are mutually exclusive. 
- if iconfig["move"]: - iconfig["copy"] = False - iconfig["link"] = False - iconfig["hardlink"] = False - iconfig["reflink"] = False - elif iconfig["link"]: - iconfig["copy"] = False - iconfig["move"] = False - iconfig["hardlink"] = False - iconfig["reflink"] = False - elif iconfig["hardlink"]: - iconfig["copy"] = False - iconfig["move"] = False - iconfig["link"] = False - iconfig["reflink"] = False - elif iconfig["reflink"]: - iconfig["copy"] = False - iconfig["move"] = False - iconfig["link"] = False - iconfig["hardlink"] = False - - # Only delete when copying. - if not iconfig["copy"]: - iconfig["delete"] = False - - self.want_resume = config["resume"].as_choice([True, False, "ask"]) - - def tag_log(self, status, paths: Sequence[PathBytes]): - """Log a message about a given album to the importer log. The status - should reflect the reason the album couldn't be tagged. - """ - self.logger.info("{0} {1}", status, displayable_path(paths)) - - def log_choice(self, task: ImportTask, duplicate=False): - """Logs the task's current choice if it should be logged. If - ``duplicate``, then this is a secondary choice after a duplicate was - detected and a decision was made. - """ - paths = task.paths - if duplicate: - # Duplicate: log all three choices (skip, keep both, and trump). - if task.should_remove_duplicates: - self.tag_log("duplicate-replace", paths) - elif task.choice_flag in (action.ASIS, action.APPLY): - self.tag_log("duplicate-keep", paths) - elif task.choice_flag is (action.SKIP): - self.tag_log("duplicate-skip", paths) - else: - # Non-duplicate: log "skip" and "asis" choices. 
- if task.choice_flag is action.ASIS: - self.tag_log("asis", paths) - elif task.choice_flag is action.SKIP: - self.tag_log("skip", paths) - - def should_resume(self, path: PathBytes): - raise NotImplementedError - - def choose_match(self, task: ImportTask): - raise NotImplementedError - - def resolve_duplicate(self, task: ImportTask, found_duplicates): - raise NotImplementedError - - def choose_item(self, task: ImportTask): - raise NotImplementedError - - def run(self): - """Run the import task.""" - self.logger.info("import started {0}", time.asctime()) - self.set_config(config["import"]) - - # Set up the pipeline. - if self.query is None: - stages = [read_tasks(self)] - else: - stages = [query_tasks(self)] - - # In pretend mode, just log what would otherwise be imported. - if self.config["pretend"]: - stages += [log_files(self)] - else: - if self.config["group_albums"] and not self.config["singletons"]: - # Split directory tasks into one task for each album. - stages += [group_albums(self)] - - # These stages either talk to the user to get a decision or, - # in the case of a non-autotagged import, just choose to - # import everything as-is. In *both* cases, these stages - # also add the music to the library database, so later - # stages need to read and write data from there. - if self.config["autotag"]: - stages += [lookup_candidates(self), user_query(self)] - else: - stages += [import_asis(self)] - - # Plugin stages. - for stage_func in plugins.early_import_stages(): - stages.append(plugin_stage(self, stage_func)) - for stage_func in plugins.import_stages(): - stages.append(plugin_stage(self, stage_func)) - - stages += [manipulate_files(self)] - - pl = pipeline.Pipeline(stages) - - # Run the pipeline. - plugins.send("import_begin", session=self) - try: - if config["threaded"]: - pl.run_parallel(QUEUE_SIZE) - else: - pl.run_sequential() - except ImportAbortError: - # User aborted operation. Silently stop. 
- pass - - # Incremental and resumed imports - - def already_imported(self, toppath: PathBytes, paths: Sequence[PathBytes]): - """Returns true if the files belonging to this task have already - been imported in a previous session. - """ - if self.is_resuming(toppath) and all( - [ImportState().progress_has_element(toppath, p) for p in paths] - ): - return True - if self.config["incremental"] and tuple(paths) in self.history_dirs: - return True - - return False - - _history_dirs = None - - @property - def history_dirs(self) -> set[tuple[PathBytes, ...]]: - # FIXME: This could be simplified to a cached property - if self._history_dirs is None: - self._history_dirs = ImportState().taghistory - return self._history_dirs - - def already_merged(self, paths: Sequence[PathBytes]): - """Returns true if all the paths being imported were part of a merge - during previous tasks. - """ - for path in paths: - if path not in self._merged_items and path not in self._merged_dirs: - return False - return True - - def mark_merged(self, paths: Sequence[PathBytes]): - """Mark paths and directories as merged for future reimport tasks.""" - self._merged_items.update(paths) - dirs = { - os.path.dirname(path) if os.path.isfile(syspath(path)) else path - for path in paths - } - self._merged_dirs.update(dirs) - - def is_resuming(self, toppath: PathBytes): - """Return `True` if user wants to resume import of this path. - - You have to call `ask_resume` first to determine the return value. - """ - return self._is_resuming.get(toppath, False) - - def ask_resume(self, toppath: PathBytes): - """If import of `toppath` was aborted in an earlier session, ask - user if they want to resume the import. - - Determines the return value of `is_resuming(toppath)`. - """ - if self.want_resume and ImportState().progress_has(toppath): - # Either accept immediately or prompt for input to decide. 
- if self.want_resume is True or self.should_resume(toppath): - log.warning( - "Resuming interrupted import of {0}", - util.displayable_path(toppath), - ) - self._is_resuming[toppath] = True - else: - # Clear progress; we're starting from the top. - ImportState().progress_reset(toppath) - - -# The importer task class. + SKIP = "SKIP" + ASIS = "ASIS" + TRACKS = "TRACKS" + APPLY = "APPLY" + ALBUMS = "ALBUMS" + RETAG = "RETAG" + # The RETAG action represents "don't apply any match, but do record + # new metadata". It's not reachable via the standard command prompt but + # can be used by plugins. class BaseImportTask: @@ -466,14 +93,14 @@ class BaseImportTask: Tasks flow through the importer pipeline. Each stage can update them.""" - toppath: PathBytes | None - paths: list[PathBytes] + toppath: util.PathBytes | None + paths: list[util.PathBytes] items: list[library.Item] def __init__( self, - toppath: PathBytes | None, - paths: Iterable[PathBytes] | None, + toppath: util.PathBytes | None, + paths: Iterable[util.PathBytes] | None, items: Iterable[library.Item] | None, ): """Create a task. The primary fields that define a task are: @@ -529,29 +156,28 @@ class ImportTask(BaseImportTask): system. """ - choice_flag: action | None = None + choice_flag: Action | None = None match: autotag.AlbumMatch | autotag.TrackMatch | None = None # Keep track of the current task item cur_album: str | None = None cur_artist: str | None = None candidates: Sequence[autotag.AlbumMatch | autotag.TrackMatch] = [] + rec: Recommendation | None = None def __init__( self, - toppath: PathBytes | None, - paths: Iterable[PathBytes] | None, + toppath: util.PathBytes | None, + paths: Iterable[util.PathBytes] | None, items: Iterable[library.Item] | None, ): super().__init__(toppath, paths, items) - self.rec = None self.should_remove_duplicates = False self.should_merge_duplicates = False self.is_album = True - self.search_ids = [] # user-supplied candidate IDs. 
def set_choice( - self, choice: action | autotag.AlbumMatch | autotag.TrackMatch + self, choice: Action | autotag.AlbumMatch | autotag.TrackMatch ): """Given an AlbumMatch or TrackMatch object or an action constant, indicates that an action has been selected for this task. @@ -560,20 +186,20 @@ class ImportTask(BaseImportTask): use isinstance to check for them. """ # Not part of the task structure: - assert choice != action.APPLY # Only used internally. + assert choice != Action.APPLY # Only used internally. if choice in ( - action.SKIP, - action.ASIS, - action.TRACKS, - action.ALBUMS, - action.RETAG, + Action.SKIP, + Action.ASIS, + Action.TRACKS, + Action.ALBUMS, + Action.RETAG, ): # TODO: redesign to stricten the type self.choice_flag = choice # type: ignore[assignment] self.match = None else: - self.choice_flag = action.APPLY # Implicit choice. + self.choice_flag = Action.APPLY # Implicit choice. self.match = choice # type: ignore[assignment] def save_progress(self): @@ -591,11 +217,11 @@ class ImportTask(BaseImportTask): @property def apply(self): - return self.choice_flag == action.APPLY + return self.choice_flag == Action.APPLY @property def skip(self): - return self.choice_flag == action.SKIP + return self.choice_flag == Action.SKIP # Convenient data. @@ -605,10 +231,10 @@ class ImportTask(BaseImportTask): (in which case the data comes from the files' current metadata) or APPLY (in which case the data comes from the choice). """ - if self.choice_flag in (action.ASIS, action.RETAG): - likelies, consensus = autotag.current_metadata(self.items) + if self.choice_flag in (Action.ASIS, Action.RETAG): + likelies, consensus = util.get_most_common_tags(self.items) return likelies - elif self.choice_flag is action.APPLY and self.match: + elif self.choice_flag is Action.APPLY and self.match: return self.match.info.copy() assert False @@ -618,22 +244,22 @@ class ImportTask(BaseImportTask): If the tasks applies an album match the method only returns the matched items. 
""" - if self.choice_flag in (action.ASIS, action.RETAG): - return list(self.items) - elif self.choice_flag == action.APPLY and isinstance( + if self.choice_flag in (Action.ASIS, Action.RETAG): + return self.items + elif self.choice_flag == Action.APPLY and isinstance( self.match, autotag.AlbumMatch ): - return list(self.match.mapping.keys()) + return self.match.items else: assert False def apply_metadata(self): """Copy metadata from match info to the items.""" if config["import"]["from_scratch"]: - for item in self.match.mapping: + for item in self.match.items: item.clear() - autotag.apply_metadata(self.match.info, self.match.mapping) + autotag.apply_metadata(self.match.info, self.match.item_info_pairs) def duplicate_items(self, lib: library.Library): duplicate_items = [] @@ -643,13 +269,11 @@ class ImportTask(BaseImportTask): def remove_duplicates(self, lib: library.Library): duplicate_items = self.duplicate_items(lib) - log.debug("removing {0} old duplicated items", len(duplicate_items)) + log.debug("removing {} old duplicated items", len(duplicate_items)) for item in duplicate_items: item.remove() if lib.directory in util.ancestry(item.path): - log.debug( - "deleting duplicate {0}", util.displayable_path(item.path) - ) + log.debug("deleting duplicate {.filepath}", item) util.remove(item.path) util.prune_dirs(os.path.dirname(item.path), lib.directory) @@ -661,10 +285,10 @@ class ImportTask(BaseImportTask): for field, view in config["import"]["set_fields"].items(): value = str(view.get()) log.debug( - "Set field {1}={2} for {0}", - displayable_path(self.paths), + "Set field {}={} for {}", field, value, + util.displayable_path(self.paths), ) self.album.set_parse(field, format(self.album, value)) for item in items: @@ -733,20 +357,17 @@ class ImportTask(BaseImportTask): tasks = [t for inner in tasks for t in inner] return tasks - def lookup_candidates(self): - """Retrieve and store candidates for this album. 
User-specified - candidate IDs are stored in self.search_ids: if present, the - initial lookup is restricted to only those IDs. - """ - artist, album, prop = autotag.tag_album( - self.items, search_ids=self.search_ids - ) - self.cur_artist = artist - self.cur_album = album - self.candidates = prop.candidates - self.rec = prop.recommendation + def lookup_candidates(self, search_ids: list[str]) -> None: + """Retrieve and store candidates for this album. - def find_duplicates(self, lib: library.Library): + If User-specified ``search_ids`` list is not empty, the lookup is + restricted to only those IDs. + """ + self.cur_artist, self.cur_album, (self.candidates, self.rec) = ( + autotag.tag_album(self.items, search_ids=search_ids) + ) + + def find_duplicates(self, lib: library.Library) -> list[library.Album]: """Return a list of albums from `lib` with the same artist and album name as the task. """ @@ -787,7 +408,7 @@ class ImportTask(BaseImportTask): """ changes = {} - if self.choice_flag == action.ASIS: + if self.choice_flag == Action.ASIS: # Taking metadata "as-is". Guess whether this album is VA. plur_albumartist, freq = util.plurality( [i.albumartist or i.artist for i in self.items] @@ -804,7 +425,7 @@ class ImportTask(BaseImportTask): changes["albumartist"] = config["va_name"].as_str() changes["comp"] = True - elif self.choice_flag in (action.APPLY, action.RETAG): + elif self.choice_flag in (Action.APPLY, Action.RETAG): # Applying autotagged metadata. Just get AA from the first # item. 
if not self.items[0].albumartist: @@ -823,7 +444,7 @@ class ImportTask(BaseImportTask): def manipulate_files( self, session: ImportSession, - operation: MoveOperation | None = None, + operation: util.MoveOperation | None = None, write=False, ): """Copy, move, link, hardlink or reflink (depending on `operation`) @@ -838,7 +459,7 @@ class ImportTask(BaseImportTask): items = self.imported_items() # Save the original paths of all items for deletion and pruning # in the next step (finalization). - self.old_paths: list[PathBytes] = [item.path for item in items] + self.old_paths: list[util.PathBytes] = [item.path for item in items] for item in items: if operation is not None: # In copy and link modes, treat re-imports specially: @@ -846,7 +467,7 @@ class ImportTask(BaseImportTask): # copied/moved as usual). old_path = item.path if ( - operation != MoveOperation.MOVE + operation != util.MoveOperation.MOVE and self.replaced_items[item] and session.lib.directory in util.ancestry(old_path) ): @@ -859,7 +480,7 @@ class ImportTask(BaseImportTask): # old paths. item.move(operation) - if write and (self.apply or self.choice_flag == action.RETAG): + if write and (self.apply or self.choice_flag == Action.RETAG): item.try_write() with session.lib.transaction(): @@ -876,7 +497,7 @@ class ImportTask(BaseImportTask): self.remove_replaced(lib) self.album = lib.add_album(self.imported_items()) - if self.choice_flag == action.APPLY and isinstance( + if self.choice_flag == Action.APPLY and isinstance( self.match, autotag.AlbumMatch ): # Copy album flexible fields to the DB @@ -893,12 +514,12 @@ class ImportTask(BaseImportTask): and `replaced_albums` dictionaries. 
""" self.replaced_items = defaultdict(list) - self.replaced_albums: dict[PathBytes, library.Album] = defaultdict() + self.replaced_albums: dict[util.PathBytes, library.Album] = ( + defaultdict() + ) replaced_album_ids = set() for item in self.imported_items(): - dup_items = list( - lib.items(dbcore.query.BytesQuery("path", item.path)) - ) + dup_items = list(lib.items(query=PathQuery("path", item.path))) self.replaced_items[item] = dup_items for dup_item in dup_items: if ( @@ -933,12 +554,11 @@ class ImportTask(BaseImportTask): ] if overwritten_fields: log.debug( - "Reimported {} {}. Not preserving flexible attributes {}. " - "Path: {}", + "Reimported {0} {1.id}. Not preserving flexible attributes {2}. " + "Path: {1.filepath}", noun, - new_obj.id, + new_obj, overwritten_fields, - displayable_path(new_obj.path), ) for key in overwritten_fields: del existing_fields[key] @@ -957,17 +577,15 @@ class ImportTask(BaseImportTask): self.album.artpath = replaced_album.artpath self.album.store() log.debug( - "Reimported album {}. Preserving attribute ['added']. " - "Path: {}", - self.album.id, - displayable_path(self.album.path), + "Reimported album {0.album.id}. Preserving attribute ['added']. " + "Path: {0.album.filepath}", + self, ) log.debug( - "Reimported album {}. Preserving flexible attributes {}. " - "Path: {}", - self.album.id, + "Reimported album {0.album.id}. Preserving flexible" + " attributes {1}. Path: {0.album.filepath}", + self, list(album_fields.keys()), - displayable_path(self.album.path), ) for item in self.imported_items(): @@ -976,21 +594,19 @@ class ImportTask(BaseImportTask): if dup_item.added and dup_item.added != item.added: item.added = dup_item.added log.debug( - "Reimported item {}. Preserving attribute ['added']. " - "Path: {}", - item.id, - displayable_path(item.path), + "Reimported item {0.id}. Preserving attribute ['added']. 
" + "Path: {0.filepath}", + item, ) item_fields = _reduce_and_log( item, dup_item._values_flex, REIMPORT_FRESH_FIELDS_ITEM ) item.update(item_fields) log.debug( - "Reimported item {}. Preserving flexible attributes {}. " - "Path: {}", - item.id, + "Reimported item {0.id}. Preserving flexible attributes {1}. " + "Path: {0.filepath}", + item, list(item_fields.keys()), - displayable_path(item.path), ) item.store() @@ -1000,14 +616,10 @@ class ImportTask(BaseImportTask): """ for item in self.imported_items(): for dup_item in self.replaced_items[item]: - log.debug( - "Replacing item {0}: {1}", - dup_item.id, - displayable_path(item.path), - ) + log.debug("Replacing item {.id}: {.filepath}", dup_item, item) dup_item.remove() log.debug( - "{0} of {1} items replaced", + "{} of {} items replaced", sum(bool(v) for v in self.replaced_items.values()), len(self.imported_items()), ) @@ -1033,7 +645,7 @@ class ImportTask(BaseImportTask): the file still exists, no pruning is performed, so it's safe to call when the file in question may not have been removed. """ - if self.toppath and not os.path.exists(syspath(filename)): + if self.toppath and not os.path.exists(util.syspath(filename)): util.prune_dirs( os.path.dirname(filename), self.toppath, @@ -1044,7 +656,7 @@ class ImportTask(BaseImportTask): class SingletonImportTask(ImportTask): """ImportTask for a single track that is not associated to an album.""" - def __init__(self, toppath: PathBytes | None, item: library.Item): + def __init__(self, toppath: util.PathBytes | None, item: library.Item): super().__init__(toppath, [item.path], [item]) self.item = item self.is_album = False @@ -1056,10 +668,10 @@ class SingletonImportTask(ImportTask): (in which case the data comes from the files' current metadata) or APPLY (in which case the data comes from the choice). 
""" - assert self.choice_flag in (action.ASIS, action.RETAG, action.APPLY) - if self.choice_flag in (action.ASIS, action.RETAG): + assert self.choice_flag in (Action.ASIS, Action.RETAG, Action.APPLY) + if self.choice_flag in (Action.ASIS, Action.RETAG): return dict(self.item) - elif self.choice_flag is action.APPLY: + elif self.choice_flag is Action.APPLY: return self.match.info.copy() def imported_items(self): @@ -1072,12 +684,12 @@ class SingletonImportTask(ImportTask): for item in self.imported_items(): plugins.send("item_imported", lib=lib, item=item) - def lookup_candidates(self): - prop = autotag.tag_item(self.item, search_ids=self.search_ids) - self.candidates = prop.candidates - self.rec = prop.recommendation + def lookup_candidates(self, search_ids: list[str]) -> None: + self.candidates, self.rec = autotag.tag_item( + self.item, search_ids=search_ids + ) - def find_duplicates(self, lib): + def find_duplicates(self, lib: library.Library) -> list[library.Item]: # type: ignore[override] # Need splitting Singleton and Album tasks into separate classes """Return a list of items from `lib` that have the same artist and title as the task. """ @@ -1126,10 +738,10 @@ class SingletonImportTask(ImportTask): for field, view in config["import"]["set_fields"].items(): value = str(view.get()) log.debug( - "Set field {1}={2} for {0}", - displayable_path(self.paths), + "Set field {}={} for {}", field, value, + util.displayable_path(self.paths), ) self.item.set_parse(field, format(self.item, value)) self.item.store() @@ -1179,6 +791,11 @@ class SentinelImportTask(ImportTask): pass +ArchiveHandler = tuple[ + Callable[[util.StrPath], bool], Callable[[util.StrPath], Any] +] + + class ArchiveImportTask(SentinelImportTask): """An import task that represents the processing of an archive. 
@@ -1204,13 +821,13 @@ class ArchiveImportTask(SentinelImportTask): if not os.path.isfile(path): return False - for path_test, _ in cls.handlers(): + for path_test, _ in cls.handlers: if path_test(os.fsdecode(path)): return True return False - @classmethod - def handlers(cls): + @util.cached_classproperty + def handlers(cls) -> list[ArchiveHandler]: """Returns a list of archive handlers. Each handler is a `(path_test, ArchiveClass)` tuple. `path_test` @@ -1218,37 +835,36 @@ class ArchiveImportTask(SentinelImportTask): handled by `ArchiveClass`. `ArchiveClass` is a class that implements the same interface as `tarfile.TarFile`. """ - if not hasattr(cls, "_handlers"): - cls._handlers: list[tuple[Callable, ...]] = [] - from zipfile import ZipFile, is_zipfile + _handlers: list[ArchiveHandler] = [] + from zipfile import ZipFile, is_zipfile - cls._handlers.append((is_zipfile, ZipFile)) - import tarfile + _handlers.append((is_zipfile, ZipFile)) + import tarfile - cls._handlers.append((tarfile.is_tarfile, tarfile.open)) - try: - from rarfile import RarFile, is_rarfile - except ImportError: - pass - else: - cls._handlers.append((is_rarfile, RarFile)) - try: - from py7zr import SevenZipFile, is_7zfile - except ImportError: - pass - else: - cls._handlers.append((is_7zfile, SevenZipFile)) + _handlers.append((tarfile.is_tarfile, tarfile.open)) + try: + from rarfile import RarFile, is_rarfile + except ImportError: + pass + else: + _handlers.append((is_rarfile, RarFile)) + try: + from py7zr import SevenZipFile, is_7zfile + except ImportError: + pass + else: + _handlers.append((is_7zfile, SevenZipFile)) - return cls._handlers + return _handlers def cleanup(self, copy=False, delete=False, move=False): """Removes the temporary directory the archive was extracted to.""" if self.extracted and self.toppath: log.debug( - "Removing extracted directory: {0}", - displayable_path(self.toppath), + "Removing extracted directory: {}", + util.displayable_path(self.toppath), ) - 
shutil.rmtree(syspath(self.toppath)) + shutil.rmtree(util.syspath(self.toppath)) def extract(self): """Extracts the archive to a temporary directory and sets @@ -1256,7 +872,7 @@ class ArchiveImportTask(SentinelImportTask): """ assert self.toppath is not None, "toppath must be set" - for path_test, handler_class in self.handlers(): + for path_test, handler_class in self.handlers: if path_test(os.fsdecode(self.toppath)): break else: @@ -1289,7 +905,7 @@ class ImportTaskFactory: indicated by a path. """ - def __init__(self, toppath: PathBytes, session: ImportSession): + def __init__(self, toppath: util.PathBytes, session: ImportSession): """Create a new task factory. `toppath` is the user-specified path to search for music to @@ -1300,9 +916,9 @@ class ImportTaskFactory: self.session = session self.skipped = 0 # Skipped due to incremental/resume. self.imported = 0 # "Real" tasks created. - self.is_archive = ArchiveImportTask.is_archive(syspath(toppath)) + self.is_archive = ArchiveImportTask.is_archive(util.syspath(toppath)) - def tasks(self): + def tasks(self) -> Iterable[ImportTask]: """Yield all import tasks for music found in the user-specified path `self.toppath`. Any necessary sentinel tasks are also produced. @@ -1362,7 +978,7 @@ class ImportTaskFactory: single track when `toppath` is a file, a single directory in `flat` mode. 
""" - if not os.path.isdir(syspath(self.toppath)): + if not os.path.isdir(util.syspath(self.toppath)): yield [self.toppath], [self.toppath] elif self.session.config["flat"]: paths = [] @@ -1373,11 +989,12 @@ class ImportTaskFactory: for dirs, paths in albums_in_dir(self.toppath): yield dirs, paths - def singleton(self, path: PathBytes): + def singleton(self, path: util.PathBytes): """Return a `SingletonImportTask` for the music file.""" if self.session.already_imported(self.toppath, [path]): log.debug( - "Skipping previously-imported path: {0}", displayable_path(path) + "Skipping previously-imported path: {}", + util.displayable_path(path), ) self.skipped += 1 return None @@ -1388,7 +1005,7 @@ class ImportTaskFactory: else: return None - def album(self, paths: Iterable[PathBytes], dirs=None): + def album(self, paths: Iterable[util.PathBytes], dirs=None): """Return a `ImportTask` with all media files from paths. `dirs` is a list of parent directories used to record already @@ -1400,7 +1017,8 @@ class ImportTaskFactory: if self.session.already_imported(self.toppath, dirs): log.debug( - "Skipping previously-imported path: {0}", displayable_path(dirs) + "Skipping previously-imported path: {}", + util.displayable_path(dirs), ) self.skipped += 1 return None @@ -1414,7 +1032,7 @@ class ImportTaskFactory: else: return None - def sentinel(self, paths: Iterable[PathBytes] | None = None): + def sentinel(self, paths: Iterable[util.PathBytes] | None = None): """Return a `SentinelImportTask` indicating the end of a top-level directory import. 
""" @@ -1436,20 +1054,20 @@ class ImportTaskFactory: ) return - log.debug("Extracting archive: {0}", displayable_path(self.toppath)) + log.debug("Extracting archive: {}", util.displayable_path(self.toppath)) archive_task = ArchiveImportTask(self.toppath) try: archive_task.extract() except Exception as exc: - log.error("extraction failed: {0}", exc) + log.error("extraction failed: {}", exc) return # Now read albums from the extracted directory. self.toppath = archive_task.toppath - log.debug("Archive extracted to: {0}", self.toppath) + log.debug("Archive extracted to: {.toppath}", self) return archive_task - def read_item(self, path: PathBytes): + def read_item(self, path: util.PathBytes): """Return an `Item` read from the path. If an item cannot be read, return `None` instead and log an @@ -1462,355 +1080,11 @@ class ImportTaskFactory: # Silently ignore non-music files. pass elif isinstance(exc.reason, mediafile.UnreadableFileError): - log.warning("unreadable file: {0}", displayable_path(path)) + log.warning("unreadable file: {}", util.displayable_path(path)) else: - log.error("error reading {0}: {1}", displayable_path(path), exc) - - -# Pipeline utilities - - -def _freshen_items(items): - # Clear IDs from re-tagged items so they appear "fresh" when - # we add them back to the library. - for item in items: - item.id = None - item.album_id = None - - -def _extend_pipeline(tasks, *stages): - # Return pipeline extension for stages with list of tasks - if isinstance(tasks, list): - task_iter = iter(tasks) - else: - task_iter = tasks - - ipl = pipeline.Pipeline([task_iter] + list(stages)) - return pipeline.multiple(ipl.pull()) - - -# Full-album pipeline stages. - - -def read_tasks(session: ImportSession): - """A generator yielding all the albums (as ImportTask objects) found - in the user-specified list of paths. In the case of a singleton - import, yields single-item tasks instead. 
- """ - skipped = 0 - - for toppath in session.paths: - # Check whether we need to resume the import. - session.ask_resume(toppath) - - # Generate tasks. - task_factory = ImportTaskFactory(toppath, session) - yield from task_factory.tasks() - skipped += task_factory.skipped - - if not task_factory.imported: - log.warning("No files imported from {0}", displayable_path(toppath)) - - # Show skipped directories (due to incremental/resume). - if skipped: - log.info("Skipped {0} paths.", skipped) - - -def query_tasks(session: ImportSession): - """A generator that works as a drop-in-replacement for read_tasks. - Instead of finding files from the filesystem, a query is used to - match items from the library. - """ - if session.config["singletons"]: - # Search for items. - for item in session.lib.items(session.query): - task = SingletonImportTask(None, item) - for task in task.handle_created(session): - yield task - - else: - # Search for albums. - for album in session.lib.albums(session.query): - log.debug( - "yielding album {0}: {1} - {2}", - album.id, - album.albumartist, - album.album, - ) - items = list(album.items()) - _freshen_items(items) - - task = ImportTask(None, [album.item_dir()], items) - for task in task.handle_created(session): - yield task - - -@pipeline.mutator_stage -def lookup_candidates(session: ImportSession, task: ImportTask): - """A coroutine for performing the initial MusicBrainz lookup for an - album. It accepts lists of Items and yields - (items, cur_artist, cur_album, candidates, rec) tuples. If no match - is found, all of the yielded parameters (except items) are None. - """ - if task.skip: - # FIXME This gets duplicated a lot. We need a better - # abstraction. - return - - plugins.send("import_task_start", session=session, task=task) - log.debug("Looking up: {0}", displayable_path(task.paths)) - - # Restrict the initial lookup to IDs specified by the user via the -m - # option. Currently all the IDs are passed onto the tasks directly. 
- task.search_ids = session.config["search_ids"].as_str_seq() - - task.lookup_candidates() - - -@pipeline.stage -def user_query(session: ImportSession, task: ImportTask): - """A coroutine for interfacing with the user about the tagging - process. - - The coroutine accepts an ImportTask objects. It uses the - session's `choose_match` method to determine the `action` for - this task. Depending on the action additional stages are executed - and the processed task is yielded. - - It emits the ``import_task_choice`` event for plugins. Plugins have - access to the choice via the ``task.choice_flag`` property and may - choose to change it. - """ - if task.skip: - return task - - if session.already_merged(task.paths): - return pipeline.BUBBLE - - # Ask the user for a choice. - task.choose_match(session) - plugins.send("import_task_choice", session=session, task=task) - - # As-tracks: transition to singleton workflow. - if task.choice_flag is action.TRACKS: - # Set up a little pipeline for dealing with the singletons. 
- def emitter(task): - for item in task.items: - task = SingletonImportTask(task.toppath, item) - yield from task.handle_created(session) - yield SentinelImportTask(task.toppath, task.paths) - - return _extend_pipeline( - emitter(task), lookup_candidates(session), user_query(session) - ) - - # As albums: group items by albums and create task for each album - if task.choice_flag is action.ALBUMS: - return _extend_pipeline( - [task], - group_albums(session), - lookup_candidates(session), - user_query(session), - ) - - resolve_duplicates(session, task) - - if task.should_merge_duplicates: - # Create a new task for tagging the current items - # and duplicates together - duplicate_items = task.duplicate_items(session.lib) - - # Duplicates would be reimported so make them look "fresh" - _freshen_items(duplicate_items) - duplicate_paths = [item.path for item in duplicate_items] - - # Record merged paths in the session so they are not reimported - session.mark_merged(duplicate_paths) - - merged_task = ImportTask( - None, task.paths + duplicate_paths, task.items + duplicate_items - ) - - return _extend_pipeline( - [merged_task], lookup_candidates(session), user_query(session) - ) - - apply_choice(session, task) - return task - - -def resolve_duplicates(session: ImportSession, task: ImportTask): - """Check if a task conflicts with items or albums already imported - and ask the session to resolve this. - """ - if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG): - found_duplicates = task.find_duplicates(session.lib) - if found_duplicates: - log.debug( - "found duplicates: {}".format([o.id for o in found_duplicates]) - ) - - # Get the default action to follow from config. - duplicate_action = config["import"]["duplicate_action"].as_choice( - { - "skip": "s", - "keep": "k", - "remove": "r", - "merge": "m", - "ask": "a", - } - ) - log.debug("default action for duplicates: {0}", duplicate_action) - - if duplicate_action == "s": - # Skip new. 
- task.set_choice(action.SKIP) - elif duplicate_action == "k": - # Keep both. Do nothing; leave the choice intact. - pass - elif duplicate_action == "r": - # Remove old. - task.should_remove_duplicates = True - elif duplicate_action == "m": - # Merge duplicates together - task.should_merge_duplicates = True - else: - # No default action set; ask the session. - session.resolve_duplicate(task, found_duplicates) - - session.log_choice(task, True) - - -@pipeline.mutator_stage -def import_asis(session: ImportSession, task: ImportTask): - """Select the `action.ASIS` choice for all tasks. - - This stage replaces the initial_lookup and user_query stages - when the importer is run without autotagging. - """ - if task.skip: - return - - log.info("{}", displayable_path(task.paths)) - task.set_choice(action.ASIS) - apply_choice(session, task) - - -def apply_choice(session: ImportSession, task: ImportTask): - """Apply the task's choice to the Album or Item it contains and add - it to the library. - """ - if task.skip: - return - - # Change metadata. - if task.apply: - task.apply_metadata() - plugins.send("import_task_apply", session=session, task=task) - - task.add(session.lib) - - # If ``set_fields`` is set, set those fields to the - # configured values. - # NOTE: This cannot be done before the ``task.add()`` call above, - # because then the ``ImportTask`` won't have an `album` for which - # it can set the fields. - if config["import"]["set_fields"]: - task.set_fields(session.lib) - - -@pipeline.mutator_stage -def plugin_stage( - session: ImportSession, - func: Callable[[ImportSession, ImportTask], None], - task: ImportTask, -): - """A coroutine (pipeline stage) that calls the given function with - each non-skipped import task. These stages occur between applying - metadata changes and moving/copying/writing files. - """ - if task.skip: - return - - func(session, task) - - # Stage may modify DB, so re-load cached item data. 
- # FIXME Importer plugins should not modify the database but instead - # the albums and items attached to tasks. - task.reload() - - -@pipeline.stage -def manipulate_files(session: ImportSession, task: ImportTask): - """A coroutine (pipeline stage) that performs necessary file - manipulations *after* items have been added to the library and - finalizes each task. - """ - if not task.skip: - if task.should_remove_duplicates: - task.remove_duplicates(session.lib) - - if session.config["move"]: - operation = MoveOperation.MOVE - elif session.config["copy"]: - operation = MoveOperation.COPY - elif session.config["link"]: - operation = MoveOperation.LINK - elif session.config["hardlink"]: - operation = MoveOperation.HARDLINK - elif session.config["reflink"] == "auto": - operation = MoveOperation.REFLINK_AUTO - elif session.config["reflink"]: - operation = MoveOperation.REFLINK - else: - operation = None - - task.manipulate_files( - session=session, - operation=operation, - write=session.config["write"], - ) - - # Progress, cleanup, and event. - task.finalize(session) - - -@pipeline.stage -def log_files(session: ImportSession, task: ImportTask): - """A coroutine (pipeline stage) to log each file to be imported.""" - if isinstance(task, SingletonImportTask): - log.info("Singleton: {0}", displayable_path(task.item["path"])) - elif task.items: - log.info("Album: {0}", displayable_path(task.paths[0])) - for item in task.items: - log.info(" {0}", displayable_path(item["path"])) - - -def group_albums(session: ImportSession): - """A pipeline stage that groups the items of each task into albums - using their metadata. - - Groups are identified using their artist and album fields. The - pipeline stage emits new album tasks for each discovered group. 
- """ - - def group(item): - return (item.albumartist or item.artist, item.album) - - task = None - while True: - task = yield task - if task.skip: - continue - tasks = [] - sorted_items: list[library.Item] = sorted(task.items, key=group) - for _, items in itertools.groupby(sorted_items, group): - l_items = list(items) - task = ImportTask(task.toppath, [i.path for i in l_items], l_items) - tasks += task.handle_created(session) - tasks.append(SentinelImportTask(task.toppath, task.paths)) - - task = pipeline.multiple(tasks) + log.error( + "error reading {}: {}", util.displayable_path(path), exc + ) MULTIDISC_MARKERS = (rb"dis[ck]", rb"cd") @@ -1821,21 +1095,24 @@ def is_subdir_of_any_in_list(path, dirs): """Returns True if path os a subdirectory of any directory in dirs (a list). In other case, returns False. """ - ancestors = ancestry(path) + ancestors = util.ancestry(path) return any(d in ancestors for d in dirs) -def albums_in_dir(path: PathBytes): +def albums_in_dir(path: util.PathBytes): """Recursively searches the given directory and returns an iterable of (paths, items) where paths is a list of directories and items is a list of Items that is probably an album. Specifically, any folder containing any media files is an album. """ - collapse_pat = collapse_paths = collapse_items = None + collapse_paths: list[util.PathBytes] = [] + collapse_items: list[util.PathBytes] = [] + collapse_pat = None + ignore: list[str] = config["ignore"].as_str_seq() ignore_hidden: bool = config["ignore_hidden"].get(bool) - for root, dirs, files in sorted_walk( + for root, dirs, files in util.sorted_walk( path, ignore=ignore, ignore_hidden=ignore_hidden, logger=log ): items = [os.path.join(root, f) for f in files] @@ -1856,7 +1133,7 @@ def albums_in_dir(path: PathBytes): # proceed to process the current one. 
if collapse_items: yield collapse_paths, collapse_items - collapse_pat = collapse_paths = collapse_items = None + collapse_pat, collapse_paths, collapse_items = None, [], [] # Check whether this directory looks like the *first* directory # in a multi-disc sequence. There are two indicators: the file diff --git a/beets/library/__init__.py b/beets/library/__init__.py new file mode 100644 index 000000000..22416ecb5 --- /dev/null +++ b/beets/library/__init__.py @@ -0,0 +1,29 @@ +from beets.util.deprecation import deprecate_imports + +from .exceptions import FileOperationError, ReadError, WriteError +from .library import Library +from .models import Album, Item, LibModel +from .queries import parse_query_parts, parse_query_string + +NEW_MODULE_BY_NAME = dict.fromkeys( + ("DateType", "DurationType", "MusicalKey", "PathType"), "beets.dbcore.types" +) | dict.fromkeys( + ("BLOB_TYPE", "SingletonQuery", "PathQuery"), "beets.dbcore.query" +) + + +def __getattr__(name: str): + return deprecate_imports(__name__, NEW_MODULE_BY_NAME, name) + + +__all__ = [ + "Library", + "LibModel", + "Album", + "Item", + "parse_query_parts", + "parse_query_string", + "FileOperationError", + "ReadError", + "WriteError", +] diff --git a/beets/library/exceptions.py b/beets/library/exceptions.py new file mode 100644 index 000000000..0dc874c2a --- /dev/null +++ b/beets/library/exceptions.py @@ -0,0 +1,38 @@ +from beets import util + + +class FileOperationError(Exception): + """Indicate an error when interacting with a file on disk. + + Possibilities include an unsupported media type, a permissions + error, and an unhandled Mutagen exception. + """ + + def __init__(self, path, reason): + """Create an exception describing an operation on the file at + `path` with the underlying (chained) exception `reason`. + """ + super().__init__(path, reason) + self.path = path + self.reason = reason + + def __str__(self): + """Get a string representing the error. 
+ + Describe both the underlying reason and the file path in question. + """ + return f"{util.displayable_path(self.path)}: {self.reason}" + + +class ReadError(FileOperationError): + """An error while reading a file (i.e. in `Item.read`).""" + + def __str__(self): + return f"error reading {super()}" + + +class WriteError(FileOperationError): + """An error while writing a file (i.e. in `Item.write`).""" + + def __str__(self): + return f"error writing {super()}" diff --git a/beets/library/library.py b/beets/library/library.py new file mode 100644 index 000000000..39d559901 --- /dev/null +++ b/beets/library/library.py @@ -0,0 +1,144 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import platformdirs + +import beets +from beets import dbcore +from beets.util import normpath + +from .models import Album, Item +from .queries import PF_KEY_DEFAULT, parse_query_parts, parse_query_string + +if TYPE_CHECKING: + from beets.dbcore import Results + + +class Library(dbcore.Database): + """A database of music containing songs and albums.""" + + _models = (Item, Album) + + def __init__( + self, + path="library.blb", + directory: str | None = None, + path_formats=((PF_KEY_DEFAULT, "$artist/$album/$track $title"),), + replacements=None, + ): + timeout = beets.config["timeout"].as_number() + super().__init__(path, timeout=timeout) + + self.directory = normpath(directory or platformdirs.user_music_path()) + + self.path_formats = path_formats + self.replacements = replacements + + # Used for template substitution performance. + self._memotable: dict[tuple[str, ...], str] = {} + + # Adding objects to the database. + + def add(self, obj): + """Add the :class:`Item` or :class:`Album` object to the library + database. + + Return the object's new id. + """ + obj.add(self) + self._memotable = {} + return obj.id + + def add_album(self, items): + """Create a new album consisting of a list of items. 
+ + The items are added to the database if they don't yet have an + ID. Return a new :class:`Album` object. The list items must not + be empty. + """ + if not items: + raise ValueError("need at least one item") + + # Create the album structure using metadata from the first item. + values = {key: items[0][key] for key in Album.item_keys} + album = Album(self, **values) + + # Add the album structure and set the items' album_id fields. + # Store or add the items. + with self.transaction(): + album.add(self) + for item in items: + item.album_id = album.id + if item.id is None: + item.add(self) + else: + item.store() + + return album + + # Querying. + + def _fetch(self, model_cls, query, sort=None): + """Parse a query and fetch. + + If an order specification is present in the query string + the `sort` argument is ignored. + """ + # Parse the query, if necessary. + try: + parsed_sort = None + if isinstance(query, str): + query, parsed_sort = parse_query_string(query, model_cls) + elif isinstance(query, (list, tuple)): + query, parsed_sort = parse_query_parts(query, model_cls) + except dbcore.query.InvalidQueryArgumentValueError as exc: + raise dbcore.InvalidQueryError(query, exc) + + # Any non-null sort specified by the parsed query overrides the + # provided sort. 
+ if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort): + sort = parsed_sort + + return super()._fetch(model_cls, query, sort) + + @staticmethod + def get_default_album_sort(): + """Get a :class:`Sort` object for albums from the config option.""" + return dbcore.sort_from_strings( + Album, beets.config["sort_album"].as_str_seq() + ) + + @staticmethod + def get_default_item_sort(): + """Get a :class:`Sort` object for items from the config option.""" + return dbcore.sort_from_strings( + Item, beets.config["sort_item"].as_str_seq() + ) + + def albums(self, query=None, sort=None) -> Results[Album]: + """Get :class:`Album` objects matching the query.""" + return self._fetch(Album, query, sort or self.get_default_album_sort()) + + def items(self, query=None, sort=None) -> Results[Item]: + """Get :class:`Item` objects matching the query.""" + return self._fetch(Item, query, sort or self.get_default_item_sort()) + + # Convenience accessors. + def get_item(self, id_: int) -> Item | None: + """Fetch a :class:`Item` by its ID. + + Return `None` if no match is found. + """ + return self._get(Item, id_) + + def get_album(self, item_or_id: Item | int) -> Album | None: + """Given an album ID or an item associated with an album, return + a :class:`Album` object for the album. + + If no such album exists, return `None`. + """ + album_id = ( + item_or_id if isinstance(item_or_id, int) else item_or_id.album_id + ) + return self._get(Album, album_id) if album_id else None diff --git a/beets/library.py b/beets/library/models.py similarity index 73% rename from beets/library.py rename to beets/library/models.py index d4ec63200..9609989bc 100644 --- a/beets/library.py +++ b/beets/library/models.py @@ -1,24 +1,6 @@ -# This file is part of beets. -# Copyright 2016, Adrian Sampson. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""The core data store and collection logic for beets.""" - from __future__ import annotations import os -import re -import shlex import string import sys import time @@ -27,12 +9,11 @@ from functools import cached_property from pathlib import Path from typing import TYPE_CHECKING -import platformdirs from mediafile import MediaFile, UnreadableFileError import beets from beets import dbcore, logging, plugins, util -from beets.dbcore import Results, types +from beets.dbcore import types from beets.util import ( MoveOperation, bytestring_path, @@ -43,317 +24,44 @@ from beets.util import ( ) from beets.util.functemplate import Template, template -if TYPE_CHECKING: - from .dbcore.query import FieldQuery, FieldQueryType +from .exceptions import FileOperationError, ReadError, WriteError +from .queries import PF_KEY_DEFAULT, parse_query_string -# To use the SQLite "blob" type, it doesn't suffice to provide a byte -# string; SQLite treats that as encoded text. Wrapping it in a -# `memoryview` tells it that we actually mean non-text data. -BLOB_TYPE = memoryview +if TYPE_CHECKING: + from ..dbcore.query import FieldQuery, FieldQueryType + from .library import Library # noqa: F401 log = logging.getLogger("beets") -# Library-specific query types. - - -class SingletonQuery(dbcore.FieldQuery[str]): - """This query is responsible for the 'singleton' lookup. 
- - It is based on the FieldQuery and constructs a SQL clause - 'album_id is NULL' which yields the same result as the previous filter - in Python but is more performant since it's done in SQL. - - Using util.str2bool ensures that lookups like singleton:true, singleton:1 - and singleton:false, singleton:0 are handled consistently. - """ - - def __new__(cls, field: str, value: str, *args, **kwargs): - query = dbcore.query.NoneQuery("album_id") - if util.str2bool(value): - return query - return dbcore.query.NotQuery(query) - - -class PathQuery(dbcore.FieldQuery[bytes]): - """A query that matches all items under a given path. - - Matching can either be case-insensitive or case-sensitive. By - default, the behavior depends on the OS: case-insensitive on Windows - and case-sensitive otherwise. - """ - - # For tests - force_implicit_query_detection = False - - def __init__(self, field, pattern, fast=True, case_sensitive=None): - """Create a path query. - - `pattern` must be a path, either to a file or a directory. - - `case_sensitive` can be a bool or `None`, indicating that the - behavior should depend on the filesystem. - """ - super().__init__(field, pattern, fast) - - path = util.normpath(pattern) - - # By default, the case sensitivity depends on the filesystem - # that the query path is located on. - if case_sensitive is None: - case_sensitive = util.case_sensitive(path) - self.case_sensitive = case_sensitive - - # Use a normalized-case pattern for case-insensitive matches. - if not case_sensitive: - # We need to lowercase the entire path, not just the pattern. - # In particular, on Windows, the drive letter is otherwise not - # lowercased. - # This also ensures that the `match()` method below and the SQL - # from `col_clause()` do the same thing. - path = path.lower() - - # Match the path as a single file. - self.file_path = path - # As a directory (prefix). 
- self.dir_path = os.path.join(path, b"") - - @classmethod - def is_path_query(cls, query_part): - """Try to guess whether a unicode query part is a path query. - - Condition: separator precedes colon and the file exists. - """ - colon = query_part.find(":") - if colon != -1: - query_part = query_part[:colon] - - # Test both `sep` and `altsep` (i.e., both slash and backslash on - # Windows). - if not ( - os.sep in query_part or (os.altsep and os.altsep in query_part) - ): - return False - - if cls.force_implicit_query_detection: - return True - return os.path.exists(syspath(normpath(query_part))) - - def match(self, item): - path = item.path if self.case_sensitive else item.path.lower() - return (path == self.file_path) or path.startswith(self.dir_path) - - def col_clause(self): - file_blob = BLOB_TYPE(self.file_path) - dir_blob = BLOB_TYPE(self.dir_path) - - if self.case_sensitive: - query_part = "({0} = ?) || (substr({0}, 1, ?) = ?)" - else: - query_part = "(BYTELOWER({0}) = BYTELOWER(?)) || \ - (substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))" - - return query_part.format(self.field), ( - file_blob, - len(dir_blob), - dir_blob, - ) - - def __repr__(self) -> str: - return ( - f"{self.__class__.__name__}({self.field!r}, {self.pattern!r}, " - f"fast={self.fast}, case_sensitive={self.case_sensitive})" - ) - - -# Library-specific field types. - - -class DateType(types.Float): - # TODO representation should be `datetime` object - # TODO distinguish between date and time types - query = dbcore.query.DateQuery - - def format(self, value): - return time.strftime( - beets.config["time_format"].as_str(), time.localtime(value or 0) - ) - - def parse(self, string): - try: - # Try a formatted date string. - return time.mktime( - time.strptime(string, beets.config["time_format"].as_str()) - ) - except ValueError: - # Fall back to a plain timestamp number. 
- try: - return float(string) - except ValueError: - return self.null - - -class PathType(types.Type[bytes, bytes]): - """A dbcore type for filesystem paths. - - These are represented as `bytes` objects, in keeping with - the Unix filesystem abstraction. - """ - - sql = "BLOB" - query = PathQuery - model_type = bytes - - def __init__(self, nullable=False): - """Create a path type object. - - `nullable` controls whether the type may be missing, i.e., None. - """ - self.nullable = nullable - - @property - def null(self): - if self.nullable: - return None - else: - return b"" - - def format(self, value): - return util.displayable_path(value) - - def parse(self, string): - return normpath(bytestring_path(string)) - - def normalize(self, value): - if isinstance(value, str): - # Paths stored internally as encoded bytes. - return bytestring_path(value) - - elif isinstance(value, BLOB_TYPE): - # We unwrap buffers to bytes. - return bytes(value) - - else: - return value - - def from_sql(self, sql_value): - return self.normalize(sql_value) - - def to_sql(self, value): - if isinstance(value, bytes): - value = BLOB_TYPE(value) - return value - - -class MusicalKey(types.String): - """String representing the musical key of a song. - - The standard format is C, Cm, C#, C#m, etc. 
- """ - - ENHARMONIC = { - r"db": "c#", - r"eb": "d#", - r"gb": "f#", - r"ab": "g#", - r"bb": "a#", - } - - null = None - - def parse(self, key): - key = key.lower() - for flat, sharp in self.ENHARMONIC.items(): - key = re.sub(flat, sharp, key) - key = re.sub(r"[\W\s]+minor", "m", key) - key = re.sub(r"[\W\s]+major", "", key) - return key.capitalize() - - def normalize(self, key): - if key is None: - return None - else: - return self.parse(key) - - -class DurationType(types.Float): - """Human-friendly (M:SS) representation of a time interval.""" - - query = dbcore.query.DurationQuery - - def format(self, value): - if not beets.config["format_raw_length"].get(bool): - return beets.ui.human_seconds_short(value or 0.0) - else: - return value - - def parse(self, string): - try: - # Try to format back hh:ss to seconds. - return util.raw_seconds_short(string) - except ValueError: - # Fall back to a plain float. - try: - return float(string) - except ValueError: - return self.null - - -# Special path format key. -PF_KEY_DEFAULT = "default" - - -# Exceptions. -class FileOperationError(Exception): - """Indicate an error when interacting with a file on disk. - - Possibilities include an unsupported media type, a permissions - error, and an unhandled Mutagen exception. - """ - - def __init__(self, path, reason): - """Create an exception describing an operation on the file at - `path` with the underlying (chained) exception `reason`. - """ - super().__init__(path, reason) - self.path = path - self.reason = reason - - def __str__(self): - """Get a string representing the error. - - Describe both the underlying reason and the file path in question. - """ - return f"{util.displayable_path(self.path)}: {self.reason}" - - -class ReadError(FileOperationError): - """An error while reading a file (i.e. in `Item.read`).""" - - def __str__(self): - return "error reading " + str(super()) - - -class WriteError(FileOperationError): - """An error while writing a file (i.e. 
in `Item.write`).""" - - def __str__(self): - return "error writing " + str(super()) - - -# Item and Album model classes. - - class LibModel(dbcore.Model["Library"]): """Shared concrete functionality for Items and Albums.""" # Config key that specifies how an instance should be formatted. _format_config_key: str + path: bytes + + @cached_classproperty + def _types(cls) -> dict[str, types.Type]: + """Return the types of the fields in this model.""" + return { + **plugins.types(cls), # type: ignore[arg-type] + "data_source": types.STRING, + } + + @cached_classproperty + def _queries(cls) -> dict[str, FieldQueryType]: + return plugins.named_queries(cls) # type: ignore[arg-type] @cached_classproperty def writable_media_fields(cls) -> set[str]: return set(MediaFile.fields()) & cls._fields.keys() + @property + def filepath(self) -> Path: + """The path to the entity as pathlib.Path.""" + return Path(os.fsdecode(self.path)) + def _template_funcs(self): funcs = DefaultTemplateFunctions(self, self._db).functions() funcs.update(plugins.template_funcs()) @@ -368,8 +76,9 @@ class LibModel(dbcore.Model["Library"]): plugins.send("database_change", lib=self._db, model=self) def add(self, lib=None): + # super().add() calls self.store(), which sends `database_change`, + # so don't do it here super().add(lib) - plugins.send("database_change", lib=self._db, model=self) def __format__(self, spec): if not spec: @@ -508,14 +217,416 @@ class FormattedItemMapping(dbcore.db.FormattedMapping): return len(self.all_keys) +class Album(LibModel): + """Provide access to information about albums stored in a + library. + + Reflects the library's "albums" table, including album art. 
+ """ + + artpath: bytes + + _table = "albums" + _flex_table = "album_attributes" + _always_dirty = True + _fields = { + "id": types.PRIMARY_ID, + "artpath": types.NullPathType(), + "added": types.DATE, + "albumartist": types.STRING, + "albumartist_sort": types.STRING, + "albumartist_credit": types.STRING, + "albumartists": types.MULTI_VALUE_DSV, + "albumartists_sort": types.MULTI_VALUE_DSV, + "albumartists_credit": types.MULTI_VALUE_DSV, + "album": types.STRING, + "genre": types.STRING, + "style": types.STRING, + "discogs_albumid": types.INTEGER, + "discogs_artistid": types.INTEGER, + "discogs_labelid": types.INTEGER, + "year": types.PaddedInt(4), + "month": types.PaddedInt(2), + "day": types.PaddedInt(2), + "disctotal": types.PaddedInt(2), + "comp": types.BOOLEAN, + "mb_albumid": types.STRING, + "mb_albumartistid": types.STRING, + "mb_albumartistids": types.MULTI_VALUE_DSV, + "albumtype": types.STRING, + "albumtypes": types.SEMICOLON_SPACE_DSV, + "label": types.STRING, + "barcode": types.STRING, + "mb_releasegroupid": types.STRING, + "release_group_title": types.STRING, + "asin": types.STRING, + "catalognum": types.STRING, + "script": types.STRING, + "language": types.STRING, + "country": types.STRING, + "albumstatus": types.STRING, + "albumdisambig": types.STRING, + "releasegroupdisambig": types.STRING, + "rg_album_gain": types.NULL_FLOAT, + "rg_album_peak": types.NULL_FLOAT, + "r128_album_gain": types.NULL_FLOAT, + "original_year": types.PaddedInt(4), + "original_month": types.PaddedInt(2), + "original_day": types.PaddedInt(2), + } + + _search_fields = ("album", "albumartist", "genre") + + @cached_classproperty + def _types(cls) -> dict[str, types.Type]: + return {**super()._types, "path": types.PathType()} + + _sorts = { + "albumartist": dbcore.query.SmartArtistSort, + "artist": dbcore.query.SmartArtistSort, + } + + # List of keys that are set on an album's items. 
+ item_keys = [ + "added", + "albumartist", + "albumartists", + "albumartist_sort", + "albumartists_sort", + "albumartist_credit", + "albumartists_credit", + "album", + "genre", + "style", + "discogs_albumid", + "discogs_artistid", + "discogs_labelid", + "year", + "month", + "day", + "disctotal", + "comp", + "mb_albumid", + "mb_albumartistid", + "mb_albumartistids", + "albumtype", + "albumtypes", + "label", + "barcode", + "mb_releasegroupid", + "asin", + "catalognum", + "script", + "language", + "country", + "albumstatus", + "albumdisambig", + "releasegroupdisambig", + "release_group_title", + "rg_album_gain", + "rg_album_peak", + "r128_album_gain", + "original_year", + "original_month", + "original_day", + ] + + _format_config_key = "format_album" + + @cached_classproperty + def _relation(cls) -> type[Item]: + return Item + + @cached_classproperty + def relation_join(cls) -> str: + """Return FROM clause which joins on related album items. + + Use LEFT join to select all albums, including those that do not have + any items. + """ + return ( + f"LEFT JOIN {cls._relation._table} " + f"ON {cls._table}.id = {cls._relation._table}.album_id" + ) + + @property + def art_filepath(self) -> Path | None: + """The path to album's cover picture as pathlib.Path.""" + return Path(os.fsdecode(self.artpath)) if self.artpath else None + + @classmethod + def _getters(cls): + # In addition to plugin-provided computed fields, also expose + # the album's directory as `path`. + getters = plugins.album_field_getters() + getters["path"] = Album.item_dir + getters["albumtotal"] = Album._albumtotal + return getters + + def items(self): + """Return an iterable over the items associated with this + album. + + This method conflicts with :meth:`LibModel.items`, which is + inherited from :meth:`beets.dbcore.Model.items`. + Since :meth:`Album.items` predates these methods, and is + likely to be used by plugins, we keep this interface as-is. 
+ """ + return self._db.items(dbcore.MatchQuery("album_id", self.id)) + + def remove(self, delete=False, with_items=True): + """Remove this album and all its associated items from the + library. + + If delete, then the items' files are also deleted from disk, + along with any album art. The directories containing the album are + also removed (recursively) if empty. + + Set with_items to False to avoid removing the album's items. + """ + super().remove() + + # Send a 'album_removed' signal to plugins + plugins.send("album_removed", album=self) + + # Delete art file. + if delete: + artpath = self.artpath + if artpath: + util.remove(artpath) + + # Remove (and possibly delete) the constituent items. + if with_items: + for item in self.items(): + item.remove(delete, False) + + def move_art(self, operation=MoveOperation.MOVE): + """Move, copy, link or hardlink (depending on `operation`) any + existing album art so that it remains in the same directory as + the items. + + `operation` should be an instance of `util.MoveOperation`. 
+ """ + old_art = self.artpath + if not old_art: + return + + if not os.path.exists(syspath(old_art)): + log.error( + "removing reference to missing album art file {}", + util.displayable_path(old_art), + ) + self.artpath = None + return + + new_art = self.art_destination(old_art) + if new_art == old_art: + return + + new_art = util.unique_path(new_art) + log.debug( + "moving album art {} to {}", + util.displayable_path(old_art), + util.displayable_path(new_art), + ) + if operation == MoveOperation.MOVE: + util.move(old_art, new_art) + util.prune_dirs(os.path.dirname(old_art), self._db.directory) + elif operation == MoveOperation.COPY: + util.copy(old_art, new_art) + elif operation == MoveOperation.LINK: + util.link(old_art, new_art) + elif operation == MoveOperation.HARDLINK: + util.hardlink(old_art, new_art) + elif operation == MoveOperation.REFLINK: + util.reflink(old_art, new_art, fallback=False) + elif operation == MoveOperation.REFLINK_AUTO: + util.reflink(old_art, new_art, fallback=True) + else: + assert False, "unknown MoveOperation" + self.artpath = new_art + + def move(self, operation=MoveOperation.MOVE, basedir=None, store=True): + """Move, copy, link or hardlink (depending on `operation`) + all items to their destination. Any album art moves along with them. + + `basedir` overrides the library base directory for the destination. + + `operation` should be an instance of `util.MoveOperation`. + + By default, the album is stored to the database, persisting any + modifications to its metadata. If `store` is `False` however, + the album is not stored automatically, and it will have to be manually + stored after invoking this method. + """ + basedir = basedir or self._db.directory + + # Ensure new metadata is available to items for destination + # computation. + if store: + self.store() + + # Move items. + items = list(self.items()) + for item in items: + item.move(operation, basedir=basedir, with_album=False, store=store) + + # Move art. 
+ self.move_art(operation) + if store: + self.store() + + def item_dir(self): + """Return the directory containing the album's first item, + provided that such an item exists. + """ + item = self.items().get() + if not item: + raise ValueError(f"empty album for album id {self.id}") + return os.path.dirname(item.path) + + def _albumtotal(self): + """Return the total number of tracks on all discs on the album.""" + if self.disctotal == 1 or not beets.config["per_disc_numbering"]: + return self.items()[0].tracktotal + + counted = [] + total = 0 + + for item in self.items(): + if item.disc in counted: + continue + + total += item.tracktotal + counted.append(item.disc) + + if len(counted) == self.disctotal: + break + + return total + + def art_destination(self, image, item_dir=None): + """Return a path to the destination for the album art image + for the album. + + `image` is the path of the image that will be + moved there (used for its extension). + + The path construction uses the existing path of the album's + items, so the album must contain at least one item or + item_dir must be provided. + """ + image = bytestring_path(image) + item_dir = item_dir or self.item_dir() + + filename_tmpl = template(beets.config["art_filename"].as_str()) + subpath = self.evaluate_template(filename_tmpl, True) + if beets.config["asciify_paths"]: + subpath = util.asciify_path( + subpath, beets.config["path_sep_replace"].as_str() + ) + subpath = util.sanitize_path( + subpath, replacements=self._db.replacements + ) + subpath = bytestring_path(subpath) + + _, ext = os.path.splitext(image) + dest = os.path.join(item_dir, subpath + ext) + + return bytestring_path(dest) + + def set_art(self, path, copy=True): + """Set the album's cover art to the image at the given path. + + The image is copied (or moved) into place, replacing any + existing art. + + Send an 'art_set' event with `self` as the sole argument. 
+ """ + path = bytestring_path(path) + oldart = self.artpath + artdest = self.art_destination(path) + + if oldart and samefile(path, oldart): + # Art already set. + return + elif samefile(path, artdest): + # Art already in place. + self.artpath = path + return + + # Normal operation. + if oldart == artdest: + util.remove(oldart) + artdest = util.unique_path(artdest) + if copy: + util.copy(path, artdest) + else: + util.move(path, artdest) + self.artpath = artdest + + plugins.send("art_set", album=self) + + def store(self, fields=None, inherit=True): + """Update the database with the album information. + + `fields` represents the fields to be stored. If not specified, + all fields will be. + + The album's tracks are also updated when the `inherit` flag is enabled. + This applies to fixed attributes as well as flexible ones. The `id` + attribute of the album will never be inherited. + """ + # Get modified track fields. + track_updates = {} + track_deletes = set() + for key in self._dirty: + if inherit: + if key in self.item_keys: # is a fixed attribute + track_updates[key] = self[key] + elif key not in self: # is a fixed or a flexible attribute + track_deletes.add(key) + elif key != "id": # is a flexible attribute + track_updates[key] = self[key] + + with self._db.transaction(): + super().store(fields) + if track_updates: + for item in self.items(): + for key, value in track_updates.items(): + item[key] = value + item.store() + if track_deletes: + for item in self.items(): + for key in track_deletes: + if key in item: + del item[key] + item.store() + + def try_sync(self, write, move, inherit=True): + """Synchronize the album and its items with the database. + Optionally, also write any new tags into the files and update + their paths. + + `write` indicates whether to write tags to the item files, and + `move` controls whether files (both audio and album art) are + moved. 
+ """ + self.store(inherit=inherit) + for item in self.items(): + item.try_sync(write, move) + + class Item(LibModel): """Represent a song or track.""" + album_id: int | None + _table = "items" _flex_table = "item_attributes" _fields = { "id": types.PRIMARY_ID, - "path": PathType(), + "path": types.PathType(), "album_id": types.FOREIGN_ID, "title": types.STRING, "artist": types.STRING, @@ -594,8 +705,8 @@ class Item(LibModel): "original_year": types.PaddedInt(4), "original_month": types.PaddedInt(2), "original_day": types.PaddedInt(2), - "initial_key": MusicalKey(), - "length": DurationType(), + "initial_key": types.MusicalKey(), + "length": types.DurationType(), "bitrate": types.ScaledInt(1000, "kbps"), "bitrate_mode": types.STRING, "encoder_info": types.STRING, @@ -604,8 +715,8 @@ class Item(LibModel): "samplerate": types.ScaledInt(1000, "kHz"), "bitdepth": types.INTEGER, "channels": types.INTEGER, - "mtime": DateType(), - "added": DateType(), + "mtime": types.DATE, + "added": types.DATE, } _search_fields = ( @@ -617,10 +728,6 @@ class Item(LibModel): "genre", ) - _types = { - "data_source": types.STRING, - } - # Set of item fields that are backed by `MediaFile` fields. # Any kind of field (fixed, flexible, and computed) may be a media # field. Only these fields are read from disk in `read` and written in @@ -639,12 +746,14 @@ class Item(LibModel): _sorts = {"artist": dbcore.query.SmartArtistSort} - _queries = {"singleton": SingletonQuery} + @cached_classproperty + def _queries(cls) -> dict[str, FieldQueryType]: + return {**super()._queries, "singleton": dbcore.query.SingletonQuery} _format_config_key = "format_item" # Cached album object. Read-only. 
- __album = None + __album: Album | None = None @cached_classproperty def _relation(cls) -> type[Album]: @@ -662,11 +771,6 @@ class Item(LibModel): f"ON {cls._table}.album_id = {cls._relation._table}.id" ) - @property - def filepath(self) -> Path | None: - """The path to the item's file as pathlib.Path.""" - return Path(os.fsdecode(self.path)) if self.path else self.path - @property def _cached_album(self): """The Album object that this item belongs to, if any, or @@ -715,7 +819,7 @@ class Item(LibModel): if key == "path": if isinstance(value, str): value = bytestring_path(value) - elif isinstance(value, BLOB_TYPE): + elif isinstance(value, types.BLOB_TYPE): value = bytes(value) elif key == "album_id": self._cached_album = None @@ -742,12 +846,9 @@ class Item(LibModel): # This must not use `with_album=True`, because that might access # the database. When debugging, that is not guaranteed to succeed, and # can even deadlock due to the database lock. - return "{}({})".format( - type(self).__name__, - ", ".join( - "{}={!r}".format(k, self[k]) - for k in self.keys(with_album=False) - ), + return ( + f"{type(self).__name__}" + f"({', '.join(f'{k}={self[k]!r}' for k in self.keys(with_album=False))})" ) def keys(self, computed=False, with_album=True): @@ -893,7 +994,7 @@ class Item(LibModel): self.write(*args, **kwargs) return True except FileOperationError as exc: - log.error("{0}", exc) + log.error("{}", exc) return False def try_sync(self, write, move, with_album=True): @@ -913,10 +1014,7 @@ class Item(LibModel): if move: # Check whether this file is inside the library directory. 
if self._db and self._db.directory in util.ancestry(self.path): - log.debug( - "moving {0} to synchronize path", - util.displayable_path(self.path), - ) + log.debug("moving {.filepath} to synchronize path", self) self.move(with_album=with_album) self.store() @@ -988,7 +1086,7 @@ class Item(LibModel): try: return os.path.getsize(syspath(self.path)) except (OSError, Exception) as exc: - log.warning("could not get filesize: {0}", exc) + log.warning("could not get filesize: {}", exc) return 0 # Model methods. @@ -1047,7 +1145,6 @@ class Item(LibModel): If `store` is `False` however, the item won't be stored and it will have to be manually stored after invoking this method. """ - self._check_db() dest = self.destination(basedir=basedir) # Create necessary ancestry for the move. @@ -1075,26 +1172,20 @@ class Item(LibModel): def destination( self, - fragment=False, + relative_to_libdir=False, basedir=None, - platform=None, path_formats=None, - replacements=None, - ): - """Return the path in the library directory designated for the - item (i.e., where the file ought to be). + ) -> bytes: + """Return the path in the library directory designated for the item + (i.e., where the file ought to be). - fragment makes this method return just the path fragment underneath - the root library directory; the path is also returned as Unicode - instead of encoded as a bytestring. basedir can override the library's - base directory for the destination. + The path is returned as a bytestring. ``basedir`` can override the + library's base directory for the destination. If ``relative_to_libdir`` + is true, returns just the fragment of the path underneath the library + base directory. 
""" - db = self._check_db() - platform = platform or sys.platform - basedir = basedir or db.directory - path_formats = path_formats or db.path_formats - if replacements is None: - replacements = self._db.replacements + basedir = basedir or self.db.directory + path_formats = path_formats or self.db.path_formats # Use a path format based on a query, falling back on the # default. @@ -1122,7 +1213,7 @@ class Item(LibModel): subpath = self.evaluate_template(subpath_tmpl, True) # Prepare path for output: normalize Unicode characters. - if platform == "darwin": + if sys.platform == "darwin": subpath = unicodedata.normalize("NFD", subpath) else: subpath = unicodedata.normalize("NFC", subpath) @@ -1132,19 +1223,10 @@ class Item(LibModel): subpath, beets.config["path_sep_replace"].as_str() ) - maxlen = beets.config["max_filename_length"].get(int) - if not maxlen: - # When zero, try to determine from filesystem. - maxlen = util.max_filename_length(db.directory) - - subpath, fellback = util.legalize_path( - subpath, - replacements, - maxlen, - os.path.splitext(self.path)[1], - fragment, + lib_path_str, fallback = util.legalize_path( + subpath, self.db.replacements, self.filepath.suffix ) - if fellback: + if fallback: # Print an error message if legalization fell back to # default replacements because of the maximum length. log.warning( @@ -1153,589 +1235,12 @@ class Item(LibModel): "the filename.", subpath, ) + lib_path_bytes = util.bytestring_path(lib_path_str) - if fragment: - return util.as_string(subpath) - else: - return normpath(os.path.join(basedir, subpath)) + if relative_to_libdir: + return lib_path_bytes - -class Album(LibModel): - """Provide access to information about albums stored in a - library. - - Reflects the library's "albums" table, including album art. 
- """ - - _table = "albums" - _flex_table = "album_attributes" - _always_dirty = True - _fields = { - "id": types.PRIMARY_ID, - "artpath": PathType(True), - "added": DateType(), - "albumartist": types.STRING, - "albumartist_sort": types.STRING, - "albumartist_credit": types.STRING, - "albumartists": types.MULTI_VALUE_DSV, - "albumartists_sort": types.MULTI_VALUE_DSV, - "albumartists_credit": types.MULTI_VALUE_DSV, - "album": types.STRING, - "genre": types.STRING, - "style": types.STRING, - "discogs_albumid": types.INTEGER, - "discogs_artistid": types.INTEGER, - "discogs_labelid": types.INTEGER, - "year": types.PaddedInt(4), - "month": types.PaddedInt(2), - "day": types.PaddedInt(2), - "disctotal": types.PaddedInt(2), - "comp": types.BOOLEAN, - "mb_albumid": types.STRING, - "mb_albumartistid": types.STRING, - "albumtype": types.STRING, - "albumtypes": types.SEMICOLON_SPACE_DSV, - "label": types.STRING, - "barcode": types.STRING, - "mb_releasegroupid": types.STRING, - "release_group_title": types.STRING, - "asin": types.STRING, - "catalognum": types.STRING, - "script": types.STRING, - "language": types.STRING, - "country": types.STRING, - "albumstatus": types.STRING, - "albumdisambig": types.STRING, - "releasegroupdisambig": types.STRING, - "rg_album_gain": types.NULL_FLOAT, - "rg_album_peak": types.NULL_FLOAT, - "r128_album_gain": types.NULL_FLOAT, - "original_year": types.PaddedInt(4), - "original_month": types.PaddedInt(2), - "original_day": types.PaddedInt(2), - } - - _search_fields = ("album", "albumartist", "genre") - - _types = { - "path": PathType(), - "data_source": types.STRING, - } - - _sorts = { - "albumartist": dbcore.query.SmartArtistSort, - "artist": dbcore.query.SmartArtistSort, - } - - # List of keys that are set on an album's items. 
- item_keys = [ - "added", - "albumartist", - "albumartists", - "albumartist_sort", - "albumartists_sort", - "albumartist_credit", - "albumartists_credit", - "album", - "genre", - "style", - "discogs_albumid", - "discogs_artistid", - "discogs_labelid", - "year", - "month", - "day", - "disctotal", - "comp", - "mb_albumid", - "mb_albumartistid", - "albumtype", - "albumtypes", - "label", - "barcode", - "mb_releasegroupid", - "asin", - "catalognum", - "script", - "language", - "country", - "albumstatus", - "albumdisambig", - "releasegroupdisambig", - "release_group_title", - "rg_album_gain", - "rg_album_peak", - "r128_album_gain", - "original_year", - "original_month", - "original_day", - ] - - _format_config_key = "format_album" - - @cached_classproperty - def _relation(cls) -> type[Item]: - return Item - - @cached_classproperty - def relation_join(cls) -> str: - """Return FROM clause which joins on related album items. - - Use LEFT join to select all albums, including those that do not have - any items. - """ - return ( - f"LEFT JOIN {cls._relation._table} " - f"ON {cls._table}.id = {cls._relation._table}.album_id" - ) - - @classmethod - def _getters(cls): - # In addition to plugin-provided computed fields, also expose - # the album's directory as `path`. - getters = plugins.album_field_getters() - getters["path"] = Album.item_dir - getters["albumtotal"] = Album._albumtotal - return getters - - def items(self): - """Return an iterable over the items associated with this - album. - - This method conflicts with :meth:`LibModel.items`, which is - inherited from :meth:`beets.dbcore.Model.items`. - Since :meth:`Album.items` predates these methods, and is - likely to be used by plugins, we keep this interface as-is. - """ - return self._db.items(dbcore.MatchQuery("album_id", self.id)) - - def remove(self, delete=False, with_items=True): - """Remove this album and all its associated items from the - library. 
- - If delete, then the items' files are also deleted from disk, - along with any album art. The directories containing the album are - also removed (recursively) if empty. - - Set with_items to False to avoid removing the album's items. - """ - super().remove() - - # Send a 'album_removed' signal to plugins - plugins.send("album_removed", album=self) - - # Delete art file. - if delete: - artpath = self.artpath - if artpath: - util.remove(artpath) - - # Remove (and possibly delete) the constituent items. - if with_items: - for item in self.items(): - item.remove(delete, False) - - def move_art(self, operation=MoveOperation.MOVE): - """Move, copy, link or hardlink (depending on `operation`) any - existing album art so that it remains in the same directory as - the items. - - `operation` should be an instance of `util.MoveOperation`. - """ - old_art = self.artpath - if not old_art: - return - - if not os.path.exists(syspath(old_art)): - log.error( - "removing reference to missing album art file {}", - util.displayable_path(old_art), - ) - self.artpath = None - return - - new_art = self.art_destination(old_art) - if new_art == old_art: - return - - new_art = util.unique_path(new_art) - log.debug( - "moving album art {0} to {1}", - util.displayable_path(old_art), - util.displayable_path(new_art), - ) - if operation == MoveOperation.MOVE: - util.move(old_art, new_art) - util.prune_dirs(os.path.dirname(old_art), self._db.directory) - elif operation == MoveOperation.COPY: - util.copy(old_art, new_art) - elif operation == MoveOperation.LINK: - util.link(old_art, new_art) - elif operation == MoveOperation.HARDLINK: - util.hardlink(old_art, new_art) - elif operation == MoveOperation.REFLINK: - util.reflink(old_art, new_art, fallback=False) - elif operation == MoveOperation.REFLINK_AUTO: - util.reflink(old_art, new_art, fallback=True) - else: - assert False, "unknown MoveOperation" - self.artpath = new_art - - def move(self, operation=MoveOperation.MOVE, basedir=None, 
store=True): - """Move, copy, link or hardlink (depending on `operation`) - all items to their destination. Any album art moves along with them. - - `basedir` overrides the library base directory for the destination. - - `operation` should be an instance of `util.MoveOperation`. - - By default, the album is stored to the database, persisting any - modifications to its metadata. If `store` is `False` however, - the album is not stored automatically, and it will have to be manually - stored after invoking this method. - """ - basedir = basedir or self._db.directory - - # Ensure new metadata is available to items for destination - # computation. - if store: - self.store() - - # Move items. - items = list(self.items()) - for item in items: - item.move(operation, basedir=basedir, with_album=False, store=store) - - # Move art. - self.move_art(operation) - if store: - self.store() - - def item_dir(self): - """Return the directory containing the album's first item, - provided that such an item exists. - """ - item = self.items().get() - if not item: - raise ValueError("empty album for album id %d" % self.id) - return os.path.dirname(item.path) - - def _albumtotal(self): - """Return the total number of tracks on all discs on the album.""" - if self.disctotal == 1 or not beets.config["per_disc_numbering"]: - return self.items()[0].tracktotal - - counted = [] - total = 0 - - for item in self.items(): - if item.disc in counted: - continue - - total += item.tracktotal - counted.append(item.disc) - - if len(counted) == self.disctotal: - break - - return total - - def art_destination(self, image, item_dir=None): - """Return a path to the destination for the album art image - for the album. - - `image` is the path of the image that will be - moved there (used for its extension). - - The path construction uses the existing path of the album's - items, so the album must contain at least one item or - item_dir must be provided. 
- """ - image = bytestring_path(image) - item_dir = item_dir or self.item_dir() - - filename_tmpl = template(beets.config["art_filename"].as_str()) - subpath = self.evaluate_template(filename_tmpl, True) - if beets.config["asciify_paths"]: - subpath = util.asciify_path( - subpath, beets.config["path_sep_replace"].as_str() - ) - subpath = util.sanitize_path( - subpath, replacements=self._db.replacements - ) - subpath = bytestring_path(subpath) - - _, ext = os.path.splitext(image) - dest = os.path.join(item_dir, subpath + ext) - - return bytestring_path(dest) - - def set_art(self, path, copy=True): - """Set the album's cover art to the image at the given path. - - The image is copied (or moved) into place, replacing any - existing art. - - Send an 'art_set' event with `self` as the sole argument. - """ - path = bytestring_path(path) - oldart = self.artpath - artdest = self.art_destination(path) - - if oldart and samefile(path, oldart): - # Art already set. - return - elif samefile(path, artdest): - # Art already in place. - self.artpath = path - return - - # Normal operation. - if oldart == artdest: - util.remove(oldart) - artdest = util.unique_path(artdest) - if copy: - util.copy(path, artdest) - else: - util.move(path, artdest) - self.artpath = artdest - - plugins.send("art_set", album=self) - - def store(self, fields=None, inherit=True): - """Update the database with the album information. - - `fields` represents the fields to be stored. If not specified, - all fields will be. - - The album's tracks are also updated when the `inherit` flag is enabled. - This applies to fixed attributes as well as flexible ones. The `id` - attribute of the album will never be inherited. - """ - # Get modified track fields. 
- track_updates = {} - track_deletes = set() - for key in self._dirty: - if inherit: - if key in self.item_keys: # is a fixed attribute - track_updates[key] = self[key] - elif key not in self: # is a fixed or a flexible attribute - track_deletes.add(key) - elif key != "id": # is a flexible attribute - track_updates[key] = self[key] - - with self._db.transaction(): - super().store(fields) - if track_updates: - for item in self.items(): - for key, value in track_updates.items(): - item[key] = value - item.store() - if track_deletes: - for item in self.items(): - for key in track_deletes: - if key in item: - del item[key] - item.store() - - def try_sync(self, write, move, inherit=True): - """Synchronize the album and its items with the database. - Optionally, also write any new tags into the files and update - their paths. - - `write` indicates whether to write tags to the item files, and - `move` controls whether files (both audio and album art) are - moved. - """ - self.store(inherit=inherit) - for item in self.items(): - item.try_sync(write, move) - - -# Query construction helpers. - - -def parse_query_parts(parts, model_cls): - """Given a beets query string as a list of components, return the - `Query` and `Sort` they represent. - - Like `dbcore.parse_sorted_query`, with beets query prefixes and - ensuring that implicit path queries are made explicit with 'path::' - """ - # Get query types and their prefix characters. - prefixes = { - ":": dbcore.query.RegexpQuery, - "=~": dbcore.query.StringQuery, - "=": dbcore.query.MatchQuery, - } - prefixes.update(plugins.queries()) - - # Special-case path-like queries, which are non-field queries - # containing path separators (/). 
- parts = [f"path:{s}" if PathQuery.is_path_query(s) else s for s in parts] - - case_insensitive = beets.config["sort_case_insensitive"].get(bool) - - query, sort = dbcore.parse_sorted_query( - model_cls, parts, prefixes, case_insensitive - ) - log.debug("Parsed query: {!r}", query) - log.debug("Parsed sort: {!r}", sort) - return query, sort - - -def parse_query_string(s, model_cls): - """Given a beets query string, return the `Query` and `Sort` they - represent. - - The string is split into components using shell-like syntax. - """ - message = f"Query is not unicode: {s!r}" - assert isinstance(s, str), message - try: - parts = shlex.split(s) - except ValueError as exc: - raise dbcore.InvalidQueryError(s, exc) - return parse_query_parts(parts, model_cls) - - -# The Library: interface to the database. - - -class Library(dbcore.Database): - """A database of music containing songs and albums.""" - - _models = (Item, Album) - - def __init__( - self, - path="library.blb", - directory: str | None = None, - path_formats=((PF_KEY_DEFAULT, "$artist/$album/$track $title"),), - replacements=None, - ): - timeout = beets.config["timeout"].as_number() - super().__init__(path, timeout=timeout) - - self.directory = normpath(directory or platformdirs.user_music_path()) - - self.path_formats = path_formats - self.replacements = replacements - - # Used for template substitution performance. - self._memotable: dict[tuple[str, ...], str] = {} - - # Adding objects to the database. - - def add(self, obj): - """Add the :class:`Item` or :class:`Album` object to the library - database. - - Return the object's new id. - """ - obj.add(self) - self._memotable = {} - return obj.id - - def add_album(self, items): - """Create a new album consisting of a list of items. - - The items are added to the database if they don't yet have an - ID. Return a new :class:`Album` object. The list items must not - be empty. 
- """ - if not items: - raise ValueError("need at least one item") - - # Create the album structure using metadata from the first item. - values = {key: items[0][key] for key in Album.item_keys} - album = Album(self, **values) - - # Add the album structure and set the items' album_id fields. - # Store or add the items. - with self.transaction(): - album.add(self) - for item in items: - item.album_id = album.id - if item.id is None: - item.add(self) - else: - item.store() - - return album - - # Querying. - - def _fetch(self, model_cls, query, sort=None): - """Parse a query and fetch. - - If an order specification is present in the query string - the `sort` argument is ignored. - """ - # Parse the query, if necessary. - try: - parsed_sort = None - if isinstance(query, str): - query, parsed_sort = parse_query_string(query, model_cls) - elif isinstance(query, (list, tuple)): - query, parsed_sort = parse_query_parts(query, model_cls) - except dbcore.query.InvalidQueryArgumentValueError as exc: - raise dbcore.InvalidQueryError(query, exc) - - # Any non-null sort specified by the parsed query overrides the - # provided sort. 
- if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort): - sort = parsed_sort - - return super()._fetch(model_cls, query, sort) - - @staticmethod - def get_default_album_sort(): - """Get a :class:`Sort` object for albums from the config option.""" - return dbcore.sort_from_strings( - Album, beets.config["sort_album"].as_str_seq() - ) - - @staticmethod - def get_default_item_sort(): - """Get a :class:`Sort` object for items from the config option.""" - return dbcore.sort_from_strings( - Item, beets.config["sort_item"].as_str_seq() - ) - - def albums(self, query=None, sort=None) -> Results[Album]: - """Get :class:`Album` objects matching the query.""" - return self._fetch(Album, query, sort or self.get_default_album_sort()) - - def items(self, query=None, sort=None) -> Results[Item]: - """Get :class:`Item` objects matching the query.""" - return self._fetch(Item, query, sort or self.get_default_item_sort()) - - # Convenience accessors. - - def get_item(self, id): - """Fetch a :class:`Item` by its ID. - - Return `None` if no match is found. - """ - return self._get(Item, id) - - def get_album(self, item_or_id): - """Given an album ID or an item associated with an album, return - a :class:`Album` object for the album. - - If no such album exists, return `None`. - """ - if isinstance(item_or_id, int): - album_id = item_or_id - else: - album_id = item_or_id.album_id - if album_id is None: - return None - return self._get(Album, album_id) - - -# Default path template resources. + return normpath(os.path.join(basedir, lib_path_bytes)) def _int_arg(s): diff --git a/beets/library/queries.py b/beets/library/queries.py new file mode 100644 index 000000000..7c9d688cd --- /dev/null +++ b/beets/library/queries.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import shlex + +import beets +from beets import dbcore, logging, plugins + +log = logging.getLogger("beets") + + +# Special path format key. 
+PF_KEY_DEFAULT = "default" + +# Query construction helpers. + + +def parse_query_parts(parts, model_cls): + """Given a beets query string as a list of components, return the + `Query` and `Sort` they represent. + + Like `dbcore.parse_sorted_query`, with beets query prefixes and + ensuring that implicit path queries are made explicit with 'path::' + """ + # Get query types and their prefix characters. + prefixes = { + ":": dbcore.query.RegexpQuery, + "=~": dbcore.query.StringQuery, + "=": dbcore.query.MatchQuery, + } + prefixes.update(plugins.queries()) + + # Special-case path-like queries, which are non-field queries + # containing path separators (/). + parts = [ + f"path:{s}" if dbcore.query.PathQuery.is_path_query(s) else s + for s in parts + ] + + case_insensitive = beets.config["sort_case_insensitive"].get(bool) + + query, sort = dbcore.parse_sorted_query( + model_cls, parts, prefixes, case_insensitive + ) + log.debug("Parsed query: {!r}", query) + log.debug("Parsed sort: {!r}", sort) + return query, sort + + +def parse_query_string(s, model_cls): + """Given a beets query string, return the `Query` and `Sort` they + represent. + + The string is split into components using shell-like syntax. + """ + message = f"Query is not unicode: {s!r}" + assert isinstance(s, str), message + try: + parts = shlex.split(s) + except ValueError as exc: + raise dbcore.InvalidQueryError(s, exc) + return parse_query_parts(parts, model_cls) diff --git a/beets/logging.py b/beets/logging.py index fd8b1962f..5a837cd80 100644 --- a/beets/logging.py +++ b/beets/logging.py @@ -20,6 +20,9 @@ use {}-style formatting and can interpolate keywords arguments to the logging calls (`debug`, `info`, etc). 
""" +from __future__ import annotations + +import re import threading from copy import copy from logging import ( @@ -32,8 +35,10 @@ from logging import ( Handler, Logger, NullHandler, + RootLogger, StreamHandler, ) +from typing import TYPE_CHECKING, Any, TypeVar, Union, overload __all__ = [ "DEBUG", @@ -49,8 +54,31 @@ __all__ = [ "getLogger", ] +if TYPE_CHECKING: + from collections.abc import Mapping -def logsafe(val): + T = TypeVar("T") + from types import TracebackType + + # see https://github.com/python/typeshed/blob/main/stdlib/logging/__init__.pyi + _SysExcInfoType = Union[ + tuple[type[BaseException], BaseException, Union[TracebackType, None]], + tuple[None, None, None], + ] + _ExcInfoType = Union[None, bool, _SysExcInfoType, BaseException] + _ArgsType = Union[tuple[object, ...], Mapping[str, object]] + + +# Regular expression to match: +# - C0 control characters (0x00-0x1F) except useful whitespace (\t, \n, \r) +# - DEL control character (0x7f) +# - C1 control characters (0x80-0x9F) +# Used to sanitize log messages that could disrupt terminal output +_CONTROL_CHAR_REGEX = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f\x80-\x9f]") +_UNICODE_REPLACEMENT_CHARACTER = "\ufffd" + + +def _logsafe(val: T) -> str | T: """Coerce `bytes` to `str` to avoid crashes solely due to logging. This is particularly relevant for bytestring paths. Much of our code @@ -64,6 +92,10 @@ def logsafe(val): # type, and (b) warn the developer if they do this for other # bytestrings. return val.decode("utf-8", "replace") + if isinstance(val, str): + # Sanitize log messages by replacing control characters that can disrupt + # terminals. + return _CONTROL_CHAR_REGEX.sub(_UNICODE_REPLACEMENT_CHARACTER, val) # Other objects are used as-is so field access, etc., still works in # the format string. Relies on a working __str__ implementation. 
@@ -83,40 +115,45 @@ class StrFormatLogger(Logger): """ class _LogMessage: - def __init__(self, msg, args, kwargs): + def __init__( + self, + msg: str, + args: _ArgsType, + kwargs: dict[str, Any], + ): self.msg = msg self.args = args self.kwargs = kwargs def __str__(self): - args = [logsafe(a) for a in self.args] - kwargs = {k: logsafe(v) for (k, v) in self.kwargs.items()} + args = [_logsafe(a) for a in self.args] + kwargs = {k: _logsafe(v) for (k, v) in self.kwargs.items()} return self.msg.format(*args, **kwargs) def _log( self, - level, - msg, - args, - exc_info=None, - extra=None, - stack_info=False, + level: int, + msg: object, + args: _ArgsType, + exc_info: _ExcInfoType = None, + extra: Mapping[str, Any] | None = None, + stack_info: bool = False, + stacklevel: int = 1, **kwargs, ): """Log msg.format(*args, **kwargs)""" - m = self._LogMessage(msg, args, kwargs) - stacklevel = kwargs.pop("stacklevel", 1) - stacklevel = {"stacklevel": stacklevel} + if isinstance(msg, str): + msg = self._LogMessage(msg, args, kwargs) return super()._log( level, - m, + msg, (), exc_info=exc_info, extra=extra, stack_info=stack_info, - **stacklevel, + stacklevel=stacklevel, ) @@ -156,9 +193,12 @@ my_manager = copy(Logger.manager) my_manager.loggerClass = BeetsLogger -# Override the `getLogger` to use our machinery. -def getLogger(name=None): # noqa +@overload +def getLogger(name: str) -> BeetsLogger: ... +@overload +def getLogger(name: None = ...) -> RootLogger: ... +def getLogger(name=None) -> BeetsLogger | RootLogger: # noqa: N802 if name: - return my_manager.getLogger(name) + return my_manager.getLogger(name) # type: ignore[return-value] else: return Logger.root diff --git a/beets/mediafile.py b/beets/mediafile.py index 8bde9274c..df735afff 100644 --- a/beets/mediafile.py +++ b/beets/mediafile.py @@ -13,17 +13,11 @@ # included in all copies or substantial portions of the Software. 
-import warnings - import mediafile -warnings.warn( - "beets.mediafile is deprecated; use mediafile instead", - # Show the location of the `import mediafile` statement as the warning's - # source, rather than this file, such that the offending module can be - # identified easily. - stacklevel=2, -) +from .util.deprecation import deprecate_for_maintainers + +deprecate_for_maintainers("'beets.mediafile'", "'mediafile'", stacklevel=2) # Import everything from the mediafile module into this module. for key, value in mediafile.__dict__.items(): @@ -31,4 +25,4 @@ for key, value in mediafile.__dict__.items(): globals()[key] = value # Cleanup namespace. -del key, value, warnings, mediafile +del key, value, mediafile diff --git a/beets/metadata_plugins.py b/beets/metadata_plugins.py new file mode 100644 index 000000000..f42e8f690 --- /dev/null +++ b/beets/metadata_plugins.py @@ -0,0 +1,359 @@ +"""Metadata source plugin interface. + +This allows beets to lookup metadata from various sources. We define +a common interface for all metadata sources which need to be +implemented as plugins. +""" + +from __future__ import annotations + +import abc +import re +from functools import cache, cached_property +from typing import TYPE_CHECKING, Generic, Literal, TypedDict, TypeVar + +import unidecode +from confuse import NotFoundError +from typing_extensions import NotRequired + +from beets.util import cached_classproperty +from beets.util.id_extractors import extract_release_id + +from .plugins import BeetsPlugin, find_plugins, notify_info_yielded, send + +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + + from .autotag.hooks import AlbumInfo, Item, TrackInfo + + +@cache +def find_metadata_source_plugins() -> list[MetadataSourcePlugin]: + """Return a list of all loaded metadata source plugins.""" + # TODO: Make this an isinstance(MetadataSourcePlugin, ...) 
check in v3.0.0 + return [p for p in find_plugins() if hasattr(p, "data_source")] # type: ignore[misc] + + +@notify_info_yielded("albuminfo_received") +def candidates(*args, **kwargs) -> Iterable[AlbumInfo]: + """Return matching album candidates from all metadata source plugins.""" + for plugin in find_metadata_source_plugins(): + yield from plugin.candidates(*args, **kwargs) + + +@notify_info_yielded("trackinfo_received") +def item_candidates(*args, **kwargs) -> Iterable[TrackInfo]: + """Return matching track candidates fromm all metadata source plugins.""" + for plugin in find_metadata_source_plugins(): + yield from plugin.item_candidates(*args, **kwargs) + + +def album_for_id(_id: str) -> AlbumInfo | None: + """Get AlbumInfo object for the given ID string. + + A single ID can yield just a single album, so we return the first match. + """ + for plugin in find_metadata_source_plugins(): + if info := plugin.album_for_id(album_id=_id): + send("albuminfo_received", info=info) + return info + + return None + + +def track_for_id(_id: str) -> TrackInfo | None: + """Get TrackInfo object for the given ID string. + + A single ID can yield just a single track, so we return the first match. + """ + for plugin in find_metadata_source_plugins(): + if info := plugin.track_for_id(_id): + send("trackinfo_received", info=info) + return info + + return None + + +@cache +def get_penalty(data_source: str | None) -> float: + """Get the penalty value for the given data source.""" + return next( + ( + p.data_source_mismatch_penalty + for p in find_metadata_source_plugins() + if p.data_source == data_source + ), + MetadataSourcePlugin.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY, + ) + + +class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta): + """A plugin that provides metadata from a specific source. + + This base class implements a contract for plugins that provide metadata + from a specific source. 
The plugin must implement the methods to search for albums + and tracks, and to retrieve album and track information by ID. + """ + + DEFAULT_DATA_SOURCE_MISMATCH_PENALTY = 0.5 + + @cached_classproperty + def data_source(cls) -> str: + """The data source name for this plugin. + + This is inferred from the plugin name. + """ + return cls.__name__.replace("Plugin", "") # type: ignore[attr-defined] + + @cached_property + def data_source_mismatch_penalty(self) -> float: + try: + return self.config["source_weight"].as_number() + except NotFoundError: + return self.config["data_source_mismatch_penalty"].as_number() + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.config.add( + { + "search_limit": 5, + "data_source_mismatch_penalty": self.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY, # noqa: E501 + } + ) + + @abc.abstractmethod + def album_for_id(self, album_id: str) -> AlbumInfo | None: + """Return :py:class:`AlbumInfo` object or None if no matching release was + found.""" + raise NotImplementedError + + @abc.abstractmethod + def track_for_id(self, track_id: str) -> TrackInfo | None: + """Return a :py:class:`TrackInfo` object or None if no matching release was + found. + """ + raise NotImplementedError + + # ---------------------------------- search ---------------------------------- # + + @abc.abstractmethod + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ) -> Iterable[AlbumInfo]: + """Return :py:class:`AlbumInfo` candidates that match the given album. + + Used in the autotag functionality to search for albums. 
+ + :param items: List of items in the album + :param artist: Album artist + :param album: Album name + :param va_likely: Whether the album is likely to be by various artists + """ + raise NotImplementedError + + @abc.abstractmethod + def item_candidates( + self, item: Item, artist: str, title: str + ) -> Iterable[TrackInfo]: + """Return :py:class:`TrackInfo` candidates that match the given track. + + Used in the autotag functionality to search for tracks. + + :param item: Track item + :param artist: Track artist + :param title: Track title + """ + raise NotImplementedError + + def albums_for_ids(self, ids: Sequence[str]) -> Iterable[AlbumInfo | None]: + """Batch lookup of album metadata for a list of album IDs. + + Given a list of album identifiers, yields corresponding AlbumInfo objects. + Missing albums result in None values in the output iterator. + Plugins may implement this for optimized batched lookups instead of + single calls to album_for_id. + """ + + return (self.album_for_id(id) for id in ids) + + def tracks_for_ids(self, ids: Sequence[str]) -> Iterable[TrackInfo | None]: + """Batch lookup of track metadata for a list of track IDs. + + Given a list of track identifiers, yields corresponding TrackInfo objects. + Missing tracks result in None values in the output iterator. + Plugins may implement this for optimized batched lookups instead of + single calls to track_for_id. + """ + + return (self.track_for_id(id) for id in ids) + + def _extract_id(self, url: str) -> str | None: + """Extract an ID from a URL for this metadata source plugin. + + Uses the plugin's data source name to determine the ID format and + extracts the ID from a given URL. 
+ """ + return extract_release_id(self.data_source, url) + + @staticmethod + def get_artist( + artists: Iterable[dict[str | int, str]], + id_key: str | int = "id", + name_key: str | int = "name", + join_key: str | int | None = None, + ) -> tuple[str, str | None]: + """Returns an artist string (all artists) and an artist_id (the main + artist) for a list of artist object dicts. + + For each artist, this function moves articles (such as 'a', 'an', and 'the') + to the front. It returns a tuple containing the comma-separated string + of all normalized artists and the ``id`` of the main/first artist. + Alternatively a keyword can be used to combine artists together into a + single string by passing the join_key argument. + + :param artists: Iterable of artist dicts or lists returned by API. + :param id_key: Key or index corresponding to the value of ``id`` for + the main/first artist. Defaults to 'id'. + :param name_key: Key or index corresponding to values of names + to concatenate for the artist string (containing all artists). + Defaults to 'name'. + :param join_key: Key or index corresponding to a field containing a + keyword to use for combining artists into a single string, for + example "Feat.", "Vs.", "And" or similar. The default is None + which keeps the default behaviour (comma-separated). + :return: Normalized artist string. + """ + artist_id = None + artist_string = "" + artists = list(artists) # In case a generator was passed. + total = len(artists) + for idx, artist in enumerate(artists): + if not artist_id: + artist_id = artist[id_key] + name = artist[name_key] + # Move articles to the front. + name = re.sub(r"^(.*?), (a|an|the)$", r"\2 \1", name, flags=re.I) + # Use a join keyword if requested and available. + if idx < (total - 1): # Skip joining on last. 
+ if join_key and artist.get(join_key, None): + name += f" {artist[join_key]} " + else: + name += ", " + artist_string += name + + return artist_string, artist_id + + +class IDResponse(TypedDict): + """Response from the API containing an ID.""" + + id: str + + +class SearchFilter(TypedDict): + artist: NotRequired[str] + album: NotRequired[str] + + +R = TypeVar("R", bound=IDResponse) + + +class SearchApiMetadataSourcePlugin( + Generic[R], MetadataSourcePlugin, metaclass=abc.ABCMeta +): + """Helper class to implement a metadata source plugin with an API. + + Plugins using this ABC must implement an API search method to + retrieve album and track information by ID, + i.e. `album_for_id` and `track_for_id`, and a search method to + perform a search on the API. The search method should return a list + of identifiers for the requested type (album or track). + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.config.add( + { + "search_query_ascii": False, + } + ) + + @abc.abstractmethod + def _search_api( + self, + query_type: Literal["album", "track"], + filters: SearchFilter, + query_string: str = "", + ) -> Sequence[R]: + """Perform a search on the API. + + :param query_type: The type of query to perform. + :param filters: A dictionary of filters to apply to the search. + :param query_string: Additional query to include in the search. + + Should return a list of identifiers for the requested type (album or track). 
+ """ + raise NotImplementedError + + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ) -> Iterable[AlbumInfo]: + query_filters: SearchFilter = {} + if album: + query_filters["album"] = album + if not va_likely: + query_filters["artist"] = artist + + results = self._search_api("album", query_filters) + if not results: + return [] + + return filter( + None, self.albums_for_ids([result["id"] for result in results]) + ) + + def item_candidates( + self, item: Item, artist: str, title: str + ) -> Iterable[TrackInfo]: + results = self._search_api( + "track", {"artist": artist}, query_string=title + ) + if not results: + return [] + + return filter( + None, + self.tracks_for_ids([result["id"] for result in results if result]), + ) + + def _construct_search_query( + self, filters: SearchFilter, query_string: str + ) -> str: + """Construct a query string with the specified filters and keywords to + be provided to the spotify (or similar) search API. + + The returned format was initially designed for spotify's search API but + we found is also useful with other APIs that support similar query structures. + see `spotify `_ + and `deezer `_. + + :param filters: Field filters to apply. + :param query_string: Query keywords to use. + :return: Query string to be provided to the search API. 
+ """ + + components = [query_string, *(f"{k}:'{v}'" for k, v in filters.items())] + query = " ".join(filter(None, components)) + + if self.config["search_query_ascii"].get(): + query = unidecode.unidecode(query) + + return query diff --git a/beets/plugins.py b/beets/plugins.py index d33458825..0dc2754b9 100644 --- a/beets/plugins.py +++ b/beets/plugins.py @@ -20,37 +20,28 @@ import abc import inspect import re import sys -import traceback from collections import defaultdict -from collections.abc import Iterable -from functools import wraps -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Generic, - Sequence, - TypedDict, - TypeVar, -) +from functools import cached_property, wraps +from importlib import import_module +from pathlib import Path +from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar import mediafile +from typing_extensions import ParamSpec import beets from beets import logging - -if sys.version_info >= (3, 10): - from typing import ParamSpec -else: - from typing_extensions import ParamSpec - +from beets.util import unique_list +from beets.util.deprecation import deprecate_for_maintainers, deprecate_for_user if TYPE_CHECKING: + from collections.abc import Callable, Iterable, Sequence + from confuse import ConfigView - from beets.autotag import AlbumInfo, Distance, TrackInfo from beets.dbcore import Query - from beets.dbcore.db import FieldQueryType, SQLiteType + from beets.dbcore.db import FieldQueryType + from beets.dbcore.types import Type from beets.importer import ImportSession, ImportTask from beets.library import Album, Item, Library from beets.ui import Subcommand @@ -64,21 +55,53 @@ if TYPE_CHECKING: AnyModel = TypeVar("AnyModel", Album, Item) + P = ParamSpec("P") + Ret = TypeVar("Ret", bound=Any) + Listener = Callable[..., Any] + IterF = Callable[P, Iterable[Ret]] + PLUGIN_NAMESPACE = "beetsplug" # Plugins using the Last.fm API can share the same API key. 
LASTFM_KEY = "2dc3914abf35f0d9c92d97d8f8e42b43" +EventType = Literal[ + "after_write", + "album_imported", + "album_removed", + "albuminfo_received", + "album_matched", + "before_choose_candidate", + "before_item_moved", + "cli_exit", + "database_change", + "import", + "import_begin", + "import_task_apply", + "import_task_before_choice", + "import_task_choice", + "import_task_created", + "import_task_files", + "import_task_start", + "item_copied", + "item_hardlinked", + "item_imported", + "item_linked", + "item_moved", + "item_reflinked", + "item_removed", + "library_opened", + "mb_album_extract", + "mb_track_extract", + "pluginload", + "trackinfo_received", + "write", +] # Global logger. log = logging.getLogger("beets") -P = ParamSpec("P") -Ret = TypeVar("Ret", bound=Any) -Listener = Callable[..., None] - - class PluginConflictError(Exception): """Indicates that the services provided by one plugin conflict with those of another. @@ -87,6 +110,17 @@ class PluginConflictError(Exception): """ +class PluginImportError(ImportError): + """Indicates that a plugin could not be imported. + + This is a subclass of ImportError so that it can be caught separately + from other errors. + """ + + def __init__(self, name: str): + super().__init__(f"Could not import plugin {name}") + + class PluginLogFilter(logging.Filter): """A logging filter that identifies the plugin that emitted a log message. @@ -98,34 +132,96 @@ class PluginLogFilter(logging.Filter): def filter(self, record): if hasattr(record.msg, "msg") and isinstance(record.msg.msg, str): # A _LogMessage from our hacked-up Logging replacement. - record.msg.msg = self.prefix + record.msg.msg + record.msg.msg = f"{self.prefix}{record.msg.msg}" elif isinstance(record.msg, str): - record.msg = self.prefix + record.msg + record.msg = f"{self.prefix}{record.msg}" return True # Managing the plugins themselves. -class BeetsPlugin: +class BeetsPlugin(metaclass=abc.ABCMeta): """The base class for all beets plugins. 
Plugins provide functionality by defining a subclass of BeetsPlugin and overriding the abstract methods defined here. """ + _raw_listeners: ClassVar[dict[EventType, list[Listener]]] = defaultdict( + list + ) + listeners: ClassVar[dict[EventType, list[Listener]]] = defaultdict(list) + template_funcs: ClassVar[TFuncMap[str]] | TFuncMap[str] = {} # type: ignore[valid-type] + template_fields: ClassVar[TFuncMap[Item]] | TFuncMap[Item] = {} # type: ignore[valid-type] + album_template_fields: ClassVar[TFuncMap[Album]] | TFuncMap[Album] = {} # type: ignore[valid-type] + name: str config: ConfigView early_import_stages: list[ImportStageFunc] import_stages: list[ImportStageFunc] + def __init_subclass__(cls) -> None: + """Enable legacy metadata‐source plugins to work with the new interface. + + When a plugin subclass of BeetsPlugin defines a `data_source` attribute + but does not inherit from MetadataSourcePlugin, this hook: + + 1. Skips abstract classes. + 2. Warns that the class should extend MetadataSourcePlugin (deprecation). + 3. Copies any nonabstract methods from MetadataSourcePlugin onto the + subclass to provide the full plugin API. + + This compatibility layer will be removed in the v3.0.0 release. + """ + # TODO: Remove in v3.0.0 + if inspect.isabstract(cls): + return + + from beets.metadata_plugins import MetadataSourcePlugin + + if issubclass(cls, MetadataSourcePlugin) or not hasattr( + cls, "data_source" + ): + return + + deprecate_for_maintainers( + ( + f"'{cls.__name__}' is used as a legacy metadata source since it" + " inherits 'beets.plugins.BeetsPlugin'. 
Support for this" + ), + "'beets.metadata_plugins.MetadataSourcePlugin'", + stacklevel=3, + ) + + method: property | cached_property[Any] | Callable[..., Any] + for name, method in inspect.getmembers( + MetadataSourcePlugin, + predicate=lambda f: ( # type: ignore[arg-type] + ( + isinstance(f, (property, cached_property)) + and not hasattr( + BeetsPlugin, + getattr(f, "attrname", None) or f.fget.__name__, # type: ignore[union-attr] + ) + ) + or ( + inspect.isfunction(f) + and f.__name__ + and not getattr(f, "__isabstractmethod__", False) + and not hasattr(BeetsPlugin, f.__name__) + ) + ), + ): + setattr(cls, name, method) + def __init__(self, name: str | None = None): """Perform one-time plugin setup.""" self.name = name or self.__module__.split(".")[-1] self.config = beets.config[self.name] - # Set class attributes if they are not already set - # for the type of plugin. + # If the class attributes are not set, initialize as instance attributes. + # TODO: Revise with v3.0.0, see also type: ignore[valid-type] above if not self.template_funcs: self.template_funcs = {} if not self.template_fields: @@ -141,6 +237,40 @@ class BeetsPlugin: if not any(isinstance(f, PluginLogFilter) for f in self._log.filters): self._log.addFilter(PluginLogFilter(self)) + # In order to verify the config we need to make sure the plugin is fully + # configured (plugins usually add the default configuration *after* + # calling super().__init__()). + self.register_listener("pluginload", self._verify_config) + + def _verify_config(self, *_, **__) -> None: + """Verify plugin configuration. + + If deprecated 'source_weight' option is explicitly set by the user, they + will see a warning in the logs. Otherwise, this must be configured by + a third party plugin, thus we raise a deprecation warning which won't be + shown to user but will be visible to plugin developers. 
+ """ + # TODO: Remove in v3.0.0 + if ( + not hasattr(self, "data_source") + or "source_weight" not in self.config + ): + return + + for source in self.config.root().sources: + if "source_weight" in (source.get(self.name) or {}): + if source.filename: # user config + deprecate_for_user( + self._log, + f"'{self.name}.source_weight' configuration option", + f"'{self.name}.data_source_mismatch_penalty'", + ) + else: # 3rd-party plugin config + deprecate_for_maintainers( + "'source_weight' configuration option", + "'data_source_mismatch_penalty'", + ) + def commands(self) -> Sequence[Subcommand]: """Should return a list of beets.ui.Subcommand objects for commands that should be added to beets' CLI. @@ -210,67 +340,6 @@ class BeetsPlugin: """Return a dict mapping prefixes to Query subclasses.""" return {} - def track_distance( - self, - item: Item, - info: TrackInfo, - ) -> Distance: - """Should return a Distance object to be added to the - distance for every track comparison. - """ - from beets.autotag.hooks import Distance - - return Distance() - - def album_distance( - self, - items: list[Item], - album_info: AlbumInfo, - mapping: dict[Item, TrackInfo], - ) -> Distance: - """Should return a Distance object to be added to the - distance for every album-level comparison. - """ - from beets.autotag.hooks import Distance - - return Distance() - - def candidates( - self, - items: list[Item], - artist: str, - album: str, - va_likely: bool, - extra_tags: dict[str, Any] | None = None, - ) -> Sequence[AlbumInfo]: - """Should return a sequence of AlbumInfo objects that match the - album whose items are provided. - """ - return () - - def item_candidates( - self, - item: Item, - artist: str, - title: str, - ) -> Sequence[TrackInfo]: - """Should return a sequence of TrackInfo objects that match the - item provided. - """ - return () - - def album_for_id(self, album_id: str) -> AlbumInfo | None: - """Return an AlbumInfo object or None if no matching release was - found. 
- """ - return None - - def track_for_id(self, track_id: str) -> TrackInfo | None: - """Return a TrackInfo object or None if no matching release was - found. - """ - return None - def add_media_field( self, name: str, descriptor: mediafile.MediaField ) -> None: @@ -287,25 +356,13 @@ class BeetsPlugin: mediafile.MediaFile.add_field(name, descriptor) library.Item._media_fields.add(name) - _raw_listeners: dict[str, list[Listener]] | None = None - listeners: dict[str, list[Listener]] | None = None - - def register_listener(self, event: str, func: Listener) -> None: + def register_listener(self, event: EventType, func: Listener) -> None: """Add a function as a listener for the specified event.""" - wrapped_func = self._set_log_level_and_params(logging.WARNING, func) - - cls = self.__class__ - - if cls.listeners is None or cls._raw_listeners is None: - cls._raw_listeners = defaultdict(list) - cls.listeners = defaultdict(list) - if func not in cls._raw_listeners[event]: - cls._raw_listeners[event].append(func) - cls.listeners[event].append(wrapped_func) - - template_funcs: TFuncMap[str] | None = None - template_fields: TFuncMap[Item] | None = None - album_template_fields: TFuncMap[Album] | None = None + if func not in self._raw_listeners[event]: + self._raw_listeners[event].append(func) + self.listeners[event].append( + self._set_log_level_and_params(logging.WARNING, func) + ) @classmethod def template_func(cls, name: str) -> Callable[[TFunc[str]], TFunc[str]]: @@ -315,8 +372,6 @@ class BeetsPlugin: """ def helper(func: TFunc[str]) -> TFunc[str]: - if cls.template_funcs is None: - cls.template_funcs = {} cls.template_funcs[name] = func return func @@ -331,74 +386,113 @@ class BeetsPlugin: """ def helper(func: TFunc[Item]) -> TFunc[Item]: - if cls.template_fields is None: - cls.template_fields = {} cls.template_fields[name] = func return func return helper -_classes: set[type[BeetsPlugin]] = set() +def get_plugin_names() -> list[str]: + """Discover and return the set of 
plugin names to be loaded. - -def load_plugins(names: Sequence[str] = ()) -> None: - """Imports the modules for a sequence of plugin names. Each name - must be the name of a Python module under the "beetsplug" namespace - package in sys.path; the module indicated should contain the - BeetsPlugin subclasses desired. + Configures the plugin search paths and resolves the final set of plugins + based on configuration settings, inclusion filters, and exclusion rules. + Automatically includes the musicbrainz plugin when enabled in configuration. """ - for name in names: - modname = f"{PLUGIN_NAMESPACE}.{name}" + paths = [ + str(Path(p).expanduser().absolute()) + for p in beets.config["pluginpath"].as_str_seq(split=False) + ] + log.debug("plugin paths: {}", paths) + + # Extend the `beetsplug` package to include the plugin paths. + import beetsplug + + beetsplug.__path__ = paths + list(beetsplug.__path__) + + # For backwards compatibility, also support plugin paths that + # *contain* a `beetsplug` package. + sys.path += paths + plugins = unique_list(beets.config["plugins"].as_str_seq()) + beets.config.add({"disabled_plugins": []}) + disabled_plugins = set(beets.config["disabled_plugins"].as_str_seq()) + # TODO: Remove in v3.0.0 + mb_enabled = beets.config["musicbrainz"].flatten().get("enabled") + if mb_enabled: + deprecate_for_user( + log, + "'musicbrainz.enabled' configuration option", + "'plugins' configuration to explicitly add 'musicbrainz'", + ) + if "musicbrainz" not in plugins: + plugins.append("musicbrainz") + elif mb_enabled is False: + deprecate_for_user(log, "'musicbrainz.enabled' configuration option") + disabled_plugins.add("musicbrainz") + + return [p for p in plugins if p not in disabled_plugins] + + +def _get_plugin(name: str) -> BeetsPlugin | None: + """Dynamically load and instantiate a plugin class by name. + + Attempts to import the plugin module, locate the appropriate plugin class + within it, and return an instance. 
Handles import failures gracefully and + logs warnings for missing plugins or loading errors. + + Note we load the *last* plugin class found in the plugin namespace. This + allows plugins to define helper classes that inherit from BeetsPlugin + without those being loaded as the main plugin class. + + Returns None if the plugin could not be loaded for any reason. + """ + try: try: - try: - namespace = __import__(modname, None, None) - except ImportError as exc: - # Again, this is hacky: - if exc.args[0].endswith(" " + name): - log.warning("** plugin {0} not found", name) - else: - raise - else: - for obj in getattr(namespace, name).__dict__.values(): - if ( - isinstance(obj, type) - and issubclass(obj, BeetsPlugin) - and obj != BeetsPlugin - and obj != MetadataSourcePlugin - and obj not in _classes - ): - _classes.add(obj) + namespace = import_module(f"{PLUGIN_NAMESPACE}.{name}") + except Exception as exc: + raise PluginImportError(name) from exc - except Exception: - log.warning( - "** error loading plugin {}:\n{}", - name, - traceback.format_exc(), - ) + for obj in reversed(namespace.__dict__.values()): + if ( + inspect.isclass(obj) + and issubclass(obj, BeetsPlugin) + and obj != BeetsPlugin + and not inspect.isabstract(obj) + # Only consider this plugin's module or submodules to avoid + # conflicts when plugins import other BeetsPlugin classes + and ( + obj.__module__ == namespace.__name__ + or obj.__module__.startswith(f"{namespace.__name__}.") + ) + ): + return obj() + + except Exception: + log.warning("** error loading plugin {}", name, exc_info=True) + + return None -_instances: dict[type[BeetsPlugin], BeetsPlugin] = {} +_instances: list[BeetsPlugin] = [] -def find_plugins() -> list[BeetsPlugin]: - """Returns a list of BeetsPlugin subclass instances from all - currently loaded beets plugins. Loads the default plugin set - first. +def load_plugins() -> None: + """Initialize the plugin system by loading all configured plugins. 
+ + Performs one-time plugin discovery and instantiation, storing loaded plugin + instances globally. Emits a pluginload event after successful initialization + to notify other components. """ - if _instances: - # After the first call, use cached instances for performance reasons. - # See https://github.com/beetbox/beets/pull/3810 - return list(_instances.values()) + if not _instances: + names = get_plugin_names() + log.debug("Loading plugins: {}", ", ".join(sorted(names))) + _instances.extend(filter(None, map(_get_plugin, names))) - load_plugins() - plugins = [] - for cls in _classes: - # Only instantiate each plugin class once. - if cls not in _instances: - _instances[cls] = cls() - plugins.append(_instances[cls]) - return plugins + send("pluginload") + + +def find_plugins() -> Iterable[BeetsPlugin]: + return _instances # Communication with plugins. @@ -422,103 +516,53 @@ def queries() -> dict[str, type[Query]]: return out -def types(model_cls: type[AnyModel]) -> dict[str, type[SQLiteType]]: - # Gives us `item_types` and `album_types` +def types(model_cls: type[AnyModel]) -> dict[str, Type]: + """Return mapping between flex field names and types for the given model.""" attr_name = f"{model_cls.__name__.lower()}_types" - types: dict[str, type[SQLiteType]] = {} + types: dict[str, Type] = {} for plugin in find_plugins(): plugin_types = getattr(plugin, attr_name, {}) for field in plugin_types: if field in types and plugin_types[field] != types[field]: raise PluginConflictError( - "Plugin {} defines flexible field {} " + f"Plugin {plugin.name} defines flexible field {field} " "which has already been defined with " - "another type.".format(plugin.name, field) + "another type." ) types.update(plugin_types) return types def named_queries(model_cls: type[AnyModel]) -> dict[str, FieldQueryType]: - # Gather `item_queries` and `album_queries` from the plugins. 
+ """Return mapping between field names and queries for the given model.""" attr_name = f"{model_cls.__name__.lower()}_queries" - queries: dict[str, FieldQueryType] = {} - for plugin in find_plugins(): - plugin_queries = getattr(plugin, attr_name, {}) - queries.update(plugin_queries) - return queries + return { + field: query + for plugin in find_plugins() + for field, query in getattr(plugin, attr_name, {}).items() + } -def track_distance(item: Item, info: TrackInfo) -> Distance: - """Gets the track distance calculated by all loaded plugins. - Returns a Distance object. +def notify_info_yielded( + event: EventType, +) -> Callable[[IterF[P, Ret]], IterF[P, Ret]]: + """Makes a generator send the event 'event' every time it yields. + This decorator is supposed to decorate a generator, but any function + returning an iterable should work. + Each yielded value is passed to plugins using the 'info' parameter of + 'send'. """ - from beets.autotag.hooks import Distance - dist = Distance() - for plugin in find_plugins(): - dist.update(plugin.track_distance(item, info)) - return dist + def decorator(func: IterF[P, Ret]) -> IterF[P, Ret]: + @wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Iterable[Ret]: + for v in func(*args, **kwargs): + send(event, info=v) + yield v + return wrapper -def album_distance( - items: list[Item], - album_info: AlbumInfo, - mapping: dict[Item, TrackInfo], -) -> Distance: - """Returns the album distance calculated by plugins.""" - from beets.autotag.hooks import Distance - - dist = Distance() - for plugin in find_plugins(): - dist.update(plugin.album_distance(items, album_info, mapping)) - return dist - - -def candidates( - items: list[Item], - artist: str, - album: str, - va_likely: bool, - extra_tags: dict[str, Any] | None = None, -) -> Iterable[AlbumInfo]: - """Gets MusicBrainz candidates for an album from each plugin.""" - for plugin in find_plugins(): - yield from plugin.candidates( - items, artist, album, va_likely, 
extra_tags - ) - - -def item_candidates(item: Item, artist: str, title: str) -> Iterable[TrackInfo]: - """Gets MusicBrainz candidates for an item from the plugins.""" - for plugin in find_plugins(): - yield from plugin.item_candidates(item, artist, title) - - -def album_for_id(_id: str) -> AlbumInfo | None: - """Get AlbumInfo object for the given ID string. - - A single ID can yield just a single album, so we return the first match. - """ - for plugin in find_plugins(): - if info := plugin.album_for_id(_id): - send("albuminfo_received", info=info) - return info - - return None - - -def track_for_id(_id: str) -> TrackInfo | None: - """Get TrackInfo object for the given ID string. - - A single ID can yield just a single track, so we return the first match. - """ - for plugin in find_plugins(): - if info := plugin.track_for_id(_id): - send("trackinfo_received", info=info) - return info - - return None + return decorator def template_funcs() -> TFuncMap[str]: @@ -527,8 +571,7 @@ def template_funcs() -> TFuncMap[str]: """ funcs: TFuncMap[str] = {} for plugin in find_plugins(): - if plugin.template_funcs: - funcs.update(plugin.template_funcs) + funcs.update(plugin.template_funcs) return funcs @@ -554,21 +597,20 @@ F = TypeVar("F") def _check_conflicts_and_merge( - plugin: BeetsPlugin, plugin_funcs: dict[str, F] | None, funcs: dict[str, F] + plugin: BeetsPlugin, plugin_funcs: dict[str, F], funcs: dict[str, F] ) -> None: """Check the provided template functions for conflicts and merge into funcs. Raises a `PluginConflictError` if a plugin defines template functions for fields that another plugin has already defined template functions for. """ - if plugin_funcs: - if not plugin_funcs.keys().isdisjoint(funcs.keys()): - conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys()) - raise PluginConflictError( - f"Plugin {plugin.name} defines template functions for " - f"{conflicted_fields} that conflict with another plugin." 
- ) - funcs.update(plugin_funcs) + if not plugin_funcs.keys().isdisjoint(funcs.keys()): + conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys()) + raise PluginConflictError( + f"Plugin {plugin.name} defines template functions for " + f"{conflicted_fields} that conflict with another plugin." + ) + funcs.update(plugin_funcs) def item_field_getters() -> TFuncMap[Item]: @@ -592,19 +634,7 @@ def album_field_getters() -> TFuncMap[Album]: # Event dispatch. -def event_handlers() -> dict[str, list[Listener]]: - """Find all event handlers from plugins as a dictionary mapping - event names to sequences of callables. - """ - all_handlers: dict[str, list[Listener]] = defaultdict(list) - for plugin in find_plugins(): - if plugin.listeners: - for event, handlers in plugin.listeners.items(): - all_handlers[event] += handlers - return all_handlers - - -def send(event: str, **arguments: Any) -> list[Any]: +def send(event: EventType, **arguments: Any) -> list[Any]: """Send an event to all assigned event listeners. `event` is the name of the event to send, all other named arguments @@ -612,129 +642,32 @@ def send(event: str, **arguments: Any) -> list[Any]: Return a list of non-None values returned from the handlers. """ - log.debug("Sending event: {0}", event) - results: list[Any] = [] - for handler in event_handlers()[event]: - result = handler(**arguments) - if result is not None: - results.append(result) - return results + log.debug("Sending event: {}", event) + return [ + r + for handler in BeetsPlugin.listeners[event] + if (r := handler(**arguments)) is not None + ] -def feat_tokens(for_artist: bool = True) -> str: +def feat_tokens( + for_artist: bool = True, custom_words: list[str] | None = None +) -> str: """Return a regular expression that matches phrases like "featuring" that separate a main artist or a song title from secondary artists. The `for_artist` option determines whether the regex should be suitable for matching artist fields (the default) or title fields. 
""" feat_words = ["ft", "featuring", "feat", "feat.", "ft."] + if isinstance(custom_words, list): + feat_words += custom_words if for_artist: feat_words += ["with", "vs", "and", "con", "&"] - return r"(?<=[\s(\[])(?:{})(?=\s)".format( - "|".join(re.escape(x) for x in feat_words) + return ( + rf"(?<=[\s(\[])(?:{'|'.join(re.escape(x) for x in feat_words)})(?=\s)" ) -def sanitize_choices( - choices: Sequence[str], choices_all: Sequence[str] -) -> list[str]: - """Clean up a stringlist configuration attribute: keep only choices - elements present in choices_all, remove duplicate elements, expand '*' - wildcard while keeping original stringlist order. - """ - seen: set[str] = set() - others = [x for x in choices_all if x not in choices] - res: list[str] = [] - for s in choices: - if s not in seen: - if s in list(choices_all): - res.append(s) - elif s == "*": - res.extend(others) - seen.add(s) - return res - - -def sanitize_pairs( - pairs: Sequence[tuple[str, str]], pairs_all: Sequence[tuple[str, str]] -) -> list[tuple[str, str]]: - """Clean up a single-element mapping configuration attribute as returned - by Confuse's `Pairs` template: keep only two-element tuples present in - pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*') - wildcards while keeping the original order. Note that ('*', '*') and - ('*', 'whatever') have the same effect. - - For example, - - >>> sanitize_pairs( - ... [('foo', 'baz bar'), ('key', '*'), ('*', '*')], - ... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'), - ... ('key', 'value')] - ... 
) - [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')] - """ - pairs_all: list[tuple[str, str]] = list(pairs_all) - seen: set[tuple[str, str]] = set() - others = [x for x in pairs_all if x not in pairs] - res: list[tuple[str, str]] = [] - for k, values in pairs: - for v in values.split(): - x = (k, v) - if x in pairs_all: - if x not in seen: - seen.add(x) - res.append(x) - elif k == "*": - new = [o for o in others if o not in seen] - seen.update(new) - res.extend(new) - elif v == "*": - new = [o for o in others if o not in seen and o[0] == k] - seen.update(new) - res.extend(new) - return res - - -IterF = Callable[P, Iterable[Ret]] - - -def notify_info_yielded( - event: str, -) -> Callable[[IterF[P, Ret]], IterF[P, Ret]]: - """Makes a generator send the event 'event' every time it yields. - This decorator is supposed to decorate a generator, but any function - returning an iterable should work. - Each yielded value is passed to plugins using the 'info' parameter of - 'send'. - """ - - def decorator( - generator: IterF[P, Ret], - ) -> IterF[P, Ret]: - def decorated(*args: P.args, **kwargs: P.kwargs) -> Iterable[Ret]: - for v in generator(*args, **kwargs): - send(event, info=v) - yield v - - return decorated - - return decorator - - -def get_distance( - config: ConfigView, data_source: str, info: AlbumInfo | TrackInfo -) -> Distance: - """Returns the ``data_source`` weight and the maximum source weight - for albums or individual tracks. - """ - from beets.autotag.hooks import Distance - - dist = Distance() - if info.data_source == data_source: - dist.add("source", config["source_weight"].as_number()) - return dist - - def apply_item_changes( lib: Library, item: Item, move: bool, pretend: bool, write: bool ) -> None: @@ -760,203 +693,3 @@ def apply_item_changes( item.try_write() item.store() - - -class Response(TypedDict): - """A dictionary with the response of a plugin API call. 
- - May be extended by plugins to include additional information, but `id` - is required. - """ - - id: str - - -class RegexDict(TypedDict): - """A dictionary containing a regex pattern and the number of the - match group. - """ - - pattern: str - match_group: int - - -R = TypeVar("R", bound=Response) - - -class MetadataSourcePlugin(Generic[R], BeetsPlugin, metaclass=abc.ABCMeta): - def __init__(self): - super().__init__() - self.config.add({"source_weight": 0.5}) - - @property - @abc.abstractmethod - def id_regex(self) -> RegexDict: - raise NotImplementedError - - @property - @abc.abstractmethod - def data_source(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def search_url(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def album_url(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def track_url(self) -> str: - raise NotImplementedError - - @abc.abstractmethod - def _search_api( - self, - query_type: str, - filters: dict[str, str] | None, - keywords: str = "", - ) -> Sequence[R]: - raise NotImplementedError - - @abc.abstractmethod - def album_for_id(self, album_id: str) -> AlbumInfo | None: - raise NotImplementedError - - @abc.abstractmethod - def track_for_id( - self, track_id: str | None = None, track_data: R | None = None - ) -> TrackInfo | None: - raise NotImplementedError - - @staticmethod - def get_artist( - artists, - id_key: str | int = "id", - name_key: str | int = "name", - join_key: str | int | None = None, - ) -> tuple[str, str | None]: - """Returns an artist string (all artists) and an artist_id (the main - artist) for a list of artist object dicts. - - For each artist, this function moves articles (such as 'a', 'an', - and 'the') to the front and strips trailing disambiguation numbers. It - returns a tuple containing the comma-separated string of all - normalized artists and the ``id`` of the main/first artist. 
- Alternatively a keyword can be used to combine artists together into a - single string by passing the join_key argument. - - :param artists: Iterable of artist dicts or lists returned by API. - :type artists: list[dict] or list[list] - :param id_key: Key or index corresponding to the value of ``id`` for - the main/first artist. Defaults to 'id'. - :param name_key: Key or index corresponding to values of names - to concatenate for the artist string (containing all artists). - Defaults to 'name'. - :param join_key: Key or index corresponding to a field containing a - keyword to use for combining artists into a single string, for - example "Feat.", "Vs.", "And" or similar. The default is None - which keeps the default behaviour (comma-separated). - :return: Normalized artist string. - """ - artist_id = None - artist_string = "" - artists = list(artists) # In case a generator was passed. - total = len(artists) - for idx, artist in enumerate(artists): - if not artist_id: - artist_id = artist[id_key] - name = artist[name_key] - # Strip disambiguation number. - name = re.sub(r" \(\d+\)$", "", name) - # Move articles to the front. - name = re.sub(r"^(.*?), (a|an|the)$", r"\2 \1", name, flags=re.I) - # Use a join keyword if requested and available. - if idx < (total - 1): # Skip joining on last. - if join_key and artist.get(join_key, None): - name += f" {artist[join_key]} " - else: - name += ", " - artist_string += name - - return artist_string, artist_id - - @staticmethod - def _get_id(url_type: str, id_: str, id_regex: RegexDict) -> str | None: - """Parse an ID from its URL if necessary. - - :param url_type: Type of URL. Either 'album' or 'track'. - :param id_: Album/track ID or URL. - :param id_regex: A dictionary containing a regular expression - extracting an ID from an URL (if it's not an ID already) in - 'pattern' and the number of the match group in 'match_group'. - :return: Album/track ID. 
- """ - log.debug("Extracting {} ID from '{}'", url_type, id_) - match = re.search(id_regex["pattern"].format(url_type), str(id_)) - if match: - id_ = match.group(id_regex["match_group"]) - if id_: - return id_ - return None - - def candidates( - self, - items: list[Item], - artist: str, - album: str, - va_likely: bool, - extra_tags: dict[str, Any] | None = None, - ) -> Sequence[AlbumInfo]: - """Returns a list of AlbumInfo objects for Search API results - matching an ``album`` and ``artist`` (if not various). - - :param items: List of items comprised by an album to be matched. - :param artist: The artist of the album to be matched. - :param album: The name of the album to be matched. - :param va_likely: True if the album to be matched likely has - Various Artists. - """ - query_filters = {"album": album} - if not va_likely: - query_filters["artist"] = artist - results = self._search_api(query_type="album", filters=query_filters) - albums = [self.album_for_id(album_id=r["id"]) for r in results] - return [a for a in albums if a is not None] - - def item_candidates( - self, item: Item, artist: str, title: str - ) -> Sequence[TrackInfo]: - """Returns a list of TrackInfo objects for Search API results - matching ``title`` and ``artist``. - - :param item: Singleton item to be matched. - :param artist: The artist of the track to be matched. - :param title: The title of the track to be matched. 
- """ - track_responses = self._search_api( - query_type="track", keywords=title, filters={"artist": artist} - ) - - tracks = [self.track_for_id(track_data=r) for r in track_responses] - - return [t for t in tracks if t is not None] - - def album_distance( - self, - items: list[Item], - album_info: AlbumInfo, - mapping: dict[Item, TrackInfo], - ) -> Distance: - return get_distance( - data_source=self.data_source, info=album_info, config=self.config - ) - - def track_distance(self, item: Item, info: TrackInfo) -> Distance: - return get_distance( - data_source=self.data_source, info=info, config=self.config - ) diff --git a/beets/py.typed b/beets/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/beets/test/_common.py b/beets/test/_common.py index 757d461bd..487f7c442 100644 --- a/beets/test/_common.py +++ b/beets/test/_common.py @@ -63,8 +63,8 @@ HAVE_SYMLINK = sys.platform != "win32" HAVE_HARDLINK = sys.platform != "win32" -def item(lib=None): - i = beets.library.Item( +def item(lib=None, **kwargs): + defaults = dict( title="the title", artist="the artist", albumartist="the album artist", @@ -99,6 +99,7 @@ def item(lib=None): album_id=None, mtime=12345, ) + i = beets.library.Item(**{**defaults, **kwargs}) if lib: lib.add(i) return i @@ -106,38 +107,14 @@ def item(lib=None): # Dummy import session. 
def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False): - cls = commands.TerminalImportSession if cli else importer.ImportSession + cls = ( + commands.import_.session.TerminalImportSession + if cli + else importer.ImportSession + ) return cls(lib, loghandler, paths, query) -class Assertions: - """A mixin with additional unit test assertions.""" - - def assertExists(self, path): - assert os.path.exists(syspath(path)), f"file does not exist: {path!r}" - - def assertNotExists(self, path): - assert not os.path.exists(syspath(path)), f"file exists: {path!r}" - - def assertIsFile(self, path): - self.assertExists(path) - assert os.path.isfile( - syspath(path) - ), "path exists, but is not a regular file: {!r}".format(path) - - def assertIsDir(self, path): - self.assertExists(path) - assert os.path.isdir( - syspath(path) - ), "path exists, but is not a directory: {!r}".format(path) - - def assert_equal_path(self, a, b): - """Check that two paths are equal.""" - a_bytes, b_bytes = util.normpath(a), util.normpath(b) - - assert a_bytes == b_bytes, f"{a_bytes=} != {b_bytes=}" - - # Mock I/O. 
@@ -180,7 +157,7 @@ class DummyIn: self.out = out def add(self, s): - self.buf.append(s + "\n") + self.buf.append(f"{s}\n") def close(self): pass diff --git a/beets/test/helper.py b/beets/test/helper.py index 85ea6bcf7..3cb1e4c3c 100644 --- a/beets/test/helper.py +++ b/beets/test/helper.py @@ -35,6 +35,7 @@ import subprocess import sys import unittest from contextlib import contextmanager +from dataclasses import dataclass from enum import Enum from functools import cached_property from io import StringIO @@ -48,12 +49,12 @@ from mediafile import Image, MediaFile import beets import beets.plugins -from beets import autotag, importer, logging, util +from beets import importer, logging, util from beets.autotag.hooks import AlbumInfo, TrackInfo from beets.importer import ImportSession -from beets.library import Album, Item, Library +from beets.library import Item, Library from beets.test import _common -from beets.ui.commands import TerminalImportSession +from beets.ui.commands.import_.session import TerminalImportSession from beets.util import ( MoveOperation, bytestring_path, @@ -162,15 +163,49 @@ NEEDS_REFLINK = unittest.skipUnless( ) -class TestHelper(_common.Assertions, ConfigMixin): +class IOMixin: + @cached_property + def io(self) -> _common.DummyIO: + return _common.DummyIO() + + def setUp(self): + super().setUp() + self.io.install() + + def tearDown(self): + super().tearDown() + self.io.restore() + + +class TestHelper(ConfigMixin): """Helper mixin for high-level cli and plugin tests. This mixin provides methods to isolate beets' global state provide fixtures. 
""" + resource_path = Path(os.fsdecode(_common.RSRC)) / "full.mp3" + db_on_disk: ClassVar[bool] = False + @cached_property + def temp_dir_path(self) -> Path: + return Path(self.create_temp_dir()) + + @cached_property + def temp_dir(self) -> bytes: + return util.bytestring_path(self.temp_dir_path) + + @cached_property + def lib_path(self) -> Path: + lib_path = self.temp_dir_path / "libdir" + lib_path.mkdir(exist_ok=True) + return lib_path + + @cached_property + def libdir(self) -> bytes: + return bytestring_path(self.lib_path) + # TODO automate teardown through hook registration def setup_beets(self): @@ -193,8 +228,7 @@ class TestHelper(_common.Assertions, ConfigMixin): Make sure you call ``teardown_beets()`` afterwards. """ - self.create_temp_dir() - temp_dir_str = os.fsdecode(self.temp_dir) + temp_dir_str = str(self.temp_dir_path) self.env_patcher = patch.dict( "os.environ", { @@ -204,9 +238,7 @@ class TestHelper(_common.Assertions, ConfigMixin): ) self.env_patcher.start() - self.libdir = os.path.join(self.temp_dir, b"libdir") - os.mkdir(syspath(self.libdir)) - self.config["directory"] = os.fsdecode(self.libdir) + self.config["directory"] = str(self.lib_path) if self.db_on_disk: dbpath = util.bytestring_path(self.config["library"].as_filename()) @@ -214,12 +246,8 @@ class TestHelper(_common.Assertions, ConfigMixin): dbpath = ":memory:" self.lib = Library(dbpath, self.libdir) - # Initialize, but don't install, a DummyIO. - self.io = _common.DummyIO() - def teardown_beets(self): self.env_patcher.stop() - self.io.restore() self.lib._close() self.remove_temp_dir() @@ -238,7 +266,7 @@ class TestHelper(_common.Assertions, ConfigMixin): The item is attached to the database from `self.lib`. 
""" values_ = { - "title": "t\u00eftle {0}", + "title": "t\u00eftle {}", "artist": "the \u00e4rtist", "album": "the \u00e4lbum", "track": 1, @@ -249,7 +277,7 @@ class TestHelper(_common.Assertions, ConfigMixin): values_["db"] = self.lib item = Item(**values_) if "path" not in values: - item["path"] = "audio." + item["format"].lower() + item["path"] = f"audio.{item['format'].lower()}" # mtime needs to be set last since other assignments reset it. item.mtime = 12345 return item @@ -281,7 +309,7 @@ class TestHelper(_common.Assertions, ConfigMixin): item = self.create_item(**values) extension = item["format"].lower() item["path"] = os.path.join( - _common.RSRC, util.bytestring_path("min." + extension) + _common.RSRC, util.bytestring_path(f"min.{extension}") ) item.add(self.lib) item.move(operation=MoveOperation.COPY) @@ -296,7 +324,7 @@ class TestHelper(_common.Assertions, ConfigMixin): """Add a number of items with files to the database.""" # TODO base this on `add_item()` items = [] - path = os.path.join(_common.RSRC, util.bytestring_path("full." + ext)) + path = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}")) for i in range(count): item = Item.from_path(path) item.album = f"\u00e4lbum {i}" # Check unicode paths @@ -343,7 +371,7 @@ class TestHelper(_common.Assertions, ConfigMixin): specified extension a cover art image is added to the media file. """ - src = os.path.join(_common.RSRC, util.bytestring_path("full." + ext)) + src = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}")) handle, path = mkstemp(dir=self.temp_dir) path = bytestring_path(path) os.close(handle) @@ -383,16 +411,12 @@ class TestHelper(_common.Assertions, ConfigMixin): # Safe file operations - def create_temp_dir(self, **kwargs): - """Create a temporary directory and assign it into - `self.temp_dir`. Call `remove_temp_dir` later to delete it. 
- """ - temp_dir = mkdtemp(**kwargs) - self.temp_dir = util.bytestring_path(temp_dir) + def create_temp_dir(self, **kwargs) -> str: + return mkdtemp(**kwargs) def remove_temp_dir(self): """Delete the temporary directory created by `create_temp_dir`.""" - shutil.rmtree(syspath(self.temp_dir)) + shutil.rmtree(self.temp_dir_path) def touch(self, path, dir=None, content=""): """Create a file at `path` with given content. @@ -456,6 +480,11 @@ class PluginMixin(ConfigMixin): super().teardown_beets() self.unload_plugins() + def register_plugin( + self, plugin_class: type[beets.plugins.BeetsPlugin] + ) -> None: + beets.plugins._instances.append(plugin_class()) + def load_plugins(self, *plugins: str) -> None: """Load and initialize plugins by names. @@ -465,33 +494,15 @@ class PluginMixin(ConfigMixin): # FIXME this should eventually be handled by a plugin manager plugins = (self.plugin,) if hasattr(self, "plugin") else plugins self.config["plugins"] = plugins - beets.plugins.load_plugins(plugins) - beets.plugins.find_plugins() - - # Take a backup of the original _types and _queries to restore - # when unloading. 
- Item._original_types = dict(Item._types) - Album._original_types = dict(Album._types) - Item._types.update(beets.plugins.types(Item)) - Album._types.update(beets.plugins.types(Album)) - - Item._original_queries = dict(Item._queries) - Album._original_queries = dict(Album._queries) - Item._queries.update(beets.plugins.named_queries(Item)) - Album._queries.update(beets.plugins.named_queries(Album)) + beets.plugins.load_plugins() def unload_plugins(self) -> None: """Unload all plugins and remove them from the configuration.""" # FIXME this should eventually be handled by a plugin manager - for plugin_class in beets.plugins._instances: - plugin_class.listeners = None + beets.plugins.BeetsPlugin.listeners.clear() + beets.plugins.BeetsPlugin._raw_listeners.clear() self.config["plugins"] = [] - beets.plugins._classes = set() - beets.plugins._instances = {} - Item._types = getattr(Item, "_original_types", {}) - Album._types = getattr(Album, "_original_types", {}) - Item._queries = getattr(Item, "_original_queries", {}) - Album._queries = getattr(Album, "_original_queries", {}) + beets.plugins._instances.clear() @contextmanager def configure_plugin(self, config: Any): @@ -513,7 +524,6 @@ class ImportHelper(TestHelper): autotagging library and several assertions for the library. 
""" - resource_path = syspath(os.path.join(_common.RSRC, b"full.mp3")) default_import_config = { "autotag": True, "copy": True, @@ -530,7 +540,7 @@ class ImportHelper(TestHelper): @cached_property def import_path(self) -> Path: - import_path = Path(os.fsdecode(self.temp_dir)) / "import" + import_path = self.temp_dir_path / "import" import_path.mkdir(exist_ok=True) return import_path @@ -558,7 +568,7 @@ class ImportHelper(TestHelper): medium = MediaFile(track_path) medium.update( { - "album": "Tag Album" + (f" {album_id}" if album_id else ""), + "album": f"Tag Album{f' {album_id}' if album_id else ''}", "albumartist": None, "mb_albumid": None, "comp": None, @@ -598,7 +608,7 @@ class ImportHelper(TestHelper): ] def prepare_albums_for_import(self, count: int = 1) -> None: - album_dirs = Path(os.fsdecode(self.import_dir)).glob("album_*") + album_dirs = self.import_path.glob("album_*") base_idx = int(str(max(album_dirs, default="0")).split("_")[-1]) + 1 for album_id in range(base_idx, count + base_idx): @@ -622,21 +632,6 @@ class ImportHelper(TestHelper): def setup_singleton_importer(self, **kwargs) -> ImportSession: return self.setup_importer(singletons=True, **kwargs) - def assert_file_in_lib(self, *segments): - """Join the ``segments`` and assert that this path exists in the - library directory. - """ - self.assertExists(os.path.join(self.libdir, *segments)) - - def assert_file_not_in_lib(self, *segments): - """Join the ``segments`` and assert that this path does not - exist in the library directory. 
- """ - self.assertNotExists(os.path.join(self.libdir, *segments)) - - def assert_lib_dir_empty(self): - assert not os.listdir(syspath(self.libdir)) - class AsIsImporterMixin: def setUp(self): @@ -658,9 +653,9 @@ class ImportSessionFixture(ImportSession): >>> lib = Library(':memory:') >>> importer = ImportSessionFixture(lib, paths=['/path/to/import']) - >>> importer.add_choice(importer.action.SKIP) - >>> importer.add_choice(importer.action.ASIS) - >>> importer.default_choice = importer.action.APPLY + >>> importer.add_choice(importer.Action.SKIP) + >>> importer.add_choice(importer.Action.ASIS) + >>> importer.default_choice = importer.Action.APPLY >>> importer.run() This imports ``/path/to/import`` into `lib`. It skips the first @@ -673,7 +668,7 @@ class ImportSessionFixture(ImportSession): self._choices = [] self._resolutions = [] - default_choice = importer.action.APPLY + default_choice = importer.Action.APPLY def add_choice(self, choice): self._choices.append(choice) @@ -687,7 +682,7 @@ class ImportSessionFixture(ImportSession): except IndexError: choice = self.default_choice - if choice == importer.action.APPLY: + if choice == importer.Action.APPLY: return task.candidates[0] elif isinstance(choice, int): return task.candidates[choice - 1] @@ -707,7 +702,7 @@ class ImportSessionFixture(ImportSession): res = self.default_resolution if res == self.Resolution.SKIP: - task.set_choice(importer.action.SKIP) + task.set_choice(importer.Action.SKIP) elif res == self.Resolution.REMOVE: task.should_remove_duplicates = True elif res == self.Resolution.MERGE: @@ -720,7 +715,7 @@ class TerminalImportSessionFixture(TerminalImportSession): super().__init__(*args, **kwargs) self._choices = [] - default_choice = importer.action.APPLY + default_choice = importer.Action.APPLY def add_choice(self, choice): self._choices.append(choice) @@ -742,15 +737,15 @@ class TerminalImportSessionFixture(TerminalImportSession): except IndexError: choice = self.default_choice - if choice == 
importer.action.APPLY: + if choice == importer.Action.APPLY: self.io.addinput("A") - elif choice == importer.action.ASIS: + elif choice == importer.Action.ASIS: self.io.addinput("U") - elif choice == importer.action.ALBUMS: + elif choice == importer.Action.ALBUMS: self.io.addinput("G") - elif choice == importer.action.TRACKS: + elif choice == importer.Action.TRACKS: self.io.addinput("T") - elif choice == importer.action.SKIP: + elif choice == importer.Action.SKIP: self.io.addinput("S") else: self.io.addinput("M") @@ -758,7 +753,7 @@ class TerminalImportSessionFixture(TerminalImportSession): self._add_choice_input() -class TerminalImportMixin(ImportHelper): +class TerminalImportMixin(IOMixin, ImportHelper): """Provides_a terminal importer for the import session.""" io: _common.DummyIO @@ -774,6 +769,7 @@ class TerminalImportMixin(ImportHelper): ) +@dataclass class AutotagStub: """Stub out MusicBrainz album and track matcher and control what the autotagger returns. @@ -784,47 +780,44 @@ class AutotagStub: GOOD = "GOOD" BAD = "BAD" MISSING = "MISSING" - """Generate an album match for all but one track - """ + matching: str length = 2 - matching = IDENT def install(self): - self.mb_match_album = autotag.mb.match_album - self.mb_match_track = autotag.mb.match_track - self.mb_album_for_id = autotag.mb.album_for_id - self.mb_track_for_id = autotag.mb.track_for_id - - autotag.mb.match_album = self.match_album - autotag.mb.match_track = self.match_track - autotag.mb.album_for_id = self.album_for_id - autotag.mb.track_for_id = self.track_for_id + self.patchers = [ + patch("beets.metadata_plugins.album_for_id", lambda *_: None), + patch("beets.metadata_plugins.track_for_id", lambda *_: None), + patch("beets.metadata_plugins.candidates", self.candidates), + patch( + "beets.metadata_plugins.item_candidates", self.item_candidates + ), + ] + for p in self.patchers: + p.start() return self def restore(self): - autotag.mb.match_album = self.mb_match_album - autotag.mb.match_track = 
self.mb_match_track - autotag.mb.album_for_id = self.mb_album_for_id - autotag.mb.track_for_id = self.mb_track_for_id + for p in self.patchers: + p.stop() - def match_album(self, albumartist, album, tracks, extra_tags): + def candidates(self, items, artist, album, va_likely): if self.matching == self.IDENT: - yield self._make_album_match(albumartist, album, tracks) + yield self._make_album_match(artist, album, len(items)) elif self.matching == self.GOOD: for i in range(self.length): - yield self._make_album_match(albumartist, album, tracks, i) + yield self._make_album_match(artist, album, len(items), i) elif self.matching == self.BAD: for i in range(self.length): - yield self._make_album_match(albumartist, album, tracks, i + 1) + yield self._make_album_match(artist, album, len(items), i + 1) elif self.matching == self.MISSING: - yield self._make_album_match(albumartist, album, tracks, missing=1) + yield self._make_album_match(artist, album, len(items), missing=1) - def match_track(self, artist, title): + def item_candidates(self, item, artist, title): yield TrackInfo( title=title.replace("Tag", "Applied"), track_id="trackid", @@ -834,31 +827,23 @@ class AutotagStub: index=0, ) - def album_for_id(self, mbid): - return None - - def track_for_id(self, mbid): - return None - def _make_track_match(self, artist, album, number): return TrackInfo( - title="Applied Track %d" % number, - track_id="match %d" % number, + title=f"Applied Track {number}", + track_id=f"match {number}", artist=artist, length=1, index=0, ) def _make_album_match(self, artist, album, tracks, distance=0, missing=0): - if distance: - id = " " + "M" * distance - else: - id = "" + id = f" {'M' * distance}" if distance else "" + if artist is None: artist = "Various Artists" else: - artist = artist.replace("Tag", "Applied") + id - album = album.replace("Tag", "Applied") + id + artist = f"{artist.replace('Tag', 'Applied')}{id}" + album = f"{album.replace('Tag', 'Applied')}{id}" track_infos = [] for i in 
range(tracks - missing): @@ -869,14 +854,23 @@ class AutotagStub: album=album, tracks=track_infos, va=False, - album_id="albumid" + id, - artist_id="artistid" + id, + album_id=f"albumid{id}", + artist_id=f"artistid{id}", albumtype="soundtrack", data_source="match_source", bandcamp_album_id="bc_url", ) +class AutotagImportTestCase(ImportTestCase): + matching = AutotagStub.IDENT + + def setUp(self): + super().setUp() + self.matcher = AutotagStub(self.matching).install() + self.addCleanup(self.matcher.restore) + + class FetchImageHelper: """Helper mixin for mocking requests when fetching images with remote art sources. @@ -886,20 +880,43 @@ class FetchImageHelper: def run(self, *args, **kwargs): super().run(*args, **kwargs) - IMAGEHEADER = { - "image/jpeg": b"\xff\xd8\xff" + b"\x00" * 3 + b"JFIF", + IMAGEHEADER: dict[str, bytes] = { + "image/jpeg": b"\xff\xd8\xff\x00\x00\x00JFIF", "image/png": b"\211PNG\r\n\032\n", + "image/gif": b"GIF89a", + # dummy type that is definitely not a valid image content type + "image/watercolour": b"watercolour", + "text/html": ( + b"\n\n\n\n" + b"\n\n" + ), } - def mock_response(self, url, content_type="image/jpeg", file_type=None): + def mock_response( + self, + url: str, + content_type: str = "image/jpeg", + file_type: None | str = None, + ) -> None: + # Potentially return a file of a type that differs from the + # server-advertised content type to mimic misbehaving servers. if file_type is None: file_type = content_type + + try: + # imghdr reads 32 bytes + header = self.IMAGEHEADER[file_type].ljust(32, b"\x00") + except KeyError: + # If we can't return a file that looks like real file of the requested + # type, better fail the test than returning something else, which might + # violate assumption made when writing a test. 
+ raise AssertionError(f"Mocking {file_type} responses not supported") + responses.add( responses.GET, url, content_type=content_type, - # imghdr reads 32 bytes - body=self.IMAGEHEADER.get(file_type, b"").ljust(32, b"\x00"), + body=header, ) diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py index 386410a09..5eeef815d 100644 --- a/beets/ui/__init__.py +++ b/beets/ui/__init__.py @@ -17,27 +17,37 @@ interface. To invoke the CLI, just call beets.ui.main(). The actual CLI commands are implemented in the ui.commands module. """ +from __future__ import annotations + import errno import optparse import os.path import re +import shutil import sqlite3 -import struct import sys import textwrap import traceback from difflib import SequenceMatcher -from typing import Any, Callable +from functools import cache +from itertools import chain +from typing import TYPE_CHECKING, Any, Literal import confuse from beets import config, library, logging, plugins, util -from beets.autotag import mb from beets.dbcore import db from beets.dbcore import query as db_query from beets.util import as_string +from beets.util.deprecation import deprecate_for_maintainers from beets.util.functemplate import template +if TYPE_CHECKING: + from collections.abc import Callable, Iterable + + from beets.dbcore.db import FormattedMapping + + # On Windows platforms, use colorama to support "ANSI" terminal colors. if sys.platform == "win32": try: @@ -103,27 +113,23 @@ def _stream_encoding(stream, default="utf-8"): def decargs(arglist): """Given a list of command-line argument bytestrings, attempts to decode them to Unicode strings when running under Python 2. + + .. deprecated:: 2.4.0 + This function will be removed in 3.0.0. 
""" + deprecate_for_maintainers("'beets.ui.decargs'") return arglist -def print_(*strings, **kwargs): +def print_(*strings: str, end: str = "\n") -> None: """Like print, but rather than raising an error when a character is not in the terminal's encoding's character set, just silently replaces it. - The arguments must be Unicode strings: `unicode` on Python 2; `str` on - Python 3. - The `end` keyword argument behaves similarly to the built-in `print` (it defaults to a newline). """ - if not strings: - strings = [""] - assert isinstance(strings[0], str) - - txt = " ".join(strings) - txt += kwargs.get("end", "\n") + txt = f"{' '.join(strings or ('',))}{end}" # Encode the string and write it to stdout. # On Python 3, sys.stdout expects text strings and uses the @@ -267,7 +273,7 @@ def input_options( ) ): # The first option is the default; mark it. - show_letter = "[%s]" % found_letter.upper() + show_letter = f"[{found_letter.upper()}]" is_default = True else: show_letter = found_letter.upper() @@ -306,9 +312,9 @@ def input_options( if isinstance(default, int): default_name = str(default) default_name = colorize("action_default", default_name) - tmpl = "# selection (default %s)" - prompt_parts.append(tmpl % default_name) - prompt_part_lengths.append(len(tmpl % str(default))) + tmpl = "# selection (default {})" + prompt_parts.append(tmpl.format(default_name)) + prompt_part_lengths.append(len(tmpl) - 2 + len(str(default))) else: prompt_parts.append("# selection") prompt_part_lengths.append(len(prompt_parts[-1])) @@ -336,7 +342,7 @@ def input_options( if line_length != 0: # Not the beginning of the line; need a space. 
- part = " " + part + part = f" {part}" length += 1 prompt += part @@ -347,8 +353,8 @@ def input_options( if not fallback_prompt: fallback_prompt = "Enter one of " if numrange: - fallback_prompt += "%i-%i, " % numrange - fallback_prompt += ", ".join(display_letters) + ":" + fallback_prompt += "{}-{}, ".format(*numrange) + fallback_prompt += f"{', '.join(display_letters)}:" resp = input_(prompt) while True: @@ -404,7 +410,7 @@ def input_select_objects(prompt, objs, rep, prompt_all=None): objects individually. """ choice = input_options( - ("y", "n", "s"), False, "%s? (Yes/no/select)" % (prompt_all or prompt) + ("y", "n", "s"), False, f"{prompt_all or prompt}? (Yes/no/select)" ) print() # Blank line. @@ -418,7 +424,7 @@ def input_select_objects(prompt, objs, rep, prompt_all=None): answer = input_options( ("y", "n", "q"), True, - "%s? (yes/no/quit)" % prompt, + f"{prompt}? (yes/no/quit)", "Enter Y or N:", ) if answer == "y": @@ -431,62 +437,12 @@ def input_select_objects(prompt, objs, rep, prompt_all=None): return [] -# Human output formatting. - - -def human_bytes(size): - """Formats size, a number of bytes, in a human-readable way.""" - powers = ["", "K", "M", "G", "T", "P", "E", "Z", "Y", "H"] - unit = "B" - for power in powers: - if size < 1024: - return f"{size:3.1f} {power}{unit}" - size /= 1024.0 - unit = "iB" - return "big" - - -def human_seconds(interval): - """Formats interval, a number of seconds, as a human-readable time - interval using English words. - """ - units = [ - (1, "second"), - (60, "minute"), - (60, "hour"), - (24, "day"), - (7, "week"), - (52, "year"), - (10, "decade"), - ] - for i in range(len(units) - 1): - increment, suffix = units[i] - next_increment, _ = units[i + 1] - interval /= float(increment) - if interval < next_increment: - break - else: - # Last unit. 
- increment, suffix = units[-1] - interval /= float(increment) - - return f"{interval:3.1f} {suffix}s" - - -def human_seconds_short(interval): - """Formats a number of seconds as a short human-readable M:SS - string. - """ - interval = int(interval) - return "%i:%02i" % (interval // 60, interval % 60) - - # Colorization. # ANSI terminal colorization code heavily inspired by pygments: # https://bitbucket.org/birkenfeld/pygments-main/src/default/pygments/console.py # (pygments is by Tim Hatch, Armin Ronacher, et al.) -COLOR_ESCAPE = "\x1b[" +COLOR_ESCAPE = "\x1b" LEGACY_COLORS = { "black": ["black"], "darkred": ["red"], @@ -511,7 +467,7 @@ LEGACY_COLORS = { "white": ["bold", "white"], } # All ANSI Colors. -ANSI_CODES = { +CODE_BY_COLOR = { # Styles. "normal": 0, "bold": 1, @@ -542,11 +498,17 @@ ANSI_CODES = { "bg_cyan": 46, "bg_white": 47, } -RESET_COLOR = COLOR_ESCAPE + "39;49;00m" - -# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS -# as they are defined in the configuration files, see function: colorize -COLOR_NAMES = [ +RESET_COLOR = f"{COLOR_ESCAPE}[39;49;00m" +# Precompile common ANSI-escape regex patterns +ANSI_CODE_REGEX = re.compile(rf"({COLOR_ESCAPE}\[[;0-9]*m)") +ESC_TEXT_REGEX = re.compile( + rf"""(?P[^{COLOR_ESCAPE}]*) + (?P(?:{ANSI_CODE_REGEX.pattern})+) + (?P[^{COLOR_ESCAPE}]+)(?P{re.escape(RESET_COLOR)}) + (?P[^{COLOR_ESCAPE}]*)""", + re.VERBOSE, +) +ColorName = Literal[ "text_success", "text_warning", "text_error", @@ -555,76 +517,54 @@ COLOR_NAMES = [ "action_default", "action", # New Colors - "text", "text_faint", "import_path", "import_path_items", "action_description", - "added", - "removed", "changed", - "added_highlight", - "removed_highlight", - "changed_highlight", "text_diff_added", "text_diff_removed", - "text_diff_changed", ] -COLORS = None -def _colorize(color, text): - """Returns a string that prints the given text in the given color - in a terminal that is ANSI color-aware. 
The color must be a list of strings - from ANSI_CODES. +@cache +def get_color_config() -> dict[ColorName, str]: + """Parse and validate color configuration, converting names to ANSI codes. + + Processes the UI color configuration, handling both new list format and + legacy single-color format. Validates all color names against known codes + and raises an error for any invalid entries. """ - # Construct escape sequence to be put before the text by iterating - # over all "ANSI codes" in `color`. - escape = "" - for code in color: - escape = escape + COLOR_ESCAPE + "%im" % ANSI_CODES[code] - return escape + text + RESET_COLOR + colors_by_color_name: dict[ColorName, list[str]] = { + k: (v if isinstance(v, list) else LEGACY_COLORS.get(v, [v])) + for k, v in config["ui"]["colors"].flatten().items() + } + + if invalid_colors := ( + set(chain.from_iterable(colors_by_color_name.values())) + - CODE_BY_COLOR.keys() + ): + raise UserError( + f"Invalid color(s) in configuration: {', '.join(invalid_colors)}" + ) + + return { + n: ";".join(str(CODE_BY_COLOR[c]) for c in colors) + for n, colors in colors_by_color_name.items() + } -def colorize(color_name, text): - """Colorize text if colored output is enabled. (Like _colorize but - conditional.) +def colorize(color_name: ColorName, text: str) -> str: + """Apply ANSI color formatting to text based on configuration settings. + + Returns colored text when color output is enabled and NO_COLOR environment + variable is not set, otherwise returns plain text unchanged. """ if config["ui"]["color"] and "NO_COLOR" not in os.environ: - global COLORS - if not COLORS: - # Read all color configurations and set global variable COLORS. - COLORS = dict() - for name in COLOR_NAMES: - # Convert legacy color definitions (strings) into the new - # list-based color definitions. 
Do this by trying to read the - # color definition from the configuration as unicode - if this - # is successful, the color definition is a legacy definition - # and has to be converted. - try: - color_def = config["ui"]["colors"][name].get(str) - except (confuse.ConfigTypeError, NameError): - # Normal color definition (type: list of unicode). - color_def = config["ui"]["colors"][name].get(list) - else: - # Legacy color definition (type: unicode). Convert. - if color_def in LEGACY_COLORS: - color_def = LEGACY_COLORS[color_def] - else: - raise UserError("no such color %s", color_def) - for code in color_def: - if code not in ANSI_CODES.keys(): - raise ValueError("no such ANSI code %s", code) - COLORS[name] = color_def - # In case a 3rd party plugin is still passing the actual color ('red') - # instead of the abstract color name ('text_error') - color = COLORS.get(color_name) - if not color: - log.debug("Invalid color_name: {0}", color_name) - color = color_name - return _colorize(color, text) - else: - return text + color_code = get_color_config()[color_name] + return f"{COLOR_ESCAPE}[{color_code}m{text}{RESET_COLOR}" + + return text def uncolorize(colored_text): @@ -637,26 +577,22 @@ def uncolorize(colored_text): # [;\d]* - matches a sequence consisting of one or more digits or # semicola # [A-Za-z] - matches a letter - ansi_code_regex = re.compile(r"\x1b\[[;\d]*[A-Za-z]", re.VERBOSE) - # Strip ANSI codes from `colored_text` using the regular expression. 
- text = ansi_code_regex.sub("", colored_text) - return text + return ANSI_CODE_REGEX.sub("", colored_text) def color_split(colored_text, index): - ansi_code_regex = re.compile(r"(\x1b\[[;\d]*[A-Za-z])", re.VERBOSE) length = 0 pre_split = "" post_split = "" found_color_code = None found_split = False - for part in ansi_code_regex.split(colored_text): + for part in ANSI_CODE_REGEX.split(colored_text): # Count how many real letters we have passed length += color_len(part) if found_split: post_split += part else: - if ansi_code_regex.match(part): + if ANSI_CODE_REGEX.match(part): # This is a color code if part == RESET_COLOR: found_color_code = None @@ -669,8 +605,8 @@ def color_split(colored_text, index): split_index = index - (length - color_len(part)) found_split = True if found_color_code: - pre_split += part[:split_index] + RESET_COLOR - post_split += found_color_code + part[split_index:] + pre_split += f"{part[:split_index]}{RESET_COLOR}" + post_split += f"{found_color_code}{part[split_index:]}" else: pre_split += part[:split_index] post_split += part[split_index:] @@ -690,7 +626,7 @@ def color_len(colored_text): return len(uncolorize(colored_text)) -def _colordiff(a, b): +def _colordiff(a: Any, b: Any) -> tuple[str, str]: """Given two values, return the same pair of strings except with their differences highlighted in the specified color. Strings are highlighted intelligently to show differences; other values are @@ -712,35 +648,21 @@ def _colordiff(a, b): colorize("text_diff_added", str(b)), ) - a_out = [] - b_out = [] + before = "" + after = "" matcher = SequenceMatcher(lambda x: False, a, b) for op, a_start, a_end, b_start, b_end in matcher.get_opcodes(): - if op == "equal": - # In both strings. - a_out.append(a[a_start:a_end]) - b_out.append(b[b_start:b_end]) - elif op == "insert": - # Right only. - b_out.append(colorize("text_diff_added", b[b_start:b_end])) - elif op == "delete": - # Left only. 
- a_out.append(colorize("text_diff_removed", a[a_start:a_end])) - elif op == "replace": - # Right and left differ. Colorise with second highlight if - # it's just a case change. - if a[a_start:a_end].lower() != b[b_start:b_end].lower(): - a_color = "text_diff_removed" - b_color = "text_diff_added" - else: - a_color = b_color = "text_highlight_minor" - a_out.append(colorize(a_color, a[a_start:a_end])) - b_out.append(colorize(b_color, b[b_start:b_end])) - else: - assert False + before_part, after_part = a[a_start:a_end], b[b_start:b_end] + if op in {"delete", "replace"}: + before_part = colorize("text_diff_removed", before_part) + if op in {"insert", "replace"}: + after_part = colorize("text_diff_added", after_part) - return "".join(a_out), "".join(b_out) + before += before_part + after += after_part + + return before, after def colordiff(a, b): @@ -774,32 +696,16 @@ def get_replacements(): replacements.append((re.compile(pattern), repl)) except re.error: raise UserError( - "malformed regular expression in replace: {}".format(pattern) + f"malformed regular expression in replace: {pattern}" ) return replacements -def term_width(): +@cache +def term_width() -> int: """Get the width (columns) of the terminal.""" - fallback = config["ui"]["terminal_width"].get(int) - - # The fcntl and termios modules are not available on non-Unix - # platforms, so we fall back to a constant. 
- try: - import fcntl - import termios - except ImportError: - return fallback - - try: - buf = fcntl.ioctl(0, termios.TIOCGWINSZ, " " * 4) - except OSError: - return fallback - try: - height, width = struct.unpack("hh", buf) - except struct.error: - return fallback - return width + columns, _ = shutil.get_terminal_size(fallback=(0, 0)) + return columns if columns else config["ui"]["terminal_width"].get(int) def split_into_lines(string, width_tuple): @@ -813,19 +719,13 @@ def split_into_lines(string, width_tuple): """ first_width, middle_width, last_width = width_tuple words = [] - esc_text = re.compile( - r"""(?P[^\x1b]*) - (?P(?:\x1b\[[;\d]*[A-Za-z])+) - (?P[^\x1b]+)(?P\x1b\[39;49;00m) - (?P[^\x1b]*)""", - re.VERBOSE, - ) + if uncolorize(string) == string: # No colors in string words = string.split() else: # Use a regex to find escapes and the text within them. - for m in esc_text.finditer(string): + for m in ESC_TEXT_REGEX.finditer(string): # m contains four groups: # pretext - any text before escape sequence # esc - intitial escape sequence @@ -854,17 +754,17 @@ def split_into_lines(string, width_tuple): # Colorize each word with pre/post escapes # Reconstruct colored words words += [ - m.group("esc") + raw_word + RESET_COLOR + f"{m['esc']}{raw_word}{RESET_COLOR}" for raw_word in raw_words ] elif raw_words: # Pretext stops mid-word if m.group("esc") != RESET_COLOR: # Add the rest of the current word, with a reset after it - words[-1] += m.group("esc") + raw_words[0] + RESET_COLOR + words[-1] += f"{m['esc']}{raw_words[0]}{RESET_COLOR}" # Add the subsequent colored words: words += [ - m.group("esc") + raw_word + RESET_COLOR + f"{m['esc']}{raw_word}{RESET_COLOR}" for raw_word in raw_words[1:] ] else: @@ -955,18 +855,12 @@ def print_column_layout( With subsequent lines (i.e. {lhs1}, {rhs1} onwards) being the rest of contents, wrapped if the width would be otherwise exceeded. 
""" - if right["prefix"] + right["contents"] + right["suffix"] == "": + if f"{right['prefix']}{right['contents']}{right['suffix']}" == "": # No right hand information, so we don't need a separator. separator = "" first_line_no_wrap = ( - indent_str - + left["prefix"] - + left["contents"] - + left["suffix"] - + separator - + right["prefix"] - + right["contents"] - + right["suffix"] + f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}" + f"{separator}{right['prefix']}{right['contents']}{right['suffix']}" ) if color_len(first_line_no_wrap) < max_width: # Everything fits, print out line. @@ -1092,18 +986,12 @@ def print_newline_layout( If {lhs0} would go over the maximum width, the subsequent lines are indented a second time for ease of reading. """ - if right["prefix"] + right["contents"] + right["suffix"] == "": + if f"{right['prefix']}{right['contents']}{right['suffix']}" == "": # No right hand information, so we don't need a separator. separator = "" first_line_no_wrap = ( - indent_str - + left["prefix"] - + left["contents"] - + left["suffix"] - + separator - + right["prefix"] - + right["contents"] - + right["suffix"] + f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}" + f"{separator}{right['prefix']}{right['contents']}{right['suffix']}" ) if color_len(first_line_no_wrap) < max_width: # Everything fits, print out line. 
@@ -1117,7 +1005,7 @@ def print_newline_layout( empty_space - len(indent_str), empty_space - len(indent_str), ) - left_str = left["prefix"] + left["contents"] + left["suffix"] + left_str = f"{left['prefix']}{left['contents']}{left['suffix']}" left_split = split_into_lines(left_str, left_width_tuple) # Repeat calculations for rhs, including separator on first line right_width_tuple = ( @@ -1125,58 +1013,65 @@ def print_newline_layout( empty_space - len(indent_str), empty_space - len(indent_str), ) - right_str = right["prefix"] + right["contents"] + right["suffix"] + right_str = f"{right['prefix']}{right['contents']}{right['suffix']}" right_split = split_into_lines(right_str, right_width_tuple) for i, line in enumerate(left_split): if i == 0: - print_(indent_str + line) + print_(f"{indent_str}{line}") elif line != "": # Ignore empty lines - print_(indent_str * 2 + line) + print_(f"{indent_str * 2}{line}") for i, line in enumerate(right_split): if i == 0: - print_(indent_str + separator + line) + print_(f"{indent_str}{separator}{line}") elif line != "": - print_(indent_str * 2 + line) + print_(f"{indent_str * 2}{line}") FLOAT_EPSILON = 0.01 -def _field_diff(field, old, old_fmt, new, new_fmt): +def _field_diff( + field: str, old: FormattedMapping, new: FormattedMapping +) -> str | None: """Given two Model objects and their formatted views, format their values for `field` and highlight changes among them. Return a human-readable string. If the value has not changed, return None instead. """ - oldval = old.get(field) - newval = new.get(field) - # If no change, abort. - if ( + if (oldval := old.model.get(field)) == (newval := new.model.get(field)) or ( isinstance(oldval, float) and isinstance(newval, float) and abs(oldval - newval) < FLOAT_EPSILON ): return None - elif oldval == newval: - return None # Get formatted values for output. 
- oldstr = old_fmt.get(field, "") - newstr = new_fmt.get(field, "") + oldstr, newstr = old.get(field, ""), new.get(field, "") + if field not in new: + return colorize("text_diff_removed", f"{field}: {oldstr}") + + if field not in old: + return colorize("text_diff_added", f"{field}: {newstr}") # For strings, highlight changes. For others, colorize the whole # thing. if isinstance(oldval, str): - oldstr, newstr = colordiff(oldval, newstr) + oldstr, newstr = colordiff(oldstr, newstr) else: - oldstr = colorize("text_error", oldstr) - newstr = colorize("text_error", newstr) + oldstr = colorize("text_diff_removed", oldstr) + newstr = colorize("text_diff_added", newstr) - return f"{oldstr} -> {newstr}" + return f"{field}: {oldstr} -> {newstr}" -def show_model_changes(new, old=None, fields=None, always=False): +def show_model_changes( + new: library.LibModel, + old: library.LibModel | None = None, + fields: Iterable[str] | None = None, + always: bool = False, + print_obj: bool = True, +) -> bool: """Given a Model object, print a list of changes from its pristine version stored in the database. Return a boolean indicating whether any changes were found. @@ -1186,7 +1081,7 @@ def show_model_changes(new, old=None, fields=None, always=False): restrict the detection to. `always` indicates whether the object is always identified, regardless of whether any changes are present. """ - old = old or new._db._get(type(new), new.id) + old = old or new.get_fresh_from_db() # Keep the formatted views around instead of re-creating them in each # iteration step @@ -1194,113 +1089,28 @@ def show_model_changes(new, old=None, fields=None, always=False): new_fmt = new.formatted() # Build up lines showing changed fields. - changes = [] - for field in old: - # Subset of the fields. Never show mtime. 
- if field == "mtime" or (fields and field not in fields): - continue + diff_fields = (set(old) | set(new)) - {"mtime"} + if allowed_fields := set(fields or {}): + diff_fields &= allowed_fields - # Detect and show difference for this field. - line = _field_diff(field, old, old_fmt, new, new_fmt) - if line: - changes.append(f" {field}: {line}") - - # New fields. - for field in set(new) - set(old): - if fields and field not in fields: - continue - - changes.append( - " {}: {}".format(field, colorize("text_highlight", new_fmt[field])) - ) + changes = [ + d + for f in sorted(diff_fields) + if (d := _field_diff(f, old_fmt, new_fmt)) + ] # Print changes. - if changes or always: + if print_obj and (changes or always): print_(format(old)) if changes: - print_("\n".join(changes)) + print_(textwrap.indent("\n".join(changes), " ")) return bool(changes) -def show_path_changes(path_changes): - """Given a list of tuples (source, destination) that indicate the - path changes, log the changes as INFO-level output to the beets log. - The output is guaranteed to be unicode. - - Every pair is shown on a single line if the terminal width permits it, - else it is split over two lines. E.g., - - Source -> Destination - - vs. 
- - Source - -> Destination - """ - sources, destinations = zip(*path_changes) - - # Ensure unicode output - sources = list(map(util.displayable_path, sources)) - destinations = list(map(util.displayable_path, destinations)) - - # Calculate widths for terminal split - col_width = (term_width() - len(" -> ")) // 2 - max_width = len(max(sources + destinations, key=len)) - - if max_width > col_width: - # Print every change over two lines - for source, dest in zip(sources, destinations): - color_source, color_dest = colordiff(source, dest) - print_("{0} \n -> {1}".format(color_source, color_dest)) - else: - # Print every change on a single line, and add a header - title_pad = max_width - len("Source ") + len(" -> ") - - print_("Source {0} Destination".format(" " * title_pad)) - for source, dest in zip(sources, destinations): - pad = max_width - len(source) - color_source, color_dest = colordiff(source, dest) - print_( - "{0} {1} -> {2}".format( - color_source, - " " * pad, - color_dest, - ) - ) - - # Helper functions for option parsing. -def _store_dict(option, opt_str, value, parser): - """Custom action callback to parse options which have ``key=value`` - pairs as values. All such pairs passed for this option are - aggregated into a dictionary. - """ - dest = option.dest - option_values = getattr(parser.values, dest, None) - - if option_values is None: - # This is the first supplied ``key=value`` pair of option. - # Initialize empty dictionary and get a reference to it. - setattr(parser.values, dest, {}) - option_values = getattr(parser.values, dest) - - try: - key, value = value.split("=", 1) - if not (key and value): - raise ValueError - except ValueError: - raise UserError( - "supplied argument `{}' is not of the form `key=value'".format( - value - ) - ) - - option_values[key] = value - - class CommonOptionsParser(optparse.OptionParser): """Offers a simple way to add common formatting options. 
@@ -1354,14 +1164,9 @@ class CommonOptionsParser(optparse.OptionParser): setattr(parser.values, option.dest, True) # Use the explicitly specified format, or the string from the option. - if fmt: - value = fmt - elif value: - (value,) = decargs([value]) - else: - value = "" - + value = fmt or value or "" parser.values.format = value + if target: config[target._format_config_key].set(value) else: @@ -1479,8 +1284,8 @@ class Subcommand: @root_parser.setter def root_parser(self, root_parser): self._root_parser = root_parser - self.parser.prog = "{} {}".format( - as_string(root_parser.get_prog_name()), self.name + self.parser.prog = ( + f"{as_string(root_parser.get_prog_name())} {self.name}" ) @@ -1536,7 +1341,7 @@ class SubcommandsOptionParser(CommonOptionsParser): for subcommand in subcommands: name = subcommand.name if subcommand.aliases: - name += " (%s)" % ", ".join(subcommand.aliases) + name += f" ({', '.join(subcommand.aliases)})" disp_names.append(name) # Set the help position based on the max width. @@ -1549,32 +1354,24 @@ class SubcommandsOptionParser(CommonOptionsParser): # Lifted directly from optparse.py. 
name_width = help_position - formatter.current_indent - 2 if len(name) > name_width: - name = "%*s%s\n" % (formatter.current_indent, "", name) + name = f"{' ' * formatter.current_indent}{name}\n" indent_first = help_position else: - name = "%*s%-*s " % ( - formatter.current_indent, - "", - name_width, - name, - ) + name = f"{' ' * formatter.current_indent}{name:<{name_width}}\n" indent_first = 0 result.append(name) help_width = formatter.width - help_position help_lines = textwrap.wrap(subcommand.help, help_width) help_line = help_lines[0] if help_lines else "" - result.append("%*s%s\n" % (indent_first, "", help_line)) + result.append(f"{' ' * indent_first}{help_line}\n") result.extend( - [ - "%*s%s\n" % (help_position, "", line) - for line in help_lines[1:] - ] + [f"{' ' * help_position}{line}\n" for line in help_lines[1:]] ) formatter.dedent() # Concatenate the original help message with the subcommand # list. - return out + "".join(result) + return f"{out}{''.join(result)}" def _subcommand_for_name(self, name): """Return the subcommand in self.subcommands matching the @@ -1623,66 +1420,16 @@ optparse.Option.ALWAYS_TYPED_ACTIONS += ("callback",) # The main entry point and bootstrapping. -def _load_plugins(options, config): - """Load the plugins specified on the command line or in the configuration.""" - paths = config["pluginpath"].as_str_seq(split=False) - paths = [util.normpath(p) for p in paths] - log.debug("plugin paths: {0}", util.displayable_path(paths)) - - # On Python 3, the search paths need to be unicode. - paths = [os.fsdecode(p) for p in paths] - - # Extend the `beetsplug` package to include the plugin paths. - import beetsplug - - beetsplug.__path__ = paths + list(beetsplug.__path__) - - # For backwards compatibility, also support plugin paths that - # *contain* a `beetsplug` package. - sys.path += paths - - # If we were given any plugins on the command line, use those. 
- if options.plugins is not None: - plugin_list = ( - options.plugins.split(",") if len(options.plugins) > 0 else [] - ) - else: - plugin_list = config["plugins"].as_str_seq() - - # Exclude any plugins that were specified on the command line - if options.exclude is not None: - plugin_list = [ - p for p in plugin_list if p not in options.exclude.split(",") - ] - - plugins.load_plugins(plugin_list) - return plugins - - -def _setup(options, lib=None): +def _setup( + options: optparse.Values, lib: library.Library | None +) -> tuple[list[Subcommand], library.Library]: """Prepare and global state and updates it with command line options. Returns a list of subcommands, a list of plugins, and a library instance. """ - # Configure the MusicBrainz API. - mb.configure() - config = _configure(options) - plugins = _load_plugins(options, config) - - # Add types and queries defined by plugins. - plugin_types_album = plugins.types(library.Album) - library.Album._types.update(plugin_types_album) - item_types = plugin_types_album.copy() - item_types.update(library.Item._types) - item_types.update(plugins.types(library.Item)) - library.Item._types = item_types - - library.Item._queries.update(plugins.named_queries(library.Item)) - library.Album._queries.update(plugins.named_queries(library.Album)) - - plugins.send("pluginload") + plugins.load_plugins() # Get the default subcommands. 
from beets.ui.commands import default_commands @@ -1694,7 +1441,7 @@ def _setup(options, lib=None): lib = _open_library(config) plugins.send("library_opened", lib=lib) - return subcommands, plugins, lib + return subcommands, lib def _configure(options): @@ -1718,19 +1465,19 @@ def _configure(options): if overlay_path: log.debug( - "overlaying configuration: {0}", util.displayable_path(overlay_path) + "overlaying configuration: {}", util.displayable_path(overlay_path) ) config_path = config.user_config_path() if os.path.isfile(config_path): - log.debug("user configuration: {0}", util.displayable_path(config_path)) + log.debug("user configuration: {}", util.displayable_path(config_path)) else: log.debug( - "no user configuration found at {0}", + "no user configuration found at {}", util.displayable_path(config_path), ) - log.debug("data directory: {0}", util.displayable_path(config.config_dir())) + log.debug("data directory: {}", util.displayable_path(config.config_dir())) return config @@ -1740,15 +1487,13 @@ def _ensure_db_directory_exists(path): newpath = os.path.dirname(path) if not os.path.isdir(newpath): if input_yn( - "The database directory {} does not \ - exist. Create it (Y/n)?".format( - util.displayable_path(newpath) - ) + f"The database directory {util.displayable_path(newpath)} does not" + " exist. Create it (Y/n)?" 
): os.makedirs(newpath) -def _open_library(config): +def _open_library(config: confuse.LazyConfig) -> library.Library: """Create a new library instance from the configuration.""" dbpath = util.bytestring_path(config["library"].as_filename()) _ensure_db_directory_exists(dbpath) @@ -1763,19 +1508,18 @@ def _open_library(config): except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error: log.debug("{}", traceback.format_exc()) raise UserError( - "database file {} cannot not be opened: {}".format( - util.displayable_path(dbpath), db_error - ) + f"database file {util.displayable_path(dbpath)} cannot not be" + f" opened: {db_error}" ) log.debug( - "library database: {0}\n" "library directory: {1}", + "library database: {}\nlibrary directory: {}", util.displayable_path(lib.path), util.displayable_path(lib.directory), ) return lib -def _raw_main(args, lib=None): +def _raw_main(args: list[str], lib=None) -> None: """A helper function for `main` without top-level exception handling. """ @@ -1801,16 +1545,31 @@ def _raw_main(args, lib=None): parser.add_option( "-c", "--config", dest="config", help="path to configuration file" ) + + def parse_csl_callback( + option: optparse.Option, _, value: str, parser: SubcommandsOptionParser + ): + """Parse a comma-separated list of values.""" + setattr( + parser.values, + option.dest, # type: ignore[arg-type] + list(filter(None, value.split(","))), + ) + parser.add_option( "-p", "--plugins", dest="plugins", + action="callback", + callback=parse_csl_callback, help="a comma-separated list of plugins to load", ) parser.add_option( "-P", "--disable-plugins", - dest="exclude", + dest="disabled_plugins", + action="callback", + callback=parse_csl_callback, help="a comma-separated list of plugins to disable", ) parser.add_option( @@ -1837,12 +1596,12 @@ def _raw_main(args, lib=None): and subargs[0] == "config" and ("-e" in subargs or "--edit" in subargs) ): - from beets.ui.commands import config_edit + from beets.ui.commands.config 
import config_edit - return config_edit() + return config_edit(options) test_lib = bool(lib) - subcommands, plugins, lib = _setup(options, lib) + subcommands, lib = _setup(options, lib) parser.add_subcommand(*subcommands) subcommand, suboptions, subargs = parser.parse_subcommand(subargs) @@ -1870,7 +1629,7 @@ def main(args=None): _raw_main(args) except UserError as exc: message = exc.args[0] if exc.args else None - log.error("error: {0}", message) + log.error("error: {}", message) sys.exit(1) except util.HumanReadableError as exc: exc.log(log) @@ -1882,10 +1641,10 @@ def main(args=None): log.error("{}", exc) sys.exit(1) except confuse.ConfigError as exc: - log.error("configuration error: {0}", exc) + log.error("configuration error: {}", exc) sys.exit(1) except db_query.InvalidQueryError as exc: - log.error("invalid query: {0}", exc) + log.error("invalid query: {}", exc) sys.exit(1) except OSError as exc: if exc.errno == errno.EPIPE: @@ -1898,7 +1657,7 @@ def main(args=None): log.debug("{}", traceback.format_exc()) except db.DBAccessError as exc: log.error( - "database access error: {0}\n" + "database access error: {}\n" "the library file might have a permissions problem", exc, ) diff --git a/beets/ui/commands.py b/beets/ui/commands.py deleted file mode 100755 index 1822c3e7c..000000000 --- a/beets/ui/commands.py +++ /dev/null @@ -1,2517 +0,0 @@ -# This file is part of beets. -# Copyright 2016, Adrian Sampson. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""This module provides the default commands for beets' command-line -interface. -""" - -import os -import re -from collections import Counter -from collections.abc import Sequence -from itertools import chain -from platform import python_version -from typing import Any, NamedTuple - -import beets -from beets import autotag, config, importer, library, logging, plugins, ui, util -from beets.autotag import Recommendation, hooks -from beets.ui import ( - decargs, - input_, - print_, - print_column_layout, - print_newline_layout, - show_path_changes, -) -from beets.util import ( - MoveOperation, - ancestry, - displayable_path, - functemplate, - normpath, - syspath, -) - -from . import _store_dict - -VARIOUS_ARTISTS = "Various Artists" - -# Global logger. -log = logging.getLogger("beets") - -# The list of default subcommands. This is populated with Subcommand -# objects that can be fed to a SubcommandsOptionParser. -default_commands = [] - - -# Utilities. - - -def _do_query(lib, query, album, also_items=True): - """For commands that operate on matched items, performs a query - and returns a list of matching items and a list of matching - albums. (The latter is only nonempty when album is True.) Raises - a UserError if no items match. also_items controls whether, when - fetching albums, the associated items should be fetched also. 
- """ - if album: - albums = list(lib.albums(query)) - items = [] - if also_items: - for al in albums: - items += al.items() - - else: - albums = [] - items = list(lib.items(query)) - - if album and not albums: - raise ui.UserError("No matching albums found.") - elif not album and not items: - raise ui.UserError("No matching items found.") - - return items, albums - - -def _paths_from_logfile(path): - """Parse the logfile and yield skipped paths to pass to the `import` - command. - """ - with open(path, encoding="utf-8") as fp: - for i, line in enumerate(fp, start=1): - verb, sep, paths = line.rstrip("\n").partition(" ") - if not sep: - raise ValueError(f"line {i} is invalid") - - # Ignore informational lines that don't need to be re-imported. - if verb in {"import", "duplicate-keep", "duplicate-replace"}: - continue - - if verb not in {"asis", "skip", "duplicate-skip"}: - raise ValueError(f"line {i} contains unknown verb {verb}") - - yield os.path.commonpath(paths.split("; ")) - - -def _parse_logfiles(logfiles): - """Parse all `logfiles` and yield paths from it.""" - for logfile in logfiles: - try: - yield from _paths_from_logfile(syspath(normpath(logfile))) - except ValueError as err: - raise ui.UserError( - "malformed logfile {}: {}".format( - util.displayable_path(logfile), str(err) - ) - ) from err - except OSError as err: - raise ui.UserError( - "unreadable logfile {}: {}".format( - util.displayable_path(logfile), str(err) - ) - ) from err - - -# fields: Shows a list of available fields for queries and format strings. - - -def _print_keys(query): - """Given a SQLite query result, print the `key` field of each - returned row, with indentation of 2 spaces. 
- """ - for row in query: - print_(" " * 2 + row["key"]) - - -def fields_func(lib, opts, args): - def _print_rows(names): - names.sort() - print_(" " + "\n ".join(names)) - - print_("Item fields:") - _print_rows(library.Item.all_keys()) - - print_("Album fields:") - _print_rows(library.Album.all_keys()) - - with lib.transaction() as tx: - # The SQL uses the DISTINCT to get unique values from the query - unique_fields = "SELECT DISTINCT key FROM (%s)" - - print_("Item flexible attributes:") - _print_keys(tx.query(unique_fields % library.Item._flex_table)) - - print_("Album flexible attributes:") - _print_keys(tx.query(unique_fields % library.Album._flex_table)) - - -fields_cmd = ui.Subcommand( - "fields", help="show fields available for queries and format strings" -) -fields_cmd.func = fields_func -default_commands.append(fields_cmd) - - -# help: Print help text for commands - - -class HelpCommand(ui.Subcommand): - def __init__(self): - super().__init__( - "help", - aliases=("?",), - help="give detailed help on a specific sub-command", - ) - - def func(self, lib, opts, args): - if args: - cmdname = args[0] - helpcommand = self.root_parser._subcommand_for_name(cmdname) - if not helpcommand: - raise ui.UserError(f"unknown command '{cmdname}'") - helpcommand.print_help() - else: - self.root_parser.print_help() - - -default_commands.append(HelpCommand()) - - -# import: Autotagger and importer. - -# Importer utilities and support. - - -def disambig_string(info): - """Generate a string for an AlbumInfo or TrackInfo object that - provides context that helps disambiguate similar-looking albums and - tracks. 
- """ - if isinstance(info, hooks.AlbumInfo): - disambig = get_album_disambig_fields(info) - elif isinstance(info, hooks.TrackInfo): - disambig = get_singleton_disambig_fields(info) - else: - return "" - - return ", ".join(disambig) - - -def get_singleton_disambig_fields(info: hooks.TrackInfo) -> Sequence[str]: - out = [] - chosen_fields = config["match"]["singleton_disambig_fields"].as_str_seq() - calculated_values = { - "index": "Index {}".format(str(info.index)), - "track_alt": "Track {}".format(info.track_alt), - "album": ( - "[{}]".format(info.album) - if ( - config["import"]["singleton_album_disambig"].get() - and info.get("album") - ) - else "" - ), - } - - for field in chosen_fields: - if field in calculated_values: - out.append(str(calculated_values[field])) - else: - try: - out.append(str(info[field])) - except (AttributeError, KeyError): - print(f"Disambiguation string key {field} does not exist.") - - return out - - -def get_album_disambig_fields(info: hooks.AlbumInfo) -> Sequence[str]: - out = [] - chosen_fields = config["match"]["album_disambig_fields"].as_str_seq() - calculated_values = { - "media": ( - "{}x{}".format(info.mediums, info.media) - if (info.mediums and info.mediums > 1) - else info.media - ), - } - - for field in chosen_fields: - if field in calculated_values: - out.append(str(calculated_values[field])) - else: - try: - out.append(str(info[field])) - except (AttributeError, KeyError): - print(f"Disambiguation string key {field} does not exist.") - - return out - - -def dist_colorize(string, dist): - """Formats a string as a colorized similarity string according to - a distance. 
- """ - if dist <= config["match"]["strong_rec_thresh"].as_number(): - string = ui.colorize("text_success", string) - elif dist <= config["match"]["medium_rec_thresh"].as_number(): - string = ui.colorize("text_warning", string) - else: - string = ui.colorize("text_error", string) - return string - - -def dist_string(dist): - """Formats a distance (a float) as a colorized similarity percentage - string. - """ - string = "{:.1f}%".format(((1 - dist) * 100)) - return dist_colorize(string, dist) - - -def penalty_string(distance, limit=None): - """Returns a colorized string that indicates all the penalties - applied to a distance object. - """ - penalties = [] - for key in distance.keys(): - key = key.replace("album_", "") - key = key.replace("track_", "") - key = key.replace("_", " ") - penalties.append(key) - if penalties: - if limit and len(penalties) > limit: - penalties = penalties[:limit] + ["..."] - # Prefix penalty string with U+2260: Not Equal To - penalty_string = "\u2260 {}".format(", ".join(penalties)) - return ui.colorize("changed", penalty_string) - - -class ChangeRepresentation: - """Keeps track of all information needed to generate a (colored) text - representation of the changes that will be made if an album or singleton's - tags are changed according to `match`, which must be an AlbumMatch or - TrackMatch object, accordingly. - """ - - cur_artist = None - # cur_album set if album, cur_title set if singleton - cur_album = None - cur_title = None - match = None - indent_header = "" - indent_detail = "" - - def __init__(self): - # Read match header indentation width from config. - match_header_indent_width = config["ui"]["import"]["indentation"][ - "match_header" - ].as_number() - self.indent_header = ui.indent(match_header_indent_width) - - # Read match detail indentation width from config. 
- match_detail_indent_width = config["ui"]["import"]["indentation"][ - "match_details" - ].as_number() - self.indent_detail = ui.indent(match_detail_indent_width) - - # Read match tracklist indentation width from config - match_tracklist_indent_width = config["ui"]["import"]["indentation"][ - "match_tracklist" - ].as_number() - self.indent_tracklist = ui.indent(match_tracklist_indent_width) - self.layout = config["ui"]["import"]["layout"].as_choice( - { - "column": 0, - "newline": 1, - } - ) - - def print_layout( - self, indent, left, right, separator=" -> ", max_width=None - ): - if not max_width: - # If no max_width provided, use terminal width - max_width = ui.term_width() - if self.layout == 0: - print_column_layout(indent, left, right, separator, max_width) - else: - print_newline_layout(indent, left, right, separator, max_width) - - def show_match_header(self): - """Print out a 'header' identifying the suggested match (album name, - artist name,...) and summarizing the changes that would be made should - the user accept the match. - """ - # Print newline at beginning of change block. - print_("") - - # 'Match' line and similarity. - print_( - self.indent_header + f"Match ({dist_string(self.match.distance)}):" - ) - - if self.match.info.get("album"): - # Matching an album - print that - artist_album_str = ( - f"{self.match.info.artist}" + f" - {self.match.info.album}" - ) - else: - # Matching a single track - artist_album_str = ( - f"{self.match.info.artist}" + f" - {self.match.info.title}" - ) - print_( - self.indent_header - + dist_colorize(artist_album_str, self.match.distance) - ) - - # Penalties. - penalties = penalty_string(self.match.distance) - if penalties: - print_(self.indent_header + penalties) - - # Disambiguation. - disambig = disambig_string(self.match.info) - if disambig: - print_(self.indent_header + disambig) - - # Data URL. 
- if self.match.info.data_url: - url = ui.colorize("text_faint", f"{self.match.info.data_url}") - print_(self.indent_header + url) - - def show_match_details(self): - """Print out the details of the match, including changes in album name - and artist name. - """ - # Artist. - artist_l, artist_r = self.cur_artist or "", self.match.info.artist - if artist_r == VARIOUS_ARTISTS: - # Hide artists for VA releases. - artist_l, artist_r = "", "" - if artist_l != artist_r: - artist_l, artist_r = ui.colordiff(artist_l, artist_r) - # Prefix with U+2260: Not Equal To - left = { - "prefix": ui.colorize("changed", "\u2260") + " Artist: ", - "contents": artist_l, - "suffix": "", - } - right = {"prefix": "", "contents": artist_r, "suffix": ""} - self.print_layout(self.indent_detail, left, right) - - else: - print_(self.indent_detail + "*", "Artist:", artist_r) - - if self.cur_album: - # Album - album_l, album_r = self.cur_album or "", self.match.info.album - if ( - self.cur_album != self.match.info.album - and self.match.info.album != VARIOUS_ARTISTS - ): - album_l, album_r = ui.colordiff(album_l, album_r) - # Prefix with U+2260: Not Equal To - left = { - "prefix": ui.colorize("changed", "\u2260") + " Album: ", - "contents": album_l, - "suffix": "", - } - right = {"prefix": "", "contents": album_r, "suffix": ""} - self.print_layout(self.indent_detail, left, right) - else: - print_(self.indent_detail + "*", "Album:", album_r) - elif self.cur_title: - # Title - for singletons - title_l, title_r = self.cur_title or "", self.match.info.title - if self.cur_title != self.match.info.title: - title_l, title_r = ui.colordiff(title_l, title_r) - # Prefix with U+2260: Not Equal To - left = { - "prefix": ui.colorize("changed", "\u2260") + " Title: ", - "contents": title_l, - "suffix": "", - } - right = {"prefix": "", "contents": title_r, "suffix": ""} - self.print_layout(self.indent_detail, left, right) - else: - print_(self.indent_detail + "*", "Title:", title_r) - - def 
make_medium_info_line(self, track_info): - """Construct a line with the current medium's info.""" - track_media = track_info.get("media", "Media") - # Build output string. - if self.match.info.mediums > 1 and track_info.disctitle: - return ( - f"* {track_media} {track_info.medium}: {track_info.disctitle}" - ) - elif self.match.info.mediums > 1: - return f"* {track_media} {track_info.medium}" - elif track_info.disctitle: - return f"* {track_media}: {track_info.disctitle}" - else: - return "" - - def format_index(self, track_info): - """Return a string representing the track index of the given - TrackInfo or Item object. - """ - if isinstance(track_info, hooks.TrackInfo): - index = track_info.index - medium_index = track_info.medium_index - medium = track_info.medium - mediums = self.match.info.mediums - else: - index = medium_index = track_info.track - medium = track_info.disc - mediums = track_info.disctotal - if config["per_disc_numbering"]: - if mediums and mediums > 1: - return f"{medium}-{medium_index}" - else: - return str(medium_index if medium_index is not None else index) - else: - return str(index) - - def make_track_numbers(self, item, track_info): - """Format colored track indices.""" - cur_track = self.format_index(item) - new_track = self.format_index(track_info) - templ = "(#{})" - changed = False - # Choose color based on change. 
- if cur_track != new_track: - changed = True - if item.track in (track_info.index, track_info.medium_index): - highlight_color = "text_highlight_minor" - else: - highlight_color = "text_highlight" - else: - highlight_color = "text_faint" - - cur_track = templ.format(cur_track) - new_track = templ.format(new_track) - lhs_track = ui.colorize(highlight_color, cur_track) - rhs_track = ui.colorize(highlight_color, new_track) - return lhs_track, rhs_track, changed - - @staticmethod - def make_track_titles(item, track_info): - """Format colored track titles.""" - new_title = track_info.title - if not item.title.strip(): - # If there's no title, we use the filename. Don't colordiff. - cur_title = displayable_path(os.path.basename(item.path)) - return cur_title, new_title, True - else: - # If there is a title, highlight differences. - cur_title = item.title.strip() - cur_col, new_col = ui.colordiff(cur_title, new_title) - return cur_col, new_col, cur_title != new_title - - @staticmethod - def make_track_lengths(item, track_info): - """Format colored track lengths.""" - changed = False - if ( - item.length - and track_info.length - and abs(item.length - track_info.length) - >= config["ui"]["length_diff_thresh"].as_number() - ): - highlight_color = "text_highlight" - changed = True - else: - highlight_color = "text_highlight_minor" - - # Handle nonetype lengths by setting to 0 - cur_length0 = item.length if item.length else 0 - new_length0 = track_info.length if track_info.length else 0 - # format into string - cur_length = f"({ui.human_seconds_short(cur_length0)})" - new_length = f"({ui.human_seconds_short(new_length0)})" - # colorize - lhs_length = ui.colorize(highlight_color, cur_length) - rhs_length = ui.colorize(highlight_color, new_length) - - return lhs_length, rhs_length, changed - - def make_line(self, item, track_info): - """Extract changes from item -> new TrackInfo object, and colorize - appropriately. Returns (lhs, rhs) for column printing. 
- """ - # Track titles. - lhs_title, rhs_title, diff_title = self.make_track_titles( - item, track_info - ) - # Track number change. - lhs_track, rhs_track, diff_track = self.make_track_numbers( - item, track_info - ) - # Length change. - lhs_length, rhs_length, diff_length = self.make_track_lengths( - item, track_info - ) - - changed = diff_title or diff_track or diff_length - - # Construct lhs and rhs dicts. - # Previously, we printed the penalties, however this is no longer - # the case, thus the 'info' dictionary is unneeded. - # penalties = penalty_string(self.match.distance.tracks[track_info]) - - prefix = ui.colorize("changed", "\u2260 ") if changed else "* " - lhs = { - "prefix": prefix + lhs_track + " ", - "contents": lhs_title, - "suffix": " " + lhs_length, - } - rhs = {"prefix": "", "contents": "", "suffix": ""} - if not changed: - # Only return the left side, as nothing changed. - return (lhs, rhs) - else: - # Construct a dictionary for the "changed to" side - rhs = { - "prefix": rhs_track + " ", - "contents": rhs_title, - "suffix": " " + rhs_length, - } - return (lhs, rhs) - - def print_tracklist(self, lines): - """Calculates column widths for tracks stored as line tuples: - (left, right). Then prints each line of tracklist. - """ - if len(lines) == 0: - # If no lines provided, e.g. details not required, do nothing. 
- return - - def get_width(side): - """Return the width of left or right in uncolorized characters.""" - try: - return len( - ui.uncolorize( - " ".join( - [side["prefix"], side["contents"], side["suffix"]] - ) - ) - ) - except KeyError: - # An empty dictionary -> Nothing to report - return 0 - - # Check how to fit content into terminal window - indent_width = len(self.indent_tracklist) - terminal_width = ui.term_width() - joiner_width = len("".join(["* ", " -> "])) - col_width = (terminal_width - indent_width - joiner_width) // 2 - max_width_l = max(get_width(line_tuple[0]) for line_tuple in lines) - max_width_r = max(get_width(line_tuple[1]) for line_tuple in lines) - - if ( - (max_width_l <= col_width) - and (max_width_r <= col_width) - or ( - ((max_width_l > col_width) or (max_width_r > col_width)) - and ((max_width_l + max_width_r) <= col_width * 2) - ) - ): - # All content fits. Either both maximum widths are below column - # widths, or one of the columns is larger than allowed but the - # other is smaller than allowed. - # In this case we can afford to shrink the columns to fit their - # largest string - col_width_l = max_width_l - col_width_r = max_width_r - else: - # Not all content fits - stick with original half/half split - col_width_l = col_width - col_width_r = col_width - - # Print out each line, using the calculated width from above. - for left, right in lines: - left["width"] = col_width_l - right["width"] = col_width_r - self.print_layout(self.indent_tracklist, left, right) - - -class AlbumChange(ChangeRepresentation): - """Album change representation, setting cur_album""" - - def __init__(self, cur_artist, cur_album, match): - super().__init__() - self.cur_artist = cur_artist - self.cur_album = cur_album - self.match = match - - def show_match_tracks(self): - """Print out the tracks of the match, summarizing changes the match - suggests for them. - """ - # Tracks. 
- # match is an AlbumMatch NamedTuple, mapping is a dict - # Sort the pairs by the track_info index (at index 1 of the NamedTuple) - pairs = list(self.match.mapping.items()) - pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index) - # Build up LHS and RHS for track difference display. The `lines` list - # contains `(left, right)` tuples. - lines = [] - medium = disctitle = None - for item, track_info in pairs: - # If the track is the first on a new medium, show medium - # number and title. - if medium != track_info.medium or disctitle != track_info.disctitle: - # Create header for new medium - header = self.make_medium_info_line(track_info) - if header != "": - # Print tracks from previous medium - self.print_tracklist(lines) - lines = [] - print_(self.indent_detail + header) - # Save new medium details for future comparison. - medium, disctitle = track_info.medium, track_info.disctitle - - # Construct the line tuple for the track. - left, right = self.make_line(item, track_info) - if right["contents"] != "": - lines.append((left, right)) - else: - if config["import"]["detail"]: - lines.append((left, right)) - self.print_tracklist(lines) - - # Missing and unmatched tracks. - if self.match.extra_tracks: - print_( - "Missing tracks ({0}/{1} - {2:.1%}):".format( - len(self.match.extra_tracks), - len(self.match.info.tracks), - len(self.match.extra_tracks) / len(self.match.info.tracks), - ) - ) - for track_info in self.match.extra_tracks: - line = f" ! {track_info.title} (#{self.format_index(track_info)})" - if track_info.length: - line += f" ({ui.human_seconds_short(track_info.length)})" - print_(ui.colorize("text_warning", line)) - if self.match.extra_items: - print_(f"Unmatched tracks ({len(self.match.extra_items)}):") - for item in self.match.extra_items: - line = " ! 
{} (#{})".format(item.title, self.format_index(item)) - if item.length: - line += " ({})".format(ui.human_seconds_short(item.length)) - print_(ui.colorize("text_warning", line)) - - -class TrackChange(ChangeRepresentation): - """Track change representation, comparing item with match.""" - - def __init__(self, cur_artist, cur_title, match): - super().__init__() - self.cur_artist = cur_artist - self.cur_title = cur_title - self.match = match - - -def show_change(cur_artist, cur_album, match): - """Print out a representation of the changes that will be made if an - album's tags are changed according to `match`, which must be an AlbumMatch - object. - """ - change = AlbumChange( - cur_artist=cur_artist, cur_album=cur_album, match=match - ) - - # Print the match header. - change.show_match_header() - - # Print the match details. - change.show_match_details() - - # Print the match tracks. - change.show_match_tracks() - - -def show_item_change(item, match): - """Print out the change that would occur by tagging `item` with the - metadata from `match`, a TrackMatch object. - """ - change = TrackChange( - cur_artist=item.artist, cur_title=item.title, match=match - ) - # Print the match header. - change.show_match_header() - # Print the match details. - change.show_match_details() - - -def summarize_items(items, singleton): - """Produces a brief summary line describing a set of items. Used for - manually resolving duplicates during import. - - `items` is a list of `Item` objects. `singleton` indicates whether - this is an album or single-item import (if the latter, them `items` - should only have one element). - """ - summary_parts = [] - if not singleton: - summary_parts.append("{} items".format(len(items))) - - format_counts = {} - for item in items: - format_counts[item.format] = format_counts.get(item.format, 0) + 1 - if len(format_counts) == 1: - # A single format. 
- summary_parts.append(items[0].format) - else: - # Enumerate all the formats by decreasing frequencies: - for fmt, count in sorted( - format_counts.items(), - key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]), - ): - summary_parts.append(f"{fmt} {count}") - - if items: - average_bitrate = sum([item.bitrate for item in items]) / len(items) - total_duration = sum([item.length for item in items]) - total_filesize = sum([item.filesize for item in items]) - summary_parts.append("{}kbps".format(int(average_bitrate / 1000))) - if items[0].format == "FLAC": - sample_bits = "{}kHz/{} bit".format( - round(int(items[0].samplerate) / 1000, 1), items[0].bitdepth - ) - summary_parts.append(sample_bits) - summary_parts.append(ui.human_seconds_short(total_duration)) - summary_parts.append(ui.human_bytes(total_filesize)) - - return ", ".join(summary_parts) - - -def _summary_judgment(rec): - """Determines whether a decision should be made without even asking - the user. This occurs in quiet mode and when an action is chosen for - NONE recommendations. Return None if the user should be queried. - Otherwise, returns an action. May also print to the console if a - summary judgment is made. 
- """ - - if config["import"]["quiet"]: - if rec == Recommendation.strong: - return importer.action.APPLY - else: - action = config["import"]["quiet_fallback"].as_choice( - { - "skip": importer.action.SKIP, - "asis": importer.action.ASIS, - } - ) - elif config["import"]["timid"]: - return None - elif rec == Recommendation.none: - action = config["import"]["none_rec_action"].as_choice( - { - "skip": importer.action.SKIP, - "asis": importer.action.ASIS, - "ask": None, - } - ) - else: - return None - - if action == importer.action.SKIP: - print_("Skipping.") - elif action == importer.action.ASIS: - print_("Importing as-is.") - return action - - -class PromptChoice(NamedTuple): - short: str - long: str - callback: Any - - -def choose_candidate( - candidates, - singleton, - rec, - cur_artist=None, - cur_album=None, - item=None, - itemcount=None, - choices=[], -): - """Given a sorted list of candidates, ask the user for a selection - of which candidate to use. Applies to both full albums and - singletons (tracks). Candidates are either AlbumMatch or TrackMatch - objects depending on `singleton`. for albums, `cur_artist`, - `cur_album`, and `itemcount` must be provided. For singletons, - `item` must be provided. - - `choices` is a list of `PromptChoice`s to be used in each prompt. - - Returns one of the following: - * the result of the choice, which may be SKIP or ASIS - * a candidate (an AlbumMatch/TrackMatch object) - * a chosen `PromptChoice` from `choices` - """ - # Sanity check. - if singleton: - assert item is not None - else: - assert cur_artist is not None - assert cur_album is not None - - # Build helper variables for the prompt choices. - choice_opts = tuple(c.long for c in choices) - choice_actions = {c.short: c for c in choices} - - # Zero candidates. 
- if not candidates: - if singleton: - print_("No matching recordings found.") - else: - print_("No matching release found for {} tracks.".format(itemcount)) - print_( - "For help, see: " - "https://beets.readthedocs.org/en/latest/faq.html#nomatch" - ) - sel = ui.input_options(choice_opts) - if sel in choice_actions: - return choice_actions[sel] - else: - assert False - - # Is the change good enough? - bypass_candidates = False - if rec != Recommendation.none: - match = candidates[0] - bypass_candidates = True - - while True: - # Display and choose from candidates. - require = rec <= Recommendation.low - - if not bypass_candidates: - # Display list of candidates. - print_("") - print_( - 'Finding tags for {} "{} - {}".'.format( - "track" if singleton else "album", - item.artist if singleton else cur_artist, - item.title if singleton else cur_album, - ) - ) - - print_(ui.indent(2) + "Candidates:") - for i, match in enumerate(candidates): - # Index, metadata, and distance. - index0 = "{0}.".format(i + 1) - index = dist_colorize(index0, match.distance) - dist = "({:.1f}%)".format((1 - match.distance) * 100) - distance = dist_colorize(dist, match.distance) - metadata = "{0} - {1}".format( - match.info.artist, - match.info.title if singleton else match.info.album, - ) - if i == 0: - metadata = dist_colorize(metadata, match.distance) - else: - metadata = ui.colorize("text_highlight_minor", metadata) - line1 = [index, distance, metadata] - print_(ui.indent(2) + " ".join(line1)) - - # Penalties. - penalties = penalty_string(match.distance, 3) - if penalties: - print_(ui.indent(13) + penalties) - - # Disambiguation - disambig = disambig_string(match.info) - if disambig: - print_(ui.indent(13) + disambig) - - # Ask the user for a choice. - sel = ui.input_options(choice_opts, numrange=(1, len(candidates))) - if sel == "m": - pass - elif sel in choice_actions: - return choice_actions[sel] - else: # Numerical selection. 
- match = candidates[sel - 1] - if sel != 1: - # When choosing anything but the first match, - # disable the default action. - require = True - bypass_candidates = False - - # Show what we're about to do. - if singleton: - show_item_change(item, match) - else: - show_change(cur_artist, cur_album, match) - - # Exact match => tag automatically if we're not in timid mode. - if rec == Recommendation.strong and not config["import"]["timid"]: - return match - - # Ask for confirmation. - default = config["import"]["default_action"].as_choice( - { - "apply": "a", - "skip": "s", - "asis": "u", - "none": None, - } - ) - if default is None: - require = True - # Bell ring when user interaction is needed. - if config["import"]["bell"]: - ui.print_("\a", end="") - sel = ui.input_options( - ("Apply", "More candidates") + choice_opts, - require=require, - default=default, - ) - if sel == "a": - return match - elif sel in choice_actions: - return choice_actions[sel] - - -def manual_search(session, task): - """Get a new `Proposal` using manual search criteria. - - Input either an artist and album (for full albums) or artist and - track name (for singletons) for manual search. - """ - artist = input_("Artist:").strip() - name = input_("Album:" if task.is_album else "Track:").strip() - - if task.is_album: - _, _, prop = autotag.tag_album(task.items, artist, name) - return prop - else: - return autotag.tag_item(task.item, artist, name) - - -def manual_id(session, task): - """Get a new `Proposal` using a manually-entered ID. - - Input an ID, either for an album ("release") or a track ("recording"). 
- """ - prompt = "Enter {} ID:".format("release" if task.is_album else "recording") - search_id = input_(prompt).strip() - - if task.is_album: - _, _, prop = autotag.tag_album(task.items, search_ids=search_id.split()) - return prop - else: - return autotag.tag_item(task.item, search_ids=search_id.split()) - - -def abort_action(session, task): - """A prompt choice callback that aborts the importer.""" - raise importer.ImportAbortError() - - -class TerminalImportSession(importer.ImportSession): - """An import session that runs in a terminal.""" - - def choose_match(self, task): - """Given an initial autotagging of items, go through an interactive - dance with the user to ask for a choice of metadata. Returns an - AlbumMatch object, ASIS, or SKIP. - """ - # Show what we're tagging. - print_() - - path_str0 = displayable_path(task.paths, "\n") - path_str = ui.colorize("import_path", path_str0) - items_str0 = "({} items)".format(len(task.items)) - items_str = ui.colorize("import_path_items", items_str0) - print_(" ".join([path_str, items_str])) - - # Let plugins display info or prompt the user before we go through the - # process of selecting candidate. - results = plugins.send( - "import_task_before_choice", session=self, task=task - ) - actions = [action for action in results if action] - - if len(actions) == 1: - return actions[0] - elif len(actions) > 1: - raise plugins.PluginConflictError( - "Only one handler for `import_task_before_choice` may return " - "an action." - ) - - # Take immediate action if appropriate. - action = _summary_judgment(task.rec) - if action == importer.action.APPLY: - match = task.candidates[0] - show_change(task.cur_artist, task.cur_album, match) - return match - elif action is not None: - return action - - # Loop until we have a choice. - while True: - # Ask for a choice from the user. The result of - # `choose_candidate` may be an `importer.action`, an - # `AlbumMatch` object for a specific selection, or a - # `PromptChoice`. 
- choices = self._get_choices(task) - choice = choose_candidate( - task.candidates, - False, - task.rec, - task.cur_artist, - task.cur_album, - itemcount=len(task.items), - choices=choices, - ) - - # Basic choices that require no more action here. - if choice in (importer.action.SKIP, importer.action.ASIS): - # Pass selection to main control flow. - return choice - - # Plugin-provided choices. We invoke the associated callback - # function. - elif choice in choices: - post_choice = choice.callback(self, task) - if isinstance(post_choice, importer.action): - return post_choice - elif isinstance(post_choice, autotag.Proposal): - # Use the new candidates and continue around the loop. - task.candidates = post_choice.candidates - task.rec = post_choice.recommendation - - # Otherwise, we have a specific match selection. - else: - # We have a candidate! Finish tagging. Here, choice is an - # AlbumMatch object. - assert isinstance(choice, autotag.AlbumMatch) - return choice - - def choose_item(self, task): - """Ask the user for a choice about tagging a single item. Returns - either an action constant or a TrackMatch object. - """ - print_() - print_(displayable_path(task.item.path)) - candidates, rec = task.candidates, task.rec - - # Take immediate action if appropriate. - action = _summary_judgment(task.rec) - if action == importer.action.APPLY: - match = candidates[0] - show_item_change(task.item, match) - return match - elif action is not None: - return action - - while True: - # Ask for a choice. 
- choices = self._get_choices(task) - choice = choose_candidate( - candidates, True, rec, item=task.item, choices=choices - ) - - if choice in (importer.action.SKIP, importer.action.ASIS): - return choice - - elif choice in choices: - post_choice = choice.callback(self, task) - if isinstance(post_choice, importer.action): - return post_choice - elif isinstance(post_choice, autotag.Proposal): - candidates = post_choice.candidates - rec = post_choice.recommendation - - else: - # Chose a candidate. - assert isinstance(choice, autotag.TrackMatch) - return choice - - def resolve_duplicate(self, task, found_duplicates): - """Decide what to do when a new album or item seems similar to one - that's already in the library. - """ - log.warning( - "This {0} is already in the library!", - ("album" if task.is_album else "item"), - ) - - if config["import"]["quiet"]: - # In quiet mode, don't prompt -- just skip. - log.info("Skipping.") - sel = "s" - else: - # Print some detail about the existing and new items so the - # user can make an informed decision. - for duplicate in found_duplicates: - print_( - "Old: " - + summarize_items( - ( - list(duplicate.items()) - if task.is_album - else [duplicate] - ), - not task.is_album, - ) - ) - if config["import"]["duplicate_verbose_prompt"]: - if task.is_album: - for dup in duplicate.items(): - print(f" {dup}") - else: - print(f" {duplicate}") - - print_( - "New: " - + summarize_items( - task.imported_items(), - not task.is_album, - ) - ) - if config["import"]["duplicate_verbose_prompt"]: - for item in task.imported_items(): - print(f" {item}") - - sel = ui.input_options( - ("Skip new", "Keep all", "Remove old", "Merge all") - ) - - if sel == "s": - # Skip new. - task.set_choice(importer.action.SKIP) - elif sel == "k": - # Keep both. Do nothing; leave the choice intact. - pass - elif sel == "r": - # Remove old. 
- task.should_remove_duplicates = True - elif sel == "m": - task.should_merge_duplicates = True - else: - assert False - - def should_resume(self, path): - return ui.input_yn( - "Import of the directory:\n{}\n" - "was interrupted. Resume (Y/n)?".format(displayable_path(path)) - ) - - def _get_choices(self, task): - """Get the list of prompt choices that should be presented to the - user. This consists of both built-in choices and ones provided by - plugins. - - The `before_choose_candidate` event is sent to the plugins, with - session and task as its parameters. Plugins are responsible for - checking the right conditions and returning a list of `PromptChoice`s, - which is flattened and checked for conflicts. - - If two or more choices have the same short letter, a warning is - emitted and all but one choices are discarded, giving preference - to the default importer choices. - - Returns a list of `PromptChoice`s. - """ - # Standard, built-in choices. - choices = [ - PromptChoice("s", "Skip", lambda s, t: importer.action.SKIP), - PromptChoice("u", "Use as-is", lambda s, t: importer.action.ASIS), - ] - if task.is_album: - choices += [ - PromptChoice( - "t", "as Tracks", lambda s, t: importer.action.TRACKS - ), - PromptChoice( - "g", "Group albums", lambda s, t: importer.action.ALBUMS - ), - ] - choices += [ - PromptChoice("e", "Enter search", manual_search), - PromptChoice("i", "enter Id", manual_id), - PromptChoice("b", "aBort", abort_action), - ] - - # Send the before_choose_candidate event and flatten list. - extra_choices = list( - chain( - *plugins.send( - "before_choose_candidate", session=self, task=task - ) - ) - ) - - # Add a "dummy" choice for the other baked-in option, for - # duplicate checking. - all_choices = ( - [ - PromptChoice("a", "Apply", None), - ] - + choices - + extra_choices - ) - - # Check for conflicts. 
- short_letters = [c.short for c in all_choices] - if len(short_letters) != len(set(short_letters)): - # Duplicate short letter has been found. - duplicates = [ - i for i, count in Counter(short_letters).items() if count > 1 - ] - for short in duplicates: - # Keep the first of the choices, removing the rest. - dup_choices = [c for c in all_choices if c.short == short] - for c in dup_choices[1:]: - log.warning( - "Prompt choice '{0}' removed due to conflict " - "with '{1}' (short letter: '{2}')", - c.long, - dup_choices[0].long, - c.short, - ) - extra_choices.remove(c) - - return choices + extra_choices - - -# The import command. - - -def import_files(lib, paths, query): - """Import the files in the given list of paths or matching the - query. - """ - # Check parameter consistency. - if config["import"]["quiet"] and config["import"]["timid"]: - raise ui.UserError("can't be both quiet and timid") - - # Open the log. - if config["import"]["log"].get() is not None: - logpath = syspath(config["import"]["log"].as_filename()) - try: - loghandler = logging.FileHandler(logpath, encoding="utf-8") - except OSError: - raise ui.UserError( - f"Could not open log file for writing: {displayable_path(logpath)}" - ) - else: - loghandler = None - - # Never ask for input in quiet mode. - if config["import"]["resume"].get() == "ask" and config["import"]["quiet"]: - config["import"]["resume"] = False - - session = TerminalImportSession(lib, loghandler, paths, query) - session.run() - - # Emit event. - plugins.send("import", lib=lib, paths=paths) - - -def import_func(lib, opts, args): - config["import"].set_args(opts) - - # Special case: --copy flag suppresses import_move (which would - # otherwise take precedence). - if opts.copy: - config["import"]["move"] = False - - if opts.library: - query = decargs(args) - paths = [] - else: - query = None - paths = args - - # The paths from the logfiles go into a separate list to allow handling - # errors differently from user-specified paths. 
- paths_from_logfiles = list(_parse_logfiles(opts.from_logfiles or [])) - - if not paths and not paths_from_logfiles: - raise ui.UserError("no path specified") - - # On Python 2, we used to get filenames as raw bytes, which is - # what we need. On Python 3, we need to undo the "helpful" - # conversion to Unicode strings to get the real bytestring - # filename. - paths = [os.fsencode(p) for p in paths] - paths_from_logfiles = [os.fsencode(p) for p in paths_from_logfiles] - - # Check the user-specified directories. - for path in paths: - if not os.path.exists(syspath(normpath(path))): - raise ui.UserError( - "no such file or directory: {}".format( - displayable_path(path) - ) - ) - - # Check the directories from the logfiles, but don't throw an error in - # case those paths don't exist. Maybe some of those paths have already - # been imported and moved separately, so logging a warning should - # suffice. - for path in paths_from_logfiles: - if not os.path.exists(syspath(normpath(path))): - log.warning( - "No such file or directory: {}".format( - displayable_path(path) - ) - ) - continue - - paths.append(path) - - # If all paths were read from a logfile, and none of them exist, throw - # an error - if not paths: - raise ui.UserError("none of the paths are importable") - - import_files(lib, paths, query) - - -import_cmd = ui.Subcommand( - "import", help="import new music", aliases=("imp", "im") -) -import_cmd.parser.add_option( - "-c", - "--copy", - action="store_true", - default=None, - help="copy tracks into library directory (default)", -) -import_cmd.parser.add_option( - "-C", - "--nocopy", - action="store_false", - dest="copy", - help="don't copy tracks (opposite of -c)", -) -import_cmd.parser.add_option( - "-m", - "--move", - action="store_true", - dest="move", - help="move tracks into the library (overrides -c)", -) -import_cmd.parser.add_option( - "-w", - "--write", - action="store_true", - default=None, - help="write new metadata to files' tags (default)", -) 
-import_cmd.parser.add_option( - "-W", - "--nowrite", - action="store_false", - dest="write", - help="don't write metadata (opposite of -w)", -) -import_cmd.parser.add_option( - "-a", - "--autotag", - action="store_true", - dest="autotag", - help="infer tags for imported files (default)", -) -import_cmd.parser.add_option( - "-A", - "--noautotag", - action="store_false", - dest="autotag", - help="don't infer tags for imported files (opposite of -a)", -) -import_cmd.parser.add_option( - "-p", - "--resume", - action="store_true", - default=None, - help="resume importing if interrupted", -) -import_cmd.parser.add_option( - "-P", - "--noresume", - action="store_false", - dest="resume", - help="do not try to resume importing", -) -import_cmd.parser.add_option( - "-q", - "--quiet", - action="store_true", - dest="quiet", - help="never prompt for input: skip albums instead", -) -import_cmd.parser.add_option( - "--quiet-fallback", - type="string", - dest="quiet_fallback", - help="decision in quiet mode when no strong match: skip or asis", -) -import_cmd.parser.add_option( - "-l", - "--log", - dest="log", - help="file to log untaggable albums for later review", -) -import_cmd.parser.add_option( - "-s", - "--singletons", - action="store_true", - help="import individual tracks instead of full albums", -) -import_cmd.parser.add_option( - "-t", - "--timid", - dest="timid", - action="store_true", - help="always confirm all actions", -) -import_cmd.parser.add_option( - "-L", - "--library", - dest="library", - action="store_true", - help="retag items matching a query", -) -import_cmd.parser.add_option( - "-i", - "--incremental", - dest="incremental", - action="store_true", - help="skip already-imported directories", -) -import_cmd.parser.add_option( - "-I", - "--noincremental", - dest="incremental", - action="store_false", - help="do not skip already-imported directories", -) -import_cmd.parser.add_option( - "-R", - "--incremental-skip-later", - action="store_true", - 
dest="incremental_skip_later", - help="do not record skipped files during incremental import", -) -import_cmd.parser.add_option( - "-r", - "--noincremental-skip-later", - action="store_false", - dest="incremental_skip_later", - help="record skipped files during incremental import", -) -import_cmd.parser.add_option( - "--from-scratch", - dest="from_scratch", - action="store_true", - help="erase existing metadata before applying new metadata", -) -import_cmd.parser.add_option( - "--flat", - dest="flat", - action="store_true", - help="import an entire tree as a single album", -) -import_cmd.parser.add_option( - "-g", - "--group-albums", - dest="group_albums", - action="store_true", - help="group tracks in a folder into separate albums", -) -import_cmd.parser.add_option( - "--pretend", - dest="pretend", - action="store_true", - help="just print the files to import", -) -import_cmd.parser.add_option( - "-S", - "--search-id", - dest="search_ids", - action="append", - metavar="ID", - help="restrict matching to a specific metadata backend ID", -) -import_cmd.parser.add_option( - "--from-logfile", - dest="from_logfiles", - action="append", - metavar="PATH", - help="read skipped paths from an existing logfile", -) -import_cmd.parser.add_option( - "--set", - dest="set_fields", - action="callback", - callback=_store_dict, - metavar="FIELD=VALUE", - help="set the given fields to the supplied values", -) -import_cmd.func = import_func -default_commands.append(import_cmd) - - -# list: Query and show library contents. - - -def list_items(lib, query, album, fmt=""): - """Print out items in lib matching query. If album, then search for - albums instead of single items. 
- """ - if album: - for album in lib.albums(query): - ui.print_(format(album, fmt)) - else: - for item in lib.items(query): - ui.print_(format(item, fmt)) - - -def list_func(lib, opts, args): - list_items(lib, decargs(args), opts.album) - - -list_cmd = ui.Subcommand("list", help="query the library", aliases=("ls",)) -list_cmd.parser.usage += ( - "\n" "Example: %prog -f '$album: $title' artist:beatles" -) -list_cmd.parser.add_all_common_options() -list_cmd.func = list_func -default_commands.append(list_cmd) - - -# update: Update library contents according to on-disk tags. - - -def update_items(lib, query, album, move, pretend, fields, exclude_fields=None): - """For all the items matched by the query, update the library to - reflect the item's embedded tags. - :param fields: The fields to be stored. If not specified, all fields will - be. - :param exclude_fields: The fields to not be stored. If not specified, all - fields will be. - """ - with lib.transaction(): - items, _ = _do_query(lib, query, album) - if move and fields is not None and "path" not in fields: - # Special case: if an item needs to be moved, the path field has to - # updated; otherwise the new path will not be reflected in the - # database. - fields.append("path") - if fields is None: - # no fields were provided, update all media fields - item_fields = fields or library.Item._media_fields - if move and "path" not in item_fields: - # move is enabled, add 'path' to the list of fields to update - item_fields.add("path") - else: - # fields was provided, just update those - item_fields = fields - # get all the album fields to update - album_fields = fields or library.Album._fields.keys() - if exclude_fields: - # remove any excluded fields from the item and album sets - item_fields = [f for f in item_fields if f not in exclude_fields] - album_fields = [f for f in album_fields if f not in exclude_fields] - - # Walk through the items and pick up their changes. 
- affected_albums = set() - for item in items: - # Item deleted? - if not item.path or not os.path.exists(syspath(item.path)): - ui.print_(format(item)) - ui.print_(ui.colorize("text_error", " deleted")) - if not pretend: - item.remove(True) - affected_albums.add(item.album_id) - continue - - # Did the item change since last checked? - if item.current_mtime() <= item.mtime: - log.debug( - "skipping {0} because mtime is up to date ({1})", - displayable_path(item.path), - item.mtime, - ) - continue - - # Read new data. - try: - item.read() - except library.ReadError as exc: - log.error( - "error reading {0}: {1}", displayable_path(item.path), exc - ) - continue - - # Special-case album artist when it matches track artist. (Hacky - # but necessary for preserving album-level metadata for non- - # autotagged imports.) - if not item.albumartist: - old_item = lib.get_item(item.id) - if old_item.albumartist == old_item.artist == item.artist: - item.albumartist = old_item.albumartist - item._dirty.discard("albumartist") - - # Check for and display changes. - changed = ui.show_model_changes(item, fields=item_fields) - - # Save changes. - if not pretend: - if changed: - # Move the item if it's in the library. - if move and lib.directory in ancestry(item.path): - item.move(store=False) - - item.store(fields=item_fields) - affected_albums.add(item.album_id) - else: - # The file's mtime was different, but there were no - # changes to the metadata. Store the new mtime, - # which is set in the call to read(), so we don't - # check this again in the future. - item.store(fields=item_fields) - - # Skip album changes while pretending. - if pretend: - return - - # Modify affected albums to reflect changes in their items. - for album_id in affected_albums: - if album_id is None: # Singletons. - continue - album = lib.get_album(album_id) - if not album: # Empty albums have already been removed. 
- log.debug("emptied album {0}", album_id) - continue - first_item = album.items().get() - - # Update album structure to reflect an item in it. - for key in library.Album.item_keys: - album[key] = first_item[key] - album.store(fields=album_fields) - - # Move album art (and any inconsistent items). - if move and lib.directory in ancestry(first_item.path): - log.debug("moving album {0}", album_id) - - # Manually moving and storing the album. - items = list(album.items()) - for item in items: - item.move(store=False, with_album=False) - item.store(fields=item_fields) - album.move(store=False) - album.store(fields=album_fields) - - -def update_func(lib, opts, args): - # Verify that the library folder exists to prevent accidental wipes. - if not os.path.isdir(syspath(lib.directory)): - ui.print_("Library path is unavailable or does not exist.") - ui.print_(lib.directory) - if not ui.input_yn("Are you sure you want to continue (y/n)?", True): - return - update_items( - lib, - decargs(args), - opts.album, - ui.should_move(opts.move), - opts.pretend, - opts.fields, - opts.exclude_fields, - ) - - -update_cmd = ui.Subcommand( - "update", - help="update the library", - aliases=( - "upd", - "up", - ), -) -update_cmd.parser.add_album_option() -update_cmd.parser.add_format_option() -update_cmd.parser.add_option( - "-m", - "--move", - action="store_true", - dest="move", - help="move files in the library directory", -) -update_cmd.parser.add_option( - "-M", - "--nomove", - action="store_false", - dest="move", - help="don't move files in library", -) -update_cmd.parser.add_option( - "-p", - "--pretend", - action="store_true", - help="show all changes but do nothing", -) -update_cmd.parser.add_option( - "-F", - "--field", - default=None, - action="append", - dest="fields", - help="list of fields to update", -) -update_cmd.parser.add_option( - "-e", - "--exclude-field", - default=None, - action="append", - dest="exclude_fields", - help="list of fields to exclude from updates", -) 
-update_cmd.func = update_func -default_commands.append(update_cmd) - - -# remove: Remove items from library, delete files. - - -def remove_items(lib, query, album, delete, force): - """Remove items matching query from lib. If album, then match and - remove whole albums. If delete, also remove files from disk. - """ - # Get the matching items. - items, albums = _do_query(lib, query, album) - objs = albums if album else items - - # Confirm file removal if not forcing removal. - if not force: - # Prepare confirmation with user. - album_str = ( - " in {} album{}".format(len(albums), "s" if len(albums) > 1 else "") - if album - else "" - ) - - if delete: - fmt = "$path - $title" - prompt = "Really DELETE" - prompt_all = "Really DELETE {} file{}{}".format( - len(items), "s" if len(items) > 1 else "", album_str - ) - else: - fmt = "" - prompt = "Really remove from the library?" - prompt_all = "Really remove {} item{}{} from the library?".format( - len(items), "s" if len(items) > 1 else "", album_str - ) - - # Helpers for printing affected items - def fmt_track(t): - ui.print_(format(t, fmt)) - - def fmt_album(a): - ui.print_() - for i in a.items(): - fmt_track(i) - - fmt_obj = fmt_album if album else fmt_track - - # Show all the items. - for o in objs: - fmt_obj(o) - - # Confirm with user. - objs = ui.input_select_objects( - prompt, objs, fmt_obj, prompt_all=prompt_all - ) - - if not objs: - return - - # Remove (and possibly delete) items. 
- with lib.transaction(): - for obj in objs: - obj.remove(delete) - - -def remove_func(lib, opts, args): - remove_items(lib, decargs(args), opts.album, opts.delete, opts.force) - - -remove_cmd = ui.Subcommand( - "remove", help="remove matching items from the library", aliases=("rm",) -) -remove_cmd.parser.add_option( - "-d", "--delete", action="store_true", help="also remove files from disk" -) -remove_cmd.parser.add_option( - "-f", "--force", action="store_true", help="do not ask when removing items" -) -remove_cmd.parser.add_album_option() -remove_cmd.func = remove_func -default_commands.append(remove_cmd) - - -# stats: Show library/query statistics. - - -def show_stats(lib, query, exact): - """Shows some statistics about the matched items.""" - items = lib.items(query) - - total_size = 0 - total_time = 0.0 - total_items = 0 - artists = set() - albums = set() - album_artists = set() - - for item in items: - if exact: - try: - total_size += os.path.getsize(syspath(item.path)) - except OSError as exc: - log.info("could not get size of {}: {}", item.path, exc) - else: - total_size += int(item.length * item.bitrate / 8) - total_time += item.length - total_items += 1 - artists.add(item.artist) - album_artists.add(item.albumartist) - if item.album_id: - albums.add(item.album_id) - - size_str = "" + ui.human_bytes(total_size) - if exact: - size_str += f" ({total_size} bytes)" - - print_( - """Tracks: {} -Total time: {}{} -{}: {} -Artists: {} -Albums: {} -Album artists: {}""".format( - total_items, - ui.human_seconds(total_time), - f" ({total_time:.2f} seconds)" if exact else "", - "Total size" if exact else "Approximate total size", - size_str, - len(artists), - len(albums), - len(album_artists), - ), - ) - - -def stats_func(lib, opts, args): - show_stats(lib, decargs(args), opts.exact) - - -stats_cmd = ui.Subcommand( - "stats", help="show statistics about the library or a query" -) -stats_cmd.parser.add_option( - "-e", "--exact", action="store_true", help="exact size 
and time" -) -stats_cmd.func = stats_func -default_commands.append(stats_cmd) - - -# version: Show current beets version. - - -def show_version(lib, opts, args): - print_("beets version %s" % beets.__version__) - print_(f"Python version {python_version()}") - # Show plugins. - names = sorted(p.name for p in plugins.find_plugins()) - if names: - print_("plugins:", ", ".join(names)) - else: - print_("no plugins loaded") - - -version_cmd = ui.Subcommand("version", help="output version information") -version_cmd.func = show_version -default_commands.append(version_cmd) - - -# modify: Declaratively change metadata. - - -def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit): - """Modifies matching items according to user-specified assignments and - deletions. - - `mods` is a dictionary of field and value pairse indicating - assignments. `dels` is a list of fields to be deleted. - """ - # Parse key=value specifications into a dictionary. - model_cls = library.Album if album else library.Item - - # Get the items to modify. - items, albums = _do_query(lib, query, album, False) - objs = albums if album else items - - # Apply changes *temporarily*, preview them, and collect modified - # objects. - print_("Modifying {} {}s.".format(len(objs), "album" if album else "item")) - changed = [] - templates = { - key: functemplate.template(value) for key, value in mods.items() - } - for obj in objs: - obj_mods = { - key: model_cls._parse(key, obj.evaluate_template(templates[key])) - for key in mods.keys() - } - if print_and_modify(obj, obj_mods, dels) and obj not in changed: - changed.append(obj) - - # Still something to do? - if not changed: - print_("No changes to make.") - return - - # Confirm action. 
- if confirm: - if write and move: - extra = ", move and write tags" - elif write: - extra = " and write tags" - elif move: - extra = " and move" - else: - extra = "" - - changed = ui.input_select_objects( - "Really modify%s" % extra, - changed, - lambda o: print_and_modify(o, mods, dels), - ) - - # Apply changes to database and files - with lib.transaction(): - for obj in changed: - obj.try_sync(write, move, inherit) - - -def print_and_modify(obj, mods, dels): - """Print the modifications to an item and return a bool indicating - whether any changes were made. - - `mods` is a dictionary of fields and values to update on the object; - `dels` is a sequence of fields to delete. - """ - obj.update(mods) - for field in dels: - try: - del obj[field] - except KeyError: - pass - return ui.show_model_changes(obj) - - -def modify_parse_args(args): - """Split the arguments for the modify subcommand into query parts, - assignments (field=value), and deletions (field!). Returns the result as - a three-tuple in that order. - """ - mods = {} - dels = [] - query = [] - for arg in args: - if arg.endswith("!") and "=" not in arg and ":" not in arg: - dels.append(arg[:-1]) # Strip trailing !. 
- elif "=" in arg and ":" not in arg.split("=", 1)[0]: - key, val = arg.split("=", 1) - mods[key] = val - else: - query.append(arg) - return query, mods, dels - - -def modify_func(lib, opts, args): - query, mods, dels = modify_parse_args(decargs(args)) - if not mods and not dels: - raise ui.UserError("no modifications specified") - modify_items( - lib, - mods, - dels, - query, - ui.should_write(opts.write), - ui.should_move(opts.move), - opts.album, - not opts.yes, - opts.inherit, - ) - - -modify_cmd = ui.Subcommand( - "modify", help="change metadata fields", aliases=("mod",) -) -modify_cmd.parser.add_option( - "-m", - "--move", - action="store_true", - dest="move", - help="move files in the library directory", -) -modify_cmd.parser.add_option( - "-M", - "--nomove", - action="store_false", - dest="move", - help="don't move files in library", -) -modify_cmd.parser.add_option( - "-w", - "--write", - action="store_true", - default=None, - help="write new metadata to files' tags (default)", -) -modify_cmd.parser.add_option( - "-W", - "--nowrite", - action="store_false", - dest="write", - help="don't write metadata (opposite of -w)", -) -modify_cmd.parser.add_album_option() -modify_cmd.parser.add_format_option(target="item") -modify_cmd.parser.add_option( - "-y", "--yes", action="store_true", help="skip confirmation" -) -modify_cmd.parser.add_option( - "-I", - "--noinherit", - action="store_false", - dest="inherit", - default=True, - help="when modifying albums, don't also change item data", -) -modify_cmd.func = modify_func -default_commands.append(modify_cmd) - - -# move: Move/copy files to the library or a new base directory. - - -def move_items( - lib, dest, query, copy, album, pretend, confirm=False, export=False -): - """Moves or copies items to a new base directory, given by dest. If - dest is None, then the library's base directory is used, making the - command "consolidate" files. 
- """ - items, albums = _do_query(lib, query, album, False) - objs = albums if album else items - num_objs = len(objs) - - # Filter out files that don't need to be moved. - def isitemmoved(item): - return item.path != item.destination(basedir=dest) - - def isalbummoved(album): - return any(isitemmoved(i) for i in album.items()) - - objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] - num_unmoved = num_objs - len(objs) - # Report unmoved files that match the query. - unmoved_msg = "" - if num_unmoved > 0: - unmoved_msg = f" ({num_unmoved} already in place)" - - copy = copy or export # Exporting always copies. - action = "Copying" if copy else "Moving" - act = "copy" if copy else "move" - entity = "album" if album else "item" - log.info( - "{0} {1} {2}{3}{4}.", - action, - len(objs), - entity, - "s" if len(objs) != 1 else "", - unmoved_msg, - ) - if not objs: - return - - if pretend: - if album: - show_path_changes( - [ - (item.path, item.destination(basedir=dest)) - for obj in objs - for item in obj.items() - ] - ) - else: - show_path_changes( - [(obj.path, obj.destination(basedir=dest)) for obj in objs] - ) - else: - if confirm: - objs = ui.input_select_objects( - "Really %s" % act, - objs, - lambda o: show_path_changes( - [(o.path, o.destination(basedir=dest))] - ), - ) - - for obj in objs: - log.debug("moving: {0}", util.displayable_path(obj.path)) - - if export: - # Copy without affecting the database. - obj.move( - operation=MoveOperation.COPY, basedir=dest, store=False - ) - else: - # Ordinary move/copy: store the new path. 
- if copy: - obj.move(operation=MoveOperation.COPY, basedir=dest) - else: - obj.move(operation=MoveOperation.MOVE, basedir=dest) - - -def move_func(lib, opts, args): - dest = opts.dest - if dest is not None: - dest = normpath(dest) - if not os.path.isdir(syspath(dest)): - raise ui.UserError( - "no such directory: {}".format(displayable_path(dest)) - ) - - move_items( - lib, - dest, - decargs(args), - opts.copy, - opts.album, - opts.pretend, - opts.timid, - opts.export, - ) - - -move_cmd = ui.Subcommand("move", help="move or copy items", aliases=("mv",)) -move_cmd.parser.add_option( - "-d", "--dest", metavar="DIR", dest="dest", help="destination directory" -) -move_cmd.parser.add_option( - "-c", - "--copy", - default=False, - action="store_true", - help="copy instead of moving", -) -move_cmd.parser.add_option( - "-p", - "--pretend", - default=False, - action="store_true", - help="show how files would be moved, but don't touch anything", -) -move_cmd.parser.add_option( - "-t", - "--timid", - dest="timid", - action="store_true", - help="always confirm all actions", -) -move_cmd.parser.add_option( - "-e", - "--export", - default=False, - action="store_true", - help="copy without changing the database path", -) -move_cmd.parser.add_album_option() -move_cmd.func = move_func -default_commands.append(move_cmd) - - -# write: Write tags into files. - - -def write_items(lib, query, pretend, force): - """Write tag information from the database to the respective files - in the filesystem. - """ - items, albums = _do_query(lib, query, False, False) - - for item in items: - # Item deleted? - if not os.path.exists(syspath(item.path)): - log.info("missing file: {0}", util.displayable_path(item.path)) - continue - - # Get an Item object reflecting the "clean" (on-disk) state. 
- try: - clean_item = library.Item.from_path(item.path) - except library.ReadError as exc: - log.error( - "error reading {0}: {1}", displayable_path(item.path), exc - ) - continue - - # Check for and display changes. - changed = ui.show_model_changes( - item, clean_item, library.Item._media_tag_fields, force - ) - if (changed or force) and not pretend: - # We use `try_sync` here to keep the mtime up to date in the - # database. - item.try_sync(True, False) - - -def write_func(lib, opts, args): - write_items(lib, decargs(args), opts.pretend, opts.force) - - -write_cmd = ui.Subcommand("write", help="write tag information to files") -write_cmd.parser.add_option( - "-p", - "--pretend", - action="store_true", - help="show all changes but do nothing", -) -write_cmd.parser.add_option( - "-f", - "--force", - action="store_true", - help="write tags even if the existing tags match the database", -) -write_cmd.func = write_func -default_commands.append(write_cmd) - - -# config: Show and edit user configuration. - - -def config_func(lib, opts, args): - # Make sure lazy configuration is loaded - config.resolve() - - # Print paths. - if opts.paths: - filenames = [] - for source in config.sources: - if not opts.defaults and source.default: - continue - if source.filename: - filenames.append(source.filename) - - # In case the user config file does not exist, prepend it to the - # list. - user_path = config.user_config_path() - if user_path not in filenames: - filenames.insert(0, user_path) - - for filename in filenames: - print_(displayable_path(filename)) - - # Open in editor. - elif opts.edit: - config_edit() - - # Dump configuration. - else: - config_out = config.dump(full=opts.defaults, redact=opts.redact) - if config_out.strip() != "{}": - print_(config_out) - else: - print("Empty configuration") - - -def config_edit(): - """Open a program to edit the user configuration. - An empty config file is created if no existing config file exists. 
- """ - path = config.user_config_path() - editor = util.editor_command() - try: - if not os.path.isfile(path): - open(path, "w+").close() - util.interactive_open([path], editor) - except OSError as exc: - message = f"Could not edit configuration: {exc}" - if not editor: - message += ( - ". Please set the VISUAL (or EDITOR) environment variable" - ) - raise ui.UserError(message) - - -config_cmd = ui.Subcommand("config", help="show or edit the user configuration") -config_cmd.parser.add_option( - "-p", - "--paths", - action="store_true", - help="show files that configuration was loaded from", -) -config_cmd.parser.add_option( - "-e", - "--edit", - action="store_true", - help="edit user configuration with $VISUAL (or $EDITOR)", -) -config_cmd.parser.add_option( - "-d", - "--defaults", - action="store_true", - help="include the default configuration", -) -config_cmd.parser.add_option( - "-c", - "--clear", - action="store_false", - dest="redact", - default=True, - help="do not redact sensitive fields", -) -config_cmd.func = config_func -default_commands.append(config_cmd) - - -# completion: print completion script - - -def print_completion(*args): - for line in completion_script(default_commands + plugins.commands()): - print_(line, end="") - if not any(os.path.isfile(syspath(p)) for p in BASH_COMPLETION_PATHS): - log.warning( - "Warning: Unable to find the bash-completion package. " - "Command line completion might not work." - ) - - -BASH_COMPLETION_PATHS = [ - b"/etc/bash_completion", - b"/usr/share/bash-completion/bash_completion", - b"/usr/local/share/bash-completion/bash_completion", - # SmartOS - b"/opt/local/share/bash-completion/bash_completion", - # Homebrew (before bash-completion2) - b"/usr/local/etc/bash_completion", -] - - -def completion_script(commands): - """Yield the full completion shell script as strings. - - ``commands`` is alist of ``ui.Subcommand`` instances to generate - completion data for. 
- """ - base_script = os.path.join(os.path.dirname(__file__), "completion_base.sh") - with open(base_script) as base_script: - yield base_script.read() - - options = {} - aliases = {} - command_names = [] - - # Collect subcommands - for cmd in commands: - name = cmd.name - command_names.append(name) - - for alias in cmd.aliases: - if re.match(r"^\w+$", alias): - aliases[alias] = name - - options[name] = {"flags": [], "opts": []} - for opts in cmd.parser._get_all_options()[1:]: - if opts.action in ("store_true", "store_false"): - option_type = "flags" - else: - option_type = "opts" - - options[name][option_type].extend( - opts._short_opts + opts._long_opts - ) - - # Add global options - options["_global"] = { - "flags": ["-v", "--verbose"], - "opts": "-l --library -c --config -d --directory -h --help".split(" "), - } - - # Add flags common to all commands - options["_common"] = {"flags": ["-h", "--help"]} - - # Start generating the script - yield "_beet() {\n" - - # Command names - yield " local commands='%s'\n" % " ".join(command_names) - yield "\n" - - # Command aliases - yield " local aliases='%s'\n" % " ".join(aliases.keys()) - for alias, cmd in aliases.items(): - yield " local alias__{}={}\n".format(alias.replace("-", "_"), cmd) - yield "\n" - - # Fields - yield " fields='%s'\n" % " ".join( - set( - list(library.Item._fields.keys()) - + list(library.Album._fields.keys()) - ) - ) - - # Command options - for cmd, opts in options.items(): - for option_type, option_list in opts.items(): - if option_list: - option_list = " ".join(option_list) - yield " local {}__{}='{}'\n".format( - option_type, cmd.replace("-", "_"), option_list - ) - - yield " _beet_dispatch\n" - yield "}\n" - - -completion_cmd = ui.Subcommand( - "completion", - help="print shell script that provides command line completion", -) -completion_cmd.func = print_completion -completion_cmd.hide = True -default_commands.append(completion_cmd) diff --git a/beets/ui/commands/__init__.py 
b/beets/ui/commands/__init__.py new file mode 100644 index 000000000..e1d0389a3 --- /dev/null +++ b/beets/ui/commands/__init__.py @@ -0,0 +1,67 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""This module provides the default commands for beets' command-line +interface. +""" + +from beets.util.deprecation import deprecate_imports + +from .completion import completion_cmd +from .config import config_cmd +from .fields import fields_cmd +from .help import HelpCommand +from .import_ import import_cmd +from .list import list_cmd +from .modify import modify_cmd +from .move import move_cmd +from .remove import remove_cmd +from .stats import stats_cmd +from .update import update_cmd +from .version import version_cmd +from .write import write_cmd + + +def __getattr__(name: str): + """Handle deprecated imports.""" + return deprecate_imports( + __name__, + { + "TerminalImportSession": "beets.ui.commands.import_.session", + "PromptChoice": "beets.util", + }, + name, + ) + + +# The list of default subcommands. This is populated with Subcommand +# objects that can be fed to a SubcommandsOptionParser. 
+default_commands = [ + fields_cmd, + HelpCommand(), + import_cmd, + list_cmd, + update_cmd, + remove_cmd, + stats_cmd, + version_cmd, + modify_cmd, + move_cmd, + write_cmd, + config_cmd, + completion_cmd, +] + + +__all__ = ["default_commands"] diff --git a/beets/ui/commands/completion.py b/beets/ui/commands/completion.py new file mode 100644 index 000000000..776c389b4 --- /dev/null +++ b/beets/ui/commands/completion.py @@ -0,0 +1,117 @@ +"""The 'completion' command: print shell script for command line completion.""" + +import os +import re + +from beets import library, logging, plugins, ui +from beets.util import syspath + +# Global logger. +log = logging.getLogger("beets") + + +def print_completion(*args): + from beets.ui.commands import default_commands + + for line in completion_script(default_commands + plugins.commands()): + ui.print_(line, end="") + if not any(os.path.isfile(syspath(p)) for p in BASH_COMPLETION_PATHS): + log.warning( + "Warning: Unable to find the bash-completion package. " + "Command line completion might not work." + ) + + +completion_cmd = ui.Subcommand( + "completion", + help="print shell script that provides command line completion", +) +completion_cmd.func = print_completion +completion_cmd.hide = True + + +BASH_COMPLETION_PATHS = [ + b"/etc/bash_completion", + b"/usr/share/bash-completion/bash_completion", + b"/usr/local/share/bash-completion/bash_completion", + # SmartOS + b"/opt/local/share/bash-completion/bash_completion", + # Homebrew (before bash-completion2) + b"/usr/local/etc/bash_completion", +] + + +def completion_script(commands): + """Yield the full completion shell script as strings. + + ``commands`` is alist of ``ui.Subcommand`` instances to generate + completion data for. 
+ """ + base_script = os.path.join( + os.path.dirname(__file__), "./completion_base.sh" + ) + with open(base_script) as base_script: + yield base_script.read() + + options = {} + aliases = {} + command_names = [] + + # Collect subcommands + for cmd in commands: + name = cmd.name + command_names.append(name) + + for alias in cmd.aliases: + if re.match(r"^\w+$", alias): + aliases[alias] = name + + options[name] = {"flags": [], "opts": []} + for opts in cmd.parser._get_all_options()[1:]: + if opts.action in ("store_true", "store_false"): + option_type = "flags" + else: + option_type = "opts" + + options[name][option_type].extend( + opts._short_opts + opts._long_opts + ) + + # Add global options + options["_global"] = { + "flags": ["-v", "--verbose"], + "opts": "-l --library -c --config -d --directory -h --help".split(" "), + } + + # Add flags common to all commands + options["_common"] = {"flags": ["-h", "--help"]} + + # Start generating the script + yield "_beet() {\n" + + # Command names + yield f" local commands={' '.join(command_names)!r}\n" + yield "\n" + + # Command aliases + yield f" local aliases={' '.join(aliases.keys())!r}\n" + for alias, cmd in aliases.items(): + yield f" local alias__{alias.replace('-', '_')}={cmd}\n" + yield "\n" + + # Fields + fields = library.Item._fields.keys() | library.Album._fields.keys() + yield f" fields={' '.join(fields)!r}\n" + + # Command options + for cmd, opts in options.items(): + for option_type, option_list in opts.items(): + if option_list: + option_list = " ".join(option_list) + yield ( + " local" + f" {option_type}__{cmd.replace('-', '_')}='{option_list}'\n" + ) + + yield " _beet_dispatch\n" + yield "}\n" diff --git a/beets/ui/completion_base.sh b/beets/ui/commands/completion_base.sh similarity index 100% rename from beets/ui/completion_base.sh rename to beets/ui/commands/completion_base.sh diff --git a/beets/ui/commands/config.py b/beets/ui/commands/config.py new file mode 100644 index 000000000..15d571324 --- 
/dev/null +++ b/beets/ui/commands/config.py @@ -0,0 +1,93 @@ +"""The 'config' command: show and edit user configuration.""" + +import os + +from beets import config, ui +from beets.util import displayable_path, editor_command, interactive_open + + +def config_func(lib, opts, args): + # Make sure lazy configuration is loaded + config.resolve() + + # Print paths. + if opts.paths: + filenames = [] + for source in config.sources: + if not opts.defaults and source.default: + continue + if source.filename: + filenames.append(source.filename) + + # In case the user config file does not exist, prepend it to the + # list. + user_path = config.user_config_path() + if user_path not in filenames: + filenames.insert(0, user_path) + + for filename in filenames: + ui.print_(displayable_path(filename)) + + # Open in editor. + elif opts.edit: + # Note: This branch *should* be unreachable + # since the normal flow should be short-circuited + # by the special case in ui._raw_main + config_edit(opts) + + # Dump configuration. + else: + config_out = config.dump(full=opts.defaults, redact=opts.redact) + if config_out.strip() != "{}": + ui.print_(config_out) + else: + print("Empty configuration") + + +def config_edit(cli_options): + """Open a program to edit the user configuration. + An empty config file is created if no existing config file exists. + """ + path = cli_options.config or config.user_config_path() + editor = editor_command() + try: + if not os.path.isfile(path): + open(path, "w+").close() + interactive_open([path], editor) + except OSError as exc: + message = f"Could not edit configuration: {exc}" + if not editor: + message += ( + ". 
Please set the VISUAL (or EDITOR) environment variable" + ) + raise ui.UserError(message) + + +config_cmd = ui.Subcommand("config", help="show or edit the user configuration") +config_cmd.parser.add_option( + "-p", + "--paths", + action="store_true", + help="show files that configuration was loaded from", +) +config_cmd.parser.add_option( + "-e", + "--edit", + action="store_true", + help="edit user configuration with $VISUAL (or $EDITOR)", +) +config_cmd.parser.add_option( + "-d", + "--defaults", + action="store_true", + help="include the default configuration", +) +config_cmd.parser.add_option( + "-c", + "--clear", + action="store_false", + dest="redact", + default=True, + help="do not redact sensitive fields", +) +config_cmd.func = config_func diff --git a/beets/ui/commands/fields.py b/beets/ui/commands/fields.py new file mode 100644 index 000000000..de8f89103 --- /dev/null +++ b/beets/ui/commands/fields.py @@ -0,0 +1,41 @@ +"""The `fields` command: show available fields for queries and format strings.""" + +import textwrap + +from beets import library, ui + + +def _print_keys(query): + """Given a SQLite query result, print the `key` field of each + returned row, with indentation of 2 spaces. 
+ """ + for row in query: + ui.print_(f" {row['key']}") + + +def fields_func(lib, opts, args): + def _print_rows(names): + names.sort() + ui.print_(textwrap.indent("\n".join(names), " ")) + + ui.print_("Item fields:") + _print_rows(library.Item.all_keys()) + + ui.print_("Album fields:") + _print_rows(library.Album.all_keys()) + + with lib.transaction() as tx: + # The SQL uses the DISTINCT to get unique values from the query + unique_fields = "SELECT DISTINCT key FROM ({})" + + ui.print_("Item flexible attributes:") + _print_keys(tx.query(unique_fields.format(library.Item._flex_table))) + + ui.print_("Album flexible attributes:") + _print_keys(tx.query(unique_fields.format(library.Album._flex_table))) + + +fields_cmd = ui.Subcommand( + "fields", help="show fields available for queries and format strings" +) +fields_cmd.func = fields_func diff --git a/beets/ui/commands/help.py b/beets/ui/commands/help.py new file mode 100644 index 000000000..345f94c67 --- /dev/null +++ b/beets/ui/commands/help.py @@ -0,0 +1,22 @@ +"""The 'help' command: show help information for commands.""" + +from beets import ui + + +class HelpCommand(ui.Subcommand): + def __init__(self): + super().__init__( + "help", + aliases=("?",), + help="give detailed help on a specific sub-command", + ) + + def func(self, lib, opts, args): + if args: + cmdname = args[0] + helpcommand = self.root_parser._subcommand_for_name(cmdname) + if not helpcommand: + raise ui.UserError(f"unknown command '{cmdname}'") + helpcommand.print_help() + else: + self.root_parser.print_help() diff --git a/beets/ui/commands/import_/__init__.py b/beets/ui/commands/import_/__init__.py new file mode 100644 index 000000000..b2991f183 --- /dev/null +++ b/beets/ui/commands/import_/__init__.py @@ -0,0 +1,341 @@ +"""The `import` command: import new music into the library.""" + +import os + +from beets import config, logging, plugins, ui +from beets.util import displayable_path, normpath, syspath + +from .session import 
TerminalImportSession + +# Global logger. +log = logging.getLogger("beets") + + +def paths_from_logfile(path): + """Parse the logfile and yield skipped paths to pass to the `import` + command. + """ + with open(path, encoding="utf-8") as fp: + for i, line in enumerate(fp, start=1): + verb, sep, paths = line.rstrip("\n").partition(" ") + if not sep: + raise ValueError(f"line {i} is invalid") + + # Ignore informational lines that don't need to be re-imported. + if verb in {"import", "duplicate-keep", "duplicate-replace"}: + continue + + if verb not in {"asis", "skip", "duplicate-skip"}: + raise ValueError(f"line {i} contains unknown verb {verb}") + + yield os.path.commonpath(paths.split("; ")) + + +def parse_logfiles(logfiles): + """Parse all `logfiles` and yield paths from it.""" + for logfile in logfiles: + try: + yield from paths_from_logfile(syspath(normpath(logfile))) + except ValueError as err: + raise ui.UserError( + f"malformed logfile {displayable_path(logfile)}: {err}" + ) from err + except OSError as err: + raise ui.UserError( + f"unreadable logfile {displayable_path(logfile)}: {err}" + ) from err + + +def import_files(lib, paths: list[bytes], query): + """Import the files in the given list of paths or matching the + query. + """ + # Check parameter consistency. + if config["import"]["quiet"] and config["import"]["timid"]: + raise ui.UserError("can't be both quiet and timid") + + # Open the log. + if config["import"]["log"].get() is not None: + logpath = syspath(config["import"]["log"].as_filename()) + try: + loghandler = logging.FileHandler(logpath, encoding="utf-8") + except OSError: + raise ui.UserError( + "Could not open log file for writing:" + f" {displayable_path(logpath)}" + ) + else: + loghandler = None + + # Never ask for input in quiet mode. 
+ if config["import"]["resume"].get() == "ask" and config["import"]["quiet"]: + config["import"]["resume"] = False + + session = TerminalImportSession(lib, loghandler, paths, query) + session.run() + + # Emit event. + plugins.send("import", lib=lib, paths=paths) + + +def import_func(lib, opts, args: list[str]): + config["import"].set_args(opts) + + # Special case: --copy flag suppresses import_move (which would + # otherwise take precedence). + if opts.copy: + config["import"]["move"] = False + + if opts.library: + query = args + byte_paths = [] + else: + query = None + paths = args + + # The paths from the logfiles go into a separate list to allow handling + # errors differently from user-specified paths. + paths_from_logfiles = list(parse_logfiles(opts.from_logfiles or [])) + + if not paths and not paths_from_logfiles: + raise ui.UserError("no path specified") + + byte_paths = [os.fsencode(p) for p in paths] + paths_from_logfiles = [os.fsencode(p) for p in paths_from_logfiles] + + # Check the user-specified directories. + for path in byte_paths: + if not os.path.exists(syspath(normpath(path))): + raise ui.UserError( + f"no such file or directory: {displayable_path(path)}" + ) + + # Check the directories from the logfiles, but don't throw an error in + # case those paths don't exist. Maybe some of those paths have already + # been imported and moved separately, so logging a warning should + # suffice. + for path in paths_from_logfiles: + if not os.path.exists(syspath(normpath(path))): + log.warning( + "No such file or directory: {}", displayable_path(path) + ) + continue + + byte_paths.append(path) + + # If all paths were read from a logfile, and none of them exist, throw + # an error + if not byte_paths: + raise ui.UserError("none of the paths are importable") + + import_files(lib, byte_paths, query) + + +def _store_dict(option, opt_str, value, parser): + """Custom action callback to parse options which have ``key=value`` + pairs as values. 
All such pairs passed for this option are + aggregated into a dictionary. + """ + dest = option.dest + option_values = getattr(parser.values, dest, None) + + if option_values is None: + # This is the first supplied ``key=value`` pair of option. + # Initialize empty dictionary and get a reference to it. + setattr(parser.values, dest, {}) + option_values = getattr(parser.values, dest) + + try: + key, value = value.split("=", 1) + if not (key and value): + raise ValueError + except ValueError: + raise ui.UserError( + f"supplied argument `{value}' is not of the form `key=value'" + ) + + option_values[key] = value + + +import_cmd = ui.Subcommand( + "import", help="import new music", aliases=("imp", "im") +) +import_cmd.parser.add_option( + "-c", + "--copy", + action="store_true", + default=None, + help="copy tracks into library directory (default)", +) +import_cmd.parser.add_option( + "-C", + "--nocopy", + action="store_false", + dest="copy", + help="don't copy tracks (opposite of -c)", +) +import_cmd.parser.add_option( + "-m", + "--move", + action="store_true", + dest="move", + help="move tracks into the library (overrides -c)", +) +import_cmd.parser.add_option( + "-w", + "--write", + action="store_true", + default=None, + help="write new metadata to files' tags (default)", +) +import_cmd.parser.add_option( + "-W", + "--nowrite", + action="store_false", + dest="write", + help="don't write metadata (opposite of -w)", +) +import_cmd.parser.add_option( + "-a", + "--autotag", + action="store_true", + dest="autotag", + help="infer tags for imported files (default)", +) +import_cmd.parser.add_option( + "-A", + "--noautotag", + action="store_false", + dest="autotag", + help="don't infer tags for imported files (opposite of -a)", +) +import_cmd.parser.add_option( + "-p", + "--resume", + action="store_true", + default=None, + help="resume importing if interrupted", +) +import_cmd.parser.add_option( + "-P", + "--noresume", + action="store_false", + dest="resume", + help="do not 
try to resume importing", +) +import_cmd.parser.add_option( + "-q", + "--quiet", + action="store_true", + dest="quiet", + help="never prompt for input: skip albums instead", +) +import_cmd.parser.add_option( + "--quiet-fallback", + type="string", + dest="quiet_fallback", + help="decision in quiet mode when no strong match: skip or asis", +) +import_cmd.parser.add_option( + "-l", + "--log", + dest="log", + help="file to log untaggable albums for later review", +) +import_cmd.parser.add_option( + "-s", + "--singletons", + action="store_true", + help="import individual tracks instead of full albums", +) +import_cmd.parser.add_option( + "-t", + "--timid", + dest="timid", + action="store_true", + help="always confirm all actions", +) +import_cmd.parser.add_option( + "-L", + "--library", + dest="library", + action="store_true", + help="retag items matching a query", +) +import_cmd.parser.add_option( + "-i", + "--incremental", + dest="incremental", + action="store_true", + help="skip already-imported directories", +) +import_cmd.parser.add_option( + "-I", + "--noincremental", + dest="incremental", + action="store_false", + help="do not skip already-imported directories", +) +import_cmd.parser.add_option( + "-R", + "--incremental-skip-later", + action="store_true", + dest="incremental_skip_later", + help="do not record skipped files during incremental import", +) +import_cmd.parser.add_option( + "-r", + "--noincremental-skip-later", + action="store_false", + dest="incremental_skip_later", + help="record skipped files during incremental import", +) +import_cmd.parser.add_option( + "--from-scratch", + dest="from_scratch", + action="store_true", + help="erase existing metadata before applying new metadata", +) +import_cmd.parser.add_option( + "--flat", + dest="flat", + action="store_true", + help="import an entire tree as a single album", +) +import_cmd.parser.add_option( + "-g", + "--group-albums", + dest="group_albums", + action="store_true", + help="group tracks in a 
folder into separate albums", +) +import_cmd.parser.add_option( + "--pretend", + dest="pretend", + action="store_true", + help="just print the files to import", +) +import_cmd.parser.add_option( + "-S", + "--search-id", + dest="search_ids", + action="append", + metavar="ID", + help="restrict matching to a specific metadata backend ID", +) +import_cmd.parser.add_option( + "--from-logfile", + dest="from_logfiles", + action="append", + metavar="PATH", + help="read skipped paths from an existing logfile", +) +import_cmd.parser.add_option( + "--set", + dest="set_fields", + action="callback", + callback=_store_dict, + metavar="FIELD=VALUE", + help="set the given fields to the supplied values", +) +import_cmd.func = import_func diff --git a/beets/ui/commands/import_/display.py b/beets/ui/commands/import_/display.py new file mode 100644 index 000000000..113462d19 --- /dev/null +++ b/beets/ui/commands/import_/display.py @@ -0,0 +1,566 @@ +from __future__ import annotations + +import os +from dataclasses import dataclass +from functools import cached_property +from typing import TYPE_CHECKING, TypedDict + +from typing_extensions import NotRequired + +from beets import autotag, config, ui +from beets.autotag import hooks +from beets.util import displayable_path +from beets.util.units import human_seconds_short + +if TYPE_CHECKING: + from collections.abc import Sequence + + import confuse + + from beets.autotag.distance import Distance + from beets.library.models import Item + from beets.ui import ColorName + +VARIOUS_ARTISTS = "Various Artists" + + +class Side(TypedDict): + prefix: str + contents: str + suffix: str + width: NotRequired[int] + + +@dataclass +class ChangeRepresentation: + """Keeps track of all information needed to generate a (colored) text + representation of the changes that will be made if an album or singleton's + tags are changed according to `match`, which must be an AlbumMatch or + TrackMatch object, accordingly. 
+ """ + + cur_artist: str + cur_name: str + match: autotag.hooks.Match + + @cached_property + def changed_prefix(self) -> str: + return ui.colorize("changed", "\u2260") + + @cached_property + def _indentation_config(self) -> confuse.Subview: + return config["ui"]["import"]["indentation"] + + @cached_property + def indent_header(self) -> str: + return ui.indent(self._indentation_config["match_header"].as_number()) + + @cached_property + def indent_detail(self) -> str: + return ui.indent(self._indentation_config["match_details"].as_number()) + + @cached_property + def indent_tracklist(self) -> str: + return ui.indent( + self._indentation_config["match_tracklist"].as_number() + ) + + @cached_property + def layout(self) -> int: + return config["ui"]["import"]["layout"].as_choice( + {"column": 0, "newline": 1} + ) + + def print_layout( + self, + indent: str, + left: Side, + right: Side, + separator: str = " -> ", + max_width: int | None = None, + ) -> None: + if not max_width: + # If no max_width provided, use terminal width + max_width = ui.term_width() + if self.layout == 0: + ui.print_column_layout(indent, left, right, separator, max_width) + else: + ui.print_newline_layout(indent, left, right, separator, max_width) + + def show_match_header(self) -> None: + """Print out a 'header' identifying the suggested match (album name, + artist name,...) and summarizing the changes that would be made should + the user accept the match. + """ + # Print newline at beginning of change block. + ui.print_("") + + # 'Match' line and similarity. + ui.print_( + f"{self.indent_header}Match ({dist_string(self.match.distance)}):" + ) + + artist_name_str = f"{self.match.info.artist} - {self.match.info.name}" + ui.print_( + self.indent_header + + dist_colorize(artist_name_str, self.match.distance) + ) + + # Penalties. + penalties = penalty_string(self.match.distance) + if penalties: + ui.print_(f"{self.indent_header}{penalties}") + + # Disambiguation. 
+ disambig = disambig_string(self.match.info) + if disambig: + ui.print_(f"{self.indent_header}{disambig}") + + # Data URL. + if self.match.info.data_url: + url = ui.colorize("text_faint", f"{self.match.info.data_url}") + ui.print_(f"{self.indent_header}{url}") + + def show_match_details(self) -> None: + """Print out the details of the match, including changes in album name + and artist name. + """ + # Artist. + artist_l, artist_r = self.cur_artist or "", self.match.info.artist + if artist_r == VARIOUS_ARTISTS: + # Hide artists for VA releases. + artist_l, artist_r = "", "" + left: Side + right: Side + if artist_l != artist_r: + artist_l, artist_r = ui.colordiff(artist_l, artist_r) + left = { + "prefix": f"{self.changed_prefix} Artist: ", + "contents": artist_l, + "suffix": "", + } + right = {"prefix": "", "contents": artist_r, "suffix": ""} + self.print_layout(self.indent_detail, left, right) + + else: + ui.print_(f"{self.indent_detail}*", "Artist:", artist_r) + + if self.cur_name: + type_ = self.match.type + name_l, name_r = self.cur_name or "", self.match.info.name + if self.cur_name != self.match.info.name != VARIOUS_ARTISTS: + name_l, name_r = ui.colordiff(name_l, name_r) + left = { + "prefix": f"{self.changed_prefix} {type_}: ", + "contents": name_l, + "suffix": "", + } + right = {"prefix": "", "contents": name_r, "suffix": ""} + self.print_layout(self.indent_detail, left, right) + else: + ui.print_(f"{self.indent_detail}*", f"{type_}:", name_r) + + def make_medium_info_line(self, track_info: hooks.TrackInfo) -> str: + """Construct a line with the current medium's info.""" + track_media = track_info.get("media", "Media") + # Build output string. 
+ if self.match.info.mediums > 1 and track_info.disctitle: + return ( + f"* {track_media} {track_info.medium}: {track_info.disctitle}" + ) + elif self.match.info.mediums > 1: + return f"* {track_media} {track_info.medium}" + elif track_info.disctitle: + return f"* {track_media}: {track_info.disctitle}" + else: + return "" + + def format_index(self, track_info: hooks.TrackInfo | Item) -> str: + """Return a string representing the track index of the given + TrackInfo or Item object. + """ + if isinstance(track_info, hooks.TrackInfo): + index = track_info.index + medium_index = track_info.medium_index + medium = track_info.medium + mediums = self.match.info.mediums + else: + index = medium_index = track_info.track + medium = track_info.disc + mediums = track_info.disctotal + if config["per_disc_numbering"]: + if mediums and mediums > 1: + return f"{medium}-{medium_index}" + else: + return str(medium_index if medium_index is not None else index) + else: + return str(index) + + def make_track_numbers( + self, item: Item, track_info: hooks.TrackInfo + ) -> tuple[str, str, bool]: + """Format colored track indices.""" + cur_track = self.format_index(item) + new_track = self.format_index(track_info) + changed = False + # Choose color based on change. + highlight_color: ColorName + if cur_track != new_track: + changed = True + if item.track in (track_info.index, track_info.medium_index): + highlight_color = "text_highlight_minor" + else: + highlight_color = "text_highlight" + else: + highlight_color = "text_faint" + + lhs_track = ui.colorize(highlight_color, f"(#{cur_track})") + rhs_track = ui.colorize(highlight_color, f"(#{new_track})") + return lhs_track, rhs_track, changed + + @staticmethod + def make_track_titles( + item: Item, track_info: hooks.TrackInfo + ) -> tuple[str, str, bool]: + """Format colored track titles.""" + new_title = track_info.name + if not item.title.strip(): + # If there's no title, we use the filename. Don't colordiff. 
+ cur_title = displayable_path(os.path.basename(item.path)) + return cur_title, new_title, True + else: + # If there is a title, highlight differences. + cur_title = item.title.strip() + cur_col, new_col = ui.colordiff(cur_title, new_title) + return cur_col, new_col, cur_title != new_title + + @staticmethod + def make_track_lengths( + item: Item, track_info: hooks.TrackInfo + ) -> tuple[str, str, bool]: + """Format colored track lengths.""" + changed = False + highlight_color: ColorName + if ( + item.length + and track_info.length + and abs(item.length - track_info.length) + >= config["ui"]["length_diff_thresh"].as_number() + ): + highlight_color = "text_highlight" + changed = True + else: + highlight_color = "text_highlight_minor" + + # Handle nonetype lengths by setting to 0 + cur_length0 = item.length if item.length else 0 + new_length0 = track_info.length if track_info.length else 0 + # format into string + cur_length = f"({human_seconds_short(cur_length0)})" + new_length = f"({human_seconds_short(new_length0)})" + # colorize + lhs_length = ui.colorize(highlight_color, cur_length) + rhs_length = ui.colorize(highlight_color, new_length) + + return lhs_length, rhs_length, changed + + def make_line( + self, item: Item, track_info: hooks.TrackInfo + ) -> tuple[Side, Side]: + """Extract changes from item -> new TrackInfo object, and colorize + appropriately. Returns (lhs, rhs) for column printing. + """ + # Track titles. + lhs_title, rhs_title, diff_title = self.make_track_titles( + item, track_info + ) + # Track number change. + lhs_track, rhs_track, diff_track = self.make_track_numbers( + item, track_info + ) + # Length change. + lhs_length, rhs_length, diff_length = self.make_track_lengths( + item, track_info + ) + + changed = diff_title or diff_track or diff_length + + # Construct lhs and rhs dicts. + # Previously, we printed the penalties, however this is no longer + # the case, thus the 'info' dictionary is unneeded. 
+ # penalties = penalty_string(self.match.distance.tracks[track_info]) + + lhs: Side = { + "prefix": f"{self.changed_prefix if changed else '*'} {lhs_track} ", + "contents": lhs_title, + "suffix": f" {lhs_length}", + } + rhs: Side = {"prefix": "", "contents": "", "suffix": ""} + if not changed: + # Only return the left side, as nothing changed. + return (lhs, rhs) + else: + # Construct a dictionary for the "changed to" side + rhs = { + "prefix": f"{rhs_track} ", + "contents": rhs_title, + "suffix": f" {rhs_length}", + } + return (lhs, rhs) + + def print_tracklist(self, lines: list[tuple[Side, Side]]) -> None: + """Calculates column widths for tracks stored as line tuples: + (left, right). Then prints each line of tracklist. + """ + if len(lines) == 0: + # If no lines provided, e.g. details not required, do nothing. + return + + def get_width(side: Side) -> int: + """Return the width of left or right in uncolorized characters.""" + try: + return len( + ui.uncolorize( + " ".join( + [side["prefix"], side["contents"], side["suffix"]] + ) + ) + ) + except KeyError: + # An empty dictionary -> Nothing to report + return 0 + + # Check how to fit content into terminal window + indent_width = len(self.indent_tracklist) + terminal_width = ui.term_width() + joiner_width = len("".join(["* ", " -> "])) + col_width = (terminal_width - indent_width - joiner_width) // 2 + max_width_l = max(get_width(line_tuple[0]) for line_tuple in lines) + max_width_r = max(get_width(line_tuple[1]) for line_tuple in lines) + + if ( + (max_width_l <= col_width) + and (max_width_r <= col_width) + or ( + ((max_width_l > col_width) or (max_width_r > col_width)) + and ((max_width_l + max_width_r) <= col_width * 2) + ) + ): + # All content fits. Either both maximum widths are below column + # widths, or one of the columns is larger than allowed but the + # other is smaller than allowed. 
+ # In this case we can afford to shrink the columns to fit their + # largest string + col_width_l = max_width_l + col_width_r = max_width_r + else: + # Not all content fits - stick with original half/half split + col_width_l = col_width + col_width_r = col_width + + # Print out each line, using the calculated width from above. + for left, right in lines: + left["width"] = col_width_l + right["width"] = col_width_r + self.print_layout(self.indent_tracklist, left, right) + + +class AlbumChange(ChangeRepresentation): + match: autotag.hooks.AlbumMatch + + def show_match_tracks(self) -> None: + """Print out the tracks of the match, summarizing changes the match + suggests for them. + """ + pairs = sorted( + self.match.item_info_pairs, key=lambda pair: pair[1].index or 0 + ) + # Build up LHS and RHS for track difference display. The `lines` list + # contains `(left, right)` tuples. + lines: list[tuple[Side, Side]] = [] + medium = disctitle = None + for item, track_info in pairs: + # If the track is the first on a new medium, show medium + # number and title. + if medium != track_info.medium or disctitle != track_info.disctitle: + # Create header for new medium + header = self.make_medium_info_line(track_info) + if header != "": + # Print tracks from previous medium + self.print_tracklist(lines) + lines = [] + ui.print_(f"{self.indent_detail}{header}") + # Save new medium details for future comparison. + medium, disctitle = track_info.medium, track_info.disctitle + + # Construct the line tuple for the track. + left, right = self.make_line(item, track_info) + if right["contents"] != "": + lines.append((left, right)) + else: + if config["import"]["detail"]: + lines.append((left, right)) + self.print_tracklist(lines) + + # Missing and unmatched tracks. 
+ if self.match.extra_tracks: + ui.print_( + "Missing tracks" + f" ({len(self.match.extra_tracks)}/{len(self.match.info.tracks)} -" + f" {len(self.match.extra_tracks) / len(self.match.info.tracks):.1%}):" + ) + for track_info in self.match.extra_tracks: + line = f" ! {track_info.title} (#{self.format_index(track_info)})" + if track_info.length: + line += f" ({human_seconds_short(track_info.length)})" + ui.print_(ui.colorize("text_warning", line)) + if self.match.extra_items: + ui.print_(f"Unmatched tracks ({len(self.match.extra_items)}):") + for item in self.match.extra_items: + line = f" ! {item.title} (#{self.format_index(item)})" + if item.length: + line += f" ({human_seconds_short(item.length)})" + ui.print_(ui.colorize("text_warning", line)) + + +class TrackChange(ChangeRepresentation): + """Track change representation, comparing item with match.""" + + match: autotag.hooks.TrackMatch + + +def show_change( + cur_artist: str, cur_album: str, match: hooks.AlbumMatch +) -> None: + """Print out a representation of the changes that will be made if an + album's tags are changed according to `match`, which must be an AlbumMatch + object. + """ + change = AlbumChange(cur_artist, cur_album, match) + + # Print the match header. + change.show_match_header() + + # Print the match details. + change.show_match_details() + + # Print the match tracks. + change.show_match_tracks() + + +def show_item_change(item: Item, match: hooks.TrackMatch) -> None: + """Print out the change that would occur by tagging `item` with the + metadata from `match`, a TrackMatch object. + """ + change = TrackChange(item.artist, item.title, match) + # Print the match header. + change.show_match_header() + # Print the match details. + change.show_match_details() + + +def disambig_string(info: hooks.Info) -> str: + """Generate a string for an AlbumInfo or TrackInfo object that + provides context that helps disambiguate similar-looking albums and + tracks. 
+ """ + if isinstance(info, hooks.AlbumInfo): + disambig = get_album_disambig_fields(info) + elif isinstance(info, hooks.TrackInfo): + disambig = get_singleton_disambig_fields(info) + else: + return "" + + return ", ".join(disambig) + + +def get_singleton_disambig_fields(info: hooks.TrackInfo) -> Sequence[str]: + out = [] + chosen_fields = config["match"]["singleton_disambig_fields"].as_str_seq() + calculated_values = { + "index": f"Index {info.index}", + "track_alt": f"Track {info.track_alt}", + "album": ( + f"[{info.album}]" + if ( + config["import"]["singleton_album_disambig"].get() + and info.get("album") + ) + else "" + ), + } + + for field in chosen_fields: + if field in calculated_values: + out.append(str(calculated_values[field])) + else: + try: + out.append(str(info[field])) + except (AttributeError, KeyError): + print(f"Disambiguation string key {field} does not exist.") + + return out + + +def get_album_disambig_fields(info: hooks.AlbumInfo) -> Sequence[str]: + out = [] + chosen_fields = config["match"]["album_disambig_fields"].as_str_seq() + calculated_values = { + "media": ( + f"{info.mediums}x{info.media}" + if (info.mediums and info.mediums > 1) + else info.media + ), + } + + for field in chosen_fields: + if field in calculated_values: + out.append(str(calculated_values[field])) + else: + try: + out.append(str(info[field])) + except (AttributeError, KeyError): + print(f"Disambiguation string key {field} does not exist.") + + return out + + +def dist_colorize(string: str, dist: Distance) -> str: + """Formats a string as a colorized similarity string according to + a distance. 
+ """ + if dist <= config["match"]["strong_rec_thresh"].as_number(): + string = ui.colorize("text_success", string) + elif dist <= config["match"]["medium_rec_thresh"].as_number(): + string = ui.colorize("text_warning", string) + else: + string = ui.colorize("text_error", string) + return string + + +def dist_string(dist: Distance) -> str: + """Formats a distance (a float) as a colorized similarity percentage + string. + """ + string = f"{(1 - dist) * 100:.1f}%" + return dist_colorize(string, dist) + + +def penalty_string(distance: Distance, limit: int | None = None) -> str: + """Returns a colorized string that indicates all the penalties + applied to a distance object. + """ + penalties = [] + for key in distance.keys(): + key = key.replace("album_", "") + key = key.replace("track_", "") + key = key.replace("_", " ") + penalties.append(key) + if penalties: + if limit and len(penalties) > limit: + penalties = penalties[:limit] + ["..."] + # Prefix penalty string with U+2260: Not Equal To + penalty_string = f"\u2260 {', '.join(penalties)}" + return ui.colorize("changed", penalty_string) + + return "" diff --git a/beets/ui/commands/import_/session.py b/beets/ui/commands/import_/session.py new file mode 100644 index 000000000..9c8c8dd62 --- /dev/null +++ b/beets/ui/commands/import_/session.py @@ -0,0 +1,547 @@ +from collections import Counter +from itertools import chain + +from beets import autotag, config, importer, logging, plugins, ui +from beets.autotag import Recommendation +from beets.util import PromptChoice, displayable_path +from beets.util.units import human_bytes, human_seconds_short + +from .display import ( + disambig_string, + dist_colorize, + penalty_string, + show_change, + show_item_change, +) + +# Global logger. 
+log = logging.getLogger("beets") + + +class TerminalImportSession(importer.ImportSession): + """An import session that runs in a terminal.""" + + def choose_match(self, task): + """Given an initial autotagging of items, go through an interactive + dance with the user to ask for a choice of metadata. Returns an + AlbumMatch object, ASIS, or SKIP. + """ + # Show what we're tagging. + ui.print_() + + path_str0 = displayable_path(task.paths, "\n") + path_str = ui.colorize("import_path", path_str0) + items_str0 = f"({len(task.items)} items)" + items_str = ui.colorize("import_path_items", items_str0) + ui.print_(" ".join([path_str, items_str])) + + # Let plugins display info or prompt the user before we go through the + # process of selecting candidate. + results = plugins.send( + "import_task_before_choice", session=self, task=task + ) + actions = [action for action in results if action] + + if len(actions) == 1: + return actions[0] + elif len(actions) > 1: + raise plugins.PluginConflictError( + "Only one handler for `import_task_before_choice` may return " + "an action." + ) + + # Take immediate action if appropriate. + action = _summary_judgment(task.rec) + if action == importer.Action.APPLY: + match = task.candidates[0] + show_change(task.cur_artist, task.cur_album, match) + return match + elif action is not None: + return action + + # Loop until we have a choice. + while True: + # Ask for a choice from the user. The result of + # `choose_candidate` may be an `importer.Action`, an + # `AlbumMatch` object for a specific selection, or a + # `PromptChoice`. + choices = self._get_choices(task) + choice = choose_candidate( + task.candidates, + False, + task.rec, + task.cur_artist, + task.cur_album, + itemcount=len(task.items), + choices=choices, + ) + + # Basic choices that require no more action here. + if choice in (importer.Action.SKIP, importer.Action.ASIS): + # Pass selection to main control flow. + return choice + + # Plugin-provided choices. 
We invoke the associated callback + # function. + elif choice in choices: + post_choice = choice.callback(self, task) + if isinstance(post_choice, importer.Action): + return post_choice + elif isinstance(post_choice, autotag.Proposal): + # Use the new candidates and continue around the loop. + task.candidates = post_choice.candidates + task.rec = post_choice.recommendation + + # Otherwise, we have a specific match selection. + else: + # We have a candidate! Finish tagging. Here, choice is an + # AlbumMatch object. + assert isinstance(choice, autotag.AlbumMatch) + return choice + + def choose_item(self, task): + """Ask the user for a choice about tagging a single item. Returns + either an action constant or a TrackMatch object. + """ + ui.print_() + ui.print_(displayable_path(task.item.path)) + candidates, rec = task.candidates, task.rec + + # Take immediate action if appropriate. + action = _summary_judgment(task.rec) + if action == importer.Action.APPLY: + match = candidates[0] + show_item_change(task.item, match) + return match + elif action is not None: + return action + + while True: + # Ask for a choice. + choices = self._get_choices(task) + choice = choose_candidate( + candidates, True, rec, item=task.item, choices=choices + ) + + if choice in (importer.Action.SKIP, importer.Action.ASIS): + return choice + + elif choice in choices: + post_choice = choice.callback(self, task) + if isinstance(post_choice, importer.Action): + return post_choice + elif isinstance(post_choice, autotag.Proposal): + candidates = post_choice.candidates + rec = post_choice.recommendation + + else: + # Chose a candidate. + assert isinstance(choice, autotag.TrackMatch) + return choice + + def resolve_duplicate(self, task, found_duplicates): + """Decide what to do when a new album or item seems similar to one + that's already in the library. 
+ """ + log.warning( + "This {} is already in the library!", + ("album" if task.is_album else "item"), + ) + + if config["import"]["quiet"]: + # In quiet mode, don't prompt -- just skip. + log.info("Skipping.") + sel = "s" + else: + # Print some detail about the existing and new items so the + # user can make an informed decision. + for duplicate in found_duplicates: + ui.print_( + "Old: " + + summarize_items( + ( + list(duplicate.items()) + if task.is_album + else [duplicate] + ), + not task.is_album, + ) + ) + if config["import"]["duplicate_verbose_prompt"]: + if task.is_album: + for dup in duplicate.items(): + print(f" {dup}") + else: + print(f" {duplicate}") + + ui.print_( + "New: " + + summarize_items( + task.imported_items(), + not task.is_album, + ) + ) + if config["import"]["duplicate_verbose_prompt"]: + for item in task.imported_items(): + print(f" {item}") + + sel = ui.input_options( + ("Skip new", "Keep all", "Remove old", "Merge all") + ) + + if sel == "s": + # Skip new. + task.set_choice(importer.Action.SKIP) + elif sel == "k": + # Keep both. Do nothing; leave the choice intact. + pass + elif sel == "r": + # Remove old. + task.should_remove_duplicates = True + elif sel == "m": + task.should_merge_duplicates = True + else: + assert False + + def should_resume(self, path): + return ui.input_yn( + f"Import of the directory:\n{displayable_path(path)}\n" + "was interrupted. Resume (Y/n)?" + ) + + def _get_choices(self, task): + """Get the list of prompt choices that should be presented to the + user. This consists of both built-in choices and ones provided by + plugins. + + The `before_choose_candidate` event is sent to the plugins, with + session and task as its parameters. Plugins are responsible for + checking the right conditions and returning a list of `PromptChoice`s, + which is flattened and checked for conflicts. 
+ + If two or more choices have the same short letter, a warning is + emitted and all but one choices are discarded, giving preference + to the default importer choices. + + Returns a list of `PromptChoice`s. + """ + # Standard, built-in choices. + choices = [ + PromptChoice("s", "Skip", lambda s, t: importer.Action.SKIP), + PromptChoice("u", "Use as-is", lambda s, t: importer.Action.ASIS), + ] + if task.is_album: + choices += [ + PromptChoice( + "t", "as Tracks", lambda s, t: importer.Action.TRACKS + ), + PromptChoice( + "g", "Group albums", lambda s, t: importer.Action.ALBUMS + ), + ] + choices += [ + PromptChoice("e", "Enter search", manual_search), + PromptChoice("i", "enter Id", manual_id), + PromptChoice("b", "aBort", abort_action), + ] + + # Send the before_choose_candidate event and flatten list. + extra_choices = list( + chain( + *plugins.send( + "before_choose_candidate", session=self, task=task + ) + ) + ) + + # Add a "dummy" choice for the other baked-in option, for + # duplicate checking. + all_choices = ( + [ + PromptChoice("a", "Apply", None), + ] + + choices + + extra_choices + ) + + # Check for conflicts. + short_letters = [c.short for c in all_choices] + if len(short_letters) != len(set(short_letters)): + # Duplicate short letter has been found. + duplicates = [ + i for i, count in Counter(short_letters).items() if count > 1 + ] + for short in duplicates: + # Keep the first of the choices, removing the rest. + dup_choices = [c for c in all_choices if c.short == short] + for c in dup_choices[1:]: + log.warning( + "Prompt choice '{0.long}' removed due to conflict " + "with '{1[0].long}' (short letter: '{0.short}')", + c, + dup_choices, + ) + extra_choices.remove(c) + + return choices + extra_choices + + +def summarize_items(items, singleton): + """Produces a brief summary line describing a set of items. Used for + manually resolving duplicates during import. + + `items` is a list of `Item` objects. 
`singleton` indicates whether + this is an album or single-item import (if the latter, then `items` + should only have one element). + """ + summary_parts = [] + if not singleton: + summary_parts.append(f"{len(items)} items") + + format_counts = {} + for item in items: + format_counts[item.format] = format_counts.get(item.format, 0) + 1 + if len(format_counts) == 1: + # A single format. + summary_parts.append(items[0].format) + else: + # Enumerate all the formats by decreasing frequencies: + for fmt, count in sorted( + format_counts.items(), + key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]), + ): + summary_parts.append(f"{fmt} {count}") + + if items: + average_bitrate = sum([item.bitrate for item in items]) / len(items) + total_duration = sum([item.length for item in items]) + total_filesize = sum([item.filesize for item in items]) + summary_parts.append(f"{int(average_bitrate / 1000)}kbps") + if items[0].format == "FLAC": + sample_bits = ( + f"{round(int(items[0].samplerate) / 1000, 1)}kHz" + f"/{items[0].bitdepth} bit" + ) + summary_parts.append(sample_bits) + summary_parts.append(human_seconds_short(total_duration)) + summary_parts.append(human_bytes(total_filesize)) + + return ", ".join(summary_parts) + + +def _summary_judgment(rec): + """Determines whether a decision should be made without even asking + the user. This occurs in quiet mode and when an action is chosen for + NONE recommendations. Return None if the user should be queried. + Otherwise, returns an action. May also print to the console if a + summary judgment is made. 
+ """ + + if config["import"]["quiet"]: + if rec == Recommendation.strong: + return importer.Action.APPLY + else: + action = config["import"]["quiet_fallback"].as_choice( + { + "skip": importer.Action.SKIP, + "asis": importer.Action.ASIS, + } + ) + elif config["import"]["timid"]: + return None + elif rec == Recommendation.none: + action = config["import"]["none_rec_action"].as_choice( + { + "skip": importer.Action.SKIP, + "asis": importer.Action.ASIS, + "ask": None, + } + ) + else: + return None + + if action == importer.Action.SKIP: + ui.print_("Skipping.") + elif action == importer.Action.ASIS: + ui.print_("Importing as-is.") + return action + + +def choose_candidate( + candidates, + singleton, + rec, + cur_artist=None, + cur_album=None, + item=None, + itemcount=None, + choices=[], +): + """Given a sorted list of candidates, ask the user for a selection + of which candidate to use. Applies to both full albums and + singletons (tracks). Candidates are either AlbumMatch or TrackMatch + objects depending on `singleton`. for albums, `cur_artist`, + `cur_album`, and `itemcount` must be provided. For singletons, + `item` must be provided. + + `choices` is a list of `PromptChoice`s to be used in each prompt. + + Returns one of the following: + * the result of the choice, which may be SKIP or ASIS + * a candidate (an AlbumMatch/TrackMatch object) + * a chosen `PromptChoice` from `choices` + """ + # Sanity check. + if singleton: + assert item is not None + else: + assert cur_artist is not None + assert cur_album is not None + + # Build helper variables for the prompt choices. + choice_opts = tuple(c.long for c in choices) + choice_actions = {c.short: c for c in choices} + + # Zero candidates. 
+ if not candidates: + if singleton: + ui.print_("No matching recordings found.") + else: + ui.print_(f"No matching release found for {itemcount} tracks.") + ui.print_( + "For help, see: " + "https://beets.readthedocs.org/en/latest/faq.html#nomatch" + ) + sel = ui.input_options(choice_opts) + if sel in choice_actions: + return choice_actions[sel] + else: + assert False + + # Is the change good enough? + bypass_candidates = False + if rec != Recommendation.none: + match = candidates[0] + bypass_candidates = True + + while True: + # Display and choose from candidates. + require = rec <= Recommendation.low + + if not bypass_candidates: + # Display list of candidates. + ui.print_("") + ui.print_( + f"Finding tags for {'track' if singleton else 'album'} " + f'"{item.artist if singleton else cur_artist} -' + f' {item.title if singleton else cur_album}".' + ) + + ui.print_(" Candidates:") + for i, match in enumerate(candidates): + # Index, metadata, and distance. + index0 = f"{i + 1}." + index = dist_colorize(index0, match.distance) + dist = f"({(1 - match.distance) * 100:.1f}%)" + distance = dist_colorize(dist, match.distance) + metadata = f"{match.info.artist} - {match.info.name}" + if i == 0: + metadata = dist_colorize(metadata, match.distance) + else: + metadata = ui.colorize("text_highlight_minor", metadata) + line1 = [index, distance, metadata] + ui.print_(f" {' '.join(line1)}") + + # Penalties. + penalties = penalty_string(match.distance, 3) + if penalties: + ui.print_(f"{' ' * 13}{penalties}") + + # Disambiguation + disambig = disambig_string(match.info) + if disambig: + ui.print_(f"{' ' * 13}{disambig}") + + # Ask the user for a choice. + sel = ui.input_options(choice_opts, numrange=(1, len(candidates))) + if sel == "m": + pass + elif sel in choice_actions: + return choice_actions[sel] + else: # Numerical selection. + match = candidates[sel - 1] + if sel != 1: + # When choosing anything but the first match, + # disable the default action. 
+ require = True + bypass_candidates = False + + # Show what we're about to do. + if singleton: + show_item_change(item, match) + else: + show_change(cur_artist, cur_album, match) + + # Exact match => tag automatically if we're not in timid mode. + if rec == Recommendation.strong and not config["import"]["timid"]: + return match + + # Ask for confirmation. + default = config["import"]["default_action"].as_choice( + { + "apply": "a", + "skip": "s", + "asis": "u", + "none": None, + } + ) + if default is None: + require = True + # Bell ring when user interaction is needed. + if config["import"]["bell"]: + ui.print_("\a", end="") + sel = ui.input_options( + ("Apply", "More candidates") + choice_opts, + require=require, + default=default, + ) + if sel == "a": + return match + elif sel in choice_actions: + return choice_actions[sel] + + +def manual_search(session, task): + """Get a new `Proposal` using manual search criteria. + + Input either an artist and album (for full albums) or artist and + track name (for singletons) for manual search. + """ + artist = ui.input_("Artist:").strip() + name = ui.input_("Album:" if task.is_album else "Track:").strip() + + if task.is_album: + _, _, prop = autotag.tag_album(task.items, artist, name) + return prop + else: + return autotag.tag_item(task.item, artist, name) + + +def manual_id(session, task): + """Get a new `Proposal` using a manually-entered ID. + + Input an ID, either for an album ("release") or a track ("recording"). 
+ """ + prompt = f"Enter {'release' if task.is_album else 'recording'} ID:" + search_id = ui.input_(prompt).strip() + + if task.is_album: + _, _, prop = autotag.tag_album(task.items, search_ids=search_id.split()) + return prop + else: + return autotag.tag_item(task.item, search_ids=search_id.split()) + + +def abort_action(session, task): + """A prompt choice callback that aborts the importer.""" + raise importer.ImportAbortError() diff --git a/beets/ui/commands/list.py b/beets/ui/commands/list.py new file mode 100644 index 000000000..cb92b9b79 --- /dev/null +++ b/beets/ui/commands/list.py @@ -0,0 +1,25 @@ +"""The 'list' command: query and show library contents.""" + +from beets import ui + + +def list_items(lib, query, album, fmt=""): + """Print out items in lib matching query. If album, then search for + albums instead of single items. + """ + if album: + for album in lib.albums(query): + ui.print_(format(album, fmt)) + else: + for item in lib.items(query): + ui.print_(format(item, fmt)) + + +def list_func(lib, opts, args): + list_items(lib, args, opts.album) + + +list_cmd = ui.Subcommand("list", help="query the library", aliases=("ls",)) +list_cmd.parser.usage += "\nExample: %prog -f '$album: $title' artist:beatles" +list_cmd.parser.add_all_common_options() +list_cmd.func = list_func diff --git a/beets/ui/commands/modify.py b/beets/ui/commands/modify.py new file mode 100644 index 000000000..186bfb6dd --- /dev/null +++ b/beets/ui/commands/modify.py @@ -0,0 +1,162 @@ +"""The `modify` command: change metadata fields.""" + +from beets import library, ui +from beets.util import functemplate + +from .utils import do_query + + +def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit): + """Modifies matching items according to user-specified assignments and + deletions. + + `mods` is a dictionary of field and value pairse indicating + assignments. `dels` is a list of fields to be deleted. + """ + # Parse key=value specifications into a dictionary. 
+ model_cls = library.Album if album else library.Item + + # Get the items to modify. + items, albums = do_query(lib, query, album, False) + objs = albums if album else items + + # Apply changes *temporarily*, preview them, and collect modified + # objects. + ui.print_(f"Modifying {len(objs)} {'album' if album else 'item'}s.") + changed = [] + templates = { + key: functemplate.template(value) for key, value in mods.items() + } + for obj in objs: + obj_mods = { + key: model_cls._parse(key, obj.evaluate_template(templates[key])) + for key in mods.keys() + } + if print_and_modify(obj, obj_mods, dels) and obj not in changed: + changed.append(obj) + + # Still something to do? + if not changed: + ui.print_("No changes to make.") + return + + # Confirm action. + if confirm: + if write and move: + extra = ", move and write tags" + elif write: + extra = " and write tags" + elif move: + extra = " and move" + else: + extra = "" + + changed = ui.input_select_objects( + f"Really modify{extra}", + changed, + lambda o: print_and_modify(o, mods, dels), + ) + + # Apply changes to database and files + with lib.transaction(): + for obj in changed: + obj.try_sync(write, move, inherit) + + +def print_and_modify(obj, mods, dels): + """Print the modifications to an item and return a bool indicating + whether any changes were made. + + `mods` is a dictionary of fields and values to update on the object; + `dels` is a sequence of fields to delete. + """ + obj.update(mods) + for field in dels: + try: + del obj[field] + except KeyError: + pass + return ui.show_model_changes(obj) + + +def modify_parse_args(args): + """Split the arguments for the modify subcommand into query parts, + assignments (field=value), and deletions (field!). Returns the result as + a three-tuple in that order. + """ + mods = {} + dels = [] + query = [] + for arg in args: + if arg.endswith("!") and "=" not in arg and ":" not in arg: + dels.append(arg[:-1]) # Strip trailing !. 
+ elif "=" in arg and ":" not in arg.split("=", 1)[0]: + key, val = arg.split("=", 1) + mods[key] = val + else: + query.append(arg) + return query, mods, dels + + +def modify_func(lib, opts, args): + query, mods, dels = modify_parse_args(args) + if not mods and not dels: + raise ui.UserError("no modifications specified") + modify_items( + lib, + mods, + dels, + query, + ui.should_write(opts.write), + ui.should_move(opts.move), + opts.album, + not opts.yes, + opts.inherit, + ) + + +modify_cmd = ui.Subcommand( + "modify", help="change metadata fields", aliases=("mod",) +) +modify_cmd.parser.add_option( + "-m", + "--move", + action="store_true", + dest="move", + help="move files in the library directory", +) +modify_cmd.parser.add_option( + "-M", + "--nomove", + action="store_false", + dest="move", + help="don't move files in library", +) +modify_cmd.parser.add_option( + "-w", + "--write", + action="store_true", + default=None, + help="write new metadata to files' tags (default)", +) +modify_cmd.parser.add_option( + "-W", + "--nowrite", + action="store_false", + dest="write", + help="don't write metadata (opposite of -w)", +) +modify_cmd.parser.add_album_option() +modify_cmd.parser.add_format_option(target="item") +modify_cmd.parser.add_option( + "-y", "--yes", action="store_true", help="skip confirmation" +) +modify_cmd.parser.add_option( + "-I", + "--noinherit", + action="store_false", + dest="inherit", + default=True, + help="when modifying albums, don't also change item data", +) +modify_cmd.func = modify_func diff --git a/beets/ui/commands/move.py b/beets/ui/commands/move.py new file mode 100644 index 000000000..40a9d1b83 --- /dev/null +++ b/beets/ui/commands/move.py @@ -0,0 +1,200 @@ +"""The 'move' command: Move/copy files to the library or a new base directory.""" + +import os + +from beets import logging, ui +from beets.util import ( + MoveOperation, + PathLike, + displayable_path, + normpath, + syspath, +) + +from .utils import do_query + +# Global logger. 
+log = logging.getLogger("beets") + + +def show_path_changes(path_changes): + """Given a list of tuples (source, destination) that indicate the + path changes, log the changes as INFO-level output to the beets log. + The output is guaranteed to be unicode. + + Every pair is shown on a single line if the terminal width permits it, + else it is split over two lines. E.g., + + Source -> Destination + + vs. + + Source + -> Destination + """ + sources, destinations = zip(*path_changes) + + # Ensure unicode output + sources = list(map(displayable_path, sources)) + destinations = list(map(displayable_path, destinations)) + + # Calculate widths for terminal split + col_width = (ui.term_width() - len(" -> ")) // 2 + max_width = len(max(sources + destinations, key=len)) + + if max_width > col_width: + # Print every change over two lines + for source, dest in zip(sources, destinations): + color_source, color_dest = ui.colordiff(source, dest) + ui.print_(f"{color_source} \n -> {color_dest}") + else: + # Print every change on a single line, and add a header + title_pad = max_width - len("Source ") + len(" -> ") + + ui.print_(f"Source {' ' * title_pad} Destination") + for source, dest in zip(sources, destinations): + pad = max_width - len(source) + color_source, color_dest = ui.colordiff(source, dest) + ui.print_(f"{color_source} {' ' * pad} -> {color_dest}") + + +def move_items( + lib, + dest_path: PathLike, + query, + copy, + album, + pretend, + confirm=False, + export=False, +): + """Moves or copies items to a new base directory, given by dest. If + dest is None, then the library's base directory is used, making the + command "consolidate" files. + """ + dest = os.fsencode(dest_path) if dest_path else dest_path + items, albums = do_query(lib, query, album, False) + objs = albums if album else items + num_objs = len(objs) + + # Filter out files that don't need to be moved. 
+ def isitemmoved(item): + return item.path != item.destination(basedir=dest) + + def isalbummoved(album): + return any(isitemmoved(i) for i in album.items()) + + objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] + num_unmoved = num_objs - len(objs) + # Report unmoved files that match the query. + unmoved_msg = "" + if num_unmoved > 0: + unmoved_msg = f" ({num_unmoved} already in place)" + + copy = copy or export # Exporting always copies. + action = "Copying" if copy else "Moving" + act = "copy" if copy else "move" + entity = "album" if album else "item" + log.info( + "{} {} {}{}{}.", + action, + len(objs), + entity, + "s" if len(objs) != 1 else "", + unmoved_msg, + ) + if not objs: + return + + if pretend: + if album: + show_path_changes( + [ + (item.path, item.destination(basedir=dest)) + for obj in objs + for item in obj.items() + ] + ) + else: + show_path_changes( + [(obj.path, obj.destination(basedir=dest)) for obj in objs] + ) + else: + if confirm: + objs = ui.input_select_objects( + f"Really {act}", + objs, + lambda o: show_path_changes( + [(o.path, o.destination(basedir=dest))] + ), + ) + + for obj in objs: + log.debug("moving: {.filepath}", obj) + + if export: + # Copy without affecting the database. + obj.move( + operation=MoveOperation.COPY, basedir=dest, store=False + ) + else: + # Ordinary move/copy: store the new path. 
+ if copy: + obj.move(operation=MoveOperation.COPY, basedir=dest) + else: + obj.move(operation=MoveOperation.MOVE, basedir=dest) + + +def move_func(lib, opts, args): + dest = opts.dest + if dest is not None: + dest = normpath(dest) + if not os.path.isdir(syspath(dest)): + raise ui.UserError(f"no such directory: {displayable_path(dest)}") + + move_items( + lib, + dest, + args, + opts.copy, + opts.album, + opts.pretend, + opts.timid, + opts.export, + ) + + +move_cmd = ui.Subcommand("move", help="move or copy items", aliases=("mv",)) +move_cmd.parser.add_option( + "-d", "--dest", metavar="DIR", dest="dest", help="destination directory" +) +move_cmd.parser.add_option( + "-c", + "--copy", + default=False, + action="store_true", + help="copy instead of moving", +) +move_cmd.parser.add_option( + "-p", + "--pretend", + default=False, + action="store_true", + help="show how files would be moved, but don't touch anything", +) +move_cmd.parser.add_option( + "-t", + "--timid", + dest="timid", + action="store_true", + help="always confirm all actions", +) +move_cmd.parser.add_option( + "-e", + "--export", + default=False, + action="store_true", + help="copy without changing the database path", +) +move_cmd.parser.add_album_option() +move_cmd.func = move_func diff --git a/beets/ui/commands/remove.py b/beets/ui/commands/remove.py new file mode 100644 index 000000000..997a4b48c --- /dev/null +++ b/beets/ui/commands/remove.py @@ -0,0 +1,84 @@ +"""The `remove` command: remove items from the library (and optionally delete files).""" + +from beets import ui + +from .utils import do_query + + +def remove_items(lib, query, album, delete, force): + """Remove items matching query from lib. If album, then match and + remove whole albums. If delete, also remove files from disk. + """ + # Get the matching items. + items, albums = do_query(lib, query, album) + objs = albums if album else items + + # Confirm file removal if not forcing removal. 
+ if not force: + # Prepare confirmation with user. + album_str = ( + f" in {len(albums)} album{'s' if len(albums) > 1 else ''}" + if album + else "" + ) + + if delete: + fmt = "$path - $title" + prompt = "Really DELETE" + prompt_all = ( + "Really DELETE" + f" {len(items)} file{'s' if len(items) > 1 else ''}{album_str}" + ) + else: + fmt = "" + prompt = "Really remove from the library?" + prompt_all = ( + "Really remove" + f" {len(items)} item{'s' if len(items) > 1 else ''}{album_str}" + " from the library?" + ) + + # Helpers for printing affected items + def fmt_track(t): + ui.print_(format(t, fmt)) + + def fmt_album(a): + ui.print_() + for i in a.items(): + fmt_track(i) + + fmt_obj = fmt_album if album else fmt_track + + # Show all the items. + for o in objs: + fmt_obj(o) + + # Confirm with user. + objs = ui.input_select_objects( + prompt, objs, fmt_obj, prompt_all=prompt_all + ) + + if not objs: + return + + # Remove (and possibly delete) items. + with lib.transaction(): + for obj in objs: + obj.remove(delete) + + +def remove_func(lib, opts, args): + remove_items(lib, args, opts.album, opts.delete, opts.force) + + +remove_cmd = ui.Subcommand( + "remove", help="remove matching items from the library", aliases=("rm",) +) +remove_cmd.parser.add_option( + "-d", "--delete", action="store_true", help="also remove files from disk" +) +remove_cmd.parser.add_option( + "-f", "--force", action="store_true", help="do not ask when removing items" +) +remove_cmd.parser.add_album_option() +remove_cmd.func = remove_func diff --git a/beets/ui/commands/stats.py b/beets/ui/commands/stats.py new file mode 100644 index 000000000..d51d4d8ae --- /dev/null +++ b/beets/ui/commands/stats.py @@ -0,0 +1,62 @@ +"""The 'stats' command: show library statistics.""" + +import os + +from beets import logging, ui +from beets.util import syspath +from beets.util.units import human_bytes, human_seconds + +# Global logger. 
+log = logging.getLogger("beets") + + +def show_stats(lib, query, exact): + """Shows some statistics about the matched items.""" + items = lib.items(query) + + total_size = 0 + total_time = 0.0 + total_items = 0 + artists = set() + albums = set() + album_artists = set() + + for item in items: + if exact: + try: + total_size += os.path.getsize(syspath(item.path)) + except OSError as exc: + log.info("could not get size of {.path}: {}", item, exc) + else: + total_size += int(item.length * item.bitrate / 8) + total_time += item.length + total_items += 1 + artists.add(item.artist) + album_artists.add(item.albumartist) + if item.album_id: + albums.add(item.album_id) + + size_str = human_bytes(total_size) + if exact: + size_str += f" ({total_size} bytes)" + + ui.print_(f"""Tracks: {total_items} +Total time: {human_seconds(total_time)} +{f" ({total_time:.2f} seconds)" if exact else ""} +{"Total size" if exact else "Approximate total size"}: {size_str} +Artists: {len(artists)} +Albums: {len(albums)} +Album artists: {len(album_artists)}""") + + +def stats_func(lib, opts, args): + show_stats(lib, args, opts.exact) + + +stats_cmd = ui.Subcommand( + "stats", help="show statistics about the library or a query" +) +stats_cmd.parser.add_option( + "-e", "--exact", action="store_true", help="exact size and time" +) +stats_cmd.func = stats_func diff --git a/beets/ui/commands/update.py b/beets/ui/commands/update.py new file mode 100644 index 000000000..9286bf12b --- /dev/null +++ b/beets/ui/commands/update.py @@ -0,0 +1,196 @@ +"""The `update` command: Update library contents according to on-disk tags.""" + +import os + +from beets import library, logging, ui +from beets.util import ancestry, syspath + +from .utils import do_query + +# Global logger. +log = logging.getLogger("beets") + + +def update_items(lib, query, album, move, pretend, fields, exclude_fields=None): + """For all the items matched by the query, update the library to + reflect the item's embedded tags. 
+ :param fields: The fields to be stored. If not specified, all fields will + be. + :param exclude_fields: The fields to not be stored. If not specified, all + fields will be. + """ + with lib.transaction(): + items, _ = do_query(lib, query, album) + if move and fields is not None and "path" not in fields: + # Special case: if an item needs to be moved, the path field has to + # updated; otherwise the new path will not be reflected in the + # database. + fields.append("path") + if fields is None: + # no fields were provided, update all media fields + item_fields = fields or library.Item._media_fields + if move and "path" not in item_fields: + # move is enabled, add 'path' to the list of fields to update + item_fields.add("path") + else: + # fields was provided, just update those + item_fields = fields + # get all the album fields to update + album_fields = fields or library.Album._fields.keys() + if exclude_fields: + # remove any excluded fields from the item and album sets + item_fields = [f for f in item_fields if f not in exclude_fields] + album_fields = [f for f in album_fields if f not in exclude_fields] + + # Walk through the items and pick up their changes. + affected_albums = set() + for item in items: + # Item deleted? + if not item.path or not os.path.exists(syspath(item.path)): + ui.print_(format(item)) + ui.print_(ui.colorize("text_error", " deleted")) + if not pretend: + item.remove(True) + affected_albums.add(item.album_id) + continue + + # Did the item change since last checked? + if item.current_mtime() <= item.mtime: + log.debug( + "skipping {0.filepath} because mtime is up to date ({0.mtime})", + item, + ) + continue + + # Read new data. + try: + item.read() + except library.ReadError as exc: + log.error("error reading {.filepath}: {}", item, exc) + continue + + # Special-case album artist when it matches track artist. (Hacky + # but necessary for preserving album-level metadata for non- + # autotagged imports.) 
+ if not item.albumartist: + old_item = lib.get_item(item.id) + if old_item.albumartist == old_item.artist == item.artist: + item.albumartist = old_item.albumartist + item._dirty.discard("albumartist") + + # Check for and display changes. + changed = ui.show_model_changes(item, fields=item_fields) + + # Save changes. + if not pretend: + if changed: + # Move the item if it's in the library. + if move and lib.directory in ancestry(item.path): + item.move(store=False) + + item.store(fields=item_fields) + affected_albums.add(item.album_id) + else: + # The file's mtime was different, but there were no + # changes to the metadata. Store the new mtime, + # which is set in the call to read(), so we don't + # check this again in the future. + item.store(fields=item_fields) + + # Skip album changes while pretending. + if pretend: + return + + # Modify affected albums to reflect changes in their items. + for album_id in affected_albums: + if album_id is None: # Singletons. + continue + album = lib.get_album(album_id) + if not album: # Empty albums have already been removed. + log.debug("emptied album {}", album_id) + continue + first_item = album.items().get() + + # Update album structure to reflect an item in it. + for key in library.Album.item_keys: + album[key] = first_item[key] + album.store(fields=album_fields) + + # Move album art (and any inconsistent items). + if move and lib.directory in ancestry(first_item.path): + log.debug("moving album {}", album_id) + + # Manually moving and storing the album. + items = list(album.items()) + for item in items: + item.move(store=False, with_album=False) + item.store(fields=item_fields) + album.move(store=False) + album.store(fields=album_fields) + + +def update_func(lib, opts, args): + # Verify that the library folder exists to prevent accidental wipes. 
+ if not os.path.isdir(syspath(lib.directory)): + ui.print_("Library path is unavailable or does not exist.") + ui.print_(lib.directory) + if not ui.input_yn("Are you sure you want to continue (y/n)?", True): + return + update_items( + lib, + args, + opts.album, + ui.should_move(opts.move), + opts.pretend, + opts.fields, + opts.exclude_fields, + ) + + +update_cmd = ui.Subcommand( + "update", + help="update the library", + aliases=( + "upd", + "up", + ), +) +update_cmd.parser.add_album_option() +update_cmd.parser.add_format_option() +update_cmd.parser.add_option( + "-m", + "--move", + action="store_true", + dest="move", + help="move files in the library directory", +) +update_cmd.parser.add_option( + "-M", + "--nomove", + action="store_false", + dest="move", + help="don't move files in library", +) +update_cmd.parser.add_option( + "-p", + "--pretend", + action="store_true", + help="show all changes but do nothing", +) +update_cmd.parser.add_option( + "-F", + "--field", + default=None, + action="append", + dest="fields", + help="list of fields to update", +) +update_cmd.parser.add_option( + "-e", + "--exclude-field", + default=None, + action="append", + dest="exclude_fields", + help="list of fields to exclude from updates", +) +update_cmd.func = update_func diff --git a/beets/ui/commands/utils.py b/beets/ui/commands/utils.py new file mode 100644 index 000000000..71c104d07 --- /dev/null +++ b/beets/ui/commands/utils.py @@ -0,0 +1,29 @@ +"""Utility functions for beets UI commands.""" + +from beets import ui + + +def do_query(lib, query, album, also_items=True): + """For commands that operate on matched items, performs a query + and returns a list of matching items and a list of matching + albums. (The latter is only nonempty when album is True.) Raises + a UserError if no items match. also_items controls whether, when + fetching albums, the associated items should be fetched also. 
+ """ + if album: + albums = list(lib.albums(query)) + items = [] + if also_items: + for al in albums: + items += al.items() + + else: + albums = [] + items = list(lib.items(query)) + + if album and not albums: + raise ui.UserError("No matching albums found.") + elif not album and not items: + raise ui.UserError("No matching items found.") + + return items, albums diff --git a/beets/ui/commands/version.py b/beets/ui/commands/version.py new file mode 100644 index 000000000..a93c373a4 --- /dev/null +++ b/beets/ui/commands/version.py @@ -0,0 +1,23 @@ +"""The 'version' command: show version information.""" + +from platform import python_version + +import beets +from beets import plugins, ui + + +def show_version(*args): + ui.print_(f"beets version {beets.__version__}") + ui.print_(f"Python version {python_version()}") + # Show plugins. + names = sorted(p.name for p in plugins.find_plugins()) + if names: + ui.print_("plugins:", ", ".join(names)) + else: + ui.print_("no plugins loaded") + + +version_cmd = ui.Subcommand("version", help="output version information") +version_cmd.func = show_version + +__all__ = ["version_cmd"] diff --git a/beets/ui/commands/write.py b/beets/ui/commands/write.py new file mode 100644 index 000000000..05c3c7565 --- /dev/null +++ b/beets/ui/commands/write.py @@ -0,0 +1,60 @@ +"""The `write` command: write tag information to files.""" + +import os + +from beets import library, logging, ui +from beets.util import syspath + +from .utils import do_query + +# Global logger. +log = logging.getLogger("beets") + + +def write_items(lib, query, pretend, force): + """Write tag information from the database to the respective files + in the filesystem. + """ + items, albums = do_query(lib, query, False, False) + + for item in items: + # Item deleted? + if not os.path.exists(syspath(item.path)): + log.info("missing file: {.filepath}", item) + continue + + # Get an Item object reflecting the "clean" (on-disk) state. 
+ try: + clean_item = library.Item.from_path(item.path) + except library.ReadError as exc: + log.error("error reading {.filepath}: {}", item, exc) + continue + + # Check for and display changes. + changed = ui.show_model_changes( + item, clean_item, library.Item._media_tag_fields, force + ) + if (changed or force) and not pretend: + # We use `try_sync` here to keep the mtime up to date in the + # database. + item.try_sync(True, False) + + +def write_func(lib, opts, args): + write_items(lib, args, opts.pretend, opts.force) + + +write_cmd = ui.Subcommand("write", help="write tag information to files") +write_cmd.parser.add_option( + "-p", + "--pretend", + action="store_true", + help="show all changes but do nothing", +) +write_cmd.parser.add_option( + "-f", + "--force", + action="store_true", + help="write tags even if the existing tags match the database", +) +write_cmd.func = write_func diff --git a/beets/util/__init__.py b/beets/util/__init__.py index 68dbaee65..517e076de 100644 --- a/beets/util/__init__.py +++ b/beets/util/__init__.py @@ -28,8 +28,10 @@ import sys import tempfile import traceback from collections import Counter +from collections.abc import Callable, Sequence from contextlib import suppress from enum import Enum +from functools import cache from importlib import import_module from multiprocessing.pool import ThreadPool from pathlib import Path @@ -38,33 +40,36 @@ from typing import ( TYPE_CHECKING, Any, AnyStr, - Callable, - Iterable, + ClassVar, + Generic, NamedTuple, TypeVar, Union, + cast, ) from unidecode import unidecode +import beets from beets.util import hidden if TYPE_CHECKING: - from collections.abc import Iterator, Sequence + from collections.abc import Iterable, Iterator from logging import Logger -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias + from beets.library import Item MAX_FILENAME_LENGTH = 200 WINDOWS_MAGIC_PREFIX = "\\\\?\\" T = TypeVar("T") -BytesOrStr = 
Union[str, bytes] -PathLike = Union[BytesOrStr, Path] -Replacements: TypeAlias = "Sequence[tuple[Pattern[str], str]]" +PathLike = Union[str, bytes, Path] +StrPath = Union[str, Path] +Replacements = Sequence[tuple[Pattern[str], str]] + +# Here for now to allow for a easy replace later on +# once we can move to a PathLike (mainly used in importer) +PathBytes = bytes class HumanReadableError(Exception): @@ -106,7 +111,7 @@ class HumanReadableError(Exception): elif hasattr(self.reason, "strerror"): # i.e., EnvironmentError return self.reason.strerror else: - return '"{}"'.format(str(self.reason)) + return f'"{self.reason}"' def get_message(self): """Create the human-readable description of the error, sans @@ -120,7 +125,7 @@ class HumanReadableError(Exception): """ if self.tb: logger.debug(self.tb) - logger.error("{0}: {1}", self.error_kind, self.args[0]) + logger.error("{0.error_kind}: {0.args[0]}", self) class FilesystemError(HumanReadableError): @@ -136,18 +141,16 @@ class FilesystemError(HumanReadableError): def get_message(self): # Use a nicer English phrasing for some specific verbs. 
if self.verb in ("move", "copy", "rename"): - clause = "while {} {} to {}".format( - self._gerund(), - displayable_path(self.paths[0]), - displayable_path(self.paths[1]), + clause = ( + f"while {self._gerund()} {displayable_path(self.paths[0])} to" + f" {displayable_path(self.paths[1])}" ) elif self.verb in ("delete", "write", "create", "read"): - clause = "while {} {}".format( - self._gerund(), displayable_path(self.paths[0]) - ) + clause = f"while {self._gerund()} {displayable_path(self.paths[0])}" else: - clause = "during {} of paths {}".format( - self.verb, ", ".join(displayable_path(p) for p in self.paths) + clause = ( + f"during {self.verb} of paths" + f" {', '.join(displayable_path(p) for p in self.paths)}" ) return f"{self._reasonstr()} {clause}" @@ -164,6 +167,12 @@ class MoveOperation(Enum): REFLINK_AUTO = 5 +class PromptChoice(NamedTuple): + short: str + long: str + callback: Any + + def normpath(path: PathLike) -> bytes: """Provide the canonical form of the path suitable for storing in the database. @@ -217,12 +226,12 @@ def sorted_walk( # Get all the directories and files at this level. try: contents = os.listdir(syspath(bytes_path)) - except OSError as exc: + except OSError: if logger: logger.warning( - "could not list directory {}: {}".format( - displayable_path(bytes_path), exc.strerror - ) + "could not list directory {}", + displayable_path(bytes_path), + exc_info=True, ) return dirs = [] @@ -430,8 +439,8 @@ def syspath(path: PathLike, prefix: bool = True) -> str: if prefix and not str_path.startswith(WINDOWS_MAGIC_PREFIX): if str_path.startswith("\\\\"): # UNC path. Final path should look like \\?\UNC\... 
- str_path = "UNC" + str_path[1:] - str_path = WINDOWS_MAGIC_PREFIX + str_path + str_path = f"UNC{str_path[1:]}" + str_path = f"{WINDOWS_MAGIC_PREFIX}{str_path}" return str_path @@ -503,8 +512,8 @@ def move(path: bytes, dest: bytes, replace: bool = False): basename = os.path.basename(bytestring_path(dest)) dirname = os.path.dirname(bytestring_path(dest)) tmp = tempfile.NamedTemporaryFile( - suffix=syspath(b".beets", prefix=False), - prefix=syspath(b"." + basename + b".", prefix=False), + suffix=".beets", + prefix=f".{os.fsdecode(basename)}.", dir=syspath(dirname), delete=False, ) @@ -557,7 +566,7 @@ def link(path: bytes, dest: bytes, replace: bool = False): except NotImplementedError: # raised on python >= 3.2 and Windows versions before Vista raise FilesystemError( - "OS does not support symbolic links." "link", + "OS does not support symbolic links.link", (path, dest), traceback.format_exc(), ) @@ -573,20 +582,24 @@ def hardlink(path: bytes, dest: bytes, replace: bool = False): if samefile(path, dest): return - if os.path.exists(syspath(dest)) and not replace: + # Dereference symlinks, expand "~", and convert relative paths to absolute + origin_path = Path(os.fsdecode(path)).expanduser().resolve() + dest_path = Path(os.fsdecode(dest)).expanduser().resolve() + + if dest_path.exists() and not replace: raise FilesystemError("file exists", "rename", (path, dest)) try: - os.link(syspath(path), syspath(dest)) + dest_path.hardlink_to(origin_path) except NotImplementedError: raise FilesystemError( - "OS does not support hard links." "link", + "OS does not support hard links.link", (path, dest), traceback.format_exc(), ) except OSError as exc: if exc.errno == errno.EXDEV: raise FilesystemError( - "Cannot hard link across devices." 
"link", + "Cannot hard link across devices.link", (path, dest), traceback.format_exc(), ) @@ -694,105 +707,87 @@ def sanitize_path(path: str, replacements: Replacements | None = None) -> str: return os.path.join(*comps) -def truncate_path(path: AnyStr, length: int = MAX_FILENAME_LENGTH) -> AnyStr: - """Given a bytestring path or a Unicode path fragment, truncate the - components to a legal length. In the last component, the extension - is preserved. +def truncate_str(s: str, length: int) -> str: + """Truncate the string to the given byte length. + + If we end up truncating a unicode character in the middle (rendering it invalid), + it is removed: + + >>> s = "🎹🎶" # 8 bytes + >>> truncate_str(s, 6) + '🎹' """ - comps = components(path) + return os.fsencode(s)[:length].decode(sys.getfilesystemencoding(), "ignore") - out = [c[:length] for c in comps] - base, ext = os.path.splitext(comps[-1]) - if ext: - # Last component has an extension. - base = base[: length - len(ext)] - out[-1] = base + ext - return os.path.join(*out) +def truncate_path(str_path: str) -> str: + """Truncate each path part to a legal length preserving the extension.""" + max_length = get_max_filename_length() + path = Path(str_path) + parent_parts = [truncate_str(p, max_length) for p in path.parts[:-1]] + stem = truncate_str(path.stem, max_length - len(path.suffix)) + return f"{Path(*parent_parts, stem)}{path.suffix}" def _legalize_stage( - path: str, - replacements: Replacements | None, - length: int, - extension: str, - fragment: bool, -) -> tuple[BytesOrStr, bool]: + path: str, replacements: Replacements | None, extension: str +) -> tuple[str, bool]: """Perform a single round of path legalization steps - (sanitation/replacement, encoding from Unicode to bytes, - extension-appending, and truncation). Return the path (Unicode if - `fragment` is set, `bytes` otherwise) and whether truncation was - required. + 1. sanitation/replacement + 2. appending the extension + 3. truncation. 
+ + Return the path and whether truncation was required. """ # Perform an initial sanitization including user replacements. path = sanitize_path(path, replacements) - # Encode for the filesystem. - if not fragment: - path = bytestring_path(path) # type: ignore - # Preserve extension. path += extension.lower() # Truncate too-long components. pre_truncate_path = path - path = truncate_path(path, length) + path = truncate_path(path) return path, path != pre_truncate_path def legalize_path( - path: str, - replacements: Replacements | None, - length: int, - extension: bytes, - fragment: bool, -) -> tuple[BytesOrStr, bool]: - """Given a path-like Unicode string, produce a legal path. Return - the path and a flag indicating whether some replacements had to be - ignored (see below). + path: str, replacements: Replacements | None, extension: str +) -> tuple[str, bool]: + """Given a path-like Unicode string, produce a legal path. Return the path + and a flag indicating whether some replacements had to be ignored (see + below). - The legalization process (see `_legalize_stage`) consists of - applying the sanitation rules in `replacements`, encoding the string - to bytes (unless `fragment` is set), truncating components to - `length`, appending the `extension`. + This function uses `_legalize_stage` function to legalize the path, see its + documentation for the details of what this involves. It is called up to + three times in case truncation conflicts with replacements (as can happen + when truncation creates whitespace at the end of the string, for example). - This function performs up to three calls to `_legalize_stage` in - case truncation conflicts with replacements (as can happen when - truncation creates whitespace at the end of the string, for - example). The limited number of iterations iterations avoids the - possibility of an infinite loop of sanitation and truncation - operations, which could be caused by replacement rules that make the - string longer. 
The flag returned from this function indicates that - the path has to be truncated twice (indicating that replacements - made the string longer again after it was truncated); the - application should probably log some sort of warning. + The limited number of iterations avoids the possibility of an infinite loop + of sanitation and truncation operations, which could be caused by + replacement rules that make the string longer. + + The flag returned from this function indicates that the path has to be + truncated twice (indicating that replacements made the string longer again + after it was truncated); the application should probably log some sort of + warning. """ + suffix = as_string(extension) - if fragment: - # Outputting Unicode. - extension = extension.decode("utf-8", "ignore") - - first_stage_path, _ = _legalize_stage( - path, replacements, length, extension, fragment + first_stage, _ = os.path.splitext( + _legalize_stage(path, replacements, suffix)[0] ) - # Convert back to Unicode with extension removed. - first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path)) - # Re-sanitize following truncation (including user replacements). - second_stage_path, retruncated = _legalize_stage( - first_stage_path, replacements, length, extension, fragment - ) + second_stage, truncated = _legalize_stage(first_stage, replacements, suffix) - # If the path was once again truncated, discard user replacements + if not truncated: + return second_stage, False + + # If the path was truncated, discard user replacements # and run through one last legalization stage. 
- if retruncated: - second_stage_path, _ = _legalize_stage( - first_stage_path, None, length, extension, fragment - ) - - return second_stage_path, retruncated + return _legalize_stage(first_stage, None, suffix)[0], True def str2bool(value: str) -> bool: @@ -814,7 +809,7 @@ def as_string(value: Any) -> str: return str(value) -def plurality(objs: Sequence[T]) -> tuple[T, int]: +def plurality(objs: Iterable[T]) -> tuple[T, int]: """Given a sequence of hashble objects, returns the object that is most common in the set and the its number of appearance. The sequence must contain at least one object. @@ -825,13 +820,54 @@ def plurality(objs: Sequence[T]) -> tuple[T, int]: return c.most_common(1)[0] +def get_most_common_tags( + items: Sequence[Item], +) -> tuple[dict[str, Any], dict[str, Any]]: + """Extract the likely current metadata for an album given a list of its + items. Return two dictionaries: + - The most common value for each field. + - Whether each field's value was unanimous (values are booleans). + """ + assert items # Must be nonempty. + + likelies = {} + consensus = {} + fields = [ + "artist", + "album", + "albumartist", + "year", + "disctotal", + "mb_albumid", + "label", + "barcode", + "catalognum", + "country", + "media", + "albumdisambig", + "data_source", + ] + for field in fields: + values = [item.get(field) for item in items if item] + likelies[field], freq = plurality(values) + consensus[field] = freq == len(values) + + # If there's an album artist consensus, use this for the artist. + if consensus["albumartist"] and likelies["albumartist"]: + likelies["artist"] = likelies["albumartist"] + + return likelies, consensus + + # stdout and stderr as bytes class CommandOutput(NamedTuple): stdout: bytes stderr: bytes -def command_output(cmd: list[BytesOrStr], shell: bool = False) -> CommandOutput: +def command_output( + cmd: list[str] | list[bytes], shell: bool = False +) -> CommandOutput: """Runs the command and returns its output after it has exited. 
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain @@ -849,8 +885,6 @@ def command_output(cmd: list[BytesOrStr], shell: bool = False) -> CommandOutput: This replaces `subprocess.check_output` which can have problems if lots of output is sent to stderr. """ - converted_cmd = [os.fsdecode(a) for a in cmd] - devnull = subprocess.DEVNULL proc = subprocess.Popen( @@ -865,22 +899,27 @@ def command_output(cmd: list[BytesOrStr], shell: bool = False) -> CommandOutput: if proc.returncode: raise subprocess.CalledProcessError( returncode=proc.returncode, - cmd=" ".join(converted_cmd), + cmd=" ".join(map(os.fsdecode, cmd)), output=stdout + stderr, ) return CommandOutput(stdout, stderr) -def max_filename_length(path: BytesOrStr, limit=MAX_FILENAME_LENGTH) -> int: +@cache +def get_max_filename_length() -> int: """Attempt to determine the maximum filename length for the filesystem containing `path`. If the value is greater than `limit`, then `limit` is used instead (to prevent errors when a filesystem misreports its capacity). If it cannot be determined (e.g., on Windows), return `limit`. """ + if length := beets.config["max_filename_length"].get(int): + return length + + limit = MAX_FILENAME_LENGTH if hasattr(os, "statvfs"): try: - res = os.statvfs(path) + res = os.statvfs(beets.config["directory"].as_str()) except OSError: return limit return min(res[9], limit) @@ -985,19 +1024,6 @@ def case_sensitive(path: bytes) -> bool: return not os.path.samefile(lower_sys, upper_sys) -def raw_seconds_short(string: str) -> float: - """Formats a human-readable M:SS string as a float (number of seconds). - - Raises ValueError if the conversion cannot take place due to `string` not - being in the right format. 
- """ - match = re.match(r"^(\d+):([0-5]\d)$", string) - if not match: - raise ValueError("String not in M:SS format") - minutes, seconds = map(int, match.groups()) - return float(minutes * 60 + seconds) - - def asciify_path(path: str, sep_replace: str) -> str: """Decodes all unicode characters in a path into ASCII equivalents. @@ -1035,21 +1061,94 @@ def par_map(transform: Callable[[T], Any], items: Sequence[T]) -> None: pool.join() -class cached_classproperty: - """A decorator implementing a read-only property that is *lazy* in - the sense that the getter is only invoked once. Subsequent accesses - through *any* instance use the cached result. +class cached_classproperty(Generic[T]): + """Descriptor implementing cached class properties. + + Provides class-level dynamic property behavior where the getter function is + called once per class and the result is cached for subsequent access. Unlike + instance properties, this operates on the class rather than instances. """ - def __init__(self, getter): - self.getter = getter - self.cache = {} + cache: ClassVar[dict[tuple[type[object], str], object]] = {} - def __get__(self, instance, owner): - if owner not in self.cache: - self.cache[owner] = self.getter(owner) + name: str = "" - return self.cache[owner] + # Ideally, we would like to use `Callable[[type[T]], Any]` here, + # however, `mypy` is unable to see this as a **class** property, and thinks + # that this callable receives an **instance** of the object, failing the + # type check, for example: + # >>> class Album: + # >>> @cached_classproperty + # >>> def foo(cls): + # >>> reveal_type(cls) # mypy: revealed type is "Album" + # >>> return cls.bar + # + # Argument 1 to "cached_classproperty" has incompatible type + # "Callable[[Album], ...]"; expected "Callable[[type[Album]], ...]" + # + # Therefore, we just use `Any` here, which is not ideal, but works. 
+ def __init__(self, getter: Callable[..., T]) -> None: + """Initialize the descriptor with the property getter function.""" + self.getter: Callable[..., T] = getter + + def __set_name__(self, owner: object, name: str) -> None: + """Capture the attribute name this descriptor is assigned to.""" + self.name = name + + def __get__(self, instance: object, owner: type[object]) -> T: + """Compute and cache if needed, and return the property value.""" + key: tuple[type[object], str] = owner, self.name + if key not in self.cache: + self.cache[key] = self.getter(owner) + + return cast(T, self.cache[key]) + + +class LazySharedInstance(Generic[T]): + """A descriptor that provides access to a lazily-created shared instance of + the containing class, while calling the class constructor to construct a + new object works as usual. + + ``` + ID: int = 0 + + class Foo: + def __init__(): + global ID + + self.id = ID + ID += 1 + + def func(self): + print(self.id) + + shared: LazySharedInstance[Foo] = LazySharedInstance() + + a0 = Foo() + a1 = Foo.shared + a2 = Foo() + a3 = Foo.shared + + a0.func() # 0 + a1.func() # 1 + a2.func() # 2 + a3.func() # 1 + ``` + """ + + _instance: T | None = None + + def __get__(self, instance: T | None, owner: type[T]) -> T: + if instance is not None: + raise RuntimeError( + "shared instances must be obtained from the class property, " + "not an instance" + ) + + if self._instance is None: + self._instance = owner() + + return self._instance def get_module_tempdir(module: str) -> Path: diff --git a/beets/util/artresizer.py b/beets/util/artresizer.py index ffbc2edba..72007d0b5 100644 --- a/beets/util/artresizer.py +++ b/beets/util/artresizer.py @@ -16,23 +16,36 @@ public resizing proxy if neither is available. 
""" +from __future__ import annotations + import os import os.path import platform import re import subprocess +from abc import ABC, abstractmethod +from enum import Enum from itertools import chain +from typing import TYPE_CHECKING, Any, ClassVar from urllib.parse import urlencode from beets import logging, util -from beets.util import displayable_path, get_temp_filename, syspath +from beets.util import ( + LazySharedInstance, + displayable_path, + get_temp_filename, + syspath, +) + +if TYPE_CHECKING: + from collections.abc import Mapping PROXY_URL = "https://images.weserv.nl/" log = logging.getLogger("beets") -def resize_url(url, maxwidth, quality=0): +def resize_url(url: str, maxwidth: int, quality: int = 0) -> str: """Return a proxied image URL that resizes the original image to maxwidth (preserving aspect ratio). """ @@ -44,25 +57,125 @@ def resize_url(url, maxwidth, quality=0): if quality > 0: params["q"] = quality - return "{}?{}".format(PROXY_URL, urlencode(params)) + return f"{PROXY_URL}?{urlencode(params)}" class LocalBackendNotAvailableError(Exception): pass -_NOT_AVAILABLE = object() +# Singleton pattern that the typechecker understands: +# https://peps.python.org/pep-0484/#support-for-singleton-types-in-unions +class NotAvailable(Enum): + token = 0 -class LocalBackend: +_NOT_AVAILABLE = NotAvailable.token + + +class LocalBackend(ABC): + NAME: ClassVar[str] + @classmethod - def available(cls): + @abstractmethod + def version(cls) -> Any: + """Return the backend version if its dependencies are satisfied or + raise `LocalBackendNotAvailableError`. 
+ """ + pass + + @classmethod + def available(cls) -> bool: + """Return `True` this backend's dependencies are satisfied and it can + be used, `False` otherwise.""" try: cls.version() return True except LocalBackendNotAvailableError: return False + @abstractmethod + def resize( + self, + maxwidth: int, + path_in: bytes, + path_out: bytes | None = None, + quality: int = 0, + max_filesize: int = 0, + ) -> bytes: + """Resize an image to the given width and return the output path. + + On error, logs a warning and returns `path_in`. + """ + pass + + @abstractmethod + def get_size(self, path_in: bytes) -> tuple[int, int] | None: + """Return the (width, height) of the image or None if unavailable.""" + pass + + @abstractmethod + def deinterlace( + self, + path_in: bytes, + path_out: bytes | None = None, + ) -> bytes: + """Remove interlacing from an image and return the output path. + + On error, logs a warning and returns `path_in`. + """ + pass + + @abstractmethod + def get_format(self, path_in: bytes) -> str | None: + """Return the image format (e.g., 'PNG') or None if undetectable.""" + pass + + @abstractmethod + def convert_format( + self, + source: bytes, + target: bytes, + deinterlaced: bool, + ) -> bytes: + """Convert an image to a new format and return the new file path. + + On error, logs a warning and returns `source`. + """ + pass + + @property + def can_compare(self) -> bool: + """Indicate whether image comparison is supported by this backend.""" + return False + + def compare( + self, + im1: bytes, + im2: bytes, + compare_threshold: float, + ) -> bool | None: + """Compare two images and return `True` if they are similar enough, or + `None` if there is an error. + + This must only be called if `self.can_compare()` returns `True`. + """ + # It is an error to call this when ArtResizer.can_compare is not True. 
+ raise NotImplementedError() + + @property + def can_write_metadata(self) -> bool: + """Indicate whether writing metadata to images is supported.""" + return False + + def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None: + """Write key-value metadata into the image file. + + This must only be called if `self.can_write_metadata()` returns `True`. + """ + # It is an error to call this when ArtResizer.can_write_metadata is not True. + raise NotImplementedError() + class IMBackend(LocalBackend): NAME = "ImageMagick" @@ -70,11 +183,11 @@ class IMBackend(LocalBackend): # These fields are used as a cache for `version()`. `_legacy` indicates # whether the modern `magick` binary is available or whether to fall back # to the old-style `convert`, `identify`, etc. commands. - _version = None - _legacy = None + _version: tuple[int, int, int] | NotAvailable | None = None + _legacy: bool | None = None @classmethod - def version(cls): + def version(cls) -> tuple[int, int, int]: """Obtain and cache ImageMagick version. Raises `LocalBackendNotAvailableError` if not available. @@ -98,12 +211,17 @@ class IMBackend(LocalBackend): ) cls._legacy = legacy - if cls._version is _NOT_AVAILABLE: + # cls._version is never None here, but mypy doesn't get that + if cls._version is _NOT_AVAILABLE or cls._version is None: raise LocalBackendNotAvailableError() else: return cls._version - def __init__(self): + convert_cmd: list[str] + identify_cmd: list[str] + compare_cmd: list[str] + + def __init__(self) -> None: """Initialize a wrapper around ImageMagick for local image operations. Stores the ImageMagick version and legacy flag. 
If ImageMagick is not @@ -124,8 +242,13 @@ class IMBackend(LocalBackend): self.compare_cmd = ["magick", "compare"] def resize( - self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0 - ): + self, + maxwidth: int, + path_in: bytes, + path_out: bytes | None = None, + quality: int = 0, + max_filesize: int = 0, + ) -> bytes: """Resize using ImageMagick. Use the ``magick`` program or ``convert`` on older versions. Return @@ -135,7 +258,7 @@ class IMBackend(LocalBackend): path_out = get_temp_filename(__name__, "resize_IM_", path_in) log.debug( - "artresizer: ImageMagick resizing {0} to {1}", + "artresizer: ImageMagick resizing {} to {}", displayable_path(path_in), displayable_path(path_out), ) @@ -145,7 +268,7 @@ class IMBackend(LocalBackend): # with regards to the height. # ImageMagick already seems to default to no interlace, but we include # it here for the sake of explicitness. - cmd = self.convert_cmd + [ + cmd: list[str] = self.convert_cmd + [ syspath(path_in, prefix=False), "-resize", f"{maxwidth}x>", @@ -167,15 +290,15 @@ class IMBackend(LocalBackend): util.command_output(cmd) except subprocess.CalledProcessError: log.warning( - "artresizer: IM convert failed for {0}", + "artresizer: IM convert failed for {}", displayable_path(path_in), ) return path_in return path_out - def get_size(self, path_in): - cmd = self.identify_cmd + [ + def get_size(self, path_in: bytes) -> tuple[int, int] | None: + cmd: list[str] = self.identify_cmd + [ "-format", "%w %h", syspath(path_in, prefix=False), @@ -186,20 +309,30 @@ class IMBackend(LocalBackend): except subprocess.CalledProcessError as exc: log.warning("ImageMagick size query failed") log.debug( - "`convert` exited with (status {}) when " + "`convert` exited with (status {.returncode}) when " "getting size with command {}:\n{}", - exc.returncode, + exc, cmd, exc.output.strip(), ) return None try: - return tuple(map(int, out.split(b" "))) + size = tuple(map(int, out.split(b" "))) except IndexError: 
log.warning("Could not understand IM output: {0!r}", out) return None - def deinterlace(self, path_in, path_out=None): + if len(size) != 2: + log.warning("Could not understand IM output: {0!r}", out) + return None + + return size + + def deinterlace( + self, + path_in: bytes, + path_out: bytes | None = None, + ) -> bytes: if not path_out: path_out = get_temp_filename(__name__, "deinterlace_IM_", path_in) @@ -217,16 +350,24 @@ class IMBackend(LocalBackend): # FIXME: Should probably issue a warning? return path_in - def get_format(self, filepath): - cmd = self.identify_cmd + ["-format", "%[magick]", syspath(filepath)] + def get_format(self, path_in: bytes) -> str | None: + cmd = self.identify_cmd + ["-format", "%[magick]", syspath(path_in)] try: - return util.command_output(cmd).stdout - except subprocess.CalledProcessError: + # Image formats should really only be ASCII strings such as "PNG", + # if anything else is returned, something is off and we return + # None for safety. + return util.command_output(cmd).stdout.decode("ascii", "strict") + except (subprocess.CalledProcessError, UnicodeError): # FIXME: Should probably issue a warning? 
return None - def convert_format(self, source, target, deinterlaced): + def convert_format( + self, + source: bytes, + target: bytes, + deinterlaced: bool, + ) -> bytes: cmd = self.convert_cmd + [ syspath(source), *(["-interlace", "none"] if deinterlaced else []), @@ -243,10 +384,15 @@ class IMBackend(LocalBackend): return source @property - def can_compare(self): + def can_compare(self) -> bool: return self.version() > (6, 8, 7) - def compare(self, im1, im2, compare_threshold): + def compare( + self, + im1: bytes, + im2: bytes, + compare_threshold: float, + ) -> bool | None: is_windows = platform.system() == "Windows" # Converting images to grayscale tends to minimize the weight @@ -286,6 +432,10 @@ class IMBackend(LocalBackend): close_fds=not is_windows, ) + # help out mypy + assert convert_proc.stdout is not None + assert convert_proc.stderr is not None + # Check the convert output. We're not interested in the # standard output; that gets piped to the next stage. convert_proc.stdout.close() @@ -294,8 +444,8 @@ class IMBackend(LocalBackend): convert_proc.wait() if convert_proc.returncode: log.debug( - "ImageMagick convert failed with status {}: {!r}", - convert_proc.returncode, + "ImageMagick convert failed with status {.returncode}: {!r}", + convert_proc, convert_stderr, ) return None @@ -305,7 +455,7 @@ class IMBackend(LocalBackend): if compare_proc.returncode: if compare_proc.returncode != 1: log.debug( - "ImageMagick compare failed: {0}, {1}", + "ImageMagick compare failed: {}, {}", displayable_path(im2), displayable_path(im1), ) @@ -325,18 +475,19 @@ class IMBackend(LocalBackend): log.debug("IM output is not a number: {0!r}", out_str) return None - log.debug("ImageMagick compare score: {0}", phash_diff) + log.debug("ImageMagick compare score: {}", phash_diff) return phash_diff <= compare_threshold @property - def can_write_metadata(self): + def can_write_metadata(self) -> bool: return True - def write_metadata(self, file, metadata): - assignments = list( - 
chain.from_iterable(("-set", k, v) for k, v in metadata.items()) + def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None: + assignments = chain.from_iterable( + ("-set", k, v) for k, v in metadata.items() ) - command = self.convert_cmd + [file, *assignments, file] + str_file = os.fsdecode(file) + command = self.convert_cmd + [str_file, *assignments, str_file] util.command_output(command) @@ -345,13 +496,13 @@ class PILBackend(LocalBackend): NAME = "PIL" @classmethod - def version(cls): + def version(cls) -> None: try: __import__("PIL", fromlist=["Image"]) except ImportError: raise LocalBackendNotAvailableError() - def __init__(self): + def __init__(self) -> None: """Initialize a wrapper around PIL for local image operations. If PIL is not available, raise an Exception. @@ -359,8 +510,13 @@ class PILBackend(LocalBackend): self.version() def resize( - self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0 - ): + self, + maxwidth: int, + path_in: bytes, + path_out: bytes | None = None, + quality: int = 0, + max_filesize: int = 0, + ) -> bytes: """Resize using Python Imaging Library (PIL). Return the output path of resized image. 
""" @@ -370,7 +526,7 @@ class PILBackend(LocalBackend): from PIL import Image log.debug( - "artresizer: PIL resizing {0} to {1}", + "artresizer: PIL resizing {} to {}", displayable_path(path_in), displayable_path(path_out), ) @@ -399,7 +555,7 @@ class PILBackend(LocalBackend): for i in range(5): # 5 attempts is an arbitrary choice filesize = os.stat(syspath(path_out)).st_size - log.debug("PIL Pass {0} : Output size: {1}B", i, filesize) + log.debug("PIL Pass {} : Output size: {}B", i, filesize) if filesize <= max_filesize: return path_out # The relationship between filesize & quality will be @@ -416,7 +572,7 @@ class PILBackend(LocalBackend): progressive=False, ) log.warning( - "PIL Failed to resize file to below {0}B", max_filesize + "PIL Failed to resize file to below {}B", max_filesize ) return path_out @@ -424,12 +580,12 @@ class PILBackend(LocalBackend): return path_out except OSError: log.error( - "PIL cannot create thumbnail for '{0}'", + "PIL cannot create thumbnail for '{}'", displayable_path(path_in), ) return path_in - def get_size(self, path_in): + def get_size(self, path_in: bytes) -> tuple[int, int] | None: from PIL import Image try: @@ -441,7 +597,11 @@ class PILBackend(LocalBackend): ) return None - def deinterlace(self, path_in, path_out=None): + def deinterlace( + self, + path_in: bytes, + path_out: bytes | None = None, + ) -> bytes: if not path_out: path_out = get_temp_filename(__name__, "deinterlace_PIL_", path_in) @@ -455,11 +615,11 @@ class PILBackend(LocalBackend): # FIXME: Should probably issue a warning? 
return path_in - def get_format(self, filepath): + def get_format(self, path_in: bytes) -> str | None: from PIL import Image, UnidentifiedImageError try: - with Image.open(syspath(filepath)) as im: + with Image.open(syspath(path_in)) as im: return im.format except ( ValueError, @@ -467,10 +627,15 @@ class PILBackend(LocalBackend): UnidentifiedImageError, FileNotFoundError, ): - log.exception("failed to detect image format for {}", filepath) + log.exception("failed to detect image format for {}", path_in) return None - def convert_format(self, source, target, deinterlaced): + def convert_format( + self, + source: bytes, + target: bytes, + deinterlaced: bool, + ) -> bytes: from PIL import Image, UnidentifiedImageError try: @@ -488,18 +653,23 @@ class PILBackend(LocalBackend): return source @property - def can_compare(self): + def can_compare(self) -> bool: return False - def compare(self, im1, im2, compare_threshold): + def compare( + self, + im1: bytes, + im2: bytes, + compare_threshold: float, + ) -> bool | None: # It is an error to call this when ArtResizer.can_compare is not True. raise NotImplementedError() @property - def can_write_metadata(self): + def can_write_metadata(self) -> bool: return True - def write_metadata(self, file, metadata): + def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None: from PIL import Image, PngImagePlugin # FIXME: Detect and handle other file types (currently, the only user @@ -507,68 +677,68 @@ class PILBackend(LocalBackend): im = Image.open(syspath(file)) meta = PngImagePlugin.PngInfo() for k, v in metadata.items(): - meta.add_text(k, v, 0) + meta.add_text(k, v, zip=False) im.save(os.fsdecode(file), "PNG", pnginfo=meta) -class Shareable(type): - """A pseudo-singleton metaclass that allows both shared and - non-shared instances. The ``MyClass.shared`` property holds a - lazily-created shared instance of ``MyClass`` while calling - ``MyClass()`` to construct a new object works as usual. 
- """ - - def __init__(cls, name, bases, dict): - super().__init__(name, bases, dict) - cls._instance = None - - @property - def shared(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance - - -BACKEND_CLASSES = [ +BACKEND_CLASSES: list[type[LocalBackend]] = [ IMBackend, PILBackend, ] -class ArtResizer(metaclass=Shareable): - """A singleton class that performs image resizes.""" +class ArtResizer: + """A class that dispatches image operations to an available backend.""" - def __init__(self): + local_method: LocalBackend | None + + def __init__(self) -> None: """Create a resizer object with an inferred method.""" # Check if a local backend is available, and store an instance of the # backend class. Otherwise, fallback to the web proxy. for backend_cls in BACKEND_CLASSES: try: self.local_method = backend_cls() - log.debug(f"artresizer: method is {self.local_method.NAME}") + log.debug("artresizer: method is {.local_method.NAME}", self) break except LocalBackendNotAvailableError: continue else: + # FIXME: Turn WEBPROXY into a backend class as well to remove all + # the special casing. Then simply delegate all methods to the + # backends. (How does proxy_url fit in here, however?) + # Use an ABC (or maybe a typing Protocol?) for backend + # methods, such that both individual backends as well as + # ArtResizer implement it. + # It should probably be configurable which backends classes to + # consider, similar to fetchart or lyrics backends (i.e. a list + # of backends sorted by priority). 
log.debug("artresizer: method is WEBPROXY") self.local_method = None + shared: LazySharedInstance[ArtResizer] = LazySharedInstance() + @property - def method(self): - if self.local: + def method(self) -> str: + if self.local_method is not None: return self.local_method.NAME else: return "WEBPROXY" def resize( - self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0 - ): + self, + maxwidth: int, + path_in: bytes, + path_out: bytes | None = None, + quality: int = 0, + max_filesize: int = 0, + ) -> bytes: """Manipulate an image file according to the method, returning a new path. For PIL or IMAGEMAGIC methods, resizes the image to a temporary file and encodes with the specified quality level. For WEBPROXY, returns `path_in` unmodified. """ - if self.local: + if self.local_method is not None: return self.local_method.resize( maxwidth, path_in, @@ -580,18 +750,22 @@ class ArtResizer(metaclass=Shareable): # Handled by `proxy_url` already. return path_in - def deinterlace(self, path_in, path_out=None): + def deinterlace( + self, + path_in: bytes, + path_out: bytes | None = None, + ) -> bytes: """Deinterlace an image. Only available locally. """ - if self.local: + if self.local_method is not None: return self.local_method.deinterlace(path_in, path_out) else: # FIXME: Should probably issue a warning? return path_in - def proxy_url(self, maxwidth, url, quality=0): + def proxy_url(self, maxwidth: int, url: str, quality: int = 0) -> str: """Modifies an image URL according the method, returning a new URL. For WEBPROXY, a URL on the proxy server is returned. Otherwise, the URL is returned unmodified. @@ -603,42 +777,48 @@ class ArtResizer(metaclass=Shareable): return resize_url(url, maxwidth, quality) @property - def local(self): + def local(self) -> bool: """A boolean indicating whether the resizing method is performed locally (i.e., PIL or ImageMagick). 
""" return self.local_method is not None - def get_size(self, path_in): + def get_size(self, path_in: bytes) -> tuple[int, int] | None: """Return the size of an image file as an int couple (width, height) in pixels. Only available locally. """ - if self.local: + if self.local_method is not None: return self.local_method.get_size(path_in) else: - # FIXME: Should probably issue a warning? - return path_in + raise RuntimeError( + "image cannot be obtained without artresizer backend" + ) - def get_format(self, path_in): + def get_format(self, path_in: bytes) -> str | None: """Returns the format of the image as a string. Only available locally. """ - if self.local: + if self.local_method is not None: return self.local_method.get_format(path_in) else: # FIXME: Should probably issue a warning? return None - def reformat(self, path_in, new_format, deinterlaced=True): + def reformat( + self, + path_in: bytes, + new_format: str, + deinterlaced: bool = True, + ) -> bytes: """Converts image to desired format, updating its extension, but keeping the same filename. Only available locally. """ - if not self.local: + if self.local_method is None: # FIXME: Should probably issue a warning? return path_in @@ -664,40 +844,45 @@ class ArtResizer(metaclass=Shareable): return result_path @property - def can_compare(self): + def can_compare(self) -> bool: """A boolean indicating whether image comparison is available""" - if self.local: + if self.local_method is not None: return self.local_method.can_compare else: return False - def compare(self, im1, im2, compare_threshold): + def compare( + self, + im1: bytes, + im2: bytes, + compare_threshold: float, + ) -> bool | None: """Return a boolean indicating whether two images are similar. Only available locally. """ - if self.local: + if self.local_method is not None: return self.local_method.compare(im1, im2, compare_threshold) else: # FIXME: Should probably issue a warning? 
return None @property - def can_write_metadata(self): + def can_write_metadata(self) -> bool: """A boolean indicating whether writing image metadata is supported.""" - if self.local: + if self.local_method is not None: return self.local_method.can_write_metadata else: return False - def write_metadata(self, file, metadata): + def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None: """Write key-value metadata to the image file. Only available locally. Currently, expects the image to be a PNG file. """ - if self.local: + if self.local_method is not None: self.local_method.write_metadata(file, metadata) else: # FIXME: Should probably issue a warning? diff --git a/beets/util/bluelet.py b/beets/util/bluelet.py index b81b389e0..3f3a88b1e 100644 --- a/beets/util/bluelet.py +++ b/beets/util/bluelet.py @@ -559,7 +559,7 @@ def spawn(coro): and child coroutines run concurrently. """ if not isinstance(coro, types.GeneratorType): - raise ValueError("%s is not a coroutine" % coro) + raise ValueError(f"{coro} is not a coroutine") return SpawnEvent(coro) @@ -569,7 +569,7 @@ def call(coro): returns a value using end(), then this event returns that value. """ if not isinstance(coro, types.GeneratorType): - raise ValueError("%s is not a coroutine" % coro) + raise ValueError(f"{coro} is not a coroutine") return DelegationEvent(coro) diff --git a/beets/util/config.py b/beets/util/config.py new file mode 100644 index 000000000..218a9d133 --- /dev/null +++ b/beets/util/config.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Collection, Sequence + + +def sanitize_choices( + choices: Sequence[str], choices_all: Collection[str] +) -> list[str]: + """Clean up a stringlist configuration attribute: keep only choices + elements present in choices_all, remove duplicate elements, expand '*' + wildcard while keeping original stringlist order. 
+ """ + seen: set[str] = set() + others = [x for x in choices_all if x not in choices] + res: list[str] = [] + for s in choices: + if s not in seen: + if s in list(choices_all): + res.append(s) + elif s == "*": + res.extend(others) + seen.add(s) + return res + + +def sanitize_pairs( + pairs: Sequence[tuple[str, str]], pairs_all: Sequence[tuple[str, str]] +) -> list[tuple[str, str]]: + """Clean up a single-element mapping configuration attribute as returned + by Confuse's `Pairs` template: keep only two-element tuples present in + pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*') + wildcards while keeping the original order. Note that ('*', '*') and + ('*', 'whatever') have the same effect. + + For example, + + >>> sanitize_pairs( + ... [('foo', 'baz bar'), ('key', '*'), ('*', '*')], + ... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'), + ... ('key', 'value')] + ... ) + [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')] + """ + pairs_all = list(pairs_all) + seen: set[tuple[str, str]] = set() + others = [x for x in pairs_all if x not in pairs] + res: list[tuple[str, str]] = [] + for k, values in pairs: + for v in values.split(): + x = (k, v) + if x in pairs_all: + if x not in seen: + seen.add(x) + res.append(x) + elif k == "*": + new = [o for o in others if o not in seen] + seen.update(new) + res.extend(new) + elif v == "*": + new = [o for o in others if o not in seen and o[0] == k] + seen.update(new) + res.extend(new) + return res diff --git a/beets/util/deprecation.py b/beets/util/deprecation.py new file mode 100644 index 000000000..b9ffeae82 --- /dev/null +++ b/beets/util/deprecation.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +import warnings +from importlib import import_module +from typing import TYPE_CHECKING, Any + +from packaging.version import Version + +import beets + +if TYPE_CHECKING: + from logging import Logger + + +def _format_message(old: str, new: str | None = None) -> str: + next_major = 
f"{Version(beets.__version__).major + 1}.0.0" + msg = f"{old} is deprecated and will be removed in version {next_major}." + if new: + msg += f" Use {new} instead." + + return msg + + +def deprecate_for_user( + logger: Logger, old: str, new: str | None = None +) -> None: + logger.warning(_format_message(old, new)) + + +def deprecate_for_maintainers( + old: str, new: str | None = None, stacklevel: int = 1 +) -> None: + """Issue a deprecation warning visible to maintainers during development. + + Emits a DeprecationWarning that alerts developers about deprecated code + patterns. Unlike user-facing warnings, these are primarily for internal + code maintenance and appear during test runs or with warnings enabled. + """ + warnings.warn( + _format_message(old, new), DeprecationWarning, stacklevel=stacklevel + 1 + ) + + +def deprecate_imports( + old_module: str, new_module_by_name: dict[str, str], name: str +) -> Any: + """Handle deprecated module imports by redirecting to new locations. + + Facilitates gradual migration of module structure by intercepting import + attempts for relocated functionality. Issues deprecation warnings while + transparently providing access to the moved implementation, allowing + existing code to continue working during transition periods. + """ + if new_module := new_module_by_name.get(name): + deprecate_for_maintainers( + f"'{old_module}.{name}'", f"'{new_module}.{name}'", stacklevel=2 + ) + + return getattr(import_module(new_module), name) + raise AttributeError(f"module '{old_module}' has no attribute '{name}'") diff --git a/beets/util/functemplate.py b/beets/util/functemplate.py index b0daefac2..739196cef 100644 --- a/beets/util/functemplate.py +++ b/beets/util/functemplate.py @@ -105,8 +105,6 @@ def compile_func(arg_names, statements, name="_the_func", debug=False): decorator_list=[], ) - # The ast.Module signature changed in 3.8 to accept a list of types to - # ignore. 
mod = ast.Module([func_def], []) ast.fix_missing_locations(mod) @@ -136,7 +134,7 @@ class Symbol: self.original = original def __repr__(self): - return "Symbol(%s)" % repr(self.ident) + return f"Symbol({self.ident!r})" def evaluate(self, env): """Evaluate the symbol in the environment, returning a Unicode @@ -152,7 +150,7 @@ class Symbol: def translate(self): """Compile the variable lookup.""" ident = self.ident - expr = ex_rvalue(VARIABLE_PREFIX + ident) + expr = ex_rvalue(f"{VARIABLE_PREFIX}{ident}") return [expr], {ident}, set() @@ -165,9 +163,7 @@ class Call: self.original = original def __repr__(self): - return "Call({}, {}, {})".format( - repr(self.ident), repr(self.args), repr(self.original) - ) + return f"Call({self.ident!r}, {self.args!r}, {self.original!r})" def evaluate(self, env): """Evaluate the function call in the environment, returning a @@ -180,7 +176,7 @@ class Call: except Exception as exc: # Function raised exception! Maybe inlining the name of # the exception will help debug. - return "<%s>" % str(exc) + return f"<{exc}>" return str(out) else: return self.original @@ -213,7 +209,7 @@ class Call: ) ) - subexpr_call = ex_call(FUNCTION_PREFIX + self.ident, arg_exprs) + subexpr_call = ex_call(f"{FUNCTION_PREFIX}{self.ident}", arg_exprs) return [subexpr_call], varnames, funcnames @@ -226,7 +222,7 @@ class Expression: self.parts = parts def __repr__(self): - return "Expression(%s)" % (repr(self.parts)) + return f"Expression({self.parts!r})" def evaluate(self, env): """Evaluate the entire expression in the environment, returning @@ -298,9 +294,6 @@ class Parser: GROUP_CLOSE, ESCAPE_CHAR, ) - special_char_re = re.compile( - r"[%s]|\Z" % "".join(re.escape(c) for c in special_chars) - ) escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP) terminator_chars = (GROUP_CLOSE,) @@ -312,24 +305,18 @@ class Parser: """ # Append comma (ARG_SEP) to the list of special characters only when # parsing function arguments. 
- extra_special_chars = () - special_char_re = self.special_char_re - if self.in_argument: - extra_special_chars = (ARG_SEP,) - special_char_re = re.compile( - r"[%s]|\Z" - % "".join( - re.escape(c) - for c in self.special_chars + extra_special_chars - ) - ) + extra_special_chars = (ARG_SEP,) if self.in_argument else () + special_chars = (*self.special_chars, *extra_special_chars) + special_char_re = re.compile( + rf"[{''.join(map(re.escape, special_chars))}]|\Z" + ) text_parts = [] while self.pos < len(self.string): char = self.string[self.pos] - if char not in self.special_chars + extra_special_chars: + if char not in special_chars: # A non-special character. Skip to the next special # character, treating the interstice as literal text. next_pos = ( @@ -566,9 +553,9 @@ class Template: argnames = [] for varname in varnames: - argnames.append(VARIABLE_PREFIX + varname) + argnames.append(f"{VARIABLE_PREFIX}{varname}") for funcname in funcnames: - argnames.append(FUNCTION_PREFIX + funcname) + argnames.append(f"{FUNCTION_PREFIX}{funcname}") func = compile_func( argnames, @@ -578,9 +565,9 @@ class Template: def wrapper_func(values={}, functions={}): args = {} for varname in varnames: - args[VARIABLE_PREFIX + varname] = values[varname] + args[f"{VARIABLE_PREFIX}{varname}"] = values[varname] for funcname in funcnames: - args[FUNCTION_PREFIX + funcname] = functions[funcname] + args[f"{FUNCTION_PREFIX}{funcname}"] = functions[funcname] parts = func(**args) return "".join(parts) diff --git a/beets/util/hidden.py b/beets/util/hidden.py index d2c66fac0..0a71c91fd 100644 --- a/beets/util/hidden.py +++ b/beets/util/hidden.py @@ -20,10 +20,9 @@ import os import stat import sys from pathlib import Path -from typing import Union -def is_hidden(path: Union[bytes, Path]) -> bool: +def is_hidden(path: bytes | Path) -> bool: """ Determine whether the given path is treated as a 'hidden file' by the OS. 
""" diff --git a/beets/util/id_extractors.py b/beets/util/id_extractors.py index 04e9e94a7..f66f1690f 100644 --- a/beets/util/id_extractors.py +++ b/beets/util/id_extractors.py @@ -14,36 +14,20 @@ """Helpers around the extraction of album/track ID's from metadata sources.""" +from __future__ import annotations + import re -# Spotify IDs consist of 22 alphanumeric characters -# (zero-left-padded base62 representation of randomly generated UUID4) -spotify_id_regex = { - "pattern": r"(^|open\.spotify\.com/{}/)([0-9A-Za-z]{{22}})", - "match_group": 2, -} +from beets import logging -deezer_id_regex = { - "pattern": r"(^|deezer\.com/)([a-z]*/)?({}/)?(\d+)", - "match_group": 4, -} - -beatport_id_regex = { - "pattern": r"(^|beatport\.com/release/.+/)(\d+)$", - "match_group": 2, -} - -# A note on Bandcamp: There is no such thing as a Bandcamp album or artist ID, -# the URL can be used as the identifier. The Bandcamp metadata source plugin -# works that way - https://github.com/snejus/beetcamp. Bandcamp album -# URLs usually look like: https://nameofartist.bandcamp.com/album/nameofalbum +log = logging.getLogger("beets") -def extract_discogs_id_regex(album_id): - """Returns the Discogs_id or None.""" - # Discogs-IDs are simple integers. In order to avoid confusion with - # other metadata plugins, we only look for very specific formats of the - # input string: +PATTERN_BY_SOURCE = { + "spotify": re.compile(r"(?:^|open\.spotify\.com/[^/]+/)([0-9A-Za-z]{22})"), + "deezer": re.compile(r"(?:^|deezer\.com/)(?:[a-z]*/)?(?:[^/]+/)?(\d+)"), + "beatport": re.compile(r"(?:^|beatport\.com/release/.+/)(\d+)$"), + "musicbrainz": re.compile(r"(\w{8}(?:-\w{4}){3}-\w{12})"), # - plain integer, optionally wrapped in brackets and prefixed by an # 'r', as this is how discogs displays the release ID on its webpage. 
# - legacy url format: discogs.com//release/ @@ -51,15 +35,35 @@ def extract_discogs_id_regex(album_id): # - current url format: discogs.com/release/- # See #291, #4080 and #4085 for the discussions leading up to these # patterns. - # Regex has been tested here https://regex101.com/r/TOu7kw/1 + "discogs": re.compile( + r"(?:^|\[?r|discogs\.com/(?:[^/]+/)?release/)(\d+)\b" + ), + # There is no such thing as a Bandcamp album or artist ID, the URL can be + # used as the identifier. The Bandcamp metadata source plugin works that way + # - https://github.com/snejus/beetcamp. Bandcamp album URLs usually look + # like: https://nameofartist.bandcamp.com/album/nameofalbum + "bandcamp": re.compile(r"(.+)"), + "tidal": re.compile(r"([^/]+)$"), +} - for pattern in [ - r"^\[?r?(?P\d+)\]?$", - r"discogs\.com/release/(?P\d+)-?", - r"discogs\.com/[^/]+/release/(?P\d+)", - ]: - match = re.search(pattern, album_id) - if match: - return int(match.group("id")) + +def extract_release_id(source: str, id_: str) -> str | None: + """Extract the release ID from a given source and ID. + + Normally, the `id_` is a url string which contains the ID of the + release. This function extracts the ID from the URL based on the + `source` provided. + """ + try: + source_pattern = PATTERN_BY_SOURCE[source.lower()] + except KeyError: + log.debug( + "Unknown source '{}' for ID extraction. 
Returning id/url as-is.", + source, + ) + return id_ + + if m := source_pattern.search(str(id_)): + return m[1] return None diff --git a/beets/util/pipeline.py b/beets/util/pipeline.py index 98a1addce..2ed593904 100644 --- a/beets/util/pipeline.py +++ b/beets/util/pipeline.py @@ -36,18 +36,20 @@ from __future__ import annotations import queue import sys from threading import Lock, Thread -from typing import Callable, Generator, TypeVar +from typing import TYPE_CHECKING, TypeVar -if sys.version_info >= (3, 11): - from typing import TypeVarTuple, Unpack -else: - from typing_extensions import TypeVarTuple, Unpack +from typing_extensions import TypeVarTuple, Unpack + +if TYPE_CHECKING: + from collections.abc import Callable, Generator BUBBLE = "__PIPELINE_BUBBLE__" POISON = "__PIPELINE_POISON__" DEFAULT_QUEUE_SIZE = 16 +Tq = TypeVar("Tq") + def _invalidate_queue(q, val=None, sync=True): """Breaks a Queue such that it never blocks, always has size 1, @@ -91,7 +93,7 @@ def _invalidate_queue(q, val=None, sync=True): q.mutex.release() -class CountedQueue(queue.Queue): +class CountedQueue(queue.Queue[Tq]): """A queue that keeps track of the number of threads that are still feeding into it. The queue is poisoned when all threads are finished with the queue. @@ -492,64 +494,3 @@ class Pipeline: msgs = next_msgs for msg in msgs: yield msg - - -# Smoke test. -if __name__ == "__main__": - import time - - # Test a normally-terminating pipeline both in sequence and - # in parallel. 
- def produce(): - for i in range(5): - print("generating %i" % i) - time.sleep(1) - yield i - - def work(): - num = yield - while True: - print("processing %i" % num) - time.sleep(2) - num = yield num * 2 - - def consume(): - while True: - num = yield - time.sleep(1) - print("received %i" % num) - - ts_start = time.time() - Pipeline([produce(), work(), consume()]).run_sequential() - ts_seq = time.time() - Pipeline([produce(), work(), consume()]).run_parallel() - ts_par = time.time() - Pipeline([produce(), (work(), work()), consume()]).run_parallel() - ts_end = time.time() - print("Sequential time:", ts_seq - ts_start) - print("Parallel time:", ts_par - ts_seq) - print("Multiply-parallel time:", ts_end - ts_par) - print() - - # Test a pipeline that raises an exception. - def exc_produce(): - for i in range(10): - print("generating %i" % i) - time.sleep(1) - yield i - - def exc_work(): - num = yield - while True: - print("processing %i" % num) - time.sleep(3) - if num == 3: - raise Exception() - num = yield num * 2 - - def exc_consume(): - while True: - num = yield - print("received %i" % num) - - Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1) diff --git a/beets/util/units.py b/beets/util/units.py new file mode 100644 index 000000000..f5fcb743b --- /dev/null +++ b/beets/util/units.py @@ -0,0 +1,61 @@ +import re + + +def raw_seconds_short(string: str) -> float: + """Formats a human-readable M:SS string as a float (number of seconds). + + Raises ValueError if the conversion cannot take place due to `string` not + being in the right format. + """ + match = re.match(r"^(\d+):([0-5]\d)$", string) + if not match: + raise ValueError("String not in M:SS format") + minutes, seconds = map(int, match.groups()) + return float(minutes * 60 + seconds) + + +def human_seconds_short(interval): + """Formats a number of seconds as a short human-readable M:SS + string. 
+ """ + interval = int(interval) + return f"{interval // 60}:{interval % 60:02d}" + + +def human_bytes(size): + """Formats size, a number of bytes, in a human-readable way.""" + powers = ["", "K", "M", "G", "T", "P", "E", "Z", "Y", "H"] + unit = "B" + for power in powers: + if size < 1024: + return f"{size:3.1f} {power}{unit}" + size /= 1024.0 + unit = "iB" + return "big" + + +def human_seconds(interval): + """Formats interval, a number of seconds, as a human-readable time + interval using English words. + """ + units = [ + (1, "second"), + (60, "minute"), + (60, "hour"), + (24, "day"), + (7, "week"), + (52, "year"), + (10, "decade"), + ] + for i in range(len(units) - 1): + increment, suffix = units[i] + next_increment, _ = units[i + 1] + interval /= float(increment) + if interval < next_increment: + break + else: + # Last unit. + increment, suffix = units[-1] + interval /= float(increment) + + return f"{interval:3.1f} {suffix}s" diff --git a/beetsplug/_utils/__init__.py b/beetsplug/_utils/__init__.py new file mode 100644 index 000000000..7453f88bf --- /dev/null +++ b/beetsplug/_utils/__init__.py @@ -0,0 +1,3 @@ +from . import art, vfs + +__all__ = ["art", "vfs"] diff --git a/beets/art.py b/beetsplug/_utils/art.py similarity index 84% rename from beets/art.py rename to beetsplug/_utils/art.py index 2ff58c309..656c303ce 100644 --- a/beets/art.py +++ b/beetsplug/_utils/art.py @@ -38,11 +38,7 @@ def get_art(log, item): try: mf = mediafile.MediaFile(syspath(item.path)) except mediafile.UnreadableFileError as exc: - log.warning( - "Could not extract art from {0}: {1}", - displayable_path(item.path), - exc, - ) + log.warning("Could not extract art from {.filepath}: {}", item, exc) return return mf.art @@ -83,16 +79,16 @@ def embed_item( # Get the `Image` object from the file. 
try: - log.debug("embedding {0}", displayable_path(imagepath)) + log.debug("embedding {}", displayable_path(imagepath)) image = mediafile_image(imagepath, maxwidth) except OSError as exc: - log.warning("could not read image file: {0}", exc) + log.warning("could not read image file: {}", exc) return # Make sure the image kind is safe (some formats only support PNG # and JPEG). if image.mime_type not in ("image/jpeg", "image/png"): - log.info("not embedding image of unsupported type: {}", image.mime_type) + log.info("not embedding image of unsupported type: {.mime_type}", image) return item.try_write(path=itempath, tags={"images": [image]}, id3v23=id3v23) @@ -110,11 +106,11 @@ def embed_album( """Embed album art into all of the album's items.""" imagepath = album.artpath if not imagepath: - log.info("No album art present for {0}", album) + log.info("No album art present for {}", album) return if not os.path.isfile(syspath(imagepath)): log.info( - "Album art not found at {0} for {1}", + "Album art not found at {} for {}", displayable_path(imagepath), album, ) @@ -122,7 +118,7 @@ def embed_album( if maxwidth: imagepath = resize_image(log, imagepath, maxwidth, quality) - log.info("Embedding album art into {0}", album) + log.info("Embedding album art into {}", album) for item in album.items(): embed_item( @@ -143,8 +139,7 @@ def resize_image(log, imagepath, maxwidth, quality): specified quality level. """ log.debug( - "Resizing album art to {0} pixels wide and encoding at quality \ - level {1}", + "Resizing album art to {} pixels wide and encoding at quality level {}", maxwidth, quality, ) @@ -184,18 +179,18 @@ def extract(log, outpath, item): art = get_art(log, item) outpath = bytestring_path(outpath) if not art: - log.info("No album art present in {0}, skipping.", item) + log.info("No album art present in {}, skipping.", item) return # Add an extension to the filename. 
ext = mediafile.image_extension(art) if not ext: - log.warning("Unknown image type in {0}.", displayable_path(item.path)) + log.warning("Unknown image type in {.filepath}.", item) return - outpath += bytestring_path("." + ext) + outpath += bytestring_path(f".{ext}") log.info( - "Extracting album art from: {0} to: {1}", + "Extracting album art from: {} to: {}", item, displayable_path(outpath), ) @@ -213,7 +208,7 @@ def extract_first(log, outpath, items): def clear(log, lib, query): items = lib.items(query) - log.info("Clearing album art from {0} items", len(items)) + log.info("Clearing album art from {} items", len(items)) for item in items: - log.debug("Clearing art for {0}", item) + log.debug("Clearing art for {}", item) item.try_write(tags={"images": None}) diff --git a/beetsplug/_utils/musicbrainz.py b/beetsplug/_utils/musicbrainz.py new file mode 100644 index 000000000..2fc821df9 --- /dev/null +++ b/beetsplug/_utils/musicbrainz.py @@ -0,0 +1,290 @@ +"""Helpers for communicating with the MusicBrainz webservice. + +Provides rate-limited HTTP session and convenience methods to fetch and +normalize API responses. + +This module centralizes request handling and response shaping so callers can +work with consistently structured data without embedding HTTP or rate-limit +logic throughout the codebase. 
+""" + +from __future__ import annotations + +import operator +from dataclasses import dataclass, field +from functools import cached_property, singledispatchmethod, wraps +from itertools import groupby +from typing import TYPE_CHECKING, Any, Literal, ParamSpec, TypedDict, TypeVar + +from requests_ratelimiter import LimiterMixin +from typing_extensions import NotRequired, Unpack + +from beets import config, logging + +from .requests import RequestHandler, TimeoutAndRetrySession + +if TYPE_CHECKING: + from collections.abc import Callable + + from requests import Response + + from .._typing import JSONDict + +log = logging.getLogger(__name__) + + +class LimiterTimeoutSession(LimiterMixin, TimeoutAndRetrySession): + """HTTP session that enforces rate limits.""" + + +Entity = Literal[ + "area", + "artist", + "collection", + "event", + "genre", + "instrument", + "label", + "place", + "recording", + "release", + "release-group", + "series", + "work", + "url", +] + + +class LookupKwargs(TypedDict, total=False): + includes: NotRequired[list[str]] + + +class PagingKwargs(TypedDict, total=False): + limit: NotRequired[int] + offset: NotRequired[int] + + +class SearchKwargs(PagingKwargs): + query: NotRequired[str] + + +class BrowseKwargs(LookupKwargs, PagingKwargs, total=False): + pass + + +class BrowseReleaseGroupsKwargs(BrowseKwargs, total=False): + artist: NotRequired[str] + collection: NotRequired[str] + release: NotRequired[str] + + +class BrowseRecordingsKwargs(BrowseReleaseGroupsKwargs, total=False): + work: NotRequired[str] + + +P = ParamSpec("P") +R = TypeVar("R") + + +def require_one_of(*keys: str) -> Callable[[Callable[P, R]], Callable[P, R]]: + required = frozenset(keys) + + def deco(func: Callable[P, R]) -> Callable[P, R]: + @wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + # kwargs is a real dict at runtime; safe to inspect here + if not required & kwargs.keys(): + required_str = ", ".join(sorted(required)) + raise ValueError( + f"At least one 
of {required_str} filter is required" + ) + return func(*args, **kwargs) + + return wrapper + + return deco + + +@dataclass +class MusicBrainzAPI(RequestHandler): + """High-level interface to the MusicBrainz WS/2 API. + + Responsibilities: + + - Configure the API host and request rate from application configuration. + - Offer helpers to fetch common entity types and to run searches. + - Normalize MusicBrainz responses so relation lists are grouped by target + type for easier downstream consumption. + + Documentation: https://musicbrainz.org/doc/MusicBrainz_API + """ + + api_host: str = field(init=False) + rate_limit: float = field(init=False) + + def __post_init__(self) -> None: + mb_config = config["musicbrainz"] + mb_config.add( + { + "host": "musicbrainz.org", + "https": False, + "ratelimit": 1, + "ratelimit_interval": 1, + } + ) + + hostname = mb_config["host"].as_str() + if hostname == "musicbrainz.org": + self.api_host, self.rate_limit = "https://musicbrainz.org", 1.0 + else: + https = mb_config["https"].get(bool) + self.api_host = f"http{'s' if https else ''}://{hostname}" + self.rate_limit = ( + mb_config["ratelimit"].get(int) + / mb_config["ratelimit_interval"].as_number() + ) + + @cached_property + def api_root(self) -> str: + return f"{self.api_host}/ws/2" + + def create_session(self) -> LimiterTimeoutSession: + return LimiterTimeoutSession(per_second=self.rate_limit) + + def request(self, *args, **kwargs) -> Response: + """Ensure all requests specify JSON response format by default.""" + kwargs.setdefault("params", {}) + kwargs["params"]["fmt"] = "json" + return super().request(*args, **kwargs) + + def _get_resource( + self, resource: str, includes: list[str] | None = None, **kwargs + ) -> JSONDict: + """Retrieve and normalize data from the API resource endpoint. + + If requested, includes are appended to the request. 
The response is + passed through a normalizer that groups relation entries by their + target type so that callers receive a consistently structured mapping. + """ + if includes: + kwargs["inc"] = "+".join(includes) + + return self._group_relations( + self.get_json(f"{self.api_root}/{resource}", params=kwargs) + ) + + def _lookup( + self, entity: Entity, id_: str, **kwargs: Unpack[LookupKwargs] + ) -> JSONDict: + return self._get_resource(f"{entity}/{id_}", **kwargs) + + def _browse(self, entity: Entity, **kwargs) -> list[JSONDict]: + return self._get_resource(entity, **kwargs).get(f"{entity}s", []) + + def search( + self, + entity: Entity, + filters: dict[str, str], + **kwargs: Unpack[SearchKwargs], + ) -> list[JSONDict]: + """Search for MusicBrainz entities matching the given filters. + + * Query is constructed by combining the provided filters using AND logic + * Each filter key-value pair is formatted as 'key:"value"' unless + - 'key' is empty, in which case only the value is used, '"value"' + - 'value' is empty, in which case the filter is ignored + * Values are lowercased and stripped of whitespace. 
+ """ + query = " AND ".join( + ":".join(filter(None, (k, f'"{_v}"'))) + for k, v in filters.items() + if (_v := v.lower().strip()) + ) + log.debug("Searching for MusicBrainz {}s with: {!r}", entity, query) + kwargs["query"] = query + return self._get_resource(entity, **kwargs)[f"{entity}s"] + + def get_release(self, id_: str, **kwargs: Unpack[LookupKwargs]) -> JSONDict: + """Retrieve a release by its MusicBrainz ID.""" + return self._lookup("release", id_, **kwargs) + + def get_recording( + self, id_: str, **kwargs: Unpack[LookupKwargs] + ) -> JSONDict: + """Retrieve a recording by its MusicBrainz ID.""" + return self._lookup("recording", id_, **kwargs) + + def get_work(self, id_: str, **kwargs: Unpack[LookupKwargs]) -> JSONDict: + """Retrieve a work by its MusicBrainz ID.""" + return self._lookup("work", id_, **kwargs) + + @require_one_of("artist", "collection", "release", "work") + def browse_recordings( + self, **kwargs: Unpack[BrowseRecordingsKwargs] + ) -> list[JSONDict]: + """Browse recordings related to the given entities. + + At least one of artist, collection, release, or work must be provided. + """ + return self._browse("recording", **kwargs) + + @require_one_of("artist", "collection", "release") + def browse_release_groups( + self, **kwargs: Unpack[BrowseReleaseGroupsKwargs] + ) -> list[JSONDict]: + """Browse release groups related to the given entities. + + At least one of artist, collection, or release must be provided. + """ + return self._get_resource("release-group", **kwargs)["release-groups"] + + @singledispatchmethod + @classmethod + def _group_relations(cls, data: Any) -> Any: + """Normalize MusicBrainz 'relations' into type-keyed fields recursively. + + This helper rewrites payloads that use a generic 'relations' list into + a structure that is easier to consume downstream. When a mapping + contains 'relations', those entries are regrouped by their 'target-type' + and stored under keys like '-relations'. 
The original + 'relations' key is removed to avoid ambiguous access patterns. + + The transformation is applied recursively so that nested objects and + sequences are normalized consistently, while non-container values are + left unchanged. + """ + return data + + @_group_relations.register(list) + @classmethod + def _(cls, data: list[Any]) -> list[Any]: + return [cls._group_relations(i) for i in data] + + @_group_relations.register(dict) + @classmethod + def _(cls, data: JSONDict) -> JSONDict: + for k, v in list(data.items()): + if k == "relations": + get_target_type = operator.methodcaller("get", "target-type") + for target_type, group in groupby( + sorted(v, key=get_target_type), get_target_type + ): + relations = [ + {k: v for k, v in item.items() if k != "target-type"} + for item in group + ] + data[f"{target_type}-relations"] = cls._group_relations( + relations + ) + data.pop("relations") + else: + data[k] = cls._group_relations(v) + return data + + +class MusicBrainzAPIMixin: + """Mixin that provides a cached MusicBrainzAPI helper instance.""" + + @cached_property + def mb_api(self) -> MusicBrainzAPI: + return MusicBrainzAPI() diff --git a/beetsplug/_utils/requests.py b/beetsplug/_utils/requests.py new file mode 100644 index 000000000..92d52c9d6 --- /dev/null +++ b/beetsplug/_utils/requests.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import atexit +import threading +from contextlib import contextmanager +from functools import cached_property +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, ClassVar, Generic, Protocol, TypeVar + +import requests +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry + +from beets import __version__ + +if TYPE_CHECKING: + from collections.abc import Iterator + + +class BeetsHTTPError(requests.exceptions.HTTPError): + STATUS: ClassVar[HTTPStatus] + + def __init__(self, *args, **kwargs) -> None: + super().__init__( + f"HTTP Error: {self.STATUS.value} 
{self.STATUS.phrase}", + *args, + **kwargs, + ) + + +class HTTPNotFoundError(BeetsHTTPError): + STATUS = HTTPStatus.NOT_FOUND + + +class Closeable(Protocol): + """Protocol for objects that have a close method.""" + + def close(self) -> None: ... + + +C = TypeVar("C", bound=Closeable) + + +class SingletonMeta(type, Generic[C]): + """Metaclass ensuring a single shared instance per class. + + Creates one instance per class type on first instantiation, reusing it + for all subsequent calls. Automatically registers cleanup on program exit + for proper resource management. + """ + + _instances: ClassVar[dict[type[Any], Any]] = {} + _lock: ClassVar[threading.Lock] = threading.Lock() + + def __call__(cls, *args: Any, **kwargs: Any) -> C: + if cls not in cls._instances: + with cls._lock: + if cls not in SingletonMeta._instances: + instance = super().__call__(*args, **kwargs) + SingletonMeta._instances[cls] = instance + atexit.register(instance.close) + return SingletonMeta._instances[cls] + + +class TimeoutAndRetrySession(requests.Session, metaclass=SingletonMeta): + """HTTP session with sensible defaults. + + * default beets User-Agent header + * default request timeout + * automatic retries on transient connection or server errors + * raises exceptions for HTTP error status codes + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.headers["User-Agent"] = f"beets/{__version__} https://beets.io/" + + retry = Retry( + connect=2, + total=2, + backoff_factor=1, + # Retry on server errors + status_forcelist=[ + HTTPStatus.INTERNAL_SERVER_ERROR, + HTTPStatus.BAD_GATEWAY, + HTTPStatus.SERVICE_UNAVAILABLE, + HTTPStatus.GATEWAY_TIMEOUT, + ], + ) + adapter = HTTPAdapter(max_retries=retry) + self.mount("https://", adapter) + self.mount("http://", adapter) + + def request(self, *args, **kwargs): + """Execute HTTP request with automatic timeout and status validation. 
+ + Ensures all requests have a timeout (defaults to 10 seconds) and raises + an exception for HTTP error status codes. + """ + kwargs.setdefault("timeout", 10) + r = super().request(*args, **kwargs) + r.raise_for_status() + + return r + + +class RequestHandler: + """Manages HTTP requests with custom error handling and session management. + + Provides a reusable interface for making HTTP requests with automatic + conversion of standard HTTP errors to beets-specific exceptions. Supports + custom session types and error mappings that can be overridden by + subclasses. + + Usage: + Subclass and override :class:`RequestHandler.create_session`, + :class:`RequestHandler.explicit_http_errors` or + :class:`RequestHandler.status_to_error()` to customize behavior. + + Use + + - :class:`RequestHandler.get_json()` to get JSON response data + - :class:`RequestHandler.get()` to get HTTP response object + - :class:`RequestHandler.request()` to invoke arbitrary HTTP methods + + Feel free to define common methods that are used in multiple plugins. + """ + + #: List of custom exceptions to be raised for specific status codes. + explicit_http_errors: ClassVar[list[type[BeetsHTTPError]]] = [ + HTTPNotFoundError + ] + + def create_session(self) -> TimeoutAndRetrySession: + """Create a new HTTP session instance. + + Can be overridden by subclasses to provide custom session types. + """ + return TimeoutAndRetrySession() + + @cached_property + def session(self) -> TimeoutAndRetrySession: + return self.create_session() + + def status_to_error( + self, code: int + ) -> type[requests.exceptions.HTTPError] | None: + """Map HTTP status codes to beets-specific exception types. + + Searches the configured explicit HTTP errors for a matching status code. + Returns None if no specific error type is registered for the given code. 
+ """ + return next( + (e for e in self.explicit_http_errors if e.STATUS == code), None + ) + + @contextmanager + def handle_http_error(self) -> Iterator[None]: + """Convert standard HTTP errors to beets-specific exceptions. + + Wraps operations that may raise HTTPError, automatically translating + recognized status codes into their corresponding beets exception types. + Unrecognized errors are re-raised unchanged. + """ + try: + yield + except requests.exceptions.HTTPError as e: + if beets_error := self.status_to_error(e.response.status_code): + raise beets_error(response=e.response) from e + + raise + + def request(self, *args, **kwargs) -> requests.Response: + """Perform HTTP request using the session with automatic error handling. + + Delegates to the underlying session method while converting recognized + HTTP errors to beets-specific exceptions through the error handler. + """ + with self.handle_http_error(): + return self.session.request(*args, **kwargs) + + def get(self, *args, **kwargs) -> requests.Response: + """Perform HTTP GET request with automatic error handling.""" + return self.request("get", *args, **kwargs) + + def put(self, *args, **kwargs) -> requests.Response: + """Perform HTTP PUT request with automatic error handling.""" + return self.request("put", *args, **kwargs) + + def delete(self, *args, **kwargs) -> requests.Response: + """Perform HTTP DELETE request with automatic error handling.""" + return self.request("delete", *args, **kwargs) + + def get_json(self, *args, **kwargs): + """Fetch and parse JSON data from an HTTP endpoint.""" + return self.get(*args, **kwargs).json() diff --git a/beets/vfs.py b/beetsplug/_utils/vfs.py similarity index 76% rename from beets/vfs.py rename to beetsplug/_utils/vfs.py index cdbf197a6..6294b644c 100644 --- a/beets/vfs.py +++ b/beetsplug/_utils/vfs.py @@ -16,17 +16,25 @@ libraries. 
""" -from typing import Any, NamedTuple +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple from beets import util +if TYPE_CHECKING: + from beets.library import Library + class Node(NamedTuple): - files: dict[str, Any] - dirs: dict[str, Any] + files: dict[str, int] + # Maps filenames to Item ids. + + dirs: dict[str, Node] + # Maps directory names to child nodes. -def _insert(node, path, itemid): +def _insert(node: Node, path: list[str], itemid: int): """Insert an item into a virtual filesystem node.""" if len(path) == 1: # Last component. Insert file. @@ -40,7 +48,7 @@ def _insert(node, path, itemid): _insert(node.dirs[dirname], rest, itemid) -def libtree(lib): +def libtree(lib: Library) -> Node: """Generates a filesystem-like directory tree for the files contained in `lib`. Filesystem nodes are (files, dirs) named tuples in which both components are dictionaries. The first @@ -49,7 +57,7 @@ def libtree(lib): """ root = Node({}, {}) for item in lib.items(): - dest = item.destination(fragment=True) - parts = util.components(dest) + dest = item.destination(relative_to_libdir=True) + parts = util.components(util.as_string(dest)) _insert(root, parts, item.id) return root diff --git a/beetsplug/absubmit.py b/beetsplug/absubmit.py index bbbc14edf..62a248482 100644 --- a/beetsplug/absubmit.py +++ b/beetsplug/absubmit.py @@ -18,9 +18,9 @@ import errno import hashlib import json import os +import shutil import subprocess import tempfile -from distutils.spawn import find_executable import requests @@ -42,9 +42,7 @@ def call(args): try: return util.command_output(args).stdout except subprocess.CalledProcessError as e: - raise ABSubmitError( - "{} exited with status {}".format(args[0], e.returncode) - ) + raise ABSubmitError(f"{args[0]} exited with status {e.returncode}") class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin): @@ -63,9 +61,7 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin): # Explicit path to extractor if not 
os.path.isfile(self.extractor): raise ui.UserError( - "Extractor command does not exist: {0}.".format( - self.extractor - ) + f"Extractor command does not exist: {self.extractor}." ) else: # Implicit path to extractor, search for it in path @@ -84,7 +80,7 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin): # Get the executable location on the system, which we need # to calculate the SHA-1 hash. - self.extractor = find_executable(self.extractor) + self.extractor = shutil.which(self.extractor) # Calculate extractor hash. self.extractor_sha = hashlib.sha1() @@ -101,8 +97,8 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin): "with an HTTP scheme" ) elif base_url[-1] != "/": - base_url = base_url + "/" - self.url = base_url + "{mbid}/low-level" + base_url = f"{base_url}/" + self.url = f"{base_url}{{mbid}}/low-level" def commands(self): cmd = ui.Subcommand( @@ -122,8 +118,10 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin): dest="pretend_fetch", action="store_true", default=False, - help="pretend to perform action, but show \ -only files which would be processed", + help=( + "pretend to perform action, but show only files which would be" + " processed" + ), ) cmd.func = self.command return [cmd] @@ -137,7 +135,7 @@ only files which would be processed", ) else: # Get items from arguments - items = lib.items(ui.decargs(args)) + items = lib.items(args) self.opts = opts util.par_map(self.analyze_submit, items) @@ -157,7 +155,7 @@ only files which would be processed", # If file has no MBID, skip it. 
if not mbid: self._log.info( - "Not analysing {}, missing " "musicbrainz track id.", item + "Not analysing {}, missing musicbrainz track id.", item ) return None @@ -220,6 +218,6 @@ only files which would be processed", ) else: self._log.debug( - "Successfully submitted AcousticBrainz analysis " "for {}.", + "Successfully submitted AcousticBrainz analysis for {}.", item, ) diff --git a/beetsplug/acousticbrainz.py b/beetsplug/acousticbrainz.py index 899288260..92a1976a1 100644 --- a/beetsplug/acousticbrainz.py +++ b/beetsplug/acousticbrainz.py @@ -97,7 +97,7 @@ class AcousticPlugin(plugins.BeetsPlugin): "with an HTTP scheme" ) elif self.base_url[-1] != "/": - self.base_url = self.base_url + "/" + self.base_url = f"{self.base_url}/" if self.config["auto"]: self.register_listener("import_task_files", self.import_task_files) @@ -116,7 +116,7 @@ class AcousticPlugin(plugins.BeetsPlugin): ) def func(lib, opts, args): - items = lib.items(ui.decargs(args)) + items = lib.items(args) self._fetch_info( items, ui.should_write(), @@ -153,7 +153,7 @@ class AcousticPlugin(plugins.BeetsPlugin): try: data.update(res.json()) except ValueError: - self._log.debug("Invalid Response: {}", res.text) + self._log.debug("Invalid Response: {.text}", res) return {} return data @@ -286,7 +286,7 @@ class AcousticPlugin(plugins.BeetsPlugin): yield v, subdata[k] else: self._log.warning( - "Acousticbrainz did not provide info " "about {}", k + "Acousticbrainz did not provide info about {}", k ) self._log.debug( "Data {} could not be mapped to scheme {} " @@ -300,4 +300,4 @@ class AcousticPlugin(plugins.BeetsPlugin): def _generate_urls(base_url, mbid): """Generates AcousticBrainz end point urls for given `mbid`.""" for level in LEVELS: - yield base_url + mbid + level + yield f"{base_url}{mbid}{level}" diff --git a/beetsplug/advancedrewrite.py b/beetsplug/advancedrewrite.py index 9a5feaaff..8bc63c0cb 100644 --- a/beetsplug/advancedrewrite.py +++ b/beetsplug/advancedrewrite.py @@ -58,7 +58,9 @@ class 
AdvancedRewritePlugin(BeetsPlugin): def __init__(self): """Parse configuration and register template fields for rewriting.""" super().__init__() + self.register_listener("pluginload", self.loaded) + def loaded(self): template = confuse.Sequence( confuse.OneOf( [ diff --git a/beetsplug/albumtypes.py b/beetsplug/albumtypes.py index b1e143a88..180773f58 100644 --- a/beetsplug/albumtypes.py +++ b/beetsplug/albumtypes.py @@ -14,10 +14,11 @@ """Adds an album template field for formatted album types.""" -from beets.autotag.mb import VARIOUS_ARTISTS_ID from beets.library import Album from beets.plugins import BeetsPlugin +from .musicbrainz import VARIOUS_ARTISTS_ID + class AlbumTypesPlugin(BeetsPlugin): """Adds an album template field for formatted album types.""" diff --git a/beetsplug/aura.py b/beetsplug/aura.py index e7034c1e9..7b75f31e5 100644 --- a/beetsplug/aura.py +++ b/beetsplug/aura.py @@ -16,7 +16,6 @@ import os import re -import sys from collections.abc import Mapping from dataclasses import dataclass from mimetypes import guess_type @@ -30,11 +29,7 @@ from flask import ( request, send_file, ) - -if sys.version_info >= (3, 11): - from typing import Self -else: - from typing_extensions import Self +from typing_extensions import Self from beets import config from beets.dbcore.query import ( @@ -241,14 +236,14 @@ class AURADocument: # Not the last page so work out links.next url if not self.args: # No existing arguments, so current page is 0 - next_url = request.url + "?page=1" + next_url = f"{request.url}?page=1" elif not self.args.get("page", None): # No existing page argument, so add one to the end - next_url = request.url + "&page=1" + next_url = f"{request.url}&page=1" else: # Increment page token by 1 next_url = request.url.replace( - f"page={page}", "page={}".format(page + 1) + f"page={page}", f"page={page + 1}" ) # Get only the items in the page range data = [ @@ -432,9 +427,7 @@ class TrackDocument(AURADocument): return self.error( "404 Not Found", "No 
track with the requested id.", - "There is no track with an id of {} in the library.".format( - track_id - ), + f"There is no track with an id of {track_id} in the library.", ) return self.single_resource_document( self.get_resource_object(self.lib, track) @@ -518,9 +511,7 @@ class AlbumDocument(AURADocument): return self.error( "404 Not Found", "No album with the requested id.", - "There is no album with an id of {} in the library.".format( - album_id - ), + f"There is no album with an id of {album_id} in the library.", ) return self.single_resource_document( self.get_resource_object(self.lib, album) @@ -605,9 +596,7 @@ class ArtistDocument(AURADocument): return self.error( "404 Not Found", "No artist with the requested id.", - "There is no artist with an id of {} in the library.".format( - artist_id - ), + f"There is no artist with an id of {artist_id} in the library.", ) return self.single_resource_document(artist_resource) @@ -708,7 +697,7 @@ class ImageDocument(AURADocument): relationships = {} # Split id into [parent_type, parent_id, filename] id_split = image_id.split("-") - relationships[id_split[0] + "s"] = { + relationships[f"{id_split[0]}s"] = { "data": [{"type": id_split[0], "id": id_split[1]}] } @@ -732,9 +721,7 @@ class ImageDocument(AURADocument): return self.error( "404 Not Found", "No image with the requested id.", - "There is no image with an id of {} in the library.".format( - image_id - ), + f"There is no image with an id of {image_id} in the library.", ) return self.single_resource_document(image_resource) @@ -780,9 +767,7 @@ def audio_file(track_id): return AURADocument.error( "404 Not Found", "No track with the requested id.", - "There is no track with an id of {} in the library.".format( - track_id - ), + f"There is no track with an id of {track_id} in the library.", ) path = os.fsdecode(track.path) @@ -790,9 +775,8 @@ def audio_file(track_id): return AURADocument.error( "404 Not Found", "No audio file for the requested track.", - ( - "There 
is no audio file for track {} at the expected location" - ).format(track_id), + f"There is no audio file for track {track_id} at the expected" + " location", ) file_mimetype = guess_type(path)[0] @@ -800,10 +784,8 @@ def audio_file(track_id): return AURADocument.error( "500 Internal Server Error", "Requested audio file has an unknown mimetype.", - ( - "The audio file for track {} has an unknown mimetype. " - "Its file extension is {}." - ).format(track_id, path.split(".")[-1]), + f"The audio file for track {track_id} has an unknown mimetype. " + f"Its file extension is {path.split('.')[-1]}.", ) # Check that the Accept header contains the file's mimetype @@ -815,10 +797,8 @@ def audio_file(track_id): return AURADocument.error( "406 Not Acceptable", "Unsupported MIME type or bitrate parameter in Accept header.", - ( - "The audio file for track {} is only available as {} and " - "bitrate parameters are not supported." - ).format(track_id, file_mimetype), + f"The audio file for track {track_id} is only available as" + f" {file_mimetype} and bitrate parameters are not supported.", ) return send_file( @@ -901,9 +881,7 @@ def image_file(image_id): return AURADocument.error( "404 Not Found", "No image with the requested id.", - "There is no image with an id of {} in the library".format( - image_id - ), + f"There is no image with an id of {image_id} in the library", ) return send_file(img_path) diff --git a/beetsplug/autobpm.py b/beetsplug/autobpm.py index 9c953f711..46d7e672a 100644 --- a/beetsplug/autobpm.py +++ b/beetsplug/autobpm.py @@ -15,10 +15,10 @@ from __future__ import annotations -from collections.abc import Iterable from typing import TYPE_CHECKING import librosa +import numpy as np from beets.plugins import BeetsPlugin from beets.ui import Subcommand, should_write @@ -76,7 +76,10 @@ class AutoBPMPlugin(BeetsPlugin): self._log.error("Failed to measure BPM for {}: {}", path, exc) continue - bpm = round(tempo[0] if isinstance(tempo, Iterable) else tempo) + bpm = 
round( + float(tempo[0] if isinstance(tempo, np.ndarray) else tempo) + ) + item["bpm"] = bpm self._log.info("Computed BPM for {}: {}", path, bpm) diff --git a/beetsplug/badfiles.py b/beetsplug/badfiles.py index f93f03d5e..070008be8 100644 --- a/beetsplug/badfiles.py +++ b/beetsplug/badfiles.py @@ -110,9 +110,7 @@ class BadFiles(BeetsPlugin): self._log.debug("checking path: {}", dpath) if not os.path.exists(item.path): ui.print_( - "{}: file does not exist".format( - ui.colorize("text_error", dpath) - ) + f"{ui.colorize('text_error', dpath)}: file does not exist" ) # Run the checker against the file if one is found @@ -129,37 +127,32 @@ class BadFiles(BeetsPlugin): except CheckerCommandError as e: if e.errno == errno.ENOENT: self._log.error( - "command not found: {} when validating file: {}", - e.checker, - e.path, + "command not found: {0.checker} when validating file: {0.path}", + e, ) else: - self._log.error("error invoking {}: {}", e.checker, e.msg) + self._log.error("error invoking {0.checker}: {0.msg}", e) return [] error_lines = [] if status > 0: error_lines.append( - "{}: checker exited with status {}".format( - ui.colorize("text_error", dpath), status - ) + f"{ui.colorize('text_error', dpath)}: checker exited with" + f" status {status}" ) for line in output: error_lines.append(f" {line}") elif errors > 0: error_lines.append( - "{}: checker found {} errors or warnings".format( - ui.colorize("text_warning", dpath), errors - ) + f"{ui.colorize('text_warning', dpath)}: checker found" + f" {status} errors or warnings" ) for line in output: error_lines.append(f" {line}") elif self.verbose: - error_lines.append( - "{}: ok".format(ui.colorize("text_success", dpath)) - ) + error_lines.append(f"{ui.colorize('text_success', dpath)}: ok") return error_lines @@ -180,9 +173,8 @@ class BadFiles(BeetsPlugin): def on_import_task_before_choice(self, task, session): if hasattr(task, "_badfiles_checks_failed"): ui.print_( - "{} one or more files failed checks:".format( - 
ui.colorize("text_warning", "BAD") - ) + f"{ui.colorize('text_warning', 'BAD')} one or more files failed" + " checks:" ) for error in task._badfiles_checks_failed: for error_line in error: @@ -194,7 +186,7 @@ class BadFiles(BeetsPlugin): sel = ui.input_options(["aBort", "skip", "continue"]) if sel == "s": - return importer.action.SKIP + return importer.Action.SKIP elif sel == "c": return None elif sel == "b": @@ -204,7 +196,7 @@ class BadFiles(BeetsPlugin): def command(self, lib, opts, args): # Get items from arguments - items = lib.items(ui.decargs(args)) + items = lib.items(args) self.verbose = opts.verbose def check_and_print(item): diff --git a/beetsplug/bareasc.py b/beetsplug/bareasc.py index 0a867dfe1..d2852bb1d 100644 --- a/beetsplug/bareasc.py +++ b/beetsplug/bareasc.py @@ -23,7 +23,7 @@ from unidecode import unidecode from beets import ui from beets.dbcore.query import StringFieldQuery from beets.plugins import BeetsPlugin -from beets.ui import decargs, print_ +from beets.ui import print_ class BareascQuery(StringFieldQuery[str]): @@ -75,7 +75,7 @@ class BareascPlugin(BeetsPlugin): "bareasc", help="unidecode version of beet list command" ) cmd.parser.usage += ( - "\n" "Example: %prog -f '$album: $title' artist:beatles" + "\nExample: %prog -f '$album: $title' artist:beatles" ) cmd.parser.add_all_common_options() cmd.func = self.unidecode_list @@ -83,14 +83,13 @@ class BareascPlugin(BeetsPlugin): def unidecode_list(self, lib, opts, args): """Emulate normal 'list' command but with unidecode output.""" - query = decargs(args) album = opts.album # Copied from commands.py - list_items if album: - for album in lib.albums(query): + for album in lib.albums(args): bare = unidecode(str(album)) print_(bare) else: - for item in lib.items(query): + for item in lib.items(args): bare = unidecode(str(item)) print_(bare) diff --git a/beetsplug/beatport.py b/beetsplug/beatport.py index fab720c2b..718e0730e 100644 --- a/beetsplug/beatport.py +++ b/beetsplug/beatport.py @@ 
-14,9 +14,12 @@ """Adds Beatport release and track search support to the autotagger""" +from __future__ import annotations + import json import re from datetime import datetime, timedelta +from typing import TYPE_CHECKING, Literal, overload import confuse from requests_oauthlib import OAuth1Session @@ -29,8 +32,15 @@ from requests_oauthlib.oauth1_session import ( import beets import beets.ui from beets.autotag.hooks import AlbumInfo, TrackInfo -from beets.plugins import BeetsPlugin, MetadataSourcePlugin, get_distance -from beets.util.id_extractors import beatport_id_regex +from beets.metadata_plugins import MetadataSourcePlugin + +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator, Sequence + + from beets.importer import ImportSession + from beets.library import Item + + from ._typing import JSONDict AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing) USER_AGENT = f"beets/{beets.__version__} +https://beets.io/" @@ -40,20 +50,6 @@ class BeatportAPIError(Exception): pass -class BeatportObject: - def __init__(self, data): - self.beatport_id = data["id"] - self.name = str(data["name"]) - if "releaseDate" in data: - self.release_date = datetime.strptime( - data["releaseDate"], "%Y-%m-%d" - ) - if "artists" in data: - self.artists = [(x["id"], str(x["name"])) for x in data["artists"]] - if "genres" in data: - self.genres = [str(x["name"]) for x in data["genres"]] - - class BeatportClient: _api_base = "https://oauth-api.beatport.com" @@ -78,7 +74,7 @@ class BeatportClient: ) self.api.headers = {"User-Agent": USER_AGENT} - def get_authorize_url(self): + def get_authorize_url(self) -> str: """Generate the URL for the user to authorize the application. 
Retrieves a request token from the Beatport API and returns the @@ -100,38 +96,53 @@ class BeatportClient: self._make_url("/identity/1/oauth/authorize") ) - def get_access_token(self, auth_data): + def get_access_token(self, auth_data: str) -> tuple[str, str]: """Obtain the final access token and secret for the API. :param auth_data: URL-encoded authorization data as displayed at the authorization url (obtained via :py:meth:`get_authorize_url`) after signing in - :type auth_data: unicode - :returns: OAuth resource owner key and secret - :rtype: (unicode, unicode) tuple + :returns: OAuth resource owner key and secret as unicode """ self.api.parse_authorization_response( - "https://beets.io/auth?" + auth_data + f"https://beets.io/auth?{auth_data}" ) access_data = self.api.fetch_access_token( self._make_url("/identity/1/oauth/access-token") ) return access_data["oauth_token"], access_data["oauth_token_secret"] - def search(self, query, release_type="release", details=True): + @overload + def search( + self, + query: str, + release_type: Literal["release"], + details: bool = True, + ) -> Iterator[BeatportRelease]: ... + + @overload + def search( + self, + query: str, + release_type: Literal["track"], + details: bool = True, + ) -> Iterator[BeatportTrack]: ... + + def search( + self, + query: str, + release_type: Literal["release", "track"], + details=True, + ) -> Iterator[BeatportRelease | BeatportTrack]: """Perform a search of the Beatport catalogue. :param query: Query string - :param release_type: Type of releases to search for, can be - 'release' or 'track' + :param release_type: Type of releases to search for. :param details: Retrieve additional information about the search results. 
Currently this will fetch the tracklist for releases and do nothing for tracks :returns: Search results - :rtype: generator that yields - py:class:`BeatportRelease` or - :py:class:`BeatportTrack` """ response = self._get( "catalog/3/search", @@ -141,20 +152,18 @@ class BeatportClient: ) for item in response: if release_type == "release": + release = BeatportRelease(item) if details: - release = self.get_release(item["id"]) - else: - release = BeatportRelease(item) + release.tracks = self.get_release_tracks(item["id"]) yield release elif release_type == "track": yield BeatportTrack(item) - def get_release(self, beatport_id): + def get_release(self, beatport_id: str) -> BeatportRelease | None: """Get information about a single release. :param beatport_id: Beatport ID of the release :returns: The matching release - :rtype: :py:class:`BeatportRelease` """ response = self._get("/catalog/3/releases", id=beatport_id) if response: @@ -163,35 +172,33 @@ class BeatportClient: return release return None - def get_release_tracks(self, beatport_id): + def get_release_tracks(self, beatport_id: str) -> list[BeatportTrack]: """Get all tracks for a given release. :param beatport_id: Beatport ID of the release :returns: Tracks in the matching release - :rtype: list of :py:class:`BeatportTrack` """ response = self._get( "/catalog/3/tracks", releaseId=beatport_id, perPage=100 ) return [BeatportTrack(t) for t in response] - def get_track(self, beatport_id): + def get_track(self, beatport_id: str) -> BeatportTrack: """Get information about a single track. 
:param beatport_id: Beatport ID of the track :returns: The matching track - :rtype: :py:class:`BeatportTrack` """ response = self._get("/catalog/3/tracks", id=beatport_id) return BeatportTrack(response[0]) - def _make_url(self, endpoint): + def _make_url(self, endpoint: str) -> str: """Get complete URL for a given API endpoint.""" if not endpoint.startswith("/"): - endpoint = "/" + endpoint - return self._api_base + endpoint + endpoint = f"/{endpoint}" + return f"{self._api_base}{endpoint}" - def _get(self, endpoint, **kwargs): + def _get(self, endpoint: str, **kwargs) -> list[JSONDict]: """Perform a GET request on a given API endpoint. Automatically extracts result data from the response and converts HTTP @@ -200,60 +207,88 @@ class BeatportClient: try: response = self.api.get(self._make_url(endpoint), params=kwargs) except Exception as e: - raise BeatportAPIError( - "Error connecting to Beatport API: {}".format(e) - ) + raise BeatportAPIError(f"Error connecting to Beatport API: {e}") if not response: raise BeatportAPIError( - "Error {0.status_code} for '{0.request.path_url}".format( - response - ) + f"Error {response.status_code} for '{response.request.path_url}" ) return response.json()["results"] -class BeatportRelease(BeatportObject): - def __str__(self): - if len(self.artists) < 4: - artist_str = ", ".join(x[1] for x in self.artists) - else: - artist_str = "Various Artists" - return "".format( - artist_str, - self.name, - self.catalog_number, - ) +class BeatportObject: + beatport_id: str + name: str - def __repr__(self): - return str(self).encode("utf-8") + release_date: datetime | None = None - def __init__(self, data): - BeatportObject.__init__(self, data) - if "catalogNumber" in data: - self.catalog_number = data["catalogNumber"] - if "label" in data: - self.label_name = data["label"]["name"] - if "category" in data: - self.category = data["category"] - if "slug" in data: - self.url = "https://beatport.com/release/{}/{}".format( - data["slug"], data["id"] 
+ artists: list[tuple[str, str]] | None = None + # tuple of artist id and artist name + + def __init__(self, data: JSONDict): + self.beatport_id = str(data["id"]) # given as int in the response + self.name = str(data["name"]) + if "releaseDate" in data: + self.release_date = datetime.strptime( + data["releaseDate"], "%Y-%m-%d" ) + if "artists" in data: + self.artists = [(x["id"], str(x["name"])) for x in data["artists"]] + if "genres" in data: + self.genres = [str(x["name"]) for x in data["genres"]] + + def artists_str(self) -> str | None: + if self.artists is not None: + if len(self.artists) < 4: + artist_str = ", ".join(x[1] for x in self.artists) + else: + artist_str = "Various Artists" + else: + artist_str = None + + return artist_str + + +class BeatportRelease(BeatportObject): + catalog_number: str | None + label_name: str | None + category: str | None + url: str | None + genre: str | None + + tracks: list[BeatportTrack] | None = None + + def __init__(self, data: JSONDict): + super().__init__(data) + + self.catalog_number = data.get("catalogNumber") + self.label_name = data.get("label", {}).get("name") + self.category = data.get("category") self.genre = data.get("genre") + if "slug" in data: + self.url = ( + f"https://beatport.com/release/{data['slug']}/{data['id']}" + ) + + def __str__(self) -> str: + return ( + "" + ) + class BeatportTrack(BeatportObject): - def __str__(self): - artist_str = ", ".join(x[1] for x in self.artists) - return "".format( - artist_str, self.name, self.mix_name - ) + title: str | None + mix_name: str | None + length: timedelta + url: str | None + track_number: int | None + bpm: str | None + initial_key: str | None + genre: str | None - def __repr__(self): - return str(self).encode("utf-8") - - def __init__(self, data): - BeatportObject.__init__(self, data) + def __init__(self, data: JSONDict): + super().__init__(data) if "title" in data: self.title = str(data["title"]) if "mixName" in data: @@ -266,9 +301,7 @@ class 
BeatportTrack(BeatportObject): except ValueError: pass if "slug" in data: - self.url = "https://beatport.com/track/{}/{}".format( - data["slug"], data["id"] - ) + self.url = f"https://beatport.com/track/{data['slug']}/{data['id']}" self.track_number = data.get("trackNumber") self.bpm = data.get("bpm") self.initial_key = str((data.get("key") or {}).get("shortName")) @@ -280,9 +313,8 @@ class BeatportTrack(BeatportObject): self.genre = str(data["genres"][0].get("name")) -class BeatportPlugin(BeetsPlugin): - data_source = "Beatport" - id_regex = beatport_id_regex +class BeatportPlugin(MetadataSourcePlugin): + _client: BeatportClient | None = None def __init__(self): super().__init__() @@ -291,17 +323,23 @@ class BeatportPlugin(BeetsPlugin): "apikey": "57713c3906af6f5def151b33601389176b37b429", "apisecret": "b3fe08c93c80aefd749fe871a16cd2bb32e2b954", "tokenfile": "beatport_token.json", - "source_weight": 0.5, } ) self.config["apikey"].redact = True self.config["apisecret"].redact = True - self.client = None self.register_listener("import_begin", self.setup) - def setup(self, session=None): - c_key = self.config["apikey"].as_str() - c_secret = self.config["apisecret"].as_str() + @property + def client(self) -> BeatportClient: + if self._client is None: + raise ValueError( + "Beatport client not initialized. Call setup() first." + ) + return self._client + + def setup(self, session: ImportSession): + c_key: str = self.config["apikey"].as_str() + c_secret: str = self.config["apisecret"].as_str() # Get the OAuth token from a file or log in. try: @@ -314,15 +352,15 @@ class BeatportPlugin(BeetsPlugin): token = tokendata["token"] secret = tokendata["secret"] - self.client = BeatportClient(c_key, c_secret, token, secret) + self._client = BeatportClient(c_key, c_secret, token, secret) - def authenticate(self, c_key, c_secret): + def authenticate(self, c_key: str, c_secret: str) -> tuple[str, str]: # Get the link for the OAuth page. 
auth_client = BeatportClient(c_key, c_secret) try: url = auth_client.get_authorize_url() except AUTH_ERRORS as e: - self._log.debug("authentication error: {0}", e) + self._log.debug("authentication error: {}", e) raise beets.ui.UserError("communication with Beatport failed") beets.ui.print_("To authenticate with Beatport, visit:") @@ -333,69 +371,54 @@ class BeatportPlugin(BeetsPlugin): try: token, secret = auth_client.get_access_token(data) except AUTH_ERRORS as e: - self._log.debug("authentication error: {0}", e) + self._log.debug("authentication error: {}", e) raise beets.ui.UserError("Beatport token request failed") # Save the token for later use. - self._log.debug("Beatport token {0}, secret {1}", token, secret) + self._log.debug("Beatport token {}, secret {}", token, secret) with open(self._tokenfile(), "w") as f: json.dump({"token": token, "secret": secret}, f) return token, secret - def _tokenfile(self): + def _tokenfile(self) -> str: """Get the path to the JSON file for storing the OAuth token.""" return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True)) - def album_distance(self, items, album_info, mapping): - """Returns the Beatport source weight and the maximum source weight - for albums. - """ - return get_distance( - data_source=self.data_source, info=album_info, config=self.config - ) - - def track_distance(self, item, track_info): - """Returns the Beatport source weight and the maximum source weight - for individual tracks. - """ - return get_distance( - data_source=self.data_source, info=track_info, config=self.config - ) - - def candidates(self, items, artist, release, va_likely, extra_tags=None): - """Returns a list of AlbumInfo objects for beatport search results - matching release and artist (if not various). 
- """ + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ) -> Iterator[AlbumInfo]: if va_likely: - query = release + query = album else: - query = f"{artist} {release}" + query = f"{artist} {album}" try: - return self._get_releases(query) + yield from self._get_releases(query) except BeatportAPIError as e: - self._log.debug("API Error: {0} (query: {1})", e, query) - return [] + self._log.debug("API Error: {} (query: {})", e, query) + return - def item_candidates(self, item, artist, title): - """Returns a list of TrackInfo objects for beatport search results - matching title and artist. - """ + def item_candidates( + self, item: Item, artist: str, title: str + ) -> Iterable[TrackInfo]: query = f"{artist} {title}" try: return self._get_tracks(query) except BeatportAPIError as e: - self._log.debug("API Error: {0} (query: {1})", e, query) + self._log.debug("API Error: {} (query: {})", e, query) return [] - def album_for_id(self, release_id): + def album_for_id(self, album_id: str): """Fetches a release by its Beatport ID and returns an AlbumInfo object or None if the query is not a valid ID or release is not found. """ - self._log.debug("Searching for release {0}", release_id) + self._log.debug("Searching for release {}", album_id) - release_id = self._get_id("album", release_id, self.id_regex) - if release_id is None: + if not (release_id := self._extract_id(album_id)): self._log.debug("Not a valid Beatport release ID.") return None @@ -404,11 +427,12 @@ class BeatportPlugin(BeetsPlugin): return self._get_album_info(release) return None - def track_for_id(self, track_id): + def track_for_id(self, track_id: str): """Fetches a track by its Beatport ID and returns a TrackInfo object or None if the track is not a valid Beatport ID or track is not found. 
""" - self._log.debug("Searching for track {0}", track_id) + self._log.debug("Searching for track {}", track_id) + # TODO: move to extractor match = re.search(r"(^|beatport\.com/track/.+/)(\d+)$", track_id) if not match: self._log.debug("Not a valid Beatport track ID.") @@ -418,7 +442,7 @@ class BeatportPlugin(BeetsPlugin): return self._get_track_info(bp_track) return None - def _get_releases(self, query): + def _get_releases(self, query: str) -> Iterator[AlbumInfo]: """Returns a list of AlbumInfo objects for a beatport search query.""" # Strip non-word characters from query. Things like "!" and "-" can # cause a query to return no results, even if they match the artist or @@ -428,16 +452,22 @@ class BeatportPlugin(BeetsPlugin): # Strip medium information from query, Things like "CD1" and "disk 1" # can also negate an otherwise positive result. query = re.sub(r"\b(CD|disc)\s*\d+", "", query, flags=re.I) - albums = [self._get_album_info(x) for x in self.client.search(query)] - return albums + for beatport_release in self.client.search(query, "release"): + if beatport_release is None: + continue + yield self._get_album_info(beatport_release) - def _get_album_info(self, release): + def _get_album_info(self, release: BeatportRelease) -> AlbumInfo: """Returns an AlbumInfo object for a Beatport Release object.""" - va = len(release.artists) > 3 + va = release.artists is not None and len(release.artists) > 3 artist, artist_id = self._get_artist(release.artists) if va: artist = "Various Artists" - tracks = [self._get_track_info(x) for x in release.tracks] + tracks: list[TrackInfo] = [] + if release.tracks is not None: + tracks = [self._get_track_info(x) for x in release.tracks] + + release_date = release.release_date return AlbumInfo( album=release.name, @@ -448,18 +478,18 @@ class BeatportPlugin(BeetsPlugin): tracks=tracks, albumtype=release.category, va=va, - year=release.release_date.year, - month=release.release_date.month, - day=release.release_date.day, 
label=release.label_name, catalognum=release.catalog_number, media="Digital", data_source=self.data_source, data_url=release.url, genre=release.genre, + year=release_date.year if release_date else None, + month=release_date.month if release_date else None, + day=release_date.day if release_date else None, ) - def _get_track_info(self, track): + def _get_track_info(self, track: BeatportTrack) -> TrackInfo: """Returns a TrackInfo object for a Beatport Track object.""" title = track.name if track.mix_name != "Original Mix": @@ -485,9 +515,7 @@ class BeatportPlugin(BeetsPlugin): """Returns an artist string (all artists) and an artist_id (the main artist) for a list of Beatport release or track artists. """ - return MetadataSourcePlugin.get_artist( - artists=artists, id_key=0, name_key=1 - ) + return self.get_artist(artists=artists, id_key=0, name_key=1) def _get_tracks(self, query): """Returns a list of TrackInfo objects for a Beatport query.""" diff --git a/beetsplug/bench.py b/beetsplug/bench.py index 62d512ce7..d77f1f92a 100644 --- a/beetsplug/bench.py +++ b/beetsplug/bench.py @@ -17,10 +17,11 @@ import cProfile import timeit -from beets import importer, library, plugins, ui, vfs +from beets import importer, library, plugins, ui from beets.autotag import match from beets.plugins import BeetsPlugin from beets.util.functemplate import Template +from beetsplug._utils import vfs def aunique_benchmark(lib, prof): @@ -125,7 +126,7 @@ class BenchmarkPlugin(BeetsPlugin): "-i", "--id", default=None, help="album ID to match against" ) match_bench_cmd.func = lambda lib, opts, args: match_benchmark( - lib, opts.profile, ui.decargs(args), opts.id + lib, opts.profile, args, opts.id ) return [aunique_bench_cmd, match_bench_cmd] diff --git a/beetsplug/bpd/__init__.py b/beetsplug/bpd/__init__.py index 9d8b4142b..0359259b7 100644 --- a/beetsplug/bpd/__init__.py +++ b/beetsplug/bpd/__init__.py @@ -30,18 +30,30 @@ from typing import TYPE_CHECKING import beets import beets.ui -from 
beets import dbcore, vfs +from beets import dbcore, logging from beets.library import Item from beets.plugins import BeetsPlugin -from beets.util import bluelet +from beets.util import as_string, bluelet +from beetsplug._utils import vfs if TYPE_CHECKING: from beets.dbcore.query import Query +log = logging.getLogger(__name__) + + +try: + from . import gstplayer +except ImportError as e: + raise ImportError( + "Gstreamer Python bindings not found." + ' Install "gstreamer1.0" and "python-gi" or similar package to use BPD.' + ) from e + PROTOCOL_VERSION = "0.16.0" BUFSIZE = 1024 -HELLO = "OK MPD %s" % PROTOCOL_VERSION +HELLO = f"OK MPD {PROTOCOL_VERSION}" CLIST_BEGIN = "command_list_begin" CLIST_VERBOSE_BEGIN = "command_list_ok_begin" CLIST_END = "command_list_end" @@ -94,11 +106,6 @@ SUBSYSTEMS = [ ] -# Gstreamer import error. -class NoGstreamerError(Exception): - pass - - # Error-handling, exceptions, parameter parsing. @@ -276,7 +283,7 @@ class BaseServer: if not self.ctrl_sock: self.ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.ctrl_sock.connect((self.ctrl_host, self.ctrl_port)) - self.ctrl_sock.sendall((message + "\n").encode("utf-8")) + self.ctrl_sock.sendall((f"{message}\n").encode()) def _send_event(self, event): """Notify subscribed connections of an event.""" @@ -370,13 +377,13 @@ class BaseServer: if self.password and not conn.authenticated: # Not authenticated. Show limited list of commands. for cmd in SAFE_COMMANDS: - yield "command: " + cmd + yield f"command: {cmd}" else: # Authenticated. Show all commands. for func in dir(self): if func.startswith("cmd_"): - yield "command: " + func[4:] + yield f"command: {func[4:]}" def cmd_notcommands(self, conn): """Lists all unavailable commands.""" @@ -386,7 +393,7 @@ class BaseServer: if func.startswith("cmd_"): cmd = func[4:] if cmd not in SAFE_COMMANDS: - yield "command: " + cmd + yield f"command: {cmd}" else: # Authenticated. No commands are unavailable. 
@@ -400,22 +407,22 @@ class BaseServer: playlist, playlistlength, and xfade. """ yield ( - "repeat: " + str(int(self.repeat)), - "random: " + str(int(self.random)), - "consume: " + str(int(self.consume)), - "single: " + str(int(self.single)), - "playlist: " + str(self.playlist_version), - "playlistlength: " + str(len(self.playlist)), - "mixrampdb: " + str(self.mixrampdb), + f"repeat: {int(self.repeat)}", + f"random: {int(self.random)}", + f"consume: {int(self.consume)}", + f"single: {int(self.single)}", + f"playlist: {self.playlist_version}", + f"playlistlength: {len(self.playlist)}", + f"mixrampdb: {self.mixrampdb}", ) if self.volume > 0: - yield "volume: " + str(self.volume) + yield f"volume: {self.volume}" if not math.isnan(self.mixrampdelay): - yield "mixrampdelay: " + str(self.mixrampdelay) + yield f"mixrampdelay: {self.mixrampdelay}" if self.crossfade > 0: - yield "xfade: " + str(self.crossfade) + yield f"xfade: {self.crossfade}" if self.current_index == -1: state = "stop" @@ -423,20 +430,20 @@ class BaseServer: state = "pause" else: state = "play" - yield "state: " + state + yield f"state: {state}" if self.current_index != -1: # i.e., paused or playing current_id = self._item_id(self.playlist[self.current_index]) - yield "song: " + str(self.current_index) - yield "songid: " + str(current_id) + yield f"song: {self.current_index}" + yield f"songid: {current_id}" if len(self.playlist) > self.current_index + 1: # If there's a next song, report its index too. next_id = self._item_id(self.playlist[self.current_index + 1]) - yield "nextsong: " + str(self.current_index + 1) - yield "nextsongid: " + str(next_id) + yield f"nextsong: {self.current_index + 1}" + yield f"nextsongid: {next_id}" if self.error: - yield "error: " + self.error + yield f"error: {self.error}" def cmd_clearerror(self, conn): """Removes the persistent error state of the server. 
This @@ -516,7 +523,7 @@ class BaseServer: def cmd_replay_gain_status(self, conn): """Get the replaygain mode.""" - yield "replay_gain_mode: " + str(self.replay_gain_mode) + yield f"replay_gain_mode: {self.replay_gain_mode}" def cmd_clear(self, conn): """Clear the playlist.""" @@ -637,8 +644,8 @@ class BaseServer: Also a dummy implementation. """ for idx, track in enumerate(self.playlist): - yield "cpos: " + str(idx) - yield "Id: " + str(track.id) + yield f"cpos: {idx}" + yield f"Id: {track.id}" def cmd_currentsong(self, conn): """Sends information about the currently-playing song.""" @@ -753,11 +760,11 @@ class Connection: """Create a new connection for the accepted socket `client`.""" self.server = server self.sock = sock - self.address = "{}:{}".format(*sock.sock.getpeername()) + self.address = ":".join(map(str, sock.sock.getpeername())) def debug(self, message, kind=" "): """Log a debug message about this connection.""" - self.server._log.debug("{}[{}]: {}", kind, self.address, message) + self.server._log.debug("{}[{.address}]: {}", kind, self, message) def run(self): pass @@ -893,9 +900,7 @@ class MPDConnection(Connection): return except BPDIdleError as e: self.idle_subscriptions = e.subsystems - self.debug( - "awaiting: {}".format(" ".join(e.subsystems)), kind="z" - ) + self.debug(f"awaiting: {' '.join(e.subsystems)}", kind="z") yield bluelet.call(self.server.dispatch_events()) @@ -907,7 +912,7 @@ class ControlConnection(Connection): super().__init__(server, sock) def debug(self, message, kind=" "): - self.server._log.debug("CTRL {}[{}]: {}", kind, self.address, message) + self.server._log.debug("CTRL {}[{.address}]: {}", kind, self, message) def run(self): """Listen for control commands and delegate to `ctrl_*` methods.""" @@ -927,7 +932,7 @@ class ControlConnection(Connection): func = command.delegate("ctrl_", self) yield bluelet.call(func(*command.args)) except (AttributeError, TypeError) as e: - yield self.send("ERROR: {}".format(e.args[0])) + yield 
self.send(f"ERROR: {e.args[0]}") except Exception: yield self.send( ["ERROR: server error", traceback.format_exc().rstrip()] @@ -986,7 +991,7 @@ class Command: of arguments. """ # Attempt to get correct command function. - func_name = prefix + self.name + func_name = f"{prefix}{self.name}" if not hasattr(target, func_name): raise AttributeError(f'unknown command "{self.name}"') func = getattr(target, func_name) @@ -1005,7 +1010,7 @@ class Command: # If the command accepts a variable number of arguments skip the check. if wrong_num and not argspec.varargs: raise TypeError( - 'wrong number of arguments for "{}"'.format(self.name), + f'wrong number of arguments for "{self.name}"', self.name, ) @@ -1099,23 +1104,13 @@ class Server(BaseServer): """ def __init__(self, library, host, port, password, ctrl_port, log): - try: - from beetsplug.bpd import gstplayer - except ImportError as e: - # This is a little hacky, but it's the best I know for now. - if e.args[0].endswith(" gst"): - raise NoGstreamerError() - else: - raise log.info("Starting server...") super().__init__(host, port, password, ctrl_port, log) self.lib = library self.player = gstplayer.GstPlayer(self.play_finished) self.cmd_update(None) - log.info("Server ready and listening on {}:{}".format(host, port)) - log.debug( - "Listening for control signals on {}:{}".format(host, ctrl_port) - ) + log.info("Server ready and listening on {}:{}", host, port) + log.debug("Listening for control signals on {}:{}", host, ctrl_port) def run(self): self.player.run() @@ -1130,23 +1125,21 @@ class Server(BaseServer): def _item_info(self, item): info_lines = [ - "file: " + item.destination(fragment=True), - "Time: " + str(int(item.length)), - "duration: " + f"{item.length:.3f}", - "Id: " + str(item.id), + f"file: {as_string(item.destination(relative_to_libdir=True))}", + f"Time: {int(item.length)}", + "duration: {item.length:.3f}", + f"Id: {item.id}", ] try: pos = self._id_to_index(item.id) - info_lines.append("Pos: " + 
str(pos)) + info_lines.append(f"Pos: {pos}") except ArgumentNotFoundError: # Don't include position if not in playlist. pass for tagtype, field in self.tagtype_map.items(): - info_lines.append( - "{}: {}".format(tagtype, str(getattr(item, field))) - ) + info_lines.append(f"{tagtype}: {getattr(item, field)}") return info_lines @@ -1209,7 +1202,7 @@ class Server(BaseServer): def _path_join(self, p1, p2): """Smashes together two BPD paths.""" - out = p1 + "/" + p2 + out = f"{p1}/{p2}" return out.replace("//", "/").replace("//", "/") def cmd_lsinfo(self, conn, path="/"): @@ -1227,7 +1220,7 @@ class Server(BaseServer): if dirpath.startswith("/"): # Strip leading slash (libmpc rejects this). dirpath = dirpath[1:] - yield "directory: %s" % dirpath + yield f"directory: {dirpath}" def _listall(self, basepath, node, info=False): """Helper function for recursive listing. If info, show @@ -1239,7 +1232,7 @@ class Server(BaseServer): item = self.lib.get_item(node) yield self._item_info(item) else: - yield "file: " + basepath + yield f"file: {basepath}" else: # List a directory. Recurse into both directories and files. 
for name, itemid in sorted(node.files.items()): @@ -1248,7 +1241,7 @@ class Server(BaseServer): yield from self._listall(newpath, itemid, info) for name, subdir in sorted(node.dirs.items()): newpath = self._path_join(basepath, name) - yield "directory: " + newpath + yield f"directory: {newpath}" yield from self._listall(newpath, subdir, info) def cmd_listall(self, conn, path="/"): @@ -1282,7 +1275,7 @@ class Server(BaseServer): for item in self._all_items(self._resolve_path(path)): self.playlist.append(item) if send_id: - yield "Id: " + str(item.id) + yield f"Id: {item.id}" self.playlist_version += 1 self._send_event("playlist") @@ -1304,20 +1297,13 @@ class Server(BaseServer): item = self.playlist[self.current_index] yield ( - "bitrate: " + str(item.bitrate / 1000), - "audio: {}:{}:{}".format( - str(item.samplerate), - str(item.bitdepth), - str(item.channels), - ), + f"bitrate: {item.bitrate / 1000}", + f"audio: {item.samplerate}:{item.bitdepth}:{item.channels}", ) (pos, total) = self.player.time() yield ( - "time: {}:{}".format( - str(int(pos)), - str(int(total)), - ), + f"time: {int(pos)}:{int(total)}", "elapsed: " + f"{pos:.3f}", "duration: " + f"{total:.3f}", ) @@ -1337,13 +1323,13 @@ class Server(BaseServer): artists, albums, songs, totaltime = tx.query(statement)[0] yield ( - "artists: " + str(artists), - "albums: " + str(albums), - "songs: " + str(songs), - "uptime: " + str(int(time.time() - self.startup_time)), - "playtime: " + "0", # Missing. - "db_playtime: " + str(int(totaltime)), - "db_update: " + str(int(self.updated_time)), + f"artists: {artists}", + f"albums: {albums}", + f"songs: {songs}", + f"uptime: {int(time.time() - self.startup_time)}", + "playtime: 0", # Missing. + f"db_playtime: {int(totaltime)}", + f"db_update: {int(self.updated_time)}", ) def cmd_decoders(self, conn): @@ -1385,7 +1371,7 @@ class Server(BaseServer): searching. 
""" for tag in self.tagtype_map: - yield "tagtype: " + tag + yield f"tagtype: {tag}" def _tagtype_lookup(self, tag): """Uses `tagtype_map` to look up the beets column name for an @@ -1460,12 +1446,9 @@ class Server(BaseServer): clause, subvals = query.clause() statement = ( - "SELECT DISTINCT " - + show_key - + " FROM items WHERE " - + clause - + " ORDER BY " - + show_key + f"SELECT DISTINCT {show_key}" + f" FROM items WHERE {clause}" + f" ORDER BY {show_key}" ) self._log.debug(statement) with self.lib.transaction() as tx: @@ -1475,7 +1458,7 @@ class Server(BaseServer): if not row[0]: # Skip any empty values of the field. continue - yield show_tag_canon + ": " + str(row[0]) + yield f"{show_tag_canon}: {row[0]}" def cmd_count(self, conn, tag, value): """Returns the number and total time of songs matching the @@ -1489,8 +1472,8 @@ class Server(BaseServer): ): songs += 1 playtime += item.length - yield "songs: " + str(songs) - yield "playtime: " + str(int(playtime)) + yield f"songs: {songs}" + yield f"playtime: {int(playtime)}" # Persistent playlist manipulation. In MPD this is an optional feature so # these dummy implementations match MPD's behaviour with the feature off. @@ -1616,16 +1599,9 @@ class BPDPlugin(BeetsPlugin): def start_bpd(self, lib, host, port, password, volume, ctrl_port): """Starts a BPD server.""" - try: - server = Server(lib, host, port, password, ctrl_port, self._log) - server.cmd_setvol(None, volume) - server.run() - except NoGstreamerError: - self._log.error("Gstreamer Python bindings not found.") - self._log.error( - 'Install "gstreamer1.0" and "python-gi"' - "or similar package to use BPD." 
- ) + server = Server(lib, host, port, password, ctrl_port, self._log) + server.cmd_setvol(None, volume) + server.run() def commands(self): cmd = beets.ui.Subcommand( diff --git a/beetsplug/bpd/gstplayer.py b/beetsplug/bpd/gstplayer.py index 03fb179aa..f356b3066 100644 --- a/beetsplug/bpd/gstplayer.py +++ b/beetsplug/bpd/gstplayer.py @@ -27,7 +27,16 @@ import gi from beets import ui -gi.require_version("Gst", "1.0") +try: + gi.require_version("Gst", "1.0") +except ValueError as e: + # on some scenarios, gi may be importable, but we get a ValueError when + # trying to specify the required version. This is problematic in the test + # suite where test_bpd.py has a call to + # pytest.importorskip("beetsplug.bpd"). Re-raising as an ImportError + # makes it so the test collector functions as inteded. + raise ImportError from e + from gi.repository import GLib, Gst # noqa: E402 Gst.init(None) @@ -129,7 +138,7 @@ class GstPlayer: self.player.set_state(Gst.State.NULL) if isinstance(path, str): path = path.encode("utf-8") - uri = "file://" + urllib.parse.quote(path) + uri = f"file://{urllib.parse.quote(path)}" self.player.set_property("uri", uri) self.player.set_state(Gst.State.PLAYING) self.playing = True diff --git a/beetsplug/bpm.py b/beetsplug/bpm.py index 10edfbfd7..d49963b72 100644 --- a/beetsplug/bpm.py +++ b/beetsplug/bpm.py @@ -57,15 +57,14 @@ class BPMPlugin(BeetsPlugin): def commands(self): cmd = ui.Subcommand( "bpm", - help="determine bpm of a song by pressing " "a key to the rhythm", + help="determine bpm of a song by pressing a key to the rhythm", ) cmd.func = self.command return [cmd] def command(self, lib, opts, args): - items = lib.items(ui.decargs(args)) write = ui.should_write() - self.get_bpm(items, write) + self.get_bpm(lib.items(args), write) def get_bpm(self, items, write=False): overwrite = self.config["overwrite"].get(bool) @@ -74,12 +73,12 @@ class BPMPlugin(BeetsPlugin): item = items[0] if item["bpm"]: - self._log.info("Found bpm {0}", item["bpm"]) 
+ self._log.info("Found bpm {}", item["bpm"]) if not overwrite: return self._log.info( - "Press Enter {0} times to the rhythm or Ctrl-D " "to exit", + "Press Enter {} times to the rhythm or Ctrl-D to exit", self.config["max_strokes"].get(int), ) new_bpm = bpm(self.config["max_strokes"].get(int)) @@ -87,4 +86,4 @@ class BPMPlugin(BeetsPlugin): if write: item.try_write() item.store() - self._log.info("Added new bpm {0}", item["bpm"]) + self._log.info("Added new bpm {}", item["bpm"]) diff --git a/beetsplug/bpsync.py b/beetsplug/bpsync.py index 05be94c99..fbdf8cc70 100644 --- a/beetsplug/bpsync.py +++ b/beetsplug/bpsync.py @@ -65,10 +65,9 @@ class BPSyncPlugin(BeetsPlugin): move = ui.should_move(opts.move) pretend = opts.pretend write = ui.should_write(opts.write) - query = ui.decargs(args) - self.singletons(lib, query, move, pretend, write) - self.albums(lib, query, move, pretend, write) + self.singletons(lib, args, move, pretend, write) + self.albums(lib, args, move, pretend, write) def singletons(self, lib, query, move, pretend, write): """Retrieve and apply info from the autotagger for items matched by @@ -83,8 +82,8 @@ class BPSyncPlugin(BeetsPlugin): if not self.is_beatport_track(item): self._log.info( - "Skipping non-{} singleton: {}", - self.beatport_plugin.data_source, + "Skipping non-{.beatport_plugin.data_source} singleton: {}", + self, item, ) continue @@ -108,8 +107,8 @@ class BPSyncPlugin(BeetsPlugin): return False if not album.mb_albumid.isnumeric(): self._log.info( - "Skipping album with invalid {} ID: {}", - self.beatport_plugin.data_source, + "Skipping album with invalid {.beatport_plugin.data_source} ID: {}", + self, album, ) return False @@ -118,8 +117,8 @@ class BPSyncPlugin(BeetsPlugin): return items if not all(self.is_beatport_track(item) for item in items): self._log.info( - "Skipping non-{} release: {}", - self.beatport_plugin.data_source, + "Skipping non-{.beatport_plugin.data_source} release: {}", + self, album, ) return False @@ -140,9 
+139,7 @@ class BPSyncPlugin(BeetsPlugin): albuminfo = self.beatport_plugin.album_for_id(album.mb_albumid) if not albuminfo: self._log.info( - "Release ID {} not found for album {}", - album.mb_albumid, - album, + "Release ID {0.mb_albumid} not found for album {0}", album ) continue @@ -152,14 +149,14 @@ class BPSyncPlugin(BeetsPlugin): library_trackid_to_item = { int(item.mb_trackid): item for item in items } - item_to_trackinfo = { - item: beatport_trackid_to_trackinfo[track_id] + item_info_pairs = [ + (item, beatport_trackid_to_trackinfo[track_id]) for track_id, item in library_trackid_to_item.items() - } + ] self._log.info("applying changes to {}", album) with lib.transaction(): - autotag.apply_metadata(albuminfo, item_to_trackinfo) + autotag.apply_metadata(albuminfo, item_info_pairs) changed = False # Find any changed item to apply Beatport changes to album. any_changed_item = items[0] diff --git a/beetsplug/bucket.py b/beetsplug/bucket.py index 9246539fc..40369f74a 100644 --- a/beetsplug/bucket.py +++ b/beetsplug/bucket.py @@ -41,7 +41,7 @@ def span_from_str(span_str): def normalize_year(d, yearfrom): """Convert string to a 4 digits year""" if yearfrom < 100: - raise BucketError("%d must be expressed on 4 digits" % yearfrom) + raise BucketError(f"{yearfrom} must be expressed on 4 digits") # if two digits only, pick closest year that ends by these two # digits starting from yearfrom @@ -55,14 +55,13 @@ def span_from_str(span_str): years = [int(x) for x in re.findall(r"\d+", span_str)] if not years: raise ui.UserError( - "invalid range defined for year bucket '%s': no " - "year found" % span_str + f"invalid range defined for year bucket {span_str!r}: no year found" ) try: years = [normalize_year(x, years[0]) for x in years] except BucketError as exc: raise ui.UserError( - "invalid range defined for year bucket '%s': %s" % (span_str, exc) + f"invalid range defined for year bucket {span_str!r}: {exc}" ) res = {"from": years[0], "str": span_str} @@ -125,22 +124,19 
@@ def str2fmt(s): "fromnchars": len(m.group("fromyear")), "tonchars": len(m.group("toyear")), } - res["fmt"] = "{}%s{}{}{}".format( - m.group("bef"), - m.group("sep"), - "%s" if res["tonchars"] else "", - m.group("after"), + res["fmt"] = ( + f"{m['bef']}{{}}{m['sep']}{'{}' if res['tonchars'] else ''}{m['after']}" ) return res def format_span(fmt, yearfrom, yearto, fromnchars, tonchars): """Return a span string representation.""" - args = str(yearfrom)[-fromnchars:] + args = [str(yearfrom)[-fromnchars:]] if tonchars: - args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:]) + args.append(str(yearto)[-tonchars:]) - return fmt % args + return fmt.format(*args) def extract_modes(spans): @@ -169,14 +165,12 @@ def build_alpha_spans(alpha_spans_str, alpha_regexs): else: raise ui.UserError( "invalid range defined for alpha bucket " - "'%s': no alphanumeric character found" % elem + f"'{elem}': no alphanumeric character found" ) spans.append( re.compile( - "^[" - + ASCII_DIGITS[begin_index : end_index + 1] - + ASCII_DIGITS[begin_index : end_index + 1].upper() - + "]" + rf"^[{ASCII_DIGITS[begin_index : end_index + 1]}]", + re.IGNORECASE, ) ) return spans diff --git a/beetsplug/chroma.py b/beetsplug/chroma.py index 369a3cc73..1e9835789 100644 --- a/beetsplug/chroma.py +++ b/beetsplug/chroma.py @@ -18,13 +18,17 @@ autotagger. Requires the pyacoustid library. 
import re from collections import defaultdict -from functools import partial +from collections.abc import Iterable +from functools import cached_property, partial import acoustid import confuse -from beets import config, plugins, ui, util -from beets.autotag import hooks +from beets import config, ui, util +from beets.autotag.distance import Distance +from beets.autotag.hooks import TrackInfo +from beets.metadata_plugins import MetadataSourcePlugin +from beetsplug.musicbrainz import MusicBrainzPlugin API_KEY = "1vOwZtEn" SCORE_THRESH = 0.5 @@ -86,7 +90,7 @@ def acoustid_match(log, path): duration, fp = acoustid.fingerprint_file(util.syspath(path)) except acoustid.FingerprintGenerationError as exc: log.error( - "fingerprinting of {0} failed: {1}", + "fingerprinting of {} failed: {}", util.displayable_path(repr(path)), exc, ) @@ -94,15 +98,17 @@ def acoustid_match(log, path): fp = fp.decode() _fingerprints[path] = fp try: - res = acoustid.lookup(API_KEY, fp, duration, meta="recordings releases") + res = acoustid.lookup( + API_KEY, fp, duration, meta="recordings releases", timeout=10 + ) except acoustid.AcoustidError as exc: log.debug( - "fingerprint matching {0} failed: {1}", + "fingerprint matching {} failed: {}", util.displayable_path(repr(path)), exc, ) return None - log.debug("chroma: fingerprinted {0}", util.displayable_path(repr(path))) + log.debug("chroma: fingerprinted {}", util.displayable_path(repr(path))) # Ensure the response is usable and parse it. 
if res["status"] != "ok" or not res.get("results"): @@ -140,7 +146,7 @@ def acoustid_match(log, path): release_ids = [rel["id"] for rel in releases] log.debug( - "matched recordings {0} on releases {1}", recording_ids, release_ids + "matched recordings {} on releases {}", recording_ids, release_ids ) _matches[path] = recording_ids, release_ids @@ -167,10 +173,9 @@ def _all_releases(items): yield release_id -class AcoustidPlugin(plugins.BeetsPlugin): +class AcoustidPlugin(MetadataSourcePlugin): def __init__(self): super().__init__() - self.config.add( { "auto": True, @@ -182,11 +187,15 @@ class AcoustidPlugin(plugins.BeetsPlugin): self.register_listener("import_task_start", self.fingerprint_task) self.register_listener("import_task_apply", apply_acoustid_metadata) + @cached_property + def mb(self) -> MusicBrainzPlugin: + return MusicBrainzPlugin() + def fingerprint_task(self, task, session): return fingerprint_task(self._log, task, session) def track_distance(self, item, info): - dist = hooks.Distance() + dist = Distance() if item.path not in _matches or not info.track_id: # Match failed or no track ID. 
return dist @@ -195,29 +204,37 @@ class AcoustidPlugin(plugins.BeetsPlugin): dist.add_expr("track_id", info.track_id not in recording_ids) return dist - def candidates(self, items, artist, album, va_likely, extra_tags=None): + def candidates(self, items, artist, album, va_likely): albums = [] for relid in prefix(_all_releases(items), MAX_RELEASES): - album = hooks.album_for_mbid(relid) + album = self.mb.album_for_id(relid) if album: albums.append(album) - self._log.debug("acoustid album candidates: {0}", len(albums)) + self._log.debug("acoustid album candidates: {}", len(albums)) return albums - def item_candidates(self, item, artist, title): + def item_candidates(self, item, artist, title) -> Iterable[TrackInfo]: if item.path not in _matches: return [] recording_ids, _ = _matches[item.path] tracks = [] for recording_id in prefix(recording_ids, MAX_RECORDINGS): - track = hooks.track_for_mbid(recording_id) + track = self.mb.track_for_id(recording_id) if track: tracks.append(track) - self._log.debug("acoustid item candidates: {0}", len(tracks)) + self._log.debug("acoustid item candidates: {}", len(tracks)) return tracks + def album_for_id(self, *args, **kwargs): + # Lookup by fingerprint ID does not make too much sense. + return None + + def track_for_id(self, *args, **kwargs): + # Lookup by fingerprint ID does not make too much sense. 
+ return None + def commands(self): submit_cmd = ui.Subcommand( "submit", help="submit Acoustid fingerprints" @@ -228,7 +245,7 @@ class AcoustidPlugin(plugins.BeetsPlugin): apikey = config["acoustid"]["apikey"].as_str() except confuse.NotFoundError: raise ui.UserError("no Acoustid user API key provided") - submit_items(self._log, apikey, lib.items(ui.decargs(args))) + submit_items(self._log, apikey, lib.items(args)) submit_cmd.func = submit_cmd_func @@ -237,7 +254,7 @@ class AcoustidPlugin(plugins.BeetsPlugin): ) def fingerprint_cmd_func(lib, opts, args): - for item in lib.items(ui.decargs(args)): + for item in lib.items(args): fingerprint_item(self._log, item, write=ui.should_write()) fingerprint_cmd.func = fingerprint_cmd_func @@ -275,11 +292,11 @@ def submit_items(log, userkey, items, chunksize=64): def submit_chunk(): """Submit the current accumulated fingerprint data.""" - log.info("submitting {0} fingerprints", len(data)) + log.info("submitting {} fingerprints", len(data)) try: - acoustid.submit(API_KEY, userkey, data) + acoustid.submit(API_KEY, userkey, data, timeout=10) except acoustid.AcoustidError as exc: - log.warning("acoustid submission error: {0}", exc) + log.warning("acoustid submission error: {}", exc) del data[:] for item in items: @@ -326,31 +343,23 @@ def fingerprint_item(log, item, write=False): """ # Get a fingerprint and length for this track. 
if not item.length: - log.info("{0}: no duration available", util.displayable_path(item.path)) + log.info("{.filepath}: no duration available", item) elif item.acoustid_fingerprint: if write: - log.info( - "{0}: fingerprint exists, skipping", - util.displayable_path(item.path), - ) + log.info("{.filepath}: fingerprint exists, skipping", item) else: - log.info( - "{0}: using existing fingerprint", - util.displayable_path(item.path), - ) + log.info("{.filepath}: using existing fingerprint", item) return item.acoustid_fingerprint else: - log.info("{0}: fingerprinting", util.displayable_path(item.path)) + log.info("{.filepath}: fingerprinting", item) try: _, fp = acoustid.fingerprint_file(util.syspath(item.path)) item.acoustid_fingerprint = fp.decode() if write: - log.info( - "{0}: writing fingerprint", util.displayable_path(item.path) - ) + log.info("{.filepath}: writing fingerprint", item) item.try_write() if item._db: item.store() return item.acoustid_fingerprint except acoustid.FingerprintGenerationError as exc: - log.info("fingerprint generation failed: {0}", exc) + log.info("fingerprint generation failed: {}", exc) diff --git a/beetsplug/convert.py b/beetsplug/convert.py index a8c32e955..74ced8ae3 100644 --- a/beetsplug/convert.py +++ b/beetsplug/convert.py @@ -25,12 +25,13 @@ from string import Template import mediafile from confuse import ConfigTypeError, Optional -from beets import art, config, plugins, ui, util +from beets import config, plugins, ui, util from beets.library import Item, parse_query_string from beets.plugins import BeetsPlugin from beets.util import par_map from beets.util.artresizer import ArtResizer from beets.util.m3u import M3UFile +from beetsplug._utils import art _fs_lock = threading.Lock() _temp_files = [] # Keep track of temporary transcoded files for deletion. 
@@ -64,9 +65,7 @@ def get_format(fmt=None): command = format_info["command"] extension = format_info.get("extension", fmt) except KeyError: - raise ui.UserError( - 'convert: format {} needs the "command" field'.format(fmt) - ) + raise ui.UserError(f'convert: format {fmt} needs the "command" field') except ConfigTypeError: command = config["convert"]["formats"][fmt].get(str) extension = fmt @@ -77,8 +76,8 @@ def get_format(fmt=None): command = config["convert"]["command"].as_str() elif "opts" in keys: # Undocumented option for backwards compatibility with < 1.3.1. - command = "ffmpeg -i $source -y {} $dest".format( - config["convert"]["opts"].as_str() + command = ( + f"ffmpeg -i $source -y {config['convert']['opts'].as_str()} $dest" ) if "extension" in keys: extension = config["convert"]["extension"].as_str() @@ -96,12 +95,18 @@ def in_no_convert(item: Item) -> bool: return False -def should_transcode(item, fmt): +def should_transcode(item, fmt, force: bool = False): """Determine whether the item should be transcoded as part of conversion (i.e., its bitrate is high or it has the wrong format). + + If ``force`` is True, safety checks like ``no_convert`` and + ``never_convert_lossy_files`` are ignored and the item is always + transcoded. 
""" + if force: + return True if in_no_convert(item) or ( - config["convert"]["never_convert_lossy_files"] + config["convert"]["never_convert_lossy_files"].get(bool) and item.format.lower() not in LOSSLESS_FORMATS ): return False @@ -123,20 +128,28 @@ class ConvertPlugin(BeetsPlugin): "threads": os.cpu_count(), "format": "mp3", "id3v23": "inherit", + "write_metadata": True, "formats": { "aac": { - "command": "ffmpeg -i $source -y -vn -acodec aac " - "-aq 1 $dest", + "command": ( + "ffmpeg -i $source -y -vn -acodec aac -aq 1 $dest" + ), "extension": "m4a", }, "alac": { - "command": "ffmpeg -i $source -y -vn -acodec alac $dest", + "command": ( + "ffmpeg -i $source -y -vn -acodec alac $dest" + ), "extension": "m4a", }, "flac": "ffmpeg -i $source -y -vn -acodec flac $dest", "mp3": "ffmpeg -i $source -y -vn -aq 2 $dest", - "opus": "ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest", - "ogg": "ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest", + "opus": ( + "ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest" + ), + "ogg": ( + "ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest" + ), "wma": "ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest", }, "max_bitrate": None, @@ -171,16 +184,17 @@ class ConvertPlugin(BeetsPlugin): "--threads", action="store", type="int", - help="change the number of threads, \ - defaults to maximum available processors", + help=( + "change the number of threads, defaults to maximum available" + " processors" + ), ) cmd.parser.add_option( "-k", "--keep-new", action="store_true", dest="keep_new", - help="keep only the converted \ - and move the old files", + help="keep only the converted and move the old files", ) cmd.parser.add_option( "-d", "--dest", action="store", help="set the destination directory" @@ -204,16 +218,16 @@ class ConvertPlugin(BeetsPlugin): "--link", action="store_true", dest="link", - help="symlink files that do not \ - need transcoding.", + help="symlink files that do not need transcoding.", ) 
cmd.parser.add_option( "-H", "--hardlink", action="store_true", dest="hardlink", - help="hardlink files that do not \ - need transcoding. Overrides --link.", + help=( + "hardlink files that do not need transcoding. Overrides --link." + ), ) cmd.parser.add_option( "-m", @@ -228,6 +242,16 @@ class ConvertPlugin(BeetsPlugin): drive, relative paths pointing to media files will be used.""", ) + cmd.parser.add_option( + "-F", + "--force", + action="store_true", + dest="force", + help=( + "force transcoding. Ignores no_convert, " + "never_convert_lossy_files, and max_bitrate" + ), + ) cmd.parser.add_album_option() cmd.func = self.convert_func return [cmd] @@ -251,6 +275,7 @@ class ConvertPlugin(BeetsPlugin): hardlink, link, playlist, + force, ) = self._get_opts_and_config(empty_opts) items = task.imported_items() @@ -264,6 +289,7 @@ class ConvertPlugin(BeetsPlugin): hardlink, threads, items, + force, ) # Utilities converted from functions to methods on logging overhaul @@ -282,7 +308,7 @@ class ConvertPlugin(BeetsPlugin): quiet = self.config["quiet"].get(bool) if not quiet and not pretend: - self._log.info("Encoding {0}", util.displayable_path(source)) + self._log.info("Encoding {}", util.displayable_path(source)) command = os.fsdecode(command) source = os.fsdecode(source) @@ -301,7 +327,7 @@ class ConvertPlugin(BeetsPlugin): encode_cmd.append(os.fsdecode(args[i])) if pretend: - self._log.info("{0}", " ".join(ui.decargs(args))) + self._log.info("{}", " ".join(args)) return try: @@ -309,28 +335,25 @@ class ConvertPlugin(BeetsPlugin): except subprocess.CalledProcessError as exc: # Something went wrong (probably Ctrl+C), remove temporary files self._log.info( - "Encoding {0} failed. Cleaning up...", + "Encoding {} failed. 
Cleaning up...", util.displayable_path(source), ) self._log.debug( - "Command {0} exited with status {1}: {2}", + "Command {0} exited with status {1.returncode}: {1.output}", args, - exc.returncode, - exc.output, + exc, ) util.remove(dest) util.prune_dirs(os.path.dirname(dest)) raise except OSError as exc: raise ui.UserError( - "convert: couldn't invoke '{}': {}".format( - " ".join(ui.decargs(args)), exc - ) + f"convert: couldn't invoke {' '.join(args)!r}: {exc}" ) if not quiet and not pretend: self._log.info( - "Finished encoding {0}", util.displayable_path(source) + "Finished encoding {}", util.displayable_path(source) ) def convert_item( @@ -342,6 +365,7 @@ class ConvertPlugin(BeetsPlugin): pretend=False, link=False, hardlink=False, + force=False, ): """A pipeline thread that converts `Item` objects from a library. @@ -358,7 +382,7 @@ class ConvertPlugin(BeetsPlugin): try: mediafile.MediaFile(util.syspath(item.path)) except mediafile.UnreadableFileError as exc: - self._log.error("Could not open file to convert: {0}", exc) + self._log.error("Could not open file to convert: {}", exc) continue # When keeping the new file in the library, we first move the @@ -367,11 +391,11 @@ class ConvertPlugin(BeetsPlugin): if keep_new: original = dest converted = item.path - if should_transcode(item, fmt): + if should_transcode(item, fmt, force): converted = replace_ext(converted, ext) else: original = item.path - if should_transcode(item, fmt): + if should_transcode(item, fmt, force): dest = replace_ext(dest, ext) converted = dest @@ -384,25 +408,24 @@ class ConvertPlugin(BeetsPlugin): if os.path.exists(util.syspath(dest)): self._log.info( - "Skipping {0} (target file exists)", - util.displayable_path(item.path), + "Skipping {.filepath} (target file exists)", item ) continue if keep_new: if pretend: self._log.info( - "mv {0} {1}", - util.displayable_path(item.path), + "mv {.filepath} {}", + item, util.displayable_path(original), ) else: self._log.info( - "Moving to {0}", 
util.displayable_path(original) + "Moving to {}", util.displayable_path(original) ) util.move(item.path, original) - if should_transcode(item, fmt): + if should_transcode(item, fmt, force): linked = False try: self.encode(command, original, converted, pretend) @@ -414,10 +437,10 @@ class ConvertPlugin(BeetsPlugin): msg = "ln" if hardlink else ("ln -s" if link else "cp") self._log.info( - "{2} {0} {1}", + "{} {} {}", + msg, util.displayable_path(original), util.displayable_path(converted), - msg, ) else: # No transcoding necessary. @@ -427,9 +450,7 @@ class ConvertPlugin(BeetsPlugin): else ("Linking" if link else "Copying") ) - self._log.info( - "{1} {0}", util.displayable_path(item.path), msg - ) + self._log.info("{} {.filepath}", msg, item) if hardlink: util.hardlink(original, converted) @@ -445,8 +466,9 @@ class ConvertPlugin(BeetsPlugin): if id3v23 == "inherit": id3v23 = None - # Write tags from the database to the converted file. - item.try_write(path=converted, id3v23=id3v23) + # Write tags from the database to the file if requested + if self.config["write_metadata"].get(bool): + item.try_write(path=converted, id3v23=id3v23) if keep_new: # If we're keeping the transcoded file, read it again (after @@ -460,8 +482,7 @@ class ConvertPlugin(BeetsPlugin): if album and album.artpath: maxwidth = self._get_art_resize(album.artpath) self._log.debug( - "embedding album art from {}", - util.displayable_path(album.artpath), + "embedding album art from {.art_filepath}", album ) art.embed_item( self._log, @@ -519,8 +540,7 @@ class ConvertPlugin(BeetsPlugin): if os.path.exists(util.syspath(dest)): self._log.info( - "Skipping {0} (target file exists)", - util.displayable_path(album.artpath), + "Skipping {.art_filepath} (target file exists)", album ) return @@ -530,8 +550,8 @@ class ConvertPlugin(BeetsPlugin): # Either copy or resize (while copying) the image. 
if maxwidth is not None: self._log.info( - "Resizing cover art from {0} to {1}", - util.displayable_path(album.artpath), + "Resizing cover art from {.art_filepath} to {}", + album, util.displayable_path(dest), ) if not pretend: @@ -541,10 +561,10 @@ class ConvertPlugin(BeetsPlugin): msg = "ln" if hardlink else ("ln -s" if link else "cp") self._log.info( - "{2} {0} {1}", - util.displayable_path(album.artpath), - util.displayable_path(dest), + "{} {.art_filepath} {}", msg, + album, + util.displayable_path(dest), ) else: msg = ( @@ -554,10 +574,10 @@ class ConvertPlugin(BeetsPlugin): ) self._log.info( - "{2} cover art from {0} to {1}", - util.displayable_path(album.artpath), - util.displayable_path(dest), + "{} cover art from {.art_filepath} to {}", msg, + album, + util.displayable_path(dest), ) if hardlink: util.hardlink(album.artpath, dest) @@ -576,16 +596,17 @@ class ConvertPlugin(BeetsPlugin): hardlink, link, playlist, + force, ) = self._get_opts_and_config(opts) if opts.album: - albums = lib.albums(ui.decargs(args)) + albums = lib.albums(args) items = [i for a in albums for i in a.items()] if not pretend: for a in albums: ui.print_(format(a, "")) else: - items = list(lib.items(ui.decargs(args))) + items = list(lib.items(args)) if not pretend: for i in items: ui.print_(format(i, "")) @@ -612,25 +633,20 @@ class ConvertPlugin(BeetsPlugin): hardlink, threads, items, + force, ) if playlist: # Playlist paths are understood as relative to the dest directory. pl_normpath = util.normpath(playlist) pl_dir = os.path.dirname(pl_normpath) - self._log.info("Creating playlist file {0}", pl_normpath) + self._log.info("Creating playlist file {}", pl_normpath) # Generates a list of paths to media files, ensures the paths are # relative to the playlist's location and translates the unicode # strings we get from item.destination to bytes. 
items_paths = [ os.path.relpath( - util.bytestring_path( - item.destination( - basedir=dest, - path_formats=path_formats, - fragment=False, - ) - ), + item.destination(basedir=dest, path_formats=path_formats), pl_dir, ) for item in items @@ -652,7 +668,7 @@ class ConvertPlugin(BeetsPlugin): tmpdir = self.config["tmpdir"].get() if tmpdir: tmpdir = os.fsdecode(util.bytestring_path(tmpdir)) - fd, dest = tempfile.mkstemp(os.fsdecode(b"." + ext), dir=tmpdir) + fd, dest = tempfile.mkstemp(f".{os.fsdecode(ext)}", dir=tmpdir) os.close(fd) dest = util.bytestring_path(dest) _temp_files.append(dest) # Delete the transcode later. @@ -674,7 +690,7 @@ class ConvertPlugin(BeetsPlugin): if self.config["delete_originals"]: self._log.log( logging.DEBUG if self.config["quiet"] else logging.INFO, - "Removing original file {0}", + "Removing original file {}", source_path, ) util.remove(source_path, False) @@ -740,7 +756,7 @@ class ConvertPlugin(BeetsPlugin): else: hardlink = self.config["hardlink"].get(bool) link = self.config["link"].get(bool) - + force = getattr(opts, "force", False) return ( dest, threads, @@ -750,6 +766,7 @@ class ConvertPlugin(BeetsPlugin): hardlink, link, playlist, + force, ) def _parallel_convert( @@ -763,13 +780,21 @@ class ConvertPlugin(BeetsPlugin): hardlink, threads, items, + force, ): """Run the convert_item function for every items on as many thread as defined in threads """ convert = [ self.convert_item( - dest, keep_new, path_formats, fmt, pretend, link, hardlink + dest, + keep_new, + path_formats, + fmt, + pretend, + link, + hardlink, + force, ) for _ in range(threads) ] diff --git a/beetsplug/deezer.py b/beetsplug/deezer.py index 25815e8d3..ef27dddc7 100644 --- a/beetsplug/deezer.py +++ b/beetsplug/deezer.py @@ -14,38 +14,44 @@ """Adds Deezer release and track search support to the autotagger""" +from __future__ import annotations + import collections import time +from typing import TYPE_CHECKING, Literal import requests -import unidecode from beets 
import ui from beets.autotag import AlbumInfo, TrackInfo from beets.dbcore import types -from beets.library import DateType -from beets.plugins import BeetsPlugin, MetadataSourcePlugin -from beets.util.id_extractors import deezer_id_regex +from beets.metadata_plugins import ( + IDResponse, + SearchApiMetadataSourcePlugin, + SearchFilter, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from beets.library import Item, Library + + from ._typing import JSONDict -class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): - data_source = "Deezer" - +class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]): item_types = { "deezer_track_rank": types.INTEGER, "deezer_track_id": types.INTEGER, - "deezer_updated": DateType(), + "deezer_updated": types.DATE, } - # Base URLs for the Deezer API # Documentation: https://developers.deezer.com/api/ search_url = "https://api.deezer.com/search/" album_url = "https://api.deezer.com/album/" track_url = "https://api.deezer.com/track/" - id_regex = deezer_id_regex - - def __init__(self): + def __init__(self) -> None: super().__init__() def commands(self): @@ -54,42 +60,23 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): "deezerupdate", help=f"Update {self.data_source} rank" ) - def func(lib, opts, args): - items = lib.items(ui.decargs(args)) - self.deezerupdate(items, ui.should_write()) + def func(lib: Library, opts, args): + items = lib.items(args) + self.deezerupdate(list(items), ui.should_write()) deezer_update_cmd.func = func return [deezer_update_cmd] - def fetch_data(self, url): - try: - response = requests.get(url, timeout=10) - response.raise_for_status() - data = response.json() - except requests.exceptions.RequestException as e: - self._log.error("Error fetching data from {}\n Error: {}", url, e) + def album_for_id(self, album_id: str) -> AlbumInfo | None: + """Fetch an album by its Deezer ID or URL.""" + if not (deezer_id := self._extract_id(album_id)): return None - if "error" in data: - 
self._log.debug("Deezer API error: {}", data["error"]["message"]) - return None - return data - def album_for_id(self, album_id): - """Fetch an album by its Deezer ID or URL and return an - AlbumInfo object or None if the album is not found. + album_url = f"{self.album_url}{deezer_id}" + if not (album_data := self.fetch_data(album_url)): + return None - :param album_id: Deezer ID or URL for the album. - :type album_id: str - :return: AlbumInfo object for album. - :rtype: beets.autotag.hooks.AlbumInfo or None - """ - deezer_id = self._get_id("album", album_id, self.id_regex) - if deezer_id is None: - return None - album_data = self.fetch_data(self.album_url + deezer_id) - if album_data is None: - return None contributors = album_data.get("contributors") if contributors is not None: artist, artist_id = self.get_artist(contributors) @@ -114,7 +101,7 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): f"Invalid `release_date` returned by {self.data_source} API: " f"{release_date!r}" ) - tracks_obj = self.fetch_data(self.album_url + deezer_id + "/tracks") + tracks_obj = self.fetch_data(f"{self.album_url}{deezer_id}/tracks") if tracks_obj is None: return None try: @@ -132,7 +119,7 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): tracks_data.extend(tracks_obj["data"]) tracks = [] - medium_totals = collections.defaultdict(int) + medium_totals: dict[int | None, int] = collections.defaultdict(int) for i, track_data in enumerate(tracks_data, start=1): track = self._get_track(track_data) track.index = i @@ -150,25 +137,68 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): artist_id=artist_id, tracks=tracks, albumtype=album_data["record_type"], - va=len(album_data["contributors"]) == 1 - and artist.lower() == "various artists", + va=( + len(album_data["contributors"]) == 1 + and (artist or "").lower() == "various artists" + ), year=year, month=month, day=day, label=album_data["label"], - mediums=max(medium_totals.keys()), + mediums=max(filter(None, 
medium_totals.keys())), data_source=self.data_source, data_url=album_data["link"], cover_art_url=album_data.get("cover_xl"), ) - def _get_track(self, track_data): + def track_for_id(self, track_id: str) -> None | TrackInfo: + """Fetch a track by its Deezer ID or URL and return a + TrackInfo object or None if the track is not found. + + :param track_id: (Optional) Deezer ID or URL for the track. Either + ``track_id`` or ``track_data`` must be provided. + + """ + if not (deezer_id := self._extract_id(track_id)): + self._log.debug("Invalid Deezer track_id: {}", track_id) + return None + + if not (track_data := self.fetch_data(f"{self.track_url}{deezer_id}")): + self._log.debug("Track not found: {}", track_id) + return None + + track = self._get_track(track_data) + + # Get album's tracks to set `track.index` (position on the entire + # release) and `track.medium_total` (total number of tracks on + # the track's disc). + if not ( + album_tracks_obj := self.fetch_data( + f"{self.album_url}{track_data['album']['id']}/tracks" + ) + ): + return None + + try: + album_tracks_data = album_tracks_obj["data"] + except KeyError: + self._log.debug( + "Error fetching album tracks for {}", track_data["album"]["id"] + ) + return None + medium_total = 0 + for i, track_data in enumerate(album_tracks_data, start=1): + if track_data["disk_number"] == track.medium: + medium_total += 1 + if track_data["id"] == track.track_id: + track.index = i + track.medium_total = medium_total + return track + + def _get_track(self, track_data: JSONDict) -> TrackInfo: """Convert a Deezer track object dict to a TrackInfo object. 
:param track_data: Deezer Track object dict - :type track_data: dict - :return: TrackInfo object for track - :rtype: beets.autotag.hooks.TrackInfo """ artist, artist_id = self.get_artist( track_data.get("contributors", [track_data["artist"]]) @@ -190,118 +220,60 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): deezer_updated=time.time(), ) - def track_for_id(self, track_id=None, track_data=None): - """Fetch a track by its Deezer ID or URL and return a - TrackInfo object or None if the track is not found. - - :param track_id: (Optional) Deezer ID or URL for the track. Either - ``track_id`` or ``track_data`` must be provided. - :type track_id: str - :param track_data: (Optional) Simplified track object dict. May be - provided instead of ``track_id`` to avoid unnecessary API calls. - :type track_data: dict - :return: TrackInfo object for track - :rtype: beets.autotag.hooks.TrackInfo or None - """ - if track_data is None: - deezer_id = self._get_id("track", track_id, self.id_regex) - if deezer_id is None: - return None - track_data = self.fetch_data(self.track_url + deezer_id) - if track_data is None: - return None - track = self._get_track(track_data) - - # Get album's tracks to set `track.index` (position on the entire - # release) and `track.medium_total` (total number of tracks on - # the track's disc). 
- album_tracks_obj = self.fetch_data( - self.album_url + str(track_data["album"]["id"]) + "/tracks" - ) - if album_tracks_obj is None: - return None - try: - album_tracks_data = album_tracks_obj["data"] - except KeyError: - self._log.debug( - "Error fetching album tracks for {}", track_data["album"]["id"] - ) - return None - medium_total = 0 - for i, track_data in enumerate(album_tracks_data, start=1): - if track_data["disk_number"] == track.medium: - medium_total += 1 - if track_data["id"] == track.track_id: - track.index = i - track.medium_total = medium_total - return track - - @staticmethod - def _construct_search_query(filters=None, keywords=""): - """Construct a query string with the specified filters and keywords to - be provided to the Deezer Search API - (https://developers.deezer.com/api/search). - - :param filters: (Optional) Field filters to apply. - :type filters: dict - :param keywords: (Optional) Query keywords to use. - :type keywords: str - :return: Query string to be provided to the Search API. - :rtype: str - """ - query_components = [ - keywords, - " ".join(f'{k}:"{v}"' for k, v in filters.items()), - ] - query = " ".join([q for q in query_components if q]) - if not isinstance(query, str): - query = query.decode("utf8") - return unidecode.unidecode(query) - - def _search_api(self, query_type, filters=None, keywords=""): - """Query the Deezer Search API for the specified ``keywords``, applying + def _search_api( + self, + query_type: Literal[ + "album", + "track", + "artist", + "history", + "playlist", + "podcast", + "radio", + "user", + ], + filters: SearchFilter, + query_string: str = "", + ) -> Sequence[IDResponse]: + """Query the Deezer Search API for the specified ``query_string``, applying the provided ``filters``. - :param query_type: The Deezer Search API method to use. Valid types - are: 'album', 'artist', 'history', 'playlist', 'podcast', - 'radio', 'track', 'user', and 'track'. 
- :type query_type: str - :param filters: (Optional) Field filters to apply. - :type filters: dict - :param keywords: (Optional) Query keywords to use. - :type keywords: str + :param filters: Field filters to apply. + :param query_string: Additional query to include in the search. :return: JSON data for the class:`Response ` object or None if no search results are returned. - :rtype: dict or None """ - query = self._construct_search_query(keywords=keywords, filters=filters) - if not query: - return None - self._log.debug(f"Searching {self.data_source} for '{query}'") + query = self._construct_search_query( + query_string=query_string, filters=filters + ) + self._log.debug("Searching {.data_source} for '{}'", self, query) try: response = requests.get( - self.search_url + query_type, - params={"q": query}, + f"{self.search_url}{query_type}", + params={ + "q": query, + "limit": self.config["search_limit"].get(), + }, timeout=10, ) response.raise_for_status() except requests.exceptions.RequestException as e: self._log.error( - "Error fetching data from {} API\n Error: {}", - self.data_source, + "Error fetching data from {.data_source} API\n Error: {}", + self, e, ) - return None - response_data = response.json().get("data", []) + return () + response_data: Sequence[IDResponse] = response.json().get("data", []) self._log.debug( - "Found {} result(s) from {} for '{}'", + "Found {} result(s) from {.data_source} for '{}'", len(response_data), - self.data_source, + self, query, ) return response_data - def deezerupdate(self, items, write): + def deezerupdate(self, items: Sequence[Item], write: bool): """Obtain rank information from Deezer.""" for index, item in enumerate(items, start=1): self._log.info( @@ -327,3 +299,16 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin): item.deezer_updated = time.time() if write: item.try_write() + + def fetch_data(self, url: str): + try: + response = requests.get(url, timeout=10) + response.raise_for_status() + data = 
response.json() + except requests.exceptions.RequestException as e: + self._log.error("Error fetching data from {}\n Error: {}", url, e) + return None + if "error" in data: + self._log.debug("Deezer API error: {}", data["error"]["message"]) + return None + return data diff --git a/beetsplug/discogs.py b/beetsplug/discogs.py index 19521b035..29600a676 100644 --- a/beetsplug/discogs.py +++ b/beetsplug/discogs.py @@ -25,20 +25,27 @@ import re import socket import time import traceback +from functools import cache from string import ascii_lowercase +from typing import TYPE_CHECKING, cast import confuse from discogs_client import Client, Master, Release from discogs_client.exceptions import DiscogsAPIError from requests.exceptions import ConnectionError -from typing_extensions import TypedDict +from typing_extensions import NotRequired, TypedDict import beets import beets.ui from beets import config -from beets.autotag.hooks import AlbumInfo, TrackInfo, string_dist -from beets.plugins import BeetsPlugin, MetadataSourcePlugin, get_distance -from beets.util.id_extractors import extract_discogs_id_regex +from beets.autotag.distance import string_dist +from beets.autotag.hooks import AlbumInfo, TrackInfo +from beets.metadata_plugins import MetadataSourcePlugin + +if TYPE_CHECKING: + from collections.abc import Callable, Iterable, Sequence + + from beets.library import Item USER_AGENT = f"beets/{beets.__version__} +https://beets.io/" API_KEY = "rAzVUQYRaoFjeBjyWuWZ" @@ -54,13 +61,67 @@ CONNECTION_ERRORS = ( ) +TRACK_INDEX_RE = re.compile( + r""" + (.*?) # medium: everything before medium_index. + (\d*?) # medium_index: a number at the end of + # `position`, except if followed by a subtrack index. + # subtrack_index: can only be matched if medium + # or medium_index have been matched, and can be + ( + (?<=\w)\.[\w]+ # a dot followed by a string (A.1, 2.A) + | (?<=\d)[A-Z]+ # a string that follows a number (1A, B2a) + )? 
+ """, + re.VERBOSE, +) + +DISAMBIGUATION_RE = re.compile(r" \(\d+\)") + + class ReleaseFormat(TypedDict): name: str qty: int descriptions: list[str] | None -class DiscogsPlugin(BeetsPlugin): +class Artist(TypedDict): + name: str + anv: str + join: str + role: str + tracks: str + id: str + resource_url: str + + +class Track(TypedDict): + position: str + type_: str + title: str + duration: str + artists: list[Artist] + extraartists: NotRequired[list[Artist]] + + +class TrackWithSubtracks(Track): + sub_tracks: list[TrackWithSubtracks] + + +class IntermediateTrackInfo(TrackInfo): + """Allows work with string mediums from + get_track_info""" + + def __init__( + self, + medium_str: str | None, + **kwargs, + ) -> None: + self.medium_str = medium_str + super().__init__(**kwargs) + + +class DiscogsPlugin(MetadataSourcePlugin): def __init__(self): super().__init__() self.config.add( @@ -68,11 +129,17 @@ class DiscogsPlugin(BeetsPlugin): "apikey": API_KEY, "apisecret": API_SECRET, "tokenfile": "discogs_token.json", - "source_weight": 0.5, "user_token": "", "separator": ", ", "index_tracks": False, "append_style_genre": False, + "strip_disambiguation": True, + "featured_string": "Feat.", + "anv": { + "artist_credit": True, + "artist": False, + "album_artist": False, + }, } ) self.config["apikey"].redact = True @@ -80,7 +147,7 @@ class DiscogsPlugin(BeetsPlugin): self.config["user_token"].redact = True self.setup() - def setup(self, session=None): + def setup(self, session=None) -> None: """Create the `discogs_client` field. 
Authenticate if necessary.""" c_key = self.config["apikey"].as_str() c_secret = self.config["apisecret"].as_str() @@ -106,22 +173,22 @@ class DiscogsPlugin(BeetsPlugin): self.discogs_client = Client(USER_AGENT, c_key, c_secret, token, secret) - def reset_auth(self): + def reset_auth(self) -> None: """Delete token file & redo the auth steps.""" os.remove(self._tokenfile()) self.setup() - def _tokenfile(self): + def _tokenfile(self) -> str: """Get the path to the JSON file for storing the OAuth token.""" return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True)) - def authenticate(self, c_key, c_secret): + def authenticate(self, c_key: str, c_secret: str) -> tuple[str, str]: # Get the link for the OAuth page. auth_client = Client(USER_AGENT, c_key, c_secret) try: _, _, url = auth_client.get_authorize_url() except CONNECTION_ERRORS as e: - self._log.debug("connection error: {0}", e) + self._log.debug("connection error: {}", e) raise beets.ui.UserError("communication with Discogs failed") beets.ui.print_("To authenticate with Discogs, visit:") @@ -134,139 +201,53 @@ class DiscogsPlugin(BeetsPlugin): except DiscogsAPIError: raise beets.ui.UserError("Discogs authorization failed") except CONNECTION_ERRORS as e: - self._log.debug("connection error: {0}", e) + self._log.debug("connection error: {}", e) raise beets.ui.UserError("Discogs token request failed") # Save the token for later use. 
- self._log.debug("Discogs token {0}, secret {1}", token, secret) + self._log.debug("Discogs token {}, secret {}", token, secret) with open(self._tokenfile(), "w") as f: json.dump({"token": token, "secret": secret}, f) return token, secret - def album_distance(self, items, album_info, mapping): - """Returns the album distance.""" - return get_distance( - data_source="Discogs", info=album_info, config=self.config - ) + def candidates( + self, items: Sequence[Item], artist: str, album: str, va_likely: bool + ) -> Iterable[AlbumInfo]: + return self.get_albums(f"{artist} {album}" if va_likely else album) - def track_distance(self, item, track_info): - """Returns the track distance.""" - return get_distance( - data_source="Discogs", info=track_info, config=self.config - ) - - def candidates(self, items, artist, album, va_likely, extra_tags=None): - """Returns a list of AlbumInfo objects for discogs search results - matching an album and artist (if not various). - """ - if not album and not artist: - self._log.debug( - "Skipping Discogs query. Files missing album and artist tags." 
- ) - return [] - - if va_likely: - query = album - else: - query = f"{artist} {album}" - try: - return self.get_albums(query) - except DiscogsAPIError as e: - self._log.debug("API Error: {0} (query: {1})", e, query) - if e.status_code == 401: - self.reset_auth() - return self.candidates(items, artist, album, va_likely) - else: - return [] - except CONNECTION_ERRORS: - self._log.debug("Connection error in album search", exc_info=True) - return [] - - def get_track_from_album_by_title( - self, album_info, title, dist_threshold=0.3 - ): - def compare_func(track_info): - track_title = getattr(track_info, "title", None) - dist = string_dist(track_title, title) - return track_title and dist < dist_threshold - - return self.get_track_from_album(album_info, compare_func) - - def get_track_from_album(self, album_info, compare_func): - """Return the first track of the release where `compare_func` returns - true. - - :return: TrackInfo object. - :rtype: beets.autotag.hooks.TrackInfo - """ - if not album_info: + def get_track_from_album( + self, album_info: AlbumInfo, compare: Callable[[TrackInfo], float] + ) -> TrackInfo | None: + """Return the best matching track of the release.""" + scores_and_tracks = [(compare(t), t) for t in album_info.tracks] + score, track_info = min(scores_and_tracks, key=lambda x: x[0]) + if score > 0.3: return None - for track_info in album_info.tracks: - # check for matching position - if not compare_func(track_info): - continue + track_info["artist"] = album_info.artist + track_info["artist_id"] = album_info.artist_id + track_info["album"] = album_info.album + return track_info - # attach artist info if not provided - if not track_info["artist"]: - track_info["artist"] = album_info.artist - track_info["artist_id"] = album_info.artist_id - # attach album info - track_info["album"] = album_info.album + def item_candidates( + self, item: Item, artist: str, title: str + ) -> Iterable[TrackInfo]: + albums = self.candidates([item], artist, title, 
False) - return track_info + def compare_func(track_info: TrackInfo) -> float: + return string_dist(track_info.title, title) - return None + tracks = (self.get_track_from_album(a, compare_func) for a in albums) + return list(filter(None, tracks)) - def item_candidates(self, item, artist, title): - """Returns a list of TrackInfo objects for Search API results - matching ``title`` and ``artist``. - :param item: Singleton item to be matched. - :type item: beets.library.Item - :param artist: The artist of the track to be matched. - :type artist: str - :param title: The title of the track to be matched. - :type title: str - :return: Candidate TrackInfo objects. - :rtype: list[beets.autotag.hooks.TrackInfo] - """ - if not artist and not title: - self._log.debug( - "Skipping Discogs query. File missing artist and title tags." - ) - return [] - - query = f"{artist} {title}" - try: - albums = self.get_albums(query) - except DiscogsAPIError as e: - self._log.debug("API Error: {0} (query: {1})", e, query) - if e.status_code == 401: - self.reset_auth() - return self.item_candidates(item, artist, title) - else: - return [] - except CONNECTION_ERRORS: - self._log.debug("Connection error in track search", exc_info=True) - candidates = [] - for album_cur in albums: - self._log.debug("searching within album {0}", album_cur.album) - track_result = self.get_track_from_album_by_title( - album_cur, item["title"] - ) - if track_result: - candidates.append(track_result) - # first 10 results, don't overwhelm with options - return candidates[:10] - - def album_for_id(self, album_id): + def album_for_id(self, album_id: str) -> AlbumInfo | None: """Fetches an album by its Discogs ID and returns an AlbumInfo object or None if the album is not found. 
""" - self._log.debug("Searching for release {0}", album_id) + self._log.debug("Searching for release {}", album_id) - discogs_id = extract_discogs_id_regex(album_id) + discogs_id = self._extract_id(album_id) if not discogs_id: return None @@ -278,7 +259,7 @@ class DiscogsPlugin(BeetsPlugin): except DiscogsAPIError as e: if e.status_code != 404: self._log.debug( - "API Error: {0} (query: {1})", + "API Error: {} (query: {})", e, result.data["resource_url"], ) @@ -291,7 +272,15 @@ class DiscogsPlugin(BeetsPlugin): return None return self.get_album_info(result) - def get_albums(self, query): + def track_for_id(self, track_id: str) -> TrackInfo | None: + if album := self.album_for_id(track_id): + for track in album.tracks: + if track.track_id == track_id: + return track + + return None + + def get_albums(self, query: str) -> Iterable[AlbumInfo]: """Returns a list of AlbumInfo objects for a discogs search query.""" # Strip non-word characters from query. Things like "!" and "-" can # cause a query to return no results, even if they match the artist or @@ -303,8 +292,9 @@ class DiscogsPlugin(BeetsPlugin): query = re.sub(r"(?i)\b(CD|disc|vinyl)\s*\d+", "", query) try: - releases = self.discogs_client.search(query, type="release").page(1) - + results = self.discogs_client.search(query, type="release") + results.per_page = self.config["search_limit"].get() + releases = results.page(1) except CONNECTION_ERRORS: self._log.debug( "Communication error while searching for {0!r}", @@ -312,24 +302,22 @@ class DiscogsPlugin(BeetsPlugin): exc_info=True, ) return [] - return [ - album for album in map(self.get_album_info, releases[:5]) if album - ] + return filter(None, map(self.get_album_info, releases)) - def get_master_year(self, master_id): + @cache + def get_master_year(self, master_id: str) -> int | None: """Fetches a master release given its Discogs ID and returns its year or None if the master release is not found. 
""" - self._log.debug("Searching for master release {0}", master_id) + self._log.debug("Getting master release {}", master_id) result = Master(self.discogs_client, {"id": master_id}) try: - year = result.fetch("year") - return year + return result.fetch("year") except DiscogsAPIError as e: if e.status_code != 404: self._log.debug( - "API Error: {0} (query: {1})", + "API Error: {} (query: {})", e, result.data["resource_url"], ) @@ -355,12 +343,38 @@ class DiscogsPlugin(BeetsPlugin): return media, albumtype - def get_album_info(self, result): + def get_artist_with_anv( + self, artists: list[Artist], use_anv: bool = False + ) -> tuple[str, str | None]: + """Iterates through a discogs result, fetching data + if the artist anv is to be used, maps that to the name. + Calls the parent class get_artist method.""" + artist_list: list[dict[str | int, str]] = [] + for artist_data in artists: + a: dict[str | int, str] = { + "name": artist_data["name"], + "id": artist_data["id"], + "join": artist_data.get("join", ""), + } + if use_anv and (anv := artist_data.get("anv", "")): + a["name"] = anv + artist_list.append(a) + artist, artist_id = self.get_artist(artist_list, join_key="join") + return self.strip_disambiguation(artist), artist_id + + def get_album_info(self, result: Release) -> AlbumInfo | None: """Returns an AlbumInfo object for a discogs Release object.""" # Explicitly reload the `Release` fields, as they might not be yet # present if the result is from a `discogs_client.search()`. if not result.data.get("artists"): - result.refresh() + try: + result.refresh() + except CONNECTION_ERRORS: + self._log.debug( + "Connection error in release lookup: {0}", + result, + ) + return None # Sanity check for required fields. 
The list of required fields is # defined at Guideline 1.3.1.a, but in practice some releases might be @@ -376,16 +390,29 @@ class DiscogsPlugin(BeetsPlugin): self._log.warning("Release does not contain the required fields") return None - artist, artist_id = MetadataSourcePlugin.get_artist( - [a.data for a in result.artists], join_key="join" + artist_data = [a.data for a in result.artists] + album_artist, album_artist_id = self.get_artist_with_anv(artist_data) + album_artist_anv, _ = self.get_artist_with_anv( + artist_data, use_anv=True ) + artist_credit = album_artist_anv + album = re.sub(r" +", " ", result.title) album_id = result.data["id"] # Use `.data` to access the tracklist directly instead of the # convenient `.tracklist` property, which will strip out useful artist # information and leave us with skeleton `Artist` objects that will # each make an API call just to get the same data back. - tracks = self.get_tracks(result.data["tracklist"]) + tracks = self.get_tracks( + result.data["tracklist"], + (album_artist, album_artist_anv, album_artist_id), + ) + + # Assign ANV to the proper fields for tagging + if not self.config["anv"]["artist_credit"]: + artist_credit = album_artist + if self.config["anv"]["album_artist"]: + album_artist = album_artist_anv # Extract information for the optional AlbumInfo fields, if possible. va = result.data["artists"][0].get("name", "").lower() == "various" @@ -401,7 +428,7 @@ class DiscogsPlugin(BeetsPlugin): else: genre = base_genre - discogs_albumid = extract_discogs_id_regex(result.data.get("uri")) + discogs_albumid = self._extract_id(result.data.get("uri")) # Extract information for the optional AlbumInfo fields that are # contained on nested discogs fields. 
@@ -411,15 +438,20 @@ class DiscogsPlugin(BeetsPlugin): label = catalogno = labelid = None if result.data.get("labels"): - label = result.data["labels"][0].get("name") + label = self.strip_disambiguation( + result.data["labels"][0].get("name") + ) catalogno = result.data["labels"][0].get("catno") labelid = result.data["labels"][0].get("id") cover_art_url = self.select_cover_art(result) - # Additional cleanups (various artists name, catalog number, media). + # Additional cleanups + # (various artists name, catalog number, media, disambiguation). if va: - artist = config["va_name"].as_str() + va_name = config["va_name"].as_str() + album_artist = va_name + artist_credit = va_name if catalogno == "none": catalogno = None # Explicitly set the `media` for the tracks, since it is expected by @@ -427,13 +459,9 @@ class DiscogsPlugin(BeetsPlugin): for track in tracks: track.media = media track.medium_total = mediums.count(track.medium) - if not track.artist: # get_track_info often fails to find artist - track.artist = artist - if not track.artist_id: - track.artist_id = artist_id # Discogs does not have track IDs. Invent our own IDs as proposed # in #2336. 
- track.track_id = str(album_id) + "-" + track.track_alt + track.track_id = f"{album_id}-{track.track_alt}" track.data_url = data_url track.data_source = "Discogs" @@ -446,8 +474,9 @@ class DiscogsPlugin(BeetsPlugin): return AlbumInfo( album=album, album_id=album_id, - artist=artist, - artist_id=artist_id, + artist=album_artist, + artist_credit=artist_credit, + artist_id=album_artist_id, tracks=tracks, albumtype=albumtype, va=va, @@ -461,15 +490,15 @@ class DiscogsPlugin(BeetsPlugin): genre=genre, media=media, original_year=original_year, - data_source="Discogs", + data_source=self.data_source, data_url=data_url, discogs_albumid=discogs_albumid, discogs_labelid=labelid, - discogs_artistid=artist_id, + discogs_artistid=album_artist_id, cover_art_url=cover_art_url, ) - def select_cover_art(self, result): + def select_cover_art(self, result: Release) -> str | None: """Returns the best candidate image, if any, from a Discogs `Release` object.""" if result.data.get("images") and len(result.data.get("images")) > 0: # The first image in this list appears to be the one displayed first @@ -479,7 +508,7 @@ class DiscogsPlugin(BeetsPlugin): return None - def format(self, classification): + def format(self, classification: Iterable[str]) -> str | None: if classification: return ( self.config["separator"].as_str().join(sorted(classification)) @@ -487,22 +516,17 @@ class DiscogsPlugin(BeetsPlugin): else: return None - def get_tracks(self, tracklist): - """Returns a list of TrackInfo objects for a discogs tracklist.""" - try: - clean_tracklist = self.coalesce_tracks(tracklist) - except Exception as exc: - # FIXME: this is an extra precaution for making sure there are no - # side effects after #2222. It should be removed after further - # testing. 
- self._log.debug("{}", traceback.format_exc()) - self._log.error("uncaught exception in coalesce_tracks: {}", exc) - clean_tracklist = tracklist - tracks = [] + def _process_clean_tracklist( + self, + clean_tracklist: list[Track], + album_artist_data: tuple[str, str, str | None], + ) -> tuple[list[TrackInfo], dict[int, str], int, list[str], list[str]]: + # Distinct works and intra-work divisions, as defined by index tracks. + tracks: list[TrackInfo] = [] index_tracks = {} index = 0 - # Distinct works and intra-work divisions, as defined by index tracks. - divisions, next_divisions = [], [] + divisions: list[str] = [] + next_divisions: list[str] = [] for track in clean_tracklist: # Only real tracks have `position`. Otherwise, it's an index track. if track["position"]: @@ -512,7 +536,9 @@ class DiscogsPlugin(BeetsPlugin): # divisions. divisions += next_divisions del next_divisions[:] - track_info = self.get_track_info(track, index, divisions) + track_info = self.get_track_info( + track, index, divisions, album_artist_data + ) track_info.track_alt = track["position"] tracks.append(track_info) else: @@ -524,7 +550,29 @@ class DiscogsPlugin(BeetsPlugin): except IndexError: pass index_tracks[index + 1] = track["title"] + return tracks, index_tracks, index, divisions, next_divisions + def get_tracks( + self, + tracklist: list[Track], + album_artist_data: tuple[str, str, str | None], + ) -> list[TrackInfo]: + """Returns a list of TrackInfo objects for a discogs tracklist.""" + try: + clean_tracklist: list[Track] = self.coalesce_tracks( + cast(list[TrackWithSubtracks], tracklist) + ) + except Exception as exc: + # FIXME: this is an extra precaution for making sure there are no + # side effects after #2222. It should be removed after further + # testing. 
+ self._log.debug("{}", traceback.format_exc()) + self._log.error("uncaught exception in coalesce_tracks: {}", exc) + clean_tracklist = tracklist + processed = self._process_clean_tracklist( + clean_tracklist, album_artist_data + ) + tracks, index_tracks, index, divisions, next_divisions = processed # Fix up medium and medium_index for each track. Discogs position is # unreliable, but tracks are in order. medium = None @@ -533,8 +581,8 @@ class DiscogsPlugin(BeetsPlugin): # If a medium has two sides (ie. vinyl or cassette), each pair of # consecutive sides should belong to the same medium. - if all([track.medium is not None for track in tracks]): - m = sorted({track.medium.lower() for track in tracks}) + if all([track.medium_str is not None for track in tracks]): + m = sorted({track.medium_str.lower() for track in tracks}) # If all track.medium are single consecutive letters, assume it is # a 2-sided medium. if "".join(m) in ascii_lowercase: @@ -548,17 +596,17 @@ class DiscogsPlugin(BeetsPlugin): # side_count is the number of mediums or medium sides (in the case # of two-sided mediums) that were seen before. medium_is_index = ( - track.medium + track.medium_str and not track.medium_index and ( - len(track.medium) != 1 + len(track.medium_str) != 1 or # Not within standard incremental medium values (A, B, C, ...). - ord(track.medium) - 64 != side_count + 1 + ord(track.medium_str) - 64 != side_count + 1 ) ) - if not medium_is_index and medium != track.medium: + if not medium_is_index and medium != track.medium_str: side_count += 1 if sides_per_medium == 2: if side_count % sides_per_medium: @@ -569,7 +617,7 @@ class DiscogsPlugin(BeetsPlugin): # Medium changed. Reset index_count. 
medium_count += 1 index_count = 0 - medium = track.medium + medium = track.medium_str index_count += 1 medium_count = 1 if medium_count == 0 else medium_count @@ -585,22 +633,27 @@ class DiscogsPlugin(BeetsPlugin): disctitle = None track.disctitle = disctitle - return tracks + return cast(list[TrackInfo], tracks) - def coalesce_tracks(self, raw_tracklist): + def coalesce_tracks( + self, raw_tracklist: list[TrackWithSubtracks] + ) -> list[Track]: """Pre-process a tracklist, merging subtracks into a single track. The title for the merged track is the one from the previous index track, if present; otherwise it is a combination of the subtracks titles. """ - def add_merged_subtracks(tracklist, subtracks): + def add_merged_subtracks( + tracklist: list[TrackWithSubtracks], + subtracks: list[TrackWithSubtracks], + ) -> None: """Modify `tracklist` in place, merging a list of `subtracks` into a single track into `tracklist`.""" # Calculate position based on first subtrack, without subindex. idx, medium_idx, sub_idx = self.get_track_index( subtracks[0]["position"] ) - position = "{}{}".format(idx or "", medium_idx or "") + position = f"{idx or ''}{medium_idx or ''}" if tracklist and not tracklist[-1]["position"]: # Assume the previous index track contains the track title. @@ -622,8 +675,8 @@ class DiscogsPlugin(BeetsPlugin): # option is set if self.config["index_tracks"]: for subtrack in subtracks: - subtrack["title"] = "{}: {}".format( - index_track["title"], subtrack["title"] + subtrack["title"] = ( + f"{index_track['title']}: {subtrack['title']}" ) tracklist.extend(subtracks) else: @@ -633,8 +686,8 @@ class DiscogsPlugin(BeetsPlugin): tracklist.append(track) # Pre-process the tracklist, trying to identify subtracks. - subtracks = [] - tracklist = [] + subtracks: list[TrackWithSubtracks] = [] + tracklist: list[TrackWithSubtracks] = [] prev_subindex = "" for track in raw_tracklist: # Regular subtrack (track with subindex). 
@@ -669,10 +722,32 @@ class DiscogsPlugin(BeetsPlugin): if subtracks: add_merged_subtracks(tracklist, subtracks) - return tracklist + return cast(list[Track], tracklist) - def get_track_info(self, track, index, divisions): + def strip_disambiguation(self, text: str) -> str: + """Removes discogs specific disambiguations from a string. + Turns 'Label Name (5)' to 'Label Name' or 'Artist (1) & Another Artist (2)' + to 'Artist & Another Artist'. Does nothing if strip_disambiguation is False.""" + if not self.config["strip_disambiguation"]: + return text + return DISAMBIGUATION_RE.sub("", text) + + def get_track_info( + self, + track: Track, + index: int, + divisions: list[str], + album_artist_data: tuple[str, str, str | None], + ) -> IntermediateTrackInfo: """Returns a TrackInfo object for a discogs track.""" + + artist, artist_anv, artist_id = album_artist_data + artist_credit = artist_anv + if not self.config["anv"]["artist_credit"]: + artist_credit = artist + if self.config["anv"]["artist"]: + artist = artist_anv + title = track["title"] if self.config["index_tracks"]: prefix = ", ".join(divisions) @@ -680,51 +755,65 @@ class DiscogsPlugin(BeetsPlugin): title = f"{prefix}: {title}" track_id = None medium, medium_index, _ = self.get_track_index(track["position"]) - artist, artist_id = MetadataSourcePlugin.get_artist( - track.get("artists", []), join_key="join" - ) + + # If artists are found on the track, we will use those instead + if artists := track.get("artists", []): + artist, artist_id = self.get_artist_with_anv( + artists, self.config["anv"]["artist"] + ) + artist_credit, _ = self.get_artist_with_anv( + artists, self.config["anv"]["artist_credit"] + ) length = self.get_track_length(track["duration"]) - return TrackInfo( + + # Add featured artists + if extraartists := track.get("extraartists", []): + featured_list = [ + artist + for artist in extraartists + if "Featuring" in artist["role"] + ] + featured, _ = self.get_artist_with_anv( + featured_list, 
self.config["anv"]["artist"] + ) + featured_credit, _ = self.get_artist_with_anv( + featured_list, self.config["anv"]["artist_credit"] + ) + if featured: + artist += f" {self.config['featured_string']} {featured}" + artist_credit += ( + f" {self.config['featured_string']} {featured_credit}" + ) + return IntermediateTrackInfo( title=title, track_id=track_id, + artist_credit=artist_credit, artist=artist, artist_id=artist_id, length=length, index=index, - medium=medium, + medium_str=medium, medium_index=medium_index, ) - def get_track_index(self, position): + @staticmethod + def get_track_index( + position: str, + ) -> tuple[str | None, str | None, str | None]: """Returns the medium, medium index and subtrack index for a discogs track position.""" # Match the standard Discogs positions (12.2.9), which can have several # forms (1, 1-1, A1, A1.1, A1a, ...). - match = re.match( - r"^(.*?)" # medium: everything before medium_index. - r"(\d*?)" # medium_index: a number at the end of - # `position`, except if followed by a subtrack - # index. - # subtrack_index: can only be matched if medium - # or medium_index have been matched, and can be - r"((?<=\w)\.[\w]+" # - a dot followed by a string (A.1, 2.A) - r"|(?<=\d)[A-Z]+" # - a string that follows a number (1A, B2a) - r")?" 
- r"$", - position.upper(), - ) - - if match: + medium = index = subindex = None + if match := TRACK_INDEX_RE.fullmatch(position.upper()): medium, index, subindex = match.groups() if subindex and subindex.startswith("."): subindex = subindex[1:] - else: - self._log.debug("Invalid position: {0}", position) - medium = index = subindex = None + return medium or None, index or None, subindex or None - def get_track_length(self, duration): + def get_track_length(self, duration: str) -> int | None: """Returns the track length in seconds for a discogs duration.""" try: length = time.strptime(duration, "%M:%S") diff --git a/beetsplug/duplicates.py b/beetsplug/duplicates.py index 1e30a60a5..904e19262 100644 --- a/beetsplug/duplicates.py +++ b/beetsplug/duplicates.py @@ -19,7 +19,7 @@ import shlex from beets.library import Album, Item from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, UserError, decargs, print_ +from beets.ui import Subcommand, UserError, print_ from beets.util import ( MoveOperation, bytestring_path, @@ -53,6 +53,7 @@ class DuplicatesPlugin(BeetsPlugin): "tiebreak": {}, "strict": False, "tag": "", + "remove": False, } ) @@ -131,6 +132,13 @@ class DuplicatesPlugin(BeetsPlugin): action="store", help="tag matched items with 'k=v' attribute", ) + self._command.parser.add_option( + "-r", + "--remove", + dest="remove", + action="store_true", + help="remove items from library", + ) self._command.parser.add_all_common_options() def commands(self): @@ -141,7 +149,8 @@ class DuplicatesPlugin(BeetsPlugin): copy = bytestring_path(self.config["copy"].as_str()) count = self.config["count"].get(bool) delete = self.config["delete"].get(bool) - fmt = self.config["format"].get(str) + remove = self.config["remove"].get(bool) + fmt_tmpl = self.config["format"].get(str) full = self.config["full"].get(bool) keys = self.config["keys"].as_str_seq() merge = self.config["merge"].get(bool) @@ -154,11 +163,11 @@ class DuplicatesPlugin(BeetsPlugin): if album: if 
not keys: keys = ["mb_albumid"] - items = lib.albums(decargs(args)) + items = lib.albums(args) else: if not keys: keys = ["mb_trackid", "mb_albumid"] - items = lib.items(decargs(args)) + items = lib.items(args) # If there's nothing to do, return early. The code below assumes # `items` to be non-empty. @@ -166,15 +175,14 @@ class DuplicatesPlugin(BeetsPlugin): return if path: - fmt = "$path" + fmt_tmpl = "$path" # Default format string for count mode. - if count and not fmt: + if count and not fmt_tmpl: if album: - fmt = "$albumartist - $album" + fmt_tmpl = "$albumartist - $album" else: - fmt = "$albumartist - $album - $title" - fmt += ": {0}" + fmt_tmpl = "$albumartist - $album - $title" if checksum: for i in items: @@ -196,15 +204,23 @@ class DuplicatesPlugin(BeetsPlugin): copy=copy, move=move, delete=delete, + remove=remove, tag=tag, - fmt=fmt.format(obj_count), + fmt=f"{fmt_tmpl}: {obj_count}", ) self._command.func = _dup return [self._command] def _process_item( - self, item, copy=False, move=False, delete=False, tag=False, fmt="" + self, + item, + copy=False, + move=False, + delete=False, + tag=False, + fmt="", + remove=False, ): """Process Item `item`.""" print_(format(item, fmt)) @@ -216,6 +232,8 @@ class DuplicatesPlugin(BeetsPlugin): item.store() if delete: item.remove(delete=True) + elif remove: + item.remove(delete=False) if tag: try: k, v = tag.split("=") @@ -236,28 +254,24 @@ class DuplicatesPlugin(BeetsPlugin): checksum = getattr(item, key, False) if not checksum: self._log.debug( - "key {0} on item {1} not cached:" "computing checksum", + "key {} on item {.filepath} not cached:computing checksum", key, - displayable_path(item.path), + item, ) try: checksum = command_output(args).stdout setattr(item, key, checksum) item.store() self._log.debug( - "computed checksum for {0} using {1}", item.title, key + "computed checksum for {.title} using {}", item, key ) except subprocess.CalledProcessError as e: - self._log.debug( - "failed to checksum {0}: {1}", - 
displayable_path(item.path), - e, - ) + self._log.debug("failed to checksum {.filepath}: {}", item, e) else: self._log.debug( - "key {0} on item {1} cached:" "not computing checksum", + "key {} on item {.filepath} cached:not computing checksum", key, - displayable_path(item.path), + item, ) return key, checksum @@ -275,15 +289,15 @@ class DuplicatesPlugin(BeetsPlugin): values = [v for v in values if v not in (None, "")] if strict and len(values) < len(keys): self._log.debug( - "some keys {0} on item {1} are null or empty:" " skipping", + "some keys {} on item {.filepath} are null or empty: skipping", keys, - displayable_path(obj.path), + obj, ) elif not strict and not len(values): self._log.debug( - "all keys {0} on item {1} are null or empty:" " skipping", + "all keys {} on item {.filepath} are null or empty: skipping", keys, - displayable_path(obj.path), + obj, ) else: key = tuple(values) @@ -341,11 +355,11 @@ class DuplicatesPlugin(BeetsPlugin): value = getattr(o, f, None) if value: self._log.debug( - "key {0} on item {1} is null " - "or empty: setting from item {2}", + "key {} on item {} is null " + "or empty: setting from item {.filepath}", f, displayable_path(objs[0].path), - displayable_path(o.path), + o, ) setattr(objs[0], f, value) objs[0].store() @@ -365,11 +379,11 @@ class DuplicatesPlugin(BeetsPlugin): missing.album_id = objs[0].id missing.add(i._db) self._log.debug( - "item {0} missing from album {1}:" - " merging from {2} into {3}", + "item {} missing from album {}:" + " merging from {.filepath} into {}", missing, objs[0], - displayable_path(o.path), + o, displayable_path(missing.destination()), ) missing.move(operation=MoveOperation.COPY) diff --git a/beetsplug/edit.py b/beetsplug/edit.py index 51b36bdab..46e756122 100644 --- a/beetsplug/edit.py +++ b/beetsplug/edit.py @@ -24,8 +24,9 @@ import yaml from beets import plugins, ui, util from beets.dbcore import types -from beets.importer import action -from beets.ui.commands import PromptChoice, 
_do_query +from beets.importer import Action +from beets.ui.commands.utils import do_query +from beets.util import PromptChoice # These "safe" types can avoid the format/parse cycle that most fields go # through: they are safe to edit with native YAML types. @@ -46,9 +47,7 @@ def edit(filename, log): try: subprocess.call(cmd) except OSError as exc: - raise ui.UserError( - "could not run editor command {!r}: {}".format(cmd[0], exc) - ) + raise ui.UserError(f"could not run editor command {cmd[0]!r}: {exc}") def dump(arg): @@ -71,9 +70,7 @@ def load(s): for d in yaml.safe_load_all(s): if not isinstance(d, dict): raise ParseError( - "each entry must be a dictionary; found {}".format( - type(d).__name__ - ) + f"each entry must be a dictionary; found {type(d).__name__}" ) # Convert all keys to strings. They started out as strings, @@ -180,8 +177,7 @@ class EditPlugin(plugins.BeetsPlugin): def _edit_command(self, lib, opts, args): """The CLI command function for the `beet edit` command.""" # Get the objects to edit. - query = ui.decargs(args) - items, albums = _do_query(lib, query, opts.album, False) + items, albums = do_query(lib, args, opts.album, False) objs = albums if opts.album else items if not objs: ui.print_("Nothing to edit.") @@ -279,23 +275,18 @@ class EditPlugin(plugins.BeetsPlugin): ui.print_("No changes to apply.") return False - # Confirm the changes. + # For cancel/keep-editing, restore objects to their original + # in-memory state so temp edits don't leak into the session choice = ui.input_options( ("continue Editing", "apply", "cancel") ) if choice == "a": # Apply. return True elif choice == "c": # Cancel. + self.apply_data(objs, new_data, old_data) return False elif choice == "e": # Keep editing. - # Reset the temporary changes to the objects. I we have a - # copy from above, use that, else reload from the database. 
- objs = [ - (old_obj or obj) for old_obj, obj in zip(objs_old, objs) - ] - for obj in objs: - if not obj.id < 0: - obj.load() + self.apply_data(objs, new_data, old_data) continue # Remove the temporary file before returning. @@ -380,13 +371,11 @@ class EditPlugin(plugins.BeetsPlugin): # Save the new data. if success: - # Return action.RETAG, which makes the importer write the tags + # Return Action.RETAG, which makes the importer write the tags # to the files if needed without re-applying metadata. - return action.RETAG + return Action.RETAG else: - # Edit cancelled / no edits made. Revert changes. - for obj in task.items: - obj.read() + return None def importer_edit_candidate(self, session, task): """Callback for invoking the functionality during an interactive diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py index 740863bf1..cbf40f570 100644 --- a/beetsplug/embedart.py +++ b/beetsplug/embedart.py @@ -20,11 +20,12 @@ from mimetypes import guess_extension import requests -from beets import art, config, ui +from beets import config, ui from beets.plugins import BeetsPlugin -from beets.ui import decargs, print_ +from beets.ui import print_ from beets.util import bytestring_path, displayable_path, normpath, syspath from beets.util.artresizer import ArtResizer +from beetsplug._utils import art def _confirm(objs, album): @@ -35,8 +36,9 @@ def _confirm(objs, album): to items). """ noun = "album" if album else "file" - prompt = "Modify artwork for {} {}{} (Y/n)?".format( - len(objs), noun, "s" if len(objs) > 1 else "" + prompt = ( + "Modify artwork for" + f" {len(objs)} {noun}{'s' if len(objs) > 1 else ''} (Y/n)?" ) # Show all the items or albums. 
@@ -66,7 +68,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): if self.config["maxwidth"].get(int) and not ArtResizer.shared.local: self.config["maxwidth"] = 0 self._log.warning( - "ImageMagick or PIL not found; " "'maxwidth' option ignored" + "ImageMagick or PIL not found; 'maxwidth' option ignored" ) if ( self.config["compare_threshold"].get(int) @@ -110,12 +112,10 @@ class EmbedCoverArtPlugin(BeetsPlugin): imagepath = normpath(opts.file) if not os.path.isfile(syspath(imagepath)): raise ui.UserError( - "image file {} not found".format( - displayable_path(imagepath) - ) + f"image file {displayable_path(imagepath)} not found" ) - items = lib.items(decargs(args)) + items = lib.items(args) # Confirm with user. if not opts.yes and not _confirm(items, not opts.file): @@ -137,7 +137,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): response = requests.get(opts.url, timeout=5) response.raise_for_status() except requests.exceptions.RequestException as e: - self._log.error("{}".format(e)) + self._log.error("{}", e) return extension = guess_extension(response.headers["Content-Type"]) if extension is None: @@ -149,9 +149,9 @@ class EmbedCoverArtPlugin(BeetsPlugin): with open(tempimg, "wb") as f: f.write(response.content) except Exception as e: - self._log.error("Unable to save image: {}".format(e)) + self._log.error("Unable to save image: {}", e) return - items = lib.items(decargs(args)) + items = lib.items(args) # Confirm with user. if not opts.yes and not _confirm(items, not opts.url): os.remove(tempimg) @@ -169,7 +169,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): ) os.remove(tempimg) else: - albums = lib.albums(decargs(args)) + albums = lib.albums(args) # Confirm with user. 
if not opts.yes and not _confirm(albums, not opts.file): return @@ -212,7 +212,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): def extract_func(lib, opts, args): if opts.outpath: art.extract_first( - self._log, normpath(opts.outpath), lib.items(decargs(args)) + self._log, normpath(opts.outpath), lib.items(args) ) else: filename = bytestring_path( @@ -223,7 +223,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): "Only specify a name rather than a path for -n" ) return - for album in lib.albums(decargs(args)): + for album in lib.albums(args): artpath = normpath(os.path.join(album.path, filename)) artpath = art.extract_first( self._log, artpath, album.items() @@ -244,11 +244,11 @@ class EmbedCoverArtPlugin(BeetsPlugin): ) def clear_func(lib, opts, args): - items = lib.items(decargs(args)) + items = lib.items(args) # Confirm with user. if not opts.yes and not _confirm(items, False): return - art.clear(self._log, lib, decargs(args)) + art.clear(self._log, lib, args) clear_cmd.func = clear_func @@ -274,7 +274,7 @@ class EmbedCoverArtPlugin(BeetsPlugin): """ if self.config["remove_art_file"] and album.artpath: if os.path.isfile(syspath(album.artpath)): - self._log.debug("Removing album art file for {0}", album) + self._log.debug("Removing album art file for {}", album) os.remove(syspath(album.artpath)) album.artpath = None album.store() diff --git a/beetsplug/embyupdate.py b/beetsplug/embyupdate.py index 2cda6af5e..25f3ed8b3 100644 --- a/beetsplug/embyupdate.py +++ b/beetsplug/embyupdate.py @@ -13,7 +13,6 @@ from urllib.parse import parse_qs, urlencode, urljoin, urlsplit, urlunsplit import requests -from beets import config from beets.plugins import BeetsPlugin @@ -39,9 +38,7 @@ def api_url(host, port, endpoint): hostname_list.insert(0, "http://") hostname = "".join(hostname_list) - joined = urljoin( - "{hostname}:{port}".format(hostname=hostname, port=port), endpoint - ) + joined = urljoin(f"{hostname}:{port}", endpoint) scheme, netloc, path, query_string, fragment = 
urlsplit(joined) query_params = parse_qs(query_string) @@ -82,12 +79,12 @@ def create_headers(user_id, token=None): headers = {} authorization = ( - 'MediaBrowser UserId="{user_id}", ' + f'MediaBrowser UserId="{user_id}", ' 'Client="other", ' 'Device="beets", ' 'DeviceId="beets", ' 'Version="0.0.0"' - ).format(user_id=user_id) + ) headers["x-emby-authorization"] = authorization @@ -143,17 +140,23 @@ def get_user(host, port, username): class EmbyUpdate(BeetsPlugin): def __init__(self): - super().__init__() + super().__init__("emby") # Adding defaults. - config["emby"].add( + self.config.add( { "host": "http://localhost", "port": 8096, - "apikey": None, + "username": None, "password": None, + "userid": None, + "apikey": None, } ) + self.config["username"].redact = True + self.config["password"].redact = True + self.config["userid"].redact = True + self.config["apikey"].redact = True self.register_listener("database_change", self.listen_for_db_change) @@ -165,12 +168,12 @@ class EmbyUpdate(BeetsPlugin): """When the client exists try to send refresh request to Emby.""" self._log.info("Updating Emby library...") - host = config["emby"]["host"].get() - port = config["emby"]["port"].get() - username = config["emby"]["username"].get() - password = config["emby"]["password"].get() - userid = config["emby"]["userid"].get() - token = config["emby"]["apikey"].get() + host = self.config["host"].get() + port = self.config["port"].get() + username = self.config["username"].get() + password = self.config["password"].get() + userid = self.config["userid"].get() + token = self.config["apikey"].get() # Check if at least a apikey or password is given. if not any([password, token]): @@ -181,7 +184,7 @@ class EmbyUpdate(BeetsPlugin): # Get user information from the Emby API. 
user = get_user(host, port, username) if not user: - self._log.warning(f"User {username} could not be found.") + self._log.warning("User {} could not be found.", username) return userid = user[0]["Id"] @@ -193,7 +196,7 @@ class EmbyUpdate(BeetsPlugin): # Get authentication token. token = get_token(host, port, headers, auth_data) if not token: - self._log.warning("Could not get token for user {0}", username) + self._log.warning("Could not get token for user {}", username) return # Recreate headers with a token. diff --git a/beetsplug/export.py b/beetsplug/export.py index 9b8ad3580..e6c2b88c7 100644 --- a/beetsplug/export.py +++ b/beetsplug/export.py @@ -144,13 +144,13 @@ class ExportPlugin(BeetsPlugin): items = [] for data_emitter in data_collector( lib, - ui.decargs(args), + args, album=opts.album, ): try: data, item = data_emitter(included_keys or "*") except (mediafile.UnreadableFileError, OSError) as ex: - self._log.error("cannot read file: {0}", ex) + self._log.error("cannot read file: {}", ex) continue for key, value in data.items(): diff --git a/beetsplug/fetchart.py b/beetsplug/fetchart.py index 0da884278..f1cc85f44 100644 --- a/beetsplug/fetchart.py +++ b/beetsplug/fetchart.py @@ -14,10 +14,16 @@ """Fetches album art.""" +from __future__ import annotations + import os import re +from abc import ABC, abstractmethod from collections import OrderedDict from contextlib import closing +from enum import Enum +from functools import cached_property +from typing import TYPE_CHECKING, AnyStr, ClassVar, Literal import confuse import requests @@ -26,9 +32,17 @@ from mediafile import image_mime_type from beets import config, importer, plugins, ui, util from beets.util import bytestring_path, get_temp_filename, sorted_walk, syspath from beets.util.artresizer import ArtResizer +from beets.util.config import sanitize_pairs + +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator, Sequence + + from beets.importer import ImportSession, ImportTask + from 
beets.library import Album, Library + from beets.logging import BeetsLogger as Logger try: - from bs4 import BeautifulSoup + from bs4 import BeautifulSoup, Tag HAS_BEAUTIFUL_SOUP = True except ImportError: @@ -39,33 +53,54 @@ CONTENT_TYPES = {"image/jpeg": [b"jpg", b"jpeg"], "image/png": [b"png"]} IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts] +class ImageAction(Enum): + """Indicates whether an image is useable or requires post-processing.""" + + BAD = 0 + EXACT = 1 + DOWNSCALE = 2 + DOWNSIZE = 3 + DEINTERLACE = 4 + REFORMAT = 5 + + +class MetadataMatch(Enum): + """Indicates whether a `Candidate` matches the search criteria exactly.""" + + EXACT = 0 + FALLBACK = 1 + + +SourceLocation = Literal["local", "remote"] + + class Candidate: """Holds information about a matching artwork, deals with validation of dimension restrictions and resizing. """ - CANDIDATE_BAD = 0 - CANDIDATE_EXACT = 1 - CANDIDATE_DOWNSCALE = 2 - CANDIDATE_DOWNSIZE = 3 - CANDIDATE_DEINTERLACE = 4 - CANDIDATE_REFORMAT = 5 - - MATCH_EXACT = 0 - MATCH_FALLBACK = 1 - def __init__( - self, log, path=None, url=None, source="", match=None, size=None + self, + log: Logger, + source_name: str, + path: None | bytes = None, + url: None | str = None, + match: None | MetadataMatch = None, + size: None | tuple[int, int] = None, ): self._log = log self.path = path self.url = url - self.source = source - self.check = None + self.source_name = source_name + self._check: None | ImageAction = None self.match = match self.size = size - def _validate(self, plugin, skip_check_for=None): + def _validate( + self, + plugin: FetchArtPlugin, + skip_check_for: None | list[ImageAction] = None, + ) -> ImageAction: """Determine whether the candidate artwork is valid based on its dimensions (width and ratio). @@ -74,21 +109,16 @@ class Candidate: validated for a particular operation without changing plugin configuration. - Return `CANDIDATE_BAD` if the file is unusable. 
- Return `CANDIDATE_EXACT` if the file is usable as-is. - Return `CANDIDATE_DOWNSCALE` if the file must be rescaled. - Return `CANDIDATE_DOWNSIZE` if the file must be resized, and possibly + Return `ImageAction.BAD` if the file is unusable. + Return `ImageAction.EXACT` if the file is usable as-is. + Return `ImageAction.DOWNSCALE` if the file must be rescaled. + Return `ImageAction.DOWNSIZE` if the file must be resized, and possibly also rescaled. - Return `CANDIDATE_DEINTERLACE` if the file must be deinterlaced. - Return `CANDIDATE_REFORMAT` if the file has to be converted. + Return `ImageAction.DEINTERLACE` if the file must be deinterlaced. + Return `ImageAction.REFORMAT` if the file has to be converted. """ if not self.path: - return self.CANDIDATE_BAD - - if skip_check_for is None: - skip_check_for = [] - if isinstance(skip_check_for, int): - skip_check_for = [skip_check_for] + return ImageAction.BAD if not ( plugin.enforce_ratio @@ -98,12 +128,12 @@ class Candidate: or plugin.deinterlace or plugin.cover_format ): - return self.CANDIDATE_EXACT + return ImageAction.EXACT # get_size returns None if no local imaging backend is available if not self.size: self.size = ArtResizer.shared.get_size(self.path) - self._log.debug("image size: {}", self.size) + self._log.debug("image size: {.size}", self) if not self.size: self._log.warning( @@ -113,7 +143,7 @@ class Candidate: "`enforce_ratio` and `max_filesize` " "may be violated." ) - return self.CANDIDATE_EXACT + return ImageAction.EXACT short_edge = min(self.size) long_edge = max(self.size) @@ -121,9 +151,9 @@ class Candidate: # Check minimum dimension. if plugin.minwidth and self.size[0] < plugin.minwidth: self._log.debug( - "image too small ({} < {})", self.size[0], plugin.minwidth + "image too small ({} < {.minwidth})", self.size[0], plugin ) - return self.CANDIDATE_BAD + return ImageAction.BAD # Check aspect ratio. 
edge_diff = long_edge - short_edge @@ -132,12 +162,12 @@ class Candidate: if edge_diff > plugin.margin_px: self._log.debug( "image is not close enough to being " - "square, ({} - {} > {})", + "square, ({} - {} > {.margin_px})", long_edge, short_edge, - plugin.margin_px, + plugin, ) - return self.CANDIDATE_BAD + return ImageAction.BAD elif plugin.margin_percent: margin_px = plugin.margin_percent * long_edge if edge_diff > margin_px: @@ -148,19 +178,19 @@ class Candidate: short_edge, margin_px, ) - return self.CANDIDATE_BAD + return ImageAction.BAD elif edge_diff: # also reached for margin_px == 0 and margin_percent == 0.0 self._log.debug( "image is not square ({} != {})", self.size[0], self.size[1] ) - return self.CANDIDATE_BAD + return ImageAction.BAD # Check maximum dimension. downscale = False if plugin.maxwidth and self.size[0] > plugin.maxwidth: self._log.debug( - "image needs rescaling ({} > {})", self.size[0], plugin.maxwidth + "image needs rescaling ({} > {.maxwidth})", self.size[0], plugin ) downscale = True @@ -170,9 +200,9 @@ class Candidate: filesize = os.stat(syspath(self.path)).st_size if filesize > plugin.max_filesize: self._log.debug( - "image needs resizing ({}B > {}B)", + "image needs resizing ({}B > {.max_filesize}B)", filesize, - plugin.max_filesize, + plugin, ) downsize = True @@ -183,28 +213,34 @@ class Candidate: reformat = fmt != plugin.cover_format if reformat: self._log.debug( - "image needs reformatting: {} -> {}", + "image needs reformatting: {} -> {.cover_format}", fmt, - plugin.cover_format, + plugin, ) - if downscale and (self.CANDIDATE_DOWNSCALE not in skip_check_for): - return self.CANDIDATE_DOWNSCALE - if reformat and (self.CANDIDATE_REFORMAT not in skip_check_for): - return self.CANDIDATE_REFORMAT + skip_check_for = skip_check_for or [] + + if downscale and (ImageAction.DOWNSCALE not in skip_check_for): + return ImageAction.DOWNSCALE + if reformat and (ImageAction.REFORMAT not in skip_check_for): + return ImageAction.REFORMAT if 
plugin.deinterlace and ( - self.CANDIDATE_DEINTERLACE not in skip_check_for + ImageAction.DEINTERLACE not in skip_check_for ): - return self.CANDIDATE_DEINTERLACE - if downsize and (self.CANDIDATE_DOWNSIZE not in skip_check_for): - return self.CANDIDATE_DOWNSIZE - return self.CANDIDATE_EXACT + return ImageAction.DEINTERLACE + if downsize and (ImageAction.DOWNSIZE not in skip_check_for): + return ImageAction.DOWNSIZE + return ImageAction.EXACT - def validate(self, plugin, skip_check_for=None): - self.check = self._validate(plugin, skip_check_for) - return self.check + def validate( + self, + plugin: FetchArtPlugin, + skip_check_for: None | list[ImageAction] = None, + ) -> ImageAction: + self._check = self._validate(plugin, skip_check_for) + return self._check - def resize(self, plugin): + def resize(self, plugin: FetchArtPlugin) -> None: """Resize the candidate artwork according to the plugin's configuration until it is valid or no further resizing is possible. @@ -214,25 +250,32 @@ class Candidate: checks_performed = [] # we don't want to resize the image if it's valid or bad - while current_check not in [self.CANDIDATE_BAD, self.CANDIDATE_EXACT]: + while current_check not in [ImageAction.BAD, ImageAction.EXACT]: self._resize(plugin, current_check) checks_performed.append(current_check) current_check = self.validate( plugin, skip_check_for=checks_performed ) - def _resize(self, plugin, check=None): + def _resize( + self, plugin: FetchArtPlugin, check: None | ImageAction = None + ) -> None: """Resize the candidate artwork according to the plugin's configuration and the specified check. """ - if check == self.CANDIDATE_DOWNSCALE: + # This must only be called when _validate returned something other than + # ImageAction.Bad or ImageAction.EXACT; then path and size are known. 
+ assert self.path is not None + assert self.size is not None + + if check == ImageAction.DOWNSCALE: self.path = ArtResizer.shared.resize( plugin.maxwidth, self.path, quality=plugin.quality, max_filesize=plugin.max_filesize, ) - elif check == self.CANDIDATE_DOWNSIZE: + elif check == ImageAction.DOWNSIZE: # dimensions are correct, so maxwidth is set to maximum dimension self.path = ArtResizer.shared.resize( max(self.size), @@ -240,9 +283,9 @@ class Candidate: quality=plugin.quality, max_filesize=plugin.max_filesize, ) - elif check == self.CANDIDATE_DEINTERLACE: + elif check == ImageAction.DEINTERLACE: self.path = ArtResizer.shared.deinterlace(self.path) - elif check == self.CANDIDATE_REFORMAT: + elif check == ImageAction.REFORMAT: self.path = ArtResizer.shared.reformat( self.path, plugin.cover_format, @@ -250,7 +293,7 @@ class Candidate: ) -def _logged_get(log, *args, **kwargs): +def _logged_get(log: Logger, *args, **kwargs) -> requests.Response: """Like `requests.get`, but logs the effective URL to the specified `log` at the `DEBUG` level. @@ -286,7 +329,7 @@ def _logged_get(log, *args, **kwargs): prepped.url, {}, None, None, None ) send_kwargs.update(settings) - log.debug("{}: {}", message, prepped.url) + log.debug("{}: {.url}", message, prepped) return s.send(prepped, **send_kwargs) @@ -295,7 +338,9 @@ class RequestMixin: must be named `self._log`. """ - def request(self, *args, **kwargs): + _log: Logger + + def request(self, *args, **kwargs) -> requests.Response: """Like `requests.get`, but uses the logger `self._log`. See also `_logged_get`. 
@@ -306,55 +351,88 @@ class RequestMixin: # ART SOURCES ################################################################ -class ArtSource(RequestMixin): - VALID_MATCHING_CRITERIA = ["default"] +class ArtSource(RequestMixin, ABC): + # Specify whether this source fetches local or remote images + LOC: ClassVar[SourceLocation] + # A list of methods to match metadata, sorted by descending accuracy + VALID_MATCHING_CRITERIA: list[str] = ["default"] + # A human-readable name for the art source + NAME: ClassVar[str] + # The key to select the art source in the config. This value will also be + # stored in the database. + ID: ClassVar[str] - def __init__(self, log, config, match_by=None): + def __init__( + self, + log: Logger, + config: confuse.ConfigView, + match_by: None | list[str] = None, + ) -> None: self._log = log self._config = config self.match_by = match_by or self.VALID_MATCHING_CRITERIA + @cached_property + def description(self) -> str: + return f"{self.ID}[{', '.join(self.match_by)}]" + @staticmethod - def add_default_config(config): + def add_default_config(config: confuse.ConfigView) -> None: pass @classmethod - def available(cls, log, config): + def available(cls, log: Logger, config: confuse.ConfigView) -> bool: """Return whether or not all dependencies are met and the art source is in fact usable. 
""" return True - def get(self, album, plugin, paths): - raise NotImplementedError() + @abstractmethod + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: + pass - def _candidate(self, **kwargs): - return Candidate(source=self, log=self._log, **kwargs) + def _candidate(self, **kwargs) -> Candidate: + return Candidate(source_name=self.ID, log=self._log, **kwargs) - def fetch_image(self, candidate, plugin): - raise NotImplementedError() + @abstractmethod + def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None: + """Fetch the image to a temporary file if it is not already available + as a local file. - def cleanup(self, candidate): + After calling this, `Candidate.path` is set to the image path if + successful, or to `None` otherwise. + """ + pass + + def cleanup(self, candidate: Candidate) -> None: pass class LocalArtSource(ArtSource): - IS_LOCAL = True - LOC_STR = "local" + LOC = "local" - def fetch_image(self, candidate, plugin): + def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None: pass class RemoteArtSource(ArtSource): - IS_LOCAL = False - LOC_STR = "remote" + LOC = "remote" - def fetch_image(self, candidate, plugin): + def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None: """Downloads an image from a URL and checks whether it seems to - actually be an image. If so, returns a path to the downloaded image. - Otherwise, returns None. + actually be an image. """ + # This must only be called for candidates that were returned by + # self.get, which are expected to have a url and no path (because they + # haven't been downloaded yet). 
+ assert candidate.path is None + assert candidate.url is not None + if plugin.maxwidth: candidate.url = ArtResizer.shared.proxy_url( plugin.maxwidth, candidate.url @@ -418,7 +496,7 @@ class RemoteArtSource(ArtSource): for chunk in data: fh.write(chunk) self._log.debug( - "downloaded art to: {0}", util.displayable_path(filename) + "downloaded art to: {}", util.displayable_path(filename) ) candidate.path = util.bytestring_path(filename) return @@ -429,7 +507,7 @@ class RemoteArtSource(ArtSource): self._log.debug("error fetching art: {}", exc) return - def cleanup(self, candidate): + def cleanup(self, candidate: Candidate) -> None: if candidate.path: try: util.remove(path=candidate.path) @@ -439,34 +517,39 @@ class RemoteArtSource(ArtSource): class CoverArtArchive(RemoteArtSource): NAME = "Cover Art Archive" + ID = "coverart" VALID_MATCHING_CRITERIA = ["release", "releasegroup"] VALID_THUMBNAIL_SIZES = [250, 500, 1200] URL = "https://coverartarchive.org/release/{mbid}" GROUP_URL = "https://coverartarchive.org/release-group/{mbid}" - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: """Return the Cover Art Archive and Cover Art Archive release group URLs using album MusicBrainz release ID and release group ID. 
""" - def get_image_urls(url, preferred_width=None): + def get_image_urls( + url: str, + preferred_width: None | str = None, + ) -> Iterator[str]: try: response = self.request(url) except requests.RequestException: - self._log.debug( - "{}: error receiving response".format(self.NAME) - ) + self._log.debug("{.NAME}: error receiving response", self) return try: data = response.json() except ValueError: self._log.debug( - "{}: error loading response: {}".format( - self.NAME, response.text - ) + "{.NAME}: error loading response: {.text}", self, response ) return @@ -500,41 +583,53 @@ class CoverArtArchive(RemoteArtSource): if "release" in self.match_by and album.mb_albumid: for url in get_image_urls(release_url, preferred_width): - yield self._candidate(url=url, match=Candidate.MATCH_EXACT) + yield self._candidate(url=url, match=MetadataMatch.EXACT) if "releasegroup" in self.match_by and album.mb_releasegroupid: for url in get_image_urls(release_group_url, preferred_width): - yield self._candidate(url=url, match=Candidate.MATCH_FALLBACK) + yield self._candidate(url=url, match=MetadataMatch.FALLBACK) class Amazon(RemoteArtSource): NAME = "Amazon" - URL = "https://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg" + ID = "amazon" + URL = "https://images.amazon.com/images/P/{}.{:02d}.LZZZZZZZ.jpg" INDICES = (1, 2) - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: """Generate URLs using Amazon ID (ASIN) string.""" if album.asin: for index in self.INDICES: yield self._candidate( - url=self.URL % (album.asin, index), - match=Candidate.MATCH_EXACT, + url=self.URL.format(album.asin, index), + match=MetadataMatch.EXACT, ) class AlbumArtOrg(RemoteArtSource): NAME = "AlbumArt.org scraper" + ID = "albumart" URL = "https://www.albumart.org/index_detail.php" PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' - def get(self, album, plugin, paths): + def get( + 
self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ): """Return art URL from AlbumArt.org using album ASIN.""" if not album.asin: return # Get the page from albumart.org. try: resp = self.request(self.URL, params={"asin": album.asin}) - self._log.debug("scraped art URL: {0}", resp.url) + self._log.debug("scraped art URL: {.url}", resp) except requests.RequestException: self._log.debug("error scraping art page") return @@ -543,13 +638,14 @@ class AlbumArtOrg(RemoteArtSource): m = re.search(self.PAT, resp.text) if m: image_url = m.group(1) - yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) + yield self._candidate(url=image_url, match=MetadataMatch.EXACT) else: self._log.debug("no image found on page") class GoogleImages(RemoteArtSource): NAME = "Google Images" + ID = "google" URL = "https://www.googleapis.com/customsearch/v1" def __init__(self, *args, **kwargs): @@ -558,7 +654,7 @@ class GoogleImages(RemoteArtSource): self.cx = (self._config["google_engine"].get(),) @staticmethod - def add_default_config(config): + def add_default_config(config: confuse.ConfigView): config.add( { "google_key": None, @@ -566,21 +662,27 @@ class GoogleImages(RemoteArtSource): } ) config["google_key"].redact = True + config["google_engine"].redact = True @classmethod - def available(cls, log, config): + def available(cls, log: Logger, config: confuse.ConfigView) -> bool: has_key = bool(config["google_key"].get()) if not has_key: log.debug("google: Disabling art source due to missing key") return has_key - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: """Return art URL from google custom search engine given an album title and interpreter. 
""" if not (album.albumartist and album.album): return - search_string = (album.albumartist + "," + album.album).encode("utf-8") + search_string = f"{album.albumartist},{album.album}".encode() try: response = self.request( @@ -600,20 +702,18 @@ class GoogleImages(RemoteArtSource): try: data = response.json() except ValueError: - self._log.debug( - "google: error loading response: {}".format(response.text) - ) + self._log.debug("google: error loading response: {.text}", response) return if "error" in data: reason = data["error"]["errors"][0]["reason"] - self._log.debug("google fetchart error: {0}", reason) + self._log.debug("google fetchart error: {}", reason) return if "items" in data.keys(): for item in data["items"]: yield self._candidate( - url=item["link"], match=Candidate.MATCH_EXACT + url=item["link"], match=MetadataMatch.EXACT ) @@ -621,8 +721,9 @@ class FanartTV(RemoteArtSource): """Art from fanart.tv requested using their API""" NAME = "fanart.tv" + ID = "fanarttv" API_URL = "https://webservice.fanart.tv/v3/" - API_ALBUMS = API_URL + "music/albums/" + API_ALBUMS = f"{API_URL}music/albums/" PROJECT_KEY = "61a7d0ab4e67162b7a0c7c35915cd48e" def __init__(self, *args, **kwargs): @@ -630,7 +731,7 @@ class FanartTV(RemoteArtSource): self.client_key = self._config["fanarttv_key"].get() @staticmethod - def add_default_config(config): + def add_default_config(config: confuse.ConfigView): config.add( { "fanarttv_key": None, @@ -638,13 +739,18 @@ class FanartTV(RemoteArtSource): ) config["fanarttv_key"].redact = True - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: if not album.mb_releasegroupid: return try: response = self.request( - self.API_ALBUMS + album.mb_releasegroupid, + f"{self.API_ALBUMS}{album.mb_releasegroupid}", headers={ "api-key": self.PROJECT_KEY, "client-key": self.client_key, @@ -658,7 +764,7 @@ class FanartTV(RemoteArtSource): data = 
response.json() except ValueError: self._log.debug( - "fanart.tv: error loading response: {}", response.text + "fanart.tv: error loading response: {.text}", response ) return @@ -686,7 +792,7 @@ class FanartTV(RemoteArtSource): # can this actually occur? else: self._log.debug( - "fanart.tv: unexpected mb_releasegroupid in " "response!" + "fanart.tv: unexpected mb_releasegroupid in response!" ) matches.sort(key=lambda x: int(x["likes"]), reverse=True) @@ -694,21 +800,27 @@ class FanartTV(RemoteArtSource): # fanart.tv has a strict size requirement for album art to be # uploaded yield self._candidate( - url=item["url"], match=Candidate.MATCH_EXACT, size=(1000, 1000) + url=item["url"], match=MetadataMatch.EXACT, size=(1000, 1000) ) class ITunesStore(RemoteArtSource): NAME = "iTunes Store" + ID = "itunes" API_URL = "https://itunes.apple.com/search" - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: """Return art URL from iTunes Store given an album title.""" if not (album.albumartist and album.album): return payload = { - "term": album.albumartist + " " + album.album, + "term": f"{album.albumartist} {album.album}", "entity": "album", "media": "music", "limit": 200, @@ -717,13 +829,13 @@ class ITunesStore(RemoteArtSource): r = self.request(self.API_URL, params=payload) r.raise_for_status() except requests.RequestException as e: - self._log.debug("iTunes search failed: {0}", e) + self._log.debug("iTunes search failed: {}", e) return try: candidates = r.json()["results"] except ValueError as e: - self._log.debug("Could not decode json response: {0}", e) + self._log.debug("Could not decode json response: {}", e) return except KeyError as e: self._log.debug( @@ -751,7 +863,7 @@ class ITunesStore(RemoteArtSource): art_url = c["artworkUrl100"] art_url = art_url.replace("100x100bb", image_suffix) yield self._candidate( - url=art_url, match=Candidate.MATCH_EXACT + 
url=art_url, match=MetadataMatch.EXACT ) except KeyError as e: self._log.debug( @@ -766,7 +878,7 @@ class ITunesStore(RemoteArtSource): "100x100bb", image_suffix ) yield self._candidate( - url=fallback_art_url, match=Candidate.MATCH_FALLBACK + url=fallback_art_url, match=MetadataMatch.FALLBACK ) except KeyError as e: self._log.debug( @@ -778,6 +890,7 @@ class ITunesStore(RemoteArtSource): class Wikipedia(RemoteArtSource): NAME = "Wikipedia (queried through DBpedia)" + ID = "wikipedia" DBPEDIA_URL = "https://dbpedia.org/sparql" WIKIPEDIA_URL = "https://en.wikipedia.org/w/api.php" SPARQL_QUERY = """PREFIX rdf: @@ -802,7 +915,12 @@ class Wikipedia(RemoteArtSource): }} Limit 1""" - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: if not (album.albumartist and album.album): return @@ -829,14 +947,14 @@ class Wikipedia(RemoteArtSource): data = dbpedia_response.json() results = data["results"]["bindings"] if results: - cover_filename = "File:" + results[0]["coverFilename"]["value"] + cover_filename = f"File:{results[0]['coverFilename']['value']}" page_id = results[0]["pageId"]["value"] else: self._log.debug("wikipedia: album not found on dbpedia") except (ValueError, KeyError, IndexError): self._log.debug( - "wikipedia: error scraping dbpedia response: {}", - dbpedia_response.text, + "wikipedia: error scraping dbpedia response: {.text}", + dbpedia_response, ) # Ensure we have a filename before attempting to query wikipedia @@ -878,7 +996,7 @@ class Wikipedia(RemoteArtSource): results = data["query"]["pages"][page_id]["images"] for result in results: if re.match( - re.escape(lpart) + r".*?\." 
+ re.escape(rpart), + rf"{re.escape(lpart)}.*?\.{re.escape(rpart)}", result["title"], ): cover_filename = result["title"] @@ -912,9 +1030,7 @@ class Wikipedia(RemoteArtSource): results = data["query"]["pages"] for _, result in results.items(): image_url = result["imageinfo"][0]["url"] - yield self._candidate( - url=image_url, match=Candidate.MATCH_EXACT - ) + yield self._candidate(url=image_url, match=MetadataMatch.EXACT) except (ValueError, KeyError, IndexError): self._log.debug("wikipedia: error scraping imageinfo") return @@ -922,9 +1038,12 @@ class Wikipedia(RemoteArtSource): class FileSystem(LocalArtSource): NAME = "Filesystem" + ID = "filesystem" @staticmethod - def filename_priority(filename, cover_names): + def filename_priority( + filename: AnyStr, cover_names: Sequence[AnyStr] + ) -> list[int]: """Sort order for image names. Return indexes of cover names found in the image filename. This @@ -933,7 +1052,12 @@ class FileSystem(LocalArtSource): """ return [idx for (idx, x) in enumerate(cover_names) if x in filename] - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: """Look for album art files in the specified directories.""" if not paths: return @@ -968,29 +1092,40 @@ class FileSystem(LocalArtSource): for fn in images: if re.search(cover_pat, os.path.splitext(fn)[0], re.I): self._log.debug( - "using well-named art file {0}", + "using well-named art file {}", util.displayable_path(fn), ) yield self._candidate( - path=os.path.join(path, fn), match=Candidate.MATCH_EXACT + path=os.path.join(path, fn), match=MetadataMatch.EXACT ) else: remaining.append(fn) + # Fall back to a configured image. + if plugin.fallback: + self._log.debug( + "using fallback art file {}", + util.displayable_path(plugin.fallback), + ) + yield self._candidate( + path=plugin.fallback, match=MetadataMatch.FALLBACK + ) + # Fall back to any image in the folder. 
if remaining and not plugin.cautious: self._log.debug( - "using fallback art file {0}", + "using fallback art file {}", util.displayable_path(remaining[0]), ) yield self._candidate( path=os.path.join(path, remaining[0]), - match=Candidate.MATCH_FALLBACK, + match=MetadataMatch.FALLBACK, ) class LastFM(RemoteArtSource): NAME = "Last.fm" + ID = "lastfm" # Sizes in priority order. SIZES = OrderedDict( @@ -1005,12 +1140,12 @@ class LastFM(RemoteArtSource): API_URL = "https://ws.audioscrobbler.com/2.0" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.key = (self._config["lastfm_key"].get(),) @staticmethod - def add_default_config(config): + def add_default_config(config: confuse.ConfigView) -> None: config.add( { "lastfm_key": None, @@ -1019,13 +1154,18 @@ class LastFM(RemoteArtSource): config["lastfm_key"].redact = True @classmethod - def available(cls, log, config): + def available(cls, log: Logger, config: confuse.ConfigView) -> bool: has_key = bool(config["lastfm_key"].get()) if not has_key: log.debug("lastfm: Disabling art source due to missing key") return has_key - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: if not album.mb_albumid: return @@ -1049,7 +1189,7 @@ class LastFM(RemoteArtSource): if "error" in data: if data["error"] == 6: self._log.debug( - "lastfm: no results for {}", album.mb_albumid + "lastfm: no results for {.mb_albumid}", album ) else: self._log.error( @@ -1070,19 +1210,18 @@ class LastFM(RemoteArtSource): url=images[size], size=self.SIZES[size] ) except ValueError: - self._log.debug( - "lastfm: error loading response: {}".format(response.text) - ) + self._log.debug("lastfm: error loading response: {.text}", response) return class Spotify(RemoteArtSource): NAME = "Spotify" + ID = "spotify" SPOTIFY_ALBUM_URL = "https://open.spotify.com/album/" @classmethod - 
def available(cls, log, config): + def available(cls, log: Logger, config: confuse.ConfigView) -> bool: if not HAS_BEAUTIFUL_SOUP: log.debug( "To use Spotify as an album art source, " @@ -1091,31 +1230,44 @@ class Spotify(RemoteArtSource): ) return HAS_BEAUTIFUL_SOUP - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: try: - url = self.SPOTIFY_ALBUM_URL + album.items().get().spotify_album_id + url = f"{self.SPOTIFY_ALBUM_URL}{album.items().get().spotify_album_id}" except AttributeError: self._log.debug("Fetchart: no Spotify album ID found") return + try: response = requests.get(url, timeout=10) response.raise_for_status() except requests.RequestException as e: - self._log.debug("Error: " + str(e)) + self._log.debug("Error: {!s}", e) return + try: html = response.text soup = BeautifulSoup(html, "html.parser") - image_url = soup.find("meta", attrs={"property": "og:image"})[ - "content" - ] - yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) except ValueError: self._log.debug( - "Spotify: error loading response: {}".format(response.text) + "Spotify: error loading response: {.text}", response ) return + tag = soup.find("meta", attrs={"property": "og:image"}) + if tag is None or not isinstance(tag, Tag): + self._log.debug( + "Spotify: Unexpected response, og:image tag missing" + ) + return + + image_url = tag["content"] + yield self._candidate(url=image_url, match=MetadataMatch.EXACT) + class CoverArtUrl(RemoteArtSource): # This source is intended to be used with a plugin that sets the @@ -1124,8 +1276,14 @@ class CoverArtUrl(RemoteArtSource): # use that URL to fetch the image. 
NAME = "Cover Art URL" + ID = "cover_art_url" - def get(self, album, plugin, paths): + def get( + self, + album: Album, + plugin: FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[Candidate]: image_url = None try: # look for cover_art_url on album or first track @@ -1133,49 +1291,32 @@ class CoverArtUrl(RemoteArtSource): image_url = album.cover_art_url else: image_url = album.items().get().cover_art_url - self._log.debug(f"Cover art URL {image_url} found for {album}") + self._log.debug("Cover art URL {} found for {}", image_url, album) except (AttributeError, TypeError): - self._log.debug(f"Cover art URL not found for {album}") + self._log.debug("Cover art URL not found for {}", album) return if image_url: - yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT) + yield self._candidate(url=image_url, match=MetadataMatch.EXACT) else: - self._log.debug(f"Cover art URL not found for {album}") + self._log.debug("Cover art URL not found for {}", album) return -# Try each source in turn. - -# Note that SOURCES_ALL is redundant (and presently unused). However, we keep -# it around nn order not break plugins that "register" (a.k.a. monkey-patch) -# their own fetchart sources. -SOURCES_ALL = [ - "filesystem", - "coverart", - "itunes", - "amazon", - "albumart", - "wikipedia", - "google", - "fanarttv", - "lastfm", - "spotify", -] - -ART_SOURCES = { - "filesystem": FileSystem, - "coverart": CoverArtArchive, - "itunes": ITunesStore, - "albumart": AlbumArtOrg, - "amazon": Amazon, - "wikipedia": Wikipedia, - "google": GoogleImages, - "fanarttv": FanartTV, - "lastfm": LastFM, - "spotify": Spotify, - "cover_art_url": CoverArtUrl, +# All art sources. The order they will be tried in is specified by the config. 
+ART_SOURCES: set[type[ArtSource]] = { + FileSystem, + CoverArtArchive, + ITunesStore, + AlbumArtOrg, + Amazon, + Wikipedia, + GoogleImages, + FanartTV, + LastFM, + Spotify, + CoverArtUrl, } -SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()} + # PLUGIN LOGIC ############################################################### @@ -1184,12 +1325,12 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): PAT_PX = r"(0|[1-9][0-9]*)px" PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%" - def __init__(self): + def __init__(self) -> None: super().__init__() # Holds candidates corresponding to downloaded images between # fetching them and placing them in the filesystem. - self.art_candidates = {} + self.art_candidates: dict[ImportTask, Candidate] = {} self.config.add( { @@ -1201,6 +1342,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): "enforce_ratio": False, "cautious": False, "cover_names": ["cover", "front", "art", "album", "folder"], + "fallback": None, "sources": [ "filesystem", "coverart", @@ -1215,7 +1357,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): "cover_format": None, } ) - for source in ART_SOURCES.values(): + for source in ART_SOURCES: source.add_default_config(self.config) self.minwidth = self.config["minwidth"].get(int) @@ -1236,7 +1378,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): self.margin_px = None self.margin_percent = None self.deinterlace = self.config["deinterlace"].get(bool) - if type(self.enforce_ratio) is str: + if isinstance(self.enforce_ratio, str): if self.enforce_ratio[-1] == "%": self.margin_percent = float(self.enforce_ratio[:-1]) / 100 elif self.enforce_ratio[-2:] == "px": @@ -1249,6 +1391,9 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): cover_names = self.config["cover_names"].as_str_seq() self.cover_names = list(map(util.bytestring_path, cover_names)) self.cautious = self.config["cautious"].get(bool) + self.fallback = self.config["fallback"].get( + 
confuse.Optional(confuse.Filename()) + ) self.store_source = self.config["store_source"].get(bool) self.cover_format = self.config["cover_format"].get( @@ -1261,12 +1406,12 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): self.register_listener("import_task_files", self.assign_art) available_sources = [ - (s_name, c) - for (s_name, s_cls) in ART_SOURCES.items() + (s_cls.ID, c) + for s_cls in ART_SOURCES if s_cls.available(self._log, self.config) for c in s_cls.VALID_MATCHING_CRITERIA ] - sources = plugins.sanitize_pairs( + sources = sanitize_pairs( self.config["sources"].as_pairs(default_value="*"), available_sources, ) @@ -1287,17 +1432,21 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): others.append((s, c)) sources = others + fs + sources_by_name = {s_cls.ID: s_cls for s_cls in ART_SOURCES} + self.sources = [ - ART_SOURCES[s](self._log, self.config, match_by=[c]) + sources_by_name[s](self._log, self.config, match_by=[c]) for s, c in sources ] @staticmethod - def _is_source_file_removal_enabled(): - return config["import"]["delete"] or config["import"]["move"] + def _is_source_file_removal_enabled() -> bool: + return config["import"]["delete"].get(bool) or config["import"][ + "move" + ].get(bool) # Asynchronous; after music is added to the library. - def fetch_art(self, session, task): + def fetch_art(self, session: ImportSession, task: ImportTask) -> None: """Find art for the album being imported.""" if task.is_album: # Only fetch art for full albums. if task.album.artpath and os.path.isfile( @@ -1305,12 +1454,12 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): ): # Album already has art (probably a re-import); skip it. return - if task.choice_flag == importer.action.ASIS: + if task.choice_flag == importer.Action.ASIS: # For as-is imports, don't search Web sources for art. 
local = True elif task.choice_flag in ( - importer.action.APPLY, - importer.action.RETAG, + importer.Action.APPLY, + importer.Action.RETAG, ): # Search everywhere for art. local = False @@ -1323,22 +1472,24 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): if candidate: self.art_candidates[task] = candidate - def _set_art(self, album, candidate, delete=False): + def _set_art( + self, album: Album, candidate: Candidate, delete: bool = False + ) -> None: album.set_art(candidate.path, delete) if self.store_source: # store the source of the chosen artwork in a flexible field self._log.debug( "Storing art_source for {0.albumartist} - {0.album}", album ) - album.art_source = SOURCE_NAMES[type(candidate.source)] + album.art_source = candidate.source_name album.store() # Synchronous; after music files are put in place. - def assign_art(self, session, task): + def assign_art(self, session: ImportSession, task: ImportTask): """Place the discovered art in the filesystem.""" if task in self.art_candidates: candidate = self.art_candidates.pop(task) - removal_enabled = FetchArtPlugin._is_source_file_removal_enabled() + removal_enabled = self._is_source_file_removal_enabled() self._set_art(task.album, candidate, not removal_enabled) @@ -1346,7 +1497,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): task.prune(candidate.path) # Manual album art fetching. 
- def commands(self): + def commands(self) -> list[ui.Subcommand]: cmd = ui.Subcommand("fetchart", help="download album art") cmd.parser.add_option( "-f", @@ -1365,17 +1516,20 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): help="quiet mode: do not output albums that already have artwork", ) - def func(lib, opts, args): - self.batch_fetch_art( - lib, lib.albums(ui.decargs(args)), opts.force, opts.quiet - ) + def func(lib: Library, opts, args) -> None: + self.batch_fetch_art(lib, lib.albums(args), opts.force, opts.quiet) cmd.func = func return [cmd] # Utilities converted from functions to methods on logging overhaul - def art_for_album(self, album, paths, local_only=False): + def art_for_album( + self, + album: Album, + paths: None | Sequence[bytes], + local_only: bool = False, + ) -> None | Candidate: """Given an Album object, returns a path to downloaded art for the album (or None if no art is found). If `maxwidth`, then images are resized to this maximum pixel size. If `quality` then resized images @@ -1386,22 +1540,22 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): out = None for source in self.sources: - if source.IS_LOCAL or not local_only: + if source.LOC == "local" or not local_only: self._log.debug( - "trying source {0} for album {1.albumartist} - {1.album}", - SOURCE_NAMES[type(source)], + "trying source {0.description}" + " for album {1.albumartist} - {1.album}", + source, album, ) # URLs might be invalid at this point, or the image may not # fulfill the requirements for candidate in source.get(album, self, paths): source.fetch_image(candidate, self) - if candidate.validate(self): + if candidate.validate(self) != ImageAction.BAD: out = candidate + assert out.path is not None # help mypy self._log.debug( - "using {0.LOC_STR} image {1}".format( - source, util.displayable_path(out.path) - ) + "using {.LOC} image {.path}", source, out ) break # Remove temporary files for invalid candidates. 
@@ -1414,7 +1568,13 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): return out - def batch_fetch_art(self, lib, albums, force, quiet): + def batch_fetch_art( + self, + lib: Library, + albums: Iterable[Album], + force: bool, + quiet: bool, + ) -> None: """Fetch album art for each of the albums. This implements the manual fetchart CLI command. """ @@ -1428,7 +1588,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): message = ui.colorize( "text_highlight_minor", "has album art" ) - self._log.info("{0}: {1}", album, message) + ui.print_(f"{album}: {message}") else: # In ordinary invocations, look for images on the # filesystem. When forcing, however, always go to the Web @@ -1441,4 +1601,4 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin): message = ui.colorize("text_success", "found album art") else: message = ui.colorize("text_error", "no art found") - self._log.info("{0}: {1}", album, message) + ui.print_(f"{album}: {message}") diff --git a/beetsplug/fish.py b/beetsplug/fish.py index 4cf9b60a1..b1518f1c4 100644 --- a/beetsplug/fish.py +++ b/beetsplug/fish.py @@ -89,8 +89,9 @@ class FishPlugin(BeetsPlugin): "-o", "--output", default="~/.config/fish/completions/beet.fish", - help="where to save the script. default: " - "~/.config/fish/completions", + help=( + "where to save the script. 
default: ~/.config/fish/completions" + ), ) return [cmd] @@ -122,23 +123,13 @@ class FishPlugin(BeetsPlugin): for name in names: cmd_names_help.append((name, cmd.help)) # Concatenate the string - totstring = HEAD + "\n" + totstring = f"{HEAD}\n" totstring += get_cmds_list([name[0] for name in cmd_names_help]) totstring += "" if nobasicfields else get_standard_fields(fields) totstring += get_extravalues(lib, extravalues) if extravalues else "" - totstring += ( - "\n" - + "# ====== {} =====".format("setup basic beet completion") - + "\n" * 2 - ) + totstring += "\n# ====== setup basic beet completion =====\n\n" totstring += get_basic_beet_options() - totstring += ( - "\n" - + "# ====== {} =====".format( - "setup field completion for subcommands" - ) - + "\n" - ) + totstring += "\n# ====== setup field completion for subcommands =====\n" totstring += get_subcommands(cmd_names_help, nobasicfields, extravalues) # Set up completion for all the command options totstring += get_all_commands(beetcmds) @@ -150,23 +141,19 @@ class FishPlugin(BeetsPlugin): def _escape(name): # Escape ? 
in fish if name == "?": - name = "\\" + name + name = f"\\{name}" return name def get_cmds_list(cmds_names): # Make a list of all Beets core & plugin commands - substr = "" - substr += "set CMDS " + " ".join(cmds_names) + ("\n" * 2) - return substr + return f"set CMDS {' '.join(cmds_names)}\n\n" def get_standard_fields(fields): # Make a list of album/track fields and append with ':' - fields = (field + ":" for field in fields) - substr = "" - substr += "set FIELDS " + " ".join(fields) + ("\n" * 2) - return substr + fields = (f"{field}:" for field in fields) + return f"set FIELDS {' '.join(fields)}\n\n" def get_extravalues(lib, extravalues): @@ -175,14 +162,8 @@ def get_extravalues(lib, extravalues): word = "" values_set = get_set_of_values_for_field(lib, extravalues) for fld in extravalues: - extraname = fld.upper() + "S" - word += ( - "set " - + extraname - + " " - + " ".join(sorted(values_set[fld])) - + ("\n" * 2) - ) + extraname = f"{fld.upper()}S" + word += f"set {extraname} {' '.join(sorted(values_set[fld]))}\n\n" return word @@ -226,35 +207,29 @@ def get_subcommands(cmd_name_and_help, nobasicfields, extravalues): for cmdname, cmdhelp in cmd_name_and_help: cmdname = _escape(cmdname) - word += ( - "\n" - + "# ------ {} -------".format("fieldsetups for " + cmdname) - + "\n" - ) + word += f"\n# ------ fieldsetups for {cmdname} -------\n" word += BL_NEED2.format( - ("-a " + cmdname), ("-f " + "-d " + wrap(clean_whitespace(cmdhelp))) + f"-a {cmdname}", f"-f -d {wrap(clean_whitespace(cmdhelp))}" ) if nobasicfields is False: word += BL_USE3.format( cmdname, - ("-a " + wrap("$FIELDS")), - ("-f " + "-d " + wrap("fieldname")), + f"-a {wrap('$FIELDS')}", + f"-f -d {wrap('fieldname')}", ) if extravalues: for f in extravalues: - setvar = wrap("$" + f.upper() + "S") - word += ( - " ".join( - BL_EXTRA3.format( - (cmdname + " " + f + ":"), - ("-f " + "-A " + "-a " + setvar), - ("-d " + wrap(f)), - ).split() - ) - + "\n" + setvar = wrap(f"${f.upper()}S") + word += " ".join( + 
BL_EXTRA3.format( + f"{cmdname} {f}:", + f"-f -A -a {setvar}", + f"-d {wrap(f)}", + ).split() ) + word += "\n" return word @@ -267,59 +242,44 @@ def get_all_commands(beetcmds): for name in names: name = _escape(name) - word += "\n" - word += ( - ("\n" * 2) - + "# ====== {} =====".format("completions for " + name) - + "\n" - ) + word += f"\n\n\n# ====== completions for {name} =====\n" for option in cmd.parser._get_all_options()[1:]: cmd_l = ( - (" -l " + option._long_opts[0].replace("--", "")) + f" -l {option._long_opts[0].replace('--', '')}" if option._long_opts else "" ) cmd_s = ( - (" -s " + option._short_opts[0].replace("-", "")) + f" -s {option._short_opts[0].replace('-', '')}" if option._short_opts else "" ) cmd_need_arg = " -r " if option.nargs in [1] else "" cmd_helpstr = ( - (" -d " + wrap(" ".join(option.help.split()))) + f" -d {wrap(' '.join(option.help.split()))}" if option.help else "" ) cmd_arglist = ( - (" -a " + wrap(" ".join(option.choices))) + f" -a {wrap(' '.join(option.choices))}" if option.choices else "" ) - word += ( - " ".join( - BL_USE3.format( - name, - ( - cmd_need_arg - + cmd_s - + cmd_l - + " -f " - + cmd_arglist - ), - cmd_helpstr, - ).split() - ) - + "\n" + word += " ".join( + BL_USE3.format( + name, + f"{cmd_need_arg}{cmd_s}{cmd_l} -f {cmd_arglist}", + cmd_helpstr, + ).split() ) + word += "\n" - word = word + " ".join( - BL_USE3.format( - name, - ("-s " + "h " + "-l " + "help" + " -f "), - ("-d " + wrap("print help") + "\n"), - ).split() + word = word + BL_USE3.format( + name, + "-s h -l help -f", + f"-d {wrap('print help')}", ) return word @@ -332,9 +292,9 @@ def clean_whitespace(word): def wrap(word): # Need " or ' around strings but watch out if they're in the string sptoken = '"' - if ('"') in word and ("'") in word: + if '"' in word and ("'") in word: word.replace('"', sptoken) - return '"' + word + '"' + return f'"{word}"' tok = '"' if "'" in word else "'" - return tok + word + tok + return f"{tok}{word}{tok}" diff --git 
a/beetsplug/fromfilename.py b/beetsplug/fromfilename.py index 103e82901..c3fb4bc6b 100644 --- a/beetsplug/fromfilename.py +++ b/beetsplug/fromfilename.py @@ -12,8 +12,8 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -"""If the title is empty, try to extract track and title from the -filename. +"""If the title is empty, try to extract it from the filename +(possibly also extract track and artist) """ import os @@ -25,12 +25,12 @@ from beets.util import displayable_path # Filename field extraction patterns. PATTERNS = [ # Useful patterns. - r"^(?P.+)[\-_](?P.+)[\-_](?P<tag>.*)$", - r"^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$", - r"^(?P<artist>.+)[\-_](?P<title>.+)$", - r"^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)$", - r"^(?P<track>\d+)[\s.\-_]+(?P<title>.+)$", - r"^(?P<track>\d+)\s+(?P<title>.+)$", + ( + r"^(?P<track>\d+)\.?\s*-\s*(?P<artist>.+?)\s*-\s*(?P<title>.+?)" + r"(\s*-\s*(?P<tag>.*))?$" + ), + r"^(?P<artist>.+?)\s*-\s*(?P<title>.+?)(\s*-\s*(?P<tag>.*))?$", + r"^(?P<track>\d+)\.?[\s_-]+(?P<title>.+)$", r"^(?P<title>.+) by (?P<artist>.+)$", r"^(?P<track>\d+).*$", r"^(?P<title>.+)$", @@ -98,6 +98,7 @@ def apply_matches(d, log): # Given both an "artist" and "title" field, assume that one is # *actually* the artist, which must be uniform, and use the other # for the title. This, of course, won't work for VA albums. + # Only check for "artist": patterns containing it, also contain "title" if "artist" in keys: if equal_fields(d, "artist"): artist = some_map["artist"] @@ -112,21 +113,22 @@ def apply_matches(d, log): for item in d: if not item.artist: item.artist = artist - log.info("Artist replaced with: {}".format(item.artist)) - - # No artist field: remaining field is the title. 
- else: + log.info("Artist replaced with: {.artist}", item) + # otherwise, if the pattern contains "title", use that for title_field + elif "title" in keys: title_field = "title" + else: + title_field = None - # Apply the title and track. + # Apply the title and track, if any. for item in d: - if bad_title(item.title): + if title_field and bad_title(item.title): item.title = str(d[item][title_field]) - log.info("Title replaced with: {}".format(item.title)) + log.info("Title replaced with: {.title}", item) if "track" in d[item] and item.track == 0: item.track = int(d[item]["track"]) - log.info("Track replaced with: {}".format(item.track)) + log.info("Track replaced with: {.track}", item) # Plugin structure and hook into import process. @@ -160,6 +162,7 @@ class FromFilenamePlugin(plugins.BeetsPlugin): # Look for useful information in the filenames. for pattern in PATTERNS: + self._log.debug(f"Trying pattern: {pattern}") d = all_matches(names, pattern) if d: apply_matches(d, self._log) diff --git a/beetsplug/ftintitle.py b/beetsplug/ftintitle.py index a85aa9719..fde7ff92a 100644 --- a/beetsplug/ftintitle.py +++ b/beetsplug/ftintitle.py @@ -17,25 +17,67 @@ from __future__ import annotations import re +from functools import cached_property, lru_cache from typing import TYPE_CHECKING -from beets import plugins, ui -from beets.util import displayable_path +from beets import config, plugins, ui if TYPE_CHECKING: from beets.importer import ImportSession, ImportTask - from beets.library import Item + from beets.library import Album, Item -def split_on_feat(artist: str) -> tuple[str, str | None]: +DEFAULT_BRACKET_KEYWORDS: tuple[str, ...] 
= ( + "abridged", + "acapella", + "club", + "demo", + "edit", + "edition", + "extended", + "instrumental", + "live", + "mix", + "radio", + "release", + "remaster", + "remastered", + "remix", + "rmx", + "unabridged", + "unreleased", + "version", + "vip", +) + + +def split_on_feat( + artist: str, + for_artist: bool = True, + custom_words: list[str] | None = None, +) -> tuple[str, str | None]: """Given an artist string, split the "main" artist from any artist on the right-hand side of a string like "feat". Return the main artist, which is always a string, and the featuring artist, which may be a string or None if none is present. """ - # split on the first "feat". - regex = re.compile(plugins.feat_tokens(), re.IGNORECASE) - parts = tuple(s.strip() for s in regex.split(artist, 1)) + # Try explicit featuring tokens first (ft, feat, featuring, etc.) + # to avoid splitting on generic separators like "&" when both are present + regex_explicit = re.compile( + plugins.feat_tokens(for_artist=False, custom_words=custom_words), + re.IGNORECASE, + ) + parts = tuple(s.strip() for s in regex_explicit.split(artist, 1)) + if len(parts) == 2: + return parts + + # Fall back to all tokens including generic separators if no explicit match + if for_artist: + regex = re.compile( + plugins.feat_tokens(for_artist, custom_words), re.IGNORECASE + ) + parts = tuple(s.strip() for s in regex.split(artist, 1)) + if len(parts) == 1: return parts[0], None else: @@ -43,46 +85,96 @@ def split_on_feat(artist: str) -> tuple[str, str | None]: return parts -def contains_feat(title: str) -> bool: +def contains_feat(title: str, custom_words: list[str] | None = None) -> bool: """Determine whether the title contains a "featured" marker.""" return bool( re.search( - plugins.feat_tokens(for_artist=False), + plugins.feat_tokens(for_artist=False, custom_words=custom_words), title, flags=re.IGNORECASE, ) ) -def find_feat_part(artist: str, albumartist: str) -> str | None: +def find_feat_part( + artist: str, + 
albumartist: str | None, + custom_words: list[str] | None = None, +) -> str | None: """Attempt to find featured artists in the item's artist fields and return the results. Returns None if no featured artist found. """ - # Look for the album artist in the artist field. If it's not - # present, give up. - albumartist_split = artist.split(albumartist, 1) - if len(albumartist_split) <= 1: - return None + # Handle a wider variety of extraction cases if the album artist is + # contained within the track artist. + if albumartist and albumartist in artist: + albumartist_split = artist.split(albumartist, 1) - # If the last element of the split (the right-hand side of the - # album artist) is nonempty, then it probably contains the - # featured artist. - elif albumartist_split[1] != "": - # Extract the featured artist from the right-hand side. - _, feat_part = split_on_feat(albumartist_split[1]) - return feat_part + # If the last element of the split (the right-hand side of the + # album artist) is nonempty, then it probably contains the + # featured artist. + if albumartist_split[1] != "": + # Extract the featured artist from the right-hand side. + _, feat_part = split_on_feat( + albumartist_split[1], custom_words=custom_words + ) + return feat_part - # Otherwise, if there's nothing on the right-hand side, look for a - # featuring artist on the left-hand side. - else: - lhs, rhs = split_on_feat(albumartist_split[0]) - if lhs: - return lhs + # Otherwise, if there's nothing on the right-hand side, + # look for a featuring artist on the left-hand side. + else: + lhs, _ = split_on_feat( + albumartist_split[0], custom_words=custom_words + ) + if lhs: + return lhs - return None + # Fall back to conservative handling of the track artist without relying + # on albumartist, which covers compilations using a 'Various Artists' + # albumartist and album tracks by a guest artist featuring a third artist. 
+ _, feat_part = split_on_feat(artist, False, custom_words) + return feat_part + + +def _album_artist_no_feat(album: Album) -> str: + custom_words = config["ftintitle"]["custom_words"].as_str_seq() + return split_on_feat(album["albumartist"], False, list(custom_words))[0] class FtInTitlePlugin(plugins.BeetsPlugin): + @cached_property + def bracket_keywords(self) -> list[str]: + return self.config["bracket_keywords"].as_str_seq() + + @staticmethod + @lru_cache(maxsize=256) + def _bracket_position_pattern(keywords: tuple[str, ...]) -> re.Pattern[str]: + """ + Build a compiled regex to find the first bracketed segment that contains + any of the provided keywords. + + Cached by keyword tuple to avoid recompiling on every track/title. + """ + kw_inner = "|".join(map(re.escape, keywords)) + + # If we have keywords, require one of them to appear in the bracket text. + # If kw == "", the lookahead becomes true and we match any bracket content. + kw = rf"\b(?={kw_inner})\b" if kw_inner else "" + return re.compile( + rf""" + (?: # non-capturing group for the split + \s*? # optional whitespace before brackets + (?= # any bracket containing a keyword + \([^)]*{kw}.*?\) + | \[[^]]*{kw}.*?\] + | <[^>]*{kw}.*? > + | \{{[^}}]*{kw}.*?\}} + | $ # or the end of the string + ) + ) + """, + re.IGNORECASE | re.VERBOSE, + ) + def __init__(self) -> None: super().__init__() @@ -90,8 +182,11 @@ class FtInTitlePlugin(plugins.BeetsPlugin): { "auto": True, "drop": False, - "format": "feat. {0}", + "format": "feat. 
{}", "keep_in_artist": False, + "preserve_album_artist": True, + "custom_words": [], + "bracket_keywords": list(DEFAULT_BRACKET_KEYWORDS), } ) @@ -111,15 +206,29 @@ class FtInTitlePlugin(plugins.BeetsPlugin): if self.config["auto"]: self.import_stages = [self.imported] + self.album_template_fields["album_artist_no_feat"] = ( + _album_artist_no_feat + ) + def commands(self) -> list[ui.Subcommand]: def func(lib, opts, args): self.config.set_args(opts) drop_feat = self.config["drop"].get(bool) keep_in_artist_field = self.config["keep_in_artist"].get(bool) + preserve_album_artist = self.config["preserve_album_artist"].get( + bool + ) + custom_words = self.config["custom_words"].get(list) write = ui.should_write() - for item in lib.items(ui.decargs(args)): - if self.ft_in_title(item, drop_feat, keep_in_artist_field): + for item in lib.items(args): + if self.ft_in_title( + item, + drop_feat, + keep_in_artist_field, + preserve_album_artist, + custom_words, + ): item.store() if write: item.try_write() @@ -131,9 +240,17 @@ class FtInTitlePlugin(plugins.BeetsPlugin): """Import hook for moving featuring artist automatically.""" drop_feat = self.config["drop"].get(bool) keep_in_artist_field = self.config["keep_in_artist"].get(bool) + preserve_album_artist = self.config["preserve_album_artist"].get(bool) + custom_words = self.config["custom_words"].get(list) for item in task.imported_items(): - if self.ft_in_title(item, drop_feat, keep_in_artist_field): + if self.ft_in_title( + item, + drop_feat, + keep_in_artist_field, + preserve_album_artist, + custom_words, + ): item.store() def update_metadata( @@ -142,6 +259,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin): feat_part: str, drop_feat: bool, keep_in_artist_field: bool, + custom_words: list[str], ) -> None: """Choose how to add new artists to the title and set the new metadata. Also, print out messages about any changes that are made. 
@@ -151,23 +269,30 @@ class FtInTitlePlugin(plugins.BeetsPlugin): # In case the artist is kept, do not update the artist fields. if keep_in_artist_field: self._log.info( - "artist: {0} (Not changing due to keep_in_artist)", item.artist + "artist: {.artist} (Not changing due to keep_in_artist)", item ) else: - self._log.info("artist: {0} -> {1}", item.artist, item.albumartist) - item.artist = item.albumartist + track_artist, _ = split_on_feat( + item.artist, custom_words=custom_words + ) + self._log.info("artist: {0.artist} -> {1}", item, track_artist) + item.artist = track_artist if item.artist_sort: # Just strip the featured artist from the sort name. - item.artist_sort, _ = split_on_feat(item.artist_sort) + item.artist_sort, _ = split_on_feat( + item.artist_sort, custom_words=custom_words + ) # Only update the title if it does not already contain a featured # artist and if we do not drop featuring information. - if not drop_feat and not contains_feat(item.title): + if not drop_feat and not contains_feat(item.title, custom_words): feat_format = self.config["format"].as_str() - new_format = feat_format.format(feat_part) - new_title = f"{item.title} {new_format}" - self._log.info("title: {0} -> {1}", item.title, new_title) + formatted = feat_format.format(feat_part) + new_title = self.insert_ft_into_title( + item.title, formatted, self.bracket_keywords + ) + self._log.info("title: {.title} -> {}", item, new_title) item.title = new_title def ft_in_title( @@ -175,6 +300,8 @@ class FtInTitlePlugin(plugins.BeetsPlugin): item: Item, drop_feat: bool, keep_in_artist_field: bool, + preserve_album_artist: bool, + custom_words: list[str], ) -> bool: """Look for featured artists in the item's artist fields and move them to the title. @@ -188,22 +315,49 @@ class FtInTitlePlugin(plugins.BeetsPlugin): # Check whether there is a featured artist on this track and the # artist field does not exactly match the album artist field. 
In # that case, we attempt to move the featured artist to the title. - if not albumartist or albumartist == artist: + if preserve_album_artist and albumartist and artist == albumartist: return False - _, featured = split_on_feat(artist) + _, featured = split_on_feat(artist, custom_words=custom_words) if not featured: return False - self._log.info("{}", displayable_path(item.path)) + self._log.info("{.filepath}", item) # Attempt to find the featured artist. - feat_part = find_feat_part(artist, albumartist) + feat_part = find_feat_part(artist, albumartist, custom_words) if not feat_part: self._log.info("no featuring artists found") return False # If we have a featuring artist, move it to the title. - self.update_metadata(item, feat_part, drop_feat, keep_in_artist_field) + self.update_metadata( + item, feat_part, drop_feat, keep_in_artist_field, custom_words + ) return True + + @staticmethod + def find_bracket_position( + title: str, keywords: list[str] | None = None + ) -> int | None: + normalized = ( + DEFAULT_BRACKET_KEYWORDS if keywords is None else tuple(keywords) + ) + pattern = FtInTitlePlugin._bracket_position_pattern(normalized) + m: re.Match[str] | None = pattern.search(title) + return m.start() if m else None + + @classmethod + def insert_ft_into_title( + cls, title: str, feat_part: str, keywords: list[str] | None = None + ) -> str: + """Insert featured artist before the first bracket containing + remix/edit keywords if present. 
+ """ + normalized = ( + DEFAULT_BRACKET_KEYWORDS if keywords is None else tuple(keywords) + ) + pattern = cls._bracket_position_pattern(normalized) + parts = pattern.split(title, maxsplit=1) + return f" {feat_part} ".join(parts).strip() diff --git a/beetsplug/hook.py b/beetsplug/hook.py index 5ce5ef828..b8869eca4 100644 --- a/beetsplug/hook.py +++ b/beetsplug/hook.py @@ -14,27 +14,21 @@ """Allows custom commands to be run when an event is emitted by beets""" +from __future__ import annotations + +import os import shlex import string import subprocess -import sys +from typing import Any from beets.plugins import BeetsPlugin -class CodingFormatter(string.Formatter): - """A variant of `string.Formatter` that converts everything to `unicode` - strings. +class BytesToStrFormatter(string.Formatter): + """A variant of `string.Formatter` that converts `bytes` to `str`.""" - This was necessary on Python 2, in needs to be kept for backwards - compatibility. - """ - - def __init__(self, coding): - """Creates a new coding formatter with the provided coding.""" - self._coding = coding - - def convert_field(self, value, conversion): + def convert_field(self, value: Any, conversion: str | None) -> Any: """Converts the provided value given a conversion type. This method decodes the converted value using the formatter's coding. @@ -42,7 +36,7 @@ class CodingFormatter(string.Formatter): converted = super().convert_field(value, conversion) if isinstance(converted, bytes): - return converted.decode(self._coding) + return os.fsdecode(converted) return converted @@ -68,19 +62,19 @@ class HookPlugin(BeetsPlugin): def create_and_register_hook(self, event, command): def hook_function(**kwargs): if command is None or len(command) == 0: - self._log.error('invalid command "{0}"', command) + self._log.error('invalid command "{}"', command) return # For backwards compatibility, use a string formatter that decodes - # bytes (in particular, paths) to unicode strings. 
- formatter = CodingFormatter(sys.getfilesystemencoding()) + # bytes (in particular, paths) to strings. + formatter = BytesToStrFormatter() command_pieces = [ formatter.format(piece, event=event, **kwargs) for piece in shlex.split(command) ] self._log.debug( - 'running command "{0}" for event {1}', + 'running command "{}" for event {}', " ".join(command_pieces), event, ) @@ -89,9 +83,9 @@ class HookPlugin(BeetsPlugin): subprocess.check_call(command_pieces) except subprocess.CalledProcessError as exc: self._log.error( - "hook for {0} exited with status {1}", event, exc.returncode + "hook for {} exited with status {.returncode}", event, exc ) except OSError as exc: - self._log.error("hook for {0} failed: {1}", event, exc) + self._log.error("hook for {} failed: {}", event, exc) self.register_listener(event, hook_function) diff --git a/beetsplug/ihate.py b/beetsplug/ihate.py index 35788ea05..54a61384c 100644 --- a/beetsplug/ihate.py +++ b/beetsplug/ihate.py @@ -15,7 +15,7 @@ """Warns you about things you hate (or even blocks import).""" -from beets.importer import action +from beets.importer import Action from beets.library import Album, Item, parse_query_string from beets.plugins import BeetsPlugin @@ -65,15 +65,15 @@ class IHatePlugin(BeetsPlugin): skip_queries = self.config["skip"].as_str_seq() warn_queries = self.config["warn"].as_str_seq() - if task.choice_flag == action.APPLY: + if task.choice_flag == Action.APPLY: if skip_queries or warn_queries: self._log.debug("processing your hate") if self.do_i_hate_this(task, skip_queries): - task.choice_flag = action.SKIP - self._log.info("skipped: {0}", summary(task)) + task.choice_flag = Action.SKIP + self._log.info("skipped: {}", summary(task)) return if self.do_i_hate_this(task, warn_queries): - self._log.info("you may hate this: {0}", summary(task)) + self._log.info("you may hate this: {}", summary(task)) else: self._log.debug("nothing to do") else: diff --git a/beetsplug/importadded.py b/beetsplug/importadded.py 
index 61a14fba9..f728a104f 100644 --- a/beetsplug/importadded.py +++ b/beetsplug/importadded.py @@ -58,8 +58,7 @@ class ImportAddedPlugin(BeetsPlugin): or session.config["reflink"] ): self._log.debug( - "In place import detected, recording mtimes from " - "source paths" + "In place import detected, recording mtimes from source paths" ) items = ( [task.item] @@ -95,7 +94,7 @@ class ImportAddedPlugin(BeetsPlugin): mtime = os.stat(util.syspath(source)).st_mtime self.item_mtime[destination] = mtime self._log.debug( - "Recorded mtime {0} for item '{1}' imported from " "'{2}'", + "Recorded mtime {} for item '{}' imported from '{}'", mtime, util.displayable_path(destination), util.displayable_path(source), @@ -104,9 +103,9 @@ class ImportAddedPlugin(BeetsPlugin): def update_album_times(self, lib, album): if self.reimported_album(album): self._log.debug( - "Album '{0}' is reimported, skipping import of " + "Album '{.filepath}' is reimported, skipping import of " "added dates for the album and its items.", - util.displayable_path(album.path), + album, ) return @@ -120,18 +119,17 @@ class ImportAddedPlugin(BeetsPlugin): item.store() album.added = min(album_mtimes) self._log.debug( - "Import of album '{0}', selected album.added={1} " + "Import of album '{0.album}', selected album.added={0.added} " "from item file mtimes.", - album.album, - album.added, + album, ) album.store() def update_item_times(self, lib, item): if self.reimported_item(item): self._log.debug( - "Item '{0}' is reimported, skipping import of " "added date.", - util.displayable_path(item.path), + "Item '{.filepath}' is reimported, skipping import of added date.", + item, ) return mtime = self.item_mtime.pop(item.path, None) @@ -140,9 +138,8 @@ class ImportAddedPlugin(BeetsPlugin): if self.config["preserve_mtimes"].get(bool): self.write_item_mtime(item, mtime) self._log.debug( - "Import of item '{0}', selected item.added={1}", - util.displayable_path(item.path), - item.added, + "Import of item '{0.filepath}', 
selected item.added={0.added}", + item, ) item.store() @@ -154,7 +151,6 @@ class ImportAddedPlugin(BeetsPlugin): if self.config["preserve_write_mtimes"].get(bool): self.write_item_mtime(item, item.added) self._log.debug( - "Write of item '{0}', selected item.added={1}", - util.displayable_path(item.path), - item.added, + "Write of item '{0.filepath}', selected item.added={0.added}", + item, ) diff --git a/beetsplug/importfeeds.py b/beetsplug/importfeeds.py index 0a5a6afe4..a74746f8b 100644 --- a/beetsplug/importfeeds.py +++ b/beetsplug/importfeeds.py @@ -50,7 +50,7 @@ def _build_m3u_filename(basename): path = normpath( os.path.join( config["importfeeds"]["dir"].as_filename(), - date + "_" + basename + ".m3u", + f"{date}_{basename}.m3u", ) ) return path @@ -136,7 +136,7 @@ class ImportFeedsPlugin(BeetsPlugin): if "echo" in formats: self._log.info("Location of imported music:") for path in paths: - self._log.info(" {0}", path) + self._log.info(" {}", path) def album_imported(self, lib, album): self._record_items(lib, album.album, album.items()) diff --git a/beetsplug/importsource.py b/beetsplug/importsource.py new file mode 100644 index 000000000..e42be3f1f --- /dev/null +++ b/beetsplug/importsource.py @@ -0,0 +1,169 @@ +"""Adds a `source_path` attribute to imported albums indicating from what path +the album was imported from. Also suggests removing that source path in case +you've removed the album from the library. 
+ +""" + +import os +from pathlib import Path +from shutil import rmtree + +from beets.dbcore.query import PathQuery +from beets.plugins import BeetsPlugin +from beets.ui import colorize as colorize_text +from beets.ui import input_options + + +class ImportSourcePlugin(BeetsPlugin): + """Main plugin class.""" + + def __init__(self): + """Initialize the plugin and read configuration.""" + super().__init__() + self.config.add( + { + "suggest_removal": False, + } + ) + self.import_stages = [self.import_stage] + self.register_listener("item_removed", self.suggest_removal) + # In order to stop future removal suggestions for an album we keep + # track of `mb_albumid`s in this set. + self.stop_suggestions_for_albums = set() + # During reimports (import --library) both the import_task_choice and + # the item_removed event are triggered. The item_removed event is + # triggered first. For the import_task_choice event we prevent removal + # suggestions using the existing stop_suggestions_for_album mechanism. 
+ self.register_listener( + "import_task_choice", self.prevent_suggest_removal + ) + + def prevent_suggest_removal(self, session, task): + if task.skip: + return + for item in task.imported_items(): + if "mb_albumid" in item: + self.stop_suggestions_for_albums.add(item.mb_albumid) + + def import_stage(self, _, task): + """Event handler for albums import finished.""" + for item in task.imported_items(): + # During reimports (import --library), we prevent overwriting the + # source_path attribute with the path from the music library + if "source_path" in item: + self._log.info( + "Preserving source_path of reimported item {}", item.id + ) + continue + item["source_path"] = item.path + item.try_sync(write=False, move=False) + + def suggest_removal(self, item): + """Prompts the user to delete the original path the item was imported from.""" + if ( + not self.config["suggest_removal"] + or item.mb_albumid in self.stop_suggestions_for_albums + ): + return + + if "source_path" not in item: + self._log.warning( + "Item without source_path (probably imported before plugin " + "usage): {}", + item.filepath, + ) + return + + srcpath = Path(os.fsdecode(item.source_path)) + if not srcpath.is_file(): + self._log.warning( + "Original source file no longer exists or is not accessible: {}", + srcpath, + ) + return + + if not ( + os.access(srcpath, os.W_OK) + and os.access(srcpath.parent, os.W_OK | os.X_OK) + ): + self._log.warning( + "Original source file cannot be deleted (insufficient permissions): {}", + srcpath, + ) + return + + # We ask the user whether they'd like to delete the item's source + # directory + item_path = colorize_text("text_warning", item.filepath) + source_path = colorize_text("text_warning", srcpath) + + print( + f"The item:\n{item_path}\nis originated from:\n{source_path}\n" + "What would you like to do?" 
+ ) + + resp = input_options( + [ + "Delete the item's source", + "Recursively delete the source's directory", + "do Nothing", + "do nothing and Stop suggesting to delete items from this album", + ], + require=True, + ) + + # Handle user response + if resp == "d": + self._log.info( + "Deleting the item's source file: {}", + srcpath, + ) + srcpath.unlink() + + elif resp == "r": + self._log.info( + "Searching for other items with a source_path attr containing: {}", + srcpath.parent, + ) + + source_dir_query = PathQuery( + "source_path", + srcpath.parent, + # The "source_path" attribute may not be present in all + # items of the library, so we avoid errors with this: + fast=False, + ) + + print("Doing so will delete the following items' sources as well:") + for searched_item in item._db.items(source_dir_query): + print(colorize_text("text_warning", searched_item.filepath)) + + print("Would you like to continue?") + continue_resp = input_options( + ["Yes", "delete None", "delete just the File"], + require=False, # Yes is the a default + ) + + if continue_resp == "y": + self._log.info( + "Deleting the item's source directory: {}", + srcpath.parent, + ) + rmtree(srcpath.parent) + + elif continue_resp == "n": + self._log.info("doing nothing - aborting hook function") + return + + elif continue_resp == "f": + self._log.info( + "removing just the item's original source: {}", + srcpath, + ) + srcpath.unlink() + + elif resp == "s": + self.stop_suggestions_for_albums.add(item.mb_albumid) + + else: + self._log.info("Doing nothing") diff --git a/beetsplug/info.py b/beetsplug/info.py index d759d6066..cc78aaffe 100644 --- a/beetsplug/info.py +++ b/beetsplug/info.py @@ -117,7 +117,6 @@ def print_data(data, item=None, fmt=None): return maxwidth = max(len(key) for key in formatted) - lineformat = f"{{0:>{maxwidth}}}: {{1}}" if path: ui.print_(displayable_path(path)) @@ -126,7 +125,7 @@ def print_data(data, item=None, fmt=None): value = formatted[field] if isinstance(value, list): 
value = "; ".join(value) - ui.print_(lineformat.format(field, value)) + ui.print_(f"{field:>{maxwidth}}: {value}") def print_data_keys(data, item=None): @@ -139,12 +138,11 @@ def print_data_keys(data, item=None): if len(formatted) == 0: return - line_format = "{0}{{0}}".format(" " * 4) if path: ui.print_(displayable_path(path)) for field in sorted(formatted): - ui.print_(line_format.format(field)) + ui.print_(f" {field}") class InfoPlugin(BeetsPlugin): @@ -215,13 +213,13 @@ class InfoPlugin(BeetsPlugin): summary = {} for data_emitter in data_collector( lib, - ui.decargs(args), + args, album=opts.album, ): try: data, item = data_emitter(included_keys or "*") except (mediafile.UnreadableFileError, OSError) as ex: - self._log.error("cannot read file: {0}", ex) + self._log.error("cannot read file: {}", ex) continue if opts.summarize: @@ -232,7 +230,7 @@ class InfoPlugin(BeetsPlugin): if opts.keys_only: print_data_keys(data, item) else: - fmt = ui.decargs([opts.format])[0] if opts.format else None + fmt = [opts.format][0] if opts.format else None print_data(data, item, fmt) first = False diff --git a/beetsplug/inline.py b/beetsplug/inline.py index 4092c46d0..860a205ee 100644 --- a/beetsplug/inline.py +++ b/beetsplug/inline.py @@ -28,8 +28,7 @@ class InlineError(Exception): def __init__(self, code, exc): super().__init__( - ("error in inline path field code:\n" "%s\n%s: %s") - % (code, type(exc).__name__, str(exc)) + f"error in inline path field code:\n{code}\n{type(exc).__name__}: {exc}" ) @@ -37,7 +36,8 @@ def _compile_func(body): """Given Python code for a function body, return a compiled callable that invokes that code. 
""" - body = "def {}():\n {}".format(FUNC_NAME, body.replace("\n", "\n ")) + body = body.replace("\n", "\n ") + body = f"def {FUNC_NAME}():\n {body}" code = compile(body, "inline", "exec") env = {} eval(code, env) @@ -60,19 +60,19 @@ class InlinePlugin(BeetsPlugin): for key, view in itertools.chain( config["item_fields"].items(), config["pathfields"].items() ): - self._log.debug("adding item field {0}", key) - func = self.compile_inline(view.as_str(), False) + self._log.debug("adding item field {}", key) + func = self.compile_inline(view.as_str(), False, key) if func is not None: self.template_fields[key] = func # Album fields. for key, view in config["album_fields"].items(): - self._log.debug("adding album field {0}", key) - func = self.compile_inline(view.as_str(), True) + self._log.debug("adding album field {}", key) + func = self.compile_inline(view.as_str(), True, key) if func is not None: self.album_template_fields[key] = func - def compile_inline(self, python_code, album): + def compile_inline(self, python_code, album, field_name): """Given a Python expression or function body, compile it as a path field function. The returned function takes a single argument, an Item, and returns a Unicode string. 
If the expression cannot be @@ -87,7 +87,7 @@ class InlinePlugin(BeetsPlugin): func = _compile_func(python_code) except SyntaxError: self._log.error( - "syntax error in inline field definition:\n" "{0}", + "syntax error in inline field definition:\n{}", traceback.format_exc(), ) return @@ -97,7 +97,12 @@ class InlinePlugin(BeetsPlugin): is_expr = True def _dict_for(obj): - out = dict(obj) + out = {} + for key in obj.keys(computed=False): + if key == field_name: + continue + out[key] = obj._get(key) + if album: out["items"] = list(obj.items()) return out diff --git a/beetsplug/ipfs.py b/beetsplug/ipfs.py index 1c8c89aa9..8b6d57fd3 100644 --- a/beetsplug/ipfs.py +++ b/beetsplug/ipfs.py @@ -74,29 +74,29 @@ class IPFSPlugin(BeetsPlugin): def func(lib, opts, args): if opts.add: - for album in lib.albums(ui.decargs(args)): + for album in lib.albums(args): if len(album.items()) == 0: self._log.info( - "{0} does not contain items, aborting", album + "{} does not contain items, aborting", album ) self.ipfs_add(album) album.store() if opts.get: - self.ipfs_get(lib, ui.decargs(args)) + self.ipfs_get(lib, args) if opts.publish: self.ipfs_publish(lib) if opts._import: - self.ipfs_import(lib, ui.decargs(args)) + self.ipfs_import(lib, args) if opts._list: - self.ipfs_list(lib, ui.decargs(args)) + self.ipfs_list(lib, args) if opts.play: - self.ipfs_play(lib, opts, ui.decargs(args)) + self.ipfs_play(lib, opts, args) cmd.func = func return [cmd] @@ -122,13 +122,13 @@ class IPFSPlugin(BeetsPlugin): return False try: if album.ipfs: - self._log.debug("{0} already added", album_dir) + self._log.debug("{} already added", album_dir) # Already added to ipfs return False except AttributeError: pass - self._log.info("Adding {0} to ipfs", album_dir) + self._log.info("Adding {} to ipfs", album_dir) if self.config["nocopy"]: cmd = "ipfs add --nocopy -q -r".split() @@ -138,7 +138,7 @@ class IPFSPlugin(BeetsPlugin): try: output = util.command_output(cmd).stdout.split() except (OSError, 
subprocess.CalledProcessError) as exc: - self._log.error("Failed to add {0}, error: {1}", album_dir, exc) + self._log.error("Failed to add {}, error: {}", album_dir, exc) return False length = len(output) @@ -146,12 +146,12 @@ class IPFSPlugin(BeetsPlugin): line = line.strip() if linenr == length - 1: # last printed line is the album hash - self._log.info("album: {0}", line) + self._log.info("album: {}", line) album.ipfs = line else: try: item = album.items()[linenr] - self._log.info("item: {0}", line) + self._log.info("item: {}", line) item.ipfs = line item.store() except IndexError: @@ -180,11 +180,11 @@ class IPFSPlugin(BeetsPlugin): util.command_output(cmd) except (OSError, subprocess.CalledProcessError) as err: self._log.error( - "Failed to get {0} from ipfs.\n{1}", _hash, err.output + "Failed to get {} from ipfs.\n{.output}", _hash, err ) return False - self._log.info("Getting {0} from ipfs", _hash) + self._log.info("Getting {} from ipfs", _hash) imp = ui.commands.TerminalImportSession( lib, loghandler=None, query=None, paths=[_hash] ) @@ -208,7 +208,7 @@ class IPFSPlugin(BeetsPlugin): msg = f"Failed to publish library. 
Error: {err}" self._log.error(msg) return False - self._log.info("hash of library: {0}", output) + self._log.info("hash of library: {}", output) def ipfs_import(self, lib, args): _hash = args[0] @@ -232,7 +232,7 @@ class IPFSPlugin(BeetsPlugin): try: util.command_output(cmd) except (OSError, subprocess.CalledProcessError): - self._log.error(f"Could not import {_hash}") + self._log.error("Could not import {}", _hash) return False # add all albums from remotes into a combined library @@ -306,7 +306,7 @@ class IPFSPlugin(BeetsPlugin): items.append(item) if len(items) < 1: return False - self._log.info("Adding '{0}' to temporary library", album) + self._log.info("Adding '{}' to temporary library", album) new_album = tmplib.add_album(items) new_album.ipfs = album.ipfs new_album.store(inherit=False) diff --git a/beetsplug/keyfinder.py b/beetsplug/keyfinder.py index 87f0cc427..e2aff24e5 100644 --- a/beetsplug/keyfinder.py +++ b/beetsplug/keyfinder.py @@ -43,7 +43,7 @@ class KeyFinderPlugin(BeetsPlugin): return [cmd] def command(self, lib, opts, args): - self.find_key(lib.items(ui.decargs(args)), write=ui.should_write()) + self.find_key(lib.items(args), write=ui.should_write()) def imported(self, session, task): self.find_key(task.imported_items()) @@ -65,7 +65,7 @@ class KeyFinderPlugin(BeetsPlugin): command + [util.syspath(item.path)] ).stdout except (subprocess.CalledProcessError, OSError) as exc: - self._log.error("execution failed: {0}", exc) + self._log.error("execution failed: {}", exc) continue try: @@ -73,7 +73,7 @@ class KeyFinderPlugin(BeetsPlugin): except IndexError: # Sometimes keyfinder-cli returns 0 but with no key, usually # when the file is silent or corrupt, so we log and skip. 
- self._log.error("no key returned for path: {0}", item.path) + self._log.error("no key returned for path: {.path}", item) continue try: @@ -84,9 +84,7 @@ class KeyFinderPlugin(BeetsPlugin): item["initial_key"] = key self._log.info( - "added computed initial key {0} for {1}", - key, - util.displayable_path(item.path), + "added computed initial key {} for {.filepath}", key, item ) if write: diff --git a/beetsplug/kodiupdate.py b/beetsplug/kodiupdate.py index d5d699537..890ab16c4 100644 --- a/beetsplug/kodiupdate.py +++ b/beetsplug/kodiupdate.py @@ -25,7 +25,6 @@ Put something like the following in your config.yaml to configure: import requests -from beets import config from beets.plugins import BeetsPlugin @@ -53,14 +52,15 @@ def update_kodi(host, port, user, password): class KodiUpdate(BeetsPlugin): def __init__(self): - super().__init__() + super().__init__("kodi") # Adding defaults. - config["kodi"].add( + self.config.add( [{"host": "localhost", "port": 8080, "user": "kodi", "pwd": "kodi"}] ) - config["kodi"]["pwd"].redact = True + self.config["user"].redact = True + self.config["pwd"].redact = True self.register_listener("database_change", self.listen_for_db_change) def listen_for_db_change(self, lib, model): @@ -71,7 +71,7 @@ class KodiUpdate(BeetsPlugin): """When the client exists try to send refresh request to Kodi server.""" self._log.info("Requesting a Kodi library update...") - kodi = config["kodi"].get() + kodi = self.config.get() # Backwards compatibility in case not configured as an array if not isinstance(kodi, list): @@ -96,10 +96,10 @@ class KodiUpdate(BeetsPlugin): continue self._log.info( - "Kodi update triggered for {0}:{1}", + "Kodi update triggered for {}:{}", instance["host"], instance["port"], ) except requests.exceptions.RequestException as e: - self._log.warning("Kodi update failed: {0}", str(e)) + self._log.warning("Kodi update failed: {}", str(e)) continue diff --git a/beetsplug/lastgenre/__init__.py b/beetsplug/lastgenre/__init__.py index 
30b44e187..e622096cf 100644 --- a/beetsplug/lastgenre/__init__.py +++ b/beetsplug/lastgenre/__init__.py @@ -22,17 +22,25 @@ The scraper script used is available here: https://gist.github.com/1241307 """ -import codecs +from __future__ import annotations + import os import traceback -from typing import Union +from functools import singledispatchmethod +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable import pylast import yaml from beets import config, library, plugins, ui from beets.library import Album, Item -from beets.util import normpath, plurality, unique_list +from beets.util import plurality, unique_list + +if TYPE_CHECKING: + import optparse + + from beets.library import LibModel LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY) @@ -42,15 +50,15 @@ PYLAST_EXCEPTIONS = ( pylast.NetworkError, ) -REPLACE = { - "\u2010": "-", -} - # Canonicalization tree processing. -def flatten_tree(elem, path, branches): +def flatten_tree( + elem: dict[Any, Any] | list[Any] | str, + path: list[str], + branches: list[list[str]], +) -> None: """Flatten nested lists/dictionaries into lists of strings (branches). """ @@ -67,7 +75,7 @@ def flatten_tree(elem, path, branches): branches.append(path + [str(elem)]) -def find_parents(candidate, branches): +def find_parents(candidate: str, branches: list[list[str]]) -> list[str]: """Find parents genre of a given genre, ordered from the closest to the further parent. 
""" @@ -87,7 +95,7 @@ C14N_TREE = os.path.join(os.path.dirname(__file__), "genres-tree.yaml") class LastGenrePlugin(plugins.BeetsPlugin): - def __init__(self): + def __init__(self) -> None: super().__init__() self.config.add( @@ -104,50 +112,65 @@ class LastGenrePlugin(plugins.BeetsPlugin): "separator": ", ", "prefer_specific": False, "title_case": True, - "extended_debug": False, + "pretend": False, } ) self.setup() - def setup(self): + def setup(self) -> None: """Setup plugin from config options""" if self.config["auto"]: self.import_stages = [self.imported] - self._genre_cache = {} + self._genre_cache: dict[str, list[str]] = {} + self.whitelist = self._load_whitelist() + self.c14n_branches, self.canonicalize = self._load_c14n_tree() - # Read the whitelist file if enabled. - self.whitelist = set() + def _load_whitelist(self) -> set[str]: + """Load the whitelist from a text file. + + Default whitelist is used if config is True, empty string or set to "nothing". + """ + whitelist = set() wl_filename = self.config["whitelist"].get() - if wl_filename in (True, ""): # Indicates the default whitelist. + if wl_filename in (True, "", None): # Indicates the default whitelist. wl_filename = WHITELIST if wl_filename: - wl_filename = normpath(wl_filename) - with open(wl_filename, "rb") as f: - for line in f: - line = line.decode("utf-8").strip().lower() - if line and not line.startswith("#"): - self.whitelist.add(line) + self._log.debug("Loading whitelist {}", wl_filename) + text = Path(wl_filename).expanduser().read_text(encoding="utf-8") + for line in text.splitlines(): + if (line := line.strip().lower()) and not line.startswith("#"): + whitelist.add(line) - # Read the genres tree for canonicalization if enabled. - self.c14n_branches = [] + return whitelist + + def _load_c14n_tree(self) -> tuple[list[list[str]], bool]: + """Load the canonicalization tree from a YAML file. 
+ + Default tree is used if config is True, empty string, set to "nothing" + or if prefer_specific is enabled. + """ + c14n_branches: list[list[str]] = [] c14n_filename = self.config["canonical"].get() - self.canonicalize = c14n_filename is not False - + canonicalize = c14n_filename is not False # Default tree - if c14n_filename in (True, ""): - c14n_filename = C14N_TREE - elif not self.canonicalize and self.config["prefer_specific"].get(): + if c14n_filename in (True, "", None) or ( # prefer_specific requires a tree, load default tree + not canonicalize and self.config["prefer_specific"].get() + ): c14n_filename = C14N_TREE - # Read the tree if c14n_filename: - self._log.debug("Loading canonicalization tree {0}", c14n_filename) - c14n_filename = normpath(c14n_filename) - with codecs.open(c14n_filename, "r", encoding="utf-8") as f: + self._log.debug("Loading canonicalization tree {}", c14n_filename) + with Path(c14n_filename).expanduser().open(encoding="utf-8") as f: genres_tree = yaml.safe_load(f) - flatten_tree(genres_tree, [], self.c14n_branches) + flatten_tree(genres_tree, [], c14n_branches) + return c14n_branches, canonicalize + + def _tunelog(self, msg: str, *args: Any, **kwargs: Any) -> None: + """Log tuning messages at DEBUG level when verbosity level is high enough.""" + if config["verbose"].as_number() >= 3: + self._log.debug(msg, *args, **kwargs) @property def sources(self) -> tuple[str, ...]: @@ -165,7 +188,7 @@ class LastGenrePlugin(plugins.BeetsPlugin): # More canonicalization and general helpers. - def _get_depth(self, tag): + def _get_depth(self, tag: str) -> int | None: """Find the depth of a tag in the genres tree.""" depth = None for key, value in enumerate(self.c14n_branches): @@ -174,7 +197,7 @@ class LastGenrePlugin(plugins.BeetsPlugin): break return depth - def _sort_by_depth(self, tags): + def _sort_by_depth(self, tags: list[str]) -> list[str]: """Given a list of tags, sort the tags by their depths in the genre tree. 
""" @@ -184,31 +207,28 @@ class LastGenrePlugin(plugins.BeetsPlugin): return [p[1] for p in depth_tag_pairs] def _resolve_genres(self, tags: list[str]) -> list[str]: - """Filter, deduplicate, sort, canonicalize provided genres list. + """Canonicalize, sort and filter a list of genres. - Returns an empty list if the input tags list is empty. - If canonicalization is enabled, it extends the list by incorporating parent genres from the canonicalization tree. When a whitelist is set, only parent tags that pass a validity check (_is_valid) are included; - otherwise, it adds the oldest ancestor. - - During canonicalization, it stops adding parent tags if the count of - tags reaches the configured limit (count). + otherwise, it adds the oldest ancestor. Adding parent tags is stopped + when the count of tags reaches the configured limit (count). - The tags list is then deduplicated to ensure only unique genres are retained. - - Optionally, if the 'prefer_specific' configuration is enabled, the - list is sorted by the specificity (depth in the canonicalization tree) - of the genres. - - The method then filters the tag list, ensuring that only valid - genres (those that pass the _is_valid method) are kept. If a - whitelist is set, only genres in the whitelist are considered valid - (which may even result in no genres at all being retained). - - Finally, the filtered list of genres, limited to - the configured count is returned. + - If the 'prefer_specific' configuration is enabled, the list is sorted + by the specificity (depth in the canonicalization tree) of the genres. + - Finally applies whitelist filtering to ensure that only valid + genres are kept. (This may result in no genres at all being retained). + - Returns the filtered list of genres, limited to the configured count. 
""" if not tags: return [] count = self.config["count"].get(int) + + # Canonicalization (if enabled) if self.canonicalize: # Extend the list to consider tags parents in the c14n tree tags_all = [] @@ -242,22 +262,18 @@ class LastGenrePlugin(plugins.BeetsPlugin): # c14n only adds allowed genres but we may have had forbidden genres in # the original tags list - valid_tags = self._filter_valid_genres(tags) - return valid_tags[: self.config["count"].get(int)] + valid_tags = [t for t in tags if self._is_valid(t)] + return valid_tags[:count] - def fetch_genre(self, lastfm_obj): - """Return the genre for a pylast entity or None if no suitable genre - can be found. Ex. 'Electronic, House, Dance' + def fetch_genre( + self, lastfm_obj: pylast.Album | pylast.Artist | pylast.Track + ) -> list[str]: + """Return genres for a pylast entity. Returns an empty list if + no suitable genres are found. """ min_weight = self.config["min_weight"].get(int) return self._tags_for(lastfm_obj, min_weight) - def _filter_valid_genres(self, genres: list[str]) -> list[str]: - """Filter list of genres, only keep valid.""" - if not genres: - return [] - return [x for x in genres if self._is_valid(x)] - def _is_valid(self, genre: str) -> bool: """Check if the genre is valid. @@ -270,8 +286,10 @@ class LastGenrePlugin(plugins.BeetsPlugin): # Cached last.fm entity lookups. - def _last_lookup(self, entity, method, *args): - """Get a genre based on the named entity using the callable `method` + def _last_lookup( + self, entity: str, method: Callable[..., Any], *args: str + ) -> list[str]: + """Get genres based on the named entity using the callable `method` whose arguments are given in the sequence `args`. The genre lookup is cached based on the entity name and the arguments. @@ -281,42 +299,31 @@ class LastGenrePlugin(plugins.BeetsPlugin): """ # Shortcut if we're missing metadata. 
if any(not s for s in args): - return None + return [] key = f"{entity}.{'-'.join(str(a) for a in args)}" if key not in self._genre_cache: - args = [a.replace("\u2010", "-") for a in args] - self._genre_cache[key] = self.fetch_genre(method(*args)) + args_replaced = [a.replace("\u2010", "-") for a in args] + self._genre_cache[key] = self.fetch_genre(method(*args_replaced)) genre = self._genre_cache[key] - if self.config["extended_debug"]: - self._log.debug(f"last.fm (unfiltered) {entity} tags: {genre}") + self._tunelog("last.fm (unfiltered) {} tags: {}", entity, genre) return genre - def fetch_album_genre(self, obj): - """Return the album genre for this Item or Album.""" - return self._filter_valid_genres( - self._last_lookup( - "album", LASTFM.get_album, obj.albumartist, obj.album - ) + def fetch_album_genre(self, albumartist: str, albumtitle: str) -> list[str]: + """Return genres from Last.fm for the album by albumartist.""" + return self._last_lookup( + "album", LASTFM.get_album, albumartist, albumtitle ) - def fetch_album_artist_genre(self, obj): - """Return the album artist genre for this Item or Album.""" - return self._filter_valid_genres( - self._last_lookup("artist", LASTFM.get_artist, obj.albumartist) - ) + def fetch_artist_genre(self, artist: str) -> list[str]: + """Return genres from Last.fm for the artist.""" + return self._last_lookup("artist", LASTFM.get_artist, artist) - def fetch_artist_genre(self, item): - """Returns the track artist genre for this Item.""" - return self._filter_valid_genres( - self._last_lookup("artist", LASTFM.get_artist, item.artist) - ) - - def fetch_track_genre(self, obj): - """Returns the track genre for this Item.""" - return self._filter_valid_genres( - self._last_lookup("track", LASTFM.get_track, obj.artist, obj.title) + def fetch_track_genre(self, trackartist: str, tracktitle: str) -> list[str]: + """Return genres from Last.fm for the track by artist.""" + return self._last_lookup( + "track", LASTFM.get_track, 
trackartist, tracktitle ) # Main processing: _get_genre() and helpers. @@ -330,7 +337,7 @@ class LastGenrePlugin(plugins.BeetsPlugin): return self.config["separator"].as_str().join(formatted) - def _get_existing_genres(self, obj: Union[Album, Item]) -> list[str]: + def _get_existing_genres(self, obj: LibModel) -> list[str]: """Return a list of genres for this Item or Album. Empty string genres are removed.""" separator = self.config["separator"].get() @@ -346,14 +353,12 @@ class LastGenrePlugin(plugins.BeetsPlugin): self, old: list[str], new: list[str] ) -> list[str]: """Combine old and new genres and process via _resolve_genres.""" - self._log.debug(f"valid last.fm tags: {new}") - self._log.debug(f"existing genres taken into account: {old}") + self._log.debug("raw last.fm tags: {}", new) + self._log.debug("existing genres taken into account: {}", old) combined = old + new return self._resolve_genres(combined) - def _get_genre( - self, obj: Union[Album, Item] - ) -> tuple[Union[str, None], ...]: + def _get_genre(self, obj: LibModel) -> tuple[str | None, ...]: """Get the final genre string for an Album or Item object. `self.sources` specifies allowed genre sources. Starting with the first @@ -372,9 +377,24 @@ class LastGenrePlugin(plugins.BeetsPlugin): applied, while "artist, any" means only new last.fm genres are included and the whitelist feature was disabled. 
""" + + def _try_resolve_stage( + stage_label: str, keep_genres: list[str], new_genres: list[str] + ) -> tuple[str, str] | None: + """Try to resolve genres for a given stage and log the result.""" + resolved_genres = self._combine_resolve_and_log( + keep_genres, new_genres + ) + if resolved_genres: + suffix = "whitelist" if self.whitelist else "any" + label = f"{stage_label}, {suffix}" + if keep_genres: + label = f"keep + {label}" + return self._format_and_stringify(resolved_genres), label + return None + keep_genres = [] new_genres = [] - label = "" genres = self._get_existing_genres(obj) if genres and not self.config["force"]: @@ -393,53 +413,69 @@ class LastGenrePlugin(plugins.BeetsPlugin): # Run through stages: track, album, artist, # album artist, or most popular track genre. if isinstance(obj, library.Item) and "track" in self.sources: - if new_genres := self.fetch_track_genre(obj): - label = "track" + if new_genres := self.fetch_track_genre(obj.artist, obj.title): + if result := _try_resolve_stage( + "track", keep_genres, new_genres + ): + return result - if not new_genres and "album" in self.sources: - if new_genres := self.fetch_album_genre(obj): - label = "album" + if "album" in self.sources: + if new_genres := self.fetch_album_genre(obj.albumartist, obj.album): + if result := _try_resolve_stage( + "album", keep_genres, new_genres + ): + return result - if not new_genres and "artist" in self.sources: - new_genres = None + if "artist" in self.sources: + new_genres = [] if isinstance(obj, library.Item): - new_genres = self.fetch_artist_genre(obj) - label = "artist" + new_genres = self.fetch_artist_genre(obj.artist) + stage_label = "artist" elif obj.albumartist != config["va_name"].as_str(): - new_genres = self.fetch_album_artist_genre(obj) - label = "album artist" + new_genres = self.fetch_artist_genre(obj.albumartist) + stage_label = "album artist" + if not new_genres: + self._tunelog( + 'No album artist genre found for "{}", ' + "trying multi-valued 
field...", + obj.albumartist, + ) + for albumartist in obj.albumartists: + self._tunelog( + 'Fetching artist genre for "{}"', albumartist + ) + new_genres += self.fetch_artist_genre(albumartist) + if new_genres: + stage_label = "multi-valued album artist" else: # For "Various Artists", pick the most popular track genre. item_genres = [] + assert isinstance(obj, Album) # Type narrowing for mypy for item in obj.items(): item_genre = None if "track" in self.sources: - item_genre = self.fetch_track_genre(item) + item_genre = self.fetch_track_genre( + item.artist, item.title + ) if not item_genre: - item_genre = self.fetch_artist_genre(item) + item_genre = self.fetch_artist_genre(item.artist) if item_genre: item_genres += item_genre if item_genres: most_popular, rank = plurality(item_genres) new_genres = [most_popular] - label = "most popular track" + stage_label = "most popular track" self._log.debug( 'Most popular track genre "{}" ({}) for VA album.', most_popular, rank, ) - # Return with a combined or freshly fetched genre list. - if new_genres: - resolved_genres = self._combine_resolve_and_log( - keep_genres, new_genres - ) - if resolved_genres: - suffix = "whitelist" if self.whitelist else "any" - label += f", {suffix}" - if keep_genres: - label = f"keep + {label}" - return self._format_and_stringify(resolved_genres), label + if new_genres: + if result := _try_resolve_stage( + stage_label, keep_genres, new_genres + ): + return result # Nothing found, leave original if configured and valid. if obj.genre and self.config["keep_existing"]: @@ -455,8 +491,47 @@ class LastGenrePlugin(plugins.BeetsPlugin): # Beets plugin hooks and CLI. 
- def commands(self): + def _fetch_and_log_genre(self, obj: LibModel) -> None: + """Fetch genre and log it.""" + self._log.info(str(obj)) + obj.genre, label = self._get_genre(obj) + self._log.debug("Resolved ({}): {}", label, obj.genre) + + ui.show_model_changes(obj, fields=["genre"], print_obj=False) + + @singledispatchmethod + def _process(self, obj: LibModel, write: bool) -> None: + """Process an object, dispatching to the appropriate method.""" + raise NotImplementedError + + @_process.register + def _process_track(self, obj: Item, write: bool) -> None: + """Process a single track/item.""" + self._fetch_and_log_genre(obj) + if not self.config["pretend"]: + obj.try_sync(write=write, move=False) + + @_process.register + def _process_album(self, obj: Album, write: bool) -> None: + """Process an entire album.""" + self._fetch_and_log_genre(obj) + if "track" in self.sources: + for item in obj.items(): + self._process(item, write) + + if not self.config["pretend"]: + obj.try_sync( + write=write, move=False, inherit="track" not in self.sources + ) + + def commands(self) -> list[ui.Subcommand]: lastgenre_cmd = ui.Subcommand("lastgenre", help="fetch genres") + lastgenre_cmd.parser.add_option( + "-p", + "--pretend", + action="store_true", + help="show actions but do nothing", + ) lastgenre_cmd.parser.add_option( "-f", "--force", @@ -506,96 +581,30 @@ class LastGenrePlugin(plugins.BeetsPlugin): dest="album", help="match albums instead of items (default)", ) - lastgenre_cmd.parser.add_option( - "-d", - "--debug", - action="store_true", - dest="extended_debug", - help="extended last.fm debug logging", - ) lastgenre_cmd.parser.set_defaults(album=True) - def lastgenre_func(lib, opts, args): - write = ui.should_write() + def lastgenre_func( + lib: library.Library, opts: optparse.Values, args: list[str] + ) -> None: self.config.set_args(opts) - if opts.album: - # Fetch genres for whole albums - for album in lib.albums(ui.decargs(args)): - album.genre, src = 
self._get_genre(album) - self._log.info( - 'genre for album "{0.album}" ({1}): {0.genre}', - album, - src, - ) - if "track" in self.sources: - album.store(inherit=False) - else: - album.store() - - for item in album.items(): - # If we're using track-level sources, also look up each - # track on the album. - if "track" in self.sources: - item.genre, src = self._get_genre(item) - item.store() - self._log.info( - 'genre for track "{0.title}" ({1}): {0.genre}', - item, - src, - ) - - if write: - item.try_write() - else: - # Just query singletons, i.e. items that are not part of - # an album - for item in lib.items(ui.decargs(args)): - item.genre, src = self._get_genre(item) - item.store() - self._log.info( - "genre for track {0.title} ({1}): {0.genre}", item, src - ) + method = lib.albums if opts.album else lib.items + for obj in method(args): + self._process(obj, write=ui.should_write()) lastgenre_cmd.func = lastgenre_func return [lastgenre_cmd] - def imported(self, session, task): - """Event hook called when an import task finishes.""" - if task.is_album: - album = task.album - album.genre, src = self._get_genre(album) - self._log.debug( - 'genre for album "{0.album}" ({1}): {0.genre}', album, src - ) + def imported( + self, session: library.Session, task: library.ImportTask + ) -> None: + self._process(task.album if task.is_album else task.item, write=False) - # If we're using track-level sources, store the album genre only, - # then also look up individual track genres. - if "track" in self.sources: - album.store(inherit=False) - for item in album.items(): - item.genre, src = self._get_genre(item) - self._log.debug( - 'genre for track "{0.title}" ({1}): {0.genre}', - item, - src, - ) - item.store() - # Store the album genre and inherit to tracks. 
- else: - album.store() - - else: - item = task.item - item.genre, src = self._get_genre(item) - self._log.debug( - 'genre for track "{0.title}" ({1}): {0.genre}', - item, - src, - ) - item.store() - - def _tags_for(self, obj, min_weight=None): + def _tags_for( + self, + obj: pylast.Album | pylast.Artist | pylast.Track, + min_weight: int | None = None, + ) -> list[str]: """Core genre identification routine. Given a pylast entity (album or track), return a list of @@ -607,18 +616,19 @@ class LastGenrePlugin(plugins.BeetsPlugin): # Work around an inconsistency in pylast where # Album.get_top_tags() does not return TopItem instances. # https://github.com/pylast/pylast/issues/86 + obj_to_query: Any = obj if isinstance(obj, pylast.Album): - obj = super(pylast.Album, obj) + obj_to_query = super(pylast.Album, obj) try: - res = obj.get_top_tags() + res: Any = obj_to_query.get_top_tags() except PYLAST_EXCEPTIONS as exc: - self._log.debug("last.fm error: {0}", exc) + self._log.debug("last.fm error: {}", exc) return [] except Exception as exc: # Isolate bugs in pylast. self._log.debug("{}", traceback.format_exc()) - self._log.error("error in pylast library: {0}", exc) + self._log.error("error in pylast library: {}", exc) return [] # Filter by weight (optionally). @@ -626,6 +636,6 @@ class LastGenrePlugin(plugins.BeetsPlugin): res = [el for el in res if (int(el.weight or 0)) >= min_weight] # Get strings from tags. 
- res = [el.item.get_name().lower() for el in res] + tags: list[str] = [el.item.get_name().lower() for el in res] - return res + return tags diff --git a/beetsplug/lastgenre/genres-tree.yaml b/beetsplug/lastgenre/genres-tree.yaml index c8ae42478..d7acfbc1f 100644 --- a/beetsplug/lastgenre/genres-tree.yaml +++ b/beetsplug/lastgenre/genres-tree.yaml @@ -9,6 +9,7 @@ - cape jazz - chimurenga - coupé-décalé + - egyptian - fuji music - genge - highlife @@ -35,6 +36,7 @@ - sega - seggae - semba + - shangaan electro - soukous - taarab - zouglou @@ -133,6 +135,7 @@ - chutney - chutney soca - compas + - folklore argentino - mambo - merengue - méringue @@ -185,6 +188,7 @@ - humor - parody music - stand-up + - kabarett - country: - alternative country: - cowpunk @@ -250,7 +254,6 @@ - acid breaks - baltimore club - big beat - - breakbeat hardcore - broken beat - florida breaks - nu skool breaks @@ -287,12 +290,15 @@ - jump-up - liquid funk - neurofunk - - oldschool jungle: + - jungle: - darkside jungle - ragga jungle + - oldschool jungle - raggacore - sambass - techstep + - leftfield + - halftime - electro: - crunk - electro backbeat @@ -343,6 +349,7 @@ - hardcore: - bouncy house - bouncy techno + - breakbeat hardcore - breakcore - digital hardcore - doomcore @@ -400,6 +407,8 @@ - power electronics - power noise - witch house + - juke: + - footwork - post-disco: - boogie - dance-pop @@ -414,6 +423,7 @@ - techno: - acid techno - detroit techno + - dub techno - free tekno - ghettotech - minimal @@ -469,7 +479,6 @@ - chap hop - christian hip hop - conscious hip hop - - country-rap - crunkcore - cumbia rap - east coast hip hop: @@ -481,6 +490,7 @@ - freestyle rap - g-funk - gangsta rap + - glitch hop - golden age hip hop - hip hop soul - hip pop @@ -521,11 +531,14 @@ - west coast hip hop: - chicano rap - jerkin' + - austrian hip hop + - german hip hop - jazz: - asian american jazz - avant-garde jazz - bebop - boogie-woogie + - brass band - british dance band - chamber jazz - 
continental jazz @@ -568,14 +581,13 @@ - vocal jazz - west coast gypsy jazz - west coast jazz -- other: - - worldbeat +- kids music: + - kinderlieder - pop: - adult contemporary - arab pop - baroque pop - bubblegum pop - - chanson - christian pop - classical crossover - europop: @@ -640,6 +652,7 @@ - beat music - chinese rock - christian rock + - classic rock - dark cabaret - desert rock - experimental rock @@ -720,6 +733,7 @@ - art punk - christian punk - deathrock + - deutschpunk - folk punk: - celtic punk - gypsy punk @@ -762,5 +776,18 @@ - dancehall - ska: - 2 tone - - dub - rocksteady + - dub +- soundtrack: +- singer-songwriter: + - cantautorato + - cantautor + - cantautora + - chanson + - canción de autor + - nueva canción +- world: + - world dub + - world fusion + - worldbeat + diff --git a/beetsplug/lastgenre/genres.txt b/beetsplug/lastgenre/genres.txt index 28b1225c3..571b6f350 100644 --- a/beetsplug/lastgenre/genres.txt +++ b/beetsplug/lastgenre/genres.txt @@ -160,10 +160,14 @@ calypso jazz calypso-style baila campursari canatronic +canción de autor candombe canon canrock cantata +cantautorato +cantautor +cantautora cante chico cante jondo canterbury scene @@ -371,6 +375,7 @@ desert rock desi detroit blues detroit techno +dub techno dhamar dhimotiká dhrupad @@ -684,7 +689,7 @@ indo rock indonesian pop indoyíftika industrial death metal -industrial hip-hop +industrial hip hop industrial metal industrial music industrial musical @@ -1069,10 +1074,10 @@ nortec norteño northern soul nota -nu breaks nu jazz nu metal nu soul +nu skool breaks nueva canción nyatiti néo kýma diff --git a/beetsplug/lastimport.py b/beetsplug/lastimport.py index f59205b99..baa522d14 100644 --- a/beetsplug/lastimport.py +++ b/beetsplug/lastimport.py @@ -31,6 +31,7 @@ class LastImportPlugin(plugins.BeetsPlugin): "api_key": plugins.LASTFM_KEY, } ) + config["lastfm"]["user"].redact = True config["lastfm"]["api_key"].redact = True self.config.add( { @@ -69,7 +70,7 @@ class 
CustomUser(pylast.User): tuple with the total number of pages of results. Includes an MBID, if found. """ - doc = self._request(self.ws_prefix + "." + method, cacheable, params) + doc = self._request(f"{self.ws_prefix}.{method}", cacheable, params) toptracks_node = doc.getElementsByTagName("toptracks")[0] total_pages = int(toptracks_node.getAttribute("totalPages")) @@ -119,7 +120,7 @@ def import_lastfm(lib, log): if not user: raise ui.UserError("You must specify a user name for lastimport") - log.info("Fetching last.fm library for @{0}", user) + log.info("Fetching last.fm library for @{}", user) page_total = 1 page_current = 0 @@ -129,7 +130,7 @@ def import_lastfm(lib, log): # Iterate through a yet to be known page total count while page_current < page_total: log.info( - "Querying page #{0}{1}...", + "Querying page #{}{}...", page_current + 1, f"/{page_total}" if page_total > 1 else "", ) @@ -146,27 +147,27 @@ def import_lastfm(lib, log): unknown_total += unknown break else: - log.error("ERROR: unable to read page #{0}", page_current + 1) + log.error("ERROR: unable to read page #{}", page_current + 1) if retry < retry_limit: log.info( - "Retrying page #{0}... ({1}/{2} retry)", + "Retrying page #{}... ({}/{} retry)", page_current + 1, retry + 1, retry_limit, ) else: log.error( - "FAIL: unable to fetch page #{0}, ", - "tried {1} times", + "FAIL: unable to fetch page #{}, ", + "tried {} times", page_current, retry + 1, ) page_current += 1 log.info("... 
done!") - log.info("finished processing {0} song pages", page_total) - log.info("{0} unknown play-counts", unknown_total) - log.info("{0} play-counts imported", found_total) + log.info("finished processing {} song pages", page_total) + log.info("{} unknown play-counts", unknown_total) + log.info("{} play-counts imported", found_total) def fetch_tracks(user, page, limit): @@ -200,7 +201,7 @@ def process_tracks(lib, tracks, log): total = len(tracks) total_found = 0 total_fails = 0 - log.info("Received {0} tracks in this page, processing...", total) + log.info("Received {} tracks in this page, processing...", total) for num in range(0, total): song = None @@ -219,7 +220,7 @@ def process_tracks(lib, tracks, log): else None ) - log.debug("query: {0} - {1} ({2})", artist, title, album) + log.debug("query: {} - {} ({})", artist, title, album) # First try to query by musicbrainz's trackid if trackid: @@ -230,7 +231,7 @@ def process_tracks(lib, tracks, log): # If not, try just album/title if song is None: log.debug( - "no album match, trying by album/title: {0} - {1}", album, title + "no album match, trying by album/title: {} - {}", album, title ) query = dbcore.AndQuery( [ @@ -267,10 +268,9 @@ def process_tracks(lib, tracks, log): count = int(song.get("play_count", 0)) new_count = int(tracks[num].get("playcount", 1)) log.debug( - "match: {0} - {1} ({2}) " "updating: play_count {3} => {4}", - song.artist, - song.title, - song.album, + "match: {0.artist} - {0.title} ({0.album}) updating:" + " play_count {1} => {2}", + song, count, new_count, ) @@ -279,11 +279,11 @@ def process_tracks(lib, tracks, log): total_found += 1 else: total_fails += 1 - log.info(" - No match: {0} - {1} ({2})", artist, title, album) + log.info(" - No match: {} - {} ({})", artist, title, album) if total_fails > 0: log.info( - "Acquired {0}/{1} play-counts ({2} unknown)", + "Acquired {}/{} play-counts ({} unknown)", total_found, total, total_fails, diff --git a/beetsplug/limit.py b/beetsplug/limit.py 
index 0a13a78aa..aae99a717 100644 --- a/beetsplug/limit.py +++ b/beetsplug/limit.py @@ -25,7 +25,7 @@ from itertools import islice from beets.dbcore import FieldQuery from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, decargs, print_ +from beets.ui import Subcommand, print_ def lslimit(lib, opts, args): @@ -36,11 +36,10 @@ def lslimit(lib, opts, args): if (opts.head or opts.tail or 0) < 0: raise ValueError("Limit value must be non-negative") - query = decargs(args) if opts.album: - objs = lib.albums(query) + objs = lib.albums(args) else: - objs = lib.items(query) + objs = lib.items(args) if opts.head is not None: objs = islice(objs, opts.head) diff --git a/beetsplug/listenbrainz.py b/beetsplug/listenbrainz.py index 37a7920b9..fa73bd6b8 100644 --- a/beetsplug/listenbrainz.py +++ b/beetsplug/listenbrainz.py @@ -2,18 +2,18 @@ import datetime -import musicbrainzngs import requests from beets import config, ui from beets.plugins import BeetsPlugin from beetsplug.lastimport import process_tracks +from ._utils.musicbrainz import MusicBrainzAPIMixin -class ListenBrainzPlugin(BeetsPlugin): + +class ListenBrainzPlugin(MusicBrainzAPIMixin, BeetsPlugin): """A Beets plugin for interacting with ListenBrainz.""" - data_source = "ListenBrainz" ROOT = "http://api.listenbrainz.org/1/" def __init__(self): @@ -27,7 +27,7 @@ class ListenBrainzPlugin(BeetsPlugin): def commands(self): """Add beet UI commands to interact with ListenBrainz.""" lbupdate_cmd = ui.Subcommand( - "lbimport", help=f"Import {self.data_source} history" + "lbimport", help="Import ListenBrainz history" ) def func(lib, opts, args): @@ -42,14 +42,14 @@ class ListenBrainzPlugin(BeetsPlugin): unknown_total = 0 ls = self.get_listens() tracks = self.get_tracks_from_listens(ls) - log.info(f"Found {len(ls)} listens") + log.info("Found {} listens", len(ls)) if tracks: found, unknown = process_tracks(lib, tracks, log) found_total += found unknown_total += unknown log.info("... 
done!") - log.info("{0} unknown play-counts", unknown_total) - log.info("{0} play-counts imported", found_total) + log.info("{} unknown play-counts", unknown_total) + log.info("{} play-counts imported", found_total) def _make_request(self, url, params=None): """Makes a request to the ListenBrainz API.""" @@ -63,7 +63,7 @@ class ListenBrainzPlugin(BeetsPlugin): response.raise_for_status() return response.json() except requests.exceptions.RequestException as e: - self._log.debug(f"Invalid Search Error: {e}") + self._log.debug("Invalid Search Error: {}", e) return None def get_listens(self, min_ts=None, max_ts=None, count=None): @@ -130,17 +130,16 @@ class ListenBrainzPlugin(BeetsPlugin): ) return tracks - def get_mb_recording_id(self, track): + def get_mb_recording_id(self, track) -> str | None: """Returns the MusicBrainz recording ID for a track.""" - resp = musicbrainzngs.search_recordings( - query=track["track_metadata"].get("track_name"), - release=track["track_metadata"].get("release_name"), - strict=True, + results = self.mb_api.search( + "recording", + { + "": track["track_metadata"].get("track_name"), + "release": track["track_metadata"].get("release_name"), + }, ) - if resp.get("recording-count") == "1": - return resp.get("recording-list")[0].get("id") - else: - return None + return next((r["id"] for r in results), None) def get_playlists_createdfor(self, username): """Returns a list of playlists created by a user.""" @@ -156,7 +155,7 @@ class ListenBrainzPlugin(BeetsPlugin): playlist_info = playlist.get("playlist") if playlist_info.get("creator") == "listenbrainz": title = playlist_info.get("title") - self._log.debug(f"Playlist title: {title}") + self._log.debug("Playlist title: {}", title) playlist_type = ( "Exploration" if "Exploration" in title else "Jams" ) @@ -179,9 +178,7 @@ class ListenBrainzPlugin(BeetsPlugin): listenbrainz_playlists, key=lambda x: x["date"], reverse=True ) for playlist in listenbrainz_playlists: - self._log.debug( - f'Playlist: 
{playlist["type"]} - {playlist["date"]}' - ) + self._log.debug("Playlist: {0[type]} - {0[date]}", playlist) return listenbrainz_playlists def get_playlist(self, identifier): @@ -210,17 +207,16 @@ class ListenBrainzPlugin(BeetsPlugin): track_info = [] for track in tracks: identifier = track.get("identifier") - resp = musicbrainzngs.get_recording_by_id( + recording = self.mb_api.get_recording( identifier, includes=["releases", "artist-credits"] ) - recording = resp.get("recording") title = recording.get("title") artist_credit = recording.get("artist-credit", []) if artist_credit: artist = artist_credit[0].get("artist", {}).get("name") else: artist = None - releases = recording.get("release-list", []) + releases = recording.get("releases", []) if releases: album = releases[0].get("title") date = releases[0].get("date") diff --git a/beetsplug/loadext.py b/beetsplug/loadext.py index cc673dab2..f20580217 100644 --- a/beetsplug/loadext.py +++ b/beetsplug/loadext.py @@ -25,7 +25,7 @@ class LoadExtPlugin(BeetsPlugin): super().__init__() if not Database.supports_extensions: - self._log.warn( + self._log.warning( "loadext is enabled but the current SQLite " "installation does not support extensions" ) diff --git a/beetsplug/lyrics.py b/beetsplug/lyrics.py index cb48e2424..d6e14c175 100644 --- a/beetsplug/lyrics.py +++ b/beetsplug/lyrics.py @@ -16,7 +16,6 @@ from __future__ import annotations -import atexit import itertools import math import re @@ -25,10 +24,9 @@ from contextlib import contextmanager, suppress from dataclasses import dataclass from functools import cached_property, partial, total_ordering from html import unescape -from http import HTTPStatus from itertools import groupby from pathlib import Path -from typing import TYPE_CHECKING, Iterable, Iterator, NamedTuple +from typing import TYPE_CHECKING, NamedTuple from urllib.parse import quote, quote_plus, urlencode, urlparse import langdetect @@ -36,15 +34,20 @@ import requests from bs4 import BeautifulSoup from 
unidecode import unidecode -import beets from beets import plugins, ui -from beets.autotag.hooks import string_dist +from beets.autotag.distance import string_dist +from beets.util.config import sanitize_choices + +from ._utils.requests import HTTPNotFoundError, RequestHandler if TYPE_CHECKING: - from logging import Logger + from collections.abc import Iterable, Iterator + + import confuse from beets.importer import ImportTask from beets.library import Item, Library + from beets.logging import BeetsLogger as Logger from ._typing import ( GeniusAPI, @@ -54,41 +57,12 @@ if TYPE_CHECKING: TranslatorAPI, ) -USER_AGENT = f"beets/{beets.__version__}" INSTRUMENTAL_LYRICS = "[Instrumental]" -class NotFoundError(requests.exceptions.HTTPError): - pass - - class CaptchaError(requests.exceptions.HTTPError): - pass - - -class TimeoutSession(requests.Session): - def request(self, *args, **kwargs): - """Wrap the request method to raise an exception on HTTP errors.""" - kwargs.setdefault("timeout", 10) - r = super().request(*args, **kwargs) - if r.status_code == HTTPStatus.NOT_FOUND: - raise NotFoundError("HTTP Error: Not Found", response=r) - if 300 <= r.status_code < 400: - raise CaptchaError("Captcha is required", response=r) - - r.raise_for_status() - - return r - - -r_session = TimeoutSession() -r_session.headers.update({"User-Agent": USER_AGENT}) - - -@atexit.register -def close_session(): - """Close the requests session on shut down.""" - r_session.close() + def __init__(self, *args, **kwargs) -> None: + super().__init__("Captcha is required", *args, **kwargs) # Utilities. @@ -153,7 +127,7 @@ def search_pairs(item): # examples include (live), (remix), and (acoustic). r"(.+?)\s+[(].*[)]$", # Remove any featuring artists from the title - r"(.*?) {}".format(plugins.feat_tokens(for_artist=False)), + rf"(.*?) 
{plugins.feat_tokens(for_artist=False)}", # Remove part of title after colon ':' for songs with subtitles r"(.+?)\s*:.*", ] @@ -184,8 +158,17 @@ def slug(text: str) -> str: return re.sub(r"\W+", "-", unidecode(text).lower().strip()).strip("-") -class RequestHandler: - _log: beets.logging.Logger +class LyricsRequestHandler(RequestHandler): + _log: Logger + + def status_to_error(self, code: int) -> type[requests.HTTPError] | None: + if err := super().status_to_error(code): + return err + + if 300 <= code < 400: + return CaptchaError + + return None def debug(self, message: str, *args) -> None: """Log a debug message with the class name.""" @@ -206,7 +189,7 @@ class RequestHandler: return f"{url}?{urlencode(params)}" - def fetch_text( + def get_text( self, url: str, params: JSONDict | None = None, **kwargs ) -> str: """Return text / HTML data from the given URL. @@ -216,21 +199,21 @@ class RequestHandler: """ url = self.format_url(url, params) self.debug("Fetching HTML from {}", url) - r = r_session.get(url, **kwargs) + r = self.get(url, **kwargs) r.encoding = None return r.text - def fetch_json(self, url: str, params: JSONDict | None = None, **kwargs): + def get_json(self, url: str, params: JSONDict | None = None, **kwargs): """Return JSON data from the given URL.""" url = self.format_url(url, params) self.debug("Fetching JSON from {}", url) - return r_session.get(url, **kwargs).json() + return super().get_json(url, **kwargs) def post_json(self, url: str, params: JSONDict | None = None, **kwargs): """Send POST request and return JSON response.""" url = self.format_url(url, params) self.debug("Posting JSON to {}", url) - return r_session.post(url, **kwargs).json() + return self.request("post", url, **kwargs).json() @contextmanager def handle_request(self) -> Iterator[None]: @@ -249,8 +232,10 @@ class BackendClass(type): return cls.__name__.lower() -class Backend(RequestHandler, metaclass=BackendClass): - def __init__(self, config, log): +class 
Backend(LyricsRequestHandler, metaclass=BackendClass): + config: confuse.Subview + + def __init__(self, config: confuse.Subview, log: Logger) -> None: self._log = log self.config = config @@ -354,10 +339,10 @@ class LRCLib(Backend): if album: get_params["album_name"] = album - yield self.fetch_json(self.SEARCH_URL, params=base_params) + yield self.get_json(self.SEARCH_URL, params=base_params) - with suppress(NotFoundError): - yield [self.fetch_json(self.GET_URL, params=get_params)] + with suppress(HTTPNotFoundError): + yield [self.get_json(self.GET_URL, params=get_params)] @classmethod def pick_best_match(cls, lyrics: Iterable[LRCLyrics]) -> LRCLyrics | None: @@ -405,7 +390,7 @@ class MusiXmatch(Backend): def fetch(self, artist: str, title: str, *_) -> tuple[str, str] | None: url = self.build_url(artist, title) - html = self.fetch_text(url) + html = self.get_text(url) if "We detected that your IP is blocked" in html: self.warn("Failed: Blocked IP address") return None @@ -507,9 +492,9 @@ class SearchBackend(SoupMixin, Backend): # log out the candidate that did not make it but was close. 
# This may show a matching candidate with some noise in the name self.debug( - "({}, {}) does not match ({}, {}) but dist was close: {:.2f}", - result.artist, - result.title, + "({0.artist}, {0.title}) does not match ({1}, {2}) but dist" + " was close: {3:.2f}", + result, target_artist, target_title, max_dist, @@ -530,7 +515,7 @@ class SearchBackend(SoupMixin, Backend): def fetch(self, artist: str, title: str, *_) -> tuple[str, str] | None: """Fetch lyrics for the given artist and title.""" for result in self.get_results(artist, title): - if (html := self.fetch_text(result.url)) and ( + if (html := self.get_text(result.url)) and ( lyrics := self.scrape(html) ): return lyrics, result.url @@ -557,10 +542,10 @@ class Genius(SearchBackend): @cached_property def headers(self) -> dict[str, str]: - return {"Authorization": f'Bearer {self.config["genius_api_key"]}'} + return {"Authorization": f"Bearer {self.config['genius_api_key']}"} def search(self, artist: str, title: str) -> Iterable[SearchResult]: - search_data: GeniusAPI.Search = self.fetch_json( + search_data: GeniusAPI.Search = self.get_json( self.SEARCH_URL, params={"q": f"{artist} {title}"}, headers=self.headers, @@ -581,7 +566,7 @@ class Tekstowo(SearchBackend): """Fetch lyrics from Tekstowo.pl.""" BASE_URL = "https://www.tekstowo.pl" - SEARCH_URL = BASE_URL + "/szukaj,{}.html" + SEARCH_URL = f"{BASE_URL}/szukaj,{{}}.html" def build_url(self, artist, title): artistitle = f"{artist.title()} {title.title()}" @@ -589,7 +574,7 @@ class Tekstowo(SearchBackend): return self.SEARCH_URL.format(quote_plus(unidecode(artistitle))) def search(self, artist: str, title: str) -> Iterable[SearchResult]: - if html := self.fetch_text(self.build_url(title, artist)): + if html := self.get_text(self.build_url(title, artist)): soup = self.get_soup(html) for tag in soup.select("div[class=flex-group] > a[title*=' - ']"): artist, title = str(tag["title"]).split(" - ", 1) @@ -643,7 +628,7 @@ class Google(SearchBackend): re.IGNORECASE | 
re.VERBOSE, ) #: Split cleaned up URL title into artist and title parts. - URL_TITLE_PARTS_RE = re.compile(r" +(?:[ :|-]+|par|by) +") + URL_TITLE_PARTS_RE = re.compile(r" +(?:[ :|-]+|par|by) +|, ") SOURCE_DIST_FACTOR = {"www.azlyrics.com": 0.5, "www.songlyrics.com": 0.6} @@ -655,12 +640,12 @@ class Google(SearchBackend): html = Html.remove_ads(super().pre_process_html(html)) return Html.remove_formatting(Html.merge_paragraphs(html)) - def fetch_text(self, *args, **kwargs) -> str: + def get_text(self, *args, **kwargs) -> str: """Handle an error so that we can continue with the next URL.""" kwargs.setdefault("allow_redirects", False) with self.handle_request(): try: - return super().fetch_text(*args, **kwargs) + return super().get_text(*args, **kwargs) except CaptchaError: self.ignored_domains.add(urlparse(args[0]).netloc) raise @@ -701,8 +686,8 @@ class Google(SearchBackend): result_artist, result_title = "", parts[0] else: # sort parts by their similarity to the artist - parts.sort(key=lambda p: cls.get_part_dist(artist, title, p)) - result_artist, result_title = parts[0], " ".join(parts[1:]) + result_artist = min(parts, key=lambda p: string_dist(artist, p)) + result_title = min(parts, key=lambda p: string_dist(title, p)) return SearchResult(result_artist, result_title, item["link"]) @@ -716,7 +701,7 @@ class Google(SearchBackend): "excludeTerms": ", ".join(self.EXCLUDE_PAGES), } - data: GoogleCustomSearchAPI.Response = self.fetch_json( + data: GoogleCustomSearchAPI.Response = self.get_json( self.SEARCH_URL, params=params ) for item in data.get("items", []): @@ -741,11 +726,13 @@ class Google(SearchBackend): @dataclass -class Translator(RequestHandler): +class Translator(LyricsRequestHandler): TRANSLATE_URL = "https://api.cognitive.microsofttranslator.com/translate" LINE_PARTS_RE = re.compile(r"^(\[\d\d:\d\d.\d\d\]|) *(.*)$") SEPARATOR = " | " - remove_translations = partial(re.compile(r" / [^\n]+").sub, "") + remove_translations = staticmethod( + 
partial(re.compile(r" / [^\n]+").sub, "") + ) _log: Logger api_key: str @@ -837,15 +824,16 @@ class Translator(RequestHandler): lyrics_language = langdetect.detect(new_lyrics).upper() if lyrics_language == self.to_language: self.info( - "🔵 Lyrics are already in the target language {}", - self.to_language, + "🔵 Lyrics are already in the target language {.to_language}", + self, ) return new_lyrics if self.from_languages and lyrics_language not in self.from_languages: self.info( - "🔵 Configuration {} does not permit translating from {}", - self.from_languages, + "🔵 Configuration {.from_languages} does not permit translating" + " from {}", + self, lyrics_language, ) return new_lyrics @@ -853,7 +841,7 @@ class Translator(RequestHandler): lyrics, *url = new_lyrics.split("\n\nSource: ") with self.handle_request(): translated_lines = self.append_translations(lyrics.splitlines()) - self.info("🟢 Translated lyrics to {}", self.to_language) + self.info("🟢 Translated lyrics to {.to_language}", self) return "\n\nSource: ".join(["\n".join(translated_lines), *url]) @@ -913,17 +901,17 @@ class RestFiles: def write_artist(self, artist: str, items: Iterable[Item]) -> None: parts = [ - f'{artist}\n{"=" * len(artist)}', + f"{artist}\n{'=' * len(artist)}", ".. contents::\n :local:", ] for album, items in groupby(items, key=lambda i: i.album): - parts.append(f'{album}\n{"-" * len(album)}') + parts.append(f"{album}\n{'-' * len(album)}") parts.extend( part for i in items if (title := f":index:`{i.title.strip()}`") for part in ( - f'{title}\n{"~" * len(title)}', + f"{title}\n{'~' * len(title)}", textwrap.indent(i.lyrics, "| "), ) ) @@ -941,23 +929,23 @@ class RestFiles: d = self.directory text = f""" ReST files generated. 
to build, use one of: - sphinx-build -b html {d} {d/"html"} - sphinx-build -b epub {d} {d/"epub"} - sphinx-build -b latex {d} {d/"latex"} && make -C {d/"latex"} all-pdf + sphinx-build -b html {d} {d / "html"} + sphinx-build -b epub {d} {d / "epub"} + sphinx-build -b latex {d} {d / "latex"} && make -C {d / "latex"} all-pdf """ ui.print_(textwrap.dedent(text)) -class LyricsPlugin(RequestHandler, plugins.BeetsPlugin): +class LyricsPlugin(LyricsRequestHandler, plugins.BeetsPlugin): BACKEND_BY_NAME = { b.name: b for b in [LRCLib, Google, Genius, Tekstowo, MusiXmatch] } @cached_property def backends(self) -> list[Backend]: - user_sources = self.config["sources"].get() + user_sources = self.config["sources"].as_str_seq() - chosen = plugins.sanitize_choices(user_sources, self.BACKEND_BY_NAME) + chosen = sanitize_choices(user_sources, self.BACKEND_BY_NAME) if "google" in chosen and not self.config["google_API_key"].get(): self.warn("Disabling Google source: no API key configured.") chosen.remove("google") @@ -1089,7 +1077,7 @@ class LyricsPlugin(RequestHandler, plugins.BeetsPlugin): return if lyrics := self.find_lyrics(item): - self.info("🟢 Found lyrics: {0}", item) + self.info("🟢 Found lyrics: {}", item) if translator := self.translator: lyrics = translator.translate(lyrics, item.lyrics) else: diff --git a/beetsplug/mbcollection.py b/beetsplug/mbcollection.py index 1c010bf50..f89670dd3 100644 --- a/beetsplug/mbcollection.py +++ b/beetsplug/mbcollection.py @@ -13,48 +13,151 @@ # included in all copies or substantial portions of the Software. 
+from __future__ import annotations + import re +from dataclasses import dataclass, field +from functools import cached_property +from typing import TYPE_CHECKING, ClassVar -import musicbrainzngs +from requests.auth import HTTPDigestAuth -from beets import config, ui +from beets import __version__, config, ui from beets.plugins import BeetsPlugin from beets.ui import Subcommand -SUBMISSION_CHUNK_SIZE = 200 -FETCH_CHUNK_SIZE = 100 -UUID_REGEX = r"^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$" +from ._utils.musicbrainz import MusicBrainzAPI + +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator + + from requests import Response + + from beets.importer import ImportSession, ImportTask + from beets.library import Album, Library + + from ._typing import JSONDict + +UUID_PAT = re.compile(r"^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$") -def mb_call(func, *args, **kwargs): - """Call a MusicBrainz API function and catch exceptions.""" - try: - return func(*args, **kwargs) - except musicbrainzngs.AuthenticationError: - raise ui.UserError("authentication with MusicBrainz failed") - except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc: - raise ui.UserError(f"MusicBrainz API error: {exc}") - except musicbrainzngs.UsageError: - raise ui.UserError("MusicBrainz credentials missing") +@dataclass +class MusicBrainzUserAPI(MusicBrainzAPI): + """MusicBrainz API client with user authentication. + In order to retrieve private user collections and modify them, we need to + authenticate the requests with the user's MusicBrainz credentials. -def submit_albums(collection_id, release_ids): - """Add all of the release IDs to the indicated collection. Multiple - requests are made if there are many release IDs to submit. 
+ See documentation for authentication details: + https://musicbrainz.org/doc/MusicBrainz_API#Authentication + + Note that the documentation misleadingly states HTTP 'basic' authentication, + and I had to reverse-engineer musicbrainzngs to discover that it actually + uses HTTP 'digest' authentication. """ - for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE): - chunk = release_ids[i : i + SUBMISSION_CHUNK_SIZE] - mb_call(musicbrainzngs.add_releases_to_collection, collection_id, chunk) + auth: HTTPDigestAuth = field(init=False) -class MusicBrainzCollectionPlugin(BeetsPlugin): - def __init__(self): - super().__init__() + def __post_init__(self) -> None: + super().__post_init__() config["musicbrainz"]["pass"].redact = True - musicbrainzngs.auth( + self.auth = HTTPDigestAuth( config["musicbrainz"]["user"].as_str(), config["musicbrainz"]["pass"].as_str(), ) + + def request(self, *args, **kwargs) -> Response: + """Authenticate and include required client param in all requests.""" + kwargs.setdefault("params", {}) + kwargs["params"]["client"] = f"beets-{__version__}" + kwargs["auth"] = self.auth + return super().request(*args, **kwargs) + + def browse_collections(self) -> list[JSONDict]: + """Get all collections for the authenticated user.""" + return self._browse("collection") + + +@dataclass +class MBCollection: + """Representation of a user's MusicBrainz collection. + + Provides convenient, chunked operations for retrieving releases and updating + the collection via the MusicBrainz web API. Fetch and submission limits are + controlled by class-level constants to avoid oversized requests. 
+ """ + + SUBMISSION_CHUNK_SIZE: ClassVar[int] = 200 + FETCH_CHUNK_SIZE: ClassVar[int] = 100 + + data: JSONDict + mb_api: MusicBrainzUserAPI + + @property + def id(self) -> str: + """Unique identifier assigned to the collection by MusicBrainz.""" + return self.data["id"] + + @property + def release_count(self) -> int: + """Total number of releases recorded in the collection.""" + return self.data["release-count"] + + @property + def releases_url(self) -> str: + """Complete API endpoint URL for listing releases in this collection.""" + return f"{self.mb_api.api_root}/collection/{self.id}/releases" + + @property + def releases(self) -> list[JSONDict]: + """Retrieve all releases in the collection, fetched in successive pages. + + The fetch is performed in chunks and returns a flattened sequence of + release records. + """ + offsets = list(range(0, self.release_count, self.FETCH_CHUNK_SIZE)) + return [r for offset in offsets for r in self.get_releases(offset)] + + def get_releases(self, offset: int) -> list[JSONDict]: + """Fetch a single page of releases beginning at a given position.""" + return self.mb_api.get_json( + self.releases_url, + params={"limit": self.FETCH_CHUNK_SIZE, "offset": offset}, + )["releases"] + + @classmethod + def get_id_chunks(cls, id_list: list[str]) -> Iterator[list[str]]: + """Yield successive sublists of identifiers sized for safe submission. + + Splits a long sequence of identifiers into batches that respect the + service's submission limits to avoid oversized requests. 
+ """ + for i in range(0, len(id_list), cls.SUBMISSION_CHUNK_SIZE): + yield id_list[i : i + cls.SUBMISSION_CHUNK_SIZE] + + def add_releases(self, releases: list[str]) -> None: + """Add releases to the collection in batches.""" + for chunk in self.get_id_chunks(releases): + # Need to escape semicolons: https://github.com/psf/requests/issues/6990 + self.mb_api.put(f"{self.releases_url}/{'%3B'.join(chunk)}") + + def remove_releases(self, releases: list[str]) -> None: + """Remove releases from the collection in chunks.""" + for chunk in self.get_id_chunks(releases): + # Need to escape semicolons: https://github.com/psf/requests/issues/6990 + self.mb_api.delete(f"{self.releases_url}/{'%3B'.join(chunk)}") + + +def submit_albums(collection: MBCollection, release_ids): + """Add all of the release IDs to the indicated collection. Multiple + requests are made if there are many release IDs to submit. + """ + collection.add_releases(release_ids) + + +class MusicBrainzCollectionPlugin(BeetsPlugin): + def __init__(self) -> None: + super().__init__() self.config.add( { "auto": False, @@ -65,45 +168,32 @@ class MusicBrainzCollectionPlugin(BeetsPlugin): if self.config["auto"]: self.import_stages = [self.imported] - def _get_collection(self): - collections = mb_call(musicbrainzngs.get_collections) - if not collections["collection-list"]: + @cached_property + def mb_api(self) -> MusicBrainzUserAPI: + return MusicBrainzUserAPI() + + @cached_property + def collection(self) -> MBCollection: + if not (collections := self.mb_api.browse_collections()): raise ui.UserError("no collections exist for user") - # Get all collection IDs, avoiding event collections - collection_ids = [x["id"] for x in collections["collection-list"]] - if not collection_ids: - raise ui.UserError("No collection found.") + # Get all release collection IDs, avoiding event collections + if not ( + collection_by_id := { + c["id"]: c for c in collections if c["entity-type"] == "release" + } + ): + raise ui.UserError("No 
release collection found.") # Check that the collection exists so we can present a nice error - collection = self.config["collection"].as_str() - if collection: - if collection not in collection_ids: - raise ui.UserError( - "invalid collection ID: {}".format(collection) - ) - return collection + if collection_id := self.config["collection"].as_str(): + if not (collection := collection_by_id.get(collection_id)): + raise ui.UserError(f"invalid collection ID: {collection_id}") + else: + # No specified collection. Just return the first collection ID + collection = next(iter(collection_by_id.values())) - # No specified collection. Just return the first collection ID - return collection_ids[0] - - def _get_albums_in_collection(self, id): - def _fetch(offset): - res = mb_call( - musicbrainzngs.get_releases_in_collection, - id, - limit=FETCH_CHUNK_SIZE, - offset=offset, - )["collection"] - return [x["id"] for x in res["release-list"]], res["release-count"] - - offset = 0 - albums_in_collection, release_count = _fetch(offset) - for i in range(0, release_count, FETCH_CHUNK_SIZE): - albums_in_collection += _fetch(offset)[0] - offset += FETCH_CHUNK_SIZE - - return albums_in_collection + return MBCollection(collection, self.mb_api) def commands(self): mbupdate = Subcommand("mbupdate", help="Update MusicBrainz collection") @@ -118,45 +208,33 @@ class MusicBrainzCollectionPlugin(BeetsPlugin): mbupdate.func = self.update_collection return [mbupdate] - def remove_missing(self, collection_id, lib_albums): - lib_ids = {x.mb_albumid for x in lib_albums} - albums_in_collection = self._get_albums_in_collection(collection_id) - remove_me = list(set(albums_in_collection) - lib_ids) - for i in range(0, len(remove_me), FETCH_CHUNK_SIZE): - chunk = remove_me[i : i + FETCH_CHUNK_SIZE] - mb_call( - musicbrainzngs.remove_releases_from_collection, - collection_id, - chunk, - ) - - def update_collection(self, lib, opts, args): + def update_collection(self, lib: Library, opts, args) -> None: 
self.config.set_args(opts) remove_missing = self.config["remove"].get(bool) self.update_album_list(lib, lib.albums(), remove_missing) - def imported(self, session, task): + def imported(self, session: ImportSession, task: ImportTask) -> None: """Add each imported album to the collection.""" if task.is_album: - self.update_album_list(session.lib, [task.album]) + self.update_album_list( + session.lib, [task.album], remove_missing=False + ) - def update_album_list(self, lib, album_list, remove_missing=False): + def update_album_list( + self, lib: Library, albums: Iterable[Album], remove_missing: bool + ) -> None: """Update the MusicBrainz collection from a list of Beets albums""" - collection_id = self._get_collection() + collection = self.collection # Get a list of all the album IDs. - album_ids = [] - for album in album_list: - aid = album.mb_albumid - if aid: - if re.match(UUID_REGEX, aid): - album_ids.append(aid) - else: - self._log.info("skipping invalid MBID: {0}", aid) + album_ids = [id_ for a in albums if UUID_PAT.match(id_ := a.mb_albumid)] # Submit to MusicBrainz. - self._log.info("Updating MusicBrainz collection {0}...", collection_id) - submit_albums(collection_id, album_ids) + self._log.info("Updating MusicBrainz collection {}...", collection.id) + collection.add_releases(album_ids) if remove_missing: - self.remove_missing(collection_id, lib.albums()) + lib_ids = {x.mb_albumid for x in lib.albums()} + albums_in_collection = {r["id"] for r in collection.releases} + collection.remove_releases(list(albums_in_collection - lib_ids)) + self._log.info("...MusicBrainz collection updated.") diff --git a/beetsplug/mbpseudo.py b/beetsplug/mbpseudo.py new file mode 100644 index 000000000..30ef2e428 --- /dev/null +++ b/beetsplug/mbpseudo.py @@ -0,0 +1,349 @@ +# This file is part of beets. +# Copyright 2025, Alexis Sarda-Espinosa. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Adds pseudo-releases from MusicBrainz as candidates during import.""" + +from __future__ import annotations + +import itertools +from copy import deepcopy +from typing import TYPE_CHECKING, Any + +import mediafile +from typing_extensions import override + +from beets import config +from beets.autotag.distance import Distance, distance +from beets.autotag.hooks import AlbumInfo +from beets.autotag.match import assign_items +from beets.plugins import find_plugins +from beets.util.id_extractors import extract_release_id +from beetsplug.musicbrainz import ( + MusicBrainzPlugin, + _merge_pseudo_and_actual_album, + _preferred_alias, +) + +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + + from beets.autotag import AlbumMatch + from beets.library import Item + from beetsplug._typing import JSONDict + +_STATUS_PSEUDO = "Pseudo-Release" + + +class MusicBrainzPseudoReleasePlugin(MusicBrainzPlugin): + def __init__(self) -> None: + super().__init__() + + self.config.add( + { + "scripts": [], + "custom_tags_only": False, + "album_custom_tags": { + "album_transl": "album", + "album_artist_transl": "artist", + }, + "track_custom_tags": { + "title_transl": "title", + "artist_transl": "artist", + }, + } + ) + + self._scripts = self.config["scripts"].as_str_seq() + self._log.debug("Desired scripts: {0}", self._scripts) + + album_custom_tags = 
self.config["album_custom_tags"].get().keys() + track_custom_tags = self.config["track_custom_tags"].get().keys() + self._log.debug( + "Custom tags for albums and tracks: {0} + {1}", + album_custom_tags, + track_custom_tags, + ) + for custom_tag in album_custom_tags | track_custom_tags: + if not isinstance(custom_tag, str): + continue + + media_field = mediafile.MediaField( + mediafile.MP3DescStorageStyle(custom_tag), + mediafile.MP4StorageStyle( + f"----:com.apple.iTunes:{custom_tag}" + ), + mediafile.StorageStyle(custom_tag), + mediafile.ASFStorageStyle(custom_tag), + ) + try: + self.add_media_field(custom_tag, media_field) + except ValueError: + # ignore errors due to duplicates + pass + + self.register_listener("pluginload", self._on_plugins_loaded) + self.register_listener("album_matched", self._adjust_final_album_match) + + # noinspection PyMethodMayBeStatic + def _on_plugins_loaded(self): + for plugin in find_plugins(): + if isinstance(plugin, MusicBrainzPlugin) and not isinstance( + plugin, MusicBrainzPseudoReleasePlugin + ): + raise RuntimeError( + "The musicbrainz plugin should not be enabled together with" + " the mbpseudo plugin" + ) + + @override + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ) -> Iterable[AlbumInfo]: + if len(self._scripts) == 0: + yield from super().candidates(items, artist, album, va_likely) + else: + for album_info in super().candidates( + items, artist, album, va_likely + ): + if isinstance(album_info, PseudoAlbumInfo): + self._log.debug( + "Using {0} release for distance calculations for album {1}", + album_info.determine_best_ref(items), + album_info.album_id, + ) + yield album_info # first yield pseudo to give it priority + yield album_info.get_official_release() + else: + yield album_info + + @override + def album_info(self, release: JSONDict) -> AlbumInfo: + official_release = super().album_info(release) + + if release.get("status") == _STATUS_PSEUDO: + return 
official_release + + if (ids := self._intercept_mb_release(release)) and ( + album_id := self._extract_id(ids[0]) + ): + raw_pseudo_release = self.mb_api.get_release(album_id) + pseudo_release = super().album_info(raw_pseudo_release) + + if self.config["custom_tags_only"].get(bool): + self._replace_artist_with_alias( + raw_pseudo_release, pseudo_release + ) + self._add_custom_tags(official_release, pseudo_release) + return official_release + else: + return PseudoAlbumInfo( + pseudo_release=_merge_pseudo_and_actual_album( + pseudo_release, official_release + ), + official_release=official_release, + ) + else: + return official_release + + def _intercept_mb_release(self, data: JSONDict) -> list[str]: + album_id = data["id"] if "id" in data else None + if self._has_desired_script(data) or not isinstance(album_id, str): + return [] + + return [ + pr_id + for rel in data.get("release-relations", []) + if (pr_id := self._wanted_pseudo_release_id(album_id, rel)) + is not None + ] + + def _has_desired_script(self, release: JSONDict) -> bool: + if len(self._scripts) == 0: + return False + elif script := release.get("text-representation", {}).get("script"): + return script in self._scripts + else: + return False + + def _wanted_pseudo_release_id( + self, + album_id: str, + relation: JSONDict, + ) -> str | None: + if ( + len(self._scripts) == 0 + or relation.get("type", "") != "transl-tracklisting" + or relation.get("direction", "") != "forward" + or "release" not in relation + ): + return None + + release = relation["release"] + if "id" in release and self._has_desired_script(release): + self._log.debug( + "Adding pseudo-release {0} for main release {1}", + release["id"], + album_id, + ) + return release["id"] + else: + return None + + def _replace_artist_with_alias( + self, + raw_pseudo_release: JSONDict, + pseudo_release: AlbumInfo, + ): + """Use the pseudo-release's language to search for artist + alias if the user hasn't configured import languages.""" + + if 
len(config["import"]["languages"].as_str_seq()) > 0: + return + + lang = raw_pseudo_release.get("text-representation", {}).get("language") + artist_credits = raw_pseudo_release.get("release-group", {}).get( + "artist-credit", [] + ) + aliases = [ + artist_credit.get("artist", {}).get("aliases", []) + for artist_credit in artist_credits + ] + + if lang and len(lang) >= 2 and len(aliases) > 0: + locale = lang[0:2] + aliases_flattened = list(itertools.chain.from_iterable(aliases)) + self._log.debug( + "Using locale '{0}' to search aliases {1}", + locale, + aliases_flattened, + ) + if alias_dict := _preferred_alias(aliases_flattened, [locale]): + if alias := alias_dict.get("name"): + self._log.debug("Got alias '{0}'", alias) + pseudo_release.artist = alias + for track in pseudo_release.tracks: + track.artist = alias + + def _add_custom_tags( + self, + official_release: AlbumInfo, + pseudo_release: AlbumInfo, + ): + for tag_key, pseudo_key in ( + self.config["album_custom_tags"].get().items() + ): + official_release[tag_key] = pseudo_release[pseudo_key] + + track_custom_tags = self.config["track_custom_tags"].get().items() + for track, pseudo_track in zip( + official_release.tracks, pseudo_release.tracks + ): + for tag_key, pseudo_key in track_custom_tags: + track[tag_key] = pseudo_track[pseudo_key] + + def _adjust_final_album_match(self, match: AlbumMatch): + album_info = match.info + if isinstance(album_info, PseudoAlbumInfo): + self._log.debug( + "Switching {0} to pseudo-release source for final proposal", + album_info.album_id, + ) + album_info.use_pseudo_as_ref() + new_pairs, *_ = assign_items(match.items, album_info.tracks) + album_info.mapping = dict(new_pairs) + + if album_info.data_source == self.data_source: + album_info.data_source = "MusicBrainz" + + @override + def _extract_id(self, url: str) -> str | None: + return extract_release_id("MusicBrainz", url) + + +class PseudoAlbumInfo(AlbumInfo): + """This is a not-so-ugly hack. 
+ + We want the pseudo-release to result in a distance that is lower or equal to that of + the official release, otherwise it won't qualify as a good candidate. However, if + the input is in a script that's different from the pseudo-release (and we want to + translate/transliterate it in the library), it will receive unwanted penalties. + + This class is essentially a view of the ``AlbumInfo`` of both official and + pseudo-releases, where it's possible to change the details that are exposed to other + parts of the auto-tagger, enabling a "fair" distance calculation based on the + current input's script but still preferring the translation/transliteration in the + final proposal. + """ + + def __init__( + self, + pseudo_release: AlbumInfo, + official_release: AlbumInfo, + **kwargs, + ): + super().__init__(pseudo_release.tracks, **kwargs) + self.__dict__["_pseudo_source"] = True + self.__dict__["_official_release"] = official_release + for k, v in pseudo_release.items(): + if k not in kwargs: + self[k] = v + + def get_official_release(self) -> AlbumInfo: + return self.__dict__["_official_release"] + + def determine_best_ref(self, items: Sequence[Item]) -> str: + self.use_pseudo_as_ref() + pseudo_dist = self._compute_distance(items) + + self.use_official_as_ref() + official_dist = self._compute_distance(items) + + if official_dist < pseudo_dist: + self.use_official_as_ref() + return "official" + else: + self.use_pseudo_as_ref() + return "pseudo" + + def _compute_distance(self, items: Sequence[Item]) -> Distance: + mapping, _, _ = assign_items(items, self.tracks) + return distance(items, self, mapping) + + def use_pseudo_as_ref(self): + self.__dict__["_pseudo_source"] = True + + def use_official_as_ref(self): + self.__dict__["_pseudo_source"] = False + + def __getattr__(self, attr: str) -> Any: + # ensure we don't duplicate an official release's id, always return pseudo's + if self.__dict__["_pseudo_source"] or attr == "album_id": + return super().__getattr__(attr) + 
else: + return self.__dict__["_official_release"].__getattr__(attr) + + def __deepcopy__(self, memo): + cls = self.__class__ + result = cls.__new__(cls) + + memo[id(self)] = result + result.__dict__.update(self.__dict__) + for k, v in self.items(): + result[k] = deepcopy(v, memo) + + return result diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py index d215e616c..f6d197256 100644 --- a/beetsplug/mbsubmit.py +++ b/beetsplug/mbsubmit.py @@ -26,8 +26,7 @@ import subprocess from beets import ui from beets.autotag import Recommendation from beets.plugins import BeetsPlugin -from beets.ui.commands import PromptChoice -from beets.util import displayable_path +from beets.util import PromptChoice, displayable_path from beetsplug.info import print_data @@ -73,7 +72,7 @@ class MBSubmitPlugin(BeetsPlugin): subprocess.Popen([picard_path] + paths) self._log.info("launched picard from\n{}", picard_path) except OSError as exc: - self._log.error(f"Could not open picard, got error:\n{exc}") + self._log.error("Could not open picard, got error:\n{}", exc) def print_tracks(self, session, task): for i in sorted(task.items, key=lambda i: i.track): @@ -86,7 +85,7 @@ class MBSubmitPlugin(BeetsPlugin): ) def func(lib, opts, args): - items = lib.items(ui.decargs(args)) + items = lib.items(args) self._mbsubmit(items) mbsubmit_cmd.func = func diff --git a/beetsplug/mbsync.py b/beetsplug/mbsync.py index 2e62b7b7e..5b74b67c9 100644 --- a/beetsplug/mbsync.py +++ b/beetsplug/mbsync.py @@ -16,8 +16,7 @@ from collections import defaultdict -from beets import autotag, library, ui, util -from beets.autotag import hooks +from beets import autotag, library, metadata_plugins, ui, util from beets.plugins import BeetsPlugin, apply_item_changes @@ -64,10 +63,9 @@ class MBSyncPlugin(BeetsPlugin): move = ui.should_move(opts.move) pretend = opts.pretend write = ui.should_write(opts.write) - query = ui.decargs(args) - self.singletons(lib, query, move, pretend, write) - self.albums(lib, query, move, 
pretend, write) + self.singletons(lib, args, move, pretend, write) + self.albums(lib, args, move, pretend, write) def singletons(self, lib, query, move, pretend, write): """Retrieve and apply info from the autotagger for items matched by @@ -80,7 +78,9 @@ class MBSyncPlugin(BeetsPlugin): ) continue - if not (track_info := hooks.track_for_id(item.mb_trackid)): + if not ( + track_info := metadata_plugins.track_for_id(item.mb_trackid) + ): self._log.info( "Recording ID not found: {0.mb_trackid} for track {0}", item ) @@ -101,7 +101,9 @@ class MBSyncPlugin(BeetsPlugin): self._log.info("Skipping album with no mb_albumid: {}", album) continue - if not (album_info := hooks.album_for_id(album.mb_albumid)): + if not ( + album_info := metadata_plugins.album_for_id(album.mb_albumid) + ): self._log.info( "Release ID {0.mb_albumid} not found for album {0}", album ) @@ -119,18 +121,20 @@ class MBSyncPlugin(BeetsPlugin): # Construct a track mapping according to MBIDs (release track MBIDs # first, if available, and recording MBIDs otherwise). This should # work for albums that have missing or extra tracks. - mapping = {} + item_info_pairs = [] items = list(album.items()) for item in items: if ( item.mb_releasetrackid and item.mb_releasetrackid in releasetrack_index ): - mapping[item] = releasetrack_index[item.mb_releasetrackid] + item_info_pairs.append( + (item, releasetrack_index[item.mb_releasetrackid]) + ) else: candidates = track_index[item.mb_trackid] if len(candidates) == 1: - mapping[item] = candidates[0] + item_info_pairs.append((item, candidates[0])) else: # If there are multiple copies of a recording, they are # disambiguated using their disc and track number. @@ -139,13 +143,13 @@ class MBSyncPlugin(BeetsPlugin): c.medium_index == item.track and c.medium == item.disc ): - mapping[item] = c + item_info_pairs.append((item, c)) break # Apply. 
self._log.debug("applying changes to {}", album) with lib.transaction(): - autotag.apply_metadata(album_info, mapping) + autotag.apply_metadata(album_info, item_info_pairs) changed = False # Find any changed item to apply changes to album. any_changed_item = items[0] diff --git a/beetsplug/metasync/__init__.py b/beetsplug/metasync/__init__.py index 2466efe54..d4e31851e 100644 --- a/beetsplug/metasync/__init__.py +++ b/beetsplug/metasync/__init__.py @@ -49,7 +49,7 @@ def load_meta_sources(): meta_sources = {} for module_path, class_name in SOURCES.items(): - module = import_module(METASYNC_MODULE + "." + module_path) + module = import_module(f"{METASYNC_MODULE}.{module_path}") meta_sources[class_name.lower()] = getattr(module, class_name) return meta_sources @@ -97,7 +97,6 @@ class MetaSyncPlugin(BeetsPlugin): def func(self, lib, opts, args): """Command handler for the metasync function.""" pretend = opts.pretend - query = ui.decargs(args) sources = [] for source in opts.sources: @@ -106,7 +105,7 @@ class MetaSyncPlugin(BeetsPlugin): sources = sources or self.config["source"].as_str_seq() meta_source_instances = {} - items = lib.items(query) + items = lib.items(args) # Avoid needlessly instantiating meta sources (can be expensive) if not items: @@ -118,13 +117,13 @@ class MetaSyncPlugin(BeetsPlugin): try: cls = META_SOURCES[player] except KeyError: - self._log.error("Unknown metadata source '{}'".format(player)) + self._log.error("Unknown metadata source '{}'", player) try: meta_source_instances[player] = cls(self.config, self._log) except (ImportError, ConfigValueError) as e: self._log.error( - f"Failed to instantiate metadata source {player!r}: {e}" + "Failed to instantiate metadata source {!r}: {}", player, e ) # Avoid needlessly iterating over items diff --git a/beetsplug/metasync/amarok.py b/beetsplug/metasync/amarok.py index f8dcbe3f3..47e6a1a65 100644 --- a/beetsplug/metasync/amarok.py +++ b/beetsplug/metasync/amarok.py @@ -20,7 +20,6 @@ from time import 
mktime from xml.sax.saxutils import quoteattr from beets.dbcore import types -from beets.library import DateType from beets.util import displayable_path from beetsplug.metasync import MetaSource @@ -41,15 +40,16 @@ class Amarok(MetaSource): "amarok_score": types.FLOAT, "amarok_uid": types.STRING, "amarok_playcount": types.INTEGER, - "amarok_firstplayed": DateType(), - "amarok_lastplayed": DateType(), + "amarok_firstplayed": types.DATE, + "amarok_lastplayed": types.DATE, } - query_xml = '<query version="1.0"> \ - <filters> \ - <and><include field="filename" value=%s /></and> \ - </filters> \ - </query>' + query_xml = """ + <query version="1.0"> + <filters> + <and><include field="filename" value={} /></and> + </filters> + </query>""" def __init__(self, config, log): super().__init__(config, log) @@ -69,7 +69,7 @@ class Amarok(MetaSource): # of the result set. So query for the filename and then try to match # the correct item from the results we get back results = self.collection.Query( - self.query_xml % quoteattr(basename(path)) + self.query_xml.format(quoteattr(basename(path))) ) for result in results: if result["xesam:url"] != path: diff --git a/beetsplug/metasync/itunes.py b/beetsplug/metasync/itunes.py index 02f592fdc..6f441ef8b 100644 --- a/beetsplug/metasync/itunes.py +++ b/beetsplug/metasync/itunes.py @@ -26,7 +26,6 @@ from confuse import ConfigValueError from beets import util from beets.dbcore import types -from beets.library import DateType from beets.util import bytestring_path, syspath from beetsplug.metasync import MetaSource @@ -63,9 +62,9 @@ class Itunes(MetaSource): "itunes_rating": types.INTEGER, # 0..100 scale "itunes_playcount": types.INTEGER, "itunes_skipcount": types.INTEGER, - "itunes_lastplayed": DateType(), - "itunes_lastskipped": DateType(), - "itunes_dateadded": DateType(), + "itunes_lastplayed": types.DATE, + "itunes_lastskipped": types.DATE, + "itunes_dateadded": types.DATE, } def __init__(self, config, log): @@ -77,12 +76,12 @@ class 
Itunes(MetaSource): library_path = config["itunes"]["library"].as_filename() try: - self._log.debug(f"loading iTunes library from {library_path}") + self._log.debug("loading iTunes library from {}", library_path) with create_temporary_copy(library_path) as library_copy: with open(library_copy, "rb") as library_copy_f: raw_library = plistlib.load(library_copy_f) except OSError as e: - raise ConfigValueError("invalid iTunes library: " + e.strerror) + raise ConfigValueError(f"invalid iTunes library: {e.strerror}") except Exception: # It's likely the user configured their '.itl' library (<> xml) if os.path.splitext(library_path)[1].lower() != ".xml": @@ -92,7 +91,7 @@ class Itunes(MetaSource): ) else: hint = "" - raise ConfigValueError("invalid iTunes library" + hint) + raise ConfigValueError(f"invalid iTunes library{hint}") # Make the iTunes library queryable using the path self.collection = { @@ -105,7 +104,7 @@ class Itunes(MetaSource): result = self.collection.get(util.bytestring_path(item.path).lower()) if not result: - self._log.warning(f"no iTunes match found for {item}") + self._log.warning("no iTunes match found for {}", item) return item.itunes_rating = result.get("Rating") diff --git a/beetsplug/missing.py b/beetsplug/missing.py index ccaa65320..63a7bae22 100644 --- a/beetsplug/missing.py +++ b/beetsplug/missing.py @@ -18,15 +18,15 @@ from collections import defaultdict from collections.abc import Iterator -import musicbrainzngs -from musicbrainzngs.musicbrainz import MusicBrainzError +import requests -from beets import config -from beets.autotag import hooks +from beets import config, metadata_plugins from beets.dbcore import types from beets.library import Album, Item, Library from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, decargs, print_ +from beets.ui import Subcommand, print_ + +from ._utils.musicbrainz import MusicBrainzAPIMixin MB_ARTIST_QUERY = r"mb_albumartistid::^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$" @@ -86,7 +86,7 @@ def 
_item(track_info, album_info, album_id): ) -class MissingPlugin(BeetsPlugin): +class MissingPlugin(MusicBrainzAPIMixin, BeetsPlugin): """List missing tracks""" album_types = { @@ -136,7 +136,7 @@ class MissingPlugin(BeetsPlugin): albms = self.config["album"].get() helper = self._missing_albums if albms else self._missing_tracks - helper(lib, decargs(args)) + helper(lib, args) self._command.func = _miss return [self._command] @@ -190,19 +190,19 @@ class MissingPlugin(BeetsPlugin): calculating_total = self.config["total"].get() for (artist, artist_id), album_ids in album_ids_by_artist.items(): try: - resp = musicbrainzngs.browse_release_groups(artist=artist_id) - except MusicBrainzError as err: + resp = self.mb_api.browse_release_groups(artist=artist_id) + except requests.exceptions.RequestException: self._log.info( - "Couldn't fetch info for artist '{}' ({}) - '{}'", + "Couldn't fetch info for artist '{}' ({})", artist, artist_id, - err, + exc_info=True, ) continue missing_titles = [ f"{artist} - {rg['title']}" - for rg in resp["release-group-list"] + for rg in resp if rg["id"] not in album_ids ] @@ -223,12 +223,12 @@ class MissingPlugin(BeetsPlugin): item_mbids = {x.mb_trackid for x in album.items()} # fetch missing items # TODO: Implement caching that without breaking other stuff - if album_info := hooks.album_for_id(album.mb_albumid): + if album_info := metadata_plugins.album_for_id(album.mb_albumid): for track_info in album_info.tracks: if track_info.track_id not in item_mbids: self._log.debug( - "track {0} in album {1}", - track_info.track_id, - album_info.album_id, + "track {.track_id} in album {.album_id}", + track_info, + album_info, ) yield _item(track_info, album_info, album.id) diff --git a/beetsplug/mpdstats.py b/beetsplug/mpdstats.py index 6d4c269d1..0a3e1de02 100644 --- a/beetsplug/mpdstats.py +++ b/beetsplug/mpdstats.py @@ -18,14 +18,16 @@ import time import mpd -from beets import config, library, plugins, ui +from beets import config, plugins, ui 
from beets.dbcore import types +from beets.dbcore.query import PathQuery from beets.util import displayable_path # If we lose the connection, how many times do we want to retry and how # much time should we wait between retries? RETRIES = 10 RETRY_INTERVAL = 5 +DUPLICATE_PLAY_THRESHOLD = 10.0 mpd_config = config["mpd"] @@ -49,8 +51,8 @@ class MPDClientWrapper: if not self.strip_path.endswith("/"): self.strip_path += "/" - self._log.debug("music_directory: {0}", self.music_directory) - self._log.debug("strip_path: {0}", self.strip_path) + self._log.debug("music_directory: {.music_directory}", self) + self._log.debug("strip_path: {.strip_path}", self) self.client = mpd.MPDClient() @@ -62,7 +64,7 @@ class MPDClientWrapper: if host[0] in ["/", "~"]: host = os.path.expanduser(host) - self._log.info("connecting to {0}:{1}", host, port) + self._log.info("connecting to {}:{}", host, port) try: self.client.connect(host, port) except OSError as e: @@ -87,7 +89,7 @@ class MPDClientWrapper: try: return getattr(self.client, command)() except (OSError, mpd.ConnectionError) as err: - self._log.error("{0}", err) + self._log.error("{}", err) if retries <= 0: # if we exited without breaking, we couldn't reconnect in time :( @@ -121,7 +123,7 @@ class MPDClientWrapper: result = os.path.join(self.music_directory, file) else: result = entry["file"] - self._log.debug("returning: {0}", result) + self._log.debug("returning: {}", result) return result, entry.get("id") def status(self): @@ -142,7 +144,9 @@ class MPDStats: self.do_rating = mpd_config["rating"].get(bool) self.rating_mix = mpd_config["rating_mix"].get(float) - self.time_threshold = 10.0 # TODO: maybe add config option? 
+ self.played_ratio_threshold = mpd_config["played_ratio_threshold"].get( + float + ) self.now_playing = None self.mpd = MPDClientWrapper(log) @@ -160,12 +164,12 @@ class MPDStats: def get_item(self, path): """Return the beets item related to path.""" - query = library.PathQuery("path", path) + query = PathQuery("path", path) item = self.lib.items(query).get() if item: return item else: - self._log.info("item not found: {0}", displayable_path(path)) + self._log.info("item not found: {}", displayable_path(path)) def update_item(self, item, attribute, value=None, increment=None): """Update the beets item. Set attribute to value or increment the value @@ -184,10 +188,10 @@ class MPDStats: item.store() self._log.debug( - "updated: {0} = {1} [{2}]", + "updated: {} = {} [{.filepath}]", attribute, item[attribute], - displayable_path(item.path), + item, ) def update_rating(self, item, skipped): @@ -215,10 +219,8 @@ class MPDStats: Returns whether the change was manual (skipped previous song or not) """ - diff = abs(song["remaining"] - (time.time() - song["started"])) - - skipped = diff >= self.time_threshold - + elapsed = song["elapsed_at_start"] + (time.time() - song["started"]) + skipped = elapsed / song["duration"] < self.played_ratio_threshold if skipped: self.handle_skipped(song) else: @@ -232,12 +234,12 @@ class MPDStats: def handle_played(self, song): """Updates the play count of a song.""" self.update_item(song["beets_item"], "play_count", increment=1) - self._log.info("played {0}", displayable_path(song["path"])) + self._log.info("played {}", displayable_path(song["path"])) def handle_skipped(self, song): """Updates the skip count of a song.""" self.update_item(song["beets_item"], "skip_count", increment=1) - self._log.info("skipped {0}", displayable_path(song["path"])) + self._log.info("skipped {}", displayable_path(song["path"])) def on_stop(self, status): self._log.info("stop") @@ -255,13 +257,10 @@ class MPDStats: def on_play(self, status): path, songid = 
self.mpd.currentsong() - if not path: return played, duration = map(int, status["time"].split(":", 1)) - remaining = duration - played - if self.now_playing: if self.now_playing["path"] != path: self.handle_song_change(self.now_playing) @@ -272,22 +271,23 @@ class MPDStats: # after natural song start. diff = abs(time.time() - self.now_playing["started"]) - if diff <= self.time_threshold: + if diff <= DUPLICATE_PLAY_THRESHOLD: return if self.now_playing["path"] == path and played == 0: self.handle_song_change(self.now_playing) if is_url(path): - self._log.info("playing stream {0}", displayable_path(path)) + self._log.info("playing stream {}", displayable_path(path)) self.now_playing = None return - self._log.info("playing {0}", displayable_path(path)) + self._log.info("playing {}", displayable_path(path)) self.now_playing = { "started": time.time(), - "remaining": remaining, + "elapsed_at_start": played, + "duration": duration, "path": path, "id": songid, "beets_item": self.get_item(path), @@ -307,12 +307,12 @@ class MPDStats: if "player" in events: status = self.mpd.status() - handler = getattr(self, "on_" + status["state"], None) + handler = getattr(self, f"on_{status['state']}", None) if handler: handler(status) else: - self._log.debug('unhandled status "{0}"', status) + self._log.debug('unhandled status "{}"', status) events = self.mpd.events() @@ -321,7 +321,7 @@ class MPDStatsPlugin(plugins.BeetsPlugin): item_types = { "play_count": types.INTEGER, "skip_count": types.INTEGER, - "last_played": library.DateType(), + "last_played": types.DATE, "rating": types.FLOAT, } @@ -336,6 +336,7 @@ class MPDStatsPlugin(plugins.BeetsPlugin): "host": os.environ.get("MPD_HOST", "localhost"), "port": int(os.environ.get("MPD_PORT", 6600)), "password": "", + "played_ratio_threshold": 0.85, } ) mpd_config["password"].redact = True diff --git a/beetsplug/mpdupdate.py b/beetsplug/mpdupdate.py index cb53afaa5..5d8fc598b 100644 --- a/beetsplug/mpdupdate.py +++ b/beetsplug/mpdupdate.py 
@@ -101,8 +101,8 @@ class MPDUpdatePlugin(BeetsPlugin): try: s = BufferedSocket(host, port) - except OSError as e: - self._log.warning("MPD connection failed: {0}", str(e.strerror)) + except OSError: + self._log.warning("MPD connection failed", exc_info=True) return resp = s.readline() @@ -111,7 +111,7 @@ class MPDUpdatePlugin(BeetsPlugin): return if password: - s.send(b'password "%s"\n' % password.encode("utf8")) + s.send(f'password "{password}"\n'.encode()) resp = s.readline() if b"OK" not in resp: self._log.warning("Authentication failed: {0!r}", resp) diff --git a/beetsplug/musicbrainz.py b/beetsplug/musicbrainz.py new file mode 100644 index 000000000..137189cdc --- /dev/null +++ b/beetsplug/musicbrainz.py @@ -0,0 +1,837 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Searches for albums in the MusicBrainz database.""" + +from __future__ import annotations + +from collections import Counter +from contextlib import suppress +from functools import cached_property +from itertools import product +from typing import TYPE_CHECKING, Any +from urllib.parse import urljoin + +from confuse.exceptions import NotFoundError + +import beets +import beets.autotag.hooks +from beets import config, plugins, util +from beets.metadata_plugins import MetadataSourcePlugin +from beets.util.deprecation import deprecate_for_user +from beets.util.id_extractors import extract_release_id + +from ._utils.musicbrainz import MusicBrainzAPIMixin +from ._utils.requests import HTTPNotFoundError + +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + from typing import Literal + + from beets.library import Item + + from ._typing import JSONDict + +VARIOUS_ARTISTS_ID = "89ad4ac3-39f7-470e-963a-56509c546377" + +BASE_URL = "https://musicbrainz.org/" + +SKIPPED_TRACKS = ["[data track]"] + +FIELDS_TO_MB_KEYS = { + "barcode": "barcode", + "catalognum": "catno", + "country": "country", + "label": "label", + "media": "format", + "year": "date", +} + + +RELEASE_INCLUDES = [ + "artists", + "media", + "recordings", + "release-groups", + "labels", + "artist-credits", + "aliases", + "recording-level-rels", + "work-rels", + "work-level-rels", + "artist-rels", + "isrcs", + "url-rels", + "release-rels", + "genres", + "tags", +] + +TRACK_INCLUDES = [ + "artists", + "aliases", + "isrcs", + "work-level-rels", + "artist-rels", +] + +BROWSE_INCLUDES = [ + "artist-credits", + "work-rels", + "artist-rels", + "recording-rels", + "release-rels", +] +BROWSE_CHUNKSIZE = 100 +BROWSE_MAXTRACKS = 500 + + +def _preferred_alias( + aliases: list[JSONDict], languages: list[str] | None = None +) -> JSONDict | None: + """Given a list of alias structures for an artist credit, select + and return the user's preferred alias or None if no matching + """ + if not aliases: + return 
None + + # Only consider aliases that have locales set. + valid_aliases = [a for a in aliases if "locale" in a] + + # Get any ignored alias types and lower case them to prevent case issues + ignored_alias_types = config["import"]["ignored_alias_types"].as_str_seq() + ignored_alias_types = [a.lower() for a in ignored_alias_types] + + # Search configured locales in order. + if languages is None: + languages = config["import"]["languages"].as_str_seq() + + for locale in languages: + # Find matching primary aliases for this locale that are not + # being ignored + matches = [] + for alias in valid_aliases: + if ( + alias["locale"] == locale + and alias.get("primary") + and (alias.get("type") or "").lower() not in ignored_alias_types + ): + matches.append(alias) + + # Skip to the next locale if we have no matches + if not matches: + continue + + return matches[0] + + return None + + +def _multi_artist_credit( + credit: list[JSONDict], include_join_phrase: bool +) -> tuple[list[str], list[str], list[str]]: + """Given a list representing an ``artist-credit`` block, accumulate + data into a triple of joined artist name lists: canonical, sort, and + credit. + """ + artist_parts = [] + artist_sort_parts = [] + artist_credit_parts = [] + for el in credit: + alias = _preferred_alias(el["artist"].get("aliases", ())) + + # An artist. + if alias: + cur_artist_name = alias["name"] + else: + cur_artist_name = el["artist"]["name"] + artist_parts.append(cur_artist_name) + + # Artist sort name. + if alias: + artist_sort_parts.append(alias["sort-name"]) + elif "sort-name" in el["artist"]: + artist_sort_parts.append(el["artist"]["sort-name"]) + else: + artist_sort_parts.append(cur_artist_name) + + # Artist credit. 
+ if "name" in el: + artist_credit_parts.append(el["name"]) + else: + artist_credit_parts.append(cur_artist_name) + + if include_join_phrase and (joinphrase := el.get("joinphrase")): + artist_parts.append(joinphrase) + artist_sort_parts.append(joinphrase) + artist_credit_parts.append(joinphrase) + + return ( + artist_parts, + artist_sort_parts, + artist_credit_parts, + ) + + +def track_url(trackid: str) -> str: + return urljoin(BASE_URL, f"recording/{trackid}") + + +def _flatten_artist_credit(credit: list[JSONDict]) -> tuple[str, str, str]: + """Given a list representing an ``artist-credit`` block, flatten the + data into a triple of joined artist name strings: canonical, sort, and + credit. + """ + artist_parts, artist_sort_parts, artist_credit_parts = _multi_artist_credit( + credit, include_join_phrase=True + ) + return ( + "".join(artist_parts), + "".join(artist_sort_parts), + "".join(artist_credit_parts), + ) + + +def _artist_ids(credit: list[JSONDict]) -> list[str]: + """ + Given a list representing an ``artist-credit``, + return a list of artist IDs + """ + artist_ids: list[str] = [] + for el in credit: + if isinstance(el, dict): + artist_ids.append(el["artist"]["id"]) + + return artist_ids + + +def _get_related_artist_names(relations, relation_type): + """Given a list representing the artist relationships extract the names of + the remixers and concatenate them. + """ + related_artists = [] + + for relation in relations: + if relation["type"] == relation_type: + related_artists.append(relation["artist"]["name"]) + + return ", ".join(related_artists) + + +def album_url(albumid: str) -> str: + return urljoin(BASE_URL, f"release/{albumid}") + + +def _preferred_release_event( + release: dict[str, Any], +) -> tuple[str | None, str | None]: + """Given a release, select and return the user's preferred release + event as a tuple of (country, release_date). Fall back to the + default release event if a preferred event is not found. 
+ """ + preferred_countries: Sequence[str] = config["match"]["preferred"][ + "countries" + ].as_str_seq() + + for country in preferred_countries: + for event in release.get("release-events", {}): + try: + if area := event.get("area"): + if country in area["iso-3166-1-codes"]: + return country, event["date"] + except KeyError: + pass + + return release.get("country"), release.get("date") + + +def _set_date_str( + info: beets.autotag.hooks.AlbumInfo, + date_str: str, + original: bool = False, +): + """Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo + object, set the object's release date fields appropriately. If + `original`, then set the original_year, etc., fields. + """ + if date_str: + date_parts = date_str.split("-") + for key in ("year", "month", "day"): + if date_parts: + date_part = date_parts.pop(0) + try: + date_num = int(date_part) + except ValueError: + continue + + if original: + key = f"original_{key}" + setattr(info, key, date_num) + + +def _merge_pseudo_and_actual_album( + pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo +) -> beets.autotag.hooks.AlbumInfo: + """ + Merges a pseudo release with its actual release. + + This implementation is naive, it doesn't overwrite fields, + like status or ids. + + According to the ticket PICARD-145, the main release id should be used. + But the ticket has been in limbo since over a decade now. + It also suggests the introduction of the tag `musicbrainz_pseudoreleaseid`, + but as of this field can't be found in any official Picard docs, + hence why we did not implement that for now. 
+ """ + merged = pseudo.copy() + from_actual = { + k: actual[k] + for k in [ + "media", + "mediums", + "country", + "catalognum", + "year", + "month", + "day", + "original_year", + "original_month", + "original_day", + "label", + "barcode", + "asin", + "style", + "genre", + ] + } + merged.update(from_actual) + return merged + + +class MusicBrainzPlugin(MusicBrainzAPIMixin, MetadataSourcePlugin): + @cached_property + def genres_field(self) -> str: + return f"{self.config['genres_tag'].as_choice(['genre', 'tag'])}s" + + def __init__(self): + """Set up the python-musicbrainz-ngs module according to settings + from the beets configuration. This should be called at startup. + """ + super().__init__() + self.config.add( + { + "genres": False, + "genres_tag": "genre", + "external_ids": { + "discogs": False, + "bandcamp": False, + "spotify": False, + "deezer": False, + "tidal": False, + }, + "extra_tags": [], + }, + ) + # TODO: Remove in 3.0.0 + with suppress(NotFoundError): + self.config["search_limit"] = self.config["match"][ + "searchlimit" + ].get() + deprecate_for_user( + self._log, + "'musicbrainz.searchlimit' configuration option", + "'musicbrainz.search_limit'", + ) + + def track_info( + self, + recording: JSONDict, + index: int | None = None, + medium: int | None = None, + medium_index: int | None = None, + medium_total: int | None = None, + ) -> beets.autotag.hooks.TrackInfo: + """Translates a MusicBrainz recording result dictionary into a beets + ``TrackInfo`` object. Three parameters are optional and are used + only for tracks that appear on releases (non-singletons): ``index``, + the overall track number; ``medium``, the disc number; + ``medium_index``, the track's index on its medium; ``medium_total``, + the number of tracks on the medium. Each number is a 1-based index. 
+ """ + info = beets.autotag.hooks.TrackInfo( + title=recording["title"], + track_id=recording["id"], + index=index, + medium=medium, + medium_index=medium_index, + medium_total=medium_total, + data_source=self.data_source, + data_url=track_url(recording["id"]), + ) + + if recording.get("artist-credit"): + # Get the artist names. + ( + info.artist, + info.artist_sort, + info.artist_credit, + ) = _flatten_artist_credit(recording["artist-credit"]) + + ( + info.artists, + info.artists_sort, + info.artists_credit, + ) = _multi_artist_credit( + recording["artist-credit"], include_join_phrase=False + ) + + info.artists_ids = _artist_ids(recording["artist-credit"]) + info.artist_id = info.artists_ids[0] + + if recording.get("artist-relations"): + info.remixer = _get_related_artist_names( + recording["artist-relations"], relation_type="remixer" + ) + + if recording.get("length"): + info.length = int(recording["length"]) / 1000.0 + + info.trackdisambig = recording.get("disambiguation") + + if recording.get("isrcs"): + info.isrc = ";".join(recording["isrcs"]) + + lyricist = [] + composer = [] + composer_sort = [] + for work_relation in recording.get("work-relations", ()): + if work_relation["type"] != "performance": + continue + info.work = work_relation["work"]["title"] + info.mb_workid = work_relation["work"]["id"] + if "disambiguation" in work_relation["work"]: + info.work_disambig = work_relation["work"]["disambiguation"] + + for artist_relation in work_relation["work"].get( + "artist-relations", () + ): + if "type" in artist_relation: + type = artist_relation["type"] + if type == "lyricist": + lyricist.append(artist_relation["artist"]["name"]) + elif type == "composer": + composer.append(artist_relation["artist"]["name"]) + composer_sort.append( + artist_relation["artist"]["sort-name"] + ) + if lyricist: + info.lyricist = ", ".join(lyricist) + if composer: + info.composer = ", ".join(composer) + info.composer_sort = ", ".join(composer_sort) + + arranger = [] + for 
artist_relation in recording.get("artist-relations", ()): + if "type" in artist_relation: + type = artist_relation["type"] + if type == "arranger": + arranger.append(artist_relation["artist"]["name"]) + if arranger: + info.arranger = ", ".join(arranger) + + # Supplementary fields provided by plugins + extra_trackdatas = plugins.send("mb_track_extract", data=recording) + for extra_trackdata in extra_trackdatas: + info.update(extra_trackdata) + + return info + + def album_info(self, release: JSONDict) -> beets.autotag.hooks.AlbumInfo: + """Takes a MusicBrainz release result dictionary and returns a beets + AlbumInfo object containing the interesting data about that release. + """ + # Get artist name using join phrases. + artist_name, artist_sort_name, artist_credit_name = ( + _flatten_artist_credit(release["artist-credit"]) + ) + + ( + artists_names, + artists_sort_names, + artists_credit_names, + ) = _multi_artist_credit( + release["artist-credit"], include_join_phrase=False + ) + + ntracks = sum(len(m["tracks"]) for m in release["media"]) + + # The MusicBrainz API omits 'relations' + # when the release has more than 500 tracks. So we use browse_recordings + # on chunks of tracks to recover the same information in this case. + if ntracks > BROWSE_MAXTRACKS: + self._log.debug("Album {} has too many tracks", release["id"]) + recording_list = [] + for i in range(0, ntracks, BROWSE_CHUNKSIZE): + self._log.debug("Retrieving tracks starting at {}", i) + recording_list.extend( + self.mb_api.browse_recordings( + release=release["id"], offset=i + ) + ) + track_map = {r["id"]: r for r in recording_list} + for medium in release["media"]: + for recording in medium["tracks"]: + recording_info = track_map[recording["recording"]["id"]] + recording["recording"] = recording_info + + # Basic info. 
+ track_infos = [] + index = 0 + for medium in release["media"]: + disctitle = medium.get("title") + format = medium.get("format") + + if format in config["match"]["ignored_media"].as_str_seq(): + continue + + all_tracks = medium["tracks"] + if ( + "data-tracks" in medium + and not config["match"]["ignore_data_tracks"] + ): + all_tracks += medium["data-tracks"] + track_count = len(all_tracks) + + if "pregap" in medium: + all_tracks.insert(0, medium["pregap"]) + + for track in all_tracks: + if ( + "title" in track["recording"] + and track["recording"]["title"] in SKIPPED_TRACKS + ): + continue + + if ( + "video" in track["recording"] + and track["recording"]["video"] + and config["match"]["ignore_video_tracks"] + ): + continue + + # Basic information from the recording. + index += 1 + ti = self.track_info( + track["recording"], + index, + int(medium["position"]), + int(track["position"]), + track_count, + ) + ti.release_track_id = track["id"] + ti.disctitle = disctitle + ti.media = format + ti.track_alt = track["number"] + + # Prefer track data, where present, over recording data. + if track.get("title"): + ti.title = track["title"] + if track.get("artist-credit"): + # Get the artist names. 
+ ( + ti.artist, + ti.artist_sort, + ti.artist_credit, + ) = _flatten_artist_credit(track["artist-credit"]) + + ( + ti.artists, + ti.artists_sort, + ti.artists_credit, + ) = _multi_artist_credit( + track["artist-credit"], include_join_phrase=False + ) + + ti.artists_ids = _artist_ids(track["artist-credit"]) + ti.artist_id = ti.artists_ids[0] + if track.get("length"): + ti.length = int(track["length"]) / (1000.0) + + track_infos.append(ti) + + album_artist_ids = _artist_ids(release["artist-credit"]) + info = beets.autotag.hooks.AlbumInfo( + album=release["title"], + album_id=release["id"], + artist=artist_name, + artist_id=album_artist_ids[0], + artists=artists_names, + artists_ids=album_artist_ids, + tracks=track_infos, + mediums=len(release["media"]), + artist_sort=artist_sort_name, + artists_sort=artists_sort_names, + artist_credit=artist_credit_name, + artists_credit=artists_credit_names, + data_source=self.data_source, + data_url=album_url(release["id"]), + barcode=release.get("barcode"), + ) + info.va = info.artist_id == VARIOUS_ARTISTS_ID + if info.va: + info.artist = config["va_name"].as_str() + info.asin = release.get("asin") + info.releasegroup_id = release["release-group"]["id"] + info.albumstatus = release.get("status") + + if release["release-group"].get("title"): + info.release_group_title = release["release-group"].get("title") + + # Get the disambiguation strings at the release and release group level. + if release["release-group"].get("disambiguation"): + info.releasegroupdisambig = release["release-group"].get( + "disambiguation" + ) + if release.get("disambiguation"): + info.albumdisambig = release.get("disambiguation") + + # Get the "classic" Release type. This data comes from a legacy API + # feature before MusicBrainz supported multiple release types. 
+ if "type" in release["release-group"]: + reltype = release["release-group"]["type"] + if reltype: + info.albumtype = reltype.lower() + + # Set the new-style "primary" and "secondary" release types. + albumtypes = [] + if "primary-type" in release["release-group"]: + rel_primarytype = release["release-group"]["primary-type"] + if rel_primarytype: + albumtypes.append(rel_primarytype.lower()) + if "secondary-types" in release["release-group"]: + if release["release-group"]["secondary-types"]: + for sec_type in release["release-group"]["secondary-types"]: + albumtypes.append(sec_type.lower()) + info.albumtypes = albumtypes + + # Release events. + info.country, release_date = _preferred_release_event(release) + release_group_date = release["release-group"].get("first-release-date") + if not release_date: + # Fall back if release-specific date is not available. + release_date = release_group_date + + if release_date: + _set_date_str(info, release_date, False) + _set_date_str(info, release_group_date, True) + + # Label name. + if release.get("label-info"): + label_info = release["label-info"][0] + if label_info.get("label"): + label = label_info["label"]["name"] + if label != "[no label]": + info.label = label + info.catalognum = label_info.get("catalog-number") + + # Text representation data. + if release.get("text-representation"): + rep = release["text-representation"] + info.script = rep.get("script") + info.language = rep.get("language") + + # Media (format). 
+ if release["media"]: + # If all media are the same, use that medium name + if len({m.get("format") for m in release["media"]}) == 1: + info.media = release["media"][0].get("format") + # Otherwise, let's just call it "Media" + else: + info.media = "Media" + + if self.config["genres"]: + sources = [ + release["release-group"].get(self.genres_field, []), + release.get(self.genres_field, []), + ] + genres: Counter[str] = Counter() + for source in sources: + for genreitem in source: + genres[genreitem["name"]] += int(genreitem["count"]) + info.genre = "; ".join( + genre + for genre, _count in sorted(genres.items(), key=lambda g: -g[1]) + ) + + # We might find links to external sources (Discogs, Bandcamp, ...) + external_ids = self.config["external_ids"].get() + wanted_sources = { + site for site, wanted in external_ids.items() if wanted + } + if wanted_sources and (url_rels := release.get("url-relations")): + urls = {} + + for source, url in product(wanted_sources, url_rels): + if f"{source}.com" in (target := url["url"]["resource"]): + urls[source] = target + self._log.debug( + "Found link to {} release via MusicBrainz", + source.capitalize(), + ) + + for source, url in urls.items(): + setattr( + info, f"{source}_album_id", extract_release_id(source, url) + ) + + extra_albumdatas = plugins.send("mb_album_extract", data=release) + for extra_albumdata in extra_albumdatas: + info.update(extra_albumdata) + + return info + + @cached_property + def extra_mb_field_by_tag(self) -> dict[str, str]: + """Map configured extra tags to their MusicBrainz API field names. + + Process user configuration to determine which additional MusicBrainz + fields should be included in search queries. 
+ """ + mb_field_by_tag = { + t: FIELDS_TO_MB_KEYS[t] + for t in self.config["extra_tags"].as_str_seq() + if t in FIELDS_TO_MB_KEYS + } + if mb_field_by_tag: + self._log.debug("Additional search terms: {}", mb_field_by_tag) + + return mb_field_by_tag + + def get_album_criteria( + self, items: Sequence[Item], artist: str, album: str, va_likely: bool + ) -> dict[str, str]: + criteria = {"release": album} | ( + {"arid": VARIOUS_ARTISTS_ID} if va_likely else {"artist": artist} + ) + + for tag, mb_field in self.extra_mb_field_by_tag.items(): + if tag == "tracks": + value = str(len(items)) + elif tag == "alias": + value = album + else: + most_common, _ = util.plurality(i.get(tag) for i in items) + value = str(most_common) + if tag == "catalognum": + value = value.replace(" ", "") + + criteria[mb_field] = value + + return criteria + + def _search_api( + self, + query_type: Literal["recording", "release"], + filters: dict[str, str], + ) -> list[JSONDict]: + """Perform MusicBrainz API search and return results. + + Execute a search against the MusicBrainz API for recordings or releases + using the provided criteria. Handles API errors by converting them into + MusicBrainzAPIError exceptions with contextual information. 
+ """ + return self.mb_api.search( + query_type, filters, limit=self.config["search_limit"].get() + ) + + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ) -> Iterable[beets.autotag.hooks.AlbumInfo]: + criteria = self.get_album_criteria(items, artist, album, va_likely) + release_ids = (r["id"] for r in self._search_api("release", criteria)) + + for id_ in release_ids: + with suppress(HTTPNotFoundError): + if album_info := self.album_for_id(id_): + yield album_info + + def item_candidates( + self, item: Item, artist: str, title: str + ) -> Iterable[beets.autotag.hooks.TrackInfo]: + criteria = {"artist": artist, "recording": title, "alias": title} + + yield from filter( + None, map(self.track_info, self._search_api("recording", criteria)) + ) + + def album_for_id( + self, album_id: str + ) -> beets.autotag.hooks.AlbumInfo | None: + """Fetches an album by its MusicBrainz ID and returns an AlbumInfo + object or None if the album is not found. May raise a + MusicBrainzAPIError. 
+ """ + self._log.debug("Requesting MusicBrainz release {}", album_id) + if not (albumid := self._extract_id(album_id)): + self._log.debug("Invalid MBID ({}).", album_id) + return None + + res = self.mb_api.get_release(albumid, includes=RELEASE_INCLUDES) + + # resolve linked release relations + actual_res = None + + if res.get("status") == "Pseudo-Release" and ( + relations := res.get("release-relations") + ): + for rel in relations: + if ( + rel["type"] == "transl-tracklisting" + and rel["direction"] == "backward" + ): + actual_res = self.mb_api.get_release( + rel["release"]["id"], includes=RELEASE_INCLUDES + ) + + # release is potentially a pseudo release + release = self.album_info(res) + + # should be None unless we're dealing with a pseudo release + if actual_res is not None: + actual_release = self.album_info(actual_res) + return _merge_pseudo_and_actual_album(release, actual_release) + else: + return release + + def track_for_id( + self, track_id: str + ) -> beets.autotag.hooks.TrackInfo | None: + """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object + or None if no track is found. May raise a MusicBrainzAPIError. + """ + if not (trackid := self._extract_id(track_id)): + self._log.debug("Invalid MBID ({}).", track_id) + return None + + with suppress(HTTPNotFoundError): + return self.track_info( + self.mb_api.get_recording(trackid, includes=TRACK_INCLUDES) + ) + + return None diff --git a/beetsplug/parentwork.py b/beetsplug/parentwork.py index 26f8f224f..15fcdefa8 100644 --- a/beetsplug/parentwork.py +++ b/beetsplug/parentwork.py @@ -16,59 +16,19 @@ and work composition date """ -import musicbrainzngs +from __future__ import annotations + +from typing import Any + +import requests from beets import ui from beets.plugins import BeetsPlugin - -def direct_parent_id(mb_workid, work_date=None): - """Given a Musicbrainz work id, find the id one of the works the work is - part of and the first composition date it encounters. 
- """ - work_info = musicbrainzngs.get_work_by_id( - mb_workid, includes=["work-rels", "artist-rels"] - ) - if "artist-relation-list" in work_info["work"] and work_date is None: - for artist in work_info["work"]["artist-relation-list"]: - if artist["type"] == "composer": - if "end" in artist.keys(): - work_date = artist["end"] - - if "work-relation-list" in work_info["work"]: - for direct_parent in work_info["work"]["work-relation-list"]: - if ( - direct_parent["type"] == "parts" - and direct_parent.get("direction") == "backward" - ): - direct_id = direct_parent["work"]["id"] - return direct_id, work_date - return None, work_date +from ._utils.musicbrainz import MusicBrainzAPIMixin -def work_parent_id(mb_workid): - """Find the parent work id and composition date of a work given its id.""" - work_date = None - while True: - new_mb_workid, work_date = direct_parent_id(mb_workid, work_date) - if not new_mb_workid: - return mb_workid, work_date - mb_workid = new_mb_workid - return mb_workid, work_date - - -def find_parentwork_info(mb_workid): - """Get the MusicBrainz information dict about a parent work, including - the artist relations, and the composition date for a work's parent work. 
- """ - parent_id, work_date = work_parent_id(mb_workid) - work_info = musicbrainzngs.get_work_by_id( - parent_id, includes=["artist-rels"] - ) - return work_info, work_date - - -class ParentWorkPlugin(BeetsPlugin): +class ParentWorkPlugin(MusicBrainzAPIMixin, BeetsPlugin): def __init__(self): super().__init__() @@ -88,8 +48,8 @@ class ParentWorkPlugin(BeetsPlugin): force_parent = self.config["force"].get(bool) write = ui.should_write() - for item in lib.items(ui.decargs(args)): - changed = self.find_work(item, force_parent) + for item in lib.items(args): + changed = self.find_work(item, force_parent, verbose=True) if changed: item.store() if write: @@ -116,7 +76,7 @@ class ParentWorkPlugin(BeetsPlugin): force_parent = self.config["force"].get(bool) for item in task.imported_items(): - self.find_work(item, force_parent) + self.find_work(item, force_parent, verbose=False) item.store() def get_info(self, item, work_info): @@ -130,14 +90,13 @@ class ParentWorkPlugin(BeetsPlugin): parentwork_info = {} composer_exists = False - if "artist-relation-list" in work_info["work"]: - for artist in work_info["work"]["artist-relation-list"]: - if artist["type"] == "composer": - composer_exists = True - parent_composer.append(artist["artist"]["name"]) - parent_composer_sort.append(artist["artist"]["sort-name"]) - if "end" in artist.keys(): - parentwork_info["parentwork_date"] = artist["end"] + for artist in work_info.get("artist-relations", []): + if artist["type"] == "composer": + composer_exists = True + parent_composer.append(artist["artist"]["name"]) + parent_composer_sort.append(artist["artist"]["sort-name"]) + if "end" in artist.keys(): + parentwork_info["parentwork_date"] = artist["end"] parentwork_info["parent_composer"] = ", ".join(parent_composer) parentwork_info["parent_composer_sort"] = ", ".join( @@ -149,23 +108,21 @@ class ParentWorkPlugin(BeetsPlugin): "no composer for {}; add one at " "https://musicbrainz.org/work/{}", item, - work_info["work"]["id"], + 
work_info["id"], ) - parentwork_info["parentwork"] = work_info["work"]["title"] - parentwork_info["mb_parentworkid"] = work_info["work"]["id"] + parentwork_info["parentwork"] = work_info["title"] + parentwork_info["mb_parentworkid"] = work_info["id"] - if "disambiguation" in work_info["work"]: - parentwork_info["parentwork_disambig"] = work_info["work"][ - "disambiguation" - ] + if "disambiguation" in work_info: + parentwork_info["parentwork_disambig"] = work_info["disambiguation"] else: parentwork_info["parentwork_disambig"] = None return parentwork_info - def find_work(self, item, force): + def find_work(self, item, force, verbose): """Finds the parent work of a recording and populates the tags accordingly. @@ -179,10 +136,8 @@ class ParentWorkPlugin(BeetsPlugin): if not item.mb_workid: self._log.info( - "No work for {}, \ -add one at https://musicbrainz.org/recording/{}", + "No work for {0}, add one at https://musicbrainz.org/recording/{0.mb_trackid}", item, - item.mb_trackid, ) return @@ -192,9 +147,9 @@ add one at https://musicbrainz.org/recording/{}", work_changed = item.parentwork_workid_current != item.mb_workid if force or not hasparent or work_changed: try: - work_info, work_date = find_parentwork_info(item.mb_workid) - except musicbrainzngs.musicbrainz.WebServiceError as e: - self._log.debug("error fetching work: {}", e) + work_info, work_date = self.find_parentwork_info(item.mb_workid) + except requests.exceptions.RequestException: + self._log.debug("error fetching work", item, exc_info=True) return parent_info = self.get_info(item, work_info) parent_info["parentwork_workid_current"] = item.mb_workid @@ -221,16 +176,51 @@ add one at https://musicbrainz.org/recording/{}", if work_date: item["work_date"] = work_date - return ui.show_model_changes( - item, - fields=[ - "parentwork", - "parentwork_disambig", - "mb_parentworkid", - "parent_composer", - "parent_composer_sort", - "work_date", - "parentwork_workid_current", - "parentwork_date", - ], - ) + if 
verbose: + return ui.show_model_changes( + item, + fields=[ + "parentwork", + "parentwork_disambig", + "mb_parentworkid", + "parent_composer", + "parent_composer_sort", + "work_date", + "parentwork_workid_current", + "parentwork_date", + ], + ) + + def find_parentwork_info( + self, mb_workid: str + ) -> tuple[dict[str, Any], str | None]: + """Get the MusicBrainz information dict about a parent work, including + the artist relations, and the composition date for a work's parent work. + """ + work_date = None + + parent_id: str | None = mb_workid + + while parent_id: + current_id = parent_id + work_info = self.mb_api.get_work( + current_id, includes=["work-rels", "artist-rels"] + ) + work_date = work_date or next( + ( + end + for a in work_info.get("artist-relations", []) + if a["type"] == "composer" and (end := a.get("end")) + ), + None, + ) + parent_id = next( + ( + w["work"]["id"] + for w in work_info.get("work-relations", []) + if w["type"] == "parts" and w["direction"] == "backward" + ), + None, + ) + + return work_info, work_date diff --git a/beetsplug/play.py b/beetsplug/play.py index ddebd7d41..0d96ee97f 100644 --- a/beetsplug/play.py +++ b/beetsplug/play.py @@ -21,13 +21,17 @@ from os.path import relpath from beets import config, ui, util from beets.plugins import BeetsPlugin from beets.ui import Subcommand -from beets.ui.commands import PromptChoice -from beets.util import get_temp_filename +from beets.util import PromptChoice, get_temp_filename # Indicate where arguments should be inserted into the command string. # If this is missing, they're placed at the end. ARGS_MARKER = "$args" +# Indicate where the playlist file (with absolute path) should be inserted into +# the command string. If this is missing, its placed at the end, but before +# arguments. +PLS_MARKER = "$playlist" + def play( command_str, @@ -43,7 +47,7 @@ def play( """ # Print number of tracks or albums to be played, log command to be run. 
item_type += "s" if len(selection) > 1 else "" - ui.print_("Playing {} {}.".format(len(selection), item_type)) + ui.print_(f"Playing {len(selection)} {item_type}.") log.debug("executing command: {} {!r}", command_str, open_args) try: @@ -107,7 +111,7 @@ class PlayPlugin(BeetsPlugin): # Perform search by album and add folders rather than tracks to # playlist. if opts.album: - selection = lib.albums(ui.decargs(args)) + selection = lib.albums(args) paths = [] sort = lib.get_default_album_sort() @@ -120,7 +124,7 @@ class PlayPlugin(BeetsPlugin): # Perform item query and add tracks to playlist. else: - selection = lib.items(ui.decargs(args)) + selection = lib.items(args) paths = [item.path for item in selection] item_type = "track" @@ -132,8 +136,23 @@ class PlayPlugin(BeetsPlugin): return open_args = self._playlist_or_paths(paths) + open_args_str = [ + p.decode("utf-8") for p in self._playlist_or_paths(paths) + ] command_str = self._command_str(opts.args) + if PLS_MARKER in command_str: + if not config["play"]["raw"]: + command_str = command_str.replace( + PLS_MARKER, "".join(open_args_str) + ) + self._log.debug( + "command altered by PLS_MARKER to: {}", command_str + ) + open_args = [] + else: + command_str = command_str.replace(PLS_MARKER, " ") + # Check if the selection exceeds configured threshold. If True, # cancel, otherwise proceed with play command. if opts.yes or not self._exceeds_threshold( @@ -154,7 +173,7 @@ class PlayPlugin(BeetsPlugin): return f"{command_str} {args}" else: # Don't include the marker in the command. 
- return command_str.replace(" " + ARGS_MARKER, "") + return command_str.replace(f" {ARGS_MARKER}", "") def _playlist_or_paths(self, paths): """Return either the raw paths of items or a playlist of the items.""" @@ -162,6 +181,7 @@ class PlayPlugin(BeetsPlugin): return paths else: return [self._create_tmp_playlist(paths)] + return [shlex.quote(self._create_tmp_playlist(paths))] def _exceeds_threshold( self, selection, command_str, open_args, item_type="track" @@ -179,9 +199,7 @@ class PlayPlugin(BeetsPlugin): ui.print_( ui.colorize( "text_warning", - "You are about to queue {} {}.".format( - len(selection), item_type - ), + f"You are about to queue {len(selection)} {item_type}.", ) ) diff --git a/beetsplug/playlist.py b/beetsplug/playlist.py index cf1d500e8..07c12e0e0 100644 --- a/beetsplug/playlist.py +++ b/beetsplug/playlist.py @@ -12,17 +12,20 @@ # included in all copies or substantial portions of the Software. -import fnmatch import os import tempfile from collections.abc import Sequence +from pathlib import Path import beets -from beets.dbcore.query import InQuery -from beets.library import BLOB_TYPE +from beets.dbcore.query import BLOB_TYPE, InQuery from beets.util import path_as_posix +def is_m3u_file(path: str) -> bool: + return Path(path).suffix.lower() in {".m3u", ".m3u8"} + + class PlaylistQuery(InQuery[bytes]): """Matches files listed by a playlist file.""" @@ -46,7 +49,7 @@ class PlaylistQuery(InQuery[bytes]): paths = [] for playlist_path in playlist_paths: - if not fnmatch.fnmatch(playlist_path, "*.[mM]3[uU]"): + if not is_m3u_file(playlist_path): # This is not am M3U playlist, skip this candidate continue @@ -120,7 +123,7 @@ class PlaylistPlugin(beets.plugins.BeetsPlugin): def cli_exit(self, lib): for playlist in self.find_playlists(): - self._log.info(f"Updating playlist: {playlist}") + self._log.info("Updating playlist: {}", playlist) base_dir = beets.util.bytestring_path( self.relative_to if self.relative_to @@ -130,26 +133,21 @@ class 
PlaylistPlugin(beets.plugins.BeetsPlugin): try: self.update_playlist(playlist, base_dir) except beets.util.FilesystemError: - self._log.error( - "Failed to update playlist: {}".format( - beets.util.displayable_path(playlist) - ) - ) + self._log.error("Failed to update playlist: {}", playlist) def find_playlists(self): """Find M3U playlists in the playlist directory.""" + playlist_dir = beets.util.syspath(self.playlist_dir) try: - dir_contents = os.listdir(beets.util.syspath(self.playlist_dir)) + dir_contents = os.listdir(playlist_dir) except OSError: self._log.warning( - "Unable to open playlist directory {}".format( - beets.util.displayable_path(self.playlist_dir) - ) + "Unable to open playlist directory {.playlist_dir}", self ) return for filename in dir_contents: - if fnmatch.fnmatch(filename, "*.[mM]3[uU]"): + if is_m3u_file(filename): yield os.path.join(self.playlist_dir, filename) def update_playlist(self, filename, base_dir): @@ -192,9 +190,10 @@ class PlaylistPlugin(beets.plugins.BeetsPlugin): if changes or deletions: self._log.info( - "Updated playlist {} ({} changes, {} deletions)".format( - filename, changes, deletions - ) + "Updated playlist {} ({} changes, {} deletions)", + filename, + changes, + deletions, ) beets.util.copy(new_playlist, filename, replace=True) beets.util.remove(new_playlist) diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py index 9b4419c71..5e255d45b 100644 --- a/beetsplug/plexupdate.py +++ b/beetsplug/plexupdate.py @@ -22,9 +22,7 @@ def get_music_section( ): """Getting the section key for the music library in Plex.""" api_endpoint = append_token("library/sections", token) - url = urljoin( - "{}://{}:{}".format(get_protocol(secure), host, port), api_endpoint - ) + url = urljoin(f"{get_protocol(secure)}://{host}:{port}", api_endpoint) # Sends request. 
r = requests.get( @@ -54,9 +52,7 @@ def update_plex(host, port, token, library_name, secure, ignore_cert_errors): ) api_endpoint = f"library/sections/{section_key}/refresh" api_endpoint = append_token(api_endpoint, token) - url = urljoin( - "{}://{}:{}".format(get_protocol(secure), host, port), api_endpoint - ) + url = urljoin(f"{get_protocol(secure)}://{host}:{port}", api_endpoint) # Sends request and returns requests object. r = requests.get( @@ -70,7 +66,7 @@ def update_plex(host, port, token, library_name, secure, ignore_cert_errors): def append_token(url, token): """Appends the Plex Home token to the api call if required.""" if token: - url += "?" + urlencode({"X-Plex-Token": token}) + url += f"?{urlencode({'X-Plex-Token': token})}" return url diff --git a/beetsplug/random.py b/beetsplug/random.py index 05f2cdf77..c791af414 100644 --- a/beetsplug/random.py +++ b/beetsplug/random.py @@ -16,17 +16,16 @@ from beets.plugins import BeetsPlugin from beets.random import random_objs -from beets.ui import Subcommand, decargs, print_ +from beets.ui import Subcommand, print_ def random_func(lib, opts, args): """Select some random items or albums and print the results.""" # Fetch all the objects matching the query into a list. - query = decargs(args) if opts.album: - objs = list(lib.albums(query)) + objs = list(lib.albums(args)) else: - objs = list(lib.items(query)) + objs = list(lib.items(args)) # Print a random subset. 
objs = random_objs( diff --git a/beetsplug/replace.py b/beetsplug/replace.py new file mode 100644 index 000000000..0c570877b --- /dev/null +++ b/beetsplug/replace.py @@ -0,0 +1,122 @@ +import shutil +from pathlib import Path + +import mediafile + +from beets import ui, util +from beets.library import Item, Library +from beets.plugins import BeetsPlugin + + +class ReplacePlugin(BeetsPlugin): + def commands(self): + cmd = ui.Subcommand( + "replace", help="replace audio file while keeping tags" + ) + cmd.func = self.run + return [cmd] + + def run(self, lib: Library, args: list[str]) -> None: + if len(args) < 2: + raise ui.UserError("Usage: beet replace <query> <new_file_path>") + + new_file_path: Path = Path(args[-1]) + item_query: list[str] = args[:-1] + + self.file_check(new_file_path) + + item_list = list(lib.items(item_query)) + + if not item_list: + raise ui.UserError("No matching songs found.") + + song = self.select_song(item_list) + + if not song: + ui.print_("Operation cancelled.") + return + + if not self.confirm_replacement(new_file_path, song): + ui.print_("Aborting replacement.") + return + + self.replace_file(new_file_path, song) + + def file_check(self, filepath: Path) -> None: + """Check if the file exists and is supported""" + if not filepath.is_file(): + raise ui.UserError( + f"'{util.displayable_path(filepath)}' is not a valid file." + ) + + try: + mediafile.MediaFile(util.syspath(filepath)) + except mediafile.FileTypeError as fte: + raise ui.UserError(fte) + + def select_song(self, items: list[Item]): + """Present a menu of matching songs and get user selection.""" + ui.print_("\nMatching songs:") + for i, item in enumerate(items, 1): + ui.print_(f"{i}. {util.displayable_path(item)}") + + while True: + try: + index = int( + input( + f"Which song would you like to replace? " + f"[1-{len(items)}] (0 to cancel): " + ) + ) + if index == 0: + return None + if 1 <= index <= len(items): + return items[index - 1] + ui.print_( + f"Invalid choice. 
Please enter a number " + f"between 1 and {len(items)}." + ) + except ValueError: + ui.print_("Invalid input. Please type in a number.") + + def confirm_replacement(self, new_file_path: Path, song: Item): + """Get user confirmation for the replacement.""" + original_file_path: Path = Path(song.path.decode()) + + if not original_file_path.exists(): + raise ui.UserError("The original song file was not found.") + + ui.print_( + f"\nReplacing: {util.displayable_path(new_file_path)} " + f"-> {util.displayable_path(original_file_path)}" + ) + decision: str = ( + input("Are you sure you want to replace this track? (y/N): ") + .strip() + .casefold() + ) + return decision in {"yes", "y"} + + def replace_file(self, new_file_path: Path, song: Item) -> None: + """Replace the existing file with the new one.""" + original_file_path = Path(song.path.decode()) + dest = original_file_path.with_suffix(new_file_path.suffix) + + try: + shutil.move(util.syspath(new_file_path), util.syspath(dest)) + except Exception as e: + raise ui.UserError(f"Error replacing file: {e}") + + if ( + new_file_path.suffix != original_file_path.suffix + and original_file_path.exists() + ): + try: + original_file_path.unlink() + except Exception as e: + raise ui.UserError(f"Could not delete original file: {e}") + + song.path = str(dest).encode() + song.store() + + ui.print_("Replacement successful.") diff --git a/beetsplug/replaygain.py b/beetsplug/replaygain.py index 5ee9aa486..a8c887caa 100644 --- a/beetsplug/replaygain.py +++ b/beetsplug/replaygain.py @@ -28,7 +28,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from multiprocessing.pool import ThreadPool from threading import Event, Thread -from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast +from typing import TYPE_CHECKING, Any, TypeVar from beets import ui from beets.plugins import BeetsPlugin @@ -36,7 +36,7 @@ from beets.util import command_output, displayable_path, syspath if TYPE_CHECKING: import optparse - 
from collections.abc import Sequence + from collections.abc import Callable, Sequence from logging import Logger from confuse import ConfigView @@ -62,7 +62,7 @@ class FatalGstreamerPluginReplayGainError(FatalReplayGainError): loading the required plugins.""" -def call(args: list[Any], log: Logger, **kwargs: Any): +def call(args: list[str], log: Logger, **kwargs: Any): """Execute the command and return its output or raise a ReplayGainError on failure. """ @@ -70,14 +70,7 @@ def call(args: list[Any], log: Logger, **kwargs: Any): return command_output(args, **kwargs) except subprocess.CalledProcessError as e: log.debug(e.output.decode("utf8", "ignore")) - raise ReplayGainError( - "{} exited with status {}".format(args[0], e.returncode) - ) - except UnicodeEncodeError: - # Due to a bug in Python 2's subprocess on Windows, Unicode - # filenames can fail to encode on that platform. See: - # https://github.com/google-code-export/beets/issues/499 - raise ReplayGainError("argument encoding failed") + raise ReplayGainError(f"{args[0]} exited with status {e.returncode}") def db_to_lufs(db: float) -> float: @@ -148,9 +141,8 @@ class RgTask: item.rg_track_peak = track_gain.peak item.store() self._log.debug( - "applied track gain {0} LU, peak {1} of FS", - item.rg_track_gain, - item.rg_track_peak, + "applied track gain {0.rg_track_gain} LU, peak {0.rg_track_peak} of FS", + item, ) def _store_album_gain(self, item: Item, album_gain: Gain): @@ -162,9 +154,8 @@ class RgTask: item.rg_album_peak = album_gain.peak item.store() self._log.debug( - "applied album gain {0} LU, peak {1} of FS", - item.rg_album_gain, - item.rg_album_peak, + "applied album gain {0.rg_album_gain} LU, peak {0.rg_album_peak} of FS", + item, ) def _store_track(self, write: bool): @@ -175,15 +166,14 @@ class RgTask: # `track_gains` without throwing FatalReplayGainError # => raise non-fatal exception & continue raise ReplayGainError( - "ReplayGain backend `{}` failed for track {}".format( - self.backend_name, 
item - ) + f"ReplayGain backend `{self.backend_name}` failed for track" + f" {item}" ) self._store_track_gain(item, self.track_gains[0]) if write: item.try_write() - self._log.debug("done analyzing {0}", item) + self._log.debug("done analyzing {}", item) def _store_album(self, write: bool): """Store track/album gains for all tracks of the task in the database.""" @@ -196,17 +186,15 @@ class RgTask: # `album_gain` without throwing FatalReplayGainError # => raise non-fatal exception & continue raise ReplayGainError( - "ReplayGain backend `{}` failed " - "for some tracks in album {}".format( - self.backend_name, self.album - ) + f"ReplayGain backend `{self.backend_name}` failed " + f"for some tracks in album {self.album}" ) for item, track_gain in zip(self.items, self.track_gains): self._store_track_gain(item, track_gain) self._store_album_gain(item, self.album_gain) if write: item.try_write() - self._log.debug("done analyzing {0}", item) + self._log.debug("done analyzing {}", item) def store(self, write: bool): """Store computed gains for the items of this task in the database.""" @@ -240,7 +228,7 @@ class R128Task(RgTask): def _store_track_gain(self, item: Item, track_gain: Gain): item.r128_track_gain = track_gain.gain item.store() - self._log.debug("applied r128 track gain {0} LU", item.r128_track_gain) + self._log.debug("applied r128 track gain {.r128_track_gain} LU", item) def _store_album_gain(self, item: Item, album_gain: Gain): """ @@ -249,7 +237,7 @@ class R128Task(RgTask): """ item.r128_album_gain = album_gain.gain item.store() - self._log.debug("applied r128 album gain {0} LU", item.r128_album_gain) + self._log.debug("applied r128 album gain {.r128_album_gain} LU", item) AnyRgTask = TypeVar("AnyRgTask", bound=RgTask) @@ -390,10 +378,7 @@ class FfmpegBackend(Backend): album_gain = target_level_lufs - album_gain self._log.debug( - "{}: gain {} LU, peak {}", - task.album, - album_gain, - album_peak, + "{.album}: gain {} LU, peak {}", task, album_gain, 
album_peak ) task.album_gain = Gain(album_gain, album_peak) @@ -403,20 +388,18 @@ class FfmpegBackend(Backend): def _construct_cmd( self, item: Item, peak_method: PeakMethod | None - ) -> list[str | bytes]: + ) -> list[str]: """Construct the shell command to analyse items.""" return [ self._ffmpeg_path, "-nostats", "-hide_banner", "-i", - item.path, + str(item.filepath), "-map", "a:0", "-filter", - "ebur128=peak={}".format( - "none" if peak_method is None else peak_method.name - ), + f"ebur128=peak={'none' if peak_method is None else peak_method.name}", "-f", "null", "-", @@ -438,9 +421,9 @@ class FfmpegBackend(Backend): target_level_lufs = db_to_lufs(target_level) # call ffmpeg - self._log.debug(f"analyzing {item}") + self._log.debug("analyzing {}", item) cmd = self._construct_cmd(item, peak_method) - self._log.debug("executing {0}", " ".join(map(displayable_path, cmd))) + self._log.debug("executing {}", " ".join(map(displayable_path, cmd))) output = call(cmd, self._log).stderr.splitlines() # parse output @@ -508,12 +491,10 @@ class FfmpegBackend(Backend): if self._parse_float(b"M: " + line[1]) >= gating_threshold: n_blocks += 1 self._log.debug( - "{}: {} blocks over {} LUFS".format( - item, n_blocks, gating_threshold - ) + "{}: {} blocks over {} LUFS", item, n_blocks, gating_threshold ) - self._log.debug("{}: gain {} LU, peak {}".format(item, gain, peak)) + self._log.debug("{}: gain {} LU, peak {}", item, gain, peak) return Gain(gain, peak), n_blocks @@ -533,9 +514,7 @@ class FfmpegBackend(Backend): if output[i].startswith(search): return i raise ReplayGainError( - "ffmpeg output: missing {} after line {}".format( - repr(search), start_line - ) + f"ffmpeg output: missing {search!r} after line {start_line}" ) def _parse_float(self, line: bytes) -> float: @@ -576,13 +555,13 @@ class CommandBackend(Backend): } ) - self.command = cast(str, config["command"].as_str()) + self.command: str = config["command"].as_str() if self.command: # Explicit executable path. 
if not os.path.isfile(self.command): raise FatalReplayGainError( - "replaygain command does not exist: {}".format(self.command) + f"replaygain command does not exist: {self.command}" ) else: # Check whether the program is in $PATH. @@ -660,7 +639,7 @@ class CommandBackend(Backend): # tag-writing; this turns the mp3gain/aacgain tool into a gain # calculator rather than a tag manipulator because we take care # of changing tags ourselves. - cmd: list[bytes | str] = [self.command, "-o", "-s", "s"] + cmd: list[str] = [self.command, "-o", "-s", "s"] if self.noclip: # Adjust to avoid clipping. cmd = cmd + ["-k"] @@ -670,8 +649,8 @@ class CommandBackend(Backend): cmd = cmd + ["-d", str(int(target_level - 89))] cmd = cmd + [syspath(i.path) for i in items] - self._log.debug("analyzing {0} files", len(items)) - self._log.debug("executing {0}", " ".join(map(displayable_path, cmd))) + self._log.debug("analyzing {} files", len(items)) + self._log.debug("executing {}", " ".join(map(displayable_path, cmd))) output = call(cmd, self._log).stdout self._log.debug("analysis finished") return self.parse_tool_output( @@ -687,7 +666,7 @@ class CommandBackend(Backend): for line in text.split(b"\n")[1 : num_lines + 1]: parts = line.split(b"\t") if len(parts) != 6 or parts[0] == b"File": - self._log.debug("bad tool output: {0}", text) + self._log.debug("bad tool output: {}", text) raise ReplayGainError("mp3gain failed") # _file = parts[0] @@ -1039,7 +1018,7 @@ class AudioToolsBackend(Backend): os.fsdecode(syspath(item.path)) ) except OSError: - raise ReplayGainError(f"File {item.path} was not found") + raise ReplayGainError(f"File {item.filepath} was not found") except self._mod_audiotools.UnsupportedFile: raise ReplayGainError(f"Unsupported file type {item.format}") @@ -1112,9 +1091,8 @@ class AudioToolsBackend(Backend): ) self._log.debug( - "ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}", - item.artist, - item.title, + "ReplayGain for track {0.artist} - {0.title}: {1:.2f}, {2:.2f}", + 
item, rg_track_gain, rg_track_peak, ) @@ -1139,7 +1117,7 @@ class AudioToolsBackend(Backend): ) track_gains.append(Gain(gain=rg_track_gain, peak=rg_track_peak)) self._log.debug( - "ReplayGain for track {0}: {1:.2f}, {2:.2f}", + "ReplayGain for track {}: {.2f}, {.2f}", item, rg_track_gain, rg_track_peak, @@ -1152,8 +1130,8 @@ class AudioToolsBackend(Backend): rg_album_gain, task.target_level ) self._log.debug( - "ReplayGain for album {0}: {1:.2f}, {2:.2f}", - task.items[0].album, + "ReplayGain for album {.items[0].album}: {.2f}, {.2f}", + task, rg_album_gain, rg_album_peak, ) @@ -1168,7 +1146,9 @@ class ExceptionWatcher(Thread): Once an exception occurs, raise it and execute a callback. """ - def __init__(self, queue: queue.Queue, callback: Callable[[], None]): + def __init__( + self, queue: queue.Queue[Exception], callback: Callable[[], None] + ): self._queue = queue self._callback = callback self._stopevent = Event() @@ -1204,7 +1184,9 @@ BACKENDS: dict[str, type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES} class ReplayGainPlugin(BeetsPlugin): """Provides ReplayGain analysis.""" - def __init__(self): + pool: ThreadPool | None = None + + def __init__(self) -> None: super().__init__() # default backend is 'command' for backward-compatibility. @@ -1225,17 +1207,15 @@ class ReplayGainPlugin(BeetsPlugin): # FIXME: Consider renaming the configuration option and deprecating the # old name 'overwrite'. - self.force_on_import = cast(bool, self.config["overwrite"].get(bool)) + self.force_on_import: bool = self.config["overwrite"].get(bool) # Remember which backend is used for CLI feedback self.backend_name = self.config["backend"].as_str() if self.backend_name not in BACKENDS: raise ui.UserError( - "Selected ReplayGain backend {} is not supported. " - "Please select one of: {}".format( - self.backend_name, ", ".join(BACKENDS.keys()) - ) + f"Selected ReplayGain backend {self.backend_name} is not" + f" supported. 
Please select one of: {', '.join(BACKENDS)}" ) # FIXME: Consider renaming the configuration option to 'peak_method' @@ -1243,10 +1223,9 @@ class ReplayGainPlugin(BeetsPlugin): peak_method = self.config["peak"].as_str() if peak_method not in PeakMethod.__members__: raise ui.UserError( - "Selected ReplayGain peak method {} is not supported. " - "Please select one of: {}".format( - peak_method, ", ".join(PeakMethod.__members__) - ) + f"Selected ReplayGain peak method {peak_method} is not" + " supported. Please select one of:" + f" {', '.join(PeakMethod.__members__)}" ) # This only applies to plain old rg tags, r128 doesn't store peak # values. @@ -1268,9 +1247,6 @@ class ReplayGainPlugin(BeetsPlugin): except (ReplayGainError, FatalReplayGainError) as e: raise ui.UserError(f"replaygain initialization failed: {e}") - # Start threadpool lazily. - self.pool = None - def should_use_r128(self, item: Item) -> bool: """Checks the plugin setting to decide whether the calculation should be done using the EBU R128 standard and use R128_ tags instead. @@ -1354,19 +1330,19 @@ class ReplayGainPlugin(BeetsPlugin): items, nothing is done. 
""" if not force and not self.album_requires_gain(album): - self._log.info("Skipping album {0}", album) + self._log.info("Skipping album {}", album) return items_iter = iter(album.items()) use_r128 = self.should_use_r128(next(items_iter)) if any(use_r128 != self.should_use_r128(i) for i in items_iter): self._log.error( - "Cannot calculate gain for album {0} (incompatible formats)", + "Cannot calculate gain for album {} (incompatible formats)", album, ) return - self._log.info("analyzing {0}", album) + self._log.info("analyzing {}", album) discs: dict[int, list[Item]] = {} if self.config["per_disc"].get(bool): @@ -1390,7 +1366,7 @@ class ReplayGainPlugin(BeetsPlugin): callback=store_cb, ) except ReplayGainError as e: - self._log.info("ReplayGain error: {0}", e) + self._log.info("ReplayGain error: {}", e) except FatalReplayGainError as e: raise ui.UserError(f"Fatal replay gain error: {e}") @@ -1402,7 +1378,7 @@ class ReplayGainPlugin(BeetsPlugin): in the item, nothing is done. """ if not force and not self.track_requires_gain(item): - self._log.info("Skipping track {0}", item) + self._log.info("Skipping track {}", item) return use_r128 = self.should_use_r128(item) @@ -1419,7 +1395,7 @@ class ReplayGainPlugin(BeetsPlugin): callback=store_cb, ) except ReplayGainError as e: - self._log.info("ReplayGain error: {0}", e) + self._log.info("ReplayGain error: {}", e) except FatalReplayGainError as e: raise ui.UserError(f"Fatal replay gain error: {e}") @@ -1427,7 +1403,7 @@ class ReplayGainPlugin(BeetsPlugin): """Open a `ThreadPool` instance in `self.pool`""" if self.pool is None and self.backend_instance.do_parallel: self.pool = ThreadPool(threads) - self.exc_queue: queue.Queue = queue.Queue() + self.exc_queue: queue.Queue[Exception] = queue.Queue() signal.signal(signal.SIGINT, self._interrupt) @@ -1491,7 +1467,7 @@ class ReplayGainPlugin(BeetsPlugin): def import_begin(self, session: ImportSession): """Handle `import_begin` event -> open pool""" - threads = cast(int, 
self.config["threads"].get(int)) + threads: int = self.config["threads"].get(int) if ( self.config["parallel_on_import"] @@ -1526,26 +1502,22 @@ class ReplayGainPlugin(BeetsPlugin): # Bypass self.open_pool() if called with `--threads 0` if opts.threads != 0: - threads = opts.threads or cast( - int, self.config["threads"].get(int) - ) + threads: int = opts.threads or self.config["threads"].get(int) self.open_pool(threads) if opts.album: - albums = lib.albums(ui.decargs(args)) + albums = lib.albums(args) self._log.info( - "Analyzing {} albums ~ {} backend...".format( - len(albums), self.backend_name - ) + f"Analyzing {len(albums)} albums ~" + f" {self.backend_name} backend..." ) for album in albums: self.handle_album(album, write, force) else: - items = lib.items(ui.decargs(args)) + items = lib.items(args) self._log.info( - "Analyzing {} tracks ~ {} backend...".format( - len(items), self.backend_name - ) + f"Analyzing {len(items)} tracks ~" + f" {self.backend_name} backend..." ) for item in items: self.handle_track(item, write, force) @@ -1564,8 +1536,10 @@ class ReplayGainPlugin(BeetsPlugin): "--threads", dest="threads", type=int, - help="change the number of threads, \ - defaults to maximum available processors", + help=( + "change the number of threads, defaults to maximum available" + " processors" + ), ) cmd.parser.add_option( "-f", @@ -1573,8 +1547,10 @@ class ReplayGainPlugin(BeetsPlugin): dest="force", action="store_true", default=False, - help="analyze all files, including those that " - "already have ReplayGain metadata", + help=( + "analyze all files, including those that already have" + " ReplayGain metadata" + ), ) cmd.parser.add_option( "-w", diff --git a/beetsplug/rewrite.py b/beetsplug/rewrite.py index 83829d657..1cc21ad75 100644 --- a/beetsplug/rewrite.py +++ b/beetsplug/rewrite.py @@ -57,9 +57,9 @@ class RewritePlugin(BeetsPlugin): raise ui.UserError("invalid rewrite specification") if fieldname not in library.Item._fields: raise ui.UserError( - 
"invalid field name (%s) in rewriter" % fieldname + f"invalid field name ({fieldname}) in rewriter" ) - self._log.debug("adding template field {0}", key) + self._log.debug("adding template field {}", key) pattern = re.compile(pattern.lower()) rules[fieldname].append((pattern, value)) if fieldname == "artist": diff --git a/beetsplug/scrub.py b/beetsplug/scrub.py index 630a4e6e6..c39894137 100644 --- a/beetsplug/scrub.py +++ b/beetsplug/scrub.py @@ -58,10 +58,8 @@ class ScrubPlugin(BeetsPlugin): def commands(self): def scrub_func(lib, opts, args): # Walk through matching files and remove tags. - for item in lib.items(ui.decargs(args)): - self._log.info( - "scrubbing: {0}", util.displayable_path(item.path) - ) + for item in lib.items(args): + self._log.info("scrubbing: {.filepath}", item) self._scrub_item(item, opts.write) scrub_cmd = ui.Subcommand("scrub", help="clean audio tags") @@ -110,7 +108,7 @@ class ScrubPlugin(BeetsPlugin): f.save() except (OSError, mutagen.MutagenError) as exc: self._log.error( - "could not scrub {0}: {1}", util.displayable_path(path), exc + "could not scrub {}: {}", util.displayable_path(path), exc ) def _scrub_item(self, item, restore): @@ -124,7 +122,7 @@ class ScrubPlugin(BeetsPlugin): util.syspath(item.path), config["id3v23"].get(bool) ) except mediafile.UnreadableFileError as exc: - self._log.error("could not open file to scrub: {0}", exc) + self._log.error("could not open file to scrub: {}", exc) return images = mf.images @@ -144,12 +142,10 @@ class ScrubPlugin(BeetsPlugin): mf.images = images mf.save() except mediafile.UnreadableFileError as exc: - self._log.error("could not write tags: {0}", exc) + self._log.error("could not write tags: {}", exc) def import_task_files(self, session, task): """Automatically scrub imported files.""" for item in task.imported_items(): - self._log.debug( - "auto-scrubbing {0}", util.displayable_path(item.path) - ) + self._log.debug("auto-scrubbing {.filepath}", item) self._scrub_item(item, 
ui.should_write()) diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py index d758c0255..ed417f2b9 100644 --- a/beetsplug/smartplaylist.py +++ b/beetsplug/smartplaylist.py @@ -14,14 +14,16 @@ """Generates smart playlists based on beets queries.""" +from __future__ import annotations + import os +from typing import Any, TypeAlias from urllib.parse import quote from urllib.request import pathname2url from beets import ui -from beets.dbcore import OrQuery -from beets.dbcore.query import MultipleSort, ParsingError -from beets.library import Album, Item, parse_query_string +from beets.dbcore.query import ParsingError, Query, Sort +from beets.library import Album, Item, Library, parse_query_string from beets.plugins import BeetsPlugin from beets.plugins import send as send_event from beets.util import ( @@ -34,9 +36,17 @@ from beets.util import ( syspath, ) +QueryAndSort = tuple[Query, Sort] +PlaylistQuery = Query | tuple[QueryAndSort, ...] | None +PlaylistMatch: TypeAlias = tuple[ + str, + tuple[PlaylistQuery, Sort | None], + tuple[PlaylistQuery, Sort | None], +] + class SmartPlaylistPlugin(BeetsPlugin): - def __init__(self): + def __init__(self) -> None: super().__init__() self.config.add( { @@ -55,13 +65,13 @@ class SmartPlaylistPlugin(BeetsPlugin): ) self.config["prefix"].redact = True # May contain username/password. - self._matched_playlists = None - self._unmatched_playlists = None + self._matched_playlists: set[PlaylistMatch] = set() + self._unmatched_playlists: set[PlaylistMatch] = set() if self.config["auto"]: self.register_listener("database_change", self.db_change) - def commands(self): + def commands(self) -> list[ui.Subcommand]: spl_update = ui.Subcommand( "splupdate", help="update the smart playlists. 
Playlist names may be " @@ -124,24 +134,23 @@ class SmartPlaylistPlugin(BeetsPlugin): spl_update.func = self.update_cmd return [spl_update] - def update_cmd(self, lib, opts, args): + def update_cmd(self, lib: Library, opts: Any, args: list[str]) -> None: self.build_queries() if args: - args = set(ui.decargs(args)) - for a in list(args): + args_set = set(args) + for a in list(args_set): if not a.endswith(".m3u"): - args.add(f"{a}.m3u") + args_set.add(f"{a}.m3u") playlists = { (name, q, a_q) for name, q, a_q in self._unmatched_playlists - if name in args + if name in args_set } if not playlists: + unmatched = [name for name, _, _ in self._unmatched_playlists] raise ui.UserError( - "No playlist matching any of {} found".format( - [name for name, _, _ in self._unmatched_playlists] - ) + f"No playlist matching any of {unmatched} found" ) self._matched_playlists = playlists @@ -152,16 +161,32 @@ class SmartPlaylistPlugin(BeetsPlugin): self.__apply_opts_to_config(opts) self.update_playlists(lib, opts.pretend) - def __apply_opts_to_config(self, opts): + def __apply_opts_to_config(self, opts: Any) -> None: for k, v in opts.__dict__.items(): if v is not None and k in self.config: self.config[k] = v - def build_queries(self): + def _parse_one_query( + self, playlist: dict[str, Any], key: str, model_cls: type + ) -> tuple[PlaylistQuery, Sort | None]: + qs = playlist.get(key) + if qs is None: + return None, None + if isinstance(qs, str): + return parse_query_string(qs, model_cls) + if len(qs) == 1: + return parse_query_string(qs[0], model_cls) + + queries_and_sorts: tuple[QueryAndSort, ...] = tuple( + parse_query_string(q, model_cls) for q in qs + ) + return queries_and_sorts, None + + def build_queries(self) -> None: """ Instantiate queries for the playlists. - Each playlist has 2 queries: one or items one for albums, each with a + Each playlist has 2 queries: one for items, one for albums, each with a sort. We must also remember its name. 
_unmatched_playlists is a set of tuples (name, (q, q_sort), (album_q, album_q_sort)). @@ -170,7 +195,7 @@ class SmartPlaylistPlugin(BeetsPlugin): More precisely - it will be NullSort when a playlist query ('query' or 'album_query') is a single item or a list with 1 element - - it will be None when there are multiple items i a query + - it will be None when there are multiple items in a query """ self._unmatched_playlists = set() self._matched_playlists = set() @@ -180,76 +205,58 @@ class SmartPlaylistPlugin(BeetsPlugin): self._log.warning("playlist configuration is missing name") continue - playlist_data = (playlist["name"],) try: - for key, model_cls in (("query", Item), ("album_query", Album)): - qs = playlist.get(key) - if qs is None: - query_and_sort = None, None - elif isinstance(qs, str): - query_and_sort = parse_query_string(qs, model_cls) - elif len(qs) == 1: - query_and_sort = parse_query_string(qs[0], model_cls) - else: - # multiple queries and sorts - queries, sorts = zip( - *(parse_query_string(q, model_cls) for q in qs) - ) - query = OrQuery(queries) - final_sorts = [] - for s in sorts: - if s: - if isinstance(s, MultipleSort): - final_sorts += s.sorts - else: - final_sorts.append(s) - if not final_sorts: - sort = None - elif len(final_sorts) == 1: - (sort,) = final_sorts - else: - sort = MultipleSort(final_sorts) - query_and_sort = query, sort - - playlist_data += (query_and_sort,) - + q_match = self._parse_one_query(playlist, "query", Item) + a_match = self._parse_one_query(playlist, "album_query", Album) except ParsingError as exc: self._log.warning( "invalid query in playlist {}: {}", playlist["name"], exc ) continue - self._unmatched_playlists.add(playlist_data) + self._unmatched_playlists.add((playlist["name"], q_match, a_match)) - def matches(self, model, query, album_query): - if album_query and isinstance(model, Album): - return album_query.match(model) - if query and isinstance(model, Item): - return query.match(model) + def 
_matches_query(self, model: Item | Album, query: PlaylistQuery) -> bool: + if not query: + return False + if isinstance(query, (list, tuple)): + return any(q.match(model) for q, _ in query) + return query.match(model) + + def matches( + self, + model: Item | Album, + query: PlaylistQuery, + album_query: PlaylistQuery, + ) -> bool: + if isinstance(model, Album): + return self._matches_query(model, album_query) + if isinstance(model, Item): + return self._matches_query(model, query) return False - def db_change(self, lib, model): + def db_change(self, lib: Library, model: Item | Album) -> None: if self._unmatched_playlists is None: self.build_queries() for playlist in self._unmatched_playlists: n, (q, _), (a_q, _) = playlist if self.matches(model, q, a_q): - self._log.debug("{0} will be updated because of {1}", n, model) + self._log.debug("{} will be updated because of {}", n, model) self._matched_playlists.add(playlist) self.register_listener("cli_exit", self.update_playlists) self._unmatched_playlists -= self._matched_playlists - def update_playlists(self, lib, pretend=False): + def update_playlists(self, lib: Library, pretend: bool = False) -> None: if pretend: self._log.info( - "Showing query results for {0} smart playlists...", + "Showing query results for {} smart playlists...", len(self._matched_playlists), ) else: self._log.info( - "Updating {0} smart playlists...", len(self._matched_playlists) + "Updating {} smart playlists...", len(self._matched_playlists) ) playlist_dir = self.config["playlist_dir"].as_filename() @@ -261,19 +268,38 @@ class SmartPlaylistPlugin(BeetsPlugin): relative_to = normpath(relative_to) # Maps playlist filenames to lists of track filenames. 
- m3us = {} + m3us: dict[str, list[PlaylistItem]] = {} for playlist in self._matched_playlists: name, (query, q_sort), (album_query, a_q_sort) = playlist if pretend: self._log.info("Results for playlist {}:", name) else: - self._log.info("Creating playlist {0}", name) + self._log.info("Creating playlist {}", name) items = [] - if query: + # Handle tuple/list of queries (preserves order) + # Track seen items to avoid duplicates when an item matches + # multiple queries + seen_ids = set() + + if isinstance(query, (list, tuple)): + for q, sort in query: + for item in lib.items(q, sort): + if item.id not in seen_ids: + items.append(item) + seen_ids.add(item.id) + elif query: items.extend(lib.items(query, q_sort)) - if album_query: + + if isinstance(album_query, (list, tuple)): + for q, sort in album_query: + for album in lib.albums(q, sort): + for item in album.items(): + if item.id not in seen_ids: + items.append(item) + seen_ids.add(item.id) + elif album_query: for album in lib.albums(album_query, a_q_sort): items.extend(album.items()) @@ -293,7 +319,9 @@ class SmartPlaylistPlugin(BeetsPlugin): if self.config["forward_slash"].get(): item_uri = path_as_posix(item_uri) if self.config["urlencode"]: - item_uri = bytestring_path(pathname2url(item_uri)) + item_uri = bytestring_path( + pathname2url(os.fsdecode(item_uri)) + ) item_uri = prefix + item_uri if item_uri not in m3us[m3u_name]: @@ -327,29 +355,28 @@ class SmartPlaylistPlugin(BeetsPlugin): if extm3u: attr = [(k, entry.item[k]) for k in keys] al = [ - f" {key}=\"{quote(str(value), safe='/:')}\"" + f' {key}="{quote(str(value), safe="/:")}"' for key, value in attr ] attrs = "".join(al) - comment = "#EXTINF:{}{},{} - {}\n".format( - int(item.length), attrs, item.artist, item.title + comment = ( + f"#EXTINF:{int(item.length)}{attrs}," + f"{item.artist} - {item.title}\n" ) f.write(comment.encode("utf-8") + entry.uri + b"\n") # Send an event when playlists were updated. 
- send_event("smartplaylist_update") + send_event("smartplaylist_update") # type: ignore if pretend: self._log.info( - "Displayed results for {0} playlists", + "Displayed results for {} playlists", len(self._matched_playlists), ) else: - self._log.info( - "{0} playlists updated", len(self._matched_playlists) - ) + self._log.info("{} playlists updated", len(self._matched_playlists)) class PlaylistItem: - def __init__(self, item, uri): + def __init__(self, item: Item, uri: bytes) -> None: self.item = item self.uri = uri diff --git a/beetsplug/spotify.py b/beetsplug/spotify.py index 55a77a8a7..ab920cdd4 100644 --- a/beetsplug/spotify.py +++ b/beetsplug/spotify.py @@ -13,38 +13,86 @@ # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. -"""Adds Spotify release and track search support to the autotagger, along with -Spotify playlist construction. +"""Adds Spotify release and track search support to the autotagger. + +Also includes Spotify playlist construction. """ +from __future__ import annotations + import base64 import collections import json import re +import threading import time import webbrowser +from typing import TYPE_CHECKING, Any, Literal, Union import confuse import requests -import unidecode from beets import ui from beets.autotag.hooks import AlbumInfo, TrackInfo from beets.dbcore import types -from beets.library import DateType -from beets.plugins import BeetsPlugin, MetadataSourcePlugin -from beets.util.id_extractors import spotify_id_regex +from beets.library import Library +from beets.metadata_plugins import ( + IDResponse, + SearchApiMetadataSourcePlugin, + SearchFilter, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from beets.library import Library + from beetsplug._typing import JSONDict DEFAULT_WAITING_TIME = 5 -class SpotifyAPIError(Exception): +class SearchResponseAlbums(IDResponse): + """A response returned by the Spotify API. 
+ + We only use items and disregard the pagination information. i.e. + res["albums"]["items"][0]. + + There are more fields in the response, but we only type the ones we + currently use. + + see https://developer.spotify.com/documentation/web-api/reference/search + + """ + + album_type: str + available_markets: Sequence[str] + name: str + + +class SearchResponseTracks(IDResponse): + """A track response returned by the Spotify API.""" + + album: SearchResponseAlbums + available_markets: Sequence[str] + popularity: int + name: str + + +class APIError(Exception): pass -class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): - data_source = "Spotify" +class AudioFeaturesUnavailableError(Exception): + """Raised when audio features API returns 403 (deprecated).""" + pass + + +class SpotifyPlugin( + SearchApiMetadataSourcePlugin[ + Union[SearchResponseAlbums, SearchResponseTracks] + ] +): item_types = { "spotify_track_popularity": types.INTEGER, "spotify_acousticness": types.FLOAT, @@ -59,7 +107,7 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): "spotify_tempo": types.FLOAT, "spotify_time_signature": types.INTEGER, "spotify_valence": types.FLOAT, - "spotify_updated": DateType(), + "spotify_updated": types.DATE, } # Base URLs for the Spotify API @@ -71,8 +119,6 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): track_url = "https://api.spotify.com/v1/tracks/" audio_features_url = "https://api.spotify.com/v1/audio-features/" - id_regex = spotify_id_regex - spotify_audio_features = { "acousticness": "spotify_acousticness", "danceability": "spotify_danceability", @@ -95,45 +141,48 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): "mode": "list", "tiebreak": "popularity", "show_failures": False, - "artist_field": "albumartist", - "album_field": "album", - "track_field": "title", "region_filter": None, "regex": [], "client_id": "4e414367a1d14c75a5c5129a627fcab8", - "client_secret": "f82bdc09b2254f1a8286815d02fd46dc", + "client_secret": 
"4a9b5b7848e54e118a7523b1c7c3e1e5", "tokenfile": "spotify_token.json", } ) + self.config["client_id"].redact = True self.config["client_secret"].redact = True - self.tokenfile = self.config["tokenfile"].get( - confuse.Filename(in_app_dir=True) - ) # Path to the JSON file for storing the OAuth access token. + self.audio_features_available = ( + True # Track if audio features API is available + ) + self._audio_features_lock = ( + threading.Lock() + ) # Protects audio_features_available self.setup() def setup(self): """Retrieve previously saved OAuth token or generate a new one.""" + try: - with open(self.tokenfile) as f: + with open(self._tokenfile()) as f: token_data = json.load(f) except OSError: self._authenticate() else: self.access_token = token_data["access_token"] - def _authenticate(self): - """Request an access token via the Client Credentials Flow: - https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow - """ + def _tokenfile(self) -> str: + """Get the path to the JSON file for storing the OAuth token.""" + return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True)) + + def _authenticate(self) -> None: + """Request an access token via the Client Credentials Flow: https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow""" + c_id: str = self.config["client_id"].as_str() + c_secret: str = self.config["client_secret"].as_str() + headers = { - "Authorization": "Basic {}".format( - base64.b64encode( - ":".join( - self.config[k].as_str() - for k in ("client_id", "client_secret") - ).encode() - ).decode() + "Authorization": ( + "Basic" + f" {base64.b64encode(f'{c_id}:{c_secret}'.encode()).decode()}" ) } response = requests.post( @@ -146,35 +195,38 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): response.raise_for_status() except requests.exceptions.HTTPError as e: raise ui.UserError( - "Spotify authorization failed: {}\n{}".format(e, 
response.text) + f"Spotify authorization failed: {e}\n{response.text}" ) self.access_token = response.json()["access_token"] # Save the token for later use. - self._log.debug( - "{} access token: {}", self.data_source, self.access_token - ) - with open(self.tokenfile, "w") as f: + self._log.debug("{0.data_source} access token: {0.access_token}", self) + with open(self._tokenfile(), "w") as f: json.dump({"access_token": self.access_token}, f) def _handle_response( - self, request_type, url, params=None, retry_count=0, max_retries=3 - ): + self, + method: Literal["get", "post", "put", "delete"], + url: str, + params: Any = None, + retry_count: int = 0, + max_retries: int = 3, + ) -> JSONDict: """Send a request, reauthenticating if necessary. - :param request_type: Type of :class:`Request` constructor, - e.g. ``requests.get``, ``requests.post``, etc. - :type request_type: function + :param method: HTTP method to use for the request. :param url: URL for the new :class:`Request` object. - :type url: str - :param params: (optional) list of tuples or bytes to send + :param dict params: (optional) list of tuples or bytes to send in the query string for the :class:`Request`. - :type params: dict - :return: JSON data for the class:`Response <Response>` object. 
- :rtype: dict + """ + + if retry_count > max_retries: + raise APIError("Maximum retries reached.") + try: - response = request_type( + response = requests.request( + method, url, headers={"Authorization": f"Bearer {self.access_token}"}, params=params, @@ -184,75 +236,109 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): return response.json() except requests.exceptions.ReadTimeout: self._log.error("ReadTimeout.") - raise SpotifyAPIError("Request timed out.") + raise APIError("Request timed out.") except requests.exceptions.ConnectionError as e: - self._log.error(f"Network error: {e}") - raise SpotifyAPIError("Network error.") + self._log.error("Network error: {}", e) + raise APIError("Network error.") except requests.exceptions.RequestException as e: + if e.response is None: + self._log.error("Request failed: {}", e) + raise APIError("Request failed.") if e.response.status_code == 401: self._log.debug( - f"{self.data_source} access token has expired. " - f"Reauthenticating." + "{.data_source} access token has expired. 
Reauthenticating.", + self, ) self._authenticate() - return self._handle_response(request_type, url, params=params) + return self._handle_response( + method, + url, + params=params, + retry_count=retry_count + 1, + ) elif e.response.status_code == 404: - raise SpotifyAPIError( + raise APIError( + f"API Error: {e.response.status_code}\n" + f"URL: {url}\nparams: {params}" + ) + elif e.response.status_code == 403: + # Check if this is the audio features endpoint + if url.startswith(self.audio_features_url): + raise AudioFeaturesUnavailableError( + "Audio features API returned 403 " + "(deprecated or unavailable)" + ) + raise APIError( f"API Error: {e.response.status_code}\n" f"URL: {url}\nparams: {params}" ) elif e.response.status_code == 429: - if retry_count >= max_retries: - raise SpotifyAPIError("Maximum retries reached.") - seconds = response.headers.get( + seconds = e.response.headers.get( "Retry-After", DEFAULT_WAITING_TIME ) self._log.debug( - f"Too many API requests. Retrying after " - f"{seconds} seconds." + "Too many API requests. Retrying after {} seconds.", seconds ) time.sleep(int(seconds) + 1) return self._handle_response( - request_type, + method, url, params=params, retry_count=retry_count + 1, ) elif e.response.status_code == 503: self._log.error("Service Unavailable.") - raise SpotifyAPIError("Service Unavailable.") + raise APIError("Service Unavailable.") elif e.response.status_code == 502: self._log.error("Bad Gateway.") - raise SpotifyAPIError("Bad Gateway.") + raise APIError("Bad Gateway.") elif e.response is not None: - raise SpotifyAPIError( - f"{self.data_source} API error:\n{e.response.text}\n" + raise APIError( + f"{self.data_source} API error:\n" + f"{e.response.text}\n" f"URL:\n{url}\nparams:\n{params}" ) else: - self._log.error(f"Request failed. Error: {e}") - raise SpotifyAPIError("Request failed.") + self._log.error("Request failed. 
Error: {}", e) + raise APIError("Request failed.") - def album_for_id(self, album_id): + def _multi_artist_credit( + self, artists: list[dict[str | int, str]] + ) -> tuple[list[str], list[str]]: + """Given a list of artist dictionaries, accumulate data into a pair + of lists: the first being the artist names, and the second being the + artist IDs. + """ + artist_names = [] + artist_ids = [] + for artist in artists: + artist_names.append(artist["name"]) + artist_ids.append(artist["id"]) + return artist_names, artist_ids + + def album_for_id(self, album_id: str) -> AlbumInfo | None: """Fetch an album by its Spotify ID or URL and return an AlbumInfo object or None if the album is not found. - :param album_id: Spotify ID or URL for the album - :type album_id: str - :return: AlbumInfo object for album + :param str album_id: Spotify ID or URL for the album + + :returns: AlbumInfo object for album :rtype: beets.autotag.hooks.AlbumInfo or None + """ - spotify_id = self._get_id("album", album_id, self.id_regex) - if spotify_id is None: + if not (spotify_id := self._extract_id(album_id)): return None album_data = self._handle_response( - requests.get, self.album_url + spotify_id + "get", f"{self.album_url}{spotify_id}" ) if album_data["name"] == "": self._log.debug("Album removed from Spotify: {}", album_id) return None - artist, artist_id = self.get_artist(album_data["artists"]) + artists_names, artists_ids = self._multi_artist_credit( + album_data["artists"] + ) + artist = ", ".join(artists_names) date_parts = [ int(part) for part in album_data["release_date"].split("-") @@ -271,21 +357,17 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): else: raise ui.UserError( "Invalid `release_date_precision` returned " - "by {} API: '{}'".format( - self.data_source, release_date_precision - ) + f"by {self.data_source} API: '{release_date_precision}'" ) tracks_data = album_data["tracks"] tracks_items = tracks_data["items"] while tracks_data["next"]: - tracks_data = 
self._handle_response( - requests.get, tracks_data["next"] - ) + tracks_data = self._handle_response("get", tracks_data["next"]) tracks_items.extend(tracks_data["items"]) tracks = [] - medium_totals = collections.defaultdict(int) + medium_totals: dict[int | None, int] = collections.defaultdict(int) for i, track_data in enumerate(tracks_items, start=1): track = self._get_track(track_data) track.index = i @@ -299,8 +381,10 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): album_id=spotify_id, spotify_album_id=spotify_id, artist=artist, - artist_id=artist_id, - spotify_artist_id=artist_id, + artist_id=artists_ids[0] if len(artists_ids) > 0 else None, + spotify_artist_id=artists_ids[0] if len(artists_ids) > 0 else None, + artists=artists_names, + artists_ids=artists_ids, tracks=tracks, albumtype=album_data["album_type"], va=len(album_data["artists"]) == 1 @@ -309,21 +393,24 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): month=month, day=day, label=album_data["label"], - mediums=max(medium_totals.keys()), + mediums=max(filter(None, medium_totals.keys())), data_source=self.data_source, data_url=album_data["external_urls"]["spotify"], ) - def _get_track(self, track_data): + def _get_track(self, track_data: JSONDict) -> TrackInfo: """Convert a Spotify track object dict to a TrackInfo object. 
:param track_data: Simplified track object (https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified) - :type track_data: dict - :return: TrackInfo object for track - :rtype: beets.autotag.hooks.TrackInfo + + :returns: TrackInfo object for track + """ - artist, artist_id = self.get_artist(track_data["artists"]) + artists_names, artists_ids = self._multi_artist_credit( + track_data["artists"] + ) + artist = ", ".join(artists_names) # Get album information for spotify tracks try: @@ -336,8 +423,10 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): spotify_track_id=track_data["id"], artist=artist, album=album, - artist_id=artist_id, - spotify_artist_id=artist_id, + artist_id=artists_ids[0] if len(artists_ids) > 0 else None, + spotify_artist_id=artists_ids[0] if len(artists_ids) > 0 else None, + artists=artists_names, + artists_ids=artists_ids, length=track_data["duration_ms"] / 1000, index=track_data["track_number"], medium=track_data["disc_number"], @@ -346,33 +435,32 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): data_url=track_data["external_urls"]["spotify"], ) - def track_for_id(self, track_id=None, track_data=None): - """Fetch a track by its Spotify ID or URL and return a - TrackInfo object or None if the track is not found. + def track_for_id(self, track_id: str) -> None | TrackInfo: + """Fetch a track by its Spotify ID or URL. + + Returns a TrackInfo object or None if the track is not found. - :param track_id: (Optional) Spotify ID or URL for the track. Either - ``track_id`` or ``track_data`` must be provided. - :type track_id: str - :param track_data: (Optional) Simplified track object dict. May be - provided instead of ``track_id`` to avoid unnecessary API calls. 
- :type track_data: dict - :return: TrackInfo object for track - :rtype: beets.autotag.hooks.TrackInfo or None """ - if track_data is None: - spotify_id = self._get_id("track", track_id, self.id_regex) - if spotify_id is None: - return None - track_data = self._handle_response( - requests.get, self.track_url + spotify_id + + if not (spotify_id := self._extract_id(track_id)): + self._log.debug("Invalid Spotify ID: {}", track_id) + return None + + if not ( + track_data := self._handle_response( + "get", f"{self.track_url}{spotify_id}" ) + ): + self._log.debug("Track not found: {}", track_id) + return None + track = self._get_track(track_data) # Get album's tracks to set `track.index` (position on the entire # release) and `track.medium_total` (total number of tracks on # the track's disc). album_data = self._handle_response( - requests.get, self.album_url + track_data["album"]["id"] + "get", f"{self.album_url}{track_data['album']['id']}" ) medium_total = 0 for i, track_data in enumerate(album_data["tracks"]["items"], start=1): @@ -383,71 +471,54 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): track.medium_total = medium_total return track - @staticmethod - def _construct_search_query(filters=None, keywords=""): - """Construct a query string with the specified filters and keywords to - be provided to the Spotify Search API - (https://developer.spotify.com/documentation/web-api/reference/search/search/#writing-a-query---guidelines). - - :param filters: (Optional) Field filters to apply. - :type filters: dict - :param keywords: (Optional) Query keywords to use. - :type keywords: str - :return: Query string to be provided to the Search API. 
- :rtype: str - """ - query_components = [ - keywords, - " ".join(":".join((k, v)) for k, v in filters.items()), - ] - query = " ".join([q for q in query_components if q]) - if not isinstance(query, str): - query = query.decode("utf8") - return unidecode.unidecode(query) - - def _search_api(self, query_type, filters=None, keywords=""): - """Query the Spotify Search API for the specified ``keywords``, + def _search_api( + self, + query_type: Literal["album", "track"], + filters: SearchFilter, + query_string: str = "", + ) -> Sequence[SearchResponseAlbums | SearchResponseTracks]: + """Query the Spotify Search API for the specified ``query_string``, applying the provided ``filters``. - :param query_type: Item type to search across. Valid types are: - 'album', 'artist', 'playlist', and 'track'. - :type query_type: str - :param filters: (Optional) Field filters to apply. - :type filters: dict - :param keywords: (Optional) Query keywords to use. - :type keywords: str - :return: JSON data for the class:`Response <Response>` object or None - if no search results are returned. - :rtype: dict or None + :param query_type: Item type to search across. Valid types are: 'album', + 'artist', 'playlist', and 'track'. + :param filters: Field filters to apply. + :param query_string: Additional query to include in the search. 
+ """ - query = self._construct_search_query(keywords=keywords, filters=filters) - if not query: - return None - self._log.debug(f"Searching {self.data_source} for '{query}'") + query = self._construct_search_query( + filters=filters, query_string=query_string + ) + + self._log.debug("Searching {.data_source} for '{}'", self, query) try: response = self._handle_response( - requests.get, + "get", self.search_url, - params={"q": query, "type": query_type}, + params={ + "q": query, + "type": query_type, + "limit": self.config["search_limit"].get(), + }, ) - except SpotifyAPIError as e: + except APIError as e: self._log.debug("Spotify API error: {}", e) - return [] - response_data = response.get(query_type + "s", {}).get("items", []) + return () + response_data = response.get(f"{query_type}s", {}).get("items", []) self._log.debug( - "Found {} result(s) from {} for '{}'", + "Found {} result(s) from {.data_source} for '{}'", len(response_data), - self.data_source, + self, query, ) return response_data - def commands(self): + def commands(self) -> list[ui.Subcommand]: # autotagger import command def queries(lib, opts, args): success = self._parse_opts(opts) if success: - results = self._match_library_tracks(lib, ui.decargs(args)) + results = self._match_library_tracks(lib, args) self._output_match_results(results) spotify_cmd = ui.Subcommand( @@ -457,17 +528,17 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): "-m", "--mode", action="store", - help='"open" to open {} with playlist, ' - '"list" to print (default)'.format(self.data_source), + help=( + f'"open" to open {self.data_source} with playlist, ' + '"list" to print (default)' + ), ) spotify_cmd.parser.add_option( "-f", "--show-failures", action="store_true", dest="show_failures", - help="list tracks that did not match a {} ID".format( - self.data_source - ), + help=f"list tracks that did not match a {self.data_source} ID", ) spotify_cmd.func = queries @@ -485,7 +556,7 @@ class 
SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): ) def func(lib, opts, args): - items = lib.items(ui.decargs(args)) + items = lib.items(args) self._fetch_info(items, ui.should_write(), opts.force_refetch) sync_cmd.func = func @@ -500,24 +571,24 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): if self.config["mode"].get() not in ["list", "open"]: self._log.warning( - "{0} is not a valid mode", self.config["mode"].get() + "{} is not a valid mode", self.config["mode"].get() ) return False self.opts = opts return True - def _match_library_tracks(self, library, keywords): - """Get a list of simplified track object dicts for library tracks - matching the specified ``keywords``. + def _match_library_tracks(self, library: Library, keywords: str): + """Get simplified track object dicts for library tracks. + + Matches tracks based on the specified ``keywords``. :param library: beets library object to query. - :type library: beets.library.Library :param keywords: Query to match library items against. - :type keywords: str - :return: List of simplified track object dicts for library items - matching the specified query. - :rtype: list[dict] + + :returns: List of simplified track object dicts for library + items matching the specified query. 
+ """ results = [] failures = [] @@ -526,8 +597,8 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): if not items: self._log.debug( - "Your beets query returned no items, skipping {}.", - self.data_source, + "Your beets query returned no items, skipping {.data_source}.", + self, ) return @@ -548,25 +619,32 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): regex["search"], regex["replace"], value ) - # Custom values can be passed in the config (just in case) - artist = item[self.config["artist_field"].get()] - album = item[self.config["album_field"].get()] - keywords = item[self.config["track_field"].get()] + artist = item["artist"] or item["albumartist"] + album = item["album"] + query_string = item["title"] # Query the Web API for each track, look for the items' JSON data - query_filters = {"artist": artist, "album": album} + query_filters: SearchFilter = {} + if artist: + query_filters["artist"] = artist + if album: + query_filters["album"] = album + response_data_tracks = self._search_api( - query_type="track", keywords=keywords, filters=query_filters + query_type="track", + query_string=query_string, + filters=query_filters, ) if not response_data_tracks: query = self._construct_search_query( - keywords=keywords, filters=query_filters + query_string=query_string, filters=query_filters ) + failures.append(query) continue # Apply market filter if requested - region_filter = self.config["region_filter"].get() + region_filter: str = self.config["region_filter"].get() if region_filter: response_data_tracks = [ track_data @@ -579,8 +657,8 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): or self.config["tiebreak"].get() == "first" ): self._log.debug( - "{} track(s) found, count: {}", - self.data_source, + "{.data_source} track(s) found, count: {}", + self, len(response_data_tracks), ) chosen_result = response_data_tracks[0] @@ -591,7 +669,11 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): len(response_data_tracks), ) chosen_result 
= max( - response_data_tracks, key=lambda x: x["popularity"] + response_data_tracks, + key=lambda x: x[ + # We are sure this is a track response! + "popularity" # type: ignore[typeddict-item] + ], ) results.append(chosen_result) @@ -599,49 +681,49 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): if failure_count > 0: if self.config["show_failures"].get(): self._log.info( - "{} track(s) did not match a {} ID:", + "{} track(s) did not match a {.data_source} ID:", failure_count, - self.data_source, + self, ) for track in failures: self._log.info("track: {}", track) self._log.info("") else: self._log.warning( - "{} track(s) did not match a {} ID:\n" + "{} track(s) did not match a {.data_source} ID:\n" "use --show-failures to display", failure_count, - self.data_source, + self, ) return results def _output_match_results(self, results): - """Open a playlist or print Spotify URLs for the provided track - object dicts. + """Open a playlist or print Spotify URLs. + + Uses the provided track object dicts. 
+ + :param list[dict] results: List of simplified track object dicts + (https://developer.spotify.com/documentation/web-api/ + reference/object-model/#track-object-simplified) - :param results: List of simplified track object dicts - (https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified) - :type results: list[dict] """ if results: spotify_ids = [track_data["id"] for track_data in results] if self.config["mode"].get() == "open": self._log.info( - "Attempting to open {} with playlist".format( - self.data_source - ) + "Attempting to open {.data_source} with playlist", self ) - spotify_url = "spotify:trackset:Playlist:" + ",".join( - spotify_ids + spotify_url = ( + f"spotify:trackset:Playlist:{','.join(spotify_ids)}" ) webbrowser.open(spotify_url) else: for spotify_id in spotify_ids: - print(self.open_track_url + spotify_id) + print(f"{self.open_track_url}{spotify_id}") else: self._log.warning( - f"No {self.data_source} tracks found from beets query" + "No {.data_source} tracks found from beets query", self ) def _fetch_info(self, items, write, force): @@ -670,43 +752,69 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin): item["isrc"] = isrc item["ean"] = ean item["upc"] = upc - audio_features = self.track_audio_features(spotify_track_id) - if audio_features is None: - self._log.info("No audio features found for: {}", item) - continue - for feature in audio_features.keys(): - if feature in self.spotify_audio_features.keys(): - item[self.spotify_audio_features[feature]] = audio_features[ - feature - ] + + if self.audio_features_available: + audio_features = self.track_audio_features(spotify_track_id) + if audio_features is None: + self._log.info("No audio features found for: {}", item) + else: + for feature, value in audio_features.items(): + if feature in self.spotify_audio_features: + item[self.spotify_audio_features[feature]] = value + else: + self._log.debug("Audio features API unavailable, skipping") + 
item["spotify_updated"] = time.time() item.store() if write: item.try_write() - def track_info(self, track_id=None): + def track_info(self, track_id: str): """Fetch a track's popularity and external IDs using its Spotify ID.""" - track_data = self._handle_response( - requests.get, self.track_url + track_id - ) + track_data = self._handle_response("get", f"{self.track_url}{track_id}") + external_ids = track_data.get("external_ids", {}) + popularity = track_data.get("popularity") self._log.debug( "track_popularity: {} and track_isrc: {}", - track_data.get("popularity"), - track_data.get("external_ids").get("isrc"), + popularity, + external_ids.get("isrc"), ) return ( - track_data.get("popularity"), - track_data.get("external_ids").get("isrc"), - track_data.get("external_ids").get("ean"), - track_data.get("external_ids").get("upc"), + popularity, + external_ids.get("isrc"), + external_ids.get("ean"), + external_ids.get("upc"), ) - def track_audio_features(self, track_id=None): - """Fetch track audio features by its Spotify ID.""" + def track_audio_features(self, track_id: str): + """Fetch track audio features by its Spotify ID. + + Thread-safe: avoids redundant API calls and logs the 403 warning only + once. + + """ + # Fast path: if we've already detected unavailability, skip the call. + with self._audio_features_lock: + if not self.audio_features_available: + return None + try: return self._handle_response( - requests.get, self.audio_features_url + track_id + "get", f"{self.audio_features_url}{track_id}" ) - except SpotifyAPIError as e: + except AudioFeaturesUnavailableError: + # Disable globally in a thread-safe manner and warn once. + should_log = False + with self._audio_features_lock: + if self.audio_features_available: + self.audio_features_available = False + should_log = True + if should_log: + self._log.warning( + "Audio features API is unavailable (403 error). " + "Skipping audio features for remaining tracks." 
+ ) + return None + except APIError as e: self._log.debug("Spotify API error: {}", e) return None diff --git a/beetsplug/subsonicplaylist.py b/beetsplug/subsonicplaylist.py index 606cdc8bd..6c11ab918 100644 --- a/beetsplug/subsonicplaylist.py +++ b/beetsplug/subsonicplaylist.py @@ -115,7 +115,7 @@ class SubsonicPlaylistPlugin(BeetsPlugin): )[0] if playlists.attrib.get("code", "200") != "200": alt_error = ( - "error getting playlists," " but no error message found" + "error getting playlists, but no error message found" ) self._log.warn(playlists.attrib.get("message", alt_error)) return @@ -168,9 +168,7 @@ class SubsonicPlaylistPlugin(BeetsPlugin): params["v"] = "1.12.0" params["c"] = "beets" resp = requests.get( - "{}/rest/{}?{}".format( - self.config["base_url"].get(), endpoint, urlencode(params) - ), + f"{self.config['base_url'].get()}/rest/{endpoint}?{urlencode(params)}", timeout=10, ) return resp @@ -182,5 +180,5 @@ class SubsonicPlaylistPlugin(BeetsPlugin): for track in tracks: if track not in output: output[track] = ";" - output[track] += name + ";" + output[track] += f"{name};" return output diff --git a/beetsplug/subsonicupdate.py b/beetsplug/subsonicupdate.py index 2a537e35f..673cc94a8 100644 --- a/beetsplug/subsonicupdate.py +++ b/beetsplug/subsonicupdate.py @@ -36,7 +36,6 @@ from binascii import hexlify import requests -from beets import config from beets.plugins import BeetsPlugin __author__ = "https://github.com/maffo999" @@ -44,9 +43,9 @@ __author__ = "https://github.com/maffo999" class SubsonicUpdate(BeetsPlugin): def __init__(self): - super().__init__() + super().__init__("subsonic") # Set default configuration values - config["subsonic"].add( + self.config.add( { "user": "admin", "pass": "admin", @@ -54,7 +53,8 @@ class SubsonicUpdate(BeetsPlugin): "auth": "token", } ) - config["subsonic"]["pass"].redact = True + self.config["user"].redact = True + self.config["pass"].redact = True self.register_listener("database_change", self.db_change) 
self.register_listener("smartplaylist_update", self.spl_update) @@ -64,25 +64,23 @@ class SubsonicUpdate(BeetsPlugin): def spl_update(self): self.register_listener("cli_exit", self.start_scan) - @staticmethod - def __create_token(): + def __create_token(self): """Create salt and token from given password. :return: The generated salt and hashed token """ - password = config["subsonic"]["pass"].as_str() + password = self.config["pass"].as_str() # Pick the random sequence and salt the password r = string.ascii_letters + string.digits salt = "".join([random.choice(r) for _ in range(6)]) - salted_password = password + salt + salted_password = f"{password}{salt}" token = hashlib.md5(salted_password.encode("utf-8")).hexdigest() # Put together the payload of the request to the server and the URL return salt, token - @staticmethod - def __format_url(endpoint): + def __format_url(self, endpoint): """Get the Subsonic URL to trigger the given endpoint. Uses either the url config option or the deprecated host, port, and context_path config options together. 
@@ -90,27 +88,27 @@ class SubsonicUpdate(BeetsPlugin): :return: Endpoint for updating Subsonic """ - url = config["subsonic"]["url"].as_str() + url = self.config["url"].as_str() if url and url.endswith("/"): url = url[:-1] # @deprecated("Use url config option instead") if not url: - host = config["subsonic"]["host"].as_str() - port = config["subsonic"]["port"].get(int) - context_path = config["subsonic"]["contextpath"].as_str() + host = self.config["host"].as_str() + port = self.config["port"].get(int) + context_path = self.config["contextpath"].as_str() if context_path == "/": context_path = "" url = f"http://{host}:{port}{context_path}" - return url + f"/rest/{endpoint}" + return f"{url}/rest/{endpoint}" def start_scan(self): - user = config["subsonic"]["user"].as_str() - auth = config["subsonic"]["auth"].as_str() + user = self.config["user"].as_str() + auth = self.config["auth"].as_str() url = self.__format_url("startScan") - self._log.debug("URL is {0}", url) - self._log.debug("auth type is {0}", config["subsonic"]["auth"]) + self._log.debug("URL is {}", url) + self._log.debug("auth type is {.config[auth]}", self) if auth == "token": salt, token = self.__create_token() @@ -123,7 +121,7 @@ class SubsonicUpdate(BeetsPlugin): "f": "json", } elif auth == "password": - password = config["subsonic"]["pass"].as_str() + password = self.config["pass"].as_str() encpass = hexlify(password.encode()).decode() payload = { "u": user, @@ -147,14 +145,15 @@ class SubsonicUpdate(BeetsPlugin): and json["subsonic-response"]["status"] == "ok" ): count = json["subsonic-response"]["scanStatus"]["count"] - self._log.info(f"Updating Subsonic; scanning {count} tracks") + self._log.info("Updating Subsonic; scanning {} tracks", count) elif ( response.status_code == 200 and json["subsonic-response"]["status"] == "failed" ): - error_message = json["subsonic-response"]["error"]["message"] - self._log.error(f"Error: {error_message}") + self._log.error( + "Error: 
{[subsonic-response][error][message]}", json + ) else: - self._log.error("Error: {0}", json) + self._log.error("Error: {}", json) except Exception as error: - self._log.error(f"Error: {error}") + self._log.error("Error: {}", error) diff --git a/beetsplug/the.py b/beetsplug/the.py index 42da708a3..664d4c01e 100644 --- a/beetsplug/the.py +++ b/beetsplug/the.py @@ -23,7 +23,7 @@ __version__ = "1.1" PATTERN_THE = "^the\\s" PATTERN_A = "^[a][n]?\\s" -FORMAT = "{0}, {1}" +FORMAT = "{}, {}" class ThePlugin(BeetsPlugin): @@ -38,7 +38,7 @@ class ThePlugin(BeetsPlugin): { "the": True, "a": True, - "format": "{0}, {1}", + "format": "{}, {}", "strip": False, "patterns": [], } @@ -50,11 +50,11 @@ class ThePlugin(BeetsPlugin): try: re.compile(p) except re.error: - self._log.error("invalid pattern: {0}", p) + self._log.error("invalid pattern: {}", p) else: if not (p.startswith("^") or p.endswith("$")): self._log.warning( - 'warning: "{0}" will not ' "match string start/end", + 'warning: "{}" will not match string start/end', p, ) if self.config["a"]: @@ -94,7 +94,7 @@ class ThePlugin(BeetsPlugin): for p in self.patterns: r = self.unthe(text, p) if r != text: - self._log.debug('"{0}" -> "{1}"', text, r) + self._log.debug('"{}" -> "{}"', text, r) break return r else: diff --git a/beetsplug/thumbnails.py b/beetsplug/thumbnails.py index f0755c0f9..651eaf3ac 100644 --- a/beetsplug/thumbnails.py +++ b/beetsplug/thumbnails.py @@ -28,7 +28,7 @@ from pathlib import PurePosixPath from xdg import BaseDirectory from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, decargs +from beets.ui import Subcommand from beets.util import bytestring_path, displayable_path, syspath from beets.util.artresizer import ArtResizer @@ -78,7 +78,7 @@ class ThumbnailsPlugin(BeetsPlugin): def process_query(self, lib, opts, args): self.config.set_args(opts) if self._check_local_ok(): - for album in lib.albums(decargs(args)): + for album in lib.albums(args): self.process_album(album) def 
_check_local_ok(self): @@ -104,21 +104,21 @@ class ThumbnailsPlugin(BeetsPlugin): f"Thumbnails: ArtResizer backend {ArtResizer.shared.method}" f" unexpectedly cannot write image metadata." ) - self._log.debug(f"using {ArtResizer.shared.method} to write metadata") + self._log.debug("using {.shared.method} to write metadata", ArtResizer) uri_getter = GioURI() if not uri_getter.available: uri_getter = PathlibURI() - self._log.debug("using {0.name} to compute URIs", uri_getter) + self._log.debug("using {.name} to compute URIs", uri_getter) self.get_uri = uri_getter.uri return True def process_album(self, album): """Produce thumbnails for the album folder.""" - self._log.debug("generating thumbnail for {0}", album) + self._log.debug("generating thumbnail for {}", album) if not album.artpath: - self._log.info("album {0} has no art", album) + self._log.info("album {} has no art", album) return if self.config["dolphin"]: @@ -127,7 +127,7 @@ class ThumbnailsPlugin(BeetsPlugin): size = ArtResizer.shared.get_size(album.artpath) if not size: self._log.warning( - "problem getting the picture size for {0}", album.artpath + "problem getting the picture size for {.artpath}", album ) return @@ -137,9 +137,9 @@ class ThumbnailsPlugin(BeetsPlugin): wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR) if wrote: - self._log.info("wrote thumbnail for {0}", album) + self._log.info("wrote thumbnail for {}", album) else: - self._log.info("nothing to do for {0}", album) + self._log.info("nothing to do for {}", album) def make_cover_thumbnail(self, album, size, target_dir): """Make a thumbnail of given size for `album` and put it in @@ -154,16 +154,16 @@ class ThumbnailsPlugin(BeetsPlugin): ): if self.config["force"]: self._log.debug( - "found a suitable {1}x{1} thumbnail for {0}, " + "found a suitable {0}x{0} thumbnail for {1}, " "forcing regeneration", - album, size, + album, ) else: self._log.debug( - "{1}x{1} thumbnail for {0} exists and is " "recent enough", - album, + "{0}x{0} 
thumbnail for {1} exists and is recent enough", size, + album, ) return False resized = ArtResizer.shared.resize(size, album.artpath, target) @@ -192,7 +192,7 @@ class ThumbnailsPlugin(BeetsPlugin): ArtResizer.shared.write_metadata(image_path, metadata) except Exception: self._log.exception( - "could not write metadata to {0}", displayable_path(image_path) + "could not write metadata to {}", displayable_path(image_path) ) def make_dolphin_cover_thumbnail(self, album): @@ -202,9 +202,9 @@ class ThumbnailsPlugin(BeetsPlugin): artfile = os.path.split(album.artpath)[1] with open(syspath(outfilename), "w") as f: f.write("[Desktop Entry]\n") - f.write("Icon=./{}".format(artfile.decode("utf-8"))) + f.write(f"Icon=./{artfile.decode('utf-8')}") f.close() - self._log.debug("Wrote file {0}", displayable_path(outfilename)) + self._log.debug("Wrote file {}", displayable_path(outfilename)) class URIGetter: @@ -230,8 +230,7 @@ def copy_c_string(c_string): # This is a pretty dumb way to get a string copy, but it seems to # work. A more surefire way would be to allocate a ctypes buffer and copy # the data with `memcpy` or somesuch. - s = ctypes.cast(c_string, ctypes.c_char_p).value - return b"" + s + return ctypes.cast(c_string, ctypes.c_char_p).value class GioURI(URIGetter): @@ -266,9 +265,7 @@ class GioURI(URIGetter): g_file_ptr = self.libgio.g_file_new_for_path(path) if not g_file_ptr: raise RuntimeError( - "No gfile pointer received for {}".format( - displayable_path(path) - ) + f"No gfile pointer received for {displayable_path(path)}" ) try: diff --git a/beetsplug/titlecase.py b/beetsplug/titlecase.py new file mode 100644 index 000000000..e7003fd28 --- /dev/null +++ b/beetsplug/titlecase.py @@ -0,0 +1,253 @@ +# This file is part of beets. 
+# Copyright 2025, Henry Oberholtzer +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Apply NYT manual of style title case rules, to text. +Title case logic is derived from the python-titlecase library. +Provides a template function and a tag modification function.""" + +import re +from functools import cached_property +from typing import TypedDict + +from titlecase import titlecase + +from beets import ui +from beets.autotag.hooks import AlbumInfo, Info +from beets.importer import ImportSession, ImportTask +from beets.library import Item +from beets.plugins import BeetsPlugin + +__author__ = "henryoberholtzer@gmail.com" +__version__ = "1.0" + + +class PreservedText(TypedDict): + words: dict[str, str] + phrases: dict[str, re.Pattern[str]] + + +class TitlecasePlugin(BeetsPlugin): + def __init__(self) -> None: + super().__init__() + + self.config.add( + { + "auto": True, + "preserve": [], + "fields": [], + "replace": [], + "separators": [], + "force_lowercase": False, + "small_first_last": True, + "the_artist": True, + "all_caps": False, + "all_lowercase": False, + "after_choice": False, + } + ) + + """ + auto - Automatically apply titlecase to new import metadata. + preserve - Provide a list of strings with specific case requirements. + fields - Fields to apply titlecase to. + replace - List of pairs, first is the target, second is the replacement + separators - Other characters to treat like periods. 
+ force_lowercase - Lowercase the string before titlecase. + small_first_last - If small characters should be cased at the start of strings. + the_artist - If the plugin infers the field to be an artist field + (e.g. the field contains "artist") + It will capitalize a lowercase The, helpful for the artist names + that start with 'The', like 'The Who' or 'The Talking Heads' when + they are not at the start of a string. Superseded by preserved phrases. + all_caps - If the alphabet in the string is all uppercase, do not modify. + all_lowercase - If the alphabet in the string is all lowercase, do not modify. + """ + # Register template function + self.template_funcs["titlecase"] = self.titlecase + + # Register UI subcommands + self._command = ui.Subcommand( + "titlecase", + help="Apply titlecasing to metadata specified in config.", + ) + + if self.config["auto"].get(bool): + if self.config["after_choice"].get(bool): + self.import_stages = [self.imported] + else: + self.register_listener( + "trackinfo_received", self.received_info_handler + ) + self.register_listener( + "albuminfo_received", self.received_info_handler + ) + + @cached_property + def force_lowercase(self) -> bool: + return self.config["force_lowercase"].get(bool) + + @cached_property + def replace(self) -> list[tuple[str, str]]: + return self.config["replace"].as_pairs() + + @cached_property + def the_artist(self) -> bool: + return self.config["the_artist"].get(bool) + + @cached_property + def fields_to_process(self) -> set[str]: + fields = set(self.config["fields"].as_str_seq()) + self._log.debug(f"fields: {', '.join(fields)}") + return fields + + @cached_property + def preserve(self) -> PreservedText: + strings = self.config["preserve"].as_str_seq() + preserved: PreservedText = {"words": {}, "phrases": {}} + for s in strings: + if " " in s: + preserved["phrases"][s] = re.compile( + rf"\b{re.escape(s)}\b", re.IGNORECASE + ) + else: + preserved["words"][s.upper()] = s + return preserved + + 
@cached_property + def separators(self) -> re.Pattern[str] | None: + if separators := "".join( + dict.fromkeys(self.config["separators"].as_str_seq()) + ): + return re.compile(rf"(.*?[{re.escape(separators)}]+)(\s*)(?=.)") + return None + + @cached_property + def small_first_last(self) -> bool: + return self.config["small_first_last"].get(bool) + + @cached_property + def all_caps(self) -> bool: + return self.config["all_caps"].get(bool) + + @cached_property + def all_lowercase(self) -> bool: + return self.config["all_lowercase"].get(bool) + + @cached_property + def the_artist_regexp(self) -> re.Pattern[str]: + return re.compile(r"\bthe\b") + + def titlecase_callback(self, word, **kwargs) -> str | None: + """Callback function for words to preserve case of.""" + if preserved_word := self.preserve["words"].get(word.upper(), ""): + return preserved_word + return None + + def received_info_handler(self, info: Info): + """Calls titlecase fields for AlbumInfo or TrackInfo + Processes the tracks field for AlbumInfo + """ + self.titlecase_fields(info) + if isinstance(info, AlbumInfo): + for track in info.tracks: + self.titlecase_fields(track) + + def commands(self) -> list[ui.Subcommand]: + def func(lib, opts, args): + write = ui.should_write() + for item in lib.items(args): + self._log.info(f"titlecasing {item.title}:") + self.titlecase_fields(item) + item.store() + if write: + item.try_write() + + self._command.func = func + return [self._command] + + def titlecase_fields(self, item: Item | Info) -> None: + """Applies titlecase to fields, except + those excluded by the default exclusions and the + set exclude lists. 
+ """ + for field in self.fields_to_process: + init_field = getattr(item, field, "") + if init_field: + if isinstance(init_field, list) and isinstance( + init_field[0], str + ): + cased_list: list[str] = [ + self.titlecase(i, field) for i in init_field + ] + if cased_list != init_field: + setattr(item, field, cased_list) + self._log.debug( + f"{field}: {', '.join(init_field)} ->", + f"{', '.join(cased_list)}", + ) + elif isinstance(init_field, str): + cased: str = self.titlecase(init_field, field) + if cased != init_field: + setattr(item, field, cased) + self._log.debug(f"{field}: {init_field} -> {cased}") + else: + self._log.debug(f"{field}: no string present") + else: + self._log.debug(f"{field}: does not exist on {type(item)}") + + def titlecase(self, text: str, field: str = "") -> str: + """Titlecase the given text.""" + # Check we should split this into two substrings. + if self.separators: + if len(splits := self.separators.findall(text)): + split_cased = "".join( + [self.titlecase(s[0], field) + s[1] for s in splits] + ) + # Add on the remaining portion + return split_cased + self.titlecase( + text[len(split_cased) :], field + ) + # Check if A-Z is all uppercase or all lowercase + if self.all_lowercase and text.islower(): + return text + elif self.all_caps and text.isupper(): + return text + # Any necessary replacements go first, mainly punctuation. + titlecased = text.lower() if self.force_lowercase else text + for pair in self.replace: + target, replacement = pair + titlecased = titlecased.replace(target, replacement) + # General titlecase operation + titlecased = titlecase( + titlecased, + small_first_last=self.small_first_last, + callback=self.titlecase_callback, + ) + # Apply "The Artist" feature + if self.the_artist and "artist" in field: + titlecased = self.the_artist_regexp.sub("The", titlecased) + # More complicated phrase replacements. 
+ for phrase, regexp in self.preserve["phrases"].items(): + titlecased = regexp.sub(phrase, titlecased) + return titlecased + + def imported(self, session: ImportSession, task: ImportTask) -> None: + """Import hook for titlecasing on import.""" + for item in task.imported_items(): + try: + self._log.debug(f"titlecasing {item.title}:") + self.titlecase_fields(item) + item.store() + except Exception as e: + self._log.debug(f"titlecasing exception {e}") diff --git a/beetsplug/types.py b/beetsplug/types.py index 9ba3aac66..561ce6828 100644 --- a/beetsplug/types.py +++ b/beetsplug/types.py @@ -15,7 +15,6 @@ from confuse import ConfigValueError -from beets import library from beets.dbcore import types from beets.plugins import BeetsPlugin @@ -42,9 +41,9 @@ class TypesPlugin(BeetsPlugin): elif value.get() == "bool": mytypes[key] = types.BOOLEAN elif value.get() == "date": - mytypes[key] = library.DateType() + mytypes[key] = types.DATE else: raise ConfigValueError( - "unknown type '{}' for the '{}' field".format(value, key) + f"unknown type '{value}' for the '{key}' field" ) return mytypes diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py index b473a346a..20ae195a7 100644 --- a/beetsplug/unimported.py +++ b/beetsplug/unimported.py @@ -34,7 +34,7 @@ class Unimported(BeetsPlugin): def commands(self): def print_unimported(lib, opts, args): ignore_exts = [ - ("." 
+ x).encode() + f".{x}".encode() for x in self.config["ignore_extensions"].as_str_seq() ] ignore_dirs = [ diff --git a/beetsplug/web/__init__.py b/beetsplug/web/__init__.py index 175cec4a9..28bc20152 100644 --- a/beetsplug/web/__init__.py +++ b/beetsplug/web/__init__.py @@ -17,16 +17,29 @@ import base64 import json import os +import typing as t import flask -from flask import g, jsonify +from flask import jsonify from unidecode import unidecode from werkzeug.routing import BaseConverter, PathConverter import beets.library from beets import ui, util +from beets.dbcore.query import PathQuery from beets.plugins import BeetsPlugin +# Type checking hacks + +if t.TYPE_CHECKING: + + class LibraryCtx(flask.ctx._AppCtxGlobals): + lib: beets.library.Library + + g = LibraryCtx() +else: + from flask import g + # Utilities. @@ -76,7 +89,7 @@ def json_generator(items, root, expand=False): representation :returns: generator that yields strings """ - yield '{"%s":[' % root + yield f'{{"{root}":[' first = True for item in items: if first: @@ -231,9 +244,7 @@ def _get_unique_table_field_values(model, field, sort_field): raise KeyError with g.lib.transaction() as tx: rows = tx.query( - "SELECT DISTINCT '{}' FROM '{}' ORDER BY '{}'".format( - field, model._table, sort_field - ) + f"SELECT DISTINCT {field} FROM {model._table} ORDER BY {sort_field}" ) return [row[0] for row in rows] @@ -307,18 +318,8 @@ def all_items(): def item_file(item_id): item = g.lib.get_item(item_id) - # On Windows under Python 2, Flask wants a Unicode path. On Python 3, it - # *always* wants a Unicode path. 
- if os.name == "nt": - item_path = util.syspath(item.path) - else: - item_path = os.fsdecode(item.path) - + item_path = util.syspath(item.path) base_filename = os.path.basename(item_path) - if isinstance(base_filename, bytes): - unicode_base_filename = util.displayable_path(base_filename) - else: - unicode_base_filename = base_filename try: # Imitate http.server behaviour @@ -326,7 +327,7 @@ def item_file(item_id): except UnicodeError: safe_filename = unidecode(base_filename) else: - safe_filename = unicode_base_filename + safe_filename = base_filename response = flask.send_file( item_path, as_attachment=True, download_name=safe_filename @@ -342,7 +343,7 @@ def item_query(queries): @app.route("/item/path/<everything:path>") def item_at_path(path): - query = beets.library.PathQuery("path", path.encode("utf-8")) + query = PathQuery("path", path.encode("utf-8")) item = g.lib.items(query).get() if item: return flask.jsonify(_rep(item)) @@ -469,7 +470,7 @@ class WebPlugin(BeetsPlugin): ) def func(lib, opts, args): - args = ui.decargs(args) + args = args if args: self.config["host"] = args.pop(0) if args: @@ -485,7 +486,7 @@ class WebPlugin(BeetsPlugin): # Enable CORS if required. 
if self.config["cors"]: self._log.info( - "Enabling CORS with origin: {0}", self.config["cors"] + "Enabling CORS with origin: {}", self.config["cors"] ) from flask_cors import CORS diff --git a/beetsplug/web/static/beets.js b/beetsplug/web/static/beets.js index 97af70110..0600d09d0 100644 --- a/beetsplug/web/static/beets.js +++ b/beetsplug/web/static/beets.js @@ -241,6 +241,11 @@ var AppView = Backbone.View.extend({ 'pause': _.bind(this.audioPause, this), 'ended': _.bind(this.audioEnded, this) }); + if ("mediaSession" in navigator) { + navigator.mediaSession.setActionHandler("nexttrack", () => { + this.playNext(); + }); + } }, showItems: function(items) { this.shownItems = items; @@ -266,7 +271,9 @@ var AppView = Backbone.View.extend({ playItem: function(item) { var url = 'item/' + item.get('id') + '/file'; $('#player audio').attr('src', url); - $('#player audio').get(0).play(); + $('#player audio').get(0).play().then(() => { + this.updateMediaSession(item); + }); if (this.playingItem != null) { this.playingItem.entryView.setPlaying(false); @@ -275,6 +282,26 @@ var AppView = Backbone.View.extend({ this.playingItem = item; }, + updateMediaSession: function (item) { + if ("mediaSession" in navigator) { + const album_id = item.get("album_id"); + const album_art_url = "album/" + album_id + "/art"; + navigator.mediaSession.metadata = new MediaMetadata({ + title: item.get("title"), + artist: item.get("artist"), + album: item.get("album"), + artwork: [ + { src: album_art_url, sizes: "96x96" }, + { src: album_art_url, sizes: "128x128" }, + { src: album_art_url, sizes: "192x192" }, + { src: album_art_url, sizes: "256x256" }, + { src: album_art_url, sizes: "384x384" }, + { src: album_art_url, sizes: "512x512" }, + ], + }); + } + }, + audioPause: function() { this.playingItem.entryView.setPlaying(false); }, @@ -284,7 +311,9 @@ var AppView = Backbone.View.extend({ }, audioEnded: function() { this.playingItem.entryView.setPlaying(false); - + this.playNext(); + }, + playNext: 
function(){ // Try to play the next track. var idx = this.shownItems.indexOf(this.playingItem); if (idx == -1) { diff --git a/beetsplug/web/templates/index.html b/beetsplug/web/templates/index.html index 0fdd46d15..fe88a20ad 100644 --- a/beetsplug/web/templates/index.html +++ b/beetsplug/web/templates/index.html @@ -1,14 +1,17 @@ <!DOCTYPE html> -<html> +<html lang="en"> <head> + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <meta name="description" content="the music geek’s media organizer"> + <meta name="keywords" + content="beets, media, music, library, metadata, player, tagger, grep, transcoder, organizer"> <title>beets - - + href="{{ url_for('static', filename='beets.css') }}" + type="text/css"> - + @@ -17,18 +20,14 @@

beets

- - - - +
-
@@ -36,15 +35,14 @@
- -
-
- -
-
- +
+
diff --git a/beetsplug/zero.py b/beetsplug/zero.py index bda4052ab..ab1bfa5ca 100644 --- a/beetsplug/zero.py +++ b/beetsplug/zero.py @@ -19,9 +19,9 @@ import re import confuse from mediafile import MediaFile -from beets.importer import action +from beets.importer import Action from beets.plugins import BeetsPlugin -from beets.ui import Subcommand, decargs, input_yn +from beets.ui import Subcommand, input_yn __author__ = "baobab@heresiarch.info" @@ -41,6 +41,7 @@ class ZeroPlugin(BeetsPlugin): "fields": [], "keep_fields": [], "update_database": False, + "omit_single_disc": False, } ) @@ -75,11 +76,11 @@ class ZeroPlugin(BeetsPlugin): zero_command = Subcommand("zero", help="set fields to null") def zero_fields(lib, opts, args): - if not decargs(args) and not input_yn( + if not args and not input_yn( "Remove fields for all items? (Y/n)", True ): return - for item in lib.items(decargs(args)): + for item in lib.items(args): self.process_item(item) zero_command.func = zero_fields @@ -90,10 +91,10 @@ class ZeroPlugin(BeetsPlugin): Do some sanity checks then compile the regexes. 
""" if field not in MediaFile.fields(): - self._log.error("invalid field: {0}", field) + self._log.error("invalid field: {}", field) elif field in ("id", "path", "album_id"): self._log.warning( - "field '{0}' ignored, zeroing " "it would be dangerous", field + "field '{}' ignored, zeroing it would be dangerous", field ) else: try: @@ -105,7 +106,7 @@ class ZeroPlugin(BeetsPlugin): self.fields_to_progs[field] = [] def import_task_choice_event(self, session, task): - if task.choice_flag == action.ASIS and not self.warned: + if task.choice_flag == Action.ASIS and not self.warned: self._log.warning('cannot zero in "as-is" mode') self.warned = True # TODO request write in as-is mode @@ -123,9 +124,14 @@ class ZeroPlugin(BeetsPlugin): """ fields_set = False + if "disc" in tags and self.config["omit_single_disc"].get(bool): + if item.disctotal == 1: + fields_set = True + self._log.debug("disc: {.disc} -> None", item) + tags["disc"] = None + if not self.fields_to_progs: - self._log.warning("no fields, nothing to do") - return False + self._log.warning("no fields list to remove") for field, progs in self.fields_to_progs.items(): if field in tags: @@ -137,7 +143,7 @@ class ZeroPlugin(BeetsPlugin): if match: fields_set = True - self._log.debug("{0}: {1} -> None", field, value) + self._log.debug("{}: {} -> None", field, value) tags[field] = None if self.config["update_database"]: item[field] = None diff --git a/codecov.yml b/codecov.yml index c4b333ad3..dbfa484f5 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,5 +1,6 @@ -# Don't post a comment on pull requests. 
-comment: off +comment: + layout: "header, diff, files" + require_changes: true # Sets non-blocking status checks # https://docs.codecov.com/docs/commit-status#informational @@ -11,7 +12,4 @@ coverage: patch: default: informational: true - changes: no - -github_checks: - annotations: false + changes: false diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..1f041cc9d --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,2 @@ +_build +generated/ \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile index f940dd931..d642530f1 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -6,6 +6,7 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build +SOURCEDIR = . # When both are available, use Sphinx 2.x for autodoc compatibility. ifeq ($(shell which sphinx-build2 >/dev/null 2>&1 ; echo $$?),0) @@ -39,7 +40,7 @@ help: @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + -rm -rf $(BUILDDIR)/* $(SOURCEDIR)/api/generated/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/docs/_templates/autosummary/base.rst b/docs/_templates/autosummary/base.rst new file mode 100644 index 000000000..6ea19ecd9 --- /dev/null +++ b/docs/_templates/autosummary/base.rst @@ -0,0 +1,3 @@ +{{ fullname | escape | underline}} +.. currentmodule:: {{ module }} +.. auto{{ objtype }}:: {{ objname }} diff --git a/docs/_templates/autosummary/class.rst b/docs/_templates/autosummary/class.rst new file mode 100644 index 000000000..3259e9279 --- /dev/null +++ b/docs/_templates/autosummary/class.rst @@ -0,0 +1,38 @@ +{{ name | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :members: + :private-members: + :show-inheritance: + :inherited-members: + + {% block methods %} + .. automethod:: __init__ + + {% if methods %} + ..
rubric:: {{ _('Public methods summary') }} + + .. autosummary:: + {% for item in methods %} + ~{{ name }}.{{ item }} + {%- endfor %} + {% for item in _methods %} + ~{{ name }}.{{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + .. rubric:: {{ _('Methods definition') }} + +{% if objname in related_typeddicts %} +Related TypedDicts +------------------ + +{% for typeddict in related_typeddicts[objname] %} +.. autotypeddict:: {{ typeddict }} + :show-inheritance: + +{% endfor %} +{% endif %} diff --git a/docs/_templates/autosummary/module.rst b/docs/_templates/autosummary/module.rst new file mode 100644 index 000000000..923bc55f8 --- /dev/null +++ b/docs/_templates/autosummary/module.rst @@ -0,0 +1,11 @@ +{{ fullname | escape | underline}} +{% block modules %} +{% if modules %} +.. rubric:: Modules + +{% for item in modules %} +{{ item }} + +{%- endfor %} +{% endif %} +{% endblock %} diff --git a/docs/api/database.rst b/docs/api/database.rst new file mode 100644 index 000000000..b8c2235a2 --- /dev/null +++ b/docs/api/database.rst @@ -0,0 +1,44 @@ +Database +======== + +.. currentmodule:: beets.library + +Library +------- + +.. autosummary:: + :toctree: generated/ + + Library + +Models +------ + +.. autosummary:: + :toctree: generated/ + + LibModel + Album + Item + +Transactions +------------ + +.. currentmodule:: beets.dbcore.db + +.. autosummary:: + :toctree: generated/ + + Transaction + +Queries +------- + +.. currentmodule:: beets.dbcore.query + +.. autosummary:: + :toctree: generated/ + + Query + FieldQuery + AndQuery diff --git a/docs/api/index.rst b/docs/api/index.rst new file mode 100644 index 000000000..a1ecc4f72 --- /dev/null +++ b/docs/api/index.rst @@ -0,0 +1,10 @@ +API Reference +============= + +.. 
toctree:: + :maxdepth: 2 + :titlesonly: + + plugins + plugin_utilities + database diff --git a/docs/api/plugin_utilities.rst b/docs/api/plugin_utilities.rst new file mode 100644 index 000000000..8c4355a43 --- /dev/null +++ b/docs/api/plugin_utilities.rst @@ -0,0 +1,16 @@ +Plugin Utilities +================ + +.. currentmodule:: beetsplug._utils.requests + +.. autosummary:: + :toctree: generated/ + + RequestHandler + +.. currentmodule:: beetsplug._utils.musicbrainz + +.. autosummary:: + :toctree: generated/ + + MusicBrainzAPI diff --git a/docs/api/plugins.rst b/docs/api/plugins.rst new file mode 100644 index 000000000..2ce8dbed6 --- /dev/null +++ b/docs/api/plugins.rst @@ -0,0 +1,17 @@ +Plugins +======= + +.. currentmodule:: beets.plugins + +.. autosummary:: + :toctree: generated/ + + BeetsPlugin + +.. currentmodule:: beets.metadata_plugins + +.. autosummary:: + :toctree: generated/ + + MetadataSourcePlugin + SearchApiMetadataSourcePlugin diff --git a/docs/changelog.rst b/docs/changelog.rst index e52f329b0..2ba9f5cbd 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,133 +1,524 @@ Changelog ========= -Changelog goes here! Please add your entry to the bottom of one of the lists below! +Changelog goes here! Please add your entry to the bottom of one of the lists +below! Unreleased ---------- -Beets now requires Python 3.9 or later since support for EOL Python 3.8 has +Beets now requires Python 3.10 or later since support for EOL Python 3.9 has been dropped. New features: -* :doc:`plugins/lastgenre`: The new configuration option, ``keep_existing``, - provides more fine-grained control over how pre-populated genre tags are - handled. The ``force`` option now behaves in a more conventional manner. - :bug:`4982` -* :doc:`plugins/lyrics`: Add new configuration option ``dist_thresh`` to - control the maximum allowed distance between the lyrics search result and the - tagged item's artist and title. 
This is useful for preventing false positives - when fetching lyrics. -* :doc:`plugins/lyrics`: Rewrite lyrics translation functionality to use Azure - AI Translator API and add relevant instructions to the documentation. -* :doc:`plugins/missing`: Add support for all metadata sources. -* :doc:`plugins/mbsync`: Add support for all metadata sorces. +- :doc:`plugins/fetchart`: Added config setting for a fallback cover art image. +- :doc:`plugins/ftintitle`: Added argument for custom feat. words in ftintitle. +- :doc:`plugins/ftintitle`: Added album template value ``album_artist_no_feat``. +- :doc:`plugins/musicbrainz`: Allow selecting tags or genres to populate the + genres tag. +- :doc:`plugins/ftintitle`: Added argument to skip the processing of artist and + album artist are the same in ftintitle. +- :doc:`plugins/play`: Added `$playlist` marker to precisely edit the playlist + filepath into the command calling the player program. +- :doc:`plugins/lastgenre`: For tuning plugin settings ``-vvv`` can be passed to + receive extra verbose logging around last.fm results and how they are + resolved. The ``extended_debug`` config setting and ``--debug`` option have + been removed. +- :doc:`plugins/importsource`: Added new plugin that tracks original import + paths and optionally suggests removing source files when items are removed + from the library. +- :doc:`plugins/mbpseudo`: Add a new `mbpseudo` plugin to proactively receive + MusicBrainz pseudo-releases as recommendations during import. +- Added support for Python 3.13. +- :doc:`/plugins/convert`: ``force`` can be passed to override checks like + no_convert, never_convert_lossy_files, same format, and max_bitrate +- :doc:`plugins/titlecase`: Add the `titlecase` plugin to allow users to resolve + differences in metadata source styles. +- :doc:`plugins/spotify`: Added support for multi-artist albums and tracks, + saving all contributing artists to the respective fields. 
+- :doc:`plugins/fetchart`: Fix colorized output text. +- :doc:`plugins/ftintitle`: Featured artists are now inserted before brackets + containing remix/edit-related keywords (e.g., "Remix", "Live", "Edit") instead + of being appended at the end. This improves formatting for titles like "Song 1 + (Carol Remix) ft. Bob" which becomes "Song 1 ft. Bob (Carol Remix)". A variety + of brackets are supported and a new ``bracket_keywords`` configuration option + allows customizing the keywords. Setting ``bracket_keywords`` to an empty list + matches any bracket content regardless of keywords. Bug fixes: -* :doc:`plugins/thumbnails`: Fix API call to GIO on big endian architectures - (like s390x) in thumbnails plugin. - :bug:`5708` -* :doc:`plugins/listenbrainz`: Fix rST formatting for URLs of Listenbrainz API Key documentation and config.yaml. -* :doc:`plugins/listenbrainz`: Fix ``UnboundLocalError`` in cases where 'mbid' is not defined. -* :doc:`plugins/fetchart`: Fix fetchart bug where a tempfile could not be deleted due to never being - properly closed. - :bug:`5521` -* :doc:`plugins/lyrics`: LRCLib will fallback to plain lyrics if synced lyrics - are not found and `synced` flag is set to `yes`. -* Synchronise files included in the source distribution with what we used to - have before the introduction of Poetry. - :bug:`5531` - :bug:`5526` -* :ref:`write-cmd`: Fix the issue where for certain files differences in - ``mb_artistid``, ``mb_albumartistid`` and ``albumtype`` fields are shown on - every attempt to write tags. Note: your music needs to be reimported with - ``beet import -LI`` or synchronised with ``beet mbsync`` in order to fix - this! - :bug:`5265` - :bug:`5371` - :bug:`4715` -* :ref:`import-cmd`: Fix ``MemoryError`` and improve performance tagging large - albums by replacing ``munkres`` library with ``lap.lapjv``. 
- :bug:`5207` -* :ref:`query-sort`: Fix a bug that would raise an exception when sorting on - a non-string field that is not populated in all items. - :bug:`5512` -* :doc:`plugins/lastgenre`: Fix track-level genre handling. Now when an album-level - genre is set already, single tracks don't fall back to the album's genre and - request their own last.fm genre. Also log messages regarding what's been - tagged are now more polished. - :bug:`5582` -* Fix ambiguous column name ``sqlite3.OperationalError`` that occured in album - queries that filtered album track titles, for example ``beet list -a keyword - title:foo``. -* :doc:`plugins/lyrics`: Rewrite lyrics tests using pytest to provide isolated - configuration for each test case. This fixes the issue where some tests - failed because they read developers' local lyrics configuration. - :bug:`5133` -* :doc:`plugins/lyrics`: Do not attempt to search for lyrics if either the - artist or title is missing and ignore ``artist_sort`` value if it is empty. - :bug:`2635` -* :doc:`plugins/lyrics`: Fix fetching lyrics from ``lrclib`` source. If we - cannot find lyrics for a specific album, artist, title combination, the - plugin now tries to search for the artist and title and picks the most - relevant result. Update the default ``sources`` configuration to prioritize - ``lrclib`` over other sources since it returns reliable results quicker than - others. - :bug:`5102` -* :doc:`plugins/lyrics`: Fix the issue with ``genius`` backend not being able - to match lyrics when there is a slight variation in the artist name. - :bug:`4791` -* :doc:`plugins/lyrics`: Fix plugin crash when ``genius`` backend returns empty - lyrics. - :bug:`5583` -* ImageMagick 7.1.1-44 is now supported. +- :doc:`/plugins/spotify`: Updated Spotify API credentials. 
:bug:`6270` +- :doc:`/plugins/smartplaylist`: Fixed an issue where multiple queries in a + playlist configuration were not preserving their order, causing items to + appear in database order rather than the order specified in the config. + :bug:`6183` +- :doc:`plugins/inline`: Fix recursion error when an inline field definition + shadows a built-in item field (e.g., redefining ``track_no``). Inline + expressions now skip self-references during evaluation to avoid infinite + recursion. :bug:`6115` +- When hardlinking from a symlink (e.g. importing a symlink with hardlinking + enabled), dereference the symlink then hardlink, rather than creating a new + (potentially broken) symlink :bug:`5676` +- :doc:`/plugins/spotify`: The plugin now gracefully handles audio-features API + deprecation (HTTP 403 errors). When a 403 error is encountered from the + audio-features endpoint, the plugin logs a warning once and skips audio + features for all remaining tracks in the session, avoiding unnecessary API + calls and rate limit exhaustion. +- Running `beet --config config -e` now edits ``config`` rather than + the default config path. :bug:`5652` +- :doc:`plugins/lyrics`: Accepts strings for lyrics sources (previously only + accepted a list of strings). :bug:`5962` +- Fix a bug introduced in release 2.4.0 where import from any valid + import-log-file always threw a "none of the paths are importable" error. +- :doc:`/plugins/web`: repair broken `/item/values/…` and `/albums/values/…` + endpoints. Previously, due to single-quotes (i.e. string literal) in the SQL + query, the query e.g. `GET /item/values/albumartist` would return the literal + "albumartist" instead of a list of unique album artists. +- Sanitize log messages by removing control characters, preventing terminal + rendering issues. +- When using :doc:`plugins/fromfilename` together with :doc:`plugins/edit`, + temporary tags extracted from filenames are no longer lost when discarding or + cancelling an edit session during import.
:bug:`6104` +- :ref:`update-cmd` :doc:`plugins/edit` fix display formatting of field changes + to clearly show added and removed flexible fields. +- :doc:`plugins/lastgenre`: Fix the issue where last.fm doesn't return any + result in the artist genre stage because "concatenation" words in the artist + name (like "feat.", "+", or "&") prevent it. Using the albumartists list field + and fetching a genre for each artist separately improves the chance of + receiving valid results in that stage. +- :doc:`/plugins/ftintitle`: Fixed artist name splitting to prioritize explicit + featuring tokens (feat, ft, featuring) over generic separators (&, and), + preventing incorrect splits when both are present. + +For plugin developers: + +- A new plugin event, ``album_matched``, is sent when an album that is being + imported has been matched to its metadata and the corresponding distance has + been calculated. +- Added a reusable requests handler which can be used by plugins to make HTTP + requests with built-in retry and backoff logic. It uses beets user-agent and + configures timeouts. See :class:`~beetsplug._utils.requests.RequestHandler` + for documentation. +- Replaced dependency on ``python-musicbrainzngs`` with a lightweight custom + MusicBrainz client implementation and updated relevant plugins accordingly: + + - :doc:`plugins/listenbrainz` + - :doc:`plugins/mbcollection` + - :doc:`plugins/mbpseudo` + - :doc:`plugins/missing` + - :doc:`plugins/musicbrainz` + - :doc:`plugins/parentwork` + + See :class:`~beetsplug._utils.musicbrainz.MusicBrainzAPI` for documentation. For packagers: -* The minimum supported Python version is now 3.9. -* External plugin developers: ``beetsplug/__init__.py`` file can be removed - from your plugin as beets now uses native/implicit namespace package setup. +- The minimum supported Python version is now 3.10. +- An unused dependency on ``mock`` has been removed. 
Other changes: -* Release workflow: fix the issue where the new release tag is created for the +- The documentation chapter :doc:`dev/paths` has been moved to the "For + Developers" section and revised to reflect current best practices (pathlib + usage). +- Refactored the ``beets/ui/commands.py`` monolithic file (2000+ lines) into + multiple modules within the ``beets/ui/commands`` directory for better + maintainability. +- :doc:`plugins/bpd`: Raise ImportError instead of ValueError when GStreamer is + unavailable, enabling ``importorskip`` usage in pytest setup. +- Finally removed gmusic plugin and all related code/docs as the Google Play + Music service was shut down in 2020. + +2.5.1 (October 14, 2025) +------------------------ + +New features: + +- :doc:`plugins/zero`: Add new configuration option, ``omit_single_disc``, to + allow zeroing the disc number on write for single-disc albums. Defaults to + False. + +Bug fixes: + +- |BeetsPlugin|: load the last plugin class defined in the plugin namespace. + :bug:`6093` + +For packagers: + +- Fixed issue with legacy metadata plugins not copying properties from the base + class. +- Reverted the following: When installing ``beets`` via git or locally the + version string now reflects the current git branch and commit hash. + :bug:`6089` + +Other changes: + +- Removed outdated mailing list contact information from the documentation + :bug:`5462`. +- :doc:`guides/main`: Modernized the *Getting Started* guide with tabbed + sections and dropdown menus. Installation instructions have been streamlined, + and a new subpage now provides additional setup details. +- Documentation: introduced a new role ``conf`` for documenting configuration + options. This role provides consistent formatting and creates references + automatically. Applied it to :doc:`plugins/deezer`, :doc:`plugins/discogs`, + :doc:`plugins/musicbrainz` and :doc:`plugins/spotify` plugins documentation. 
+ +2.5.0 (October 11, 2025) +------------------------ + +New features: + +- :doc:`plugins/lastgenre`: Add a ``--pretend`` option to preview genre changes + without storing or writing them. +- :doc:`plugins/convert`: Add a config option to disable writing metadata to + converted files. +- :doc:`plugins/discogs`: New config option + :conf:`plugins.discogs:strip_disambiguation` to toggle stripping discogs + numeric disambiguation on artist and label fields. +- :doc:`plugins/discogs` Added support for featured artists. :bug:`6038` +- :doc:`plugins/discogs` New configuration option + :conf:`plugins.discogs:featured_string` to change the default string used to + join featured artists. The default string is `Feat.`. +- :doc:`plugins/discogs` Support for `artist_credit` in Discogs tags. + :bug:`3354` +- :doc:`plugins/discogs` Support for name variations and config options to + specify where the variations are written. :bug:`3354` +- :doc:`plugins/web` Support for `nexttrack` keyboard press + +Bug fixes: + +- :doc:`plugins/musicbrainz` Refresh flexible MusicBrainz metadata on reimport + so format changes are applied. :bug:`6036` +- :doc:`plugins/spotify` Ensure ``spotifysync`` keeps popularity, ISRC, and + related fields current even when audio features requests fail. :bug:`6061` +- :doc:`plugins/spotify` Fixed an issue where track matching and lookups could + return incorrect or misleading results when using the Spotify plugin. The + problem occurred primarily when no album was provided or when the album field + was an empty string. :bug:`5189` +- :doc:`plugins/spotify` Removed old and undocumented config options + `artist_field`, `album_field` and `track` that were causing issues with track + matching. :bug:`5189` +- :doc:`plugins/spotify` Fixed an issue where candidate lookup would not find + matches due to query escaping (single vs double quotes). +- :doc:`plugins/discogs` Fixed inconsistency in stripping disambiguation from + artists but not labels. 
:bug:`5366` +- :doc:`plugins/chroma` :doc:`plugins/bpsync` Fix plugin loading issue caused by + an import of another |BeetsPlugin| class. :bug:`6033` +- :doc:`/plugins/fromfilename`: Fix :bug:`5218`, improve the code (refactor + regexps, allow for more cases, add some logging), add tests. +- Metadata source plugins: Fixed data source penalty calculation that was + incorrectly applied during import matching. The + :conf:`plugins.index:source_weight` configuration option has been renamed to + :conf:`plugins.index:data_source_mismatch_penalty` to better reflect its + purpose. :bug:`6066` + +Other changes: + +- :doc:`plugins/index`: Clarify that musicbrainz must be mentioned if plugin + list modified :bug:`6020` +- :doc:`/faq`: Add check for musicbrainz plugin if auto-tagger can't find a + match :bug:`6020` +- :doc:`guides/tagger`: Section on no matching release found, related to + possibly disabled musicbrainz plugin :bug:`6020` +- Moved ``art.py`` utility module from ``beets`` into ``beetsplug`` namespace as + it is not used in the core beets codebase. It can now be found in + ``beetsplug._utils``. +- Moved ``vfs.py`` utility module from ``beets`` into ``beetsplug`` namespace as + it is not used in the core beets codebase. It can now be found in + ``beetsplug._utils``. +- :class:`beets.metadata_plugin.MetadataSourcePlugin`: Remove discogs specific + disambiguation stripping. +- When installing ``beets`` via git or locally the version string now reflects + the current git branch and commit hash. :bug:`4448` +- :ref:`match-config`: ``match.distance_weights.source`` configuration has been + renamed to ``match.distance_weights.data_source`` for consistency with the + name of the field it refers to. + +For developers and plugin authors: + +- Typing improvements in ``beets/logging.py``: ``getLogger`` now returns + ``BeetsLogger`` when called with a name, or ``RootLogger`` when called without + a name. 
+- The ``track_distance()`` and ``album_distance()`` methods have been removed + from ``MetadataSourcePlugin``. Distance calculation for data source mismatches + is now handled automatically by the core matching logic. This change + simplifies the plugin architecture and fixes incorrect penalty calculations. + :bug:`6066` +- Metadata source plugins are now registered globally when instantiated, which + makes their handling slightly more efficient. + +2.4.0 (September 13, 2025) +-------------------------- + +New features: + +- :doc:`plugins/musicbrainz`: The MusicBrainz autotagger has been moved to a + separate plugin. The default :ref:`plugins-config` includes ``musicbrainz``, + but if you've customized your ``plugins`` list in your configuration, you'll + need to explicitly add ``musicbrainz`` to continue using this functionality. + Configuration option :conf:`plugins.musicbrainz:enabled` has thus been + deprecated. :bug:`2686` :bug:`4605` +- :doc:`plugins/web`: Show notifications when a track plays. This uses the Media + Session API to customize media notifications. +- :doc:`plugins/discogs`: Add configurable :conf:`plugins.discogs:search_limit` + option to limit the number of results returned by the Discogs metadata search + queries. +- :doc:`plugins/discogs`: Implement ``track_for_id`` method to allow retrieving + singletons by their Discogs ID. :bug:`4661` +- :doc:`plugins/replace`: Add new plugin. +- :doc:`plugins/duplicates`: Add ``--remove`` option, allowing to remove from + the library without deleting media files. :bug:`5832` +- :doc:`plugins/playlist`: Support files with the ``.m3u8`` extension. + :bug:`5829` +- :doc:`plugins/mbcollection`: When getting the user collections, only consider + collections of releases, and ignore collections of other entity types. 
+- :doc:`plugins/mpdstats`: Add new configuration option, + ``played_ratio_threshold``, to allow configuring the percentage the song must + be played for it to be counted as played instead of skipped. +- :doc:`plugins/web`: Display artist and album as part of the search results. +- :doc:`plugins/spotify` :doc:`plugins/deezer`: Add new configuration option + :conf:`plugins.index:search_limit` to limit the number of results returned by + search queries. + +Bug fixes: + +- :doc:`plugins/musicbrainz`: fix regression where user configured + :conf:`plugins.musicbrainz:extra_tags` have been read incorrectly. :bug:`5788` +- tests: Fix library tests failing on Windows when run from outside ``D:/``. + :bug:`5802` +- Fix an issue where calling ``Library.add`` would cause the ``database_change`` + event to be sent twice, not once. :bug:`5560` +- Fix ``HiddenFileTest`` by using ``bytestring_path()``. +- tests: Fix tests failing without ``langdetect`` (by making it required). + :bug:`5797` +- :doc:`plugins/musicbrainz`: Fix the MusicBrainz search not taking into account + the album/recording aliases +- :doc:`/plugins/spotify`: Fix the issue with that every query to spotify was + ascii encoded. This resulted in bad matches for queries that contained special + e.g. non latin characters as 盗作. If you want to keep the legacy behavior set + the config option ``spotify.search_query_ascii: yes``. :bug:`5699` +- :doc:`plugins/discogs`: Beets will no longer crash if a release has been + deleted, and returns a 404. +- :doc:`plugins/lastgenre`: Fix the issue introduced in Beets 2.3.0 where + non-whitelisted last.fm genres were not canonicalized to parent genres. + :bug:`5930` +- :doc:`plugins/chroma`: AcoustID lookup HTTP requests will now time out after + 10 seconds, rather than hanging the entire import process. +- :doc:`/plugins/deezer`: Fix the issue with that every query to deezer was + ascii encoded. This resulted in bad matches for queries that contained special + e.g. 
non latin characters as 盗作. If you want to keep the legacy behavior set + the config option ``deezer.search_query_ascii: yes``. :bug:`5860` +- Fixed regression with :doc:`/plugins/listenbrainz` where the plugin could not + be loaded :bug:`5975` +- :doc:`/plugins/fromfilename`: Beets will no longer crash if a track's title + field is missing. + +For packagers: + +- Optional :conf:`plugins.musicbrainz:extra_tags` parameter has been removed + from ``BeetsPlugin.candidates`` method signature since it is never passed in. + If you override this method in your plugin, feel free to remove this + parameter. +- Loosened ``typing_extensions`` dependency in pyproject.toml to apply to every + python version. + +For plugin developers: + +- The ``fetchart`` plugins has seen a few changes to function signatures and + source registration in the process of introducing typings to the code. Custom + art sources might need to be adapted. +- We split the responsibilities of plugins into two base classes + + 1. |BeetsPlugin| is the base class for all plugins, any plugin needs to + inherit from this class. + 2. :class:`beets.metadata_plugin.MetadataSourcePlugin` allows plugins to act + like metadata sources. E.g. used by the MusicBrainz plugin. All plugins in + the beets repo are opted into this class where applicable. If you are + maintaining a plugin that acts like a metadata source, i.e. you expose any + of ``track_for_id``, ``album_for_id``, ``candidates``, ``item_candidates``, + ``album_distance``, ``track_distance`` methods, please update your plugin + to inherit from the new baseclass, as otherwise your plugin will stop + working with the next major release. 
+ +- Several definitions have been moved: + + - ``BLOB_TYPE`` constant, ``PathQuery`` and ``SingletonQuery`` queries have + moved from ``beets.library`` to ``beets.dbcore.query`` module + - ``DateType``, ``DurationType``, ``PathType`` types and ``MusicalKey`` class + have moved from ``beets.library`` to ``beets.dbcore.types`` module. + - ``Distance`` has moved from ``beets.autotag`` to ``beets.autotag.distance`` + module. + - ``beets.autotag.current_metadata`` has been renamed to + ``beets.util.get_most_common_tags``. + + Old imports are now deprecated and will be removed in version ``3.0.0``. + +- ``beets.ui.decargs`` is deprecated and will be removed in version ``3.0.0``. +- Beets is now PEP 561 compliant, which means that it provides type hints for + all public APIs. This allows IDEs to provide better autocompletion and type + checking for downstream users of the beets API. +- ``plugins.find_plugins`` function does not anymore load plugins. You need to + explicitly call ``plugins.load_plugins()`` to load them. +- ``plugins.load_plugins`` function does not anymore accept the list of plugins + to load. Instead, it loads all plugins that are configured by + :ref:`plugins-config` configuration. +- Flexible fields, which can be used by plugins to store additional metadata, + now also support list values. Previously, beets would throw an error while + storing the data in the SQL database due to missing type conversion. + :bug:`5698` + +Other changes: + +- Refactor: Split responsibilities of Plugins into MetaDataPlugins and general + Plugins. +- Documentation structure for auto generated API references changed slightly. + Autogenerated API references are now located in the ``docs/api`` subdirectory. +- :doc:`/plugins/substitute`: Fix rST formatting for example cases so that each + case is shown on separate lines. 
+- :doc:`/plugins/ftintitle`: Process items whose albumartist is not contained in + the artist field, including compilations using Various Artists as an + albumartist and album tracks by guest artists featuring a third artist. +- Refactored library.py file by splitting it into multiple modules within the + beets/library directory. +- Added a test to check that all plugins can be imported without errors. +- :doc:`/guides/main`: Add instructions to install beets on Void Linux. +- :doc:`plugins/lastgenre`: Refactor loading whitelist and canonicalization + file. :bug:`5979` +- :doc:`plugins/lastgenre`: Updated and streamlined the genre whitelist and + canonicalization tree :bug:`5977` +- UI: Update default ``text_diff_added`` color from **bold red** to **bold + green.** +- UI: Use ``text_diff_added`` and ``text_diff_removed`` colors in **all** diff + comparisons, including case differences. + +2.3.1 (May 14, 2025) +-------------------- + +Bug fixes: + +- :doc:`/reference/pathformat`: Fixed a regression where path legalization + incorrectly removed parts of user-configured path formats that followed a dot + (**.**). :bug:`5771` + +For packagers: + +- Force ``poetry`` version below 2 to avoid it mangling file modification times + in ``sdist`` package. :bug:`5770` + +2.3.0 (May 07, 2025) +-------------------- + +Beets now requires Python 3.9 or later since support for EOL Python 3.8 has been +dropped. + +New features: + +- :doc:`plugins/lastgenre`: The new configuration option, ``keep_existing``, + provides more fine-grained control over how pre-populated genre tags are + handled. The ``force`` option now behaves in a more conventional manner. + :bug:`4982` +- :doc:`plugins/lyrics`: Add new configuration option ``dist_thresh`` to control + the maximum allowed distance between the lyrics search result and the tagged + item's artist and title. This is useful for preventing false positives when + fetching lyrics. 
+- :doc:`plugins/lyrics`: Rewrite lyrics translation functionality to use Azure + AI Translator API and add relevant instructions to the documentation. +- :doc:`plugins/missing`: Add support for all metadata sources. +- :doc:`plugins/mbsync`: Add support for all metadata sources. + +Bug fixes: + +- :doc:`plugins/thumbnails`: Fix API call to GIO on big endian architectures + (like s390x) in thumbnails plugin. :bug:`5708` +- :doc:`plugins/listenbrainz`: Fix rST formatting for URLs of Listenbrainz API + Key documentation and config.yaml. +- :doc:`plugins/listenbrainz`: Fix ``UnboundLocalError`` in cases where 'mbid' + is not defined. +- :doc:`plugins/fetchart`: Fix fetchart bug where a tempfile could not be + deleted due to never being properly closed. :bug:`5521` +- :doc:`plugins/lyrics`: LRCLib will fall back to plain lyrics if synced lyrics + are not found and ``synced`` flag is set to ``yes``. +- Synchronise files included in the source distribution with what we used to + have before the introduction of Poetry. :bug:`5531` :bug:`5526` +- :ref:`write-cmd`: Fix the issue where for certain files differences in + ``mb_artistid``, ``mb_albumartistid`` and ``albumtype`` fields are shown on + every attempt to write tags. Note: your music needs to be reimported with + ``beet import -LI`` or synchronised with ``beet mbsync`` in order to fix this! + :bug:`5265` :bug:`5371` :bug:`4715` +- :ref:`import-cmd`: Fix ``MemoryError`` and improve performance tagging large + albums by replacing ``munkres`` library with ``lap.lapjv``. :bug:`5207` +- :ref:`query-sort`: Fix a bug that would raise an exception when sorting on a + non-string field that is not populated in all items. :bug:`5512` +- :doc:`plugins/lastgenre`: Fix track-level genre handling. Now when an + album-level genre is set already, single tracks don't fall back to the album's + genre and request their own last.fm genre. Also log messages regarding what's + been tagged are now more polished. 
:bug:`5582` +- Fix ambiguous column name ``sqlite3.OperationalError`` that occurred in album + queries that filtered album track titles, for example ``beet list -a keyword + title:foo``. +- :doc:`plugins/lyrics`: Rewrite lyrics tests using pytest to provide isolated + configuration for each test case. This fixes the issue where some tests failed + because they read developers' local lyrics configuration. :bug:`5133` +- :doc:`plugins/lyrics`: Do not attempt to search for lyrics if either the + artist or title is missing and ignore ``artist_sort`` value if it is empty. + :bug:`2635` +- :doc:`plugins/lyrics`: Fix fetching lyrics from ``lrclib`` source. If we + cannot find lyrics for a specific album, artist, title combination, the plugin + now tries to search for the artist and title and picks the most relevant + result. Update the default ``sources`` configuration to prioritize ``lrclib`` + over other sources since it returns reliable results quicker than others. + :bug:`5102` +- :doc:`plugins/lyrics`: Fix the issue with ``genius`` backend not being able to + match lyrics when there is a slight variation in the artist name. :bug:`4791` +- :doc:`plugins/lyrics`: Fix plugin crash when ``genius`` backend returns empty + lyrics. :bug:`5583` +- ImageMagick 7.1.1-44 is now supported. +- :doc:`plugins/parentwork`: Only output parentwork changes when running in + verbose mode. + +For packagers: + +- The minimum supported Python version is now 3.9. +- External plugin developers: ``beetsplug/__init__.py`` file can be removed from + your plugin as beets now uses native/implicit namespace package setup. + +Other changes: + +- Release workflow: fix the issue where the new release tag is created for the wrong (outdated) commit. Now the tag is created in the same workflow step - right after committing the version update. - :bug:`5539` -* :doc:`/plugins/smartplaylist`: URL-encode additional item `fields` within generated - EXTM3U playlists instead of JSON-encoding them. 
-* typehints: `./beets/importer.py` file now has improved typehints. -* typehints: `./beets/plugins.py` file now includes typehints. -* :doc:`plugins/ftintitle`: Optimize the plugin by avoiding unnecessary writes + right after committing the version update. :bug:`5539` +- :doc:`/plugins/smartplaylist`: URL-encode additional item ``fields`` within + generated EXTM3U playlists instead of JSON-encoding them. +- typehints: ``./beets/importer.py`` file now has improved typehints. +- typehints: ``./beets/plugins.py`` file now includes typehints. +- :doc:`plugins/ftintitle`: Optimize the plugin by avoiding unnecessary writes to the database. -* Database models are now serializable with pickle. +- Database models are now serializable with pickle. 2.2.0 (December 02, 2024) ------------------------- New features: -* :doc:`/plugins/substitute`: Allow the replacement string to use capture groups +- :doc:`/plugins/substitute`: Allow the replacement string to use capture groups from the match. It is thus possible to create more general rules, applying to many different artists at once. Bug fixes: -* Check if running python from the Microsoft Store and provide feedback to install - from python.org. - :bug:`5467` -* Fix bug where matcher doesn't consider medium number when importing. This makes - it difficult to import hybrid SACDs and other releases with duplicate tracks. - :bug:`5148` -* Bring back test files and the manual to the source distribution tarball. +- Check if running python from the Microsoft Store and provide feedback to + install from python.org. :bug:`5467` +- Fix bug where matcher doesn't consider medium number when importing. This + makes it difficult to import hybrid SACDs and other releases with duplicate + tracks. :bug:`5148` +- Bring back test files and the manual to the source distribution tarball. :bug:`5513` Other changes: -* Changed `bitesize` label to `good first issue`. Our `contribute`_ page is now - automatically populated with these issues. 
:bug:`4855` +- Changed ``bitesize`` label to ``good first issue``. Our contribute_ page is + now automatically populated with these issues. :bug:`4855` .. _contribute: https://github.com/beetbox/beets/contribute @@ -136,90 +527,90 @@ Other changes: New features: -* New template function added: ``%capitalize``. Converts the first letter of - the text to uppercase and the rest to lowercase. -* Ability to query albums with track db fields and vice-versa, for example +- New template function added: ``%capitalize``. Converts the first letter of the + text to uppercase and the rest to lowercase. +- Ability to query albums with track db fields and vice-versa, for example ``beet list -a title:something`` or ``beet list artpath:cover``. Consequently album queries involving ``path`` field have been sped up, like ``beet list -a path:/path/``. -* :doc:`plugins/ftintitle`: New ``keep_in_artist`` option for the plugin, which +- :doc:`plugins/ftintitle`: New ``keep_in_artist`` option for the plugin, which allows keeping the "feat." part in the artist metadata while still changing the title. -* :doc:`plugins/autobpm`: Add new configuration option ``beat_track_kwargs`` - which enables adjusting keyword arguments supplied to librosa's - ``beat_track`` function call. -* Beets now uses ``platformdirs`` to determine the default music directory. - This location varies between systems -- for example, users can configure it - on Unix systems via ``user-dirs.dirs(5)``. +- :doc:`plugins/autobpm`: Add new configuration option ``beat_track_kwargs`` + which enables adjusting keyword arguments supplied to librosa's ``beat_track`` + function call. +- Beets now uses ``platformdirs`` to determine the default music directory. This + location varies between systems -- for example, users can configure it on Unix + systems via ``user-dirs.dirs(5)``. Bug fixes: -* :doc:`plugins/ftintitle`: The detection of a "feat. 
X" part in a song title does not produce any false - positives caused by words like "and" or "with" anymore. :bug:`5441` -* :doc:`plugins/ftintitle`: The detection of a "feat. X" part now also matches such parts if they are in - parentheses or brackets. :bug:`5436` -* Improve naming of temporary files by separating the random part with the file extension. -* Fix the ``auto`` value for the :ref:`reflink` config option. -* Fix lyrics plugin only getting part of the lyrics from ``Genius.com`` :bug:`4815` -* Album flexible fields are now correctly saved. For instance MusicBrainz external links - such as `bandcamp_album_id` will be available on albums in addition to tracks. - For albums already in your library, a re-import is required for the fields to be added. - Such a re-import can be done with, in this case, `beet import -L data_source:=MusicBrainz`. -* :doc:`plugins/autobpm`: Fix the ``TypeError`` where tempo was being returned +- :doc:`plugins/ftintitle`: The detection of a "feat. X" part in a song title + does not produce any false positives caused by words like "and" or "with" + anymore. :bug:`5441` +- :doc:`plugins/ftintitle`: The detection of a "feat. X" part now also matches + such parts if they are in parentheses or brackets. :bug:`5436` +- Improve naming of temporary files by separating the random part with the file + extension. +- Fix the ``auto`` value for the :ref:`reflink` config option. +- Fix lyrics plugin only getting part of the lyrics from ``Genius.com`` + :bug:`4815` +- Album flexible fields are now correctly saved. For instance MusicBrainz + external links such as ``bandcamp_album_id`` will be available on albums in + addition to tracks. For albums already in your library, a re-import is + required for the fields to be added. Such a re-import can be done with, in + this case, ``beet import -L data_source:=MusicBrainz``. +- :doc:`plugins/autobpm`: Fix the ``TypeError`` where tempo was being returned as a numpy array. 
Update ``librosa`` dependency constraint to prevent similar - issues in the future. - :bug:`5289` -* :doc:`plugins/discogs`: Fix the ``TypeError`` when there is no description. -* Use single quotes in all SQL queries - :bug:`4709` -* :doc:`plugins/lyrics`: Update ``tekstowo`` backend to fetch lyrics directly - since recent updates to their website made it unsearchable. - :bug:`5456` -* :doc:`plugins/convert`: Fixed the convert plugin ``no_convert`` option so - that it no longer treats "and" and "or" queries the same. To maintain - previous behaviour add commas between your query keywords. For help see + issues in the future. :bug:`5289` +- :doc:`plugins/discogs`: Fix the ``TypeError`` when there is no description. +- Use single quotes in all SQL queries :bug:`4709` +- :doc:`plugins/lyrics`: Update ``tekstowo`` backend to fetch lyrics directly + since recent updates to their website made it unsearchable. :bug:`5456` +- :doc:`plugins/convert`: Fixed the convert plugin ``no_convert`` option so that + it no longer treats "and" and "or" queries the same. To maintain previous + behaviour add commas between your query keywords. For help see :ref:`combiningqueries`. -* Fix the ``TypeError`` when :ref:`set_fields` is provided non-string values. :bug:`4840` +- Fix the ``TypeError`` when :ref:`set_fields` is provided non-string values. + :bug:`4840` For packagers: -* The minimum supported Python version is now 3.8. -* The ``beet`` script has been removed from the repository. -* The ``typing_extensions`` is required for Python 3.10 and below. +- The minimum supported Python version is now 3.8. +- The ``beet`` script has been removed from the repository. +- The ``typing_extensions`` is required for Python 3.10 and below. Other changes: -* :doc:`contributing`: The project now uses ``poetry`` for packaging and +- :doc:`contributing`: The project now uses ``poetry`` for packaging and dependency management. This change affects project management and mostly affects beets developers. 
Please see updates in :ref:`getting-the-source` and :ref:`testing` for more information. -* :doc:`contributing`: Since ``poetry`` now manages local virtual environments, - `tox` has been replaced by a task runner ``poethepoet``. This change affects +- :doc:`contributing`: Since ``poetry`` now manages local virtual environments, + ``tox`` has been replaced by a task runner ``poethepoet``. This change affects beets developers and contributors. Please see updates in the - :ref:`development-tools` section for more details. Type ``poe`` while in - the project directory to see the available commands. -* Installation instructions have been made consistent across plugins + :ref:`development-tools` section for more details. Type ``poe`` while in the + project directory to see the available commands. +- Installation instructions have been made consistent across plugins documentation. Users should simply install ``beets`` with an ``extra`` of the corresponding plugin name in order to install extra dependencies for that plugin. -* GitHub workflows have been reorganised for clarity: style, linting, type and +- GitHub workflows have been reorganised for clarity: style, linting, type and docs checks now live in separate jobs and are named accordingly. -* Added caching for dependency installation in all CI jobs which speeds them up +- Added caching for dependency installation in all CI jobs which speeds them up a bit, especially the tests. -* The linting workflow has been made to run only when Python files or +- The linting workflow has been made to run only when Python files or documentation is changed, and they only check the changed files. When dependencies are updated (``poetry.lock``), then the entire code base is checked. -* The long-deprecated ``beets.util.confit`` module has been removed. This may +- The long-deprecated ``beets.util.confit`` module has been removed. This may cause extremely outdated external plugins to fail to load. 
-* :doc:`plugins/autobpm`: Add plugin dependencies to ``pyproject.toml`` under +- :doc:`plugins/autobpm`: Add plugin dependencies to ``pyproject.toml`` under the ``autobpm`` extra and update the plugin installation instructions in the - docs. - Since importing the bpm calculation functionality from ``librosa`` takes + docs. Since importing the bpm calculation functionality from ``librosa`` takes around 4 seconds, update the plugin to only do so when it actually needs to - calculate the bpm. Previously this import was being done immediately, so - every ``beet`` invocation was being delayed by a couple of seconds. - :bug:`5185` + calculate the bpm. Previously this import was being done immediately, so every + ``beet`` invocation was being delayed by a couple of seconds. :bug:`5185` 2.0.0 (May 30, 2024) -------------------- @@ -229,911 +620,782 @@ for Python 3.6). Major new features: -* The beets importer UI received a major overhaul. Several new configuration +- The beets importer UI received a major overhaul. Several new configuration options are available for customizing layout and colors: :ref:`ui_options`. :bug:`3721` :bug:`5028` New features: -* :doc:`/plugins/edit`: Prefer editor from ``VISUAL`` environment variable over ``EDITOR``. -* :ref:`config-cmd`: Prefer editor from ``VISUAL`` environment variable over ``EDITOR``. -* :doc:`/plugins/listenbrainz`: Add initial support for importing history and playlists from `ListenBrainz` - :bug:`1719` -* :doc:`plugins/mbsubmit`: add new prompt choices helping further to submit unmatched tracks to MusicBrainz faster. -* :doc:`plugins/spotify`: We now fetch track's ISRC, EAN, and UPC identifiers from Spotify when using the ``spotifysync`` command. - :bug:`4992` -* :doc:`plugins/discogs`: supply a value for the `cover_art_url` attribute, for use by `fetchart`. - :bug:`429` -* :ref:`update-cmd`: added ```-e``` flag for excluding fields from being updated. 
-* :doc:`/plugins/deezer`: Import rank and other attributes from Deezer during import and add a function to update the rank of existing items. - :bug:`4841` -* resolve transl-tracklisting relations for pseudo releases and merge data with the actual release - :bug:`654` -* Fetchart: Use the right field (`spotify_album_id`) to obtain the Spotify album id - :bug:`4803` -* Prevent reimporting album if it is permanently removed from Spotify +- :doc:`/plugins/edit`: Prefer editor from ``VISUAL`` environment variable over + ``EDITOR``. +- :ref:`config-cmd`: Prefer editor from ``VISUAL`` environment variable over + ``EDITOR``. +- :doc:`/plugins/listenbrainz`: Add initial support for importing history and + playlists from ``ListenBrainz`` :bug:`1719` +- :doc:`plugins/mbsubmit`: add new prompt choices helping further to submit + unmatched tracks to MusicBrainz faster. +- :doc:`plugins/spotify`: We now fetch track's ISRC, EAN, and UPC identifiers + from Spotify when using the ``spotifysync`` command. :bug:`4992` +- :doc:`plugins/discogs`: supply a value for the ``cover_art_url`` attribute, + for use by ``fetchart``. :bug:`429` +- :ref:`update-cmd`: added ``-e`` flag for excluding fields from being updated. +- :doc:`/plugins/deezer`: Import rank and other attributes from Deezer during + import and add a function to update the rank of existing items. :bug:`4841` +- resolve transl-tracklisting relations for pseudo releases and merge data with + the actual release :bug:`654` +- Fetchart: Use the right field (``spotify_album_id``) to obtain the Spotify + album id :bug:`4803` +- Prevent reimporting album if it is permanently removed from Spotify :bug:`4800` -* Added option to use `cover_art_url` as an album art source in the `fetchart` plugin. - :bug:`4707` -* :doc:`/plugins/fetchart`: The plugin can now get album art from `spotify`. -* Added option to specify a URL in the `embedart` plugin. 
- :bug:`83` -* :ref:`list-cmd` `singleton:true` queries have been made faster -* :ref:`list-cmd` `singleton:1` and `singleton:0` can now alternatively be used in queries, same as `comp` -* --from-logfile now parses log files using a UTF-8 encoding in `beets/beets/ui/commands.py`. - :bug:`4693` -* :doc:`/plugins/bareasc` lookups have been made faster -* :ref:`list-cmd` lookups using the pattern operator `::` have been made faster -* Added additional error handling for `spotify` plugin. - :bug:`4686` -* We now import the remixer field from Musicbrainz into the library. - :bug:`4428` -* :doc:`/plugins/mbsubmit`: Added a new `mbsubmit` command to print track information to be submitted to MusicBrainz after initial import. - :bug:`4455` -* Added `spotify_updated` field to track when the information was last updated. -* We now import and tag the `album` information when importing singletons using Spotify source. - :bug:`4398` -* :doc:`/plugins/spotify`: The plugin now provides an additional command - `spotifysync` that allows getting track popularity and audio features - information from Spotify. - :bug:`4094` -* :doc:`/plugins/spotify`: The plugin now records Spotify-specific IDs in the - `spotify_album_id`, `spotify_artist_id`, and `spotify_track_id` fields. +- Added option to use ``cover_art_url`` as an album art source in the + ``fetchart`` plugin. :bug:`4707` +- :doc:`/plugins/fetchart`: The plugin can now get album art from ``spotify``. +- Added option to specify a URL in the ``embedart`` plugin. :bug:`83` +- :ref:`list-cmd` ``singleton:true`` queries have been made faster +- :ref:`list-cmd` ``singleton:1`` and ``singleton:0`` can now alternatively be + used in queries, same as ``comp`` +- --from-logfile now parses log files using a UTF-8 encoding in + ``beets/beets/ui/commands.py``. 
:bug:`4693` +- :doc:`/plugins/bareasc` lookups have been made faster +- :ref:`list-cmd` lookups using the pattern operator ``::`` have been made + faster +- Added additional error handling for ``spotify`` plugin. :bug:`4686` +- We now import the remixer field from Musicbrainz into the library. :bug:`4428` +- :doc:`/plugins/mbsubmit`: Added a new ``mbsubmit`` command to print track + information to be submitted to MusicBrainz after initial import. :bug:`4455` +- Added ``spotify_updated`` field to track when the information was last + updated. +- We now import and tag the ``album`` information when importing singletons + using Spotify source. :bug:`4398` +- :doc:`/plugins/spotify`: The plugin now provides an additional command + ``spotifysync`` that allows getting track popularity and audio features + information from Spotify. :bug:`4094` +- :doc:`/plugins/spotify`: The plugin now records Spotify-specific IDs in the + ``spotify_album_id``, ``spotify_artist_id``, and ``spotify_track_id`` fields. :bug:`4348` -* Create the parental directories for database if they do not exist. - :bug:`3808` :bug:`4327` -* :ref:`musicbrainz-config`: a new :ref:`musicbrainz.enabled` option allows disabling - the MusicBrainz metadata source during the autotagging process -* :doc:`/plugins/kodiupdate`: Now supports multiple kodi instances - :bug:`4101` -* Add the item fields ``bitrate_mode``, ``encoder_info`` and ``encoder_settings``. -* Add query prefixes ``=`` and ``~``. -* A new configuration option, :ref:`duplicate_keys`, lets you change which - fields the beets importer uses to identify duplicates. - :bug:`1133` :bug:`4199` -* Add :ref:`exact match ` queries, using the prefixes ``=`` and - ``=~``. - :bug:`4251` -* :doc:`/plugins/discogs`: Permit appending style to genre. -* :doc:`plugins/discogs`: Implement item_candidates for matching singletons. -* :doc:`plugins/discogs`: Check for compliant discogs_client module. 
-* :doc:`/plugins/convert`: Add a new `auto_keep` option that automatically - converts files but keeps the *originals* in the library. - :bug:`1840` :bug:`4302` -* Added a ``-P`` (or ``--disable-plugins``) flag to specify one/multiple plugin(s) to be - disabled at startup. -* :ref:`import-options`: Add support for re-running the importer on paths in - log files that were created with the ``-l`` (or ``--logfile``) argument. +- Create the parental directories for database if they do not exist. :bug:`3808` + :bug:`4327` +- :ref:`musicbrainz-config`: a new :conf:`plugins.musicbrainz:enabled` option + allows disabling the MusicBrainz metadata source during the autotagging + process +- :doc:`/plugins/kodiupdate`: Now supports multiple kodi instances :bug:`4101` +- Add the item fields ``bitrate_mode``, ``encoder_info`` and + ``encoder_settings``. +- Add query prefixes ``=`` and ``~``. +- A new configuration option, :ref:`duplicate_keys`, lets you change which + fields the beets importer uses to identify duplicates. :bug:`1133` :bug:`4199` +- Add :ref:`exact match ` queries, using the prefixes ``=`` and + ``=~``. :bug:`4251` +- :doc:`/plugins/discogs`: Permit appending style to genre. +- :doc:`plugins/discogs`: Implement item_candidates for matching singletons. +- :doc:`plugins/discogs`: Check for compliant discogs_client module. +- :doc:`/plugins/convert`: Add a new ``auto_keep`` option that automatically + converts files but keeps the *originals* in the library. :bug:`1840` + :bug:`4302` +- Added a ``-P`` (or ``--disable-plugins``) flag to specify one/multiple + plugin(s) to be disabled at startup. +- :ref:`import-options`: Add support for re-running the importer on paths in log + files that were created with the ``-l`` (or ``--logfile``) argument. :bug:`4379` :bug:`4387` -* Preserve mtimes from archives - :bug:`4392` -* Add :ref:`%sunique{} ` template to disambiguate between singletons. 
+- Preserve mtimes from archives :bug:`4392` +- Add :ref:`%sunique{} ` template to disambiguate between singletons. :bug:`4438` -* Add a new ``import.ignored_alias_types`` config option to allow for - specific alias types to be skipped over when importing items/albums. -* :doc:`/plugins/smartplaylist`: A new ``--pretend`` option lets the user see +- Add a new ``import.ignored_alias_types`` config option to allow for specific + alias types to be skipped over when importing items/albums. +- :doc:`/plugins/smartplaylist`: A new ``--pretend`` option lets the user see what a new or changed smart playlist saved in the config is actually - returning. - :bug:`4573` -* :doc:`/plugins/fromfilename`: Add debug log messages that inform when the + returning. :bug:`4573` +- :doc:`/plugins/fromfilename`: Add debug log messages that inform when the plugin replaced bad (missing) artist, title or tracknumber metadata. :bug:`4561` :bug:`4600` -* :ref:`musicbrainz-config`: MusicBrainz release pages often link to related +- :ref:`musicbrainz-config`: MusicBrainz release pages often link to related metadata sources like Discogs, Bandcamp, Spotify, Deezer and Beatport. When - enabled via the :ref:`musicbrainz.external_ids` options, release ID's will be - extracted from those URL's and imported to the library. - :bug:`4220` -* :doc:`/plugins/convert`: Add support for generating m3u8 playlists together - with converted media files. - :bug:`4373` -* Fetch the ``release_group_title`` field from MusicBrainz. - :bug: `4809` -* :doc:`plugins/discogs`: Add support for applying album information on - singleton imports. - :bug: `4716` -* :doc:`/plugins/smartplaylist`: During explicit runs of the ``splupdate`` + enabled via the :conf:`plugins.musicbrainz:external_ids` options, release ID's + will be extracted from those URL's and imported to the library. :bug:`4220` +- :doc:`/plugins/convert`: Add support for generating m3u8 playlists together + with converted media files. 
:bug:`4373` +- Fetch the ``release_group_title`` field from MusicBrainz. :bug:`4809` +- :doc:`plugins/discogs`: Add support for applying album information on + singleton imports. :bug:`4716` +- :doc:`/plugins/smartplaylist`: During explicit runs of the ``splupdate`` command, the log message "Creating playlist ..."" is now displayed instead of hidden in the debug log, which states some form of progress through the UI. :bug:`4861` -* :doc:`plugins/subsonicupdate`: Updates are now triggered whenever either the - beets database is changed or a smart playlist is created/updated. - :bug: `4862` -* :doc:`plugins/importfeeds`: Add a new output format allowing to save a - playlist once per import session. - :bug: `4863` -* Make ArtResizer work with :pypi:`PIL`/:pypi:`pillow` 10.0.0 removals. +- :doc:`plugins/subsonicupdate`: Updates are now triggered whenever either the + beets database is changed or a smart playlist is created/updated. :bug:`4862` +- :doc:`plugins/importfeeds`: Add a new output format allowing to save a + playlist once per import session. :bug:`4863` +- Make ArtResizer work with :pypi:`PIL`/:pypi:`pillow` 10.0.0 removals. :bug:`4869` -* A new configuration option, :ref:`duplicate_verbose_prompt`, allows changing - how duplicates are presented during import. - :bug: `4866` -* :doc:`/plugins/embyupdate`: Add handling for private users by adding - ``userid`` config option. - :bug:`4402` -* :doc:`/plugins/substitute`: Add the new plugin `substitute` as an alternative - to the `rewrite` plugin. The main difference between them being that - `rewrite` modifies files' metadata and `substitute` does not. +- A new configuration option, :ref:`duplicate_verbose_prompt`, allows changing + how duplicates are presented during import. :bug:`4866` +- :doc:`/plugins/embyupdate`: Add handling for private users by adding + ``userid`` config option. :bug:`4402` +- :doc:`/plugins/substitute`: Add the new plugin ``substitute`` as an + alternative to the ``rewrite`` plugin. 
The main difference between them being + that ``rewrite`` modifies files' metadata and ``substitute`` does not. :bug:`2786` -* Add support for ``artists`` and ``albumartists`` multi-valued tags. - :bug:`505` -* :doc:`/plugins/autobpm`: Add the `autobpm` plugin which uses Librosa to - calculate the BPM of the audio. - :bug:`3856` -* :doc:`/plugins/fetchart`: Fix the error with CoverArtArchive where the - `maxwidth` option would not be used to download a pre-sized thumbnail for +- Add support for ``artists`` and ``albumartists`` multi-valued tags. :bug:`505` +- :doc:`/plugins/autobpm`: Add the ``autobpm`` plugin which uses Librosa to + calculate the BPM of the audio. :bug:`3856` +- :doc:`/plugins/fetchart`: Fix the error with CoverArtArchive where the + ``maxwidth`` option would not be used to download a pre-sized thumbnail for release groups, as is already done with releases. -* :doc:`/plugins/fetchart`: Fix the error with CoverArtArchive where no cover - would be found when the `maxwidth` option matches a pre-sized thumbnail size, - but no thumbnail is provided by CAA. We now fallback to the raw image. -* :doc:`/plugins/advancedrewrite`: Add an advanced version of the `rewrite` +- :doc:`/plugins/fetchart`: Fix the error with CoverArtArchive where no cover + would be found when the ``maxwidth`` option matches a pre-sized thumbnail + size, but no thumbnail is provided by CAA. We now fallback to the raw image. +- :doc:`/plugins/advancedrewrite`: Add an advanced version of the ``rewrite`` plugin which allows to replace fields based on a given library query. -* :doc:`/plugins/lyrics`: Add LRCLIB as a new lyrics provider and a new - `synced` option to prefer synced lyrics over plain lyrics. -* :ref:`import-cmd`: Expose import.quiet_fallback as CLI option. -* :ref:`import-cmd`: Expose `import.incremental_skip_later` as CLI option. -* :doc:`/plugins/smartplaylist`: Expose config options as CLI options. 
-* :doc:`/plugins/smartplaylist`: Add new option `smartplaylist.output`. -* :doc:`/plugins/smartplaylist`: Add new option `smartplaylist.uri_format`. -* Sorted the default configuration file into categories. - :bug:`4987` -* :doc:`/plugins/convert`: Don't treat WAVE (`.wav`) files as lossy anymore - when using the `never_convert_lossy_files` option. They will get transcoded +- :doc:`/plugins/lyrics`: Add LRCLIB as a new lyrics provider and a new + ``synced`` option to prefer synced lyrics over plain lyrics. +- :ref:`import-cmd`: Expose import.quiet_fallback as CLI option. +- :ref:`import-cmd`: Expose ``import.incremental_skip_later`` as CLI option. +- :doc:`/plugins/smartplaylist`: Expose config options as CLI options. +- :doc:`/plugins/smartplaylist`: Add new option ``smartplaylist.output``. +- :doc:`/plugins/smartplaylist`: Add new option ``smartplaylist.uri_format``. +- Sorted the default configuration file into categories. :bug:`4987` +- :doc:`/plugins/convert`: Don't treat WAVE (``.wav``) files as lossy anymore + when using the ``never_convert_lossy_files`` option. They will get transcoded like the other lossless formats. -* Add support for `barcode` field. - :bug:`3172` -* :doc:`/plugins/smartplaylist`: Add new config option `smartplaylist.fields`. -* :doc:`/plugins/fetchart`: Defer source removal config option evaluation to - the point where they are used really, supporting temporary config changes. +- Add support for ``barcode`` field. :bug:`3172` +- :doc:`/plugins/smartplaylist`: Add new config option ``smartplaylist.fields``. +- :doc:`/plugins/fetchart`: Defer source removal config option evaluation to the + point where they are used really, supporting temporary config changes. Bug fixes: -* Improve ListenBrainz error handling. - :bug:`5459` -* :doc:`/plugins/deezer`: Improve requests error handling. -* :doc:`/plugins/lastimport`: Improve error handling in the `process_tracks` function and enable it to be used with other plugins. 
-* :doc:`/plugins/spotify`: Improve handling of ConnectionError. -* :doc:`/plugins/deezer`: Improve Deezer plugin error handling and set requests timeout to 10 seconds. - :bug:`4983` -* :doc:`/plugins/spotify`: Add bad gateway (502) error handling. -* :doc:`/plugins/spotify`: Add a limit of 3 retries, instead of retrying endlessly when the API is not available. -* Fix a crash when the Spotify API timeouts or does not return a `Retry-After` interval. - :bug:`4942` -* :doc:`/plugins/scrub`: Fixed the import behavior where scrubbed database tags - were restored to newly imported tracks with config settings ``scrub.auto: yes`` - and ``import.write: no``. - :bug:`4326` -* :doc:`/plugins/deezer`: Fixed the error where Deezer plugin would crash if non-Deezer id is passed during import. -* :doc:`/plugins/fetchart`: Fix fetching from Cover Art Archive when the - `maxwidth` option is set to one of the supported Cover Art Archive widths. -* :doc:`/plugins/discogs`: Fix "Discogs plugin replacing Feat. or Ft. with - a comma" by fixing an oversight that removed a functionality from the code - base when the MetadataSourcePlugin abstract class was introduced in PR's - #3335 and #3371. - :bug:`4401` -* :doc:`/plugins/convert`: Set default ``max_bitrate`` value to ``None`` to +- Improve ListenBrainz error handling. :bug:`5459` +- :doc:`/plugins/deezer`: Improve requests error handling. +- :doc:`/plugins/lastimport`: Improve error handling in the ``process_tracks`` + function and enable it to be used with other plugins. +- :doc:`/plugins/spotify`: Improve handling of ConnectionError. +- :doc:`/plugins/deezer`: Improve Deezer plugin error handling and set requests + timeout to 10 seconds. :bug:`4983` +- :doc:`/plugins/spotify`: Add bad gateway (502) error handling. +- :doc:`/plugins/spotify`: Add a limit of 3 retries, instead of retrying + endlessly when the API is not available. +- Fix a crash when the Spotify API times out or does not return a ``Retry-After`` + interval.
:bug:`4942` +- :doc:`/plugins/scrub`: Fixed the import behavior where scrubbed database tags + were restored to newly imported tracks with config settings ``scrub.auto: + yes`` and ``import.write: no``. :bug:`4326` +- :doc:`/plugins/deezer`: Fixed the error where Deezer plugin would crash if + non-Deezer id is passed during import. +- :doc:`/plugins/fetchart`: Fix fetching from Cover Art Archive when the + ``maxwidth`` option is set to one of the supported Cover Art Archive widths. +- :doc:`/plugins/discogs`: Fix "Discogs plugin replacing Feat. or Ft. with a + comma" by fixing an oversight that removed a functionality from the code base + when the MetadataSourcePlugin abstract class was introduced in PR's #3335 and + #3371. :bug:`4401` +- :doc:`/plugins/convert`: Set default ``max_bitrate`` value to ``None`` to avoid transcoding when this parameter is not set. :bug:`4472` -* :doc:`/plugins/replaygain`: Avoid a crash when errors occur in the analysis - backend. - :bug:`4506` -* We now use Python's defaults for command-line argument encoding, which - should reduce the chance for errors and "file not found" failures when - invoking other command-line tools, especially on Windows. - :bug:`4507` -* We now respect the Spotify API's rate limiting, which avoids crashing when the API reports code 429 (too many requests). - :bug:`4370` -* Fix implicit paths OR queries (e.g. ``beet list /path/ , /other-path/``) - which have previously been returning the entire library. - :bug:`1865` -* The Discogs release ID is now populated correctly to the discogs_albumid - field again (it was no longer working after Discogs changed their release URL - format). - :bug:`4225` -* The autotagger no longer considers all matches without a MusicBrainz ID as - duplicates of each other. 
- :bug:`4299` -* :doc:`/plugins/convert`: Resize album art when embedding - :bug:`2116` -* :doc:`/plugins/deezer`: Fix auto tagger pagination issues (fetch beyond the +- :doc:`/plugins/replaygain`: Avoid a crash when errors occur in the analysis + backend. :bug:`4506` +- We now use Python's defaults for command-line argument encoding, which should + reduce the chance for errors and "file not found" failures when invoking other + command-line tools, especially on Windows. :bug:`4507` +- We now respect the Spotify API's rate limiting, which avoids crashing when the + API reports code 429 (too many requests). :bug:`4370` +- Fix implicit paths OR queries (e.g. ``beet list /path/ , /other-path/``) which + have previously been returning the entire library. :bug:`1865` +- The Discogs release ID is now populated correctly to the discogs_albumid field + again (it was no longer working after Discogs changed their release URL + format). :bug:`4225` +- The autotagger no longer considers all matches without a MusicBrainz ID as + duplicates of each other. :bug:`4299` +- :doc:`/plugins/convert`: Resize album art when embedding :bug:`2116` +- :doc:`/plugins/deezer`: Fix auto tagger pagination issues (fetch beyond the first 25 tracks of a release). -* :doc:`/plugins/spotify`: Fix auto tagger pagination issues (fetch beyond the +- :doc:`/plugins/spotify`: Fix auto tagger pagination issues (fetch beyond the first 50 tracks of a release). -* :doc:`/plugins/lyrics`: Fix Genius search by using query params instead of body. -* :doc:`/plugins/unimported`: The new ``ignore_subdirectories`` configuration +- :doc:`/plugins/lyrics`: Fix Genius search by using query params instead of + body. +- :doc:`/plugins/unimported`: The new ``ignore_subdirectories`` configuration option added in 1.6.0 now has a default value if it hasn't been set. -* :doc:`/plugins/deezer`: Tolerate missing fields when searching for singleton - tracks. 
- :bug:`4116` -* :doc:`/plugins/replaygain`: The type of the internal ``r128_track_gain`` and +- :doc:`/plugins/deezer`: Tolerate missing fields when searching for singleton + tracks. :bug:`4116` +- :doc:`/plugins/replaygain`: The type of the internal ``r128_track_gain`` and ``r128_album_gain`` fields was changed from integer to float to fix loss of - precision due to truncation. - :bug:`4169` -* Fix a regression in the previous release that caused a `TypeError` when - moving files across filesystems. - :bug:`4168` -* :doc:`/plugins/convert`: Deleting the original files during conversion no + precision due to truncation. :bug:`4169` +- Fix a regression in the previous release that caused a ``TypeError`` when + moving files across filesystems. :bug:`4168` +- :doc:`/plugins/convert`: Deleting the original files during conversion no longer logs output when the ``quiet`` flag is enabled. -* :doc:`plugins/web`: Fix handling of "query" requests. Previously queries +- :doc:`plugins/web`: Fix handling of "query" requests. Previously queries consisting of more than one token (separated by a slash) always returned an empty result. -* :doc:`/plugins/discogs`: Skip Discogs query on insufficiently tagged files +- :doc:`/plugins/discogs`: Skip Discogs query on insufficiently tagged files (artist and album tags missing) to prevent arbitrary candidate results. :bug:`4227` -* :doc:`plugins/lyrics`: Fixed issues with the Tekstowo.pl and Genius - backends where some non-lyrics content got included in the lyrics -* :doc:`plugins/limit`: Better header formatting to improve index -* :doc:`plugins/replaygain`: Correctly handle the ``overwrite`` config option, - which forces recomputing ReplayGain values on import even for tracks - that already have the tags. -* :doc:`plugins/embedart`: Fix a crash when using recent versions of - ImageMagick and the ``compare_threshold`` option. 
- :bug:`4272` -* :doc:`plugins/lyrics`: Fixed issue with Genius header being included in lyrics, - added test case of up-to-date Genius html -* :doc:`plugins/importadded`: Fix a bug with recently added reflink import option - that causes a crash when ImportAdded plugin enabled. - :bug:`4389` -* :doc:`plugins/convert`: Fix a bug with the `wma` format alias. -* :doc:`/plugins/web`: Fix get file from item. -* :doc:`/plugins/lastgenre`: Fix a duplicated entry for trip hop in the - default genre list. - :bug:`4510` -* :doc:`plugins/lyrics`: Fixed issue with Tekstowo backend not actually checking - if the found song matches. - :bug:`4406` -* :doc:`plugins/embedart`: Add support for ImageMagick 7.1.1-12 - :bug:`4836` -* :doc:`/plugins/fromfilename`: Fix failed detection of - filename patterns. - :bug:`4561` :bug:`4600` -* Fix issue where deletion of flexible fields on an album doesn't cascade to items - :bug:`4662` -* Fix issue where ``beet write`` continuously retags the ``albumtypes`` metadata +- :doc:`plugins/lyrics`: Fixed issues with the Tekstowo.pl and Genius backends + where some non-lyrics content got included in the lyrics +- :doc:`plugins/limit`: Better header formatting to improve index +- :doc:`plugins/replaygain`: Correctly handle the ``overwrite`` config option, + which forces recomputing ReplayGain values on import even for tracks that + already have the tags. +- :doc:`plugins/embedart`: Fix a crash when using recent versions of ImageMagick + and the ``compare_threshold`` option. :bug:`4272` +- :doc:`plugins/lyrics`: Fixed issue with Genius header being included in + lyrics, added test case of up-to-date Genius html +- :doc:`plugins/importadded`: Fix a bug with recently added reflink import + option that causes a crash when ImportAdded plugin enabled. :bug:`4389` +- :doc:`plugins/convert`: Fix a bug with the ``wma`` format alias. +- :doc:`/plugins/web`: Fix get file from item. 
+- :doc:`/plugins/lastgenre`: Fix a duplicated entry for trip hop in the default + genre list. :bug:`4510` +- :doc:`plugins/lyrics`: Fixed issue with Tekstowo backend not actually checking + if the found song matches. :bug:`4406` +- :doc:`plugins/embedart`: Add support for ImageMagick 7.1.1-12 :bug:`4836` +- :doc:`/plugins/fromfilename`: Fix failed detection of <track> <title> filename + patterns. :bug:`4561` :bug:`4600` +- Fix issue where deletion of flexible fields on an album doesn't cascade to + items :bug:`4662` +- Fix issue where ``beet write`` continuously retags the ``albumtypes`` metadata field in files. Additionally broken data could have been added to the library when the tag was read from file back into the library using ``beet update``. It is required for all users to **check if such broken data is present in the library**. Following the instructions `described here <https://github.com/beetbox/beets/pull/4582#issuecomment-1445023493>`_, a sanity check and potential fix is easily possible. :bug:`4528` -* Fix updating "data_source" on re-imports and improve logging when flexible - attributes are being re-imported. - :bug:`4726` -* :doc:`/plugins/fetchart`: Correctly select the cover art from fanart.tv with +- Fix updating "data_source" on re-imports and improve logging when flexible + attributes are being re-imported. :bug:`4726` +- :doc:`/plugins/fetchart`: Correctly select the cover art from fanart.tv with the highest number of likes -* :doc:`/plugins/lyrics`: Fix a crash with the Google backend when processing +- :doc:`/plugins/lyrics`: Fix a crash with the Google backend when processing some web pages. :bug:`4875` -* Modifying flexible attributes of albums now cascade to the individual album +- Modifying flexible attributes of albums now cascades to the individual album tracks, similar to how fixed album attributes have been cascading to tracks - already. A new option ``--noinherit/-I`` to :ref:`modify <modify-cmd>` - allows changing this behaviour.
- :bug:`4822` -* Fix bug where an interrupted import process poisons the database, causing - a null path that can't be removed. - :bug:`4906` -* :doc:`/plugins/discogs`: Fix bug where empty artist and title fields would - return None instead of an empty list. - :bug:`4973` -* Fix bug regarding displaying tracks that have been changed not being - displayed unless the detail configuration is enabled. -* :doc:`/plugins/web`: Fix range request support, allowing to play large audio/ + already. A new option ``--noinherit/-I`` to :ref:`modify <modify-cmd>` allows + changing this behaviour. :bug:`4822` +- Fix bug where an interrupted import process poisons the database, causing a + null path that can't be removed. :bug:`4906` +- :doc:`/plugins/discogs`: Fix bug where empty artist and title fields would + return None instead of an empty list. :bug:`4973` +- Fix bug regarding displaying tracks that have been changed not being displayed + unless the detail configuration is enabled. +- :doc:`/plugins/web`: Fix range request support, allowing to play large audio/ opus files using e.g. a browser/firefox or gstreamer/mopidy directly. -* Fix bug where `zsh` completion script made assumptions about the specific - variant of `awk` installed and required specific settings for `sqlite3` - and caching in `zsh`. - :bug:`3546` -* Remove unused functions :bug:`5103` -* Fix bug where all media types are reported as the first media type when - importing with MusicBrainz as the data source - :bug:`4947` -* Fix bug where unimported plugin would not ignore children directories of - ignored directories. - :bug:`5130` -* Fix bug where some plugin commands hang indefinitely due to a missing - `requests` timeout. 
-* Fix cover art resizing logic to support multiple steps of resizing - :bug:`5151` -* :doc:`/plugins/convert`: Fix attempt to convert and perform side-effects if +- Fix bug where ``zsh`` completion script made assumptions about the specific + variant of ``awk`` installed and required specific settings for ``sqlite3`` + and caching in ``zsh``. :bug:`3546` +- Remove unused functions :bug:`5103` +- Fix bug where all media types are reported as the first media type when + importing with MusicBrainz as the data source :bug:`4947` +- Fix bug where unimported plugin would not ignore children directories of + ignored directories. :bug:`5130` +- Fix bug where some plugin commands hang indefinitely due to a missing + ``requests`` timeout. +- Fix cover art resizing logic to support multiple steps of resizing :bug:`5151` +- :doc:`/plugins/convert`: Fix attempt to convert and perform side-effects if library file is not readable. For plugin developers: -* beets now explicitly prevents multiple plugins to define replacement - functions for the same field. When previously defining `template_fields` - for the same field in two plugins, the last loaded plugin would silently - overwrite the function defined by the other plugin. - Now, beets will raise an exception when this happens. - :bug:`5002` -* Allow reuse of some parts of beets' testing components. This may ease the - work for externally developed plugins or related software (e.g. the beets - plugin for Mopidy), if they need to create an in-memory instance of a beets - music library for their tests. +- beets now explicitly prevents multiple plugins to define replacement functions + for the same field. When previously defining ``template_fields`` for the same + field in two plugins, the last loaded plugin would silently overwrite the + function defined by the other plugin. Now, beets will raise an exception when + this happens. :bug:`5002` +- Allow reuse of some parts of beets' testing components. 
This may ease the work + for externally developed plugins or related software (e.g. the beets plugin + for Mopidy), if they need to create an in-memory instance of a beets music + library for their tests. For packagers: -* As noted above, the minimum Python version is now 3.7. -* We fixed a version for the dependency on the `Confuse`_ library. - :bug:`4167` -* The minimum required version of :pypi:`mediafile` is now 0.9.0. +- As noted above, the minimum Python version is now 3.7. +- We fixed a version for the dependency on the Confuse_ library. :bug:`4167` +- The minimum required version of :pypi:`mediafile` is now 0.9.0. Other changes: -* Add ``sphinx`` and ``sphinx_rtd_theme`` as dependencies for a new ``docs`` extra - :bug:`4643` -* :doc:`/plugins/absubmit`: Deprecate the ``absubmit`` plugin since - AcousticBrainz has stopped accepting new submissions. - :bug:`4627` -* :doc:`/plugins/acousticbrainz`: Deprecate the ``acousticbrainz`` plugin - since the AcousticBrainz project has shut down. - :bug:`4627` -* :doc:`/plugins/limit`: Limit query results to head or tail (``lslimit`` +- Add ``sphinx`` and ``sphinx_rtd_theme`` as dependencies for a new ``docs`` + extra :bug:`4643` +- :doc:`/plugins/absubmit`: Deprecate the ``absubmit`` plugin since + AcousticBrainz has stopped accepting new submissions. :bug:`4627` +- :doc:`/plugins/acousticbrainz`: Deprecate the ``acousticbrainz`` plugin since + the AcousticBrainz project has shut down. :bug:`4627` +- :doc:`/plugins/limit`: Limit query results to head or tail (``lslimit`` command only) -* :doc:`/plugins/fish`: Add ``--output`` option. -* :doc:`/plugins/lyrics`: Remove Musixmatch from default enabled sources as - they are currently blocking requests from the beets user agent. - :bug:`4585` -* :doc:`/faq`: :ref:`multidisc`: Elaborated the multi-disc FAQ :bug:`4806` -* :doc:`/faq`: :ref:`src`: Removed some long lines. -* Refactor the test cases to avoid test smells. +- :doc:`/plugins/fish`: Add ``--output`` option. 
+- :doc:`/plugins/lyrics`: Remove Musixmatch from default enabled sources as they + are currently blocking requests from the beets user agent. :bug:`4585` +- :doc:`/faq`: :ref:`multidisc`: Elaborated the multi-disc FAQ :bug:`4806` +- :doc:`/faq`: :ref:`src`: Removed some long lines. +- Refactor the test cases to avoid test smells. 1.6.0 (November 27, 2021) ------------------------- -This release is our first experiment with time-based releases! We are aiming -to publish a new release of beets every 3 months. We therefore have a healthy -but not dizzyingly long list of new features and fixes. +This release is our first experiment with time-based releases! We are aiming to +publish a new release of beets every 3 months. We therefore have a healthy but +not dizzyingly long list of new features and fixes. With this release, beets now requires Python 3.6 or later (it removes support for Python 2.7, 3.4, and 3.5). There are also a few other dependency -changes---if you're a maintainer of a beets package for a package manager, -thank you for your ongoing efforts, and please see the list of notes below. +changes---if you're a maintainer of a beets package for a package manager, thank +you for your ongoing efforts, and please see the list of notes below. Major new features: -* When fetching genres from MusicBrainz, we now include genres from the - release group (in addition to the release). We also prioritize genres based - on the number of votes. - Thanks to :user:`aereaux`. -* Primary and secondary release types from MusicBrainz are now stored in a new - ``albumtypes`` field. - Thanks to :user:`edgars-supe`. - :bug:`2200` -* An accompanying new :doc:`/plugins/albumtypes` includes some options for - formatting this new ``albumtypes`` field. - Thanks to :user:`edgars-supe`. -* The :ref:`modify-cmd` and :ref:`import-cmd` can now use - :doc:`/reference/pathformat` formats when setting fields. 
- For example, you can now do ``beet modify title='$track $title'`` to put - track numbers into songs' titles. - :bug:`488` +- When fetching genres from MusicBrainz, we now include genres from the release + group (in addition to the release). We also prioritize genres based on the + number of votes. Thanks to :user:`aereaux`. +- Primary and secondary release types from MusicBrainz are now stored in a new + ``albumtypes`` field. Thanks to :user:`edgars-supe`. :bug:`2200` +- An accompanying new :doc:`/plugins/albumtypes` includes some options for + formatting this new ``albumtypes`` field. Thanks to :user:`edgars-supe`. +- The :ref:`modify-cmd` and :ref:`import-cmd` can now use + :doc:`/reference/pathformat` formats when setting fields. For example, you can + now do ``beet modify title='$track $title'`` to put track numbers into songs' + titles. :bug:`488` Other new things: -* :doc:`/plugins/permissions`: The plugin now sets cover art permissions to +- :doc:`/plugins/permissions`: The plugin now sets cover art permissions to match the audio file permissions. -* :doc:`/plugins/unimported`: A new configuration option supports excluding +- :doc:`/plugins/unimported`: A new configuration option supports excluding specific subdirectories in library. -* :doc:`/plugins/info`: Add support for an ``--album`` flag. -* :doc:`/plugins/export`: Similarly add support for an ``--album`` flag. -* ``beet move`` now highlights path differences in color (when enabled). -* When moving files and a direct rename of a file is not possible (for - example, when crossing filesystems), beets now copies to a temporary file in - the target folder first and then moves to the destination instead of - directly copying the target path. This gets us closer to always updating - files atomically. - Thanks to :user:`catap`. - :bug:`4060` -* :doc:`/plugins/fetchart`: Add a new option to store cover art as - non-progressive image. This is useful for DAPs that do not support - progressive images. 
Set ``deinterlace: yes`` in your configuration to enable - this conversion. -* :doc:`/plugins/fetchart`: Add a new option to change the file format of - cover art images. This may also be useful for DAPs that only support some - image formats. -* Support flexible attributes in ``%aunique``. - :bug:`2678` :bug:`3553` -* Make ``%aunique`` faster, especially when using inline fields. - :bug:`4145` +- :doc:`/plugins/info`: Add support for an ``--album`` flag. +- :doc:`/plugins/export`: Similarly add support for an ``--album`` flag. +- ``beet move`` now highlights path differences in color (when enabled). +- When moving files and a direct rename of a file is not possible (for example, + when crossing filesystems), beets now copies to a temporary file in the target + folder first and then moves to the destination instead of directly copying the + target path. This gets us closer to always updating files atomically. Thanks + to :user:`catap`. :bug:`4060` +- :doc:`/plugins/fetchart`: Add a new option to store cover art as + non-progressive image. This is useful for DAPs that do not support progressive + images. Set ``deinterlace: yes`` in your configuration to enable this + conversion. +- :doc:`/plugins/fetchart`: Add a new option to change the file format of cover + art images. This may also be useful for DAPs that only support some image + formats. +- Support flexible attributes in ``%aunique``. :bug:`2678` :bug:`3553` +- Make ``%aunique`` faster, especially when using inline fields. :bug:`4145` Bug fixes: -* :doc:`/plugins/lyrics`: Fix a crash when Beautiful Soup is not installed. +- :doc:`/plugins/lyrics`: Fix a crash when Beautiful Soup is not installed. :bug:`4027` -* :doc:`/plugins/discogs`: Support a new Discogs URL format for IDs. - :bug:`4080` -* :doc:`/plugins/discogs`: Remove built-in rate-limiting because the Discogs - Python library we use now has its own rate-limiting. - :bug:`4108` -* :doc:`/plugins/export`: Fix some duplicated output. 
-* :doc:`/plugins/aura`: Fix a potential security hole when serving image - files. +- :doc:`/plugins/discogs`: Support a new Discogs URL format for IDs. :bug:`4080` +- :doc:`/plugins/discogs`: Remove built-in rate-limiting because the Discogs + Python library we use now has its own rate-limiting. :bug:`4108` +- :doc:`/plugins/export`: Fix some duplicated output. +- :doc:`/plugins/aura`: Fix a potential security hole when serving image files. :bug:`4160` For plugin developers: -* :py:meth:`beets.library.Item.destination` now accepts a `replacements` +- :py:meth:`beets.library.Item.destination` now accepts a ``replacements`` argument to be used in favor of the default. -* The `pluginload` event is now sent after plugin types and queries are +- The ``pluginload`` event is now sent after plugin types and queries are available, not before. -* A new plugin event, `album_removed`, is called when an album is removed from +- A new plugin event, ``album_removed``, is called when an album is removed from the library (even when its file is not deleted from disk). Here are some notes for packagers: -* As noted above, the minimum Python version is now 3.6. -* We fixed a flaky test, named `test_album_art` in the `test_zero.py` file, - that some distributions had disabled. Disabling this test should no longer - be necessary. - :bug:`4037` :bug:`4038` -* This version of beets no longer depends on the `six`_ library. - :bug:`4030` -* The `gmusic` plugin was removed since Google Play Music has been shut down. - Thus, the optional dependency on `gmusicapi` does not exist anymore. +- As noted above, the minimum Python version is now 3.6. +- We fixed a flaky test, named ``test_album_art`` in the ``test_zero.py`` file, + that some distributions had disabled. Disabling this test should no longer be + necessary. :bug:`4037` :bug:`4038` +- This version of beets no longer depends on the six_ library. :bug:`4030` +- The ``gmusic`` plugin was removed since Google Play Music has been shut down. 
+ Thus, the optional dependency on ``gmusicapi`` does not exist anymore. :bug:`4089` 1.5.0 (August 19, 2021) ----------------------- This long overdue release of beets includes far too many exciting and useful -features than could ever be satisfactorily enumerated. -As a technical detail, it also introduces two new external libraries: -`MediaFile`_ and `Confuse`_ used to be part of beets but are now reusable -dependencies---packagers, please take note. -Finally, this is the last version of beets where we intend to support Python -2.x and 3.5; future releases will soon require Python 3.6. +features than could ever be satisfactorily enumerated. As a technical detail, it +also introduces two new external libraries: MediaFile_ and Confuse_ used to be +part of beets but are now reusable dependencies---packagers, please take note. +Finally, this is the last version of beets where we intend to support Python 2.x +and 3.5; future releases will soon require Python 3.6. -One non-technical change is that we moved our official ``#beets`` home -on IRC from freenode to `Libera.Chat`_. +One non-technical change is that we moved our official ``#beets`` home on IRC +from freenode to Libera.Chat_. -.. _Libera.Chat: https://libera.chat/ +.. _libera.chat: https://libera.chat/ Major new features: -* Fields in queries now fall back to an item's album and check its fields too. +- Fields in queries now fall back to an item's album and check its fields too. Notably, this allows querying items by an album's attribute: in other words, - ``beet list foo:bar`` will not only find tracks with the `foo` attribute; it - will also find tracks *on albums* that have the `foo` attribute. This may be - particularly useful in the :ref:`path-format-config`, which matches - individual items to decide which path to use. - Thanks to :user:`FichteFoll`. 
- :bug:`2797` :bug:`2988` -* A new :ref:`reflink` config option instructs the importer to create fast, + ``beet list foo:bar`` will not only find tracks with the ``foo`` attribute; it + will also find tracks *on albums* that have the ``foo`` attribute. This may be + particularly useful in the :ref:`path-format-config`, which matches individual + items to decide which path to use. Thanks to :user:`FichteFoll`. :bug:`2797` + :bug:`2988` +- A new :ref:`reflink` config option instructs the importer to create fast, copy-on-write file clones on filesystems that support them. Thanks to :user:`rubdos`. -* A new :doc:`/plugins/unimported` lets you find untracked files in your - library directory. -* The :doc:`/plugins/aura` has arrived! Try out the future of remote music +- A new :doc:`/plugins/unimported` lets you find untracked files in your library + directory. +- The :doc:`/plugins/aura` has arrived! Try out the future of remote music library access today. -* We now fetch information about `works`_ from MusicBrainz. - MusicBrainz matches provide the fields ``work`` (the title), ``mb_workid`` - (the MBID), and ``work_disambig`` (the disambiguation string). - Thanks to :user:`dosoe`. +- We now fetch information about works_ from MusicBrainz. MusicBrainz matches + provide the fields ``work`` (the title), ``mb_workid`` (the MBID), and + ``work_disambig`` (the disambiguation string). Thanks to :user:`dosoe`. :bug:`2580` :bug:`3272` -* A new :doc:`/plugins/parentwork` gets information about the original work, - which is useful for classical music. - Thanks to :user:`dosoe`. - :bug:`2580` :bug:`3279` -* :doc:`/plugins/bpd`: BPD now supports most of the features of version 0.16 - of the MPD protocol. This is enough to get it talking to more complicated - clients like ncmpcpp, but there are still some incompatibilities, largely due - to MPD commands we don't support yet. (Let us know if you find an MPD client - that doesn't get along with BPD!) 
- :bug:`3214` :bug:`800` -* A new :doc:`/plugins/deezer` can autotag tracks and albums using the - `Deezer`_ database. - Thanks to :user:`rhlahuja`. - :bug:`3355` -* A new :doc:`/plugins/bareasc` provides a new query type: "bare ASCII" - queries that ignore accented characters, treating them as though they - were plain ASCII characters. Use the ``#`` prefix with :ref:`list-cmd` or - other commands. :bug:`3882` -* :doc:`/plugins/fetchart`: The plugin can now get album art from `last.fm`_. +- A new :doc:`/plugins/parentwork` gets information about the original work, + which is useful for classical music. Thanks to :user:`dosoe`. :bug:`2580` + :bug:`3279` +- :doc:`/plugins/bpd`: BPD now supports most of the features of version 0.16 of + the MPD protocol. This is enough to get it talking to more complicated clients + like ncmpcpp, but there are still some incompatibilities, largely due to MPD + commands we don't support yet. (Let us know if you find an MPD client that + doesn't get along with BPD!) :bug:`3214` :bug:`800` +- A new :doc:`/plugins/deezer` can autotag tracks and albums using the Deezer_ + database. Thanks to :user:`rhlahuja`. :bug:`3355` +- A new :doc:`/plugins/bareasc` provides a new query type: "bare ASCII" queries + that ignore accented characters, treating them as though they were plain ASCII + characters. Use the ``#`` prefix with :ref:`list-cmd` or other commands. + :bug:`3882` +- :doc:`/plugins/fetchart`: The plugin can now get album art from last.fm_. :bug:`3530` -* :doc:`/plugins/web`: The API now supports the HTTP `DELETE` and `PATCH` - methods for modifying items. - They are disabled by default; set ``readonly: no`` in your configuration - file to enable modification via the API. +- :doc:`/plugins/web`: The API now supports the HTTP ``DELETE`` and ``PATCH`` + methods for modifying items. They are disabled by default; set ``readonly: + no`` in your configuration file to enable modification via the API. 
:bug:`3870` Other new things: -* ``beet remove`` now also allows interactive selection of items from the query, +- ``beet remove`` now also allows interactive selection of items from the query, similar to ``beet modify``. -* Enable HTTPS for MusicBrainz by default and add configuration option - `https` for custom servers. See :ref:`musicbrainz-config` for more details. -* :doc:`/plugins/mpdstats`: Add a new `strip_path` option to help build the +- Enable HTTPS for MusicBrainz by default and add configuration option + :conf:`plugins.musicbrainz:https` for custom servers. See + :ref:`musicbrainz-config` for more details. +- :doc:`/plugins/mpdstats`: Add a new ``strip_path`` option to help build the right local path from MPD information. -* :doc:`/plugins/convert`: Conversion can now parallelize conversion jobs on +- :doc:`/plugins/convert`: Conversion can now parallelize conversion jobs on Python 3. -* :doc:`/plugins/lastgenre`: Add a new `title_case` config option to make +- :doc:`/plugins/lastgenre`: Add a new ``title_case`` config option to make title-case formatting optional. -* There's a new message when running ``beet config`` when there's no available - configuration file. - :bug:`3779` -* When importing a duplicate album, the prompt now says "keep all" instead of +- There's a new message when running ``beet config`` when there's no available + configuration file. :bug:`3779` +- When importing a duplicate album, the prompt now says "keep all" instead of "keep both" to reflect that there may be more than two albums involved. :bug:`3569` -* :doc:`/plugins/chroma`: The plugin now updates file metadata after - generating fingerprints through the `submit` command. -* :doc:`/plugins/lastgenre`: Added more heavy metal genres to the built-in - genre filter lists. -* A new :doc:`/plugins/subsonicplaylist` can import playlists from a Subsonic +- :doc:`/plugins/chroma`: The plugin now updates file metadata after generating + fingerprints through the ``submit`` command. 
+- :doc:`/plugins/lastgenre`: Added more heavy metal genres to the built-in genre + filter lists. +- A new :doc:`/plugins/subsonicplaylist` can import playlists from a Subsonic server. -* :doc:`/plugins/subsonicupdate`: The plugin now automatically chooses between +- :doc:`/plugins/subsonicupdate`: The plugin now automatically chooses between token- and password-based authentication based on the server version. -* A new :ref:`extra_tags` configuration option lets you use more metadata in - MusicBrainz queries to further narrow the search. -* A new :doc:`/plugins/fish` adds `Fish shell`_ tab autocompletion to beets. -* :doc:`plugins/fetchart` and :doc:`plugins/embedart`: Added a new ``quality`` +- A new :conf:`plugins.musicbrainz:extra_tags` configuration option lets you use + more metadata in MusicBrainz queries to further narrow the search. +- A new :doc:`/plugins/fish` adds `Fish shell`_ tab autocompletion to beets. +- :doc:`plugins/fetchart` and :doc:`plugins/embedart`: Added a new ``quality`` option that controls the quality of the image output when the image is resized. -* :doc:`plugins/keyfinder`: Added support for `keyfinder-cli`_. - Thanks to :user:`BrainDamage`. -* :doc:`plugins/fetchart`: Added a new ``high_resolution`` config option to - allow downloading of higher resolution iTunes artwork (at the expense of - file size). - :bug:`3391` -* :doc:`plugins/discogs`: The plugin applies two new fields: `discogs_labelid` - and `discogs_artistid`. - :bug:`3413` -* :doc:`/plugins/export`: Added a new ``-f`` (``--format``) flag, - which can export your data as JSON, JSON lines, CSV, or XML. - Thanks to :user:`austinmm`. +- :doc:`plugins/keyfinder`: Added support for keyfinder-cli_. Thanks to + :user:`BrainDamage`. +- :doc:`plugins/fetchart`: Added a new ``high_resolution`` config option to + allow downloading of higher resolution iTunes artwork (at the expense of file + size). 
:bug:`3391` +- :doc:`plugins/discogs`: The plugin applies two new fields: ``discogs_labelid`` + and ``discogs_artistid``. :bug:`3413` +- :doc:`/plugins/export`: Added a new ``-f`` (``--format``) flag, which can + export your data as JSON, JSON lines, CSV, or XML. Thanks to :user:`austinmm`. :bug:`3402` -* :doc:`/plugins/convert`: Added a new ``-l`` (``--link``) flag and ``link`` - option as well as the ``-H`` (``--hardlink``) flag and ``hardlink`` - option, which symlink or hardlink files that do not need to - be converted (instead of copying them). - :bug:`2324` -* :doc:`/plugins/replaygain`: The plugin now supports a ``per_disc`` option - that enables calculation of album ReplayGain on disc level instead of album - level. - Thanks to :user:`samuelnilsson`. - :bug:`293` -* :doc:`/plugins/replaygain`: The new ``ffmpeg`` ReplayGain backend supports - ``R128_`` tags. - :bug:`3056` -* :doc:`plugins/replaygain`: A new ``r128_targetlevel`` configuration option +- :doc:`/plugins/convert`: Added a new ``-l`` (``--link``) flag and ``link`` + option as well as the ``-H`` (``--hardlink``) flag and ``hardlink`` option, + which symlink or hardlink files that do not need to be converted (instead of + copying them). :bug:`2324` +- :doc:`/plugins/replaygain`: The plugin now supports a ``per_disc`` option that + enables calculation of album ReplayGain on disc level instead of album level. + Thanks to :user:`samuelnilsson`. :bug:`293` +- :doc:`/plugins/replaygain`: The new ``ffmpeg`` ReplayGain backend supports + ``R128_`` tags. :bug:`3056` +- :doc:`plugins/replaygain`: A new ``r128_targetlevel`` configuration option defines the reference volume for files using ``R128_`` tags. ``targetlevel`` - only configures the reference volume for ``REPLAYGAIN_`` files. - :bug:`3065` -* :doc:`/plugins/discogs`: The plugin now collects the "style" field. - Thanks to :user:`thedevilisinthedetails`. 
- :bug:`2579` :bug:`3251` -* :doc:`/plugins/absubmit`: By default, the plugin now avoids re-analyzing - files that already have AcousticBrainz data. - There are new ``force`` and ``pretend`` options to help control this new - behavior. - Thanks to :user:`SusannaMaria`. + only configures the reference volume for ``REPLAYGAIN_`` files. :bug:`3065` +- :doc:`/plugins/discogs`: The plugin now collects the "style" field. Thanks to + :user:`thedevilisinthedetails`. :bug:`2579` :bug:`3251` +- :doc:`/plugins/absubmit`: By default, the plugin now avoids re-analyzing files + that already have AcousticBrainz data. There are new ``force`` and ``pretend`` + options to help control this new behavior. Thanks to :user:`SusannaMaria`. :bug:`3318` -* :doc:`/plugins/discogs`: The plugin now also gets genre information and a - new ``discogs_albumid`` field from the Discogs API. - Thanks to :user:`thedevilisinthedetails`. - :bug:`465` :bug:`3322` -* :doc:`/plugins/acousticbrainz`: The plugin now fetches two more additional - fields: ``moods_mirex`` and ``timbre``. - Thanks to :user:`malcops`. - :bug:`2860` -* :doc:`/plugins/playlist` and :doc:`/plugins/smartplaylist`: A new - ``forward_slash`` config option facilitates compatibility with MPD on - Windows. - Thanks to :user:`MartyLake`. - :bug:`3331` :bug:`3334` -* The `data_source` field, which indicates which metadata source was used +- :doc:`/plugins/discogs`: The plugin now also gets genre information and a new + ``discogs_albumid`` field from the Discogs API. Thanks to + :user:`thedevilisinthedetails`. :bug:`465` :bug:`3322` +- :doc:`/plugins/acousticbrainz`: The plugin now fetches two more additional + fields: ``moods_mirex`` and ``timbre``. Thanks to :user:`malcops`. :bug:`2860` +- :doc:`/plugins/playlist` and :doc:`/plugins/smartplaylist`: A new + ``forward_slash`` config option facilitates compatibility with MPD on Windows. + Thanks to :user:`MartyLake`. 
:bug:`3331` :bug:`3334` +- The ``data_source`` field, which indicates which metadata source was used during an autotagging import, is now also applied as an album-level flexible - attribute. - :bug:`3350` :bug:`1693` -* :doc:`/plugins/beatport`: The plugin now gets the musical key, BPM, and - genre for each track. - :bug:`2080` -* A new :doc:`/plugins/bpsync` can synchronize metadata changes from the + attribute. :bug:`3350` :bug:`1693` +- :doc:`/plugins/beatport`: The plugin now gets the musical key, BPM, and genre + for each track. :bug:`2080` +- A new :doc:`/plugins/bpsync` can synchronize metadata changes from the Beatport database (like the existing :doc:`/plugins/mbsync` for MusicBrainz). -* :doc:`/plugins/hook`: The plugin now treats non-zero exit codes as errors. +- :doc:`/plugins/hook`: The plugin now treats non-zero exit codes as errors. :bug:`3409` -* :doc:`/plugins/subsonicupdate`: A new ``url`` configuration replaces the - older (and now deprecated) separate ``host``, ``port``, and ``contextpath`` - config options. As a consequence, the plugin can now talk to Subsonic over - HTTPS. - Thanks to :user:`jef`. - :bug:`3449` -* :doc:`/plugins/discogs`: The new ``index_tracks`` option enables - incorporation of work names and intra-work divisions into imported track - titles. - Thanks to :user:`cole-miller`. - :bug:`3459` -* :doc:`/plugins/web`: The query API now interprets backslashes as path - separators to support path queries. - Thanks to :user:`nmeum`. - :bug:`3567` -* ``beet import`` now handles tar archives with bzip2 or gzip compression. +- :doc:`/plugins/subsonicupdate`: A new ``url`` configuration replaces the older + (and now deprecated) separate ``host``, ``port``, and ``contextpath`` config + options. As a consequence, the plugin can now talk to Subsonic over HTTPS. + Thanks to :user:`jef`. 
:bug:`3449` +- :doc:`/plugins/discogs`: The new :conf:`plugins.discogs:index_tracks` option + enables incorporation of work names and intra-work divisions into imported + track titles. Thanks to :user:`cole-miller`. :bug:`3459` +- :doc:`/plugins/web`: The query API now interprets backslashes as path + separators to support path queries. Thanks to :user:`nmeum`. :bug:`3567` +- ``beet import`` now handles tar archives with bzip2 or gzip compression. :bug:`3606` -* ``beet import`` *also* now handles 7z archives, via the `py7zr`_ library. - Thanks to :user:`arogl`. - :bug:`3906` -* :doc:`/plugins/plexupdate`: Added an option to use a secure connection to - Plex server, and to ignore certificate validation errors if necessary. - :bug:`2871` -* :doc:`/plugins/convert`: A new ``delete_originals`` configuration option can - delete the source files after conversion during import. - Thanks to :user:`logan-arens`. - :bug:`2947` -* There is a new ``--plugins`` (or ``-p``) CLI flag to specify a list of - plugins to load. -* A new :ref:`genres` option fetches genre information from MusicBrainz. This - functionality depends on functionality that is currently unreleased in the - `python-musicbrainzngs`_ library: see PR `#266 - <https://github.com/alastair/python-musicbrainzngs/pull/266>`_. - Thanks to :user:`aereaux`. -* :doc:`/plugins/replaygain`: Analysis now happens in parallel using the - ``command`` and ``ffmpeg`` backends. - :bug:`3478` -* :doc:`plugins/replaygain`: The bs1770gain backend is removed. - Thanks to :user:`SamuelCook`. -* Added ``trackdisambig`` which stores the recording disambiguation from - MusicBrainz for each track. - :bug:`1904` -* :doc:`plugins/fetchart`: The new ``max_filesize`` configuration sets a - maximum target image file size. -* :doc:`/plugins/badfiles`: Checkers can now run during import with the +- ``beet import`` *also* now handles 7z archives, via the py7zr_ library. Thanks + to :user:`arogl`. 
:bug:`3906` +- :doc:`/plugins/plexupdate`: Added an option to use a secure connection to Plex + server, and to ignore certificate validation errors if necessary. :bug:`2871` +- :doc:`/plugins/convert`: A new ``delete_originals`` configuration option can + delete the source files after conversion during import. Thanks to + :user:`logan-arens`. :bug:`2947` +- There is a new ``--plugins`` (or ``-p``) CLI flag to specify a list of plugins + to load. +- A new :conf:`plugins.musicbrainz:genres` option fetches genre information from + MusicBrainz. This functionality depends on functionality that is currently + unreleased in the python-musicbrainzngs_ library: see PR `#266 + <https://github.com/alastair/python-musicbrainzngs/pull/266>`_. Thanks to + :user:`aereaux`. +- :doc:`/plugins/replaygain`: Analysis now happens in parallel using the + ``command`` and ``ffmpeg`` backends. :bug:`3478` +- :doc:`plugins/replaygain`: The bs1770gain backend is removed. Thanks to + :user:`SamuelCook`. +- Added ``trackdisambig`` which stores the recording disambiguation from + MusicBrainz for each track. :bug:`1904` +- :doc:`plugins/fetchart`: The new ``max_filesize`` configuration sets a maximum + target image file size. +- :doc:`/plugins/badfiles`: Checkers can now run during import with the ``check_on_import`` config option. -* :doc:`/plugins/export`: The plugin is now much faster when using the - `--include-keys` option is used. - Thanks to :user:`ssssam`. -* The importer's :ref:`set_fields` option now saves all updated fields to - on-disk metadata. - :bug:`3925` :bug:`3927` -* We now fetch ISRC identifiers from MusicBrainz. - Thanks to :user:`aereaux`. -* :doc:`/plugins/metasync`: The plugin now also fetches the "Date Added" field - from iTunes databases and stores it in the ``itunes_dateadded`` field. - Thanks to :user:`sandersantema`. -* :doc:`/plugins/lyrics`: Added a new Tekstowo.pl lyrics provider. 
Thanks to +- :doc:`/plugins/export`: The plugin is now much faster when the + ``--include-keys`` option is used. Thanks to :user:`ssssam`. +- The importer's :ref:`set_fields` option now saves all updated fields to + on-disk metadata. :bug:`3925` :bug:`3927` +- We now fetch ISRC identifiers from MusicBrainz. Thanks to :user:`aereaux`. +- :doc:`/plugins/metasync`: The plugin now also fetches the "Date Added" field + from iTunes databases and stores it in the ``itunes_dateadded`` field. Thanks + to :user:`sandersantema`. +- :doc:`/plugins/lyrics`: Added a new Tekstowo.pl lyrics provider. Thanks to various people for the implementation and for reporting issues with the - initial version. - :bug:`3344` :bug:`3904` :bug:`3905` :bug:`3994` -* ``beet update`` will now confirm that the user still wants to update if - their library folder cannot be found, preventing the user from accidentally - wiping out their beets database. - Thanks to user: `logan-arens`. - :bug:`1934` + initial version. :bug:`3344` :bug:`3904` :bug:`3905` :bug:`3994` +- ``beet update`` will now confirm that the user still wants to update if their + library folder cannot be found, preventing the user from accidentally wiping + out their beets database. Thanks to :user:`logan-arens`. :bug:`1934` Fixes: -* Adapt to breaking changes in Python's ``ast`` module in Python 3.8. -* :doc:`/plugins/beatport`: Fix the assignment of the `genre` field, and - rename `musical_key` to `initial_key`. +- Adapt to breaking changes in Python's ``ast`` module in Python 3.8. +- :doc:`/plugins/beatport`: Fix the assignment of the ``genre`` field, and + rename ``musical_key`` to ``initial_key``.
:bug:`3387` +- :doc:`/plugins/lyrics`: Fixed the Musixmatch backend for lyrics pages when + lyrics are divided into multiple elements on the webpage, and when the lyrics + are missing. +- :doc:`/plugins/web`: Allow use of the backslash character in regex queries. :bug:`3867` -* :doc:`/plugins/web`: Fixed a small bug that caused the album art path to be - redacted even when ``include_paths`` option is set. - :bug:`3866` -* :doc:`/plugins/discogs`: Fixed a bug with the ``index_tracks`` option that - sometimes caused the index to be discarded. Also, remove the extra semicolon - that was added when there is no index track. -* :doc:`/plugins/subsonicupdate`: The API client was using the `POST` method - rather the `GET` method. - Also includes better exception handling, response parsing, and tests. -* :doc:`/plugins/the`: Fixed incorrect regex for "the" that matched any - 3-letter combination of the letters t, h, e. - :bug:`3701` -* :doc:`/plugins/fetchart`: Fixed a bug that caused the plugin to not take +- :doc:`/plugins/web`: Fixed a small bug that caused the album art path to be + redacted even when the ``include_paths`` option is set. :bug:`3866` +- :doc:`/plugins/discogs`: Fixed a bug with the + :conf:`plugins.discogs:index_tracks` option that sometimes caused the index to + be discarded. Also, remove the extra semicolon that was added when there is no + index track. +- :doc:`/plugins/subsonicupdate`: The API client was using the ``POST`` method + rather than the ``GET`` method. Also includes better exception handling, + response parsing, and tests. +- :doc:`/plugins/the`: Fixed incorrect regex for "the" that matched any 3-letter + combination of the letters t, h, e. :bug:`3701` +- :doc:`/plugins/fetchart`: Fixed a bug that caused the plugin to not take environment variables, such as proxy servers, into account when making - requests. - :bug:`3450` -* :doc:`/plugins/fetchart`: Temporary files for fetched album art that fail
:bug:`3450` +- :doc:`/plugins/fetchart`: Temporary files for fetched album art that fail validation are now removed. -* :doc:`/plugins/inline`: In function-style field definitions that refer to - flexible attributes, values could stick around from one function invocation - to the next. This meant that, when displaying a list of objects, later - objects could seem to reuse values from earlier objects when they were - missing a value for a given field. These values are now properly undefined. - :bug:`2406` -* :doc:`/plugins/bpd`: Seeking by fractions of a second now works as intended, - fixing crashes in MPD clients like mpDris2 on seek. - The ``playlistid`` command now works properly in its zero-argument form. - :bug:`3214` -* :doc:`/plugins/replaygain`: Fix a Python 3 incompatibility in the Python - Audio Tools backend. - :bug:`3305` -* :doc:`/plugins/importadded`: Fixed a crash that occurred when the - ``after_write`` signal was emitted. - :bug:`3301` -* :doc:`plugins/replaygain`: Fix the storage format for R128 gain tags. +- :doc:`/plugins/inline`: In function-style field definitions that refer to + flexible attributes, values could stick around from one function invocation to + the next. This meant that, when displaying a list of objects, later objects + could seem to reuse values from earlier objects when they were missing a value + for a given field. These values are now properly undefined. :bug:`2406` +- :doc:`/plugins/bpd`: Seeking by fractions of a second now works as intended, + fixing crashes in MPD clients like mpDris2 on seek. The ``playlistid`` command + now works properly in its zero-argument form. :bug:`3214` +- :doc:`/plugins/replaygain`: Fix a Python 3 incompatibility in the Python Audio + Tools backend. :bug:`3305` +- :doc:`/plugins/importadded`: Fixed a crash that occurred when the + ``after_write`` signal was emitted. :bug:`3301` +- :doc:`plugins/replaygain`: Fix the storage format for R128 gain tags. 
:bug:`3311` :bug:`3314` -* :doc:`/plugins/discogs`: Fixed a crash that occurred when the master URI - isn't set in the API response. - :bug:`2965` :bug:`3239` -* :doc:`/plugins/spotify`: Fix handling of year-only release dates - returned by the Spotify albums API. - Thanks to :user:`rhlahuja`. - :bug:`3343` -* Fixed a bug that caused the UI to display incorrect track numbers for tracks - with index 0 when the ``per_disc_numbering`` option was set. - :bug:`3346` -* ``none_rec_action`` does not import automatically when ``timid`` is enabled. - Thanks to :user:`RollingStar`. - :bug:`3242` -* Fix a bug that caused a crash when tagging items with the beatport plugin. +- :doc:`/plugins/discogs`: Fixed a crash that occurred when the master URI isn't + set in the API response. :bug:`2965` :bug:`3239` +- :doc:`/plugins/spotify`: Fix handling of year-only release dates returned by + the Spotify albums API. Thanks to :user:`rhlahuja`. :bug:`3343` +- Fixed a bug that caused the UI to display incorrect track numbers for tracks + with index 0 when the ``per_disc_numbering`` option was set. :bug:`3346` +- ``none_rec_action`` does not import automatically when ``timid`` is enabled. + Thanks to :user:`RollingStar`. :bug:`3242` +- Fix a bug that caused a crash when tagging items with the beatport plugin. :bug:`3374` -* ``beet import`` now logs which files are ignored when in debug mode. +- ``beet import`` now logs which files are ignored when in debug mode. :bug:`3764` -* :doc:`/plugins/bpd`: Fix the transition to next track when in consume mode. - Thanks to :user:`aereaux`. - :bug:`3437` -* :doc:`/plugins/lyrics`: Fix a corner-case with Genius lowercase artist names +- :doc:`/plugins/bpd`: Fix the transition to next track when in consume mode. + Thanks to :user:`aereaux`. :bug:`3437` +- :doc:`/plugins/lyrics`: Fix a corner-case with Genius lowercase artist names :bug:`3446` -* :doc:`/plugins/parentwork`: Don't save tracks when nothing has changed. 
+- :doc:`/plugins/parentwork`: Don't save tracks when nothing has changed. :bug:`3492` -* Added a warning when configuration files defined in the `include` directive - of the configuration file fail to be imported. - :bug:`3498` -* Added normalization to integer values in the database, which should avoid +- Added a warning when configuration files defined in the ``include`` directive + of the configuration file fail to be imported. :bug:`3498` +- Added normalization to integer values in the database, which should avoid problems where fields like ``bpm`` would sometimes store non-integer values. :bug:`762` :bug:`3507` :bug:`3508` -* Fix a crash when querying for null values. - :bug:`3516` :bug:`3517` -* :doc:`/plugins/lyrics`: Tolerate a missing lyrics div in the Genius scraper. - Thanks to :user:`thejli21`. - :bug:`3535` :bug:`3554` -* :doc:`/plugins/lyrics`: Use the artist sort name to search for lyrics, which - can help find matches when the artist name has special characters. - Thanks to :user:`hashhar`. - :bug:`3340` :bug:`3558` -* :doc:`/plugins/replaygain`: Trying to calculate volume gain for an album - consisting of some formats using ``ReplayGain`` and some using ``R128`` - will no longer crash; instead it is skipped and and a message is logged. - The log message has also been rewritten for to improve clarity. - Thanks to :user:`autrimpo`. - :bug:`3533` -* :doc:`/plugins/lyrics`: Adapt the Genius backend to changes in markup to - reduce the scraping failure rate. - :bug:`3535` :bug:`3594` -* :doc:`/plugins/lyrics`: Fix a crash when writing ReST files for a query - without results or fetched lyrics. - :bug:`2805` -* :doc:`/plugins/fetchart`: Attempt to fetch pre-resized thumbnails from Cover +- Fix a crash when querying for null values. :bug:`3516` :bug:`3517` +- :doc:`/plugins/lyrics`: Tolerate a missing lyrics div in the Genius scraper. + Thanks to :user:`thejli21`. 
:bug:`3535` :bug:`3554` +- :doc:`/plugins/lyrics`: Use the artist sort name to search for lyrics, which + can help find matches when the artist name has special characters. Thanks to + :user:`hashhar`. :bug:`3340` :bug:`3558` +- :doc:`/plugins/replaygain`: Trying to calculate volume gain for an album + consisting of some formats using ``ReplayGain`` and some using ``R128`` will + no longer crash; instead it is skipped and a message is logged. The log + message has also been rewritten to improve clarity. Thanks to + :user:`autrimpo`. :bug:`3533` +- :doc:`/plugins/lyrics`: Adapt the Genius backend to changes in markup to + reduce the scraping failure rate. :bug:`3535` :bug:`3594` +- :doc:`/plugins/lyrics`: Fix a crash when writing ReST files for a query + without results or fetched lyrics. :bug:`2805` +- :doc:`/plugins/fetchart`: Attempt to fetch pre-resized thumbnails from Cover Art Archive if the ``maxwidth`` option matches one of the sizes supported by - the Cover Art Archive API. - Thanks to :user:`trolley`. - :bug:`3637` -* :doc:`/plugins/ipfs`: Fix Python 3 compatibility. - Thanks to :user:`musoke`. + the Cover Art Archive API. Thanks to :user:`trolley`. :bug:`3637` +- :doc:`/plugins/ipfs`: Fix Python 3 compatibility. Thanks to :user:`musoke`. :bug:`2554` -* Fix a bug that caused metadata starting with something resembling a drive +- Fix a bug that caused metadata starting with something resembling a drive letter to be incorrectly split into an extra directory after the colon. :bug:`3685` -* :doc:`/plugins/mpdstats`: Don't record a skip when stopping MPD, as MPD keeps - the current track in the queue. - Thanks to :user:`aereaux`. - :bug:`3722` -* String-typed fields are now normalized to string values, avoiding an +- :doc:`/plugins/mpdstats`: Don't record a skip when stopping MPD, as MPD keeps + the current track in the queue. Thanks to :user:`aereaux`.
:bug:`3722` +- String-typed fields are now normalized to string values, avoiding an occasional crash when using both the :doc:`/plugins/fetchart` and the - :doc:`/plugins/discogs` together. - :bug:`3773` :bug:`3774` -* Fix a bug causing PIL to generate poor quality JPEGs when resizing artwork. + :doc:`/plugins/discogs` together. :bug:`3773` :bug:`3774` +- Fix a bug causing PIL to generate poor quality JPEGs when resizing artwork. :bug:`3743` -* :doc:`plugins/keyfinder`: Catch output from ``keyfinder-cli`` that is missing key. - :bug:`2242` -* :doc:`plugins/replaygain`: Disable parallel analysis on import by default. +- :doc:`plugins/keyfinder`: Catch output from ``keyfinder-cli`` that is missing + key. :bug:`2242` +- :doc:`plugins/replaygain`: Disable parallel analysis on import by default. :bug:`3819` -* :doc:`/plugins/mpdstats`: Fix Python 2/3 compatibility - :bug:`3798` -* :doc:`/plugins/discogs`: Replace the deprecated official `discogs-client` - library with the community supported `python3-discogs-client`_ library. +- :doc:`/plugins/mpdstats`: Fix Python 2/3 compatibility :bug:`3798` +- :doc:`/plugins/discogs`: Replace the deprecated official ``discogs-client`` + library with the community supported python3-discogs-client_ library. :bug:`3608` -* :doc:`/plugins/chroma`: Fixed submitting AcoustID information for tracks - that already have a fingerprint. - :bug:`3834` -* Allow equals within the value part of the ``--set`` option to the ``beet - import`` command. - :bug:`2984` -* Duplicates can now generate checksums. Thanks :user:`wisp3rwind` - for the pointer to how to solve. Thanks to :user:`arogl`. - :bug:`2873` -* Templates that use ``%ifdef`` now produce the expected behavior when used in - conjunction with non-string fields from the :doc:`/plugins/types`. - :bug:`3852` -* :doc:`/plugins/lyrics`: Fix crashes when a website could not be retrieved, - affecting at least the Genius source. 
- :bug:`3970` -* :doc:`/plugins/duplicates`: Fix a crash when running the ``dup`` command with - a query that returns no results. - :bug:`3943` -* :doc:`/plugins/beatport`: Fix the default assignment of the musical key. +- :doc:`/plugins/chroma`: Fixed submitting AcoustID information for tracks that + already have a fingerprint. :bug:`3834` +- Allow equals within the value part of the ``--set`` option to the ``beet + import`` command. :bug:`2984` +- Duplicates can now generate checksums. Thanks :user:`wisp3rwind` for the + pointer to how to solve. Thanks to :user:`arogl`. :bug:`2873` +- Templates that use ``%ifdef`` now produce the expected behavior when used in + conjunction with non-string fields from the :doc:`/plugins/types`. :bug:`3852` +- :doc:`/plugins/lyrics`: Fix crashes when a website could not be retrieved, + affecting at least the Genius source. :bug:`3970` +- :doc:`/plugins/duplicates`: Fix a crash when running the ``dup`` command with + a query that returns no results. :bug:`3943` +- :doc:`/plugins/beatport`: Fix the default assignment of the musical key. :bug:`3377` -* :doc:`/plugins/lyrics`: Improved searching on the Genius backend when the - artist contains special characters. - :bug:`3634` -* :doc:`/plugins/parentwork`: Also get the composition date of the parent work, - instead of just the child work. - Thanks to :user:`aereaux`. - :bug:`3650` -* :doc:`/plugins/lyrics`: Fix a bug in the heuristic for detecting valid - lyrics in the Google source. - :bug:`2969` -* :doc:`/plugins/thumbnails`: Fix a crash due to an incorrect string type on - Python 3. - :bug:`3360` -* :doc:`/plugins/fetchart`: The Cover Art Archive source now iterates over - all front images instead of blindly selecting the first one. -* :doc:`/plugins/lyrics`: Removed the LyricWiki source (the site shut down on +- :doc:`/plugins/lyrics`: Improved searching on the Genius backend when the + artist contains special characters. 
:bug:`3634` +- :doc:`/plugins/parentwork`: Also get the composition date of the parent work, + instead of just the child work. Thanks to :user:`aereaux`. :bug:`3650` +- :doc:`/plugins/lyrics`: Fix a bug in the heuristic for detecting valid lyrics + in the Google source. :bug:`2969` +- :doc:`/plugins/thumbnails`: Fix a crash due to an incorrect string type on + Python 3. :bug:`3360` +- :doc:`/plugins/fetchart`: The Cover Art Archive source now iterates over all + front images instead of blindly selecting the first one. +- :doc:`/plugins/lyrics`: Removed the LyricWiki source (the site shut down on 21/09/2020). -* :doc:`/plugins/subsonicupdate`: The plugin is now functional again. A new - `auth` configuration option is required in the configuration to specify the - flavor of authentication to use. - :bug:`4002` +- :doc:`/plugins/subsonicupdate`: The plugin is now functional again. A new + ``auth`` configuration option is required in the configuration to specify the + flavor of authentication to use. :bug:`4002` For plugin developers: -* `MediaFile`_ has been split into a standalone project. Where you used to do +- MediaFile_ has been split into a standalone project. Where you used to do ``from beets import mediafile``, now just do ``import mediafile``. Beets re-exports MediaFile at the old location for backwards-compatibility, but a deprecation warning is raised if you do this since we might drop this wrapper in a future release. -* Similarly, we've replaced beets' configuration library (previously called - Confit) with a standalone version called `Confuse`_. Where you used to do - ``from beets.util import confit``, now just do ``import confuse``. The code - is almost identical apart from the name change. Again, we'll re-export at the - old location (with a deprecation warning) for backwards compatibility, but - we might stop doing this in a future release. 
-* ``beets.util.command_output`` now returns a named tuple containing both the +- Similarly, we've replaced beets' configuration library (previously called + Confit) with a standalone version called Confuse_. Where you used to do ``from + beets.util import confit``, now just do ``import confuse``. The code is almost + identical apart from the name change. Again, we'll re-export at the old + location (with a deprecation warning) for backwards compatibility, but we + might stop doing this in a future release. +- ``beets.util.command_output`` now returns a named tuple containing both the standard output and the standard error data instead of just stdout alone. - Client code will need to access the ``stdout`` attribute on the return - value. - Thanks to :user:`zsinskri`. - :bug:`3329` -* There were sporadic failures in ``test.test_player``. Hopefully these are - fixed. If they resurface, please reopen the relevant issue. - :bug:`3309` :bug:`3330` -* The ``beets.plugins.MetadataSourcePlugin`` base class has been added to - simplify development of plugins which query album, track, and search - APIs to provide metadata matches for the importer. Refer to the + Client code will need to access the ``stdout`` attribute on the return value. + Thanks to :user:`zsinskri`. :bug:`3329` +- There were sporadic failures in ``test.test_player``. Hopefully these are + fixed. If they resurface, please reopen the relevant issue. :bug:`3309` + :bug:`3330` +- The ``beets.plugins.MetadataSourcePlugin`` base class has been added to + simplify development of plugins which query album, track, and search APIs to + provide metadata matches for the importer. Refer to the :doc:`/plugins/spotify` and the :doc:`/plugins/deezer` for examples of using - this template class. - :bug:`3355` -* Accessing fields on an `Item` now falls back to the album's - attributes. 
So, for example, ``item.foo`` will first look for a field `foo` on - `item` and, if it doesn't exist, next tries looking for a field named `foo` - on the album that contains `item`. If you specifically want to access an - item's attributes, use ``Item.get(key, with_album=False)``. :bug:`2988` -* ``Item.keys`` also has a ``with_album`` argument now, defaulting to ``True``. -* A ``revision`` attribute has been added to ``Database``. It is increased on + this template class. :bug:`3355` +- Accessing fields on an ``Item`` now falls back to the album's attributes. So, + for example, ``item.foo`` will first look for a field ``foo`` on ``item`` and, + if it doesn't exist, next tries looking for a field named ``foo`` on the album + that contains ``item``. If you specifically want to access an item's + attributes, use ``Item.get(key, with_album=False)``. :bug:`2988` +- ``Item.keys`` also has a ``with_album`` argument now, defaulting to ``True``. +- A ``revision`` attribute has been added to ``Database``. It is increased on every transaction that mutates it. :bug:`2988` -* The classes ``AlbumInfo`` and ``TrackInfo`` now convey arbitrary attributes +- The classes ``AlbumInfo`` and ``TrackInfo`` now convey arbitrary attributes instead of a fixed, built-in set of field names (which was important to - address :bug:`1547`). - Thanks to :user:`dosoe`. -* Two new events, ``mb_album_extract`` and ``mb_track_extract``, let plugins - add new fields based on MusicBrainz data. Thanks to :user:`dosoe`. + address :bug:`1547`). Thanks to :user:`dosoe`. +- Two new events, ``mb_album_extract`` and ``mb_track_extract``, let plugins add + new fields based on MusicBrainz data. Thanks to :user:`dosoe`. For packagers: -* Beets' library for manipulating media file metadata has now been split to a - standalone project called `MediaFile`_, released as :pypi:`mediafile`. Beets - now depends on this new package. 
Beets now depends on Mutagen transitively - through MediaFile rather than directly, except in the case of one of beets' - plugins (in particular, the :doc:`/plugins/scrub`). -* Beets' library for configuration has been split into a standalone project - called `Confuse`_, released as :pypi:`confuse`. Beets now depends on this - package. Confuse has existed separately for some time and is used by - unrelated projects, but until now we've been bundling a copy within beets. -* We attempted to fix an unreliable test, so a patch to `skip <https://sources.debian.org/src/beets/1.4.7-2/debian/patches/skip-broken-test/>`_ - or `repair <https://build.opensuse.org/package/view_file/openSUSE:Factory/beets/fix_test_command_line_option_relative_to_working_dir.diff?expand=1>`_ - the test may no longer be necessary. -* This version drops support for Python 3.4. -* We have removed an optional dependency on bs1770gain. +- Beets' library for manipulating media file metadata has now been split to a + standalone project called MediaFile_, released as :pypi:`mediafile`. Beets now + depends on this new package. Beets now depends on Mutagen transitively through + MediaFile rather than directly, except in the case of one of beets' plugins + (in particular, the :doc:`/plugins/scrub`). +- Beets' library for configuration has been split into a standalone project + called Confuse_, released as :pypi:`confuse`. Beets now depends on this + package. Confuse has existed separately for some time and is used by unrelated + projects, but until now we've been bundling a copy within beets. +- We attempted to fix an unreliable test, so a patch to skip-broken-test_ or + repairing_ may no longer be necessary. +- This version drops support for Python 3.4. +- We have removed an optional dependency on bs1770gain. + +.. _confuse: https://github.com/beetbox/confuse + +.. _deezer: https://www.deezer.com + +.. _fish shell: https://fishshell.com/ -.. _Fish shell: https://fishshell.com/ -.. 
_MediaFile: https://github.com/beetbox/mediafile -.. _Confuse: https://github.com/beetbox/confuse -.. _works: https://musicbrainz.org/doc/Work -.. _Deezer: https://www.deezer.com .. _keyfinder-cli: https://github.com/EvanPurkhiser/keyfinder-cli + .. _last.fm: https://last.fm -.. _python3-discogs-client: https://github.com/joalla/discogs_client + +.. _mediafile: https://github.com/beetbox/mediafile + .. _py7zr: https://pypi.org/project/py7zr/ +.. _python3-discogs-client: https://github.com/joalla/discogs_client + +.. _repairing: https://build.opensuse.org/package/view_file/openSUSE:Factory/beets/fix_test_command_line_option_relative_to_working_dir.diff?expand=1 + +.. _skip-broken-test: https://sources.debian.org/src/beets/1.4.7-2/debian/patches/skip-broken-test/ + +.. _works: https://musicbrainz.org/doc/Work + 1.4.9 (May 30, 2019) -------------------- @@ -1143,754 +1405,644 @@ beets' dependencies in the next version. The new feature is: -* You can use the `NO_COLOR`_ environment variable to disable terminal colors. +- You can use the NO_COLOR_ environment variable to disable terminal colors. :bug:`3273` There are some fixes in this release: -* Fix a regression in the last release that made the image resizer fail to - detect older versions of ImageMagick. - :bug:`3269` -* :doc:`/plugins/gmusic`: The ``oauth_file`` config option now supports more - flexible path values, including ``~`` for the home directory. - :bug:`3270` -* :doc:`/plugins/gmusic`: Fix a crash when using version 12.0.0 or later of - the ``gmusicapi`` module. - :bug:`3270` -* Fix an incompatibility with Python 3.8's AST changes. - :bug:`3278` +- Fix a regression in the last release that made the image resizer fail to + detect older versions of ImageMagick. :bug:`3269` +- ``/plugins/gmusic``: The ``oauth_file`` config option now supports more + flexible path values, including ``~`` for the home directory. 
:bug:`3270` +- ``/plugins/gmusic``: Fix a crash when using version 12.0.0 or later of the + ``gmusicapi`` module. :bug:`3270` +- Fix an incompatibility with Python 3.8's AST changes. :bug:`3278` Here's a note for packagers: -* ``pathlib`` is now an optional test dependency on Python 3.4+, removing the - need for `a Debian patch <https://sources.debian.org/src/beets/1.4.7-2/debian/patches/pathlib-is-stdlib/>`_. - :bug:`3275` +- ``pathlib`` is now an optional test dependency on Python 3.4+, removing the + need for the `Debian pathlib patch`_. :bug:`3275` -.. _NO_COLOR: https://no-color.org +.. _debian pathlib patch: https://sources.debian.org/src/beets/1.4.7-2/debian/patches/pathlib-is-stdlib/ + +.. _no_color: https://no-color.org 1.4.8 (May 16, 2019) -------------------- -This release is far too long in coming, but it's a good one. There is the -usual torrent of new features and a ridiculously long line of fixes, but there -are also some crucial maintenance changes. -We officially support Python 3.7 and 3.8, and some performance optimizations -can (anecdotally) make listing your library more than three times faster than -in the previous version. +This release is far too long in coming, but it's a good one. There is the usual +torrent of new features and a ridiculously long line of fixes, but there are +also some crucial maintenance changes. We officially support Python 3.7 and 3.8, +and some performance optimizations can (anecdotally) make listing your library +more than three times faster than in the previous version. The new core features are: -* A new :ref:`config-aunique` configuration option allows setting default +- A new :ref:`config-aunique` configuration option allows setting default options for the :ref:`aunique` template function. -* The ``albumdisambig`` field no longer includes the MusicBrainz release group +- The ``albumdisambig`` field no longer includes the MusicBrainz release group disambiguation comment. 
A new ``releasegroupdisambig`` field has been added. :bug:`3024` -* The :ref:`modify-cmd` command now allows resetting fixed attributes. For +- The :ref:`modify-cmd` command now allows resetting fixed attributes. For example, ``beet modify -a artist:beatles artpath!`` resets ``artpath`` - attribute from matching albums back to the default value. - :bug:`2497` -* A new importer option, :ref:`ignore_data_tracks`, lets you skip audio tracks + attribute from matching albums back to the default value. :bug:`2497` +- A new importer option, :ref:`ignore_data_tracks`, lets you skip audio tracks contained in data files. :bug:`3021` There are some new plugins: -* The :doc:`/plugins/playlist` can query the beets library using M3U playlists. - Thanks to :user:`Holzhaus` and :user:`Xenopathic`. - :bug:`123` :bug:`3145` -* The :doc:`/plugins/loadext` allows loading of SQLite extensions, primarily - for use with the ICU SQLite extension for internationalization. - :bug:`3160` :bug:`3226` -* The :doc:`/plugins/subsonicupdate` can automatically update your Subsonic - library. - Thanks to :user:`maffo999`. - :bug:`3001` +- The :doc:`/plugins/playlist` can query the beets library using M3U playlists. + Thanks to :user:`Holzhaus` and :user:`Xenopathic`. :bug:`123` :bug:`3145` +- The :doc:`/plugins/loadext` allows loading of SQLite extensions, primarily for + use with the ICU SQLite extension for internationalization. :bug:`3160` + :bug:`3226` +- The :doc:`/plugins/subsonicupdate` can automatically update your Subsonic + library. Thanks to :user:`maffo999`. :bug:`3001` And many improvements to existing plugins: -* :doc:`/plugins/lastgenre`: Added option ``-A`` to match individual tracks - and singletons. - :bug:`3220` :bug:`3219` -* :doc:`/plugins/play`: The plugin can now emit a UTF-8 BOM, fixing some - issues with foobar2000 and Winamp. - Thanks to :user:`mz2212`. 
- :bug:`2944` -* :doc:`/plugins/gmusic`: - * Add a new option to automatically upload to Google Play Music library on - track import. - Thanks to :user:`shuaiscott`. - * Add new options for Google Play Music authentication. - Thanks to :user:`thetarkus`. - :bug:`3002` -* :doc:`/plugins/replaygain`: ``albumpeak`` on large collections is calculated - as the average, not the maximum. - :bug:`3008` :bug:`3009` -* :doc:`/plugins/chroma`: - * Now optionally has a bias toward looking up more relevant releases - according to the :ref:`preferred` configuration options. - Thanks to :user:`archer4499`. - :bug:`3017` - * Fingerprint values are now properly stored as strings, which prevents - strange repeated output when running ``beet write``. - Thanks to :user:`Holzhaus`. - :bug:`3097` :bug:`2942` -* :doc:`/plugins/convert`: The plugin now has an ``id3v23`` option that allows - you to override the global ``id3v23`` option. - Thanks to :user:`Holzhaus`. - :bug:`3104` -* :doc:`/plugins/spotify`: - * The plugin now uses OAuth for authentication to the Spotify API. - Thanks to :user:`rhlahuja`. - :bug:`2694` :bug:`3123` - * The plugin now works as an import metadata - provider: you can match tracks and albums using the Spotify database. - Thanks to :user:`rhlahuja`. - :bug:`3123` -* :doc:`/plugins/ipfs`: The plugin now supports a ``nocopy`` option which - passes that flag to ipfs. - Thanks to :user:`wildthyme`. -* :doc:`/plugins/discogs`: The plugin now has rate limiting for the Discogs API. - :bug:`3081` -* :doc:`/plugins/mpdstats`, :doc:`/plugins/mpdupdate`: These plugins now use - the ``MPD_PORT`` environment variable if no port is specified in the - configuration file. - :bug:`3223` -* :doc:`/plugins/bpd`: - * MPD protocol commands ``consume`` and ``single`` are now supported along - with updated semantics for ``repeat`` and ``previous`` and new fields for - ``status``. The bpd server now understands and ignores some additional - commands. 
- :bug:`3200` :bug:`800` - * MPD protocol command ``idle`` is now supported, allowing the MPD version - to be bumped to 0.14. - :bug:`3205` :bug:`800` - * MPD protocol command ``decoders`` is now supported. - :bug:`3222` - * The plugin now uses the main beets logging system. - The special-purpose ``--debug`` flag has been removed. - Thanks to :user:`arcresu`. - :bug:`3196` -* :doc:`/plugins/mbsync`: The plugin no longer queries MusicBrainz when either - the ``mb_albumid`` or ``mb_trackid`` field is invalid. - See also the discussion on `Google Groups`_ - Thanks to :user:`arogl`. -* :doc:`/plugins/export`: The plugin now also exports ``path`` field if the user - explicitly specifies it with ``-i`` parameter. This only works when exporting - library fields. - :bug:`3084` -* :doc:`/plugins/acousticbrainz`: The plugin now declares types for all its - fields, which enables easier querying and avoids a problem where very small - numbers would be stored as strings. - Thanks to :user:`rain0r`. - :bug:`2790` :bug:`3238` +- :doc:`/plugins/lastgenre`: Added option ``-A`` to match individual tracks and + singletons. :bug:`3220` :bug:`3219` +- :doc:`/plugins/play`: The plugin can now emit a UTF-8 BOM, fixing some issues + with foobar2000 and Winamp. Thanks to :user:`mz2212`. :bug:`2944` +- ``/plugins/gmusic``: -.. _Google Groups: https://groups.google.com/forum/#!searchin/beets-users/mbsync|sort:date/beets-users/iwCF6bNdh9A/i1xl4Gx8BQAJ + - Add a new option to automatically upload to Google Play Music library on + track import. Thanks to :user:`shuaiscott`. + - Add new options for Google Play Music authentication. Thanks to + :user:`thetarkus`. :bug:`3002` + +- :doc:`/plugins/replaygain`: ``albumpeak`` on large collections is calculated + as the average, not the maximum. :bug:`3008` :bug:`3009` +- :doc:`/plugins/chroma`: + + - Now optionally has a bias toward looking up more relevant releases according + to the :ref:`preferred` configuration options. 
Thanks to :user:`archer4499`. + :bug:`3017` + - Fingerprint values are now properly stored as strings, which prevents + strange repeated output when running ``beet write``. Thanks to + :user:`Holzhaus`. :bug:`3097` :bug:`2942` + +- :doc:`/plugins/convert`: The plugin now has an ``id3v23`` option that allows + you to override the global ``id3v23`` option. Thanks to :user:`Holzhaus`. + :bug:`3104` +- :doc:`/plugins/spotify`: + + - The plugin now uses OAuth for authentication to the Spotify API. Thanks to + :user:`rhlahuja`. :bug:`2694` :bug:`3123` + - The plugin now works as an import metadata provider: you can match tracks + and albums using the Spotify database. Thanks to :user:`rhlahuja`. + :bug:`3123` + +- :doc:`/plugins/ipfs`: The plugin now supports a ``nocopy`` option which passes + that flag to ipfs. Thanks to :user:`wildthyme`. +- :doc:`/plugins/discogs`: The plugin now has rate limiting for the Discogs API. + :bug:`3081` +- :doc:`/plugins/mpdstats`, :doc:`/plugins/mpdupdate`: These plugins now use the + ``MPD_PORT`` environment variable if no port is specified in the configuration + file. :bug:`3223` +- :doc:`/plugins/bpd`: + + - MPD protocol commands ``consume`` and ``single`` are now supported along + with updated semantics for ``repeat`` and ``previous`` and new fields for + ``status``. The bpd server now understands and ignores some additional + commands. :bug:`3200` :bug:`800` + - MPD protocol command ``idle`` is now supported, allowing the MPD version to + be bumped to 0.14. :bug:`3205` :bug:`800` + - MPD protocol command ``decoders`` is now supported. :bug:`3222` + - The plugin now uses the main beets logging system. The special-purpose + ``--debug`` flag has been removed. Thanks to :user:`arcresu`. :bug:`3196` + +- :doc:`/plugins/mbsync`: The plugin no longer queries MusicBrainz when either + the ``mb_albumid`` or ``mb_trackid`` field is invalid. See also the discussion + on `Google Groups`_ Thanks to :user:`arogl`. 
+- :doc:`/plugins/export`: The plugin now also exports ``path`` field if the user + explicitly specifies it with ``-i`` parameter. This only works when exporting + library fields. :bug:`3084` +- :doc:`/plugins/acousticbrainz`: The plugin now declares types for all its + fields, which enables easier querying and avoids a problem where very small + numbers would be stored as strings. Thanks to :user:`rain0r`. :bug:`2790` + :bug:`3238` + +.. _google groups: https://groups.google.com/forum/#!searchin/beets-users/mbsync|sort:date/beets-users/iwCF6bNdh9A/i1xl4Gx8BQAJ Some improvements have been focused on improving beets' performance: -* Querying the library is now faster: - * We only convert fields that need to be displayed. - Thanks to :user:`pprkut`. - :bug:`3089` - * We now compile templates once and reuse them instead of recompiling them - to print out each matching object. - Thanks to :user:`SimonPersson`. - :bug:`3258` - * Querying the library for items is now faster, for all queries that do not - need to access album level properties. This was implemented by lazily - fetching the album only when needed. - Thanks to :user:`SimonPersson`. - :bug:`3260` -* :doc:`/plugins/absubmit`, :doc:`/plugins/badfiles`: Analysis now works in - parallel (on Python 3 only). - Thanks to :user:`bemeurer`. - :bug:`2442` :bug:`3003` -* :doc:`/plugins/mpdstats`: Use the ``currentsong`` MPD command instead of +- Querying the library is now faster: + + - We only convert fields that need to be displayed. Thanks to :user:`pprkut`. + :bug:`3089` + - We now compile templates once and reuse them instead of recompiling them to + print out each matching object. Thanks to :user:`SimonPersson`. :bug:`3258` + - Querying the library for items is now faster, for all queries that do not + need to access album level properties. This was implemented by lazily + fetching the album only when needed. Thanks to :user:`SimonPersson`. 
+ :bug:`3260` + +- :doc:`/plugins/absubmit`, :doc:`/plugins/badfiles`: Analysis now works in + parallel (on Python 3 only). Thanks to :user:`bemeurer`. :bug:`2442` + :bug:`3003` +- :doc:`/plugins/mpdstats`: Use the ``currentsong`` MPD command instead of ``playlist`` to get the current song, improving performance when the playlist - is long. - Thanks to :user:`ray66`. - :bug:`3207` :bug:`2752` + is long. Thanks to :user:`ray66`. :bug:`3207` :bug:`2752` Several improvements are related to usability: -* The disambiguation string for identifying albums in the importer now shows - the catalog number. - Thanks to :user:`8h2a`. - :bug:`2951` -* Added whitespace padding to missing tracks dialog to improve readability. - Thanks to :user:`jams2`. - :bug:`2962` -* The :ref:`move-cmd` command now lists the number of items already in-place. - Thanks to :user:`RollingStar`. - :bug:`3117` -* Modify selection can now be applied early without selecting every item. +- The disambiguation string for identifying albums in the importer now shows the + catalog number. Thanks to :user:`8h2a`. :bug:`2951` +- Added whitespace padding to missing tracks dialog to improve readability. + Thanks to :user:`jams2`. :bug:`2962` +- The :ref:`move-cmd` command now lists the number of items already in-place. + Thanks to :user:`RollingStar`. :bug:`3117` +- Modify selection can now be applied early without selecting every item. :bug:`3083` -* Beets now emits more useful messages during startup if SQLite returns an error. The - SQLite error message is now attached to the beets message. +- Beets now emits more useful messages during startup if SQLite returns an + error. The SQLite error message is now attached to the beets message. :bug:`3005` -* Fixed a confusing typo when the :doc:`/plugins/convert` plugin copies the art - covers. - :bug:`3063` +- Fixed a confusing typo when the :doc:`/plugins/convert` plugin copies the art + covers. 
:bug:`3063` Many fixes have been focused on issues where beets would previously crash: -* Avoid a crash when archive extraction fails during import. - :bug:`3041` -* Missing album art file during an update no longer causes a fatal exception +- Avoid a crash when archive extraction fails during import. :bug:`3041` +- Missing album art file during an update no longer causes a fatal exception (instead, an error is logged and the missing file path is removed from the - library). - :bug:`3030` -* When updating the database, beets no longer tries to move album art twice. + library). :bug:`3030` +- When updating the database, beets no longer tries to move album art twice. :bug:`3189` -* Fix an unhandled exception when pruning empty directories. - :bug:`1996` :bug:`3209` -* :doc:`/plugins/fetchart`: Added network connection error handling to backends - so that beets won't crash if a request fails. - Thanks to :user:`Holzhaus`. +- Fix an unhandled exception when pruning empty directories. :bug:`1996` + :bug:`3209` +- :doc:`/plugins/fetchart`: Added network connection error handling to backends + so that beets won't crash if a request fails. Thanks to :user:`Holzhaus`. :bug:`1579` -* :doc:`/plugins/badfiles`: Avoid a crash when the underlying tool emits - undecodable output. - :bug:`3165` -* :doc:`/plugins/beatport`: Avoid a crash when the server produces an error. +- :doc:`/plugins/badfiles`: Avoid a crash when the underlying tool emits + undecodable output. :bug:`3165` +- :doc:`/plugins/beatport`: Avoid a crash when the server produces an error. :bug:`3184` -* :doc:`/plugins/bpd`: Fix crashes in the bpd server during exception handling. +- :doc:`/plugins/bpd`: Fix crashes in the bpd server during exception handling. :bug:`3200` -* :doc:`/plugins/bpd`: Fix a crash triggered when certain clients tried to list - the albums belonging to a particular artist. 
- :bug:`3007` :bug:`3215` -* :doc:`/plugins/replaygain`: Avoid a crash when the ``bs1770gain`` tool emits - malformed XML. - :bug:`2983` :bug:`3247` +- :doc:`/plugins/bpd`: Fix a crash triggered when certain clients tried to list + the albums belonging to a particular artist. :bug:`3007` :bug:`3215` +- :doc:`/plugins/replaygain`: Avoid a crash when the ``bs1770gain`` tool emits + malformed XML. :bug:`2983` :bug:`3247` There are many fixes related to compatibility with our dependencies including addressing changes interfaces: -* On Python 2, pin the :pypi:`jellyfish` requirement to version 0.6.0 for +- On Python 2, pin the :pypi:`jellyfish` requirement to version 0.6.0 for compatibility. -* Fix compatibility with Python 3.7 and its change to a name in the - :stdlib:`re` module. - :bug:`2978` -* Fix several uses of deprecated standard-library features on Python 3.7. - Thanks to :user:`arcresu`. - :bug:`3197` -* Fix compatibility with pre-release versions of Python 3.8. - :bug:`3201` :bug:`3202` -* :doc:`/plugins/web`: Fix an error when using more recent versions of Flask - with CORS enabled. - Thanks to :user:`rveachkc`. - :bug:`2979`: :bug:`2980` -* Avoid some deprecation warnings with certain versions of the MusicBrainz - library. - Thanks to :user:`zhelezov`. - :bug:`2826` :bug:`3092` -* Restore iTunes Store album art source, and remove the dependency on +- Fix compatibility with Python 3.7 and its change to a name in the :stdlib:`re` + module. :bug:`2978` +- Fix several uses of deprecated standard-library features on Python 3.7. Thanks + to :user:`arcresu`. :bug:`3197` +- Fix compatibility with pre-release versions of Python 3.8. :bug:`3201` + :bug:`3202` +- :doc:`/plugins/web`: Fix an error when using more recent versions of Flask + with CORS enabled. Thanks to :user:`rveachkc`. :bug:`2979` :bug:`2980` +- Avoid some deprecation warnings with certain versions of the MusicBrainz + library. Thanks to :user:`zhelezov`. 
:bug:`2826` :bug:`3092` +- Restore iTunes Store album art source, and remove the dependency on :pypi:`python-itunes`, which had gone unmaintained and was not - Python-3-compatible. - Thanks to :user:`ocelma` for creating :pypi:`python-itunes` in the first place. - Thanks to :user:`nathdwek`. + Python-3-compatible. Thanks to :user:`ocelma` for creating + :pypi:`python-itunes` in the first place. Thanks to :user:`nathdwek`. :bug:`2371` :bug:`2551` :bug:`2718` -* :doc:`/plugins/lastgenre`, :doc:`/plugins/edit`: Avoid a deprecation warnings - from the :pypi:`PyYAML` library by switching to the safe loader. - Thanks to :user:`translit` and :user:`sbraz`. - :bug:`3192` :bug:`3225` -* Fix a problem when resizing images with :pypi:`PIL`/:pypi:`pillow` on Python 3. - Thanks to :user:`architek`. - :bug:`2504` :bug:`3029` +- :doc:`/plugins/lastgenre`, :doc:`/plugins/edit`: Avoid a deprecation warnings + from the :pypi:`PyYAML` library by switching to the safe loader. Thanks to + :user:`translit` and :user:`sbraz`. :bug:`3192` :bug:`3225` +- Fix a problem when resizing images with :pypi:`PIL`/:pypi:`pillow` on Python + 3. Thanks to :user:`architek`. :bug:`2504` :bug:`3029` And there are many other fixes: -* R128 normalization tags are now properly deleted from files when the values - are missing. - Thanks to :user:`autrimpo`. - :bug:`2757` -* Display the artist credit when matching albums if the :ref:`artist_credit` - configuration option is set. - :bug:`2953` -* With the :ref:`from_scratch` configuration option set, only writable fields +- R128 normalization tags are now properly deleted from files when the values + are missing. Thanks to :user:`autrimpo`. :bug:`2757` +- Display the artist credit when matching albums if the :ref:`artist_credit` + configuration option is set. :bug:`2953` +- With the :ref:`from_scratch` configuration option set, only writable fields are cleared. Beets now no longer ignores the format your music is saved in. 
:bug:`2972` -* The ``%aunique`` template function now works correctly with the - ``-f/--format`` option. - :bug:`3043` -* Fixed the ordering of items when manually selecting changes while updating - tags - Thanks to :user:`TaizoSimpson`. - :bug:`3501` -* The ``%title`` template function now works correctly with apostrophes. - Thanks to :user:`GuilhermeHideki`. - :bug:`3033` -* :doc:`/plugins/lastgenre`: It's now possible to set the ``prefer_specific`` - option without also setting ``canonical``. - :bug:`2973` -* :doc:`/plugins/fetchart`: The plugin now respects the ``ignore`` and - ``ignore_hidden`` settings. - :bug:`1632` -* :doc:`/plugins/hook`: Fix byte string interpolation in hook commands. +- The ``%aunique`` template function now works correctly with the + ``-f/--format`` option. :bug:`3043` +- Fixed the ordering of items when manually selecting changes while updating + tags. Thanks to :user:`TaizoSimpson`. :bug:`3501` +- The ``%title`` template function now works correctly with apostrophes. Thanks + to :user:`GuilhermeHideki`. :bug:`3033` +- :doc:`/plugins/lastgenre`: It's now possible to set the ``prefer_specific`` + option without also setting ``canonical``. :bug:`2973` +- :doc:`/plugins/fetchart`: The plugin now respects the ``ignore`` and + ``ignore_hidden`` settings. :bug:`1632` +- :doc:`/plugins/hook`: Fix byte string interpolation in hook commands. :bug:`2967` :bug:`3167` -* :doc:`/plugins/the`: Log a message when something has changed, not when it - hasn't. - Thanks to :user:`arcresu`. - :bug:`3195` -* :doc:`/plugins/lastgenre`: The ``force`` config option now actually works. +- :doc:`/plugins/the`: Log a message when something has changed, not when it + hasn't. Thanks to :user:`arcresu`. :bug:`3195` +- :doc:`/plugins/lastgenre`: The ``force`` config option now actually works. 
:bug:`2704` :bug:`3054` -* Resizing image files with ImageMagick now avoids problems on systems where +- Resizing image files with ImageMagick now avoids problems on systems where there is a ``convert`` command that is *not* ImageMagick's by using the - ``magick`` executable when it is available. - Thanks to :user:`ababyduck`. + ``magick`` executable when it is available. Thanks to :user:`ababyduck`. :bug:`2093` :bug:`3236` There is one new thing for plugin developers to know about: -* In addition to prefix-based field queries, plugins can now define *named - queries* that are not associated with any specific field. - For example, the new :doc:`/plugins/playlist` supports queries like - ``playlist:name`` although there is no field named ``playlist``. - See :ref:`extend-query` for details. +- In addition to prefix-based field queries, plugins can now define *named + queries* that are not associated with any specific field. For example, the new + :doc:`/plugins/playlist` supports queries like ``playlist:name`` although + there is no field named ``playlist``. See :ref:`extend-query` for details. And some messages for packagers: -* Note the changes to the dependencies on :pypi:`jellyfish` and :pypi:`munkres`. -* The optional :pypi:`python-itunes` dependency has been removed. -* Python versions 3.7 and 3.8 are now supported. +- Note the changes to the dependencies on :pypi:`jellyfish` and :pypi:`munkres`. +- The optional :pypi:`python-itunes` dependency has been removed. +- Python versions 3.7 and 3.8 are now supported. 1.4.7 (May 29, 2018) -------------------- -This new release includes lots of new features in the importer and the -metadata source backends that it uses. -We've changed how the beets importer handles non-audio tracks listed in -metadata sources like MusicBrainz: +This new release includes lots of new features in the importer and the metadata +source backends that it uses. 
We've changed how the beets importer handles +non-audio tracks listed in metadata sources like MusicBrainz: -* The importer now ignores non-audio tracks (namely, data and video tracks) +- The importer now ignores non-audio tracks (namely, data and video tracks) listed in MusicBrainz. Also, a new option, :ref:`ignore_video_tracks`, lets - you return to the old behavior and include these video tracks. - :bug:`1210` -* A new importer option, :ref:`ignored_media`, can let you skip certain media - formats. - :bug:`2688` + you return to the old behavior and include these video tracks. :bug:`1210` +- A new importer option, :ref:`ignored_media`, can let you skip certain media + formats. :bug:`2688` There are other subtle improvements to metadata handling in the importer: -* In the MusicBrainz backend, beets now imports the - ``musicbrainz_releasetrackid`` field. This is a first step toward - :bug:`406`. +- In the MusicBrainz backend, beets now imports the + ``musicbrainz_releasetrackid`` field. This is a first step toward :bug:`406`. Thanks to :user:`Rawrmonkeys`. -* A new importer configuration option, :ref:`artist_credit`, will tell beets - to prefer the artist credit over the artist when autotagging. - :bug:`1249` +- A new importer configuration option, :ref:`artist_credit`, will tell beets to + prefer the artist credit over the artist when autotagging. :bug:`1249` And there are even more new features: -* :doc:`/plugins/replaygain`: The ``beet replaygain`` command now has +- :doc:`/plugins/replaygain`: The ``beet replaygain`` command now has ``--force``, ``--write`` and ``--nowrite`` options. :bug:`2778` -* A new importer configuration option, :ref:`incremental_skip_later`, lets you - avoid recording skipped directories to the list of "processed" directories - in :ref:`incremental` mode. This way, you can revisit them later with - another import. - Thanks to :user:`sekjun9878`. 
- :bug:`2773` -* :doc:`/plugins/fetchart`: The configuration options now support - finer-grained control via the ``sources`` option. You can now specify the - search order for different *matching strategies* within different backends. -* :doc:`/plugins/web`: A new ``cors_supports_credentials`` configuration - option lets in-browser clients communicate with the server even when it is - protected by an authorization mechanism (a proxy with HTTP authentication - enabled, for example). -* A new :doc:`/plugins/sonosupdate` plugin automatically notifies Sonos - controllers to update the music library when the beets library changes. - Thanks to :user:`cgtobi`. -* :doc:`/plugins/discogs`: The plugin now stores master release IDs into - ``mb_releasegroupid``. It also "simulates" track IDs using the release ID - and the track list position. - Thanks to :user:`dbogdanov`. - :bug:`2336` -* :doc:`/plugins/discogs`: Fetch the original year from master releases. +- A new importer configuration option, :ref:`incremental_skip_later`, lets you + avoid recording skipped directories to the list of "processed" directories in + :ref:`incremental` mode. This way, you can revisit them later with another + import. Thanks to :user:`sekjun9878`. :bug:`2773` +- :doc:`/plugins/fetchart`: The configuration options now support finer-grained + control via the ``sources`` option. You can now specify the search order for + different *matching strategies* within different backends. +- :doc:`/plugins/web`: A new ``cors_supports_credentials`` configuration option + lets in-browser clients communicate with the server even when it is protected + by an authorization mechanism (a proxy with HTTP authentication enabled, for + example). +- A new :doc:`/plugins/sonosupdate` plugin automatically notifies Sonos + controllers to update the music library when the beets library changes. Thanks + to :user:`cgtobi`. +- :doc:`/plugins/discogs`: The plugin now stores master release IDs into + ``mb_releasegroupid``. 
It also "simulates" track IDs using the release ID and + the track list position. Thanks to :user:`dbogdanov`. :bug:`2336` +- :doc:`/plugins/discogs`: Fetch the original year from master releases. :bug:`1122` There are lots and lots of fixes: -* :doc:`/plugins/replaygain`: Fix a corner-case with the ``bs1770gain`` backend +- :doc:`/plugins/replaygain`: Fix a corner-case with the ``bs1770gain`` backend where ReplayGain values were assigned to the wrong files. The plugin now - requires version 0.4.6 or later of the ``bs1770gain`` tool. - :bug:`2777` -* :doc:`/plugins/lyrics`: The plugin no longer crashes in the Genius source - when BeautifulSoup is not found. Instead, it just logs a message and - disables the source. - :bug:`2911` -* :doc:`/plugins/lyrics`: Handle network and API errors when communicating - with Genius. :bug:`2771` -* :doc:`/plugins/lyrics`: The ``lyrics`` command previously wrote ReST files - by default, even when you didn't ask for them. This default has been fixed. -* :doc:`/plugins/lyrics`: When writing ReST files, the ``lyrics`` command - now groups lyrics by the ``albumartist`` field, rather than ``artist``. + requires version 0.4.6 or later of the ``bs1770gain`` tool. :bug:`2777` +- :doc:`/plugins/lyrics`: The plugin no longer crashes in the Genius source when + BeautifulSoup is not found. Instead, it just logs a message and disables the + source. :bug:`2911` +- :doc:`/plugins/lyrics`: Handle network and API errors when communicating with + Genius. :bug:`2771` +- :doc:`/plugins/lyrics`: The ``lyrics`` command previously wrote ReST files by + default, even when you didn't ask for them. This default has been fixed. +- :doc:`/plugins/lyrics`: When writing ReST files, the ``lyrics`` command now + groups lyrics by the ``albumartist`` field, rather than ``artist``. 
:bug:`2924` -* Plugins can now see updated import task state, such as when rejecting the +- Plugins can now see updated import task state, such as when rejecting the initial candidates and finding new ones via a manual search. Notably, this - means that the importer prompt options that the :doc:`/plugins/edit` - provides show up more reliably after doing a secondary import search. - :bug:`2441` :bug:`2731` -* :doc:`/plugins/importadded`: Fix a crash on non-autotagged imports. - Thanks to :user:`m42i`. - :bug:`2601` :bug:`1918` -* :doc:`/plugins/plexupdate`: The Plex token is now redacted in configuration - output. - Thanks to :user:`Kovrinic`. - :bug:`2804` -* Avoid a crash when importing a non-ASCII filename when using an ASCII locale - on Unix under Python 3. - :bug:`2793` :bug:`2803` -* Fix a problem caused by time zone misalignment that could make date queries + means that the importer prompt options that the :doc:`/plugins/edit` provides + show up more reliably after doing a secondary import search. :bug:`2441` + :bug:`2731` +- :doc:`/plugins/importadded`: Fix a crash on non-autotagged imports. Thanks to + :user:`m42i`. :bug:`2601` :bug:`1918` +- :doc:`/plugins/plexupdate`: The Plex token is now redacted in configuration + output. Thanks to :user:`Kovrinic`. :bug:`2804` +- Avoid a crash when importing a non-ASCII filename when using an ASCII locale + on Unix under Python 3. :bug:`2793` :bug:`2803` +- Fix a problem caused by time zone misalignment that could make date queries fail to match certain dates that are near the edges of a range. For example, querying for dates within a certain month would fail to match dates within - hours of the end of that month. - :bug:`2652` -* :doc:`/plugins/convert`: The plugin now runs before other plugin-provided + hours of the end of that month. 
:bug:`2652` +- :doc:`/plugins/convert`: The plugin now runs before other plugin-provided import stages, which addresses an issue with generating ReplayGain data - incompatible between the source and target file formats. - Thanks to :user:`autrimpo`. - :bug:`2814` -* :doc:`/plugins/ftintitle`: The ``drop`` config option had no effect; it now - does what it says it should do. - :bug:`2817` -* Importing a release with multiple release events now selects the - event based on the order of your :ref:`preferred` countries rather than - the order of release events in MusicBrainz. :bug:`2816` -* :doc:`/plugins/web`: The time display in the web interface would incorrectly jump - at the 30-second mark of every minute. Now, it correctly changes over at zero - seconds. :bug:`2822` -* :doc:`/plugins/web`: Fetching album art now works (instead of throwing an - exception) under Python 3. - Additionally, the server will now return a 404 response when the album ID - is unknown (instead of throwing an exception and producing a 500 response). - :bug:`2823` -* :doc:`/plugins/web`: Fix an exception on Python 3 for filenames with + incompatible between the source and target file formats. Thanks to + :user:`autrimpo`. :bug:`2814` +- :doc:`/plugins/ftintitle`: The ``drop`` config option had no effect; it now + does what it says it should do. :bug:`2817` +- Importing a release with multiple release events now selects the event based + on the order of your :ref:`preferred` countries rather than the order of + release events in MusicBrainz. :bug:`2816` +- :doc:`/plugins/web`: The time display in the web interface would incorrectly + jump at the 30-second mark of every minute. Now, it correctly changes over at + zero seconds. :bug:`2822` +- :doc:`/plugins/web`: Fetching album art now works (instead of throwing an + exception) under Python 3. Additionally, the server will now return a 404 + response when the album ID is unknown (instead of throwing an exception and + producing a 500 response). 
:bug:`2823` +- :doc:`/plugins/web`: Fix an exception on Python 3 for filenames with non-Latin1 characters. (These characters are now converted to their ASCII - equivalents.) - :bug:`2815` -* Partially fix bash completion for subcommand names that contain hyphens. - Thanks to :user:`jhermann`. - :bug:`2836` :bug:`2837` -* :doc:`/plugins/replaygain`: Really fix album gain calculation using the + equivalents.) :bug:`2815` +- Partially fix bash completion for subcommand names that contain hyphens. + Thanks to :user:`jhermann`. :bug:`2836` :bug:`2837` +- :doc:`/plugins/replaygain`: Really fix album gain calculation using the GStreamer backend. :bug:`2846` -* Avoid an error when doing a "no-op" move on non-existent files (i.e., moving - a file onto itself). :bug:`2863` -* :doc:`/plugins/discogs`: Fix the ``medium`` and ``medium_index`` values, which - were occasionally incorrect for releases with two-sided mediums such as - vinyl. Also fix the ``medium_total`` value, which now contains total number - of tracks on the medium to which a track belongs, not the total number of - different mediums present on the release. - Thanks to :user:`dbogdanov`. - :bug:`2887` -* The importer now supports audio files contained in data tracks when they are +- Avoid an error when doing a "no-op" move on non-existent files (i.e., moving a + file onto itself). :bug:`2863` +- :doc:`/plugins/discogs`: Fix the ``medium`` and ``medium_index`` values, which + were occasionally incorrect for releases with two-sided mediums such as vinyl. + Also fix the ``medium_total`` value, which now contains total number of tracks + on the medium to which a track belongs, not the total number of different + mediums present on the release. Thanks to :user:`dbogdanov`. :bug:`2887` +- The importer now supports audio files contained in data tracks when they are listed in MusicBrainz: the corresponding audio tracks are now merged into the main track list. Thanks to :user:`jdetrey`. 
:bug:`1638` -* :doc:`/plugins/keyfinder`: Avoid a crash when trying to process unmatched +- :doc:`/plugins/keyfinder`: Avoid a crash when trying to process unmatched tracks. :bug:`2537` -* :doc:`/plugins/mbsync`: Support MusicBrainz recording ID changes, relying - on release track IDs instead. Thanks to :user:`jdetrey`. :bug:`1234` -* :doc:`/plugins/mbsync`: We can now successfully update albums even when the +- :doc:`/plugins/mbsync`: Support MusicBrainz recording ID changes, relying on + release track IDs instead. Thanks to :user:`jdetrey`. :bug:`1234` +- :doc:`/plugins/mbsync`: We can now successfully update albums even when the first track has a missing MusicBrainz recording ID. :bug:`2920` There are a couple of changes for developers: -* Plugins can now run their import stages *early*, before other plugins. Use - the ``early_import_stages`` list instead of plain ``import_stages`` to - request this behavior. - :bug:`2814` -* We again properly send ``albuminfo_received`` and ``trackinfo_received`` in +- Plugins can now run their import stages *early*, before other plugins. Use the + ``early_import_stages`` list instead of plain ``import_stages`` to request + this behavior. :bug:`2814` +- We again properly send ``albuminfo_received`` and ``trackinfo_received`` in all cases, most notably when using the ``mbsync`` plugin. This was a - regression since version 1.4.1. - :bug:`2921` + regression since version 1.4.1. :bug:`2921` 1.4.6 (December 21, 2017) ------------------------- -The highlight of this release is "album merging," an oft-requested option in -the importer to add new tracks to an existing album you already have in your +The highlight of this release is "album merging," an oft-requested option in the +importer to add new tracks to an existing album you already have in your library. This way, you no longer need to resort to removing the partial album from your library, combining the files manually, and importing again. 
Here are the larger new features in this release: -* When the importer finds duplicate albums, you can now merge all the +- When the importer finds duplicate albums, you can now merge all the tracks---old and new---together and try importing them as a single, combined - album. - Thanks to :user:`udiboy1209`. - :bug:`112` :bug:`2725` -* :doc:`/plugins/lyrics`: The plugin can now produce reStructuredText files - for beautiful, readable books of lyrics. Thanks to :user:`anarcat`. - :bug:`2628` -* A new :ref:`from_scratch` configuration option makes the importer remove old + album. Thanks to :user:`udiboy1209`. :bug:`112` :bug:`2725` +- :doc:`/plugins/lyrics`: The plugin can now produce reStructuredText files for + beautiful, readable books of lyrics. Thanks to :user:`anarcat`. :bug:`2628` +- A new :ref:`from_scratch` configuration option makes the importer remove old metadata before applying new metadata. This new feature complements the :doc:`zero </plugins/zero>` and :doc:`scrub </plugins/scrub>` plugins but is - slightly different: beets clears out all the old tags it knows about and - only keeps the new data it gets from the remote metadata source. - Thanks to :user:`tummychow`. - :bug:`934` :bug:`2755` + slightly different: beets clears out all the old tags it knows about and only + keeps the new data it gets from the remote metadata source. Thanks to + :user:`tummychow`. :bug:`934` :bug:`2755` There are also somewhat littler, but still great, new features: -* :doc:`/plugins/convert`: A new ``no_convert`` option lets you skip - transcoding items matching a query. Instead, the files are just copied - as-is. Thanks to :user:`Stunner`. - :bug:`2732` :bug:`2751` -* :doc:`/plugins/fetchart`: A new quiet switch that only prints out messages - when album art is missing. - Thanks to :user:`euri10`. - :bug:`2683` -* :doc:`/plugins/mbcollection`: You can configure a custom MusicBrainz - collection via the new ``collection`` configuration option. 
- :bug:`2685` -* :doc:`/plugins/mbcollection`: The collection update command can now remove +- :doc:`/plugins/convert`: A new ``no_convert`` option lets you skip transcoding + items matching a query. Instead, the files are just copied as-is. Thanks to + :user:`Stunner`. :bug:`2732` :bug:`2751` +- :doc:`/plugins/fetchart`: A new quiet switch that only prints out messages + when album art is missing. Thanks to :user:`euri10`. :bug:`2683` +- :doc:`/plugins/mbcollection`: You can configure a custom MusicBrainz + collection via the new ``collection`` configuration option. :bug:`2685` +- :doc:`/plugins/mbcollection`: The collection update command can now remove albums from collections that are longer in the beets library. -* :doc:`/plugins/fetchart`: The ``clearart`` command now asks for confirmation - before touching your files. - Thanks to :user:`konman2`. - :bug:`2708` :bug:`2427` -* :doc:`/plugins/mpdstats`: The plugin now correctly updates song statistics +- :doc:`/plugins/fetchart`: The ``clearart`` command now asks for confirmation + before touching your files. Thanks to :user:`konman2`. :bug:`2708` :bug:`2427` +- :doc:`/plugins/mpdstats`: The plugin now correctly updates song statistics when MPD switches from a song to a stream and when it plays the same song - multiple times consecutively. - :bug:`2707` -* :doc:`/plugins/acousticbrainz`: The plugin can now be configured to write only - a specific list of tags. - Thanks to :user:`woparry`. + multiple times consecutively. :bug:`2707` +- :doc:`/plugins/acousticbrainz`: The plugin can now be configured to write only + a specific list of tags. Thanks to :user:`woparry`. There are lots and lots of bug fixes: -* :doc:`/plugins/hook`: Fixed a problem where accessing non-string properties - of ``item`` or ``album`` (e.g., ``item.track``) would cause a crash. - Thanks to :user:`broddo`. 
- :bug:`2740` -* :doc:`/plugins/play`: When ``relative_to`` is set, the plugin correctly - emits relative paths even when querying for albums rather than tracks. - Thanks to :user:`j000`. - :bug:`2702` -* We suppress a spurious Python warning about a ``BrokenPipeError`` being - ignored. This was an issue when using beets in simple shell scripts. - Thanks to :user:`Azphreal`. - :bug:`2622` :bug:`2631` -* :doc:`/plugins/replaygain`: Fix a regression in the previous release related +- :doc:`/plugins/hook`: Fixed a problem where accessing non-string properties of + ``item`` or ``album`` (e.g., ``item.track``) would cause a crash. Thanks to + :user:`broddo`. :bug:`2740` +- :doc:`/plugins/play`: When ``relative_to`` is set, the plugin correctly emits + relative paths even when querying for albums rather than tracks. Thanks to + :user:`j000`. :bug:`2702` +- We suppress a spurious Python warning about a ``BrokenPipeError`` being + ignored. This was an issue when using beets in simple shell scripts. Thanks to + :user:`Azphreal`. :bug:`2622` :bug:`2631` +- :doc:`/plugins/replaygain`: Fix a regression in the previous release related to the new R128 tags. :bug:`2615` :bug:`2623` -* :doc:`/plugins/lyrics`: The MusixMatch backend now detects and warns - when the server has blocked the client. - Thanks to :user:`anarcat`. :bug:`2634` :bug:`2632` -* :doc:`/plugins/importfeeds`: Fix an error on Python 3 in certain +- :doc:`/plugins/lyrics`: The MusixMatch backend now detects and warns when the + server has blocked the client. Thanks to :user:`anarcat`. :bug:`2634` + :bug:`2632` +- :doc:`/plugins/importfeeds`: Fix an error on Python 3 in certain configurations. Thanks to :user:`djl`. :bug:`2467` :bug:`2658` -* :doc:`/plugins/edit`: Fix a bug when editing items during a re-import with - the ``-L`` flag. Previously, diffs against against unrelated items could be - shown or beets could crash. 
:bug:`2659` -* :doc:`/plugins/kodiupdate`: Fix the server URL and add better error - reporting. +- :doc:`/plugins/edit`: Fix a bug when editing items during a re-import with the + ``-L`` flag. Previously, diffs against unrelated items could be shown + or beets could crash. :bug:`2659` +- :doc:`/plugins/kodiupdate`: Fix the server URL and add better error reporting. :bug:`2662` -* Fixed a problem where "no-op" modifications would reset files' mtimes, +- Fixed a problem where "no-op" modifications would reset files' mtimes, resulting in unnecessary writes. This most prominently affected the :doc:`/plugins/edit` when saving the text file without making changes to some music. :bug:`2667` -* :doc:`/plugins/chroma`: Fix a crash when running the ``submit`` command on +- :doc:`/plugins/chroma`: Fix a crash when running the ``submit`` command on Python 3 on Windows with non-ASCII filenames. :bug:`2671` -* :doc:`/plugins/absubmit`: Fix an occasional crash on Python 3 when the AB +- :doc:`/plugins/absubmit`: Fix an occasional crash on Python 3 when the AB analysis tool produced non-ASCII metadata. :bug:`2673` -* :doc:`/plugins/duplicates`: Use the default tiebreak for items or albums - when the configuration only specifies a tiebreak for the other kind of - entity. - Thanks to :user:`cgevans`. - :bug:`2758` -* :doc:`/plugins/duplicates`: Fix the ``--key`` command line option, which was +- :doc:`/plugins/duplicates`: Use the default tiebreak for items or albums when + the configuration only specifies a tiebreak for the other kind of entity. + Thanks to :user:`cgevans`. :bug:`2758` +- :doc:`/plugins/duplicates`: Fix the ``--key`` command line option, which was ignored. -* :doc:`/plugins/replaygain`: Fix album ReplayGain calculation with the +- :doc:`/plugins/replaygain`: Fix album ReplayGain calculation with the GStreamer backend.
:bug:`2636` -* :doc:`/plugins/scrub`: Handle errors when manipulating files using newer +- :doc:`/plugins/scrub`: Handle errors when manipulating files using newer versions of Mutagen. :bug:`2716` -* :doc:`/plugins/fetchart`: The plugin no longer gets skipped during import - when the "Edit Candidates" option is used from the :doc:`/plugins/edit`. +- :doc:`/plugins/fetchart`: The plugin no longer gets skipped during import when + the "Edit Candidates" option is used from the :doc:`/plugins/edit`. :bug:`2734` -* Fix a crash when numeric metadata fields contain just a minus or plus sign +- Fix a crash when numeric metadata fields contain just a minus or plus sign with no following numbers. Thanks to :user:`eigengrau`. :bug:`2741` -* :doc:`/plugins/fromfilename`: Recognize file names that contain *only* a - track number, such as `01.mp3`. Also, the plugin now allows underscores as a - separator between fields. - Thanks to :user:`Vrihub`. - :bug:`2738` :bug:`2759` -* Fixed an issue where images would be resized according to their longest - edge, instead of their width, when using the ``maxwidth`` config option in - the :doc:`/plugins/fetchart` and :doc:`/plugins/embedart`. Thanks to +- :doc:`/plugins/fromfilename`: Recognize file names that contain *only* a track + number, such as ``01.mp3``. Also, the plugin now allows underscores as a + separator between fields. Thanks to :user:`Vrihub`. :bug:`2738` :bug:`2759` +- Fixed an issue where images would be resized according to their longest edge, + instead of their width, when using the ``maxwidth`` config option in the + :doc:`/plugins/fetchart` and :doc:`/plugins/embedart`. Thanks to :user:`sekjun9878`. :bug:`2729` There are some changes for developers: -* "Fixed fields" in Album and Item objects are now more strict about translating - missing values into type-specific null-like values. 
This should help in - cases where a string field is unexpectedly `None` sometimes instead of just +- "Fixed fields" in Album and Item objects are now more strict about translating + missing values into type-specific null-like values. This should help in cases + where a string field is unexpectedly ``None`` sometimes instead of just showing up as an empty string. :bug:`2605` -* Refactored the move functions the `beets.library` module and the - `manipulate_files` function in `beets.importer` to use a single parameter - describing the file operation instead of multiple Boolean flags. - There is a new numerated type describing how to move, copy, or link files. - :bug:`2682` +- Refactored the move functions in the ``beets.library`` module and the + ``manipulate_files`` function in ``beets.importer`` to use a single parameter + describing the file operation instead of multiple Boolean flags. There is a + new numerated type describing how to move, copy, or link files. :bug:`2682` 1.4.5 (June 20, 2017) --------------------- -Version 1.4.5 adds some oft-requested features. When you're importing files, -you can now manually set fields on the new music. Date queries have gotten -much more powerful: you can write precise queries down to the second, and we -now have *relative* queries like ``-1w``, which means *one week ago*. +Version 1.4.5 adds some oft-requested features. When you're importing files, you +can now manually set fields on the new music. Date queries have gotten much more +powerful: you can write precise queries down to the second, and we now have +*relative* queries like ``-1w``, which means *one week ago*. Here are the new features: -* You can now set fields to certain values during :ref:`import-cmd`, using +- You can now set fields to certain values during :ref:`import-cmd`, using either a ``--set field=value`` command-line flag or a new :ref:`set_fields` - configuration option under the `importer` section. - Thanks to :user:`bartkl`.
:bug:`1881` :bug:`2581` -* :ref:`Date queries <datequery>` can now include times, so you can filter - your music down to the second. Thanks to :user:`discopatrick`. :bug:`2506` + configuration option under the ``importer`` section. Thanks to :user:`bartkl`. + :bug:`1881` :bug:`2581` +- :ref:`Date queries <datequery>` can now include times, so you can filter your + music down to the second. Thanks to :user:`discopatrick`. :bug:`2506` :bug:`2528` -* :ref:`Date queries <datequery>` can also be *relative*. You can say - ``added:-1w..`` to match music added in the last week, for example. Thanks - to :user:`euri10`. :bug:`2598` -* A new :doc:`/plugins/gmusic` lets you interact with your Google Play Music +- :ref:`Date queries <datequery>` can also be *relative*. You can say + ``added:-1w..`` to match music added in the last week, for example. Thanks to + :user:`euri10`. :bug:`2598` +- A new ``/plugins/gmusic`` lets you interact with your Google Play Music library. Thanks to :user:`tigranl`. :bug:`2553` :bug:`2586` -* :doc:`/plugins/replaygain`: We now keep R128 data in separate tags from +- :doc:`/plugins/replaygain`: We now keep R128 data in separate tags from classic ReplayGain data for formats that need it (namely, Ogg Opus). A new - `r128` configuration option enables this behavior for specific formats. + ``r128`` configuration option enables this behavior for specific formats. Thanks to :user:`autrimpo`. :bug:`2557` :bug:`2560` -* The :ref:`move-cmd` command gained a new ``--export`` flag, which copies - files to an external location without changing their paths in the library - database. Thanks to :user:`SpirosChadoulos`. :bug:`435` :bug:`2510` +- The :ref:`move-cmd` command gained a new ``--export`` flag, which copies files + to an external location without changing their paths in the library database. + Thanks to :user:`SpirosChadoulos`. 
:bug:`435` :bug:`2510` There are also some bug fixes: -* :doc:`/plugins/lastgenre`: Fix a crash when using the `prefer_specific` and - `canonical` options together. Thanks to :user:`yacoob`. :bug:`2459` +- :doc:`/plugins/lastgenre`: Fix a crash when using the ``prefer_specific`` and + ``canonical`` options together. Thanks to :user:`yacoob`. :bug:`2459` :bug:`2583` -* :doc:`/plugins/web`: Fix a crash on Windows under Python 2 when serving +- :doc:`/plugins/web`: Fix a crash on Windows under Python 2 when serving non-ASCII filenames. Thanks to :user:`robot3498712`. :bug:`2592` :bug:`2593` -* :doc:`/plugins/metasync`: Fix a crash in the Amarok backend when filenames +- :doc:`/plugins/metasync`: Fix a crash in the Amarok backend when filenames contain quotes. Thanks to :user:`aranc23`. :bug:`2595` :bug:`2596` -* More informative error messages are displayed when the file format is not +- More informative error messages are displayed when the file format is not recognized. :bug:`2599` 1.4.4 (June 10, 2017) --------------------- This release built up a longer-than-normal list of nifty new features. We now -support DSF audio files and the importer can hard-link your files, for -example. +support DSF audio files and the importer can hard-link your files, for example. Here's a full list of new features: -* Added support for DSF files, once a future version of Mutagen is released - that supports them. Thanks to :user:`docbobo`. :bug:`459` :bug:`2379` -* A new :ref:`hardlink` config option instructs the importer to create hard +- Added support for DSF files, once a future version of Mutagen is released that + supports them. Thanks to :user:`docbobo`. :bug:`459` :bug:`2379` +- A new :ref:`hardlink` config option instructs the importer to create hard links on filesystems that support them. Thanks to :user:`jacobwgillespie`. :bug:`2445` -* A new :doc:`/plugins/kodiupdate` lets you keep your Kodi library in sync - with beets. Thanks to :user:`Pauligrinder`. 
:bug:`2411` -* A new :ref:`bell` configuration option under the ``import`` section enables - a terminal bell when input is required. Thanks to :user:`SpirosChadoulos`. +- A new :doc:`/plugins/kodiupdate` lets you keep your Kodi library in sync with + beets. Thanks to :user:`Pauligrinder`. :bug:`2411` +- A new :ref:`bell` configuration option under the ``import`` section enables a + terminal bell when input is required. Thanks to :user:`SpirosChadoulos`. :bug:`2366` :bug:`2495` -* A new field, ``composer_sort``, is now supported and fetched from - MusicBrainz. - Thanks to :user:`dosoe`. - :bug:`2519` :bug:`2529` -* The MusicBrainz backend and :doc:`/plugins/discogs` now both provide a new - attribute called ``track_alt`` that stores more nuanced, possibly - non-numeric track index data. For example, some vinyl or tape media will - report the side of the record using a letter instead of a number in that - field. :bug:`1831` :bug:`2363` -* :doc:`/plugins/web`: Added a new endpoint, ``/item/path/foo``, which will +- A new field, ``composer_sort``, is now supported and fetched from MusicBrainz. + Thanks to :user:`dosoe`. :bug:`2519` :bug:`2529` +- The MusicBrainz backend and :doc:`/plugins/discogs` now both provide a new + attribute called ``track_alt`` that stores more nuanced, possibly non-numeric + track index data. For example, some vinyl or tape media will report the side + of the record using a letter instead of a number in that field. :bug:`1831` + :bug:`2363` +- :doc:`/plugins/web`: Added a new endpoint, ``/item/path/foo``, which will return the item info for the file at the given path, or 404. -* :doc:`/plugins/web`: Added a new config option, ``include_paths``, - which will cause paths to be included in item API responses if set to true. -* The ``%aunique`` template function for :ref:`aunique` now takes a third - argument that specifies which brackets to use around the disambiguator - value. 
The argument can be any two characters that represent the left and - right brackets. It defaults to `[]` and can also be blank to turn off - bracketing. :bug:`2397` :bug:`2399` -* Added a ``--move`` or ``-m`` option to the importer so that the files can be - moved to the library instead of being copied or added "in place." - :bug:`2252` :bug:`2429` -* :doc:`/plugins/badfiles`: Added a ``--verbose`` or ``-v`` option. Results are +- :doc:`/plugins/web`: Added a new config option, ``include_paths``, which will + cause paths to be included in item API responses if set to true. +- The ``%aunique`` template function for :ref:`aunique` now takes a third + argument that specifies which brackets to use around the disambiguator value. + The argument can be any two characters that represent the left and right + brackets. It defaults to ``[]`` and can also be blank to turn off bracketing. + :bug:`2397` :bug:`2399` +- Added a ``--move`` or ``-m`` option to the importer so that the files can be + moved to the library instead of being copied or added "in place." :bug:`2252` + :bug:`2429` +- :doc:`/plugins/badfiles`: Added a ``--verbose`` or ``-v`` option. Results are now displayed only for corrupted files by default and for all the files when the verbose option is set. :bug:`1654` :bug:`2434` -* :doc:`/plugins/embedart`: The explicit ``embedart`` command now asks for - confirmation before embedding art into music files. Thanks to - :user:`Stunner`. :bug:`1999` -* You can now run beets by typing `python -m beets`. :bug:`2453` -* :doc:`/plugins/smartplaylist`: Different playlist specifications that - generate identically-named playlist files no longer conflict; instead, the - resulting lists of tracks are concatenated. :bug:`2468` -* :doc:`/plugins/missing`: A new mode lets you see missing albums from artists +- :doc:`/plugins/embedart`: The explicit ``embedart`` command now asks for + confirmation before embedding art into music files. Thanks to :user:`Stunner`. 
+ :bug:`1999` +- You can now run beets by typing ``python -m beets``. :bug:`2453` +- :doc:`/plugins/smartplaylist`: Different playlist specifications that generate + identically-named playlist files no longer conflict; instead, the resulting + lists of tracks are concatenated. :bug:`2468` +- :doc:`/plugins/missing`: A new mode lets you see missing albums from artists you have in your library. Thanks to :user:`qlyoung`. :bug:`2481` -* :doc:`/plugins/web` : Add new `reverse_proxy` config option to allow serving +- :doc:`/plugins/web` : Add new ``reverse_proxy`` config option to allow serving the web plugins under a reverse proxy. -* Importing a release with multiple release events now selects the - event based on your :ref:`preferred` countries. :bug:`2501` -* :doc:`/plugins/play`: A new ``-y`` or ``--yes`` parameter lets you skip - the warning message if you enqueue more items than the warning threshold - usually allows. -* Fix a bug where commands which forked subprocesses would sometimes prevent - further inputs. This bug mainly affected :doc:`/plugins/convert`. - Thanks to :user:`jansol`. - :bug:`2488` - :bug:`2524` +- Importing a release with multiple release events now selects the event based + on your :ref:`preferred` countries. :bug:`2501` +- :doc:`/plugins/play`: A new ``-y`` or ``--yes`` parameter lets you skip the + warning message if you enqueue more items than the warning threshold usually + allows. +- Fix a bug where commands which forked subprocesses would sometimes prevent + further inputs. This bug mainly affected :doc:`/plugins/convert`. Thanks to + :user:`jansol`. :bug:`2488` :bug:`2524` There are also quite a few fixes: -* In the :ref:`replace` configuration option, we now replace a leading hyphen +- In the :ref:`replace` configuration option, we now replace a leading hyphen (-) with an underscore. 
:bug:`549` :bug:`2509` -* :doc:`/plugins/absubmit`: We no longer filter audio files for specific +- :doc:`/plugins/absubmit`: We no longer filter audio files for specific formats---we will attempt the submission process for all formats. :bug:`2471` -* :doc:`/plugins/mpdupdate`: Fix Python 3 compatibility. :bug:`2381` -* :doc:`/plugins/replaygain`: Fix Python 3 compatibility in the ``bs1770gain`` +- :doc:`/plugins/mpdupdate`: Fix Python 3 compatibility. :bug:`2381` +- :doc:`/plugins/replaygain`: Fix Python 3 compatibility in the ``bs1770gain`` backend. :bug:`2382` -* :doc:`/plugins/bpd`: Report playback times as integers. :bug:`2394` -* :doc:`/plugins/mpdstats`: Fix Python 3 compatibility. The plugin also now +- :doc:`/plugins/bpd`: Report playback times as integers. :bug:`2394` +- :doc:`/plugins/mpdstats`: Fix Python 3 compatibility. The plugin also now requires version 0.4.2 or later of the ``python-mpd2`` library. :bug:`2405` -* :doc:`/plugins/mpdstats`: Improve handling of MPD status queries. -* :doc:`/plugins/badfiles`: Fix Python 3 compatibility. -* Fix some cases where album-level ReplayGain/SoundCheck metadata would be +- :doc:`/plugins/mpdstats`: Improve handling of MPD status queries. +- :doc:`/plugins/badfiles`: Fix Python 3 compatibility. +- Fix some cases where album-level ReplayGain/SoundCheck metadata would be written to files incorrectly. :bug:`2426` -* :doc:`/plugins/badfiles`: The command no longer bails out if the validator +- :doc:`/plugins/badfiles`: The command no longer bails out if the validator command is not found or exits with an error. :bug:`2430` :bug:`2433` -* :doc:`/plugins/lyrics`: The Google search backend no longer crashes when the +- :doc:`/plugins/lyrics`: The Google search backend no longer crashes when the server responds with an error. :bug:`2437` -* :doc:`/plugins/discogs`: You can now authenticate with Discogs using a +- :doc:`/plugins/discogs`: You can now authenticate with Discogs using a personal access token. 
:bug:`2447` -* Fix Python 3 compatibility when extracting rar archives in the importer. +- Fix Python 3 compatibility when extracting rar archives in the importer. Thanks to :user:`Lompik`. :bug:`2443` :bug:`2448` -* :doc:`/plugins/duplicates`: Fix Python 3 compatibility when using the - ``copy`` and ``move`` options. :bug:`2444` -* :doc:`/plugins/mbsubmit`: The tracks are now sorted properly. Thanks to +- :doc:`/plugins/duplicates`: Fix Python 3 compatibility when using the ``copy`` + and ``move`` options. :bug:`2444` +- :doc:`/plugins/mbsubmit`: The tracks are now sorted properly. Thanks to :user:`awesomer`. :bug:`2457` -* :doc:`/plugins/thumbnails`: Fix a string-related crash on Python 3. +- :doc:`/plugins/thumbnails`: Fix a string-related crash on Python 3. :bug:`2466` -* :doc:`/plugins/beatport`: More than just 10 songs are now fetched per album. +- :doc:`/plugins/beatport`: More than just 10 songs are now fetched per album. :bug:`2469` -* On Python 3, the :ref:`terminal_encoding` setting is respected again for - output and printing will no longer crash on systems configured with a - limited encoding. -* :doc:`/plugins/convert`: The default configuration uses FFmpeg's built-in - AAC codec instead of faac. Thanks to :user:`jansol`. :bug:`2484` -* Fix the importer's detection of multi-disc albums when other subdirectories +- On Python 3, the :ref:`terminal_encoding` setting is respected again for + output and printing will no longer crash on systems configured with a limited + encoding. +- :doc:`/plugins/convert`: The default configuration uses FFmpeg's built-in AAC + codec instead of faac. Thanks to :user:`jansol`. :bug:`2484` +- Fix the importer's detection of multi-disc albums when other subdirectories are present. :bug:`2493` -* Invalid date queries now print an error message instead of being silently +- Invalid date queries now print an error message instead of being silently ignored. Thanks to :user:`discopatrick`. 
:bug:`2513` :bug:`2517` -* When the SQLite database stops being accessible, we now print a friendly - error message. Thanks to :user:`Mary011196`. :bug:`1676` :bug:`2508` -* :doc:`/plugins/web`: Avoid a crash when sending binary data, such as +- When the SQLite database stops being accessible, we now print a friendly error + message. Thanks to :user:`Mary011196`. :bug:`1676` :bug:`2508` +- :doc:`/plugins/web`: Avoid a crash when sending binary data, such as Chromaprint fingerprints, in music attributes. :bug:`2542` :bug:`2532` -* Fix a hang when parsing templates that end in newlines. :bug:`2562` -* Fix a crash when reading non-ASCII characters in configuration files on +- Fix a hang when parsing templates that end in newlines. :bug:`2562` +- Fix a crash when reading non-ASCII characters in configuration files on Windows under Python 3. :bug:`2456` :bug:`2565` :bug:`2566` We removed backends from two metadata plugins because of bitrot: -* :doc:`/plugins/lyrics`: The Lyrics.com backend has been removed. (It stopped - working because of changes to the site's URL structure.) - :bug:`2548` :bug:`2549` -* :doc:`/plugins/fetchart`: The documentation no longer recommends iTunes - Store artwork lookup because the unmaintained `python-itunes`_ is broken. - Want to adopt it? :bug:`2371` :bug:`1610` +- :doc:`/plugins/lyrics`: The Lyrics.com backend has been removed. (It stopped + working because of changes to the site's URL structure.) :bug:`2548` + :bug:`2549` +- :doc:`/plugins/fetchart`: The documentation no longer recommends iTunes Store + artwork lookup because the unmaintained python-itunes_ is broken. Want to + adopt it? :bug:`2371` :bug:`1610` .. _python-itunes: https://github.com/ocelma/python-itunes @@ -1901,86 +2053,84 @@ Happy new year! This new version includes a cornucopia of new features from contributors, including new tags related to classical music and a new :doc:`/plugins/absubmit` for performing acoustic analysis on your music. 
The :doc:`/plugins/random` has a new mode that lets you generate time-limited -music---for example, you might generate a random playlist that lasts the -perfect length for your walk to work. We also access as many Web services as -possible over secure connections now---HTTPS everywhere! +music---for example, you might generate a random playlist that lasts the perfect +length for your walk to work. We also access as many Web services as possible +over secure connections now---HTTPS everywhere! The most visible new features are: -* We now support the composer, lyricist, and arranger tags. The MusicBrainz - data source will fetch data for these fields when the next version of - `python-musicbrainzngs`_ is released. Thanks to :user:`ibmibmibm`. - :bug:`506` :bug:`507` :bug:`1547` :bug:`2333` -* A new :doc:`/plugins/absubmit` lets you run acoustic analysis software and +- We now support the composer, lyricist, and arranger tags. The MusicBrainz data + source will fetch data for these fields when the next version of + python-musicbrainzngs_ is released. Thanks to :user:`ibmibmibm`. :bug:`506` + :bug:`507` :bug:`1547` :bug:`2333` +- A new :doc:`/plugins/absubmit` lets you run acoustic analysis software and upload the results for others to use. Thanks to :user:`inytar`. :bug:`2253` :bug:`2342` -* :doc:`/plugins/play`: The plugin now provides an importer prompt choice to - play the music you're about to import. Thanks to :user:`diomekes`. - :bug:`2008` :bug:`2360` -* We now use SSL to access Web services whenever possible. That includes - MusicBrainz itself, several album art sources, some lyrics sources, and - other servers. Thanks to :user:`tigranl`. :bug:`2307` -* :doc:`/plugins/random`: A new ``--time`` option lets you generate a random +- :doc:`/plugins/play`: The plugin now provides an importer prompt choice to + play the music you're about to import. Thanks to :user:`diomekes`. :bug:`2008` + :bug:`2360` +- We now use SSL to access Web services whenever possible. 
That includes + MusicBrainz itself, several album art sources, some lyrics sources, and other + servers. Thanks to :user:`tigranl`. :bug:`2307` +- :doc:`/plugins/random`: A new ``--time`` option lets you generate a random playlist that takes a given amount of time. Thanks to :user:`diomekes`. :bug:`2305` :bug:`2322` Some smaller new features: -* :doc:`/plugins/zero`: A new ``zero`` command manually triggers the zero +- :doc:`/plugins/zero`: A new ``zero`` command manually triggers the zero plugin. Thanks to :user:`SJoshBrown`. :bug:`2274` :bug:`2329` -* :doc:`/plugins/acousticbrainz`: The plugin will avoid re-downloading data - for files that already have it by default. You can override this behavior - using a new ``force`` option. Thanks to :user:`SusannaMaria`. :bug:`2347` - :bug:`2349` -* :doc:`/plugins/bpm`: The ``import.write`` configuration option now - decides whether or not to write tracks after updating their BPM. :bug:`1992` +- :doc:`/plugins/acousticbrainz`: The plugin will avoid re-downloading data for + files that already have it by default. You can override this behavior using a + new ``force`` option. Thanks to :user:`SusannaMaria`. :bug:`2347` :bug:`2349` +- :doc:`/plugins/bpm`: The ``import.write`` configuration option now decides + whether or not to write tracks after updating their BPM. :bug:`1992` And the fixes: -* :doc:`/plugins/bpd`: Fix a crash on non-ASCII MPD commands. :bug:`2332` -* :doc:`/plugins/scrub`: Avoid a crash when files cannot be read or written. +- :doc:`/plugins/bpd`: Fix a crash on non-ASCII MPD commands. :bug:`2332` +- :doc:`/plugins/scrub`: Avoid a crash when files cannot be read or written. :bug:`2351` -* :doc:`/plugins/scrub`: The image type values on scrubbed files are preserved +- :doc:`/plugins/scrub`: The image type values on scrubbed files are preserved instead of being reset to "other." 
:bug:`2339` -* :doc:`/plugins/web`: Fix a crash on Python 3 when serving files from the +- :doc:`/plugins/web`: Fix a crash on Python 3 when serving files from the filesystem. :bug:`2353` -* :doc:`/plugins/discogs`: Improve the handling of releases that contain +- :doc:`/plugins/discogs`: Improve the handling of releases that contain subtracks. :bug:`2318` -* :doc:`/plugins/discogs`: Fix a crash when a release does not contain format +- :doc:`/plugins/discogs`: Fix a crash when a release does not contain format information, and increase robustness when other fields are missing. :bug:`2302` -* :doc:`/plugins/lyrics`: The plugin now reports a beets-specific User-Agent +- :doc:`/plugins/lyrics`: The plugin now reports a beets-specific User-Agent header when requesting lyrics. :bug:`2357` -* :doc:`/plugins/embyupdate`: The plugin now checks whether an API key or a +- :doc:`/plugins/embyupdate`: The plugin now checks whether an API key or a password is provided in the configuration. -* :doc:`/plugins/play`: The misspelled configuration option - ``warning_treshold`` is no longer supported. +- :doc:`/plugins/play`: The misspelled configuration option ``warning_treshold`` + is no longer supported. For plugin developers: when providing new importer prompt choices (see :ref:`append_prompt_choices`), you can now provide new candidates for the user -to consider. For example, you might provide an alternative strategy for -picking between the available alternatives or for looking up a release on -MusicBrainz. +to consider. For example, you might provide an alternative strategy for picking +between the available alternatives or for looking up a release on MusicBrainz. 1.4.2 (December 16, 2016) ------------------------- This is just a little bug fix release. With 1.4.2, we're also confident enough to recommend that anyone who's interested give Python 3 a try: bugs may still -lurk, but we've deemed things safe enough for broad adoption. 
If you can, -please install beets with ``pip3`` instead of ``pip2`` this time and let us -know how it goes! +lurk, but we've deemed things safe enough for broad adoption. If you can, please +install beets with ``pip3`` instead of ``pip2`` this time and let us know how it +goes! Here are the fixes: -* :doc:`/plugins/badfiles`: Fix a crash on non-ASCII filenames. :bug:`2299` -* The ``%asciify{}`` path formatting function and the :ref:`asciify-paths` +- :doc:`/plugins/badfiles`: Fix a crash on non-ASCII filenames. :bug:`2299` +- The ``%asciify{}`` path formatting function and the :ref:`asciify-paths` setting properly substitute path separators generated by converting some Unicode characters, such as ½ and ¢, into ASCII. -* :doc:`/plugins/convert`: Fix a logging-related crash when filenames contain +- :doc:`/plugins/convert`: Fix a logging-related crash when filenames contain curly braces. Thanks to :user:`kierdavis`. :bug:`2323` -* We've rolled back some changes to the included zsh completion script that - were causing problems for some users. :bug:`2266` +- We've rolled back some changes to the included zsh completion script that were + causing problems for some users. :bug:`2266` Also, we've removed some special handling for logging in the :doc:`/plugins/discogs` that we believe was unnecessary. If spurious log @@ -1990,98 +2140,94 @@ messages appear in this version, please let us know by filing a bug. ------------------------- Version 1.4 has **alpha-level** Python 3 support. Thanks to the heroic efforts -of :user:`jrobeson`, beets should run both under Python 2.7, as before, and -now under Python 3.4 and above. The support is still new: it undoubtedly -contains bugs, so it may replace all your music with Limp Bizkit---but if -you're brave and you have backups, please try installing on Python 3. Let us -know how it goes. +of :user:`jrobeson`, beets should run both under Python 2.7, as before, and now +under Python 3.4 and above. 
The support is still new: it undoubtedly contains +bugs, so it may replace all your music with Limp Bizkit---but if you're brave +and you have backups, please try installing on Python 3. Let us know how it +goes. If you package beets for distribution, here's what you'll want to know: -* This version of beets now depends on the `six`_ library. -* We also bumped our minimum required version of `Mutagen`_ to 1.33 (from - 1.27). -* Please don't package beets as a Python 3 application *yet*, even though most +- This version of beets now depends on the six_ library. +- We also bumped our minimum required version of Mutagen_ to 1.33 (from 1.27). +- Please don't package beets as a Python 3 application *yet*, even though most things work under Python 3.4 and later. This version also makes a few changes to the command-line interface and configuration that you may need to know about: -* :doc:`/plugins/duplicates`: The ``duplicates`` command no longer accepts +- :doc:`/plugins/duplicates`: The ``duplicates`` command no longer accepts multiple field arguments in the form ``-k title albumartist album``. Each argument must be prefixed with ``-k``, as in ``-k title -k albumartist -k album``. -* The old top-level ``colors`` configuration option has been removed (the +- The old top-level ``colors`` configuration option has been removed (the setting is now under ``ui``). -* The deprecated ``list_format_album`` and ``list_format_item`` - configuration options have been removed (see :ref:`format_album` and - :ref:`format_item`). +- The deprecated ``list_format_album`` and ``list_format_item`` configuration + options have been removed (see :ref:`format_album` and :ref:`format_item`). 
The are a few new features: -* :doc:`/plugins/mpdupdate`, :doc:`/plugins/mpdstats`: When the ``host`` option +- :doc:`/plugins/mpdupdate`, :doc:`/plugins/mpdstats`: When the ``host`` option is not set, these plugins will now look for the ``$MPD_HOST`` environment variable before falling back to ``localhost``. Thanks to :user:`tarruda`. :bug:`2175` -* :doc:`/plugins/web`: Added an ``expand`` option to show the items of an - album. :bug:`2050` -* :doc:`/plugins/embyupdate`: The plugin can now use an API key instead of a +- :doc:`/plugins/web`: Added an ``expand`` option to show the items of an album. + :bug:`2050` +- :doc:`/plugins/embyupdate`: The plugin can now use an API key instead of a password to authenticate with Emby. :bug:`2045` :bug:`2117` -* :doc:`/plugins/acousticbrainz`: The plugin now adds a ``bpm`` field. -* ``beet --version`` now includes the Python version used to run beets. -* :doc:`/reference/pathformat` can now include unescaped commas (``,``) when +- :doc:`/plugins/acousticbrainz`: The plugin now adds a ``bpm`` field. +- ``beet --version`` now includes the Python version used to run beets. +- :doc:`/reference/pathformat` can now include unescaped commas (``,``) when they are not part of a function call. :bug:`2166` :bug:`2213` -* The :ref:`update-cmd` command takes a new ``-F`` flag to specify the fields - to update. Thanks to :user:`dangmai`. :bug:`2229` :bug:`2231` +- The :ref:`update-cmd` command takes a new ``-F`` flag to specify the fields to + update. Thanks to :user:`dangmai`. :bug:`2229` :bug:`2231` And there are a few bug fixes too: -* :doc:`/plugins/convert`: The plugin no longer asks for confirmation if the +- :doc:`/plugins/convert`: The plugin no longer asks for confirmation if the query did not return anything to convert. :bug:`2260` :bug:`2262` -* :doc:`/plugins/embedart`: The plugin now uses ``jpg`` as an extension rather - than ``jpeg``, to ensure consistency with the :doc:`plugins/fetchart`. - Thanks to :user:`tweitzel`. 
:bug:`2254` :bug:`2255` -* :doc:`/plugins/embedart`: The plugin now works for all jpeg files, including - those that are only recognizable by their magic bytes. - :bug:`1545` :bug:`2255` -* :doc:`/plugins/web`: The JSON output is no longer pretty-printed (for a - space savings). :bug:`2050` -* :doc:`/plugins/permissions`: Fix a regression in the previous release where +- :doc:`/plugins/embedart`: The plugin now uses ``jpg`` as an extension rather + than ``jpeg``, to ensure consistency with the :doc:`plugins/fetchart`. Thanks + to :user:`tweitzel`. :bug:`2254` :bug:`2255` +- :doc:`/plugins/embedart`: The plugin now works for all jpeg files, including + those that are only recognizable by their magic bytes. :bug:`1545` :bug:`2255` +- :doc:`/plugins/web`: The JSON output is no longer pretty-printed (for a space + savings). :bug:`2050` +- :doc:`/plugins/permissions`: Fix a regression in the previous release where the plugin would always fail to set permissions (and log a warning). :bug:`2089` -* :doc:`/plugins/beatport`: Use track numbers from Beatport (instead of - determining them from the order of tracks) and set the `medium_index` - value. -* With :ref:`per_disc_numbering` enabled, some metadata sources (notably, the +- :doc:`/plugins/beatport`: Use track numbers from Beatport (instead of + determining them from the order of tracks) and set the ``medium_index`` value. +- With :ref:`per_disc_numbering` enabled, some metadata sources (notably, the :doc:`/plugins/beatport`) would not set the track number at all. This is fixed. :bug:`2085` -* :doc:`/plugins/play`: Fix ``$args`` getting passed verbatim to the play +- :doc:`/plugins/play`: Fix ``$args`` getting passed verbatim to the play command if it was set in the configuration but ``-A`` or ``--args`` was omitted. -* With :ref:`ignore_hidden` enabled, non-UTF-8 filenames would cause a crash. +- With :ref:`ignore_hidden` enabled, non-UTF-8 filenames would cause a crash. This is fixed. 
:bug:`2168` -* :doc:`/plugins/embyupdate`: Fixes authentication header problem that caused - a problem that it was not possible to get tokens from the Emby API. -* :doc:`/plugins/lyrics`: Some titles use a colon to separate the main title - from a subtitle. To find more matches, the plugin now also searches for - lyrics using the part part preceding the colon character. :bug:`2206` -* Fix a crash when a query uses a date field and some items are missing that +- :doc:`/plugins/embyupdate`: Fixes authentication header problem that caused a + problem where it was not possible to get tokens from the Emby API. +- :doc:`/plugins/lyrics`: Some titles use a colon to separate the main title + from a subtitle. To find more matches, the plugin now also searches for lyrics + using the part preceding the colon character. :bug:`2206` +- Fix a crash when a query uses a date field and some items are missing that field. :bug:`1938` -* :doc:`/plugins/discogs`: Subtracks are now detected and combined into a - single track, two-sided mediums are treated as single discs, and tracks - have ``media``, ``medium_total`` and ``medium`` set correctly. :bug:`2222` +- :doc:`/plugins/discogs`: Subtracks are now detected and combined into a single + track, two-sided mediums are treated as single discs, and tracks have + ``media``, ``medium_total`` and ``medium`` set correctly. :bug:`2222` :bug:`2228`. -* :doc:`/plugins/missing`: ``missing`` is now treated as an integer, allowing +- :doc:`/plugins/missing`: ``missing`` is now treated as an integer, allowing the use of (for example) ranges in queries. -* :doc:`/plugins/smartplaylist`: Playlist names will be sanitized to - ensure valid filenames. :bug:`2258` +- :doc:`/plugins/smartplaylist`: Playlist names will be sanitized to ensure + valid filenames. :bug:`2258` -* The ID3 APIC tag now uses the Latin-1 encoding when possible instead of a +- The ID3 APIC tag now uses the Latin-1 encoding when possible instead of a Unicode encoding. 
This should increase compatibility with other software, especially with iTunes and when using ID3v2.3. Thanks to :user:`lazka`. :bug:`899` :bug:`2264` :bug:`2270` -The last release, 1.3.19, also erroneously reported its version as "1.3.18" -when you typed ``beet version``. This has been corrected. +The last release, 1.3.19, also erroneously reported its version as "1.3.18" when +you typed ``beet version``. This has been corrected. .. _six: https://pypi.org/project/six/ @@ -2097,42 +2243,42 @@ this herald a new age of cross-platform reliability for beets. New features: -* :doc:`/plugins/beatport`: This metadata source plugin has arisen from the +- :doc:`/plugins/beatport`: This metadata source plugin has arisen from the dead! It now works with Beatport's new OAuth-based API. Thanks to :user:`jbaiter`. :bug:`1989` :bug:`2067` -* :doc:`/plugins/bpd`: The plugin now uses the modern GStreamer 1.0 instead of +- :doc:`/plugins/bpd`: The plugin now uses the modern GStreamer 1.0 instead of the old 0.10. Thanks to :user:`philippbeckmann`. :bug:`2057` :bug:`2062` -* A new ``--force`` option for the :ref:`remove-cmd` command allows removal of +- A new ``--force`` option for the :ref:`remove-cmd` command allows removal of items without prompting beforehand. :bug:`2042` -* A new :ref:`duplicate_action` importer config option controls how duplicate +- A new :ref:`duplicate_action` importer config option controls how duplicate albums or tracks treated in import task. :bug:`185` Some fixes for Windows: -* Queries are now detected as paths when they contain backslashes (in - addition to forward slashes). This only applies on Windows. -* :doc:`/plugins/embedart`: Image similarity comparison with ImageMagick - should now work on Windows. -* :doc:`/plugins/fetchart`: The plugin should work more reliably with - non-ASCII paths. +- Queries are now detected as paths when they contain backslashes (in addition + to forward slashes). This only applies on Windows. 
+- :doc:`/plugins/embedart`: Image similarity comparison with ImageMagick should + now work on Windows. +- :doc:`/plugins/fetchart`: The plugin should work more reliably with non-ASCII + paths. And other fixes: -* :doc:`/plugins/replaygain`: The ``bs1770gain`` backend now correctly - calculates sample peak instead of true peak. This comes with a major - speed increase. :bug:`2031` -* :doc:`/plugins/lyrics`: Avoid a crash and a spurious warning introduced in - the last version about a Google API key, which appeared even when you hadn't +- :doc:`/plugins/replaygain`: The ``bs1770gain`` backend now correctly + calculates sample peak instead of true peak. This comes with a major speed + increase. :bug:`2031` +- :doc:`/plugins/lyrics`: Avoid a crash and a spurious warning introduced in the + last version about a Google API key, which appeared even when you hadn't enabled the Google lyrics source. -* Fix a hard-coded path to ``bash-completion`` to work better with Homebrew +- Fix a hard-coded path to ``bash-completion`` to work better with Homebrew installations. Thanks to :user:`bismark`. :bug:`2038` -* Fix a crash introduced in the previous version when the standard input was +- Fix a crash introduced in the previous version when the standard input was connected to a Unix pipe. :bug:`2041` -* Fix a crash when specifying non-ASCII format strings on the command line - with the ``-f`` option for many commands. :bug:`2063` -* :doc:`/plugins/fetchart`: Determine the file extension for downloaded images - based on the image's magic bytes. The plugin prints a warning if result is - not consistent with the server-supplied ``Content-Type`` header. In previous +- Fix a crash when specifying non-ASCII format strings on the command line with + the ``-f`` option for many commands. :bug:`2063` +- :doc:`/plugins/fetchart`: Determine the file extension for downloaded images + based on the image's magic bytes. 
The plugin prints a warning if result is not + consistent with the server-supplied ``Content-Type`` header. In previous versions, the plugin would use a ``.jpg`` extension for all images. :bug:`2053` @@ -2144,273 +2290,270 @@ command-line tools and an :doc:`/plugins/export` that can dump data from the beets database as JSON. You can also automatically translate lyrics using a machine translation service. -The ``echonest`` plugin has been removed in this version because the API it -used is `shutting down`_. You might want to try the -:doc:`/plugins/acousticbrainz` instead. +The ``echonest`` plugin has been removed in this version because the API it used +is `shutting down`_. You might want to try the :doc:`/plugins/acousticbrainz` +instead. .. _shutting down: https://developer.spotify.com/news-stories/2016/03/29/api-improvements-update/ Some of the larger new features: -* The new :doc:`/plugins/hook` lets you execute commands in response to beets +- The new :doc:`/plugins/hook` lets you execute commands in response to beets events. -* The new :doc:`/plugins/export` can export data from beets' database as - JSON. Thanks to :user:`GuilhermeHideki`. -* :doc:`/plugins/lyrics`: The plugin can now translate the fetched lyrics to +- The new :doc:`/plugins/export` can export data from beets' database as JSON. + Thanks to :user:`GuilhermeHideki`. +- :doc:`/plugins/lyrics`: The plugin can now translate the fetched lyrics to your native language using the Bing translation API. Thanks to :user:`Kraymer`. -* :doc:`/plugins/fetchart`: Album art can now be fetched from `fanart.tv`_. +- :doc:`/plugins/fetchart`: Album art can now be fetched from fanart.tv_. Smaller new things: -* There are two new functions available in templates: ``%first`` and ``%ifdef``. +- There are two new functions available in templates: ``%first`` and ``%ifdef``. See :ref:`template-functions`. 
-* :doc:`/plugins/convert`: A new `album_art_maxwidth` setting lets you resize +- :doc:`/plugins/convert`: A new ``album_art_maxwidth`` setting lets you resize album art while copying it. -* :doc:`/plugins/convert`: The `extension` setting is now optional for +- :doc:`/plugins/convert`: The ``extension`` setting is now optional for conversion formats. By default, the extension is the same as the name of the configured format. -* :doc:`/plugins/importadded`: A new `preserve_write_mtimes` option - lets you preserve mtime of files even when beets updates their metadata. -* :doc:`/plugins/fetchart`: The `enforce_ratio` option now lets you tolerate - images that are *almost* square but differ slightly from an exact 1:1 - aspect ratio. -* :doc:`/plugins/fetchart`: The plugin can now optionally save the artwork's +- :doc:`/plugins/importadded`: A new ``preserve_write_mtimes`` option lets you + preserve mtime of files even when beets updates their metadata. +- :doc:`/plugins/fetchart`: The ``enforce_ratio`` option now lets you tolerate + images that are *almost* square but differ slightly from an exact 1:1 aspect + ratio. +- :doc:`/plugins/fetchart`: The plugin can now optionally save the artwork's source in an attribute in the database. -* The :ref:`terminal_encoding` configuration option can now also override the +- The :ref:`terminal_encoding` configuration option can now also override the *input* encoding. (Previously, it only affected the encoding of the standard *output* stream.) -* A new :ref:`ignore_hidden` configuration option lets you ignore files that +- A new :ref:`ignore_hidden` configuration option lets you ignore files that your OS marks as invisible. -* :doc:`/plugins/web`: A new `values` endpoint lets you get the distinct values - of a field. Thanks to :user:`sumpfralle`. :bug:`2010` +- :doc:`/plugins/web`: A new ``values`` endpoint lets you get the distinct + values of a field. Thanks to :user:`sumpfralle`. :bug:`2010` .. 
_fanart.tv: https://fanart.tv/ Fixes: -* Fix a problem with the :ref:`stats-cmd` command in exact mode when filenames +- Fix a problem with the :ref:`stats-cmd` command in exact mode when filenames on Windows use non-ASCII characters. :bug:`1891` -* Fix a crash when iTunes Sound Check tags contained invalid data. :bug:`1895` -* :doc:`/plugins/mbcollection`: The plugin now redacts your MusicBrainz - password in the ``beet config`` output. :bug:`1907` -* :doc:`/plugins/scrub`: Fix an occasional problem where scrubbing on import +- Fix a crash when iTunes Sound Check tags contained invalid data. :bug:`1895` +- :doc:`/plugins/mbcollection`: The plugin now redacts your MusicBrainz password + in the ``beet config`` output. :bug:`1907` +- :doc:`/plugins/scrub`: Fix an occasional problem where scrubbing on import could undo the :ref:`id3v23` setting. :bug:`1903` -* :doc:`/plugins/lyrics`: Add compatibility with some changes to the - LyricsWiki page markup. :bug:`1912` :bug:`1909` -* :doc:`/plugins/lyrics`: Fix retrieval from Musixmatch by improving the way - we guess the URL for lyrics on that service. :bug:`1880` -* :doc:`/plugins/edit`: Fail gracefully when the configured text editor - command can't be invoked. :bug:`1927` -* :doc:`/plugins/fetchart`: Fix a crash in the Wikipedia backend on non-ASCII +- :doc:`/plugins/lyrics`: Add compatibility with some changes to the LyricsWiki + page markup. :bug:`1912` :bug:`1909` +- :doc:`/plugins/lyrics`: Fix retrieval from Musixmatch by improving the way we + guess the URL for lyrics on that service. :bug:`1880` +- :doc:`/plugins/edit`: Fail gracefully when the configured text editor command + can't be invoked. :bug:`1927` +- :doc:`/plugins/fetchart`: Fix a crash in the Wikipedia backend on non-ASCII artist and album names. :bug:`1960` -* :doc:`/plugins/convert`: Change the default `ogg` encoding quality from 2 to - 3 (to fit the default from the `oggenc(1)` manpage). 
:bug:`1982` -* :doc:`/plugins/convert`: The `never_convert_lossy_files` option now +- :doc:`/plugins/convert`: Change the default ``ogg`` encoding quality from 2 to + 3 (to fit the default from the ``oggenc(1)`` manpage). :bug:`1982` +- :doc:`/plugins/convert`: The ``never_convert_lossy_files`` option now considers AIFF a lossless format. :bug:`2005` -* :doc:`/plugins/web`: A proper 404 error, instead of an internal exception, - is returned when missing album art is requested. Thanks to - :user:`sumpfralle`. :bug:`2011` -* Tolerate more malformed floating-point numbers in metadata tags. :bug:`2014` -* The :ref:`ignore` configuration option now includes the ``lost+found`` +- :doc:`/plugins/web`: A proper 404 error, instead of an internal exception, is + returned when missing album art is requested. Thanks to :user:`sumpfralle`. + :bug:`2011` +- Tolerate more malformed floating-point numbers in metadata tags. :bug:`2014` +- The :ref:`ignore` configuration option now includes the ``lost+found`` directory by default. -* :doc:`/plugins/acousticbrainz`: AcousticBrainz lookups are now done over +- :doc:`/plugins/acousticbrainz`: AcousticBrainz lookups are now done over HTTPS. Thanks to :user:`Freso`. :bug:`2007` 1.3.17 (February 7, 2016) ------------------------- This release introduces one new plugin to fetch audio information from the -`AcousticBrainz`_ project and another plugin to make it easier to submit your -handcrafted metadata back to MusicBrainz. -The importer also gained two oft-requested features: a way to skip the initial -search process by specifying an ID ahead of time, and a way to *manually* -provide metadata in the middle of the import process (via the -:doc:`/plugins/edit`). +AcousticBrainz_ project and another plugin to make it easier to submit your +handcrafted metadata back to MusicBrainz. 
The importer also gained two +oft-requested features: a way to skip the initial search process by specifying +an ID ahead of time, and a way to *manually* provide metadata in the middle of +the import process (via the :doc:`/plugins/edit`). -Also, as of this release, the beets project has some new Internet homes! Our -new domain name is `beets.io`_, and we have a shiny new GitHub organization: -`beetbox`_. +Also, as of this release, the beets project has some new Internet homes! Our new +domain name is beets.io_, and we have a shiny new GitHub organization: beetbox_. Here are the big new features: -* A new :doc:`/plugins/acousticbrainz` fetches acoustic-analysis information - from the `AcousticBrainz`_ project. Thanks to :user:`opatel99`, and thanks - to `Google Code-In`_! :bug:`1784` -* A new :doc:`/plugins/mbsubmit` lets you print music's current metadata in a +- A new :doc:`/plugins/acousticbrainz` fetches acoustic-analysis information + from the AcousticBrainz_ project. Thanks to :user:`opatel99`, and thanks to + `Google Code-In`_! :bug:`1784` +- A new :doc:`/plugins/mbsubmit` lets you print music's current metadata in a format that the MusicBrainz data parser can understand. You can trigger it during an interactive import session. :bug:`1779` -* A new ``--search-id`` importer option lets you manually specify - IDs (i.e., MBIDs or Discogs IDs) for imported music. Doing this skips the - initial candidate search, which can be important for huge albums where this - initial lookup is slow. - Also, the ``enter Id`` prompt choice now accepts several IDs, separated by - spaces. :bug:`1808` -* :doc:`/plugins/edit`: You can now edit metadata *on the fly* during the - import process. The plugin provides two new interactive options: one to edit - *your music's* metadata, and one to edit the *matched metadata* retrieved - from MusicBrainz (or another data source). This feature is still in its - early stages, so please send feedback if you find anything missing. 
- :bug:`1846` :bug:`396` +- A new ``--search-id`` importer option lets you manually specify IDs (i.e., + MBIDs or Discogs IDs) for imported music. Doing this skips the initial + candidate search, which can be important for huge albums where this initial + lookup is slow. Also, the ``enter Id`` prompt choice now accepts several IDs, + separated by spaces. :bug:`1808` +- :doc:`/plugins/edit`: You can now edit metadata *on the fly* during the import + process. The plugin provides two new interactive options: one to edit *your + music's* metadata, and one to edit the *matched metadata* retrieved from + MusicBrainz (or another data source). This feature is still in its early + stages, so please send feedback if you find anything missing. :bug:`1846` + :bug:`396` There are even more new features: -* :doc:`/plugins/fetchart`: The Google Images backend has been restored. It - now requires an API key from Google. Thanks to :user:`lcharlick`. - :bug:`1778` -* :doc:`/plugins/info`: A new option will print only fields' names and not - their values. Thanks to :user:`GuilhermeHideki`. :bug:`1812` -* The :ref:`fields-cmd` command now displays flexible attributes. - Thanks to :user:`GuilhermeHideki`. :bug:`1818` -* The :ref:`modify-cmd` command lets you interactively select which albums or +- :doc:`/plugins/fetchart`: The Google Images backend has been restored. It now + requires an API key from Google. Thanks to :user:`lcharlick`. :bug:`1778` +- :doc:`/plugins/info`: A new option will print only fields' names and not their + values. Thanks to :user:`GuilhermeHideki`. :bug:`1812` +- The :ref:`fields-cmd` command now displays flexible attributes. Thanks to + :user:`GuilhermeHideki`. :bug:`1818` +- The :ref:`modify-cmd` command lets you interactively select which albums or items you want to change. :bug:`1843` -* The :ref:`move-cmd` command gained a new ``--timid`` flag to print and - confirm which files you want to move. 
:bug:`1843` -* The :ref:`move-cmd` command no longer prints filenames for files that - don't actually need to be moved. :bug:`1583` +- The :ref:`move-cmd` command gained a new ``--timid`` flag to print and confirm + which files you want to move. :bug:`1843` +- The :ref:`move-cmd` command no longer prints filenames for files that don't + actually need to be moved. :bug:`1583` -.. _Google Code-In: https://codein.withgoogle.com/ -.. _AcousticBrainz: https://acousticbrainz.org/ +.. _acousticbrainz: https://acousticbrainz.org/ + +.. _google code-in: https://codein.withgoogle.com/ Fixes: -* :doc:`/plugins/play`: Fix a regression in the last version where there was - no default command. :bug:`1793` -* :doc:`/plugins/lastimport`: The plugin now works again after being broken by +- :doc:`/plugins/play`: Fix a regression in the last version where there was no + default command. :bug:`1793` +- :doc:`/plugins/lastimport`: The plugin now works again after being broken by some unannounced changes to the Last.fm API. :bug:`1574` -* :doc:`/plugins/play`: Fixed a typo in a configuration option. The option is - now ``warning_threshold`` instead of ``warning_treshold``, but we kept the - old name around for compatibility. Thanks to :user:`JesseWeinstein`. - :bug:`1802` :bug:`1803` -* :doc:`/plugins/edit`: Editing metadata now moves files, when appropriate - (like the :ref:`modify-cmd` command). :bug:`1804` -* The :ref:`stats-cmd` command no longer crashes when files are missing or +- :doc:`/plugins/play`: Fixed a typo in a configuration option. The option is + now ``warning_threshold`` instead of ``warning_treshold``, but we kept the old + name around for compatibility. Thanks to :user:`JesseWeinstein`. :bug:`1802` + :bug:`1803` +- :doc:`/plugins/edit`: Editing metadata now moves files, when appropriate (like + the :ref:`modify-cmd` command). :bug:`1804` +- The :ref:`stats-cmd` command no longer crashes when files are missing or inaccessible. 
:bug:`1806` -* :doc:`/plugins/fetchart`: Possibly fix a Unicode-related crash when using - some versions of pyOpenSSL. :bug:`1805` -* :doc:`/plugins/replaygain`: Fix an intermittent crash with the GStreamer +- :doc:`/plugins/fetchart`: Possibly fix a Unicode-related crash when using some + versions of pyOpenSSL. :bug:`1805` +- :doc:`/plugins/replaygain`: Fix an intermittent crash with the GStreamer backend. :bug:`1855` -* :doc:`/plugins/lastimport`: The plugin now works with the beets API key by +- :doc:`/plugins/lastimport`: The plugin now works with the beets API key by default. You can still provide a different key the configuration. -* :doc:`/plugins/replaygain`: Fix a crash using the Python Audio Tools - backend. :bug:`1873` +- :doc:`/plugins/replaygain`: Fix a crash using the Python Audio Tools backend. + :bug:`1873` + +.. _beetbox: https://github.com/beetbox .. _beets.io: https://beets.io/ -.. _Beetbox: https://github.com/beetbox 1.3.16 (December 28, 2015) -------------------------- The big news in this release is a new :doc:`interactive editor plugin -</plugins/edit>`. It's really nifty: you can now change your music's metadata -by making changes in a visual text editor, which can sometimes be far more +</plugins/edit>`. It's really nifty: you can now change your music's metadata by +making changes in a visual text editor, which can sometimes be far more efficient than the built-in :ref:`modify-cmd` command. No more carefully retyping the same artist name with slight capitalization changes. -This version also adds an oft-requested "not" operator to beets' queries, so -you can exclude music from any operation. It also brings friendlier formatting -(and querying!) of song durations. +This version also adds an oft-requested "not" operator to beets' queries, so you +can exclude music from any operation. It also brings friendlier formatting (and +querying!) of song durations. 
The big new stuff: -* A new :doc:`/plugins/edit` lets you manually edit your music's metadata - using your favorite text editor. :bug:`164` :bug:`1706` -* Queries can now use "not" logic. Type a ``^`` before part of a query to - *exclude* matching music from the results. For example, ``beet list -a - beatles ^album:1`` will find all your albums by the Beatles except for their - singles compilation, "1." See :ref:`not_query`. :bug:`819` :bug:`1728` -* A new :doc:`/plugins/embyupdate` can trigger a library refresh on an `Emby`_ +- A new :doc:`/plugins/edit` lets you manually edit your music's metadata using + your favorite text editor. :bug:`164` :bug:`1706` +- Queries can now use "not" logic. Type a ``^`` before part of a query to + *exclude* matching music from the results. For example, ``beet list -a beatles + ^album:1`` will find all your albums by the Beatles except for their singles + compilation, "1." See :ref:`not_query`. :bug:`819` :bug:`1728` +- A new :doc:`/plugins/embyupdate` can trigger a library refresh on an Emby_ server when your beets database changes. -* Track length is now displayed as "M:SS" rather than a raw number of seconds. +- Track length is now displayed as "M:SS" rather than a raw number of seconds. Queries on track length also accept this format: for example, ``beet list - length:5:30..`` will find all your tracks that have a duration over 5 - minutes and 30 seconds. You can turn off this new behavior using the + length:5:30..`` will find all your tracks that have a duration over 5 minutes + and 30 seconds. You can turn off this new behavior using the ``format_raw_length`` configuration option. :bug:`1749` Smaller changes: -* Three commands, ``modify``, ``update``, and ``mbsync``, would previously - move files by default after changing their metadata. Now, these commands - will only move files if you have the :ref:`config-import-copy` or - :ref:`config-import-move` options enabled in your importer configuration. 
- This way, if you configure the importer not to touch your filenames, other - commands will respect that decision by default too. Each command also - sprouted a ``--move`` command-line option to override this default (in - addition to the ``--nomove`` flag they already had). :bug:`1697` -* A new configuration option, ``va_name``, controls the album artist name for +- Three commands, ``modify``, ``update``, and ``mbsync``, would previously move + files by default after changing their metadata. Now, these commands will only + move files if you have the :ref:`config-import-copy` or + :ref:`config-import-move` options enabled in your importer configuration. This + way, if you configure the importer not to touch your filenames, other commands + will respect that decision by default too. Each command also sprouted a + ``--move`` command-line option to override this default (in addition to the + ``--nomove`` flag they already had). :bug:`1697` +- A new configuration option, ``va_name``, controls the album artist name for various-artists albums. The setting defaults to "Various Artists," the MusicBrainz standard. In order to match MusicBrainz, the :doc:`/plugins/discogs` also adopts the same setting. -* :doc:`/plugins/info`: The ``info`` command now accepts a ``-f/--format`` +- :doc:`/plugins/info`: The ``info`` command now accepts a ``-f/--format`` option for customizing how items are displayed, just like the built-in ``list`` command. :bug:`1737` Some changes for developers: -* Two new :ref:`plugin hooks <plugin_events>`, ``albuminfo_received`` and +- Two new :ref:`plugin hooks <plugin_events>`, ``albuminfo_received`` and ``trackinfo_received``, let plugins intercept metadata as soon as it is received, before it is applied to music in the database. :bug:`872` -* Plugins can now add options to the interactive importer prompts. See +- Plugins can now add options to the interactive importer prompts. See :ref:`append_prompt_choices`. 
:bug:`1758` Fixes: -* :doc:`/plugins/plexupdate`: Fix a crash when Plex libraries use non-ASCII +- :doc:`/plugins/plexupdate`: Fix a crash when Plex libraries use non-ASCII collection names. :bug:`1649` -* :doc:`/plugins/discogs`: Maybe fix a crash when using some versions of the +- :doc:`/plugins/discogs`: Maybe fix a crash when using some versions of the ``requests`` library. :bug:`1656` -* Fix a race in the importer when importing two albums with the same artist - and name in quick succession. The importer would fail to detect them as - duplicates, claiming that there were "empty albums" in the database even - when there were not. :bug:`1652` -* :doc:`plugins/lastgenre`: Clean up the reggae-related genres somewhat. - Thanks to :user:`Freso`. :bug:`1661` -* The importer now correctly moves album art files when re-importing. - :bug:`314` -* :doc:`/plugins/fetchart`: In auto mode, the plugin now skips albums that +- Fix a race in the importer when importing two albums with the same artist and + name in quick succession. The importer would fail to detect them as + duplicates, claiming that there were "empty albums" in the database even when + there were not. :bug:`1652` +- :doc:`plugins/lastgenre`: Clean up the reggae-related genres somewhat. Thanks + to :user:`Freso`. :bug:`1661` +- The importer now correctly moves album art files when re-importing. :bug:`314` +- :doc:`/plugins/fetchart`: In auto mode, the plugin now skips albums that already have art attached to them so as not to interfere with re-imports. :bug:`314` -* :doc:`plugins/fetchart`: The plugin now only resizes album art if necessary, +- :doc:`plugins/fetchart`: The plugin now only resizes album art if necessary, rather than always by default. :bug:`1264` -* :doc:`plugins/fetchart`: Fix a bug where a database reference to a +- :doc:`plugins/fetchart`: Fix a bug where a database reference to a non-existent album art file would prevent the command from fetching new art. 
:bug:`1126` -* :doc:`/plugins/thumbnails`: Fix a crash with Unicode paths. :bug:`1686` -* :doc:`/plugins/embedart`: The ``remove_art_file`` option now works on import +- :doc:`/plugins/thumbnails`: Fix a crash with Unicode paths. :bug:`1686` +- :doc:`/plugins/embedart`: The ``remove_art_file`` option now works on import (as well as with the explicit command). :bug:`1662` :bug:`1675` -* :doc:`/plugins/metasync`: Fix a crash when syncing with recent versions of +- :doc:`/plugins/metasync`: Fix a crash when syncing with recent versions of iTunes. :bug:`1700` -* :doc:`/plugins/duplicates`: Fix a crash when merging items. :bug:`1699` -* :doc:`/plugins/smartplaylist`: More gracefully handle malformed queries and +- :doc:`/plugins/duplicates`: Fix a crash when merging items. :bug:`1699` +- :doc:`/plugins/smartplaylist`: More gracefully handle malformed queries and missing configuration. -* Fix a crash with some files with unreadable iTunes SoundCheck metadata. +- Fix a crash with some files with unreadable iTunes SoundCheck metadata. :bug:`1666` -* :doc:`/plugins/thumbnails`: Fix a nasty segmentation fault crash that arose +- :doc:`/plugins/thumbnails`: Fix a nasty segmentation fault crash that arose with some library versions. :bug:`1433` -* :doc:`/plugins/convert`: Fix a crash with Unicode paths in ``--pretend`` - mode. :bug:`1735` -* Fix a crash when sorting by nonexistent fields on queries. :bug:`1734` -* Probably fix some mysterious errors when dealing with images using - ImageMagick on Windows. :bug:`1721` -* Fix a crash when writing some Unicode comment strings to MP3s that used - older encodings. The encoding is now always updated to UTF-8. :bug:`879` -* :doc:`/plugins/fetchart`: The Google Images backend has been removed. It - used an API that has been shut down. :bug:`1760` -* :doc:`/plugins/lyrics`: Fix a crash in the Google backend when searching for +- :doc:`/plugins/convert`: Fix a crash with Unicode paths in ``--pretend`` mode. 
+ :bug:`1735` +- Fix a crash when sorting by nonexistent fields on queries. :bug:`1734` +- Probably fix some mysterious errors when dealing with images using ImageMagick + on Windows. :bug:`1721` +- Fix a crash when writing some Unicode comment strings to MP3s that used older + encodings. The encoding is now always updated to UTF-8. :bug:`879` +- :doc:`/plugins/fetchart`: The Google Images backend has been removed. It used + an API that has been shut down. :bug:`1760` +- :doc:`/plugins/lyrics`: Fix a crash in the Google backend when searching for bands with regular-expression characters in their names, like Sunn O))). :bug:`1673` -* :doc:`/plugins/scrub`: In ``auto`` mode, the plugin now *actually* only - scrubs files on import, as the documentation always claimed it did---not - every time files were written, as it previously did. :bug:`1657` -* :doc:`/plugins/scrub`: Also in ``auto`` mode, album art is now correctly +- :doc:`/plugins/scrub`: In ``auto`` mode, the plugin now *actually* only scrubs + files on import, as the documentation always claimed it did---not every time + files were written, as it previously did. :bug:`1657` +- :doc:`/plugins/scrub`: Also in ``auto`` mode, album art is now correctly restored. :bug:`1657` -* Possibly allow flexible attributes to be used with the ``%aunique`` template +- Possibly allow flexible attributes to be used with the ``%aunique`` template function. :bug:`1775` -* :doc:`/plugins/lyrics`: The Genius backend is now more robust to - communication errors. The backend has also been disabled by default, since - the API it depends on is currently down. :bug:`1770` +- :doc:`/plugins/lyrics`: The Genius backend is now more robust to communication + errors. The backend has also been disabled by default, since the API it + depends on is currently down. :bug:`1770` -.. _Emby: https://emby.media +.. 
_emby: https://emby.media 1.3.15 (October 17, 2015) ------------------------- @@ -2418,169 +2561,167 @@ Fixes: This release adds a new plugin for checking file quality and a new source for lyrics. The larger features are: -* A new :doc:`/plugins/badfiles` helps you scan for corruption in your music +- A new :doc:`/plugins/badfiles` helps you scan for corruption in your music collection. Thanks to :user:`fxthomas`. :bug:`1568` -* :doc:`/plugins/lyrics`: You can now fetch lyrics from Genius.com. - Thanks to :user:`sadatay`. :bug:`1626` :bug:`1639` -* :doc:`/plugins/zero`: The plugin can now use a "whitelist" policy as an +- :doc:`/plugins/lyrics`: You can now fetch lyrics from Genius.com. Thanks to + :user:`sadatay`. :bug:`1626` :bug:`1639` +- :doc:`/plugins/zero`: The plugin can now use a "whitelist" policy as an alternative to the (default) "blacklist" mode. Thanks to :user:`adkow`. :bug:`1621` :bug:`1641` And there are smaller new features too: -* Add new color aliases for standard terminal color names (e.g., cyan and +- Add new color aliases for standard terminal color names (e.g., cyan and magenta). Thanks to :user:`mathstuf`. :bug:`1548` -* :doc:`/plugins/play`: A new ``--args`` option lets you specify options for - the player command. :bug:`1532` -* :doc:`/plugins/play`: A new ``raw`` configuration option lets the command - work with players (such as VLC) that expect music filenames as arguments, - rather than in a playlist. Thanks to :user:`nathdwek`. :bug:`1578` -* :doc:`/plugins/play`: You can now configure the number of tracks that - trigger a "lots of music" warning. :bug:`1577` -* :doc:`/plugins/embedart`: A new ``remove_art_file`` option lets you clean up +- :doc:`/plugins/play`: A new ``--args`` option lets you specify options for the + player command. :bug:`1532` +- :doc:`/plugins/play`: A new ``raw`` configuration option lets the command work + with players (such as VLC) that expect music filenames as arguments, rather + than in a playlist. 
Thanks to :user:`nathdwek`. :bug:`1578` +- :doc:`/plugins/play`: You can now configure the number of tracks that trigger + a "lots of music" warning. :bug:`1577` +- :doc:`/plugins/embedart`: A new ``remove_art_file`` option lets you clean up if you prefer *only* embedded album art. Thanks to :user:`jackwilsdon`. :bug:`1591` :bug:`733` -* :doc:`/plugins/plexupdate`: A new ``library_name`` option allows you to select +- :doc:`/plugins/plexupdate`: A new ``library_name`` option allows you to select which Plex library to update. :bug:`1572` :bug:`1595` -* A new ``include`` option lets you import external configuration files. +- A new ``include`` option lets you import external configuration files. This release has plenty of fixes: -* :doc:`/plugins/lastgenre`: Fix a bug that prevented tag popularity from - being considered. Thanks to :user:`svoos`. :bug:`1559` -* Fixed a bug where plugins wouldn't be notified of the deletion of an item's +- :doc:`/plugins/lastgenre`: Fix a bug that prevented tag popularity from being + considered. Thanks to :user:`svoos`. :bug:`1559` +- Fixed a bug where plugins wouldn't be notified of the deletion of an item's art, for example with the ``clearart`` command from the :doc:`/plugins/embedart`. Thanks to :user:`nathdwek`. :bug:`1565` -* :doc:`/plugins/fetchart`: The Google Images source is disabled by default - (as it was before beets 1.3.9), as is the Wikipedia source (which was - causing lots of unnecessary delays due to DBpedia downtime). To re-enable - these sources, add ``wikipedia google`` to your ``sources`` configuration - option. -* The :ref:`list-cmd` command's help output now has a small query and format +- :doc:`/plugins/fetchart`: The Google Images source is disabled by default (as + it was before beets 1.3.9), as is the Wikipedia source (which was causing lots + of unnecessary delays due to DBpedia downtime). To re-enable these sources, + add ``wikipedia google`` to your ``sources`` configuration option. 
+- The :ref:`list-cmd` command's help output now has a small query and format string example. Thanks to :user:`pkess`. :bug:`1582` -* :doc:`/plugins/fetchart`: The plugin now fetches PNGs but not GIFs. (It - still fetches JPEGs.) This avoids an error when trying to embed images, - since not all formats support GIFs. :bug:`1588` -* Date fields are now written in the correct order (year-month-day), which +- :doc:`/plugins/fetchart`: The plugin now fetches PNGs but not GIFs. (It still + fetches JPEGs.) This avoids an error when trying to embed images, since not + all formats support GIFs. :bug:`1588` +- Date fields are now written in the correct order (year-month-day), which eliminates an intermittent bug where the latter two fields would not get written to files. Thanks to :user:`jdetrey`. :bug:`1303` :bug:`1589` -* :doc:`/plugins/replaygain`: Avoid a crash when the PyAudioTools backend +- :doc:`/plugins/replaygain`: Avoid a crash when the PyAudioTools backend encounters an error. :bug:`1592` -* The case sensitivity of path queries is more useful now: rather than just +- The case sensitivity of path queries is more useful now: rather than just guessing based on the platform, we now check the case sensitivity of your filesystem. :bug:`1586` -* Case-insensitive path queries might have returned nothing because of a - wrong SQL query. -* Fix a crash when a query contains a "+" or "-" alone in a component. +- Case-insensitive path queries might have returned nothing because of a wrong + SQL query. +- Fix a crash when a query contains a "+" or "-" alone in a component. :bug:`1605` -* Fixed unit of file size to powers of two (MiB, GiB, etc.) instead of powers - of ten (MB, GB, etc.). :bug:`1623` +- Fixed unit of file size to powers of two (MiB, GiB, etc.) instead of powers of + ten (MB, GB, etc.). 
:bug:`1623` 1.3.14 (August 2, 2015) ----------------------- -This is mainly a bugfix release, but we also have a nifty new plugin for -`ipfs`_ and a bunch of new configuration options. +This is mainly a bugfix release, but we also have a nifty new plugin for ipfs_ +and a bunch of new configuration options. The new features: -* A new :doc:`/plugins/ipfs` lets you share music via a new, global, +- A new :doc:`/plugins/ipfs` lets you share music via a new, global, decentralized filesystem. :bug:`1397` -* :doc:`/plugins/duplicates`: You can now merge duplicate - track metadata (when detecting duplicate items), or duplicate album - tracks (when detecting duplicate albums). -* :doc:`/plugins/duplicates`: Duplicate resolution now uses an ordering to +- :doc:`/plugins/duplicates`: You can now merge duplicate track metadata (when + detecting duplicate items), or duplicate album tracks (when detecting + duplicate albums). +- :doc:`/plugins/duplicates`: Duplicate resolution now uses an ordering to prioritize duplicates. By default, it prefers music with more complete metadata, but you can configure it to use any list of attributes. -* :doc:`/plugins/metasync`: Added a new backend to fetch metadata from iTunes. +- :doc:`/plugins/metasync`: Added a new backend to fetch metadata from iTunes. This plugin is still in an experimental phase. :bug:`1450` -* The `move` command has a new ``--pretend`` option, making the command show +- The ``move`` command has a new ``--pretend`` option, making the command show how the items will be moved without actually changing anything. -* The importer now supports matching of "pregap" or HTOA (hidden track-one +- The importer now supports matching of "pregap" or HTOA (hidden track-one audio) tracks when they are listed in MusicBrainz. (This feature depends on a - new version of the `python-musicbrainzngs`_ library that is not yet released, but - will start working when it is available.) Thanks to :user:`ruippeixotog`. 
+ new version of the python-musicbrainzngs_ library that is not yet released, + but will start working when it is available.) Thanks to :user:`ruippeixotog`. :bug:`1104` :bug:`1493` -* :doc:`/plugins/plexupdate`: A new ``token`` configuration option lets you +- :doc:`/plugins/plexupdate`: A new ``token`` configuration option lets you specify a key for Plex Home setups. Thanks to :user:`edcarroll`. :bug:`1494` Fixes: -* :doc:`/plugins/fetchart`: Complain when the `enforce_ratio` - or `min_width` options are enabled but no local imaging backend is available - to carry them out. :bug:`1460` -* :doc:`/plugins/importfeeds`: Avoid generating incorrect m3u filename when - both of the `m3u` and `m3u_multi` options are enabled. :bug:`1490` -* :doc:`/plugins/duplicates`: Avoid a crash when misconfigured. :bug:`1457` -* :doc:`/plugins/mpdstats`: Avoid a crash when the music played is not in the +- :doc:`/plugins/fetchart`: Complain when the ``enforce_ratio`` or ``min_width`` + options are enabled but no local imaging backend is available to carry them + out. :bug:`1460` +- :doc:`/plugins/importfeeds`: Avoid generating incorrect m3u filename when both + of the ``m3u`` and ``m3u_multi`` options are enabled. :bug:`1490` +- :doc:`/plugins/duplicates`: Avoid a crash when misconfigured. :bug:`1457` +- :doc:`/plugins/mpdstats`: Avoid a crash when the music played is not in the beets library. Thanks to :user:`CodyReichert`. :bug:`1443` -* Fix a crash with ArtResizer on Windows systems (affecting - :doc:`/plugins/embedart`, :doc:`/plugins/fetchart`, - and :doc:`/plugins/thumbnails`). :bug:`1448` -* :doc:`/plugins/permissions`: Fix an error with non-ASCII paths. :bug:`1449` -* Fix sorting by paths when the :ref:`sort_case_insensitive` option is - enabled. 
:bug:`1451` -* :doc:`/plugins/embedart`: Avoid an error when trying to embed invalid images +- Fix a crash with ArtResizer on Windows systems (affecting + :doc:`/plugins/embedart`, :doc:`/plugins/fetchart`, and + :doc:`/plugins/thumbnails`). :bug:`1448` +- :doc:`/plugins/permissions`: Fix an error with non-ASCII paths. :bug:`1449` +- Fix sorting by paths when the :ref:`sort_case_insensitive` option is enabled. + :bug:`1451` +- :doc:`/plugins/embedart`: Avoid an error when trying to embed invalid images into MPEG-4 files. -* :doc:`/plugins/fetchart`: The Wikipedia source can now better deal artists +- :doc:`/plugins/fetchart`: The Wikipedia source can now better deal artists that use non-standard capitalization (e.g., alt-J, dEUS). -* :doc:`/plugins/web`: Fix searching for non-ASCII queries. Thanks to +- :doc:`/plugins/web`: Fix searching for non-ASCII queries. Thanks to :user:`oldtopman`. :bug:`1470` -* :doc:`/plugins/mpdupdate`: We now recommend the newer ``python-mpd2`` - library instead of its unmaintained parent. Thanks to :user:`Somasis`. - :bug:`1472` -* The importer interface and log file now output a useful list of files - (instead of the word "None") when in album-grouping mode. :bug:`1475` - :bug:`825` -* Fix some logging errors when filenames and other user-provided strings - contain curly braces. :bug:`1481` -* Regular expression queries over paths now work more reliably with non-ASCII +- :doc:`/plugins/mpdupdate`: We now recommend the newer ``python-mpd2`` library + instead of its unmaintained parent. Thanks to :user:`Somasis`. :bug:`1472` +- The importer interface and log file now output a useful list of files (instead + of the word "None") when in album-grouping mode. :bug:`1475` :bug:`825` +- Fix some logging errors when filenames and other user-provided strings contain + curly braces. :bug:`1481` +- Regular expression queries over paths now work more reliably with non-ASCII characters in filenames. 
:bug:`1482` -* Fix a bug where the autotagger's :ref:`ignored` setting was sometimes, well, +- Fix a bug where the autotagger's :ref:`ignored` setting was sometimes, well, ignored. :bug:`1487` -* Fix a bug with Unicode strings when generating image thumbnails. :bug:`1485` -* :doc:`/plugins/keyfinder`: Fix handling of Unicode paths. :bug:`1502` -* :doc:`/plugins/fetchart`: When album art is already present, the message is +- Fix a bug with Unicode strings when generating image thumbnails. :bug:`1485` +- :doc:`/plugins/keyfinder`: Fix handling of Unicode paths. :bug:`1502` +- :doc:`/plugins/fetchart`: When album art is already present, the message is now printed in the ``text_highlight_minor`` color (light gray). Thanks to :user:`Somasis`. :bug:`1512` -* Some messages in the console UI now use plural nouns correctly. Thanks to +- Some messages in the console UI now use plural nouns correctly. Thanks to :user:`JesseWeinstein`. :bug:`1521` -* Sorting numerical fields (such as track) now works again. :bug:`1511` -* :doc:`/plugins/replaygain`: Missing GStreamer plugins now cause a helpful +- Sorting numerical fields (such as track) now works again. :bug:`1511` +- :doc:`/plugins/replaygain`: Missing GStreamer plugins now cause a helpful error message instead of a crash. :bug:`1518` -* Fix an edge case when producing sanitized filenames where the maximum path +- Fix an edge case when producing sanitized filenames where the maximum path length conflicted with the :ref:`replace` rules. Thanks to Ben Ockmore. :bug:`496` :bug:`1361` -* Fix an incompatibility with OS X 10.11 (where ``/usr/sbin`` seems not to be - on the user's path by default). -* Fix an incompatibility with certain JPEG files. Here's a relevant `Python +- Fix an incompatibility with OS X 10.11 (where ``/usr/sbin`` seems not to be on + the user's path by default). +- Fix an incompatibility with certain JPEG files. Here's a relevant `Python bug`_. Thanks to :user:`nathdwek`. 
:bug:`1545` -* Fix the :ref:`group_albums` importer mode so that it works correctly when +- Fix the :ref:`group_albums` importer mode so that it works correctly when files are not already in order by album. :bug:`1550` -* The ``fields`` command no longer separates built-in fields from +- The ``fields`` command no longer separates built-in fields from plugin-provided ones. This distinction was becoming increasingly unreliable. -* :doc:`/plugins/duplicates`: Fix a Unicode warning when paths contained +- :doc:`/plugins/duplicates`: Fix a Unicode warning when paths contained non-ASCII characters. :bug:`1551` -* :doc:`/plugins/fetchart`: Work around a urllib3 bug that could cause a - crash. :bug:`1555` :bug:`1556` -* When you edit the configuration file with ``beet config -e`` and the file - does not exist, beets creates an empty file before editing it. This fixes an - error on OS X, where the ``open`` command does not work with non-existent - files. :bug:`1480` -* :doc:`/plugins/convert`: Fix a problem with filename encoding on Windows - under Python 3. :bug:`2515` :bug:`2516` +- :doc:`/plugins/fetchart`: Work around a urllib3 bug that could cause a crash. + :bug:`1555` :bug:`1556` +- When you edit the configuration file with ``beet config -e`` and the file does + not exist, beets creates an empty file before editing it. This fixes an error + on OS X, where the ``open`` command does not work with non-existent files. + :bug:`1480` +- :doc:`/plugins/convert`: Fix a problem with filename encoding on Windows under + Python 3. :bug:`2515` :bug:`2516` -.. _Python bug: https://bugs.python.org/issue16512 .. _ipfs: https://ipfs.io +.. _python bug: https://bugs.python.org/issue16512 + 1.3.13 (April 24, 2015) ----------------------- This is a tiny bug-fix release. It copes with a dependency upgrade that broke beets. There are just two fixes: -* Fix compatibility with `Jellyfish`_ version 0.5.0. 
-* :doc:`/plugins/embedart`: In ``auto`` mode (the import hook), the plugin now +- Fix compatibility with Jellyfish_ version 0.5.0. +- :doc:`/plugins/embedart`: In ``auto`` mode (the import hook), the plugin now respects the ``write`` config option under ``import``. If this is disabled, album art is no longer embedded on import in order to leave files untouched---in effect, ``auto`` is implicitly disabled. :bug:`1427` @@ -2588,62 +2729,61 @@ beets. There are just two fixes: 1.3.12 (April 18, 2015) ----------------------- -This little update makes queries more powerful, sorts music more -intelligently, and removes a performance bottleneck. There's an experimental -new plugin for synchronizing metadata with music players. +This little update makes queries more powerful, sorts music more intelligently, +and removes a performance bottleneck. There's an experimental new plugin for +synchronizing metadata with music players. -Packagers should also note a new dependency in this version: the `Jellyfish`_ +Packagers should also note a new dependency in this version: the Jellyfish_ Python library makes our text comparisons (a big part of the auto-tagging process) go much faster. New features: -* Queries can now use **"or" logic**: if you use a comma to separate parts of a +- Queries can now use **"or" logic**: if you use a comma to separate parts of a query, items and albums will match *either* side of the comma. For example, - ``beet ls foo , bar`` will get all the items matching `foo` or matching - `bar`. See :ref:`combiningqueries`. :bug:`1423` -* The autotagger's **matching algorithm is faster**. We now use the - `Jellyfish`_ library to compute string similarity, which is better optimized - than our hand-rolled edit distance implementation. :bug:`1389` -* Sorting is now **case insensitive** by default. This means that artists will - be sorted lexicographically regardless of case. For example, the artist - alt-J will now properly sort before YACHT. 
(Previously, it would have ended - up at the end of the list, after all the capital-letter artists.) - You can turn this new behavior off using the :ref:`sort_case_insensitive` - configuration option. See :ref:`query-sort`. :bug:`1429` -* An experimental new :doc:`/plugins/metasync` lets you get metadata from your + ``beet ls foo , bar`` will get all the items matching ``foo`` or matching + ``bar``. See :ref:`combiningqueries`. :bug:`1423` +- The autotagger's **matching algorithm is faster**. We now use the Jellyfish_ + library to compute string similarity, which is better optimized than our + hand-rolled edit distance implementation. :bug:`1389` +- Sorting is now **case insensitive** by default. This means that artists will + be sorted lexicographically regardless of case. For example, the artist alt-J + will now properly sort before YACHT. (Previously, it would have ended up at + the end of the list, after all the capital-letter artists.) You can turn this + new behavior off using the :ref:`sort_case_insensitive` configuration option. + See :ref:`query-sort`. :bug:`1429` +- An experimental new :doc:`/plugins/metasync` lets you get metadata from your favorite music players, starting with Amarok. :bug:`1386` -* :doc:`/plugins/fetchart`: There are new settings to control what constitutes - "acceptable" images. The `minwidth` option constrains the minimum image - width in pixels and the `enforce_ratio` option requires that images be +- :doc:`/plugins/fetchart`: There are new settings to control what constitutes + "acceptable" images. The ``minwidth`` option constrains the minimum image + width in pixels and the ``enforce_ratio`` option requires that images be square. :bug:`1394` Little fixes and improvements: -* :doc:`/plugins/fetchart`: Remove a hard size limit when fetching from the +- :doc:`/plugins/fetchart`: Remove a hard size limit when fetching from the Cover Art Archive. -* The output of the :ref:`fields-cmd` command is now sorted. 
Thanks to +- The output of the :ref:`fields-cmd` command is now sorted. Thanks to :user:`multikatt`. :bug:`1402` -* :doc:`/plugins/replaygain`: Fix a number of issues with the new - ``bs1770gain`` backend on Windows. Also, fix missing debug output in import - mode. :bug:`1398` -* Beets should now be better at guessing the appropriate output encoding on - Windows. (Specifically, the console output encoding is guessed separately - from the encoding for command-line arguments.) A bug was also fixed where - beets would ignore the locale settings and use UTF-8 by default. :bug:`1419` -* :doc:`/plugins/discogs`: Better error handling when we can't communicate - with Discogs on setup. :bug:`1417` -* :doc:`/plugins/importadded`: Fix a crash when importing singletons in-place. +- :doc:`/plugins/replaygain`: Fix a number of issues with the new ``bs1770gain`` + backend on Windows. Also, fix missing debug output in import mode. :bug:`1398` +- Beets should now be better at guessing the appropriate output encoding on + Windows. (Specifically, the console output encoding is guessed separately from + the encoding for command-line arguments.) A bug was also fixed where beets + would ignore the locale settings and use UTF-8 by default. :bug:`1419` +- :doc:`/plugins/discogs`: Better error handling when we can't communicate with + Discogs on setup. :bug:`1417` +- :doc:`/plugins/importadded`: Fix a crash when importing singletons in-place. :bug:`1416` -* :doc:`/plugins/fuzzy`: Fix a regression causing a crash in the last release. +- :doc:`/plugins/fuzzy`: Fix a regression causing a crash in the last release. :bug:`1422` -* Fix a crash when the importer cannot open its log file. Thanks to +- Fix a crash when the importer cannot open its log file. Thanks to :user:`barsanuphe`. :bug:`1426` -* Fix an error when trying to write tags for items with flexible fields called - `date` and `original_date` (which are not built-in beets fields). 
+- Fix an error when trying to write tags for items with flexible fields called + ``date`` and ``original_date`` (which are not built-in beets fields). :bug:`1404` -.. _Jellyfish: https://github.com/sunlightlabs/jellyfish +.. _jellyfish: https://github.com/sunlightlabs/jellyfish 1.3.11 (April 5, 2015) ---------------------- @@ -2658,184 +2798,179 @@ evolved plugin for using album art as directory thumbnails in file managers. There's a new source for album art, and the importer now records the source of match data. This is a particularly huge release---there's lots more below. -There's one big change with this release: **Python 2.6 is no longer -supported**. You'll need Python 2.7. Please trust us when we say this let us -remove a surprising number of ugly hacks throughout the code. +There's one big change with this release: **Python 2.6 is no longer supported**. +You'll need Python 2.7. Please trust us when we say this let us remove a +surprising number of ugly hacks throughout the code. Major new features and bigger changes: -* There are now **multiple levels of output verbosity**. On the command line, - you can make beets somewhat verbose with ``-v`` or very verbose with - ``-vv``. For the importer especially, this makes the first verbose mode much - more manageable, while still preserving an option for overwhelmingly verbose - debug output. :bug:`1244` -* A new :doc:`/plugins/filefilter` lets you write regular expressions to +- There are now **multiple levels of output verbosity**. On the command line, + you can make beets somewhat verbose with ``-v`` or very verbose with ``-vv``. + For the importer especially, this makes the first verbose mode much more + manageable, while still preserving an option for overwhelmingly verbose debug + output. :bug:`1244` +- A new :doc:`/plugins/filefilter` lets you write regular expressions to automatically **avoid importing** certain files. Thanks to :user:`mried`. 
:bug:`1186` -* A new :doc:`/plugins/thumbnails` generates cover-art **thumbnails for - album folders** for Freedesktop.org-compliant file managers. (This replaces - the :doc:`/plugins/freedesktop`, which only worked with the Dolphin file - manager.) -* :doc:`/plugins/replaygain`: There is a new backend that uses the - `bs1770gain`_ analysis tool. Thanks to :user:`jmwatte`. :bug:`1343` -* A new ``filesize`` field on items indicates the number of bytes in the file. +- A new :doc:`/plugins/thumbnails` generates cover-art **thumbnails for album + folders** for Freedesktop.org-compliant file managers. (This replaces the + :doc:`/plugins/freedesktop`, which only worked with the Dolphin file manager.) +- :doc:`/plugins/replaygain`: There is a new backend that uses the bs1770gain_ + analysis tool. Thanks to :user:`jmwatte`. :bug:`1343` +- A new ``filesize`` field on items indicates the number of bytes in the file. :bug:`1291` -* A new :ref:`searchlimit` configuration option allows you to specify how many - search results you wish to see when looking up releases at MusicBrainz - during import. :bug:`1245` -* The importer now records the data source for a match in a new - flexible attribute ``data_source`` on items and albums. :bug:`1311` -* The colors used in the terminal interface are now configurable via the new - config option ``colors``, nested under the option ``ui``. (Also, the `color` +- A new :conf:`plugins.index:search_limit` configuration option allows you to + specify how many search results you wish to see when looking up releases at + MusicBrainz during import. :bug:`1245` +- The importer now records the data source for a match in a new flexible + attribute ``data_source`` on items and albums. :bug:`1311` +- The colors used in the terminal interface are now configurable via the new + config option ``colors``, nested under the option ``ui``. (Also, the ``color`` config option has been moved from top-level to under ``ui``. 
Beets will respect the old color setting, but will warn the user with a deprecation message.) :bug:`1238` -* :doc:`/plugins/fetchart`: There's a new Wikipedia image source that uses +- :doc:`/plugins/fetchart`: There's a new Wikipedia image source that uses DBpedia to find albums. Thanks to Tom Jaspers. :bug:`1194` -* In the :ref:`config-cmd` command, the output is now redacted by default. +- In the :ref:`config-cmd` command, the output is now redacted by default. Sensitive information like passwords and API keys is not included. The new ``--clear`` option disables redaction. :bug:`1376` You should probably also know about these core changes to the way beets works: -* As mentioned above, Python 2.6 is no longer supported. -* The ``tracktotal`` attribute is now a *track-level field* instead of an - album-level one. This field stores the total number of tracks on the - album, or if the :ref:`per_disc_numbering` config option is set, the total - number of tracks on a particular medium (i.e., disc). The field was causing - problems with that :ref:`per_disc_numbering` mode: different discs on the - same album needed different track totals. The field can now work correctly - in either mode. -* To replace ``tracktotal`` as an album-level field, there is a new - ``albumtotal`` computed attribute that provides the total number of tracks - on the album. (The :ref:`per_disc_numbering` option has no influence on this +- As mentioned above, Python 2.6 is no longer supported. +- The ``tracktotal`` attribute is now a *track-level field* instead of an + album-level one. This field stores the total number of tracks on the album, or + if the :ref:`per_disc_numbering` config option is set, the total number of + tracks on a particular medium (i.e., disc). The field was causing problems + with that :ref:`per_disc_numbering` mode: different discs on the same album + needed different track totals. The field can now work correctly in either + mode. 
+- To replace ``tracktotal`` as an album-level field, there is a new
+  ``albumtotal`` computed attribute that provides the total number of tracks on
+  the album. (The :ref:`per_disc_numbering` option has no influence on this
   field.)
-* The `list_format_album` and `list_format_item` configuration keys
-  now affect (almost) every place where objects are printed and logged.
-  (Previously, they only controlled the :ref:`list-cmd` command and a few
-  other scattered pieces.) :bug:`1269`
-* Relatedly, the ``beet`` program now accept top-level options
-  ``--format-item`` and ``--format-album`` before any subcommand to control
-  how items and albums are displayed. :bug:`1271`
-* `list_format_album` and `list_format_album` have respectively been
-  renamed :ref:`format_album` and :ref:`format_item`. The old names still work
-  but each triggers a warning message. :bug:`1271`
-* :ref:`Path queries <pathquery>` are automatically triggered only if the
-  path targeted by the query exists. Previously, just having a slash somewhere
-  in the query was enough, so ``beet ls AC/DC`` wouldn't work to refer to the
-  artist.
+- The ``list_format_album`` and ``list_format_item`` configuration keys now
+  affect (almost) every place where objects are printed and logged. (Previously,
+  they only controlled the :ref:`list-cmd` command and a few other scattered
+  pieces.) :bug:`1269`
+- Relatedly, the ``beet`` program now accepts top-level options ``--format-item``
+  and ``--format-album`` before any subcommand to control how items and albums
+  are displayed. :bug:`1271`
+- ``list_format_album`` and ``list_format_item`` have respectively been renamed
+  :ref:`format_album` and :ref:`format_item`. The old names still work but each
+  triggers a warning message. :bug:`1271`
+- :ref:`Path queries <pathquery>` are automatically triggered only if the path
+  targeted by the query exists. 
Previously, just having a slash somewhere in the + query was enough, so ``beet ls AC/DC`` wouldn't work to refer to the artist. There are also lots of medium-sized features in this update: -* :doc:`/plugins/duplicates`: The command has a new ``--strict`` option - that will only report duplicates if all attributes are explicitly set. - :bug:`1000` -* :doc:`/plugins/smartplaylist`: Playlist updating should now be faster: the - plugin detects, for each playlist, whether it needs to be regenerated, - instead of obliviously regenerating all of them. The ``splupdate`` command - can now also take additional parameters that indicate the names of the - playlists to regenerate. -* :doc:`/plugins/play`: The command shows the output of the underlying player +- :doc:`/plugins/duplicates`: The command has a new ``--strict`` option that + will only report duplicates if all attributes are explicitly set. :bug:`1000` +- :doc:`/plugins/smartplaylist`: Playlist updating should now be faster: the + plugin detects, for each playlist, whether it needs to be regenerated, instead + of obliviously regenerating all of them. The ``splupdate`` command can now + also take additional parameters that indicate the names of the playlists to + regenerate. +- :doc:`/plugins/play`: The command shows the output of the underlying player command and lets you interact with it. :bug:`1321` -* The summary shown to compare duplicate albums during import now displays - the old and new filesizes. :bug:`1291` -* :doc:`/plugins/lastgenre`: Add *comedy*, *humor*, and *stand-up* as well as - a longer list of classical music genre tags to the built-in whitelist and +- The summary shown to compare duplicate albums during import now displays the + old and new filesizes. :bug:`1291` +- :doc:`/plugins/lastgenre`: Add *comedy*, *humor*, and *stand-up* as well as a + longer list of classical music genre tags to the built-in whitelist and canonicalization tree. 
:bug:`1206` :bug:`1239` :bug:`1240` -* :doc:`/plugins/web`: Add support for *cross-origin resource sharing* for - more flexible in-browser clients. Thanks to Andre Miller. :bug:`1236` - :bug:`1237` -* :doc:`plugins/mbsync`: A new ``-f/--format`` option controls the output - format when listing unrecognized items. The output is also now more helpful - by default. :bug:`1246` -* :doc:`/plugins/fetchart`: A new option, ``-n``, extracts the cover art of - all matched albums into their respective directories. Another new flag, - ``-a``, associates the extracted files with the albums in the database. - :bug:`1261` -* :doc:`/plugins/info`: A new option, ``-i``, can display only a specified +- :doc:`/plugins/web`: Add support for *cross-origin resource sharing* for more + flexible in-browser clients. Thanks to Andre Miller. :bug:`1236` :bug:`1237` +- :doc:`plugins/mbsync`: A new ``-f/--format`` option controls the output format + when listing unrecognized items. The output is also now more helpful by + default. :bug:`1246` +- :doc:`/plugins/fetchart`: A new option, ``-n``, extracts the cover art of all + matched albums into their respective directories. Another new flag, ``-a``, + associates the extracted files with the albums in the database. :bug:`1261` +- :doc:`/plugins/info`: A new option, ``-i``, can display only a specified subset of properties. :bug:`1287` -* The number of missing/unmatched tracks is shown during import. :bug:`1088` -* :doc:`/plugins/permissions`: The plugin now also adjusts the permissions of +- The number of missing/unmatched tracks is shown during import. :bug:`1088` +- :doc:`/plugins/permissions`: The plugin now also adjusts the permissions of the directories. (Previously, it only affected files.) :bug:`1308` :bug:`1324` -* :doc:`/plugins/ftintitle`: You can now configure the format that the plugin +- :doc:`/plugins/ftintitle`: You can now configure the format that the plugin uses to add the artist to the title. Thanks to :user:`amishb`. 
:bug:`1377` And many little fixes and improvements: -* :doc:`/plugins/replaygain`: Stop applying replaygain directly to source files +- :doc:`/plugins/replaygain`: Stop applying replaygain directly to source files when using the mp3gain backend. :bug:`1316` -* Path queries are case-sensitive on non-Windows OSes. :bug:`1165` -* :doc:`/plugins/lyrics`: Silence a warning about insecure requests in the new +- Path queries are case-sensitive on non-Windows OSes. :bug:`1165` +- :doc:`/plugins/lyrics`: Silence a warning about insecure requests in the new MusixMatch backend. :bug:`1204` -* Fix a crash when ``beet`` is invoked without arguments. :bug:`1205` +- Fix a crash when ``beet`` is invoked without arguments. :bug:`1205` :bug:`1207` -* :doc:`/plugins/fetchart`: Do not attempt to import directories as album art. +- :doc:`/plugins/fetchart`: Do not attempt to import directories as album art. :bug:`1177` :bug:`1211` -* :doc:`/plugins/mpdstats`: Avoid double-counting some play events. :bug:`773` +- :doc:`/plugins/mpdstats`: Avoid double-counting some play events. :bug:`773` :bug:`1212` -* Fix a crash when the importer deals with Unicode metadata in ``--pretend`` +- Fix a crash when the importer deals with Unicode metadata in ``--pretend`` mode. :bug:`1214` -* :doc:`/plugins/smartplaylist`: Fix ``album_query`` so that individual files +- :doc:`/plugins/smartplaylist`: Fix ``album_query`` so that individual files are added to the playlist instead of directories. :bug:`1225` -* Remove the ``beatport`` plugin. `Beatport`_ has shut off public access to - their API and denied our request for an account. We have not heard from the - company since 2013, so we are assuming access will not be restored. -* Incremental imports now (once again) show a "skipped N directories" message. -* :doc:`/plugins/embedart`: Handle errors in ImageMagick's output. :bug:`1241` -* :doc:`/plugins/keyfinder`: Parse the underlying tool's output more robustly. +- Remove the ``beatport`` plugin. 
Beatport_ has shut off public access to their + API and denied our request for an account. We have not heard from the company + since 2013, so we are assuming access will not be restored. +- Incremental imports now (once again) show a "skipped N directories" message. +- :doc:`/plugins/embedart`: Handle errors in ImageMagick's output. :bug:`1241` +- :doc:`/plugins/keyfinder`: Parse the underlying tool's output more robustly. :bug:`1248` -* :doc:`/plugins/embedart`: We now show a comprehensible error message when +- :doc:`/plugins/embedart`: We now show a comprehensible error message when ``beet embedart -f FILE`` is given a non-existent path. :bug:`1252` -* Fix a crash when a file has an unrecognized image type tag. Thanks to - Matthias Kiefer. :bug:`1260` -* :doc:`/plugins/importfeeds` and :doc:`/plugins/smartplaylist`: Automatically +- Fix a crash when a file has an unrecognized image type tag. Thanks to Matthias + Kiefer. :bug:`1260` +- :doc:`/plugins/importfeeds` and :doc:`/plugins/smartplaylist`: Automatically create parent directories for playlist files (instead of crashing when the parent directory does not exist). :bug:`1266` -* The :ref:`write-cmd` command no longer tries to "write" non-writable fields, +- The :ref:`write-cmd` command no longer tries to "write" non-writable fields, such as the bitrate. :bug:`1268` -* The error message when MusicBrainz is not reachable on the network is now - much clearer. Thanks to Tom Jaspers. :bug:`1190` :bug:`1272` -* Improve error messages when parsing query strings with shlex. :bug:`1290` -* :doc:`/plugins/embedart`: Fix a crash that occurred when used together - with the *check* plugin. :bug:`1241` -* :doc:`/plugins/scrub`: Log an error instead of stopping when the ``beet +- The error message when MusicBrainz is not reachable on the network is now much + clearer. Thanks to Tom Jaspers. :bug:`1190` :bug:`1272` +- Improve error messages when parsing query strings with shlex. 
:bug:`1290` +- :doc:`/plugins/embedart`: Fix a crash that occurred when used together with + the *check* plugin. :bug:`1241` +- :doc:`/plugins/scrub`: Log an error instead of stopping when the ``beet scrub`` command cannot write a file. Also, avoid problems on Windows with Unicode filenames. :bug:`1297` -* :doc:`/plugins/discogs`: Handle and log more kinds of communication - errors. :bug:`1299` :bug:`1305` -* :doc:`/plugins/lastgenre`: Bugs in the `pylast` library can no longer crash +- :doc:`/plugins/discogs`: Handle and log more kinds of communication errors. + :bug:`1299` :bug:`1305` +- :doc:`/plugins/lastgenre`: Bugs in the ``pylast`` library can no longer crash beets. -* :doc:`/plugins/convert`: You can now configure the temporary directory for +- :doc:`/plugins/convert`: You can now configure the temporary directory for conversions. Thanks to :user:`autochthe`. :bug:`1382` :bug:`1383` -* :doc:`/plugins/rewrite`: Fix a regression that prevented the plugin's +- :doc:`/plugins/rewrite`: Fix a regression that prevented the plugin's rewriting from applying to album-level fields like ``$albumartist``. :bug:`1393` -* :doc:`/plugins/play`: The plugin now sorts items according to the +- :doc:`/plugins/play`: The plugin now sorts items according to the configuration in album mode. -* :doc:`/plugins/fetchart`: The name for extracted art files is taken from the +- :doc:`/plugins/fetchart`: The name for extracted art files is taken from the ``art_filename`` configuration option. :bug:`1258` -* When there's a parse error in a query (for example, when you type a - malformed date in a :ref:`date query <datequery>`), beets now stops with an - error instead of silently ignoring the query component. -* :doc:`/plugins/smartplaylist`: Stream-friendly smart playlists. - The ``splupdate`` command can now also add a URL-encodable prefix to every - path in the playlist file. 
+- When there's a parse error in a query (for example, when you type a malformed + date in a :ref:`date query <datequery>`), beets now stops with an error + instead of silently ignoring the query component. +- :doc:`/plugins/smartplaylist`: Stream-friendly smart playlists. The + ``splupdate`` command can now also add a URL-encodable prefix to every path in + the playlist file. For developers: -* The ``database_change`` event now sends the item or album that is subject to - a change. -* The ``OptionParser`` is now a ``CommonOptionsParser`` that offers facilities +- The ``database_change`` event now sends the item or album that is subject to a + change. +- The ``OptionParser`` is now a ``CommonOptionsParser`` that offers facilities for adding usual options (``--album``, ``--path`` and ``--format``). See :ref:`add_subcommands`. :bug:`1271` -* The logging system in beets has been overhauled. Plugins now each have their +- The logging system in beets has been overhauled. Plugins now each have their own logger, which helps by automatically adjusting the verbosity level in - import mode and by prefixing the plugin's name. Logging levels are - dynamically set when a plugin is called, depending on how it is called - (import stage, event or direct command). Finally, logging calls can (and - should!) use modern ``{}``-style string formatting lazily. See - :ref:`plugin-logging` in the plugin API docs. -* A new ``import_task_created`` event lets you manipulate import tasks + import mode and by prefixing the plugin's name. Logging levels are dynamically + set when a plugin is called, depending on how it is called (import stage, + event or direct command). Finally, logging calls can (and should!) use modern + ``{}``-style string formatting lazily. See :ref:`plugin-logging` in the plugin + API docs. +- A new ``import_task_created`` event lets you manipulate import tasks immediately after they are initialized. 
It's also possible to replace the originally created tasks by returning new ones using this event. @@ -2846,12 +2981,12 @@ For developers: This version adds a healthy helping of new features and fixes a critical MPEG-4--related bug. There are more lyrics sources, there new plugins for -managing permissions and integrating with `Plex`_, and the importer has a new +managing permissions and integrating with Plex_, and the importer has a new ``--pretend`` flag that shows which music *would* be imported. One backwards-compatibility note: the :doc:`/plugins/lyrics` now requires the -`requests`_ library. If you use this plugin, you will need to install the -library by typing ``pip install requests`` or the equivalent for your OS. +requests_ library. If you use this plugin, you will need to install the library +by typing ``pip install requests`` or the equivalent for your OS. Also, as an advance warning, this will be one of the last releases to support Python 2.6. If you have a system that cannot run Python 2.7, please consider @@ -2859,180 +2994,178 @@ upgrading soon. The new features are: -* A new :doc:`/plugins/permissions` makes it easy to fix permissions on music +- A new :doc:`/plugins/permissions` makes it easy to fix permissions on music files as they are imported. Thanks to :user:`xsteadfastx`. :bug:`1098` -* A new :doc:`/plugins/plexupdate` lets you notify a `Plex`_ server when the +- A new :doc:`/plugins/plexupdate` lets you notify a Plex_ server when the database changes. Thanks again to xsteadfastx. :bug:`1120` -* The :ref:`import-cmd` command now has a ``--pretend`` flag that lists the +- The :ref:`import-cmd` command now has a ``--pretend`` flag that lists the files that will be imported. Thanks to :user:`mried`. :bug:`1162` -* :doc:`/plugins/lyrics`: Add `Musixmatch`_ source and introduce a new - ``sources`` config option that lets you choose exactly where to look for - lyrics and in which order. 
-* :doc:`/plugins/lyrics`: Add Brazilian and Spanish sources to Google custom
+- :doc:`/plugins/lyrics`: Add Musixmatch_ source and introduce a new ``sources``
+  config option that lets you choose exactly where to look for lyrics and in
+  which order.
+- :doc:`/plugins/lyrics`: Add Brazilian and Spanish sources to Google custom
   search engine.
-* Add a warning when importing a directory that contains no music. :bug:`1116`
+- Add a warning when importing a directory that contains no music. :bug:`1116`
   :bug:`1127`
-* :doc:`/plugins/zero`: Can now remove embedded images. :bug:`1129` :bug:`1100`
-* The :ref:`config-cmd` command can now be used to edit the configuration even
+- :doc:`/plugins/zero`: Can now remove embedded images. :bug:`1129` :bug:`1100`
+- The :ref:`config-cmd` command can now be used to edit the configuration even
   when it has syntax errors. :bug:`1123` :bug:`1128`
-* :doc:`/plugins/lyrics`: Added a new ``force`` config option. :bug:`1150`
+- :doc:`/plugins/lyrics`: Added a new ``force`` config option. :bug:`1150`
 
 As usual, there are loads of little fixes and improvements:
 
-* Fix a new crash with the latest version of Mutagen (1.26).
-* :doc:`/plugins/lyrics`: Avoid fetching truncated lyrics from the Google
-  backed by merging text blocks separated by empty ``<div>`` tags before
-  scraping.
-* We now print a better error message when the database file is corrupted.
-* :doc:`/plugins/discogs`: Only prompt for authentication when running the
+- Fix a new crash with the latest version of Mutagen (1.26).
+- :doc:`/plugins/lyrics`: Avoid fetching truncated lyrics from the Google backend
+  by merging text blocks separated by empty ``<div>`` tags before scraping.
+- We now print a better error message when the database file is corrupted.
+- :doc:`/plugins/discogs`: Only prompt for authentication when running the
:bug:`1123` -* When deleting fields with the :ref:`modify-cmd` command, do not crash when - the field cannot be removed (i.e., when it does not exist, when it is a - built-in field, or when it is a computed field). :bug:`1124` -* The deprecated ``echonest_tempo`` plugin has been removed. Please use the +- When deleting fields with the :ref:`modify-cmd` command, do not crash when the + field cannot be removed (i.e., when it does not exist, when it is a built-in + field, or when it is a computed field). :bug:`1124` +- The deprecated ``echonest_tempo`` plugin has been removed. Please use the ``echonest`` plugin instead. -* ``echonest`` plugin: Fingerprint-based lookup has been removed in - accordance with `API changes`_. :bug:`1121` -* ``echonest`` plugin: Avoid a crash when the song has no duration - information. :bug:`896` -* :doc:`/plugins/lyrics`: Avoid a crash when retrieving non-ASCII lyrics from +- ``echonest`` plugin: Fingerprint-based lookup has been removed in accordance + with `API changes`_. :bug:`1121` +- ``echonest`` plugin: Avoid a crash when the song has no duration information. + :bug:`896` +- :doc:`/plugins/lyrics`: Avoid a crash when retrieving non-ASCII lyrics from the Google backend. :bug:`1135` :bug:`1136` -* :doc:`/plugins/smartplaylist`: Sort specifiers are now respected in queries. +- :doc:`/plugins/smartplaylist`: Sort specifiers are now respected in queries. Thanks to :user:`djl`. :bug:`1138` :bug:`1137` -* :doc:`/plugins/ftintitle` and :doc:`/plugins/lyrics`: Featuring artists can - now be detected when they use the Spanish word *con*. :bug:`1060` - :bug:`1143` -* :doc:`/plugins/mbcollection`: Fix an "HTTP 400" error caused by a change in +- :doc:`/plugins/ftintitle` and :doc:`/plugins/lyrics`: Featuring artists can + now be detected when they use the Spanish word *con*. :bug:`1060` :bug:`1143` +- :doc:`/plugins/mbcollection`: Fix an "HTTP 400" error caused by a change in the MusicBrainz API. 
:bug:`1152` -* The ``%`` and ``_`` characters in path queries do not invoke their - special SQL meaning anymore. :bug:`1146` -* :doc:`/plugins/convert`: Command-line argument construction now works - on Windows. Thanks to :user:`mluds`. :bug:`1026` :bug:`1157` :bug:`1158` -* :doc:`/plugins/embedart`: Fix an erroneous missing-art error on Windows. +- The ``%`` and ``_`` characters in path queries do not invoke their special SQL + meaning anymore. :bug:`1146` +- :doc:`/plugins/convert`: Command-line argument construction now works on + Windows. Thanks to :user:`mluds`. :bug:`1026` :bug:`1157` :bug:`1158` +- :doc:`/plugins/embedart`: Fix an erroneous missing-art error on Windows. Thanks to :user:`mluds`. :bug:`1163` -* :doc:`/plugins/importadded`: Now works with in-place and symlinked imports. +- :doc:`/plugins/importadded`: Now works with in-place and symlinked imports. :bug:`1170` -* :doc:`/plugins/ftintitle`: The plugin is now quiet when it runs as part of - the import process. Thanks to :user:`Freso`. :bug:`1176` :bug:`1172` -* :doc:`/plugins/ftintitle`: Fix weird behavior when the same artist appears +- :doc:`/plugins/ftintitle`: The plugin is now quiet when it runs as part of the + import process. Thanks to :user:`Freso`. :bug:`1176` :bug:`1172` +- :doc:`/plugins/ftintitle`: Fix weird behavior when the same artist appears twice in the artist string. Thanks to Marc Addeo. :bug:`1179` :bug:`1181` -* :doc:`/plugins/lastgenre`: Match songs more robustly when they contain - dashes. Thanks to :user:`djl`. :bug:`1156` -* The :ref:`config-cmd` command can now use ``$EDITOR`` variables with +- :doc:`/plugins/lastgenre`: Match songs more robustly when they contain dashes. + Thanks to :user:`djl`. :bug:`1156` +- The :ref:`config-cmd` command can now use ``$EDITOR`` variables with arguments. -.. _API changes: https://web.archive.org/web/20160814092627/https://developer.echonest.com/forums/thread/3650 -.. _Plex: https://plex.tv/ +.. 
_api changes: https://web.archive.org/web/20160814092627/https://developer.echonest.com/forums/thread/3650 + .. _musixmatch: https://www.musixmatch.com/ +.. _plex: https://plex.tv/ + 1.3.9 (November 17, 2014) ------------------------- This release adds two new standard plugins to beets: one for synchronizing -Last.fm listening data and one for integrating with Linux desktops. And at -long last, imports can now create symbolic links to music files instead of -copying or moving them. We also gained the ability to search for album art on -the iTunes Store and a new way to compute ReplayGain levels. +Last.fm listening data and one for integrating with Linux desktops. And at long +last, imports can now create symbolic links to music files instead of copying or +moving them. We also gained the ability to search for album art on the iTunes +Store and a new way to compute ReplayGain levels. The major new features are: -* A new :doc:`/plugins/lastimport` lets you download your play count data from +- A new :doc:`/plugins/lastimport` lets you download your play count data from Last.fm into a flexible attribute. Thanks to Rafael Bodill. -* A new :doc:`/plugins/freedesktop` creates metadata files for +- A new :doc:`/plugins/freedesktop` creates metadata files for Freedesktop.org--compliant file managers. Thanks to :user:`kerobaros`. :bug:`1056`, :bug:`707` -* A new :ref:`link` option in the ``import`` section creates symbolic links +- A new :ref:`link` option in the ``import`` section creates symbolic links during import instead of moving or copying. Thanks to Rovanion Luckey. :bug:`710`, :bug:`114` -* :doc:`/plugins/fetchart`: You can now search for art on the iTunes Store. +- :doc:`/plugins/fetchart`: You can now search for art on the iTunes Store. There's also a new ``sources`` config option that lets you choose exactly where to look for images and in which order. -* :doc:`/plugins/replaygain`: A new Python Audio Tools backend was added. - Thanks to Francesco Rubino. 
:bug:`1070` -* :doc:`/plugins/embedart`: You can now automatically check that new art looks - similar to existing art---ensuring that you only get a better "version" of - the art you already have. See :ref:`image-similarity-check`. -* :doc:`/plugins/ftintitle`: The plugin now runs automatically on import. To +- :doc:`/plugins/replaygain`: A new Python Audio Tools backend was added. Thanks + to Francesco Rubino. :bug:`1070` +- :doc:`/plugins/embedart`: You can now automatically check that new art looks + similar to existing art---ensuring that you only get a better "version" of the + art you already have. See :ref:`image-similarity-check`. +- :doc:`/plugins/ftintitle`: The plugin now runs automatically on import. To disable this, unset the ``auto`` config flag. There are also core improvements and other substantial additions: -* The ``media`` attribute is now a *track-level field* instead of an - album-level one. This field stores the delivery mechanism for the music, so - in its album-level incarnation, it could not represent heterogeneous - releases---for example, an album consisting of a CD and a DVD. Now, tracks - accurately indicate the media they appear on. Thanks to Heinz Wiesinger. -* Re-imports of your existing music (see :ref:`reimport`) now preserve its - added date and flexible attributes. Thanks to Stig Inge Lea Bjørnsen. -* Slow queries, such as those over flexible attributes, should now be much +- The ``media`` attribute is now a *track-level field* instead of an album-level + one. This field stores the delivery mechanism for the music, so in its + album-level incarnation, it could not represent heterogeneous releases---for + example, an album consisting of a CD and a DVD. Now, tracks accurately + indicate the media they appear on. Thanks to Heinz Wiesinger. +- Re-imports of your existing music (see :ref:`reimport`) now preserve its added + date and flexible attributes. Thanks to Stig Inge Lea Bjørnsen. 
+- Slow queries, such as those over flexible attributes, should now be much faster when used with certain commands---notably, the :doc:`/plugins/play`. -* :doc:`/plugins/bpd`: Add a new configuration option for setting the default +- :doc:`/plugins/bpd`: Add a new configuration option for setting the default volume. Thanks to IndiGit. -* :doc:`/plugins/embedart`: A new ``ifempty`` config option lets you only - embed album art when no album art is present. Thanks to kerobaros. -* :doc:`/plugins/discogs`: Authenticate with the Discogs server. The plugin - now requires a Discogs account due to new API restrictions. Thanks to +- :doc:`/plugins/embedart`: A new ``ifempty`` config option lets you only embed + album art when no album art is present. Thanks to kerobaros. +- :doc:`/plugins/discogs`: Authenticate with the Discogs server. The plugin now + requires a Discogs account due to new API restrictions. Thanks to :user:`multikatt`. :bug:`1027`, :bug:`1040` And countless little improvements and fixes: -* Standard cover art in APEv2 metadata is now supported. Thanks to Matthias +- Standard cover art in APEv2 metadata is now supported. Thanks to Matthias Kiefer. :bug:`1042` -* :doc:`/plugins/convert`: Avoid a crash when embedding cover art - fails. -* :doc:`/plugins/mpdstats`: Fix an error on start (introduced in the previous +- :doc:`/plugins/convert`: Avoid a crash when embedding cover art fails. +- :doc:`/plugins/mpdstats`: Fix an error on start (introduced in the previous version). Thanks to Zach Denton. -* :doc:`/plugins/convert`: The ``--yes`` command-line flag no longer expects - an argument. -* :doc:`/plugins/play`: Remove the temporary .m3u file after sending it to - the player. -* The importer no longer tries to highlight partial differences in numeric +- :doc:`/plugins/convert`: The ``--yes`` command-line flag no longer expects an + argument. +- :doc:`/plugins/play`: Remove the temporary .m3u file after sending it to the + player. 
+- The importer no longer tries to highlight partial differences in numeric quantities (track numbers and durations), which was often confusing. -* Date-based queries that are malformed (not parse-able) no longer crash - beets and instead fail silently. -* :doc:`/plugins/duplicates`: Emit an error when the ``checksum`` config - option is set incorrectly. -* The migration from pre-1.1, non-YAML configuration files has been removed. - If you need to upgrade an old config file, use an older version of beets +- Date-based queries that are malformed (not parse-able) no longer crash beets + and instead fail silently. +- :doc:`/plugins/duplicates`: Emit an error when the ``checksum`` config option + is set incorrectly. +- The migration from pre-1.1, non-YAML configuration files has been removed. If + you need to upgrade an old config file, use an older version of beets temporarily. -* :doc:`/plugins/discogs`: Recover from HTTP errors when communicating with - the Discogs servers. Thanks to Dustin Rodriguez. -* :doc:`/plugins/embedart`: Do not log "embedding album art into..." messages +- :doc:`/plugins/discogs`: Recover from HTTP errors when communicating with the + Discogs servers. Thanks to Dustin Rodriguez. +- :doc:`/plugins/embedart`: Do not log "embedding album art into..." messages during the import process. -* Fix a crash in the autotagger when files had only whitespace in their +- Fix a crash in the autotagger when files had only whitespace in their metadata. -* :doc:`/plugins/play`: Fix a potential crash when the command outputs special +- :doc:`/plugins/play`: Fix a potential crash when the command outputs special characters. :bug:`1041` -* :doc:`/plugins/web`: Queries typed into the search field are now treated as +- :doc:`/plugins/web`: Queries typed into the search field are now treated as separate query components. :bug:`1045` -* Date tags that use slashes instead of dashes as separators are now - interpreted correctly. 
And WMA (ASF) files now map the ``comments`` field to - the "Description" tag (in addition to "WM/Comments"). Thanks to Matthias - Kiefer. :bug:`1043` -* :doc:`/plugins/embedart`: Avoid resizing the image multiple times when - embedding into an album. Thanks to :user:`kerobaros`. :bug:`1028`, - :bug:`1036` -* :doc:`/plugins/discogs`: Avoid a situation where a trailing comma could be +- Date tags that use slashes instead of dashes as separators are now interpreted + correctly. And WMA (ASF) files now map the ``comments`` field to the + "Description" tag (in addition to "WM/Comments"). Thanks to Matthias Kiefer. + :bug:`1043` +- :doc:`/plugins/embedart`: Avoid resizing the image multiple times when + embedding into an album. Thanks to :user:`kerobaros`. :bug:`1028`, :bug:`1036` +- :doc:`/plugins/discogs`: Avoid a situation where a trailing comma could be appended to some artist names. :bug:`1049` -* The output of the :ref:`stats-cmd` command is slightly different: the +- The output of the :ref:`stats-cmd` command is slightly different: the approximate size is now marked as such, and the total number of seconds only appears in exact mode. -* :doc:`/plugins/convert`: A new ``copy_album_art`` option puts images - alongside converted files. Thanks to Ángel Alonso. :bug:`1050`, :bug:`1055` -* There is no longer a "conflict" between two plugins that declare the same +- :doc:`/plugins/convert`: A new ``copy_album_art`` option puts images alongside + converted files. Thanks to Ángel Alonso. :bug:`1050`, :bug:`1055` +- There is no longer a "conflict" between two plugins that declare the same field with the same type. Thanks to Peter Schnebel. :bug:`1059` :bug:`1061` -* :doc:`/plugins/chroma`: Limit the number of releases and recordings fetched - as the result of an Acoustid match to avoid extremely long processing times - for very popular music. 
:bug:`1068` -* Fix an issue where modifying an album's field without actually changing it +- :doc:`/plugins/chroma`: Limit the number of releases and recordings fetched as + the result of an Acoustid match to avoid extremely long processing times for + very popular music. :bug:`1068` +- Fix an issue where modifying an album's field without actually changing it would not update the corresponding tracks to bring differing tracks back in line with the album. :bug:`856` -* ``echonest`` plugin: When communicating with the Echo Nest servers - fails repeatedly, log an error instead of exiting. :bug:`1096` -* :doc:`/plugins/lyrics`: Avoid an error when the Google source returns a - result without a title. Thanks to Alberto Leal. :bug:`1097` -* Importing an archive will no longer leave temporary files behind in - ``/tmp``. Thanks to :user:`multikatt`. :bug:`1067`, :bug:`1091` +- ``echonest`` plugin: When communicating with the Echo Nest servers fails + repeatedly, log an error instead of exiting. :bug:`1096` +- :doc:`/plugins/lyrics`: Avoid an error when the Google source returns a result + without a title. Thanks to Alberto Leal. :bug:`1097` +- Importing an archive will no longer leave temporary files behind in ``/tmp``. + Thanks to :user:`multikatt`. :bug:`1067`, :bug:`1091` 1.3.8 (September 17, 2014) -------------------------- @@ -3041,221 +3174,218 @@ This release has two big new chunks of functionality. Queries now support **sorting** and user-defined fields can now have **types**. If you want to see all your songs in reverse chronological order, just type -``beet list year-``. It couldn't be easier. For details, see -:ref:`query-sort`. +``beet list year-``. It couldn't be easier. For details, see :ref:`query-sort`. Flexible field types mean that some functionality that has previously only worked for built-in fields, like range queries, can now work with plugin- and user-defined fields too. 
For starters, the ``echonest`` plugin and -:doc:`/plugins/mpdstats` now mark the types of the fields they provide---so -you can now say, for example, ``beet ls liveness:0.5..1.5`` for the Echo Nest +:doc:`/plugins/mpdstats` now mark the types of the fields they provide---so you +can now say, for example, ``beet ls liveness:0.5..1.5`` for the Echo Nest "liveness" attribute. The :doc:`/plugins/types` makes it easy to specify field types in your config file. One upgrade note: if you use the :doc:`/plugins/discogs`, you will need to -upgrade the Discogs client library to use this version. Just type -``pip install -U discogs-client``. +upgrade the Discogs client library to use this version. Just type ``pip install +-U discogs-client``. Other new features: -* :doc:`/plugins/info`: Target files can now be specified through library +- :doc:`/plugins/info`: Target files can now be specified through library queries (in addition to filenames). The ``--library`` option prints library - fields instead of tags. Multiple files can be summarized together with the - new ``--summarize`` option. -* :doc:`/plugins/mbcollection`: A new option lets you automatically update - your collection on import. Thanks to Olin Gay. -* :doc:`/plugins/convert`: A new ``never_convert_lossy_files`` option can + fields instead of tags. Multiple files can be summarized together with the new + ``--summarize`` option. +- :doc:`/plugins/mbcollection`: A new option lets you automatically update your + collection on import. Thanks to Olin Gay. +- :doc:`/plugins/convert`: A new ``never_convert_lossy_files`` option can prevent lossy transcoding. Thanks to Simon Kohlmeyer. -* :doc:`/plugins/convert`: A new ``--yes`` command-line flag skips the +- :doc:`/plugins/convert`: A new ``--yes`` command-line flag skips the confirmation. Still more fixes and little improvements: -* Invalid state files don't crash the importer. 
-* :doc:`/plugins/lyrics`: Only strip featured artists and - parenthesized title suffixes if no lyrics for the original artist and - title were found. -* Fix a crash when reading some files with missing tags. -* :doc:`/plugins/discogs`: Compatibility with the new 2.0 version of the - `discogs_client`_ Python library. If you were using the old version, you will +- Invalid state files don't crash the importer. +- :doc:`/plugins/lyrics`: Only strip featured artists and parenthesized title + suffixes if no lyrics for the original artist and title were found. +- Fix a crash when reading some files with missing tags. +- :doc:`/plugins/discogs`: Compatibility with the new 2.0 version of the + discogs_client_ Python library. If you were using the old version, you will need to upgrade to the latest version of the library to use the - correspondingly new version of the plugin (e.g., with - ``pip install -U discogs-client``). Thanks to Andriy Kohut. -* Fix a crash when writing files that can't be read. Thanks to Jocelyn De La + correspondingly new version of the plugin (e.g., with ``pip install -U + discogs-client``). Thanks to Andriy Kohut. +- Fix a crash when writing files that can't be read. Thanks to Jocelyn De La Rosa. -* The :ref:`stats-cmd` command now counts album artists. The album count also +- The :ref:`stats-cmd` command now counts album artists. The album count also more accurately reflects the number of albums in the database. -* :doc:`/plugins/convert`: Avoid crashes when tags cannot be written to newly +- :doc:`/plugins/convert`: Avoid crashes when tags cannot be written to newly converted files. -* Formatting templates with item data no longer confusingly shows album-level +- Formatting templates with item data no longer confusingly shows album-level data when the two are inconsistent. 
-* Resuming imports and beginning incremental imports should now be much faster +- Resuming imports and beginning incremental imports should now be much faster when there is a lot of previously-imported music to skip. -* :doc:`/plugins/lyrics`: Remove ``<script>`` tags from scraped lyrics. Thanks +- :doc:`/plugins/lyrics`: Remove ``<script>`` tags from scraped lyrics. Thanks to Bombardment. -* :doc:`/plugins/play`: Add a ``relative_to`` config option. Thanks to +- :doc:`/plugins/play`: Add a ``relative_to`` config option. Thanks to BrainDamage. -* Fix a crash when a MusicBrainz release has zero tracks. -* The ``--version`` flag now works as an alias for the ``version`` command. -* :doc:`/plugins/lastgenre`: Remove some unhelpful genres from the default +- Fix a crash when a MusicBrainz release has zero tracks. +- The ``--version`` flag now works as an alias for the ``version`` command. +- :doc:`/plugins/lastgenre`: Remove some unhelpful genres from the default whitelist. Thanks to gwern. -* :doc:`/plugins/importfeeds`: A new ``echo`` output mode prints files' paths - to standard error. Thanks to robotanarchy. -* :doc:`/plugins/replaygain`: Restore some error handling when ``mp3gain`` +- :doc:`/plugins/importfeeds`: A new ``echo`` output mode prints files' paths to + standard error. Thanks to robotanarchy. +- :doc:`/plugins/replaygain`: Restore some error handling when ``mp3gain`` output cannot be parsed. The verbose log now contains the bad tool output in this case. -* :doc:`/plugins/convert`: Fix filename extensions when converting +- :doc:`/plugins/convert`: Fix filename extensions when converting automatically. -* The ``write`` plugin event allows plugins to change the tags that are - written to a media file. -* :doc:`/plugins/zero`: Do not delete database values; only media file - tags are affected. +- The ``write`` plugin event allows plugins to change the tags that are written + to a media file. 
+- :doc:`/plugins/zero`: Do not delete database values; only media file tags are + affected. .. _discogs_client: https://github.com/discogs/discogs_client 1.3.7 (August 22, 2014) ----------------------- -This release of beets fixes all the bugs, and you can be confident that you -will never again find any bugs in beets, ever. -It also adds support for plain old AIFF files and adds three more plugins, -including a nifty one that lets you measure a song's tempo by tapping out the -beat on your keyboard. -The importer deals more elegantly with duplicates and you can broaden your -cover art search to the entire web with Google Image Search. +This release of beets fixes all the bugs, and you can be confident that you will +never again find any bugs in beets, ever. It also adds support for plain old +AIFF files and adds three more plugins, including a nifty one that lets you +measure a song's tempo by tapping out the beat on your keyboard. The importer +deals more elegantly with duplicates and you can broaden your cover art search +to the entire web with Google Image Search. The big new features are: -* Support for AIFF files. Tags are stored as ID3 frames in one of the file's - IFF chunks. Thanks to Evan Purkhiser for contributing support to `Mutagen`_. -* The new :doc:`/plugins/importadded` reads files' modification times to set +- Support for AIFF files. Tags are stored as ID3 frames in one of the file's IFF + chunks. Thanks to Evan Purkhiser for contributing support to Mutagen_. +- The new :doc:`/plugins/importadded` reads files' modification times to set their "added" date. Thanks to Stig Inge Lea Bjørnsen. -* The new :doc:`/plugins/bpm` lets you manually measure the tempo of a playing +- The new :doc:`/plugins/bpm` lets you manually measure the tempo of a playing song. Thanks to aroquen. -* The new :doc:`/plugins/spotify` generates playlists for your `Spotify`_ - account. Thanks to Olin Gay. 
-* A new :ref:`required` configuration option for the importer skips matches - that are missing certain data. Thanks to oprietop. -* When the importer detects duplicates, it now shows you some details about - the potentially-replaced music so you can make an informed decision. Thanks - to Howard Jones. -* :doc:`/plugins/fetchart`: You can now optionally search for cover art on +- The new :doc:`/plugins/spotify` generates playlists for your Spotify_ account. + Thanks to Olin Gay. +- A new :ref:`required` configuration option for the importer skips matches that + are missing certain data. Thanks to oprietop. +- When the importer detects duplicates, it now shows you some details about the + potentially-replaced music so you can make an informed decision. Thanks to + Howard Jones. +- :doc:`/plugins/fetchart`: You can now optionally search for cover art on Google Image Search. Thanks to Lemutar. -* A new :ref:`asciify-paths` configuration option replaces all non-ASCII +- A new :ref:`asciify-paths` configuration option replaces all non-ASCII characters in paths. -.. _Mutagen: https://github.com/quodlibet/mutagen -.. _Spotify: https://www.spotify.com/ +.. _mutagen: https://github.com/quodlibet/mutagen + +.. _spotify: https://www.spotify.com/ And the multitude of little improvements and fixes: -* Compatibility with the latest version of `Mutagen`_, 1.23. -* :doc:`/plugins/web`: Lyrics now display readably with correct line breaks. +- Compatibility with the latest version of Mutagen_, 1.23. +- :doc:`/plugins/web`: Lyrics now display readably with correct line breaks. Also, the detail view scrolls to reveal all of the lyrics. Thanks to Meet Udeshi. -* :doc:`/plugins/play`: The ``command`` config option can now contain - arguments (rather than just an executable). Thanks to Alessandro Ghedini. 
-* Fix an error when using the :ref:`modify-cmd` command to remove a flexible +- :doc:`/plugins/play`: The ``command`` config option can now contain arguments + (rather than just an executable). Thanks to Alessandro Ghedini. +- Fix an error when using the :ref:`modify-cmd` command to remove a flexible attribute. Thanks to Pierre Rust. -* :doc:`/plugins/info`: The command now shows audio properties (e.g., bitrate) +- :doc:`/plugins/info`: The command now shows audio properties (e.g., bitrate) in addition to metadata. Thanks Alessandro Ghedini. -* Avoid a crash on Windows when writing to files with special characters in +- Avoid a crash on Windows when writing to files with special characters in their names. -* :doc:`/plugins/play`: Playing albums now generates filenames by default (as +- :doc:`/plugins/play`: Playing albums now generates filenames by default (as opposed to directories) for better compatibility. The ``use_folders`` option restores the old behavior. Thanks to Lucas Duailibe. -* Fix an error when importing an empty directory with the ``--flat`` option. -* :doc:`/plugins/mpdstats`: The last song in a playlist is now correctly - counted as played. Thanks to Johann Klähn. -* :doc:`/plugins/zero`: Prevent accidental nulling of dangerous fields (IDs - and paths). Thanks to brunal. -* The :ref:`remove-cmd` command now shows the paths of files that will be +- Fix an error when importing an empty directory with the ``--flat`` option. +- :doc:`/plugins/mpdstats`: The last song in a playlist is now correctly counted + as played. Thanks to Johann Klähn. +- :doc:`/plugins/zero`: Prevent accidental nulling of dangerous fields (IDs and + paths). Thanks to brunal. +- The :ref:`remove-cmd` command now shows the paths of files that will be deleted. Thanks again to brunal. -* Don't display changes for fields that are not in the restricted field set. - This fixes :ref:`write-cmd` showing changes for fields that are not written - to the file. 
-* The :ref:`write-cmd` command avoids displaying the item name if there are - no changes for it. -* When using both the :doc:`/plugins/convert` and the :doc:`/plugins/scrub`, +- Don't display changes for fields that are not in the restricted field set. + This fixes :ref:`write-cmd` showing changes for fields that are not written to + the file. +- The :ref:`write-cmd` command avoids displaying the item name if there are no + changes for it. +- When using both the :doc:`/plugins/convert` and the :doc:`/plugins/scrub`, avoid scrubbing the source file of conversions. (Fix a regression introduced in the previous release.) -* :doc:`/plugins/replaygain`: Logging is now quieter during import. Thanks to +- :doc:`/plugins/replaygain`: Logging is now quieter during import. Thanks to Yevgeny Bezman. -* :doc:`/plugins/fetchart`: When loading art from the filesystem, we now +- :doc:`/plugins/fetchart`: When loading art from the filesystem, we now prioritize covers with more keywords in them. This means that ``cover-front.jpg`` will now be taken before ``cover-back.jpg`` because it contains two keywords rather than one. Thanks to Fabrice Laporte. -* :doc:`/plugins/lastgenre`: Remove duplicates from canonicalized genre lists. +- :doc:`/plugins/lastgenre`: Remove duplicates from canonicalized genre lists. Thanks again to Fabrice Laporte. -* The importer now records its progress when skipping albums. This means that +- The importer now records its progress when skipping albums. This means that incremental imports will no longer try to import albums again after you've - chosen to skip them, and erroneous invitations to resume "interrupted" - imports should be reduced. Thanks to jcassette. -* :doc:`/plugins/bucket`: You can now customize the definition of alphanumeric - "ranges" using regular expressions. And the heuristic for detecting years - has been improved. Thanks to sotho. -* Already-imported singleton tracks are skipped when resuming an - import. 
-* :doc:`/plugins/chroma`: A new ``auto`` configuration option disables + chosen to skip them, and erroneous invitations to resume "interrupted" imports + should be reduced. Thanks to jcassette. +- :doc:`/plugins/bucket`: You can now customize the definition of alphanumeric + "ranges" using regular expressions. And the heuristic for detecting years has + been improved. Thanks to sotho. +- Already-imported singleton tracks are skipped when resuming an import. +- :doc:`/plugins/chroma`: A new ``auto`` configuration option disables fingerprinting on import. Thanks to ddettrittus. -* :doc:`/plugins/convert`: A new ``--format`` option to can select the +- :doc:`/plugins/convert`: A new ``--format`` option can select the transcoding preset from the command-line. -* :doc:`/plugins/convert`: Transcoding presets can now omit their filename +- :doc:`/plugins/convert`: Transcoding presets can now omit their filename extensions (extensions default to the name of the preset). -* :doc:`/plugins/convert`: A new ``--pretend`` option lets you preview the - commands the plugin will execute without actually taking any action. Thanks - to Dietrich Daroch. +- :doc:`/plugins/convert`: A new ``--pretend`` option lets you preview the + commands the plugin will execute without actually taking any action. Thanks to + Dietrich Daroch. -* Fix a crash when a float-valued tag field only contained a ``+`` or ``-`` +- Fix a crash when a float-valued tag field only contained a ``+`` or ``-`` character. -* Fixed a regression in the core that caused the :doc:`/plugins/scrub` not to +- Fixed a regression in the core that caused the :doc:`/plugins/scrub` not to work in ``auto`` mode. Thanks to Harry Khanna. -* The :ref:`write-cmd` command now has a ``--force`` flag. Thanks again to - Harry Khanna. +- The :ref:`write-cmd` command now has a ``--force`` flag. Thanks again to Harry + Khanna. 
+- :doc:`/plugins/mbsync`: Track alignment now works with albums that have multiple copies of the same recording. Thanks to Rui Gonçalves. 1.3.6 (May 10, 2014) -------------------- -This is primarily a bugfix release, but it also brings two new plugins: one -for playing music in desktop players and another for organizing your -directories into "buckets." It also brings huge performance optimizations to -queries---your ``beet ls`` commands will now go much faster. +This is primarily a bugfix release, but it also brings two new plugins: one for +playing music in desktop players and another for organizing your directories +into "buckets." It also brings huge performance optimizations to queries---your +``beet ls`` commands will now go much faster. New features: -* The new :doc:`/plugins/play` lets you start your desktop music player with - the songs that match a query. Thanks to David Hamp-Gonsalves. -* The new :doc:`/plugins/bucket` provides a ``%bucket{}`` function for path +- The new :doc:`/plugins/play` lets you start your desktop music player with the + songs that match a query. Thanks to David Hamp-Gonsalves. +- The new :doc:`/plugins/bucket` provides a ``%bucket{}`` function for path formatting to generate folder names representing ranges of years or initial letter. Thanks to Fabrice Laporte. -* Item and album queries are much faster. -* :doc:`/plugins/ftintitle`: A new option lets you remove featured artists +- Item and album queries are much faster. +- :doc:`/plugins/ftintitle`: A new option lets you remove featured artists entirely instead of moving them to the title. Thanks to SUTJael. And those all-important bug fixes: -* :doc:`/plugins/mbsync`: Fix a regression in 1.3.5 that broke the plugin +- :doc:`/plugins/mbsync`: Fix a regression in 1.3.5 that broke the plugin entirely. 
-* :ref:`Shell completion <completion>` now searches more common paths for its +- :ref:`Shell completion <completion>` now searches more common paths for its ``bash_completion`` dependency. -* Fix encoding-related logging errors in :doc:`/plugins/convert` and +- Fix encoding-related logging errors in :doc:`/plugins/convert` and :doc:`/plugins/replaygain`. -* :doc:`/plugins/replaygain`: Suppress a deprecation warning emitted by later +- :doc:`/plugins/replaygain`: Suppress a deprecation warning emitted by later versions of PyGI. -* Fix a crash when reading files whose iTunes SoundCheck tags contain - non-ASCII characters. -* The ``%if{}`` template function now appropriately interprets the condition - as false when it contains the string "false". Thanks to Ayberk Yilmaz. -* :doc:`/plugins/convert`: Fix conversion for files that include a video - stream by ignoring it. Thanks to brunal. -* :doc:`/plugins/fetchart`: Log an error instead of crashing when tag +- Fix a crash when reading files whose iTunes SoundCheck tags contain non-ASCII + characters. +- The ``%if{}`` template function now appropriately interprets the condition as + false when it contains the string "false". Thanks to Ayberk Yilmaz. +- :doc:`/plugins/convert`: Fix conversion for files that include a video stream + by ignoring it. Thanks to brunal. +- :doc:`/plugins/fetchart`: Log an error instead of crashing when tag manipulation fails. -* :doc:`/plugins/convert`: Log an error instead of crashing when - embedding album art fails. -* :doc:`/plugins/convert`: Embed cover art into converted files. - Previously they were embedded into the source files. -* New plugin event: `before_item_moved`. Thanks to Robert Speicher. +- :doc:`/plugins/convert`: Log an error instead of crashing when embedding album + art fails. +- :doc:`/plugins/convert`: Embed cover art into converted files. Previously they + were embedded into the source files. +- New plugin event: ``before_item_moved``. Thanks to Robert Speicher. 
1.3.5 (April 15, 2014) ---------------------- @@ -3265,59 +3395,59 @@ support for tracking and calculating musical keys, the ReplayGain plugin was expanded to work with more music formats via GStreamer, we can now import directly from compressed archives, and the lyrics plugin is more robust. -One note for upgraders and packagers: this version of beets has a new -dependency in `enum34`_, which is a backport of the new `enum`_ standard -library module. +One note for upgraders and packagers: this version of beets has a new dependency +in enum34_, which is a backport of the new enum_ standard library module. The major new features are: -* Beets can now import `zip`, `tar`, and `rar` archives. Just type ``beet +- Beets can now import ``zip``, ``tar``, and ``rar`` archives. Just type ``beet import music.zip`` to have beets transparently extract the files to import. -* :doc:`/plugins/replaygain`: Added support for calculating ReplayGain values +- :doc:`/plugins/replaygain`: Added support for calculating ReplayGain values with GStreamer as well the mp3gain program. This enables ReplayGain calculation for any audio format. Thanks to Yevgeny Bezman. -* :doc:`/plugins/lyrics`: Lyrics should now be found for more songs. Searching - is now sensitive to featured artists and parenthesized title suffixes. - When a song has multiple titles, lyrics from all the named songs are now +- :doc:`/plugins/lyrics`: Lyrics should now be found for more songs. Searching + is now sensitive to featured artists and parenthesized title suffixes. When a + song has multiple titles, lyrics from all the named songs are now concatenated. Thanks to Fabrice Laporte and Paul Phillips. -In particular, a full complement of features for supporting musical keys are -new in this release: +In particular, a full complement of features for supporting musical keys are new +in this release: -* A new `initial_key` field is available in the database and files' tags. 
You +- A new ``initial_key`` field is available in the database and files' tags. You can set the field manually using a command like ``beet modify initial_key=Am``. -* The ``echonest`` plugin sets the `initial_key` field if the data is +- The ``echonest`` plugin sets the ``initial_key`` field if the data is available. -* A new :doc:`/plugins/keyfinder` runs a command-line tool to get the key from - audio data and store it in the `initial_key` field. +- A new :doc:`/plugins/keyfinder` runs a command-line tool to get the key from + audio data and store it in the ``initial_key`` field. There are also many bug fixes and little enhancements: -* ``echonest`` plugin: Truncate files larger than 50MB before uploading for +- ``echonest`` plugin: Truncate files larger than 50MB before uploading for analysis. -* :doc:`/plugins/fetchart`: Fix a crash when the server does not specify a +- :doc:`/plugins/fetchart`: Fix a crash when the server does not specify a content type. Thanks to Lee Reinhardt. -* :doc:`/plugins/convert`: The ``--keep-new`` flag now works correctly - and the library includes the converted item. -* The importer now logs a message instead of crashing when errors occur while +- :doc:`/plugins/convert`: The ``--keep-new`` flag now works correctly and the + library includes the converted item. +- The importer now logs a message instead of crashing when errors occur while opening the files to be imported. -* :doc:`/plugins/embedart`: Better error messages in exceptional conditions. -* Silenced some confusing error messages when searching for a non-MusicBrainz +- :doc:`/plugins/embedart`: Better error messages in exceptional conditions. +- Silenced some confusing error messages when searching for a non-MusicBrainz ID. Using an invalid ID (of any kind---Discogs IDs can be used there too) at the "Enter ID:" importer prompt now just silently returns no results. More info is in the verbose logs. -* :doc:`/plugins/mbsync`: Fix application of album-level metadata. 
Due to a +- :doc:`/plugins/mbsync`: Fix application of album-level metadata. Due to a regression a few releases ago, only track-level metadata was being updated. -* On Windows, paths on network shares (UNC paths) no longer cause "invalid +- On Windows, paths on network shares (UNC paths) no longer cause "invalid filename" errors. -* :doc:`/plugins/replaygain`: Fix crashes when attempting to log errors. -* The :ref:`modify-cmd` command can now accept query arguments that contain = - signs. An argument is considered a query part when a : appears before any - =s. Thanks to mook. +- :doc:`/plugins/replaygain`: Fix crashes when attempting to log errors. +- The :ref:`modify-cmd` command can now accept query arguments that contain = + signs. An argument is considered a query part when a : appears before any =s. + Thanks to mook. + +.. _enum: https://docs.python.org/3.4/library/enum.html .. _enum34: https://pypi.python.org/pypi/enum34 -.. _enum: https://docs.python.org/3.4/library/enum.html 1.3.4 (April 5, 2014) --------------------- @@ -3329,71 +3459,70 @@ attributes. There are also some significant performance optimizations to the autotagger's matching logic. One note for upgraders: if you use the :doc:`/plugins/fetchart`, it has a new -dependency, the `requests`_ module. +dependency, the requests_ module. New stuff: -* Added a :ref:`config-cmd` command to manage your configuration. It can show +- Added a :ref:`config-cmd` command to manage your configuration. It can show you what you currently have in your config file, point you at where the file should be, or launch your text editor to let you modify the file. Thanks to geigerzaehler. -* Beets now ships with a shell command completion script! See - :ref:`completion`. Thanks to geigerzaehler. -* The :ref:`modify-cmd` command now allows removing flexible attributes. For +- Beets now ships with a shell command completion script! See :ref:`completion`. + Thanks to geigerzaehler. 
+- The :ref:`modify-cmd` command now allows removing flexible attributes. For example, ``beet modify artist:beatles oldies!`` deletes the ``oldies`` attribute from matching items. Thanks to brilnius. -* Internally, beets has laid the groundwork for supporting multi-valued - fields. Thanks to geigerzaehler. -* The importer interface now shows the URL for MusicBrainz matches. Thanks to +- Internally, beets has laid the groundwork for supporting multi-valued fields. + Thanks to geigerzaehler. +- The importer interface now shows the URL for MusicBrainz matches. Thanks to johtso. -* :doc:`/plugins/smartplaylist`: Playlists can now be generated from multiple - queries (combined with "or" logic). Album-level queries are also now - possible and automatic playlist regeneration can now be disabled. Thanks to - brilnius. -* ``echonest`` plugin: Echo Nest similarity now weights the tempo in - better proportion to other metrics. Also, options were added to specify - custom thresholds and output formats. Thanks to Adam M. -* Added the :ref:`after_write <plugin_events>` plugin event. -* :doc:`/plugins/lastgenre`: Separator in genre lists can now be - configured. Thanks to brilnius. -* We now only use "primary" aliases for artist names from MusicBrainz. This - eliminates some strange naming that could occur when the `languages` config +- :doc:`/plugins/smartplaylist`: Playlists can now be generated from multiple + queries (combined with "or" logic). Album-level queries are also now possible + and automatic playlist regeneration can now be disabled. Thanks to brilnius. +- ``echonest`` plugin: Echo Nest similarity now weights the tempo in better + proportion to other metrics. Also, options were added to specify custom + thresholds and output formats. Thanks to Adam M. +- Added the :ref:`after_write <plugin_events>` plugin event. +- :doc:`/plugins/lastgenre`: Separator in genre lists can now be configured. + Thanks to brilnius. 
+- We now only use "primary" aliases for artist names from MusicBrainz. This + eliminates some strange naming that could occur when the ``languages`` config option was set. Thanks to Filipe Fortes. -* The performance of the autotagger's matching mechanism is vastly improved. +- The performance of the autotagger's matching mechanism is vastly improved. This should be noticeable when matching against very large releases such as box sets. -* The :ref:`import-cmd` command can now accept individual files as arguments +- The :ref:`import-cmd` command can now accept individual files as arguments even in non-singleton mode. Files are imported as one-track albums. Fixes: -* Error messages involving paths no longer escape non-ASCII characters (for +- Error messages involving paths no longer escape non-ASCII characters (for legibility). -* Fixed a regression that made it impossible to use the :ref:`modify-cmd` +- Fixed a regression that made it impossible to use the :ref:`modify-cmd` command to add new flexible fields. Thanks to brilnius. -* ``echonest`` plugin: Avoid crashing when the audio analysis fails. - Thanks to Pedro Silva. -* :doc:`/plugins/duplicates`: Fix checksumming command execution for files - with quotation marks in their names. Thanks again to Pedro Silva. -* Fix a crash when importing with both of the :ref:`group_albums` and +- ``echonest`` plugin: Avoid crashing when the audio analysis fails. Thanks to + Pedro Silva. +- :doc:`/plugins/duplicates`: Fix checksumming command execution for files with + quotation marks in their names. Thanks again to Pedro Silva. +- Fix a crash when importing with both of the :ref:`group_albums` and :ref:`incremental` options enabled. Thanks to geigerzaehler. -* Give a sensible error message when ``BEETSDIR`` points to a file. Thanks - again to geigerzaehler. -* Fix a crash when reading WMA files whose boolean-valued fields contain +- Give a sensible error message when ``BEETSDIR`` points to a file. 
Thanks again + to geigerzaehler. +- Fix a crash when reading WMA files whose boolean-valued fields contain strings. Thanks to johtso. -* :doc:`/plugins/fetchart`: The plugin now sends "beets" as the User-Agent - when making scraping requests. This helps resolve some blocked requests. The - plugin now also depends on the `requests`_ Python library. -* The :ref:`write-cmd` command now only shows the changes to fields that will +- :doc:`/plugins/fetchart`: The plugin now sends "beets" as the User-Agent when + making scraping requests. This helps resolve some blocked requests. The plugin + now also depends on the requests_ Python library. +- The :ref:`write-cmd` command now only shows the changes to fields that will actually be written to a file. -* :doc:`/plugins/duplicates`: Spurious reports are now avoided for tracks with +- :doc:`/plugins/duplicates`: Spurious reports are now avoided for tracks with missing values (e.g., no MBIDs). Thanks to Pedro Silva. -* The default :ref:`replace` sanitation options now remove leading whitespace - by default. Thanks to brilnius. -* :doc:`/plugins/importfeeds`: Fix crash when importing albums - containing ``/`` with the ``m3u_multi`` format. -* Avoid crashing on Mutagen bugs while writing files' tags. -* :doc:`/plugins/convert`: Display a useful error message when the FFmpeg +- The default :ref:`replace` sanitation options now remove leading whitespace by + default. Thanks to brilnius. +- :doc:`/plugins/importfeeds`: Fix crash when importing albums containing ``/`` + with the ``m3u_multi`` format. +- Avoid crashing on Mutagen bugs while writing files' tags. +- :doc:`/plugins/convert`: Display a useful error message when the FFmpeg executable can't be found. .. _requests: https://requests.readthedocs.io/en/master/ @@ -3406,96 +3535,93 @@ internally. Along with laying the groundwork for some great things in the future, this brings a number of improvements to how you interact with beets. 
Here's what's new with fields in particular: -* Plugin-provided fields can now be used in queries. For example, if you use - the :doc:`/plugins/inline` to define a field called ``era``, you can now - filter your library based on that field by typing something like - ``beet list era:goldenage``. -* Album-level flexible attributes and plugin-provided attributes can now be - used in path formats (and other item-level templates). -* :ref:`Date-based queries <datequery>` are now possible. Try getting every +- Plugin-provided fields can now be used in queries. For example, if you use the + :doc:`/plugins/inline` to define a field called ``era``, you can now filter + your library based on that field by typing something like ``beet list + era:goldenage``. +- Album-level flexible attributes and plugin-provided attributes can now be used + in path formats (and other item-level templates). +- :ref:`Date-based queries <datequery>` are now possible. Try getting every track you added in February 2014 with ``beet ls added:2014-02`` or in the whole decade with ``added:2010..``. Thanks to Stig Inge Lea Bjørnsen. -* The :ref:`modify-cmd` command is now better at parsing and formatting - fields. You can assign to boolean fields like ``comp``, for example, using - either the words "true" or "false" or the numerals 1 and 0. Any - boolean-esque value is normalized to a real boolean. The :ref:`update-cmd` - and :ref:`write-cmd` commands also got smarter at formatting and colorizing - changes. +- The :ref:`modify-cmd` command is now better at parsing and formatting fields. + You can assign to boolean fields like ``comp``, for example, using either the + words "true" or "false" or the numerals 1 and 0. Any boolean-esque value is + normalized to a real boolean. The :ref:`update-cmd` and :ref:`write-cmd` + commands also got smarter at formatting and colorizing changes. 
For developers, the short version of the story is that Item and Album objects provide *uniform access* across fixed, flexible, and computed attributes. You -can write ``item.foo`` to access the ``foo`` field without worrying about -where the data comes from. +can write ``item.foo`` to access the ``foo`` field without worrying about where +the data comes from. Unrelated new stuff: -* The importer has a new interactive option (*G* for "Group albums"), +- The importer has a new interactive option (*G* for "Group albums"), command-line flag (``--group-albums``), and config option - (:ref:`group_albums`) that lets you split apart albums that are mixed - together in a single directory. Thanks to geigerzaehler. -* A new ``--config`` command-line option lets you specify an additional + (:ref:`group_albums`) that lets you split apart albums that are mixed together + in a single directory. Thanks to geigerzaehler. +- A new ``--config`` command-line option lets you specify an additional configuration file. This option *combines* config settings with your default - config file. (As part of this change, the ``BEETSDIR`` environment variable - no longer combines---it *replaces* your default config file.) Thanks again - to geigerzaehler. -* :doc:`/plugins/ihate`: The plugin's configuration interface was overhauled. + config file. (As part of this change, the ``BEETSDIR`` environment variable no + longer combines---it *replaces* your default config file.) Thanks again to + geigerzaehler. +- :doc:`/plugins/ihate`: The plugin's configuration interface was overhauled. Its configuration is now much simpler---it uses beets queries instead of an - ad-hoc per-field configuration. This is *backwards-incompatible*---if you - use this plugin, you will need to update your configuration. Thanks to + ad-hoc per-field configuration. This is *backwards-incompatible*---if you use + this plugin, you will need to update your configuration. Thanks to BrainDamage. 
Other little fixes: -* ``echonest`` plugin: Tempo (BPM) is now always stored as an integer. - Thanks to Heinz Wiesinger. -* Fix Python 2.6 compatibility in some logging statements in +- ``echonest`` plugin: Tempo (BPM) is now always stored as an integer. Thanks to + Heinz Wiesinger. +- Fix Python 2.6 compatibility in some logging statements in :doc:`/plugins/chroma` and :doc:`/plugins/lastgenre`. -* Prevent some crashes when things go really wrong when writing file metadata - at the end of the import process. -* New plugin events: ``item_removed`` (thanks to Romuald Conty) and +- Prevent some crashes when things go really wrong when writing file metadata at + the end of the import process. +- New plugin events: ``item_removed`` (thanks to Romuald Conty) and ``item_copied`` (thanks to Stig Inge Lea Bjørnsen). -* The ``pluginpath`` config option can now point to the directory containing +- The ``pluginpath`` config option can now point to the directory containing plugin code. (Previously, it awkwardly needed to point at a directory - containing a ``beetsplug`` directory, which would then contain your code. - This is preserved as an option for backwards compatibility.) This change - should also work around a long-standing issue when using ``pluginpath`` when - beets is installed using pip. Many thanks to geigerzaehler. -* :doc:`/plugins/web`: The ``/item/`` and ``/album/`` API endpoints now - produce full details about albums and items, not just lists of IDs. Thanks - to geigerzaehler. -* Fix a potential crash when using image resizing with the + containing a ``beetsplug`` directory, which would then contain your code. This + is preserved as an option for backwards compatibility.) This change should + also work around a long-standing issue when using ``pluginpath`` when beets is + installed using pip. Many thanks to geigerzaehler. 
+- :doc:`/plugins/web`: The ``/item/`` and ``/album/`` API endpoints now produce + full details about albums and items, not just lists of IDs. Thanks to + geigerzaehler. +- Fix a potential crash when using image resizing with the :doc:`/plugins/fetchart` or :doc:`/plugins/embedart` without ImageMagick installed. -* Also, when invoking ``convert`` for image resizing fails, we now log an - error instead of crashing. -* :doc:`/plugins/fetchart`: The ``beet fetchart`` command can now associate - local images with albums (unless ``--force`` is provided). Thanks to +- Also, when invoking ``convert`` for image resizing fails, we now log an error + instead of crashing. +- :doc:`/plugins/fetchart`: The ``beet fetchart`` command can now associate + local images with albums (unless ``--force`` is provided). Thanks to brilnius. +- :doc:`/plugins/fetchart`: Command output is now colorized. Thanks again to brilnius. -* :doc:`/plugins/fetchart`: Command output is now colorized. Thanks again to - brilnius. -* The :ref:`modify-cmd` command avoids writing files and committing to the +- The :ref:`modify-cmd` command avoids writing files and committing to the database when nothing has changed. Thanks once more to brilnius. -* The importer now uses the album artist field when guessing existing - metadata for albums (rather than just the track artist field). Thanks to - geigerzaehler. -* :doc:`/plugins/fromfilename`: Fix a crash when a filename contained only a +- The importer now uses the album artist field when guessing existing metadata + for albums (rather than just the track artist field). Thanks to geigerzaehler. +- :doc:`/plugins/fromfilename`: Fix a crash when a filename contained only a track number (e.g., ``02.mp3``). -* :doc:`/plugins/convert`: Transcoding should now work on Windows. -* :doc:`/plugins/duplicates`: The ``move`` and ``copy`` destination arguments +- :doc:`/plugins/convert`: Transcoding should now work on Windows. 
+- :doc:`/plugins/duplicates`: The ``move`` and ``copy`` destination arguments are now treated as directories. Thanks to Pedro Silva. -* The :ref:`modify-cmd` command now skips confirmation and prints a message if +- The :ref:`modify-cmd` command now skips confirmation and prints a message if no changes are necessary. Thanks to brilnius. -* :doc:`/plugins/fetchart`: When using the ``remote_priority`` config option, +- :doc:`/plugins/fetchart`: When using the ``remote_priority`` config option, local image files are no longer completely ignored. -* ``echonest`` plugin: Fix an issue causing the plugin to appear twice in - the output of the ``beet version`` command. -* :doc:`/plugins/lastgenre`: Fix an occasional crash when no tag weight was +- ``echonest`` plugin: Fix an issue causing the plugin to appear twice in the + output of the ``beet version`` command. +- :doc:`/plugins/lastgenre`: Fix an occasional crash when no tag weight was returned by Last.fm. -* :doc:`/plugins/mpdstats`: Restore the ``last_played`` field. Thanks to - Johann Klähn. -* The :ref:`modify-cmd` command's output now clearly shows when a file has - been deleted. -* Album art in files with Vorbis Comments is now marked with the "front cover" +- :doc:`/plugins/mpdstats`: Restore the ``last_played`` field. Thanks to Johann + Klähn. +- The :ref:`modify-cmd` command's output now clearly shows when a file has been + deleted. +- Album art in files with Vorbis Comments is now marked with the "front cover" type. Thanks to Jason Lefley. 1.3.2 (December 22, 2013) @@ -3505,144 +3631,139 @@ This update brings new plugins for fetching acoustic metrics and listening statistics, many more options for the duplicate detection plugin, and flexible options for fetching multiple genres. -The "core" of beets gained a new built-in command: :ref:`beet write -<write-cmd>` updates the metadata tags for files, bringing them back -into sync with your database. Thanks to Heinz Wiesinger. 
+The "core" of beets gained a new built-in command: :ref:`beet write <write-cmd>` +updates the metadata tags for files, bringing them back into sync with your +database. Thanks to Heinz Wiesinger. We added some plugins and overhauled some existing ones: -* The new ``echonest`` plugin plugin can fetch a wide range of `acoustic - attributes`_ from `The Echo Nest`_, including the "speechiness" and - "liveness" of each track. The new plugin supersedes an older version - (``echonest_tempo``) that only fetched the BPM field. Thanks to Pedro Silva - and Peter Schnebel. - -* The :doc:`/plugins/duplicates` got a number of new features, thanks to Pedro +- The new ``echonest`` plugin plugin can fetch a wide range of `acoustic + attributes`_ from `The Echo Nest`_, including the "speechiness" and "liveness" + of each track. The new plugin supersedes an older version (``echonest_tempo``) + that only fetched the BPM field. Thanks to Pedro Silva and Peter Schnebel. +- The :doc:`/plugins/duplicates` got a number of new features, thanks to Pedro Silva: - * The ``keys`` option lets you specify the fields used detect duplicates. - * You can now use checksumming (via an external command) to find - duplicates instead of metadata via the ``checksum`` option. - * The plugin can perform actions on the duplicates it find. The new - ``copy``, ``move``, ``delete``, ``delete_file``, and ``tag`` options - perform those actions. + - The ``keys`` option lets you specify the fields used detect duplicates. + - You can now use checksumming (via an external command) to find duplicates + instead of metadata via the ``checksum`` option. + - The plugin can perform actions on the duplicates it find. The new ``copy``, + ``move``, ``delete``, ``delete_file``, and ``tag`` options perform those + actions. -* The new :doc:`/plugins/mpdstats` collects statistics about your - listening habits from `MPD`_. Thanks to Peter Schnebel and Johann Klähn. 
- -* :doc:`/plugins/lastgenre`: The new ``multiple`` option has been replaced - with the ``count`` option, which lets you limit the number of genres added - to your music. (No more thousand-character genre fields!) Also, the - ``min_weight`` field filters out nonsense tags to make your genres more - relevant. Thanks to Peter Schnebel and rashley60. - -* :doc:`/plugins/lyrics`: A new ``--force`` option optionally re-downloads +- The new :doc:`/plugins/mpdstats` collects statistics about your listening + habits from MPD_. Thanks to Peter Schnebel and Johann Klähn. +- :doc:`/plugins/lastgenre`: The new ``multiple`` option has been replaced with + the ``count`` option, which lets you limit the number of genres added to your + music. (No more thousand-character genre fields!) Also, the ``min_weight`` + field filters out nonsense tags to make your genres more relevant. Thanks to + Peter Schnebel and rashley60. +- :doc:`/plugins/lyrics`: A new ``--force`` option optionally re-downloads lyrics even when files already have them. Thanks to Bitdemon. As usual, there are also innumerable little fixes and improvements: -* When writing ID3 tags for ReplayGain normalization, tags are written with - both upper-case and lower-case TXXX frame descriptions. Previous versions of - beets used only the upper-case style, which seems to be more standard, but - some players (namely, Quod Libet and foobar2000) seem to only use lower-case - names. -* :doc:`/plugins/missing`: Avoid a possible error when an album's - ``tracktotal`` field is missing. -* :doc:`/plugins/ftintitle`: Fix an error when the sort artist is missing. -* ``echonest_tempo``: The plugin should now match songs more - reliably (i.e., fewer "no tempo found" messages). Thanks to Peter Schnebel. -* :doc:`/plugins/convert`: Fix an "Item has no library" error when using the +- When writing ID3 tags for ReplayGain normalization, tags are written with both + upper-case and lower-case TXXX frame descriptions. 
Previous versions of beets + used only the upper-case style, which seems to be more standard, but some + players (namely, Quod Libet and foobar2000) seem to only use lower-case names. +- :doc:`/plugins/missing`: Avoid a possible error when an album's ``tracktotal`` + field is missing. +- :doc:`/plugins/ftintitle`: Fix an error when the sort artist is missing. +- ``echonest_tempo``: The plugin should now match songs more reliably (i.e., + fewer "no tempo found" messages). Thanks to Peter Schnebel. +- :doc:`/plugins/convert`: Fix an "Item has no library" error when using the ``auto`` config option. -* :doc:`/plugins/convert`: Fix an issue where files of the wrong format would +- :doc:`/plugins/convert`: Fix an issue where files of the wrong format would have their transcoding skipped (and files with the right format would be needlessly transcoded). Thanks to Jakob Schnitzer. -* Fix an issue that caused the :ref:`id3v23` option to work only occasionally. -* Also fix using :ref:`id3v23` in conjunction with the ``scrub`` and +- Fix an issue that caused the :ref:`id3v23` option to work only occasionally. +- Also fix using :ref:`id3v23` in conjunction with the ``scrub`` and ``embedart`` plugins. Thanks to Chris Cogburn. -* :doc:`/plugins/ihate`: Fix an error when importing singletons. Thanks to +- :doc:`/plugins/ihate`: Fix an error when importing singletons. Thanks to Mathijs de Bruin. -* The :ref:`clutter` option can now be a whitespace-separated list in addition +- The :ref:`clutter` option can now be a whitespace-separated list in addition to a YAML list. -* Values for the :ref:`replace` option can now be empty (i.e., null is +- Values for the :ref:`replace` option can now be empty (i.e., null is equivalent to the empty string). -* :doc:`/plugins/lastgenre`: Fix a conflict between canonicalization and +- :doc:`/plugins/lastgenre`: Fix a conflict between canonicalization and multiple genres. 
-* When a match has a year but not a month or day, the autotagger now "zeros - out" the month and day fields after applying the year. -* For plugin developers: added an ``optparse`` callback utility function for +- When a match has a year but not a month or day, the autotagger now "zeros out" + the month and day fields after applying the year. +- For plugin developers: added an ``optparse`` callback utility function for performing actions based on arguments. Thanks to Pedro Silva. -* :doc:`/plugins/scrub`: Fix scrubbing of MPEG-4 files. Thanks to Yevgeny +- :doc:`/plugins/scrub`: Fix scrubbing of MPEG-4 files. Thanks to Yevgeny Bezman. -.. _Acoustic Attributes: https://web.archive.org/web/20160701063109/http://developer.echonest.com/acoustic-attributes.html -.. _MPD: https://www.musicpd.org/ +.. _acoustic attributes: https://web.archive.org/web/20160701063109/http://developer.echonest.com/acoustic-attributes.html + +.. _mpd: https://www.musicpd.org/ 1.3.1 (October 12, 2013) ------------------------ This release boasts a host of new little features, many of them contributed by -beets' amazing and prolific community. It adds support for `Opus`_ files, +beets' amazing and prolific community. It adds support for Opus_ files, transcoding to any format, and two new plugins: one that guesses metadata for -"blank" files based on their filenames and one that moves featured artists -into the title field. +"blank" files based on their filenames and one that moves featured artists into +the title field. Here's the new stuff: -* Add `Opus`_ audio support. Thanks to Rowan Lewis. -* :doc:`/plugins/convert`: You can now transcode files to any audio format, +- Add Opus_ audio support. Thanks to Rowan Lewis. +- :doc:`/plugins/convert`: You can now transcode files to any audio format, rather than just MP3. Thanks again to Rowan Lewis. 
-* The new :doc:`/plugins/fromfilename` guesses tags from the filenames during +- The new :doc:`/plugins/fromfilename` guesses tags from the filenames during import when metadata tags themselves are missing. Thanks to Jan-Erik Dahlin. -* The :doc:`/plugins/ftintitle`, by `@Verrus`_, is now distributed with beets. - It helps you rewrite tags to move "featured" artists from the artist field - to the title field. -* The MusicBrainz data source now uses track artists over recording - artists. This leads to better metadata when tagging classical music. Thanks - to Henrique Ferreiro. -* :doc:`/plugins/lastgenre`: You can now get multiple genres per album or - track using the ``multiple`` config option. Thanks to rashley60 on GitHub. -* A new :ref:`id3v23` config option makes beets write MP3 files' tags using - the older ID3v2.3 metadata standard. Use this if you want your tags to be - visible to Windows and some older players. +- The :doc:`/plugins/ftintitle`, by `@Verrus`_, is now distributed with beets. + It helps you rewrite tags to move "featured" artists from the artist field to + the title field. +- The MusicBrainz data source now uses track artists over recording artists. + This leads to better metadata when tagging classical music. Thanks to Henrique + Ferreiro. +- :doc:`/plugins/lastgenre`: You can now get multiple genres per album or track + using the ``multiple`` config option. Thanks to rashley60 on GitHub. +- A new :ref:`id3v23` config option makes beets write MP3 files' tags using the + older ID3v2.3 metadata standard. Use this if you want your tags to be visible + to Windows and some older players. And some fixes: -* :doc:`/plugins/fetchart`: Better error message when the image file has an +- :doc:`/plugins/fetchart`: Better error message when the image file has an unrecognized type. 
-* :doc:`/plugins/mbcollection`: Detect, log, and skip invalid MusicBrainz IDs +- :doc:`/plugins/mbcollection`: Detect, log, and skip invalid MusicBrainz IDs (instead of failing with an API error). -* :doc:`/plugins/info`: Fail gracefully when used erroneously with a - directory. -* ``echonest_tempo``: Fix an issue where the plugin could use the - tempo from the wrong song when the API did not contain the requested song. -* Fix a crash when a file's metadata included a very large number (one wider +- :doc:`/plugins/info`: Fail gracefully when used erroneously with a directory. +- ``echonest_tempo``: Fix an issue where the plugin could use the tempo from the + wrong song when the API did not contain the requested song. +- Fix a crash when a file's metadata included a very large number (one wider than 64 bits). These huge numbers are now replaced with zeroes in the database. -* When a track on a MusicBrainz release has a different length from the +- When a track on a MusicBrainz release has a different length from the underlying recording's length, the track length is now used instead. -* With :ref:`per_disc_numbering` enabled, the ``tracktotal`` field is now set +- With :ref:`per_disc_numbering` enabled, the ``tracktotal`` field is now set correctly (i.e., to the number of tracks on the disc). -* :doc:`/plugins/scrub`: The ``scrub`` command now restores album art in +- :doc:`/plugins/scrub`: The ``scrub`` command now restores album art in addition to other (database-backed) tags. -* :doc:`/plugins/mpdupdate`: Domain sockets can now begin with a tilde (which - is correctly expanded to ``$HOME``) as well as a slash. Thanks to Johann - Klähn. -* :doc:`/plugins/lastgenre`: Fix a regression that could cause new genres - found during import not to be persisted. 
-* Fixed a crash when imported album art was also marked as "clutter" where the +- :doc:`/plugins/mpdupdate`: Domain sockets can now begin with a tilde (which is + correctly expanded to ``$HOME``) as well as a slash. Thanks to Johann Klähn. +- :doc:`/plugins/lastgenre`: Fix a regression that could cause new genres found + during import not to be persisted. +- Fixed a crash when imported album art was also marked as "clutter" where the art would be deleted before it could be moved into place. This led to a "image.jpg not found during copy" error. Now clutter is removed (and - directories pruned) much later in the process, after the - ``import_task_files`` hook. -* :doc:`/plugins/missing`: Fix an error when printing missing track names. + directories pruned) much later in the process, after the ``import_task_files`` + hook. +- :doc:`/plugins/missing`: Fix an error when printing missing track names. Thanks to Pedro Silva. -* Fix an occasional KeyError in the :ref:`update-cmd` command introduced in +- Fix an occasional KeyError in the :ref:`update-cmd` command introduced in 1.3.0. -* :doc:`/plugins/scrub`: Avoid preserving certain non-standard ID3 tags such - as NCON. +- :doc:`/plugins/scrub`: Avoid preserving certain non-standard ID3 tags such as + NCON. -.. _Opus: https://www.opus-codec.org/ -.. _@Verrus: https://github.com/Verrus +.. _@verrus: https://github.com/Verrus + +.. _opus: https://www.opus-codec.org/ 1.3.0 (September 11, 2013) -------------------------- @@ -3654,15 +3775,21 @@ artist, track, etc.). Instead, you can use any field name you can think of and treat it just like the built-in fields. 
For example, you can use the :ref:`modify-cmd` command to set a new field on a -track:: +track: + +:: $ beet modify mood=sexy artist:miguel -and then query your music based on that field:: +and then query your music based on that field: + +:: $ beet ls mood:sunny -or use templates to see the value of the field:: +or use templates to see the value of the field: + +:: $ beet ls -f '$title: $mood' @@ -3673,8 +3800,8 @@ infrastructure. One side effect of this change: queries that include unknown fields will now match *nothing* instead of *everything*. So if you type ``beet ls -fieldThatDoesNotExist:foo``, beets will now return no results, whereas -previous versions would spit out a warning and then list your entire library. +fieldThatDoesNotExist:foo``, beets will now return no results, whereas previous +versions would spit out a warning and then list your entire library. There's more detail than you could ever need `on the beets blog`_. @@ -3683,27 +3810,26 @@ There's more detail than you could ever need `on the beets blog`_. 1.2.2 (August 27, 2013) ----------------------- -This is a bugfix release. We're in the midst of preparing for a large change -in beets 1.3, so 1.2.2 resolves some issues that came up over the last few -weeks. Stay tuned! +This is a bugfix release. We're in the midst of preparing for a large change in +beets 1.3, so 1.2.2 resolves some issues that came up over the last few weeks. +Stay tuned! The improvements in this release are: -* A new plugin event, ``item_moved``, is sent when files are moved on disk. +- A new plugin event, ``item_moved``, is sent when files are moved on disk. Thanks to dsedivec. -* :doc:`/plugins/lyrics`: More improvements to the Google backend by Fabrice +- :doc:`/plugins/lyrics`: More improvements to the Google backend by Fabrice Laporte. -* :doc:`/plugins/bpd`: Fix for a crash when searching, thanks to Simon Chopin. -* Regular expression queries (and other query types) over paths now work. 
+- :doc:`/plugins/bpd`: Fix for a crash when searching, thanks to Simon Chopin. +- Regular expression queries (and other query types) over paths now work. (Previously, special query types were ignored for the ``path`` field.) -* :doc:`/plugins/fetchart`: Look for images in the Cover Art Archive for - the release group in addition to the specific release. Thanks to Filipe - Fortes. -* Fix a race in the importer that could cause files to be deleted before they +- :doc:`/plugins/fetchart`: Look for images in the Cover Art Archive for the + release group in addition to the specific release. Thanks to Filipe Fortes. +- Fix a race in the importer that could cause files to be deleted before they were imported. This happened when importing one album, importing a duplicate - album, and then asking for the first album to be replaced with the second. - The situation could only arise when importing music from the library - directory and when the two albums are imported close in time. + album, and then asking for the first album to be replaced with the second. The + situation could only arise when importing music from the library directory and + when the two albums are imported close in time. 1.2.1 (June 22, 2013) --------------------- @@ -3711,285 +3837,278 @@ The improvements in this release are: This release introduces a major internal change in the way that similarity scores are handled. It means that the importer interface can now show you exactly why a match is assigned its score and that the autotagger gained a few -new options that let you customize how matches are prioritized and -recommended. +new options that let you customize how matches are prioritized and recommended. -The refactoring work is due to the continued efforts of Tai Lee. The -changes you'll notice while using the autotagger are: +The refactoring work is due to the continued efforts of Tai Lee. 
The changes +you'll notice while using the autotagger are: -* The top 3 distance penalties are now displayed on the release listing, - and all album and track penalties are now displayed on the track changes - list. This should make it clear exactly which metadata is contributing to a - low similarity score. -* When displaying differences, the colorization has been made more consistent - and helpful: red for an actual difference, yellow to indicate that a - distance penalty is being applied, and light gray for no penalty (e.g., case - changes) or disambiguation data. +- The top 3 distance penalties are now displayed on the release listing, and all + album and track penalties are now displayed on the track changes list. This + should make it clear exactly which metadata is contributing to a low + similarity score. +- When displaying differences, the colorization has been made more consistent + and helpful: red for an actual difference, yellow to indicate that a distance + penalty is being applied, and light gray for no penalty (e.g., case changes) + or disambiguation data. There are also three new (or overhauled) configuration options that let you customize the way that matches are selected: -* The :ref:`ignored` setting lets you instruct the importer not to show you +- The :ref:`ignored` setting lets you instruct the importer not to show you matches that have a certain penalty applied. -* The :ref:`preferred` collection of settings specifies a sorted list of +- The :ref:`preferred` collection of settings specifies a sorted list of preferred countries and media types, or prioritizes releases closest to the original year for an album. -* The :ref:`max_rec` settings can now be used for any distance penalty +- The :ref:`max_rec` settings can now be used for any distance penalty component. The recommendation will be downgraded if a non-zero penalty is being applied to the specified field. 
And some little enhancements and bug fixes: -* Multi-disc directory names can now contain "disk" (in addition to "disc"). +- Multi-disc directory names can now contain "disk" (in addition to "disc"). Thanks to John Hawthorn. -* :doc:`/plugins/web`: Item and album counts are now exposed through the API - for use with the Tomahawk resolver. Thanks to Uwe L. Korn. -* Python 2.6 compatibility for ``beatport``, - :doc:`/plugins/missing`, and :doc:`/plugins/duplicates`. Thanks to Wesley - Bitter and Pedro Silva. -* Don't move the config file during a null migration. Thanks to Theofilos +- :doc:`/plugins/web`: Item and album counts are now exposed through the API for + use with the Tomahawk resolver. Thanks to Uwe L. Korn. +- Python 2.6 compatibility for ``beatport``, :doc:`/plugins/missing`, and + :doc:`/plugins/duplicates`. Thanks to Wesley Bitter and Pedro Silva. +- Don't move the config file during a null migration. Thanks to Theofilos Intzoglou. -* Fix an occasional crash in the ``beatport`` when a length - field was missing from the API response. Thanks to Timothy Appnel. -* :doc:`/plugins/scrub`: Handle and log I/O errors. -* :doc:`/plugins/lyrics`: The Google backend should now turn up more results. +- Fix an occasional crash in the ``beatport`` when a length field was missing + from the API response. Thanks to Timothy Appnel. +- :doc:`/plugins/scrub`: Handle and log I/O errors. +- :doc:`/plugins/lyrics`: The Google backend should now turn up more results. Thanks to Fabrice Laporte. -* :doc:`/plugins/random`: Fix compatibility with Python 2.6. Thanks to - Matthias Drochner. +- :doc:`/plugins/random`: Fix compatibility with Python 2.6. Thanks to Matthias + Drochner. 
1.2.0 (June 5, 2013) -------------------- There's a *lot* of new stuff in this release: new data sources for the -autotagger, new plugins to look for problems in your library, tracking the -date that you acquired new music, an awesome new syntax for doing queries over -numeric fields, support for ALAC files, and major enhancements to the -importer's UI and distance calculations. A special thanks goes out to all the -contributors who helped make this release awesome. +autotagger, new plugins to look for problems in your library, tracking the date +that you acquired new music, an awesome new syntax for doing queries over +numeric fields, support for ALAC files, and major enhancements to the importer's +UI and distance calculations. A special thanks goes out to all the contributors +who helped make this release awesome. For the first time, beets can now tag your music using additional **data sources** to augment the matches from MusicBrainz. When you enable either of these plugins, the importer will start showing you new kinds of matches: -* New :doc:`/plugins/discogs`: Get matches from the `Discogs`_ database. - Thanks to Artem Ponomarenko and Tai Lee. -* New ``beatport`` plugin: Get matches from the `Beatport`_ database. - Thanks to Johannes Baiter. +- New :doc:`/plugins/discogs`: Get matches from the Discogs_ database. Thanks to + Artem Ponomarenko and Tai Lee. +- New ``beatport`` plugin: Get matches from the Beatport_ database. Thanks to + Johannes Baiter. We also have two other new plugins that can scan your library to check for common problems, both by Pedro Silva: -* New :doc:`/plugins/duplicates`: Find tracks or albums in your - library that are **duplicated**. -* New :doc:`/plugins/missing`: Find albums in your library that are **missing +- New :doc:`/plugins/duplicates`: Find tracks or albums in your library that are + **duplicated**. +- New :doc:`/plugins/missing`: Find albums in your library that are **missing tracks**. 
There are also three more big features added to beets core: -* Your library now keeps track of **when music was added** to it. The new +- Your library now keeps track of **when music was added** to it. The new ``added`` field is a timestamp reflecting when each item and album was imported and the new ``%time{}`` template function lets you format this timestamp for humans. Thanks to Lucas Duailibe. -* When using queries to match on quantitative fields, you can now use - **numeric ranges**. For example, you can get a list of albums from the '90s - by typing ``beet ls year:1990..1999`` or find high-bitrate music with +- When using queries to match on quantitative fields, you can now use **numeric + ranges**. For example, you can get a list of albums from the '90s by typing + ``beet ls year:1990..1999`` or find high-bitrate music with ``bitrate:128000..``. See :ref:`numericquery`. Thanks to Michael Schuerig. -* **ALAC files** are now marked as ALAC instead of being conflated with AAC +- **ALAC files** are now marked as ALAC instead of being conflated with AAC audio. Thanks to Simon Luijk. In addition, the importer saw various UI enhancements, thanks to Tai Lee: -* More consistent format and colorization of album and track metadata. -* Display data source URL for matches from the new data source plugins. This +- More consistent format and colorization of album and track metadata. +- Display data source URL for matches from the new data source plugins. This should make it easier to migrate data from Discogs or Beatport into MusicBrainz. -* Display album disambiguation and disc titles in the track listing, when +- Display album disambiguation and disc titles in the track listing, when available. -* Track changes are highlighted in yellow when they indicate a change in - format to or from the style of :ref:`per_disc_numbering`. (As before, no - penalty is applied because the track number is still "correct", just in a - different format.) 
-* Sort missing and unmatched tracks by index and title and group them - together for better readability. -* Indicate MusicBrainz ID mismatches. +- Track changes are highlighted in yellow when they indicate a change in format + to or from the style of :ref:`per_disc_numbering`. (As before, no penalty is + applied because the track number is still "correct", just in a different + format.) +- Sort missing and unmatched tracks by index and title and group them together + for better readability. +- Indicate MusicBrainz ID mismatches. The calculation of the similarity score for autotagger matches was also -improved, again thanks to Tai Lee. These changes, in general, help deal with -the new metadata sources and help disambiguate between similar releases in the -same MusicBrainz release group: +improved, again thanks to Tai Lee. These changes, in general, help deal with the +new metadata sources and help disambiguate between similar releases in the same +MusicBrainz release group: -* Strongly prefer releases with a matching MusicBrainz album ID. This helps +- Strongly prefer releases with a matching MusicBrainz album ID. This helps beets re-identify the same release when re-importing existing files. -* Prefer releases that are closest to the tagged ``year``. Tolerate files - tagged with release or original year. -* The new ``preferred_media`` config option lets you prefer a certain media - type when the ``media`` field is unset on an album. -* Apply minor penalties across a range of fields to differentiate between - nearly identical releases: ``disctotal``, ``label``, ``catalognum``, - ``country`` and ``albumdisambig``. +- Prefer releases that are closest to the tagged ``year``. Tolerate files tagged + with release or original year. +- The new ``preferred_media`` config option lets you prefer a certain media type + when the ``media`` field is unset on an album. 
+- Apply minor penalties across a range of fields to differentiate between nearly + identical releases: ``disctotal``, ``label``, ``catalognum``, ``country`` and + ``albumdisambig``. As usual, there were also lots of other great littler enhancements: -* :doc:`/plugins/random`: A new ``-e`` option gives an equal chance to each - artist in your collection to avoid biasing random samples to prolific - artists. Thanks to Georges Dubus. -* The :ref:`modify-cmd` now correctly converts types when modifying non-string +- :doc:`/plugins/random`: A new ``-e`` option gives an equal chance to each + artist in your collection to avoid biasing random samples to prolific artists. + Thanks to Georges Dubus. +- The :ref:`modify-cmd` now correctly converts types when modifying non-string fields. You can now safely modify the "comp" flag and the "year" field, for example. Thanks to Lucas Duailibe. -* :doc:`/plugins/convert`: You can now configure the path formats for - converted files separately from your main library. Thanks again to Lucas - Duailibe. -* The importer output now shows the number of audio files in each album. - Thanks to jayme on GitHub. -* Plugins can now provide fields for both Album and Item templates, thanks - to Pedro Silva. Accordingly, the :doc:`/plugins/inline` can also now define - album fields. For consistency, the ``pathfields`` configuration section has - been renamed ``item_fields`` (although the old name will still work for +- :doc:`/plugins/convert`: You can now configure the path formats for converted + files separately from your main library. Thanks again to Lucas Duailibe. +- The importer output now shows the number of audio files in each album. Thanks + to jayme on GitHub. +- Plugins can now provide fields for both Album and Item templates, thanks to + Pedro Silva. Accordingly, the :doc:`/plugins/inline` can also now define album + fields. 
For consistency, the ``pathfields`` configuration section has been + renamed ``item_fields`` (although the old name will still work for compatibility). -* Plugins can also provide metadata matches for ID searches. For example, the +- Plugins can also provide metadata matches for ID searches. For example, the new Discogs plugin lets you search for an album by its Discogs ID from the - same prompt that previously just accepted MusicBrainz IDs. Thanks to - Johannes Baiter. -* The :ref:`fields-cmd` command shows template fields provided by plugins. + same prompt that previously just accepted MusicBrainz IDs. Thanks to Johannes + Baiter. +- The :ref:`fields-cmd` command shows template fields provided by plugins. Thanks again to Pedro Silva. -* :doc:`/plugins/mpdupdate`: You can now communicate with MPD over a Unix - domain socket. Thanks to John Hawthorn. +- :doc:`/plugins/mpdupdate`: You can now communicate with MPD over a Unix domain + socket. Thanks to John Hawthorn. And a batch of fixes: -* Album art filenames now respect the :ref:`replace` configuration. -* Friendly error messages are now printed when trying to read or write files +- Album art filenames now respect the :ref:`replace` configuration. +- Friendly error messages are now printed when trying to read or write files that go missing. -* The :ref:`modify-cmd` command can now change albums' album art paths (i.e., +- The :ref:`modify-cmd` command can now change albums' album art paths (i.e., ``beet modify artpath=...`` works). Thanks to Lucas Duailibe. -* :doc:`/plugins/zero`: Fix a crash when nulling out a field that contains - None. -* Templates can now refer to non-tag item fields (e.g., ``$id`` and +- :doc:`/plugins/zero`: Fix a crash when nulling out a field that contains None. +- Templates can now refer to non-tag item fields (e.g., ``$id`` and ``$album_id``). -* :doc:`/plugins/lyrics`: Lyrics searches should now turn up more results due - to some fixes in dealing with special characters. 
+- :doc:`/plugins/lyrics`: Lyrics searches should now turn up more results due to + some fixes in dealing with special characters. -.. _Discogs: https://discogs.com/ -.. _Beatport: https://www.beatport.com/ +.. _beatport: https://www.beatport.com/ + +.. _discogs: https://discogs.com/ 1.1.0 (April 29, 2013) ---------------------- This final release of 1.1 brings a little polish to the betas that introduced -the new configuration system. The album art and lyrics plugins also got a -little love. +the new configuration system. The album art and lyrics plugins also got a little +love. If you're upgrading from 1.0.0 or earlier, this release (like the 1.1 betas) will automatically migrate your configuration to the new system. -* :doc:`/plugins/embedart`: The ``embedart`` command now embeds each album's - associated art by default. The ``--file`` option invokes the old behavior, - in which a specific image file is used. -* :doc:`/plugins/lyrics`: A new (optional) Google Custom Search backend was - added for finding lyrics on a wide array of sites. Thanks to Fabrice - Laporte. -* When automatically detecting the filesystem's maximum filename length, never +- :doc:`/plugins/embedart`: The ``embedart`` command now embeds each album's + associated art by default. The ``--file`` option invokes the old behavior, in + which a specific image file is used. +- :doc:`/plugins/lyrics`: A new (optional) Google Custom Search backend was + added for finding lyrics on a wide array of sites. Thanks to Fabrice Laporte. +- When automatically detecting the filesystem's maximum filename length, never guess more than 200 characters. This prevents errors on systems where the - maximum length was misreported. You can, of course, override this default - with the :ref:`max_filename_length` option. -* :doc:`/plugins/fetchart`: Two new configuration options were added: + maximum length was misreported. You can, of course, override this default with + the :ref:`max_filename_length` option. 
+- :doc:`/plugins/fetchart`: Two new configuration options were added: ``cover_names``, the list of keywords used to identify preferred images, and ``cautious``, which lets you avoid falling back to images that don't contain those keywords. Thanks to Fabrice Laporte. -* Avoid some error cases in the ``update`` command and the ``embedart`` and +- Avoid some error cases in the ``update`` command and the ``embedart`` and ``mbsync`` plugins. Invalid or missing files now cause error logs instead of crashing beets. Thanks to Lucas Duailibe. -* :doc:`/plugins/lyrics`: Searches now strip "featuring" artists when - searching for lyrics, which should increase the hit rate for these tracks. - Thanks to Fabrice Laporte. -* When listing the items in an album, the items are now always in track-number +- :doc:`/plugins/lyrics`: Searches now strip "featuring" artists when searching + for lyrics, which should increase the hit rate for these tracks. Thanks to + Fabrice Laporte. +- When listing the items in an album, the items are now always in track-number order. This should lead to more predictable listings from the :doc:`/plugins/importfeeds`. -* :doc:`/plugins/smartplaylist`: Queries are now split using shell-like syntax +- :doc:`/plugins/smartplaylist`: Queries are now split using shell-like syntax instead of just whitespace, so you can now construct terms that contain spaces. -* :doc:`/plugins/lastgenre`: The ``force`` config option now defaults to true +- :doc:`/plugins/lastgenre`: The ``force`` config option now defaults to true and controls the behavior of the import hook. (Previously, new genres were always forced during import.) -* :doc:`/plugins/web`: Fix an error when specifying the hostname on the - command line. -* :doc:`/plugins/web`: The underlying API was expanded slightly to support - `Tomahawk`_ collections. And file transfers now have a "Content-Length" - header. Thanks to Uwe L. Korn. -* :doc:`/plugins/lastgenre`: Fix an error when using genre canonicalization. 
+- :doc:`/plugins/web`: Fix an error when specifying the hostname on the command + line. +- :doc:`/plugins/web`: The underlying API was expanded slightly to support + Tomahawk_ collections. And file transfers now have a "Content-Length" header. + Thanks to Uwe L. Korn. +- :doc:`/plugins/lastgenre`: Fix an error when using genre canonicalization. -.. _Tomahawk: https://github.com/tomahawk-player/tomahawk +.. _tomahawk: https://github.com/tomahawk-player/tomahawk 1.1b3 (March 16, 2013) ---------------------- This third beta of beets 1.1 brings a hodgepodge of little new features (and -internal overhauls that will make improvements easier in the future). There -are new options for getting metadata in a particular language and seeing more -detail during the import process. There's also a new plugin for synchronizing -your metadata with MusicBrainz. Under the hood, plugins can now extend the -query syntax. +internal overhauls that will make improvements easier in the future). There are +new options for getting metadata in a particular language and seeing more detail +during the import process. There's also a new plugin for synchronizing your +metadata with MusicBrainz. Under the hood, plugins can now extend the query +syntax. New configuration options: -* :ref:`languages` controls the preferred languages when selecting an alias - from MusicBrainz. This feature requires `python-musicbrainzngs`_ 0.3 or - later. Thanks to Sam Doshi. -* :ref:`detail` enables a mode where all tracks are listed in the importer UI, +- :ref:`languages` controls the preferred languages when selecting an alias from + MusicBrainz. This feature requires python-musicbrainzngs_ 0.3 or later. Thanks + to Sam Doshi. +- :ref:`detail` enables a mode where all tracks are listed in the importer UI, as opposed to only changed tracks. 
-* The ``--flat`` option to the ``beet import`` command treats an entire +- The ``--flat`` option to the ``beet import`` command treats an entire directory tree of music files as a single album. This can help in situations where a multi-disc album is split across multiple directories. -* :doc:`/plugins/importfeeds`: An option was added to use absolute, rather - than relative, paths. Thanks to Lucas Duailibe. +- :doc:`/plugins/importfeeds`: An option was added to use absolute, rather than + relative, paths. Thanks to Lucas Duailibe. Other stuff: -* A new :doc:`/plugins/mbsync` provides a command that looks up each item and - track in MusicBrainz and updates your library to reflect it. This can help - you easily correct errors that have been fixed in the MB database. Thanks to - Jakob Schnitzer. -* :doc:`/plugins/fuzzy`: The ``fuzzy`` command was removed and replaced with a +- A new :doc:`/plugins/mbsync` provides a command that looks up each item and + track in MusicBrainz and updates your library to reflect it. This can help you + easily correct errors that have been fixed in the MB database. Thanks to Jakob + Schnitzer. +- :doc:`/plugins/fuzzy`: The ``fuzzy`` command was removed and replaced with a new query type. To perform fuzzy searches, use the ``~`` prefix with :ref:`list-cmd` or other commands. Thanks to Philippe Mongeau. -* As part of the above, plugins can now extend the query syntax and new kinds - of matching capabilities to beets. See :ref:`extend-query`. Thanks again to +- As part of the above, plugins can now extend the query syntax and new kinds of + matching capabilities to beets. See :ref:`extend-query`. Thanks again to Philippe Mongeau. -* :doc:`/plugins/convert`: A new ``--keep-new`` option lets you store - transcoded files in your library while backing up the originals (instead of - vice-versa). Thanks to Lucas Duailibe. 
-* :doc:`/plugins/convert`: Also, a new ``auto`` config option will transcode +- :doc:`/plugins/convert`: A new ``--keep-new`` option lets you store transcoded + files in your library while backing up the originals (instead of vice-versa). + Thanks to Lucas Duailibe. +- :doc:`/plugins/convert`: Also, a new ``auto`` config option will transcode audio files automatically during import. Thanks again to Lucas Duailibe. -* :doc:`/plugins/chroma`: A new ``fingerprint`` command lets you generate and +- :doc:`/plugins/chroma`: A new ``fingerprint`` command lets you generate and store fingerprints for items that don't yet have them. One more round of applause for Lucas Duailibe. -* ``echonest_tempo``: API errors now issue a warning instead of - exiting with an exception. We also avoid an error when track metadata - contains newlines. -* When the importer encounters an error (insufficient permissions, for - example) when walking a directory tree, it now logs an error instead of - crashing. -* In path formats, null database values now expand to the empty string instead +- ``echonest_tempo``: API errors now issue a warning instead of exiting with an + exception. We also avoid an error when track metadata contains newlines. +- When the importer encounters an error (insufficient permissions, for example) + when walking a directory tree, it now logs an error instead of crashing. +- In path formats, null database values now expand to the empty string instead of the string "None". -* Add "System Volume Information" (an internal directory found on some - Windows filesystems) to the default ignore list. -* Fix a crash when ReplayGain values were set to null. -* Fix a crash when iTunes Sound Check tags contained invalid data. -* Fix an error when the configuration file (``config.yaml``) is completely +- Add "System Volume Information" (an internal directory found on some Windows + filesystems) to the default ignore list. +- Fix a crash when ReplayGain values were set to null. 
+- Fix a crash when iTunes Sound Check tags contained invalid data. +- Fix an error when the configuration file (``config.yaml``) is completely empty. -* Fix an error introduced in 1.1b1 when importing using timid mode. Thanks to +- Fix an error introduced in 1.1b1 when importing using timid mode. Thanks to Sam Doshi. -* :doc:`/plugins/convert`: Fix a bug when creating files with Unicode - pathnames. -* Fix a spurious warning from the Unidecode module when matching albums that - are missing all metadata. -* Fix Unicode errors when a directory or file doesn't exist when invoking the +- :doc:`/plugins/convert`: Fix a bug when creating files with Unicode pathnames. +- Fix a spurious warning from the Unidecode module when matching albums that are + missing all metadata. +- Fix Unicode errors when a directory or file doesn't exist when invoking the import command. Thanks to Lucas Duailibe. -* :doc:`/plugins/mbcollection`: Show friendly, human-readable errors when +- :doc:`/plugins/mbcollection`: Show friendly, human-readable errors when MusicBrainz exceptions occur. -* ``echonest_tempo``: Catch socket errors that are not handled by - the Echo Nest library. -* :doc:`/plugins/chroma`: Catch Acoustid Web service errors when submitting +- ``echonest_tempo``: Catch socket errors that are not handled by the Echo Nest + library. +- :doc:`/plugins/chroma`: Catch Acoustid Web service errors when submitting fingerprints. 1.1b2 (February 16, 2013) @@ -3997,8 +4116,8 @@ Other stuff: The second beta of beets 1.1 uses the fancy new configuration infrastructure to add many, many new config options. The import process is more flexible; -filenames can be customized in more detail; and more. This release also -supports Windows Media (ASF) files and iTunes Sound Check volume normalization. +filenames can be customized in more detail; and more. This release also supports +Windows Media (ASF) files and iTunes Sound Check volume normalization. 
This version introduces one **change to the default behavior** that you should be aware of. Previously, when importing new albums matched in MusicBrainz, the @@ -4010,123 +4129,123 @@ behavior, just set :ref:`original_date` to true in your config file. New configuration options: -* :ref:`default_action` lets you determine the default (just-hit-return) option +- :ref:`default_action` lets you determine the default (just-hit-return) option is when considering a candidate. -* :ref:`none_rec_action` lets you skip the prompt, and automatically choose an +- :ref:`none_rec_action` lets you skip the prompt, and automatically choose an action, when there is no good candidate. Thanks to Tai Lee. -* :ref:`max_rec` lets you define a maximum recommendation for albums with +- :ref:`max_rec` lets you define a maximum recommendation for albums with missing/extra tracks or differing track lengths/numbers. Thanks again to Tai Lee. -* :ref:`original_date` determines whether, when importing new albums, the +- :ref:`original_date` determines whether, when importing new albums, the ``year``, ``month``, and ``day`` fields should reflect the specific (e.g., reissue) release date or the original release date. Note that the original release date is always available as ``original_year``, etc. -* :ref:`clutter` controls which files should be ignored when cleaning up empty +- :ref:`clutter` controls which files should be ignored when cleaning up empty directories. Thanks to Steinþór Pálsson. -* :doc:`/plugins/lastgenre`: A new configuration option lets you choose to +- :doc:`/plugins/lastgenre`: A new configuration option lets you choose to retrieve artist-level tags as genres instead of album- or track-level tags. Thanks to Peter Fern and Peter Schnebel. -* :ref:`max_filename_length` controls truncation of long filenames. Also, beets +- :ref:`max_filename_length` controls truncation of long filenames. 
Also, beets now tries to determine the filesystem's maximum length automatically if you leave this option unset. -* :doc:`/plugins/fetchart`: The ``remote_priority`` option searches remote - (Web) art sources even when local art is present. -* You can now customize the character substituted for path separators (e.g., /) +- :doc:`/plugins/fetchart`: The ``remote_priority`` option searches remote (Web) + art sources even when local art is present. +- You can now customize the character substituted for path separators (e.g., /) in filenames via ``path_sep_replace``. The default is an underscore. Use this setting with caution. Other new stuff: -* Support for Windows Media/ASF audio files. Thanks to Dave Hayes. -* New :doc:`/plugins/smartplaylist`: generate and maintain m3u playlist files +- Support for Windows Media/ASF audio files. Thanks to Dave Hayes. +- New :doc:`/plugins/smartplaylist`: generate and maintain m3u playlist files based on beets queries. Thanks to Dang Mai Hai. -* ReplayGain tags on MPEG-4/AAC files are now supported. And, even more +- ReplayGain tags on MPEG-4/AAC files are now supported. And, even more astonishingly, ReplayGain values in MP3 and AAC files are now compatible with `iTunes Sound Check`_. Thanks to Dave Hayes. -* Track titles in the importer UI's difference display are now either aligned +- Track titles in the importer UI's difference display are now either aligned vertically or broken across two lines for readability. Thanks to Tai Lee. -* Albums and items have new fields reflecting the *original* release date +- Albums and items have new fields reflecting the *original* release date (``original_year``, ``original_month``, and ``original_day``). Previously, when tagging from MusicBrainz, *only* the original date was stored; now, the old fields refer to the *specific* release date (e.g., when the album was reissued). 
-* Some changes to the way candidates are recommended for selection, thanks to +- Some changes to the way candidates are recommended for selection, thanks to Tai Lee: - * According to the new :ref:`max_rec` configuration option, partial album + - According to the new :ref:`max_rec` configuration option, partial album matches are downgraded to a "low" recommendation by default. - * When a match isn't great but is either better than all the others or the + - When a match isn't great but is either better than all the others or the only match, it is given a "low" (rather than "medium") recommendation. - * There is no prompt default (i.e., input is required) when matches are - bad: "low" or "none" recommendations or when choosing a candidate - other than the first. + - There is no prompt default (i.e., input is required) when matches are bad: + "low" or "none" recommendations or when choosing a candidate other than the + first. -* The importer's heuristic for coalescing the directories in a multi-disc album - has been improved. It can now detect when two directories alongside each - other share a similar prefix but a different number (e.g., "Album Disc 1" and - "Album Disc 2") even when they are not alone in a common parent directory. - Thanks once again to Tai Lee. -* Album listings in the importer UI now show the release medium (CD, Vinyl, +- The importer's heuristic for coalescing the directories in a multi-disc album + has been improved. It can now detect when two directories alongside each other + share a similar prefix but a different number (e.g., "Album Disc 1" and "Album + Disc 2") even when they are not alone in a common parent directory. Thanks + once again to Tai Lee. +- Album listings in the importer UI now show the release medium (CD, Vinyl, 3xCD, etc.) as well as the disambiguation string. Thanks to Peter Schnebel. 
-* :doc:`/plugins/lastgenre`: The plugin can now get different genres for +- :doc:`/plugins/lastgenre`: The plugin can now get different genres for individual tracks on an album. Thanks to Peter Schnebel. -* When getting data from MusicBrainz, the album disambiguation string +- When getting data from MusicBrainz, the album disambiguation string (``albumdisambig``) now reflects both the release and the release group. -* :doc:`/plugins/mpdupdate`: Sends an update message whenever *anything* in the +- :doc:`/plugins/mpdupdate`: Sends an update message whenever *anything* in the database changes---not just when importing. Thanks to Dang Mai Hai. -* When the importer UI shows a difference in track numbers or durations, they +- When the importer UI shows a difference in track numbers or durations, they are now colorized based on the *suffixes* that differ. For example, when showing the difference between 2:01 and 2:09, only the last digit will be highlighted. -* The importer UI no longer shows a change when the track length difference is +- The importer UI no longer shows a change when the track length difference is less than 10 seconds. (This threshold was previously 2 seconds.) -* Two new plugin events were added: *database_change* and *cli_exit*. Thanks +- Two new plugin events were added: *database_change* and *cli_exit*. Thanks again to Dang Mai Hai. -* Plugins are now loaded in the order they appear in the config file. Thanks to +- Plugins are now loaded in the order they appear in the config file. Thanks to Dang Mai Hai. -* :doc:`/plugins/bpd`: Browse by album artist and album artist sort name. - Thanks to Steinþór Pálsson. -* ``echonest_tempo``: Don't attempt a lookup when the artist or - track title is missing. -* Fix an error when migrating the ``.beetsstate`` file on Windows. -* A nicer error message is now given when the configuration file contains tabs. +- :doc:`/plugins/bpd`: Browse by album artist and album artist sort name. Thanks + to Steinþór Pálsson. 
+- ``echonest_tempo``: Don't attempt a lookup when the artist or track title is + missing. +- Fix an error when migrating the ``.beetsstate`` file on Windows. +- A nicer error message is now given when the configuration file contains tabs. (YAML doesn't like tabs.) -* Fix the ``-l`` (log path) command-line option for the ``import`` command. +- Fix the ``-l`` (log path) command-line option for the ``import`` command. -.. _iTunes Sound Check: https://support.apple.com/kb/HT2425 +.. _itunes sound check: https://support.apple.com/kb/HT2425 1.1b1 (January 29, 2013) ------------------------ This release entirely revamps beets' configuration system. The configuration -file is now a `YAML`_ document and is located, along with other support files, -in a common directory (e.g., ``~/.config/beets`` on Unix-like systems). +file is now a YAML_ document and is located, along with other support files, in +a common directory (e.g., ``~/.config/beets`` on Unix-like systems). -.. _YAML: https://en.wikipedia.org/wiki/YAML +.. _yaml: https://en.wikipedia.org/wiki/YAML -* Renamed plugins: The ``rdm`` plugin has been renamed to ``random`` and +- Renamed plugins: The ``rdm`` plugin has been renamed to ``random`` and ``fuzzy_search`` has been renamed to ``fuzzy``. -* Renamed config options: Many plugins have a flag dictating whether their +- Renamed config options: Many plugins have a flag dictating whether their action runs at import time. This option had many names (``autofetch``, ``autoembed``, etc.) but is now consistently called ``auto``. -* Reorganized import config options: The various ``import_*`` options are now +- Reorganized import config options: The various ``import_*`` options are now organized under an ``import:`` heading and their prefixes have been removed. 
-* New default file locations: The default filename of the library database is +- New default file locations: The default filename of the library database is now ``library.db`` in the same directory as the config file, as opposed to ``~/.beetsmusic.blb`` previously. Similarly, the runtime state file is now called ``state.pickle`` in the same directory instead of ``~/.beetsstate``. It also adds some new features: -* :doc:`/plugins/inline`: Inline definitions can now contain statements or +- :doc:`/plugins/inline`: Inline definitions can now contain statements or blocks in addition to just expressions. Thanks to Florent Thoumie. -* Add a configuration option, :ref:`terminal_encoding`, controlling the text +- Add a configuration option, :ref:`terminal_encoding`, controlling the text encoding used to print messages to standard output. -* The MusicBrainz hostname (and rate limiting) are now configurable. See +- The MusicBrainz hostname (and rate limiting) are now configurable. See :ref:`musicbrainz-config`. -* You can now configure the similarity thresholds used to determine when the +- You can now configure the similarity thresholds used to determine when the autotagger automatically accepts a metadata match. See :ref:`match-config`. -* :doc:`/plugins/importfeeds`: Added a new configuration option that controls +- :doc:`/plugins/importfeeds`: Added a new configuration option that controls the base for relative paths used in m3u files. Thanks to Philippe Mongeau. 1.0.0 (January 29, 2013) @@ -4137,12 +4256,12 @@ one-point-oh. Congratulations to everybody involved. This version of beets will remain stable and receive only bug fixes from here on out. New development is ongoing in the betas of version 1.1. -* :doc:`/plugins/scrub`: Fix an incompatibility with Python 2.6. -* :doc:`/plugins/lyrics`: Fix an issue that failed to find lyrics when metadata +- :doc:`/plugins/scrub`: Fix an incompatibility with Python 2.6. 
+- :doc:`/plugins/lyrics`: Fix an issue that failed to find lyrics when metadata contained "real" apostrophes. -* :doc:`/plugins/replaygain`: On Windows, emit a warning instead of - crashing when analyzing non-ASCII filenames. -* Silence a spurious warning from version 0.04.12 of the Unidecode module. +- :doc:`/plugins/replaygain`: On Windows, emit a warning instead of crashing + when analyzing non-ASCII filenames. +- Silence a spurious warning from version 0.04.12 of the Unidecode module. 1.0rc2 (December 31, 2012) -------------------------- @@ -4151,12 +4270,12 @@ This second release candidate follows quickly after rc1 and fixes a few small bugs found since that release. There were a couple of regressions and some bugs in a newly added plugin. -* ``echonest_tempo``: If the Echo Nest API limit is exceeded or a - communication error occurs, the plugin now waits and tries again instead of - crashing. Thanks to Zach Denton. -* :doc:`/plugins/fetchart`: Fix a regression that caused crashes when art was +- ``echonest_tempo``: If the Echo Nest API limit is exceeded or a communication + error occurs, the plugin now waits and tries again instead of crashing. Thanks + to Zach Denton. +- :doc:`/plugins/fetchart`: Fix a regression that caused crashes when art was not available from some sources. -* Fix a regression on Windows that caused all relative paths to be "not found". +- Fix a regression on Windows that caused all relative paths to be "not found". 1.0rc1 (December 17, 2012) -------------------------- @@ -4167,105 +4286,107 @@ goes to the growing and vibrant beets community. A million thanks to everybody who contributed to this release. There are new plugins for transcoding music, fuzzy searches, tempo collection, -and fiddling with metadata. The ReplayGain plugin has been rebuilt from -scratch. Album art images can now be resized automatically. Many other smaller +and fiddling with metadata. The ReplayGain plugin has been rebuilt from scratch. 
+Album art images can now be resized automatically. Many other smaller refinements make things "just work" as smoothly as possible. -With this release candidate, beets 1.0 is feature-complete. We'll be fixing -bugs on the road to 1.0 but no new features will be added. Concurrently, work -begins today on features for version 1.1. +With this release candidate, beets 1.0 is feature-complete. We'll be fixing bugs +on the road to 1.0 but no new features will be added. Concurrently, work begins +today on features for version 1.1. -* New plugin: :doc:`/plugins/convert` **transcodes** music and embeds album art +- New plugin: :doc:`/plugins/convert` **transcodes** music and embeds album art while copying to a separate directory. Thanks to Jakob Schnitzer and Andrew G. Dunn. -* New plugin: :doc:`/plugins/fuzzy` lets you find albums and tracks - using **fuzzy string matching** so you don't have to type (or even remember) - their exact names. Thanks to Philippe Mongeau. -* New plugin: ``echonest_tempo`` fetches **tempo** (BPM) information - from `The Echo Nest`_. Thanks to David Brenner. -* New plugin: :doc:`/plugins/the` adds a template function that helps format +- New plugin: :doc:`/plugins/fuzzy` lets you find albums and tracks using + **fuzzy string matching** so you don't have to type (or even remember) their + exact names. Thanks to Philippe Mongeau. +- New plugin: ``echonest_tempo`` fetches **tempo** (BPM) information from `The + Echo Nest`_. Thanks to David Brenner. +- New plugin: :doc:`/plugins/the` adds a template function that helps format text for nicely-sorted directory listings. Thanks to Blemjhoo Tezoulbr. -* New plugin: :doc:`/plugins/zero` **filters out undesirable fields** before +- New plugin: :doc:`/plugins/zero` **filters out undesirable fields** before they are written to your tags. Thanks again to Blemjhoo Tezoulbr. 
-* New plugin: :doc:`/plugins/ihate` automatically skips (or warns you about) +- New plugin: :doc:`/plugins/ihate` automatically skips (or warns you about) importing albums that match certain criteria. Thanks once again to Blemjhoo Tezoulbr. -* :doc:`/plugins/replaygain`: This plugin has been completely overhauled to use - the `mp3gain`_ or `aacgain`_ command-line tools instead of the failure-prone +- :doc:`/plugins/replaygain`: This plugin has been completely overhauled to use + the mp3gain_ or aacgain_ command-line tools instead of the failure-prone Gstreamer ReplayGain implementation. Thanks to Fabrice Laporte. -* :doc:`/plugins/fetchart` and :doc:`/plugins/embedart`: Both plugins can now +- :doc:`/plugins/fetchart` and :doc:`/plugins/embedart`: Both plugins can now **resize album art** to avoid excessively large images. Use the ``maxwidth`` config option with either plugin. Thanks to Fabrice Laporte. -* :doc:`/plugins/scrub`: Scrubbing now removes *all* types of tags from a file +- :doc:`/plugins/scrub`: Scrubbing now removes *all* types of tags from a file rather than just one. For example, if your FLAC file has both ordinary FLAC tags and ID3 tags, the ID3 tags are now also removed. -* :ref:`stats-cmd` command: New ``--exact`` switch to make the file size +- :ref:`stats-cmd` command: New ``--exact`` switch to make the file size calculation more accurate (thanks to Jakob Schnitzer). -* :ref:`list-cmd` command: Templates given with ``-f`` can now show items' and +- :ref:`list-cmd` command: Templates given with ``-f`` can now show items' and albums' paths (using ``$path``). -* The output of the :ref:`update-cmd`, :ref:`remove-cmd`, and :ref:`modify-cmd` - commands now respects the :ref:`list_format_album` and - :ref:`list_format_item` config options. Thanks to Mike Kazantsev. 
-* The :ref:`art-filename` option can now be a template rather than a simple +- The output of the :ref:`update-cmd`, :ref:`remove-cmd`, and :ref:`modify-cmd` + commands now respects the :ref:`list_format_album` and :ref:`list_format_item` + config options. Thanks to Mike Kazantsev. +- The :ref:`art-filename` option can now be a template rather than a simple string. Thanks to Jarrod Beardwood. -* Fix album queries for ``artpath`` and other non-item fields. -* Null values in the database can now be matched with the empty-string regular +- Fix album queries for ``artpath`` and other non-item fields. +- Null values in the database can now be matched with the empty-string regular expression, ``^$``. -* Queries now correctly match non-string values in path format predicates. -* When autotagging a various-artists album, the album artist field is now - used instead of the majority track artist. -* :doc:`/plugins/lastgenre`: Use the albums' existing genre tags if they pass +- Queries now correctly match non-string values in path format predicates. +- When autotagging a various-artists album, the album artist field is now used + instead of the majority track artist. +- :doc:`/plugins/lastgenre`: Use the albums' existing genre tags if they pass the whitelist (thanks to Fabrice Laporte). -* :doc:`/plugins/lastgenre`: Add a ``lastgenre`` command for fetching genres +- :doc:`/plugins/lastgenre`: Add a ``lastgenre`` command for fetching genres post facto (thanks to Jakob Schnitzer). -* :doc:`/plugins/fetchart`: Local image filenames are now used in alphabetical +- :doc:`/plugins/fetchart`: Local image filenames are now used in alphabetical order. -* :doc:`/plugins/fetchart`: Fix a bug where cover art filenames could lack - a ``.jpg`` extension. -* :doc:`/plugins/lyrics`: Fix an exception with non-ASCII lyrics. -* :doc:`/plugins/web`: The API now reports file sizes (for use with the +- :doc:`/plugins/fetchart`: Fix a bug where cover art filenames could lack a + ``.jpg`` extension. 
+- :doc:`/plugins/lyrics`: Fix an exception with non-ASCII lyrics. +- :doc:`/plugins/web`: The API now reports file sizes (for use with the `Tomahawk resolver`_). -* :doc:`/plugins/web`: Files now download with a reasonable filename rather - than just being called "file" (thanks to Zach Denton). -* :doc:`/plugins/importfeeds`: Fix error in symlink mode with non-ASCII +- :doc:`/plugins/web`: Files now download with a reasonable filename rather than + just being called "file" (thanks to Zach Denton). +- :doc:`/plugins/importfeeds`: Fix error in symlink mode with non-ASCII filenames. -* :doc:`/plugins/mbcollection`: Fix an error when submitting a large number of - releases (we now submit only 200 releases at a time instead of 350). Thanks - to Jonathan Towne. -* :doc:`/plugins/embedart`: Made the method for embedding art into FLAC files +- :doc:`/plugins/mbcollection`: Fix an error when submitting a large number of + releases (we now submit only 200 releases at a time instead of 350). Thanks to + Jonathan Towne. +- :doc:`/plugins/embedart`: Made the method for embedding art into FLAC files `standard <https://wiki.xiph.org/VorbisComment#METADATA_BLOCK_PICTURE>`_-compliant. Thanks to Daniele Sluijters. -* Add the track mapping dictionary to the ``album_distance`` plugin function. -* When an exception is raised while reading a file, the path of the file in +- Add the track mapping dictionary to the ``album_distance`` plugin function. +- When an exception is raised while reading a file, the path of the file in question is now logged (thanks to Mike Kazantsev). -* Truncate long filenames based on their *bytes* rather than their Unicode +- Truncate long filenames based on their *bytes* rather than their Unicode *characters*, fixing situations where encoded names could be too long. -* Filename truncation now incorporates the length of the extension. 
-* Fix an assertion failure when the MusicBrainz main database and search server +- Filename truncation now incorporates the length of the extension. +- Fix an assertion failure when the MusicBrainz main database and search server disagree. -* Fix a bug that caused the :doc:`/plugins/lastgenre` and other plugins not to +- Fix a bug that caused the :doc:`/plugins/lastgenre` and other plugins not to modify files' tags even when they successfully change the database. -* Fix a VFS bug leading to a crash in the :doc:`/plugins/bpd` when files had +- Fix a VFS bug leading to a crash in the :doc:`/plugins/bpd` when files had non-ASCII extensions. -* Fix for changing date fields (like "year") with the :ref:`modify-cmd` - command. -* Fix a crash when input is read from a pipe without a specified encoding. -* Fix some problem with identifying files on Windows with Unicode directory +- Fix for changing date fields (like "year") with the :ref:`modify-cmd` command. +- Fix a crash when input is read from a pipe without a specified encoding. +- Fix some problem with identifying files on Windows with Unicode directory names in their path. -* Fix a crash when Unicode queries were used with ``import -L`` re-imports. -* Fix an error when fingerprinting files with Unicode filenames on Windows. -* Warn instead of crashing when importing a specific file in singleton mode. -* Add human-readable error messages when writing files' tags fails or when a +- Fix a crash when Unicode queries were used with ``import -L`` re-imports. +- Fix an error when fingerprinting files with Unicode filenames on Windows. +- Warn instead of crashing when importing a specific file in singleton mode. +- Add human-readable error messages when writing files' tags fails or when a directory can't be created. -* Changed plugin loading so that modules can be imported without - unintentionally loading the plugins they contain. 
+- Changed plugin loading so that modules can be imported without unintentionally + loading the plugins they contain. -.. _The Echo Nest: https://web.archive.org/web/20180329103558/http://the.echonest.com/ -.. _Tomahawk resolver: https://beets.io/blog/tomahawk-resolver.html -.. _mp3gain: http://mp3gain.sourceforge.net/download.php .. _aacgain: https://aacgain.altosdesign.com +.. _mp3gain: http://mp3gain.sourceforge.net/download.php + +.. _the echo nest: https://web.archive.org/web/20180329103558/http://the.echonest.com/ + +.. _tomahawk resolver: https://beets.io/blog/tomahawk-resolver.html + 1.0b15 (July 26, 2012) ---------------------- @@ -4282,96 +4403,96 @@ encapsulated in a plugin (the :doc:`/plugins/fetchart`). If you want to continue fetching cover art for your music, enable this plugin after upgrading to beets 1.0b15. -* The autotagger can now find matches for albums when you have **extra tracks** +- The autotagger can now find matches for albums when you have **extra tracks** on your filesystem that aren't present in the MusicBrainz catalog. Previously, if you tried to match album with 15 audio files but the MusicBrainz entry had only 14 tracks, beets would ignore this match. Now, beets will show you matches even when they are "too short" and indicate which tracks from your disk are unmatched. -* Tracks on multi-disc albums can now be **numbered per-disc** instead of +- Tracks on multi-disc albums can now be **numbered per-disc** instead of per-album via the :ref:`per_disc_numbering` config option. -* The default output format for the ``beet list`` command is now configurable +- The default output format for the ``beet list`` command is now configurable via the :ref:`list_format_item` and :ref:`list_format_album` config options. Thanks to Fabrice Laporte. -* Album **cover art fetching** is now encapsulated in the +- Album **cover art fetching** is now encapsulated in the :doc:`/plugins/fetchart`. 
Be sure to enable this plugin if you're using this functionality. As a result of this new organization, the new plugin has gained a few new features: - * "As-is" and non-autotagged imports can now have album art imported from - the local filesystem (although Web repositories are still not searched in - these cases). - * A new command, ``beet fetchart``, allows you to download album art + - "As-is" and non-autotagged imports can now have album art imported from the + local filesystem (although Web repositories are still not searched in these + cases). + - A new command, ``beet fetchart``, allows you to download album art post-import. If you only want to fetch art manually, not automatically during import, set the new plugin's ``autofetch`` option to ``no``. - * New album art sources have been added. + - New album art sources have been added. -* Errors when communicating with MusicBrainz now log an error message instead of +- Errors when communicating with MusicBrainz now log an error message instead of halting the importer. -* Similarly, filesystem manipulation errors now print helpful error messages +- Similarly, filesystem manipulation errors now print helpful error messages instead of a messy traceback. They still interrupt beets, but they should now be easier for users to understand. Tracebacks are still available in verbose mode. -* New metadata fields for `artist credits`_: ``artist_credit`` and +- New metadata fields for `artist credits`_: ``artist_credit`` and ``albumartist_credit`` can now contain release- and recording-specific variations of the artist's name. See :ref:`itemfields`. -* Revamped the way beets handles concurrent database access to avoid +- Revamped the way beets handles concurrent database access to avoid nondeterministic SQLite-related crashes when using the multithreaded importer. 
On systems where SQLite was compiled without ``usleep(3)`` support, multithreaded database access could cause an internal error (with the message "database is locked"). This release synchronizes access to the database to avoid internal SQLite contention, which should avoid this error. -* Plugins can now add parallel stages to the import pipeline. See - :ref:`writing-plugins`. -* Beets now prints out an error when you use an unrecognized field name in a +- Plugins can now add parallel stages to the import pipeline. See + :ref:`basic-plugin-setup`. +- Beets now prints out an error when you use an unrecognized field name in a query: for example, when running ``beet ls -a artist:foo`` (because ``artist`` is an item-level field). -* New plugin events: +- New plugin events: - * ``import_task_choice`` is called after an import task has an action + - ``import_task_choice`` is called after an import task has an action assigned. - * ``import_task_files`` is called after a task's file manipulation has + - ``import_task_files`` is called after a task's file manipulation has finished (copying or moving files, writing metadata tags). - * ``library_opened`` is called when beets starts up and opens the library + - ``library_opened`` is called when beets starts up and opens the library database. -* :doc:`/plugins/lastgenre`: Fixed a problem where path formats containing +- :doc:`/plugins/lastgenre`: Fixed a problem where path formats containing ``$genre`` would use the old genre instead of the newly discovered one. -* Fix a crash when moving files to a Samba share. -* :doc:`/plugins/mpdupdate`: Fix TypeError crash (thanks to Philippe Mongeau). -* When re-importing files with ``import_copy`` enabled, only files inside the +- Fix a crash when moving files to a Samba share. +- :doc:`/plugins/mpdupdate`: Fix TypeError crash (thanks to Philippe Mongeau). +- When re-importing files with ``import_copy`` enabled, only files inside the library directory are moved. 
Files outside the library directory are still copied. This solves a problem (introduced in 1.0b14) where beets could crash after adding files to the library but before finishing copying them; during the next import, the (external) files would be moved instead of copied. -* Artist sort names are now populated correctly for multi-artist tracks and +- Artist sort names are now populated correctly for multi-artist tracks and releases. (Previously, they only reflected the first artist.) -* When previewing changes during import, differences in track duration are now +- When previewing changes during import, differences in track duration are now shown as "2:50 vs. 3:10" rather than separated with ``->`` like track numbers. This should clarify that beets isn't doing anything to modify lengths. -* Fix a problem with query-based path format matching where a field-qualified +- Fix a problem with query-based path format matching where a field-qualified pattern, like ``albumtype_soundtrack``, would match everything. -* :doc:`/plugins/chroma`: Fix matching with ambiguous Acoustids. Some Acoustids +- :doc:`/plugins/chroma`: Fix matching with ambiguous Acoustids. Some Acoustids are identified with multiple recordings; beets now considers any associated recording a valid match. This should reduce some cases of errant track reordering when using chroma. -* Fix the ID3 tag name for the catalog number field. -* :doc:`/plugins/chroma`: Fix occasional crash at end of fingerprint submission +- Fix the ID3 tag name for the catalog number field. +- :doc:`/plugins/chroma`: Fix occasional crash at end of fingerprint submission and give more context to "failed fingerprint generation" errors. -* Interactive prompts are sent to stdout instead of stderr. -* :doc:`/plugins/embedart`: Fix crash when audio files are unreadable. -* :doc:`/plugins/bpd`: Fix crash when sockets disconnect (thanks to Matteo +- Interactive prompts are sent to stdout instead of stderr. 
+- :doc:`/plugins/embedart`: Fix crash when audio files are unreadable. +- :doc:`/plugins/bpd`: Fix crash when sockets disconnect (thanks to Matteo Mecucci). -* Fix an assertion failure while importing with moving enabled when the file was +- Fix an assertion failure while importing with moving enabled when the file was already at its destination. -* Fix Unicode values in the ``replace`` config option (thanks to Jakob Borg). -* Use a nicer error message when input is requested but stdin is closed. -* Fix errors on Windows for certain Unicode characters that can't be represented +- Fix Unicode values in the ``replace`` config option (thanks to Jakob Borg). +- Use a nicer error message when input is requested but stdin is closed. +- Fix errors on Windows for certain Unicode characters that can't be represented in the MBCS encoding. This required a change to the way that paths are represented in the database on Windows; if you find that beets' paths are out of sync with your filesystem with this release, delete and recreate your database with ``beet import -AWC /path/to/music``. -* Fix ``import`` with relative path arguments on Windows. +- Fix ``import`` with relative path arguments on Windows. .. _artist credits: https://wiki.musicbrainz.org/Artist_Credit @@ -4389,74 +4510,74 @@ and a plugin for interoperability with other music library systems. A million thanks to the (growing) beets community for making this a huge release. -* The importer now gives you **choices when duplicates are detected**. +- The importer now gives you **choices when duplicates are detected**. Previously, when beets found an existing album or item in your library matching the metadata on a newly-imported one, it would just skip the new music to avoid introducing duplicates into your library. Now, you have three choices: skip the new music (the previous behavior), keep both, or remove the old music. See the :ref:`guide-duplicates` section in the autotagging guide for details. 
-* Beets can now avoid storing identically-named albums in the same directory. +- Beets can now avoid storing identically-named albums in the same directory. The new ``%aunique{}`` template function, which is included in the default path formats, ensures that Crystal Castles' albums will be placed into different directories. See :ref:`aunique` for details. -* Beets queries can now use **regular expressions**. Use an additional ``:`` in +- Beets queries can now use **regular expressions**. Use an additional ``:`` in your query to enable regex matching. See :ref:`regex` for the full details. Thanks to Matteo Mecucci. -* Artist **sort names** are now fetched from MusicBrainz. There are two new data +- Artist **sort names** are now fetched from MusicBrainz. There are two new data fields, ``artist_sort`` and ``albumartist_sort``, that contain sortable artist names like "Beatles, The". These fields are also used to sort albums and items when using the ``list`` command. Thanks to Paul Provost. -* Many other **new metadata fields** were added, including ASIN, label catalog +- Many other **new metadata fields** were added, including ASIN, label catalog number, disc title, encoder, and MusicBrainz release group ID. For a full list of fields, see :ref:`itemfields`. -* :doc:`/plugins/chroma`: A new command, ``beet submit``, will **submit +- :doc:`/plugins/chroma`: A new command, ``beet submit``, will **submit fingerprints** to the Acoustid database. Submitting your library helps increase the coverage and accuracy of Acoustid fingerprinting. The Chromaprint fingerprint and Acoustid ID are also now stored for all fingerprinted tracks. - This version of beets *requires* at least version 0.6 of `pyacoustid`_ for + This version of beets *requires* at least version 0.6 of pyacoustid_ for fingerprinting to work. -* The importer can now **move files**. Previously, beets could only copy files +- The importer can now **move files**. 
Previously, beets could only copy files and delete the originals, which is inefficient if the source and destination are on the same filesystem. Use the ``import_move`` configuration option and see :doc:`/reference/config` for more details. Thanks to Domen Kožar. -* New :doc:`/plugins/random`: Randomly select albums and tracks from your library. - Thanks to Philippe Mongeau. -* The :doc:`/plugins/mbcollection` by Jeffrey Aylesworth was added to the core +- New :doc:`/plugins/random`: Randomly select albums and tracks from your + library. Thanks to Philippe Mongeau. +- The :doc:`/plugins/mbcollection` by Jeffrey Aylesworth was added to the core beets distribution. -* New :doc:`/plugins/importfeeds`: Catalog imported files in ``m3u`` playlist +- New :doc:`/plugins/importfeeds`: Catalog imported files in ``m3u`` playlist files or as symlinks for easy importing to other systems. Thanks to Fabrice Laporte. -* The ``-f`` (output format) option to the ``beet list`` command can now contain +- The ``-f`` (output format) option to the ``beet list`` command can now contain template functions as well as field references. Thanks to Steve Dougherty. -* A new command ``beet fields`` displays the available metadata fields (thanks +- A new command ``beet fields`` displays the available metadata fields (thanks to Matteo Mecucci). -* The ``import`` command now has a ``--noincremental`` or ``-I`` flag to disable +- The ``import`` command now has a ``--noincremental`` or ``-I`` flag to disable incremental imports (thanks to Matteo Mecucci). -* When the autotagger fails to find a match, it now displays the number of +- When the autotagger fails to find a match, it now displays the number of tracks on the album (to help you guess what might be going wrong) and a link to the FAQ. -* The default filename character substitutions were changed to be more +- The default filename character substitutions were changed to be more conservative. 
The Windows "reserved characters" are substituted by default even on Unix platforms (this causes less surprise when using Samba shares to store music). To customize your character substitutions, see :ref:`the replace config option <replace>`. -* :doc:`/plugins/lastgenre`: Added a "fallback" option when no suitable genre +- :doc:`/plugins/lastgenre`: Added a "fallback" option when no suitable genre can be found (thanks to Fabrice Laporte). -* :doc:`/plugins/rewrite`: Unicode rewriting rules are now allowed (thanks to +- :doc:`/plugins/rewrite`: Unicode rewriting rules are now allowed (thanks to Nicolas Dietrich). -* Filename collisions are now avoided when moving album art. -* :doc:`/plugins/bpd`: Print messages to show when directory tree is being +- Filename collisions are now avoided when moving album art. +- :doc:`/plugins/bpd`: Print messages to show when directory tree is being constructed. -* :doc:`/plugins/bpd`: Use Gstreamer's ``playbin2`` element instead of the +- :doc:`/plugins/bpd`: Use Gstreamer's ``playbin2`` element instead of the deprecated ``playbin``. -* :doc:`/plugins/bpd`: Random and repeat modes are now supported (thanks to +- :doc:`/plugins/bpd`: Random and repeat modes are now supported (thanks to Matteo Mecucci). -* :doc:`/plugins/bpd`: Listings are now sorted (thanks once again to Matteo +- :doc:`/plugins/bpd`: Listings are now sorted (thanks once again to Matteo Mecucci). -* Filenames are normalized with Unicode Normal Form D (NFD) on Mac OS X and NFC +- Filenames are normalized with Unicode Normal Form D (NFD) on Mac OS X and NFC on all other platforms. -* Significant internal restructuring to avoid SQLite locking errors. As part of +- Significant internal restructuring to avoid SQLite locking errors. As part of these changes, the not-very-useful "save" plugin event has been removed. .. _pyacoustid: https://github.com/beetbox/pyacoustid @@ -4472,65 +4593,65 @@ deletion now cleans up after itself more thoroughly. 
Many, many bugs—including several crashers—were fixed. This release lays the foundation for more features to come in the next couple of releases. -* The :doc:`/plugins/lyrics`, originally by `Peter Brunner`_, is revamped and +- The :doc:`/plugins/lyrics`, originally by `Peter Brunner`_, is revamped and included with beets, making it easy to fetch **song lyrics**. -* Items now expose their audio **sample rate**, number of **channels**, and +- Items now expose their audio **sample rate**, number of **channels**, and **bits per sample** (bitdepth). See :doc:`/reference/pathformat` for a list of all available audio properties. Thanks to Andrew Dunn. -* The ``beet list`` command now accepts a "format" argument that lets you **show +- The ``beet list`` command now accepts a "format" argument that lets you **show specific information about each album or track**. For example, run ``beet ls -af '$album: $tracktotal' beatles`` to see how long each Beatles album is. Thanks to Philippe Mongeau. -* The autotagger now tolerates tracks on multi-disc albums that are numbered +- The autotagger now tolerates tracks on multi-disc albums that are numbered per-disc. For example, if track 24 on a release is the first track on the second disc, then it is not penalized for having its track number set to 1 instead of 24. -* The autotagger sets the disc number and disc total fields on autotagged +- The autotagger sets the disc number and disc total fields on autotagged albums. -* The autotagger now also tolerates tracks whose track artists tags are set - to "Various Artists". -* Terminal colors are now supported on Windows via `Colorama`_ (thanks to Karl). -* When previewing metadata differences, the importer now shows discrepancies in +- The autotagger now also tolerates tracks whose track artists tags are set to + "Various Artists". +- Terminal colors are now supported on Windows via Colorama_ (thanks to Karl). 
+- When previewing metadata differences, the importer now shows discrepancies in track length. -* Importing with ``import_delete`` enabled now cleans up empty directories that +- Importing with ``import_delete`` enabled now cleans up empty directories that contained deleting imported music files. -* Similarly, ``import_delete`` now causes original album art imported from the +- Similarly, ``import_delete`` now causes original album art imported from the disk to be deleted. -* Plugin-supplied template values, such as those created by ``rewrite``, are now +- Plugin-supplied template values, such as those created by ``rewrite``, are now properly sanitized (for example, ``AC/DC`` properly becomes ``AC_DC``). -* Filename extensions are now always lower-cased when copying and moving files. -* The ``inline`` plugin now prints a more comprehensible error when exceptions +- Filename extensions are now always lower-cased when copying and moving files. +- The ``inline`` plugin now prints a more comprehensible error when exceptions occur in Python snippets. -* The ``replace`` configuration option can now remove characters entirely (in +- The ``replace`` configuration option can now remove characters entirely (in addition to replacing them) if the special string ``<strip>`` is specified as the replacement. -* New plugin API: plugins can now add fields to the MediaFile tag abstraction - layer. See :ref:`writing-plugins`. -* A reasonable error message is now shown when the import log file cannot be +- New plugin API: plugins can now add fields to the MediaFile tag abstraction + layer. See :ref:`basic-plugin-setup`. +- A reasonable error message is now shown when the import log file cannot be opened. -* The import log file is now flushed and closed properly so that it can be used +- The import log file is now flushed and closed properly so that it can be used to monitor import progress, even when the import crashes. 
-* Duplicate track matches are no longer shown when autotagging singletons. -* The ``chroma`` plugin now logs errors when fingerprinting fails. -* The ``lastgenre`` plugin suppresses more errors when dealing with the Last.fm +- Duplicate track matches are no longer shown when autotagging singletons. +- The ``chroma`` plugin now logs errors when fingerprinting fails. +- The ``lastgenre`` plugin suppresses more errors when dealing with the Last.fm API. -* Fix a bug in the ``rewrite`` plugin that broke the use of multiple rules for - a single field. -* Fix a crash with non-ASCII characters in bytestring metadata fields (e.g., +- Fix a bug in the ``rewrite`` plugin that broke the use of multiple rules for a + single field. +- Fix a crash with non-ASCII characters in bytestring metadata fields (e.g., MusicBrainz IDs). -* Fix another crash with non-ASCII characters in the configuration paths. -* Fix a divide-by-zero crash on zero-length audio files. -* Fix a crash in the ``chroma`` plugin when the Acoustid database had no +- Fix another crash with non-ASCII characters in the configuration paths. +- Fix a divide-by-zero crash on zero-length audio files. +- Fix a crash in the ``chroma`` plugin when the Acoustid database had no recording associated with a fingerprint. -* Fix a crash when an autotagging with an artist or album containing "AND" or +- Fix a crash when an autotagging with an artist or album containing "AND" or "OR" (upper case). -* Fix an error in the ``rewrite`` and ``inline`` plugins when the corresponding +- Fix an error in the ``rewrite`` and ``inline`` plugins when the corresponding config sections did not exist. -* Fix bitrate estimation for AAC files whose headers are missing the relevant +- Fix bitrate estimation for AAC files whose headers are missing the relevant data. -* Fix the ``list`` command in BPD (thanks to Simon Chopin). +- Fix the ``list`` command in BPD (thanks to Simon Chopin). -.. _Colorama: https://pypi.python.org/pypi/colorama +.. 
_colorama: https://pypi.python.org/pypi/colorama 1.0b12 (January 16, 2012) ------------------------- @@ -4545,44 +4666,44 @@ In addition, beets avoids problematic filename conflicts by appending numbers to filenames that would otherwise conflict. Three new plugins (``inline``, ``scrub``, and ``rewrite``) are included in this release. -* **Functions in path formats** provide a simple way to write complex file +- **Functions in path formats** provide a simple way to write complex file naming rules: for example, ``%upper{%left{$artist,1}}`` will insert the capitalized first letter of the track's artist. For more details, see :doc:`/reference/pathformat`. If you're interested in adding your own template - functions via a plugin, see :ref:`writing-plugins`. -* Plugins can also now define new path *fields* in addition to functions. -* The new :doc:`/plugins/inline` lets you **use Python expressions to customize + functions via a plugin, see :ref:`basic-plugin-setup`. +- Plugins can also now define new path *fields* in addition to functions. +- The new :doc:`/plugins/inline` lets you **use Python expressions to customize path formats** by defining new fields in the config file. -* The configuration can **condition path formats based on queries**. That is, +- The configuration can **condition path formats based on queries**. That is, you can write a path format that is only used if an item matches a given query. (This supersedes the earlier functionality that only allowed conditioning on album type; if you used this feature in a previous version, you will need to replace, for example, ``soundtrack:`` with ``albumtype_soundtrack:``.) See :ref:`path-format-config`. -* **Filename substitutions are now configurable** via the ``replace`` config +- **Filename substitutions are now configurable** via the ``replace`` config value. You can choose which characters you think should be allowed in your - directory and music file names. See :doc:`/reference/config`. 
-* Beets now ensures that files have **unique filenames** by appending a number + directory and music file names. See :doc:`/reference/config`. +- Beets now ensures that files have **unique filenames** by appending a number to any filename that would otherwise conflict with an existing file. -* The new :doc:`/plugins/scrub` can remove extraneous metadata either manually +- The new :doc:`/plugins/scrub` can remove extraneous metadata either manually or automatically. -* The new :doc:`/plugins/rewrite` can canonicalize names for path formats. -* The autotagging heuristics have been tweaked in situations where the +- The new :doc:`/plugins/rewrite` can canonicalize names for path formats. +- The autotagging heuristics have been tweaked in situations where the MusicBrainz database did not contain track lengths. Previously, beets penalized matches where this was the case, leading to situations where seemingly good matches would have poor similarity. This penalty has been removed. -* Fix an incompatibility in BPD with libmpc (the library that powers mpc and +- Fix an incompatibility in BPD with libmpc (the library that powers mpc and ncmpc). -* Fix a crash when importing a partial match whose first track was missing. -* The ``lastgenre`` plugin now correctly writes discovered genres to imported +- Fix a crash when importing a partial match whose first track was missing. +- The ``lastgenre`` plugin now correctly writes discovered genres to imported files (when tag-writing is enabled). -* Add a message when skipping directories during an incremental import. -* The default ignore settings now ignore all files beginning with a dot. -* Date values in path formats (``$year``, ``$month``, and ``$day``) are now +- Add a message when skipping directories during an incremental import. +- The default ignore settings now ignore all files beginning with a dot. +- Date values in path formats (``$year``, ``$month``, and ``$day``) are now appropriately zero-padded. 
-* Removed the ``--path-format`` global flag for ``beet``. -* Removed the ``lastid`` plugin, which was deprecated in the previous version. +- Removed the ``--path-format`` global flag for ``beet``. +- Removed the ``lastid`` plugin, which was deprecated in the previous version. 1.0b11 (December 12, 2011) -------------------------- @@ -4590,66 +4711,71 @@ filenames that would otherwise conflict. Three new plugins (``inline``, This version of beets focuses on transitioning the autotagger to the new version of the MusicBrainz database (called NGS). This transition brings with it a number of long-overdue improvements: most notably, predictable behavior when -tagging multi-disc albums and integration with the new `Acoustid`_ acoustic +tagging multi-disc albums and integration with the new Acoustid_ acoustic fingerprinting technology. The importer can also now tag *incomplete* albums when you're missing a few tracks from a given release. Two other new plugins are also included with this release: one for assigning genres and another for ReplayGain analysis. -* Beets now communicates with MusicBrainz via the new `Next Generation Schema`_ - (NGS) service via `python-musicbrainzngs`_. The bindings are included with - this version of beets, but a future version will make them an external - dependency. -* The importer now detects **multi-disc albums** and tags them together. Using a +- Beets now communicates with MusicBrainz via the new `Next Generation Schema`_ + (NGS) service via python-musicbrainzngs_. The bindings are included with this + version of beets, but a future version will make them an external dependency. +- The importer now detects **multi-disc albums** and tags them together. Using a heuristic based on the names of directories, certain structures are classified as multi-disc albums: for example, if a directory contains subdirectories labeled "disc 1" and "disc 2", these subdirectories will be coalesced into a single album for tagging. 
-* The new :doc:`/plugins/chroma` uses the `Acoustid`_ **open-source acoustic +- The new :doc:`/plugins/chroma` uses the Acoustid_ **open-source acoustic fingerprinting** service. This replaces the old ``lastid`` plugin, which used Last.fm fingerprinting and is now deprecated. Fingerprinting with this library should be faster and more reliable. -* The importer can now perform **partial matches**. This means that, if you're +- The importer can now perform **partial matches**. This means that, if you're missing a few tracks from an album, beets can still tag the remaining tracks as a single album. (Thanks to `Simon Chopin`_.) -* The new :doc:`/plugins/lastgenre` automatically **assigns genres to imported +- The new :doc:`/plugins/lastgenre` automatically **assigns genres to imported albums** and items based on Last.fm tags and an internal whitelist. (Thanks to - `KraYmer`_.) -* The :doc:`/plugins/replaygain`, written by `Peter Brunner`_, has been merged + KraYmer_.) +- The :doc:`/plugins/replaygain`, written by `Peter Brunner`_, has been merged into the core beets distribution. Use it to analyze audio and **adjust playback levels** in ReplayGain-aware music players. -* Albums are now tagged with their *original* release date rather than the date +- Albums are now tagged with their *original* release date rather than the date of any reissue, remaster, "special edition", or the like. -* The config file and library databases are now given better names and locations +- The config file and library databases are now given better names and locations on Windows. Namely, both files now reside in ``%APPDATA%``; the config file is named ``beetsconfig.ini`` and the database is called ``beetslibrary.blb`` (neither has a leading dot as on Unix). For backwards compatibility, beets will check the old locations first. 
-* When entering an ID manually during tagging, beets now searches for anything +- When entering an ID manually during tagging, beets now searches for anything that looks like an MBID in the entered string. This means that full MusicBrainz URLs now work as IDs at the prompt. (Thanks to derwin.) -* The importer now ignores certain "clutter" files like ``.AppleDouble`` +- The importer now ignores certain "clutter" files like ``.AppleDouble`` directories and ``._*`` files. The list of ignored patterns is configurable via the ``ignore`` setting; see :doc:`/reference/config`. -* The database now keeps track of files' modification times so that, during - an ``update``, unmodified files can be skipped. (Thanks to Jos van der Til.) -* The album art fetcher now uses `albumart.org`_ as a fallback when the Amazon - art downloader fails. -* A new ``timeout`` config value avoids database locking errors on slow systems. -* Fix a crash after using the "as Tracks" option during import. -* Fix a Unicode error when tagging items with missing titles. -* Fix a crash when the state file (``~/.beetsstate``) became emptied or +- The database now keeps track of files' modification times so that, during an + ``update``, unmodified files can be skipped. (Thanks to Jos van der Til.) +- The album art fetcher now uses albumart.org_ as a fallback when the Amazon art + downloader fails. +- A new ``timeout`` config value avoids database locking errors on slow systems. +- Fix a crash after using the "as Tracks" option during import. +- Fix a Unicode error when tagging items with missing titles. +- Fix a crash when the state file (``~/.beetsstate``) became emptied or corrupted. -.. _KraYmer: https://github.com/KraYmer -.. _Next Generation Schema: https://musicbrainz.org/doc/XML_Web_Service/Version_2 -.. _python-musicbrainzngs: https://github.com/alastair/python-musicbrainzngs .. _acoustid: https://acoustid.org/ -.. _Peter Brunner: https://github.com/Lugoues -.. 
_Simon Chopin: https://github.com/laarmen + .. _albumart.org: https://www.albumart.org/ +.. _kraymer: https://github.com/KraYmer + +.. _next generation schema: https://musicbrainz.org/doc/XML_Web_Service/Version_2 + +.. _peter brunner: https://github.com/Lugoues + +.. _python-musicbrainzngs: https://github.com/alastair/python-musicbrainzngs + +.. _simon chopin: https://github.com/laarmen + 1.0b10 (September 22, 2011) --------------------------- @@ -4666,54 +4792,40 @@ previously-imported directories (with the ``-i`` flag) and there's an :doc:`experimental Web interface </plugins/web>` to beets in a new standard plugin. -* A new ``beet modify`` command enables **manual, command-line-based +- A new ``beet modify`` command enables **manual, command-line-based modification** of music metadata. Pass it a query along with ``field=value`` pairs that specify the changes you want to make. - -* A new ``beet update`` command updates the database to reflect **changes in the +- A new ``beet update`` command updates the database to reflect **changes in the on-disk metadata**. You can now use an external program to edit tags on files, remove files and directories, etc., and then run ``beet update`` to make sure your beets library is in sync. This will also rename files to reflect their new metadata. - -* A new ``beet move`` command can **copy or move files** into your library +- A new ``beet move`` command can **copy or move files** into your library directory or to another specified directory. - -* When importing files that are already in the library database, the items are +- When importing files that are already in the library database, the items are no longer duplicated---instead, the library is updated to reflect the new metadata. This way, the import command can be transparently used as a **re-import**. 
- -* Relatedly, the ``-L`` flag to the "import" command makes it take a query as +- Relatedly, the ``-L`` flag to the "import" command makes it take a query as its argument instead of a list of directories. The matched albums (or items, depending on the ``-s`` flag) are then re-imported. - -* A new flag ``-i`` to the import command runs **incremental imports**, keeping +- A new flag ``-i`` to the import command runs **incremental imports**, keeping track of and skipping previously-imported directories. This has the effect of making repeated import commands pick up only newly-added directories. The ``import_incremental`` config option makes this the default. - -* When pruning directories, "clutter" files such as ``.DS_Store`` and +- When pruning directories, "clutter" files such as ``.DS_Store`` and ``Thumbs.db`` are ignored (and removed with otherwise-empty directories). - -* The :doc:`/plugins/web` encapsulates a simple **Web-based GUI for beets**. The +- The :doc:`/plugins/web` encapsulates a simple **Web-based GUI for beets**. The current iteration can browse the library and play music in browsers that support HTML5 Audio. - -* When moving items that are part of an album, the album art implicitly moves +- When moving items that are part of an album, the album art implicitly moves too. - -* Files are no longer silently overwritten when moving and copying files. - -* Handle exceptions thrown when running Mutagen. - -* Fix a missing ``__future__`` import in ``embed art`` on Python 2.5. - -* Fix ID3 and MPEG-4 tag names for the album-artist field. - -* Fix Unicode encoding of album artist, album type, and label. - -* Fix crash when "copying" an art file that's already in place. +- Files are no longer silently overwritten when moving and copying files. +- Handle exceptions thrown when running Mutagen. +- Fix a missing ``__future__`` import in ``embed art`` on Python 2.5. +- Fix ID3 and MPEG-4 tag names for the album-artist field. 
+- Fix Unicode encoding of album artist, album type, and label. +- Fix crash when "copying" an art file that's already in place. 1.0b9 (July 9, 2011) -------------------- @@ -4722,101 +4834,74 @@ This release focuses on a large number of small fixes and improvements that turn beets into a well-oiled, music-devouring machine. See the full release notes, below, for a plethora of new features. -* **Queries can now contain whitespace.** Spaces passed as shell arguments are +- **Queries can now contain whitespace.** Spaces passed as shell arguments are now preserved, so you can use your shell's escaping syntax (quotes or - backslashes, for instance) to include spaces in queries. For example, - typing``beet ls "the knife"`` or ``beet ls the\ knife``. Read more in + backslashes, for instance) to include spaces in queries. For example, ``beet + ls "the knife"`` or ``beet ls the\ knife``. Read more in :doc:`/reference/query`. - -* Queries can **match items from the library by directory**. A ``path:`` prefix +- Queries can **match items from the library by directory**. A ``path:`` prefix is optional; any query containing a path separator (/ on POSIX systems) is assumed to be a path query. Running ``beet ls path/to/music`` will show all the music in your library under the specified directory. The :doc:`/reference/query` reference again has more details. - -* **Local album art** is now automatically discovered and copied from the +- **Local album art** is now automatically discovered and copied from the imported directories when available. - -* When choosing the "as-is" import album (or doing a non-autotagged import), +- When choosing the "as-is" import album (or doing a non-autotagged import), **every album either has an "album artist" set or is marked as a compilation (Various Artists)**. The choice is made based on the homogeneity of the tracks' artists. This prevents compilations that are imported as-is from being scattered across many directories after they are imported.
- -* The release **label** for albums and tracks is now fetched from !MusicBrainz, +- The release **label** for albums and tracks is now fetched from !MusicBrainz, written to files, and stored in the database. - -* The "list" command now accepts a ``-p`` switch that causes it to **show +- The "list" command now accepts a ``-p`` switch that causes it to **show paths** instead of titles. This makes the output of ``beet ls -p`` suitable - for piping into another command such as `xargs`_. - -* Release year and label are now shown in the candidate selection list to help + for piping into another command such as xargs_. +- Release year and label are now shown in the candidate selection list to help disambiguate different releases of the same album. - -* Prompts in the importer interface are now colorized for easy reading. The +- Prompts in the importer interface are now colorized for easy reading. The default option is always highlighted. - -* The importer now provides the option to specify a MusicBrainz ID manually if +- The importer now provides the option to specify a MusicBrainz ID manually if the built-in searching isn't working for a particular album or track. - -* ``$bitrate`` in path formats is now formatted as a human-readable kbps value +- ``$bitrate`` in path formats is now formatted as a human-readable kbps value instead of as a raw integer. - -* The import logger has been improved for "always-on" use. First, it is now +- The import logger has been improved for "always-on" use. First, it is now possible to specify a log file in .beetsconfig. Also, logs are now appended rather than overwritten and contain timestamps. - -* Album art fetching and plugin events are each now run in separate pipeline +- Album art fetching and plugin events are each now run in separate pipeline stages during imports. This should bring additional performance when using album art plugins like embedart or beets-lyrics. 
- -* Accents and other Unicode decorators on characters are now treated more fairly +- Accents and other Unicode decorators on characters are now treated more fairly by the autotagger. For example, if you're missing the acute accent on the "e" - in "café", that change won't be penalized. This introduces a new dependency - on the `unidecode`_ Python module. - -* When tagging a track with no title set, the track's filename is now shown + in "café", that change won't be penalized. This introduces a new dependency on + the unidecode_ Python module. +- When tagging a track with no title set, the track's filename is now shown (instead of nothing at all). - -* The bitrate of lossless files is now calculated from their file size (rather +- The bitrate of lossless files is now calculated from their file size (rather than being fixed at 0 or reflecting the uncompressed audio bitrate). - -* Fixed a problem where duplicate albums or items imported at the same time +- Fixed a problem where duplicate albums or items imported at the same time would fail to be detected. - -* BPD now uses a persistent "virtual filesystem" in order to fake a directory +- BPD now uses a persistent "virtual filesystem" in order to fake a directory structure. This means that your path format settings are respected in BPD's browsing hierarchy. This may come at a performance cost, however. The virtual filesystem used by BPD is available for reuse by plugins (e.g., the FUSE plugin). - -* Singleton imports (``beet import -s``) can now take individual files as +- Singleton imports (``beet import -s``) can now take individual files as arguments as well as directories. +- Fix Unicode queries given on the command line. +- Fix crasher in quiet singleton imports (``import -qs``). +- Fix crash when autotagging files with no metadata. +- Fix a rare deadlock when finishing the import pipeline. +- Fix an issue that was causing mpdupdate to run twice for every album. 
+- Fix a bug that caused release dates/years not to be fetched. +- Fix a crasher when setting MBIDs on MP3s file metadata. +- Fix a "broken pipe" error when piping beets' standard output. +- A better error message is given when the database file is unopenable. +- Suppress errors due to timeouts and bad responses from MusicBrainz. +- Fix a crash on album queries with item-only field names. -* Fix Unicode queries given on the command line. - -* Fix crasher in quiet singleton imports (``import -qs``). - -* Fix crash when autotagging files with no metadata. - -* Fix a rare deadlock when finishing the import pipeline. - -* Fix an issue that was causing mpdupdate to run twice for every album. - -* Fix a bug that caused release dates/years not to be fetched. - -* Fix a crasher when setting MBIDs on MP3s file metadata. - -* Fix a "broken pipe" error when piping beets' standard output. - -* A better error message is given when the database file is unopenable. - -* Suppress errors due to timeouts and bad responses from MusicBrainz. - -* Fix a crash on album queries with item-only field names. +.. _unidecode: https://pypi.python.org/pypi/Unidecode/0.04.1 .. _xargs: https://en.wikipedia.org/wiki/xargs -.. _unidecode: https://pypi.python.org/pypi/Unidecode/0.04.1 1.0b8 (April 28, 2011) ---------------------- @@ -4830,57 +4915,44 @@ catalog, and manipulate your individual tracks. Second, beets can now storing it in a "file on the side." Check out the :doc:`/plugins/embedart` for that functionality. -* Better support for **singleton (non-album) tracks**. Whereas beets previously +- Better support for **singleton (non-album) tracks**. Whereas beets previously only really supported full albums, now it can also keep track of individual, off-album songs. The "singleton" path format can be used to customize where these tracks are stored. 
To import singleton tracks, provide the -s switch to the import command or, while doing a normal full-album import, choose the "as - Tracks" (T) option to add singletons to your library. To list only singleton + Tracks" (T) option to add singletons to your library. To list only singleton or only album tracks, use the new ``singleton:`` query term: the query ``singleton:true`` matches only singleton tracks; ``singleton:false`` matches - only album tracks. The ``lastid`` plugin has been extended to support - matching individual items as well. - -* The importer/autotagger system has been heavily refactored in this release. - If anything breaks as a result, please get in touch or just file a bug. - -* Support for **album art embedded in files**. A new :doc:`/plugins/embedart` + only album tracks. The ``lastid`` plugin has been extended to support matching + individual items as well. +- The importer/autotagger system has been heavily refactored in this release. If + anything breaks as a result, please get in touch or just file a bug. +- Support for **album art embedded in files**. A new :doc:`/plugins/embedart` implements this functionality. Enable the plugin to automatically embed downloaded album art into your music files' metadata. The plugin also provides the "embedart" and "extractart" commands for moving image files in and out of metadata. See the wiki for more details. (Thanks, daenney!) - -* The "distance" number, which quantifies how different an album's current and +- The "distance" number, which quantifies how different an album's current and proposed metadata are, is now displayed as "similarity" instead. This should be less noisy and confusing; you'll now see 99.5% instead of 0.00489323. - -* A new "timid mode" in the importer asks the user every time, even when it +- A new "timid mode" in the importer asks the user every time, even when it makes a match with very high confidence. 
The ``-t`` flag on the command line and the ``import_timid`` config option control this mode. (Thanks to mdecker on GitHub!) - -* The multithreaded importer should now abort (either by selecting aBort or by +- The multithreaded importer should now abort (either by selecting aBort or by typing ^C) much more quickly. Previously, it would try to get a lot of work done before quitting; now it gives up as soon as it can. - -* Added a new plugin event, ``album_imported``, which is called every time an +- Added a new plugin event, ``album_imported``, which is called every time an album is added to the library. (Thanks, Lugoues!) - -* A new plugin method, ``register_listener``, is an imperative alternative to +- A new plugin method, ``register_listener``, is an imperative alternative to the ``@listen`` decorator (Thanks again, Lugoues!) - -* In path formats, ``$albumartist`` now falls back to ``$artist`` (as well as +- In path formats, ``$albumartist`` now falls back to ``$artist`` (as well as the other way around). - -* The importer now prints "(unknown album)" when no tags are present. - -* When autotagging, "and" is considered equal to "&". - -* Fix some crashes when deleting files that don't exist. - -* Fix adding individual tracks in BPD. - -* Fix crash when ``~/.beetsconfig`` does not exist. +- The importer now prints "(unknown album)" when no tags are present. +- When autotagging, "and" is considered equal to "&". +- Fix some crashes when deleting files that don't exist. +- Fix adding individual tracks in BPD. +- Fix crash when ``~/.beetsconfig`` does not exist. 1.0b7 (April 5, 2011) --------------------- @@ -4891,74 +4963,55 @@ autotagger is better at handling them. It also includes a number of oft-requested improvements to the ``beet`` command-line tool, including several new configuration options and the ability to clean up empty directory subtrees. -* **"Various artists" releases** are handled much more gracefully. 
The +- **"Various artists" releases** are handled much more gracefully. The autotagger now sets the ``comp`` flag on albums whenever the album is identified as a "various artists" release by !MusicBrainz. Also, there is now a distinction between the "album artist" and the "track artist", the latter of which is never "Various Artists" or other such bogus stand-in. *(Thanks to Jonathan for the bulk of the implementation work on this feature!)* - -* The directory hierarchy can now be **customized based on release type**. In +- The directory hierarchy can now be **customized based on release type**. In particular, the ``path_format`` setting in .beetsconfig has been replaced with a new ``[paths]`` section, which allows you to specify different path formats for normal and "compilation" (various artists) releases as well as for each album type (see below). The default path formats have been changed to use ``$albumartist`` instead of ``$artist``. - -* A **new ``albumtype`` field** reflects the release type `as specified by +- A **new** ``albumtype`` **field** reflects the release type `as specified by MusicBrainz`_. - -* When deleting files, beets now appropriately "prunes" the directory - tree---empty directories are automatically cleaned up. *(Thanks to - wlof on GitHub for this!)* - -* The tagger's output now always shows the album directory that is currently +- When deleting files, beets now appropriately "prunes" the directory + tree---empty directories are automatically cleaned up. *(Thanks to wlof on + GitHub for this!)* +- The tagger's output now always shows the album directory that is currently being tagged. This should help in situations where files' current tags are missing or useless. - -* The logging option (``-l``) to the ``import`` command now logs duplicate +- The logging option (``-l``) to the ``import`` command now logs duplicate albums. 
- -* A new ``import_resume`` configuration option can be used to disable the +- A new ``import_resume`` configuration option can be used to disable the importer's resuming feature or force it to resume without asking. This option may be either ``yes``, ``no``, or ``ask``, with the obvious meanings. The ``-p`` and ``-P`` command-line flags override this setting and correspond to the "yes" and "no" settings. - -* Resuming is automatically disabled when the importer is in quiet (``-q``) +- Resuming is automatically disabled when the importer is in quiet (``-q``) mode. Progress is still saved, however, and the ``-p`` flag (above) can be used to force resuming. - -* The ``BEETSCONFIG`` environment variable can now be used to specify the +- The ``BEETSCONFIG`` environment variable can now be used to specify the location of the config file that is at ~/.beetsconfig by default. - -* A new ``import_quiet_fallback`` config option specifies what should - happen in quiet mode when there is no strong recommendation. The options are - ``skip`` (the default) and "asis". - -* When importing with the "delete" option and importing files that are already +- A new ``import_quiet_fallback`` config option specifies what should happen in + quiet mode when there is no strong recommendation. The options are ``skip`` + (the default) and "asis". +- When importing with the "delete" option and importing files that are already at their destination, files could be deleted (leaving zero copies afterward). This is fixed. +- The ``version`` command now lists all the loaded plugins. +- A new plugin, called ``info``, just prints out audio file metadata. +- Fix a bug where some files would be erroneously interpreted as MPEG-4 audio. +- Fix permission bits applied to album art files. +- Fix malformed !MusicBrainz queries caused by null characters. +- Fix a bug with old versions of the Monkey's Audio format. +- Fix a crash on broken symbolic links. 
+- Retry in more cases when !MusicBrainz servers are slow/overloaded. +- The old "albumify" plugin for upgrading databases was removed. -* The ``version`` command now lists all the loaded plugins. - -* A new plugin, called ``info``, just prints out audio file metadata. - -* Fix a bug where some files would be erroneously interpreted as MPEG-4 audio. - -* Fix permission bits applied to album art files. - -* Fix malformed !MusicBrainz queries caused by null characters. - -* Fix a bug with old versions of the Monkey's Audio format. - -* Fix a crash on broken symbolic links. - -* Retry in more cases when !MusicBrainz servers are slow/overloaded. - -* The old "albumify" plugin for upgrading databases was removed. - -.. _as specified by MusicBrainz: https://wiki.musicbrainz.org/ReleaseType +.. _as specified by musicbrainz: https://wiki.musicbrainz.org/ReleaseType 1.0b6 (January 20, 2011) ------------------------ @@ -4967,51 +5020,40 @@ This version consists primarily of bug fixes and other small improvements. It's in preparation for a more feature-ful release in beta 7. The most important issue involves correct ordering of autotagged albums. -* **Quiet import:** a new "-q" command line switch for the import command +- **Quiet import:** a new "-q" command line switch for the import command suppresses all prompts for input; it pessimistically skips all albums that the importer is not completely confident about. - -* Added support for the **WavPack** and **Musepack** formats. Unfortunately, due +- Added support for the **WavPack** and **Musepack** formats. Unfortunately, due to a limitation in the Mutagen library (used by beets for metadata manipulation), Musepack SV8 is not yet supported. Here's the `upstream bug`_ in question. - -* BPD now uses a pure-Python socket library and no longer requires +- BPD now uses a pure-Python socket library and no longer requires eventlet/greenlet (the latter of which is a C extension). 
For the curious, the - socket library in question is called `Bluelet`_. - -* Non-autotagged imports are now resumable (just like autotagged imports). - -* Fix a terrible and long-standing bug where track orderings were never applied. + socket library in question is called Bluelet_. +- Non-autotagged imports are now resumable (just like autotagged imports). +- Fix a terrible and long-standing bug where track orderings were never applied. This manifested when the tagger appeared to be applying a reasonable ordering to the tracks but, later, the database reflects a completely wrong association of track names to files. The order applied was always just alphabetical by filename, which is frequently but not always what you want. - -* We now use Windows' "long filename" support. This API is fairly tricky, +- We now use Windows' "long filename" support. This API is fairly tricky, though, so some instability may still be present---please file a bug if you run into pathname weirdness on Windows. Also, filenames on Windows now never end in spaces. - -* Fix crash in lastid when the artist name is not available. - -* Fixed a spurious crash when ``LANG`` or a related environment variable is set +- Fix crash in lastid when the artist name is not available. +- Fixed a spurious crash when ``LANG`` or a related environment variable is set to an invalid value (such as ``'UTF-8'`` on some installations of Mac OS X). - -* Fixed an error when trying to copy a file that is already at its destination. - -* When copying read-only files, the importer now tries to make the copy +- Fixed an error when trying to copy a file that is already at its destination. +- When copying read-only files, the importer now tries to make the copy writable. (Previously, this would just crash the import.) - -* Fixed an ``UnboundLocalError`` when no matches are found during autotag. 
- -* Fixed a Unicode encoding error when entering special characters into the +- Fixed an ``UnboundLocalError`` when no matches are found during autotag. +- Fixed a Unicode encoding error when entering special characters into the "manual search" prompt. +- Added ``beet version`` command that just shows the current release version. -* Added `` beet version`` command that just shows the current release version. +.. _bluelet: https://github.com/sampsyo/bluelet .. _upstream bug: https://github.com/quodlibet/mutagen/issues/7 -.. _Bluelet: https://github.com/sampsyo/bluelet 1.0b5 (September 28, 2010) -------------------------- @@ -5024,55 +5066,41 @@ it more reliable. This release also greatly expands the capabilities of beets' :doc:`plugin API </plugins/index>`. A host of other little features and fixes are also rolled into this release. -* The ``lastid`` plugin adds Last.fm **acoustic fingerprinting - support** to the autotagger. Similar to the PUIDs used by !MusicBrainz Picard, - this system allows beets to recognize files that don't have any metadata at - all. You'll need to install some dependencies for this plugin to work. - -* To support the above, there's also a new system for **extending the autotagger +- The ``lastid`` plugin adds Last.fm **acoustic fingerprinting support** to the + autotagger. Similar to the PUIDs used by !MusicBrainz Picard, this system + allows beets to recognize files that don't have any metadata at all. You'll + need to install some dependencies for this plugin to work. +- To support the above, there's also a new system for **extending the autotagger via plugins**. Plugins can currently add components to the track and album distance functions as well as augment the MusicBrainz search. The new API is documented at :doc:`/plugins/index`. - -* **String comparisons** in the autotagger have been augmented to act more +- **String comparisons** in the autotagger have been augmented to act more intuitively. 
Previously, if your album had the title "Something (EP)" and it was officially called "Something", then beets would think this was a fairly significant change. It now checks for and appropriately reweights certain parts of each string. As another example, the title "The Great Album" is considered equal to "Great Album, The". - -* New **event system for plugins** (thanks, Jeff!). Plugins can now get +- New **event system for plugins** (thanks, Jeff!). Plugins can now get callbacks from beets when certain events occur in the core. Again, the API is documented in :doc:`/plugins/index`. - -* The BPD plugin is now disabled by default. This greatly simplifies +- The BPD plugin is now disabled by default. This greatly simplifies installation of the beets core, which is now 100% pure Python. To use BPD, though, you'll need to set ``plugins: bpd`` in your .beetsconfig. - -* The ``import`` command can now remove original files when it copies items into +- The ``import`` command can now remove original files when it copies items into your library. (This might be useful if you're low on disk space.) Set the ``import_delete`` option in your .beetsconfig to ``yes``. - -* Importing without autotagging (``beet import -A``) now prints out album names +- Importing without autotagging (``beet import -A``) now prints out album names as it imports them to indicate progress. - -* The new :doc:`/plugins/mpdupdate` will automatically update your MPD server's +- The new :doc:`/plugins/mpdupdate` will automatically update your MPD server's index whenever your beets library changes. - -* Efficiency tweak should reduce the number of !MusicBrainz queries per +- Efficiency tweak should reduce the number of !MusicBrainz queries per autotagged album. - -* A new ``-v`` command line switch enables debugging output. - -* Fixed bug that completely broke non-autotagged imports (``import -A``). - -* Fixed bug that logged the wrong paths when using ``import -l``. 
- -* Fixed autotagging for the creatively-named band `!!!`_. - -* Fixed normalization of relative paths. - -* Fixed escaping of ``/`` characters in paths on Windows. +- A new ``-v`` command line switch enables debugging output. +- Fixed bug that completely broke non-autotagged imports (``import -A``). +- Fixed bug that logged the wrong paths when using ``import -l``. +- Fixed autotagging for the creatively-named band `!!!`_. +- Fixed normalization of relative paths. +- Fixed escaping of ``/`` characters in paths on Windows. .. _!!!: https://musicbrainz.org/artist/f26c72d3-e52c-467b-b651-679c73d8e1a7.html @@ -5104,24 +5132,22 @@ for Windows users. This should make running beets much easier: just type Here's the detailed list of changes: -* **Parallel tagger.** The autotagger has been reimplemented to use multiple +- **Parallel tagger.** The autotagger has been reimplemented to use multiple threads. This means that it can concurrently read files from disk, talk to the user, communicate with MusicBrainz, and write data back to disk. Not only does this make the tagger much faster because independent work may be performed in parallel, but it makes the tagging process much more pleasant for large imports. The user can let albums queue up in the background while making a - decision rather than waiting for beets between each question it asks. The + decision rather than waiting for beets between each question it asks. The parallel tagger is on by default but a sequential (single- threaded) version is still available by setting the ``threaded`` config value to ``no`` (because the parallel version is still quite experimental). - -* **Colorized tagger output.** The autotagger interface now makes it a little +- **Colorized tagger output.** The autotagger interface now makes it a little easier to see what's going on at a glance by highlighting changes with terminal colors. 
This feature is on by default, but you can turn it off by setting ``color`` to ``no`` in your ``.beetsconfig`` (if, for example, your terminal doesn't understand colors and garbles the output). - -* **Pause and resume imports.** The ``import`` command now keeps track of its +- **Pause and resume imports.** The ``import`` command now keeps track of its progress, so if you're interrupted (beets crashes, you abort the process, an alien devours your motherboard, etc.), beets will try to resume from the point where you left off. The next time you run ``import`` on the same directory, it @@ -5129,34 +5155,26 @@ Here's the detailed list of changes: through the albums in the directory until it encounters the last one it saw. (This means it might fail if that album can't be found.) Also, you can now abort the tagging process by entering ``b`` (for aBort) at any of the prompts. - -* Overhauled methods for handling filesystem paths to allow filenames that have +- Overhauled methods for handling filesystem paths to allow filenames that have badly encoded special characters. These changes are pretty fragile, so please report any bugs involving ``UnicodeError`` or SQLite ``ProgrammingError`` messages in this version. - -* The destination paths (the library directory structure) now respect +- The destination paths (the library directory structure) now respect album-level metadata. This means that if you have an album in which two tracks have different album-level attributes (like year, for instance), they will - still wind up in the same directory together. (There's currently not a very + still wind up in the same directory together. (There's currently not a very smart method for picking the "correct" album-level metadata, but we'll fix that later.) - -* Fixed a bug where the CLI would fail completely if the ``LANG`` environment +- Fixed a bug where the CLI would fail completely if the ``LANG`` environment variable was not set. 
- -* Fixed removal of albums (``beet remove -a``): previously, the album record +- Fixed removal of albums (``beet remove -a``): previously, the album record would stay around although the items were deleted. - -* The setup script now makes a ``beet.exe`` startup stub on Windows; Windows +- The setup script now makes a ``beet.exe`` startup stub on Windows; Windows users can now just type ``beet`` at the prompt to run beets. - -* Fixed an occasional bug where Mutagen would complain that a tag was already +- Fixed an occasional bug where Mutagen would complain that a tag was already present. - -* Fixed a bug with reading invalid integers from ID3 tags. - -* The tagger should now be a little more reluctant to reorder tracks that +- Fixed a bug with reading invalid integers from ID3 tags. +- The tagger should now be a little more reluctant to reorder tracks that already have indices. 1.0b3 (July 22, 2010) @@ -5164,8 +5182,8 @@ Here's the detailed list of changes: This release features two major additions to the autotagger's functionality: album art fetching and MusicBrainz ID tags. It also contains some important -under-the-hood improvements: a new plugin architecture is introduced -and the database schema is extended with explicit support for albums. +under-the-hood improvements: a new plugin architecture is introduced and the +database schema is extended with explicit support for albums. This release has one major backwards-incompatibility. Because of the new way beets handles albums in the library, databases created with an old version of @@ -5174,7 +5192,7 @@ switch to ``beet list`` and ``beet remove``, as well as the file browser for BPD). To "upgrade" an old database, you can use the included ``albumify`` plugin (see the fourth bullet point below). -* **Album art.** The tagger now, by default, downloads album art from Amazon +- **Album art.** The tagger now, by default, downloads album art from Amazon that is referenced in the MusicBrainz database. 
It places the album art alongside the audio files in a file called (for example) ``cover.jpg``. The ``import_art`` config option controls this behavior, as do the ``-r`` and @@ -5182,8 +5200,7 @@ BPD). To "upgrade" an old database, you can use the included ``albumify`` plugin of the album art file with the ``art_filename`` config option. (See :doc:`/reference/config` for more information about how to configure the album art downloader.) - -* **Support for MusicBrainz ID tags.** The autotagger now keeps track of the +- **Support for MusicBrainz ID tags.** The autotagger now keeps track of the MusicBrainz track, album, and artist IDs it matched for each file. It also looks for album IDs in new files it's importing and uses those to look up data in MusicBrainz. Furthermore, track IDs are used as a component of the tagger's @@ -5193,18 +5210,16 @@ BPD). To "upgrade" an old database, you can use the included ``albumify`` plugin form of migrations so that new columns could be added to old databases--this is a delicate feature, so it would be very wise to make a backup of your database before upgrading to this version. - -* **Plugin architecture.** Add-on modules can now add new commands to the beets +- **Plugin architecture.** Add-on modules can now add new commands to the beets command-line interface. The ``bpd`` and ``dadd`` commands were removed from the beets core and turned into plugins; BPD is loaded by default. To load the non-default plugins, use the config options ``plugins`` (a space-separated list of plugin names) and ``pluginpath`` (a colon-separated list of directories to search beyond ``sys.path``). Plugins are just Python modules under the ``beetsplug`` namespace package containing subclasses of - ``beets.plugins.BeetsPlugin``. See `the beetsplug directory`_ for examples or + |BeetsPlugin|. See `the beetsplug directory`_ for examples or :doc:`/plugins/index` for instructions. 
- -* As a consequence of adding album art, the database was significantly +- As a consequence of adding album art, the database was significantly refactored to keep track of some information at an album (rather than item) granularity. Databases created with earlier versions of beets should work fine, but they won't have any "albums" in them--they'll just be a bag of @@ -5213,19 +5228,17 @@ BPD). To "upgrade" an old database, you can use the included ``albumify`` plugin ``albumify`` plugin. Running ``beets albumify`` with the plugin activated (set ``plugins=albumify`` in your config file) will group all your items into albums, making beets behave more or less as it did before. - -* Fixed some bugs with encoding paths on Windows. Also, ``:`` is now replaced +- Fixed some bugs with encoding paths on Windows. Also, ``:`` is now replaced with ``-`` in path names (instead of ``_``) for readability. - -* ``MediaFile``s now have a ``format`` attribute, so you can use ``$format`` in +- ``MediaFile`` now has a ``format`` attribute, so you can use ``$format`` in your library path format strings like ``$artist - $album ($format)`` to get directories with names like ``Paul Simon - Graceland (FLAC)``. .. _for the future: https://github.com/google-code-export/beets/issues/69 -.. _the beetsplug directory: - https://github.com/beetbox/beets/tree/master/beetsplug -Beets also now has its first third-party plugin: `beetfs`_, by Martin Eve! It +.. _the beetsplug directory: https://github.com/beetbox/beets/tree/master/beetsplug + +Beets also now has its first third-party plugin: beetfs_, by Martin Eve! It exposes your music in a FUSE filesystem using a custom directory structure. Even cooler: it lets you keep your files intact on-disk while correcting their tags when accessed through FUSE. Check it out! @@ -5239,27 +5252,22 @@ This release focuses on high-priority fixes and conspicuously missing features. 
Highlights include support for two new audio formats (Monkey's Audio and Ogg Vorbis) and an option to log untaggable albums during import. -* **Support for Ogg Vorbis and Monkey's Audio** files and their tags. (This +- **Support for Ogg Vorbis and Monkey's Audio** files and their tags. (This support should be considered preliminary: I haven't tested it heavily because I don't use either of these formats regularly.) - -* An option to the ``beet import`` command for **logging albums that are +- An option to the ``beet import`` command for **logging albums that are untaggable** (i.e., are skipped or taken "as-is"). Use ``beet import -l LOGFILE PATHS``. The log format is very simple: it's just a status (either "skip" or "asis") followed by the path to the album in question. The idea is that you can tag a large collection and automatically keep track of the albums that weren't found in MusicBrainz so you can come back and look at them later. - -* Fixed a ``UnicodeEncodeError`` on terminals that don't (or don't claim to) +- Fixed a ``UnicodeEncodeError`` on terminals that don't (or don't claim to) support UTF-8. - -* Importing without autotagging (``beet import -A``) is now faster and doesn't +- Importing without autotagging (``beet import -A``) is now faster and doesn't print out a bunch of whitespace. It also lets you specify single files on the command line (rather than just directories). - -* Fixed importer crash when attempting to read a corrupt file. - -* Reorganized code for CLI in preparation for adding pluggable subcommands. Also +- Fixed importer crash when attempting to read a corrupt file. +- Reorganized code for CLI in preparation for adding pluggable subcommands. Also removed dependency on the aging ``cmdln`` module in favor of `a hand-rolled solution`_. 
diff --git a/docs/conf.py b/docs/conf.py index 337a76a54..c04e034ab 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,18 +1,56 @@ -AUTHOR = "Adrian Sampson" +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html -# General configuration +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -extensions = ["sphinx.ext.autodoc", "sphinx.ext.extlinks"] +import sys +from pathlib import Path -exclude_patterns = ["_build"] -source_suffix = {".rst": "restructuredtext"} -master_doc = "index" +# Add custom extensions directory to path +sys.path.insert(0, str(Path(__file__).parent / "extensions")) project = "beets" +AUTHOR = "Adrian Sampson" copyright = "2016, Adrian Sampson" -version = "2.2" -release = "2.2.0" +master_doc = "index" +language = "en" +version = "2.5" +release = "2.5.1" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.extlinks", + "sphinx.ext.viewcode", + "sphinx_design", + "sphinx_copybutton", + "conf", + "sphinx_toolbox.more_autodoc.autotypeddict", +] + +autosummary_generate = True +autosummary_context = { + "related_typeddicts": { + "MusicBrainzAPI": [ + "beetsplug._utils.musicbrainz.LookupKwargs", + "beetsplug._utils.musicbrainz.SearchKwargs", + "beetsplug._utils.musicbrainz.BrowseKwargs", + "beetsplug._utils.musicbrainz.BrowseRecordingsKwargs", + "beetsplug._utils.musicbrainz.BrowseReleaseGroupsKwargs", + ], + } +} +autodoc_member_order = "bysource" +exclude_patterns = ["_build"] +templates_path = ["_templates"] +source_suffix = {".rst": "restructuredtext", ".md": "markdown"} pygments_style = 
"sphinx" @@ -59,10 +97,41 @@ man_pages = [ ), ] -# Options for pydata theme +# Global substitutions that can be used anywhere in the documentation. +rst_epilog = """ +.. |Album| replace:: :class:`~beets.library.models.Album` +.. |AlbumInfo| replace:: :class:`beets.autotag.hooks.AlbumInfo` +.. |BeetsPlugin| replace:: :class:`beets.plugins.BeetsPlugin` +.. |ImportSession| replace:: :class:`~beets.importer.session.ImportSession` +.. |ImportTask| replace:: :class:`~beets.importer.tasks.ImportTask` +.. |Item| replace:: :class:`~beets.library.models.Item` +.. |Library| replace:: :class:`~beets.library.library.Library` +.. |Model| replace:: :class:`~beets.dbcore.db.Model` +.. |TrackInfo| replace:: :class:`beets.autotag.hooks.TrackInfo` +""" + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + + html_theme = "pydata_sphinx_theme" -html_theme_options = {"collapse_navigation": True, "logo": {"text": "beets"}} +html_theme_options = { + "collapse_navigation": False, + "logo": {"text": "beets"}, + "show_nav_level": 2, # How many levels in left sidebar to show automatically + "navigation_depth": 4, # How many levels of navigation to expand +} html_title = "beets" html_logo = "_static/beets_logo_nobg.png" html_static_path = ["_static"] html_css_files = ["beets.css"] + + +def skip_member(app, what, name, obj, skip, options): + if name.startswith("_"): + return True + return skip + + +def setup(app): + app.connect("autodoc-skip-member", skip_member) diff --git a/docs/dev/cli.rst b/docs/dev/cli.rst index 77d3af5a5..aab78d536 100644 --- a/docs/dev/cli.rst +++ b/docs/dev/cli.rst @@ -2,8 +2,7 @@ Providing a CLI =============== The ``beets.ui`` module houses interactions with the user via a terminal, the -:doc:`/reference/cli`. -The main function is called when the user types beet on the command line. 
-The CLI functionality is organized into commands, some of which are built-in -and some of which are provided by plugins. The built-in commands are all -implemented in the ``beets.ui.commands`` submodule. +:doc:`/reference/cli`. The main function is called when the user types beet on +the command line. The CLI functionality is organized into commands, some of +which are built-in and some of which are provided by plugins. The built-in +commands are all implemented in the ``beets.ui.commands`` submodule. diff --git a/docs/dev/importer.rst b/docs/dev/importer.rst index 5182c7134..2aca3b5fd 100644 --- a/docs/dev/importer.rst +++ b/docs/dev/importer.rst @@ -3,17 +3,17 @@ Music Importer The importer component is responsible for the user-centric workflow that adds music to a library. This is one of the first aspects that a user experiences -when using beets: it finds music in the filesystem, groups it into albums, -finds corresponding metadata in MusicBrainz, asks the user for intervention, -applies changes, and moves/copies files. A description of its user interface is -given in :doc:`/guides/tagger`. +when using beets: it finds music in the filesystem, groups it into albums, finds +corresponding metadata in MusicBrainz, asks the user for intervention, applies +changes, and moves/copies files. A description of its user interface is given in +:doc:`/guides/tagger`. -The workflow is implemented in the ``beets.importer`` module and is -distinct from the core logic for matching MusicBrainz metadata (in the -``beets.autotag`` module). The workflow is also decoupled from the command-line -interface with the hope that, eventually, other (graphical) interfaces can be -bolted onto the same importer implementation. +The workflow is implemented in the ``beets.importer`` module and is distinct +from the core logic for matching MusicBrainz metadata (in the ``beets.autotag`` +module). 
The workflow is also decoupled from the command-line interface with the +hope that, eventually, other (graphical) interfaces can be bolted onto the same +importer implementation. The importer is multithreaded and follows the pipeline pattern. Each pipeline -stage is a Python coroutine. The ``beets.util.pipeline`` module houses -a generic, reusable implementation of a multithreaded pipeline. +stage is a Python coroutine. The ``beets.util.pipeline`` module houses a +generic, reusable implementation of a multithreaded pipeline. diff --git a/docs/dev/index.rst b/docs/dev/index.rst index 63335160c..f22aa8c56 100644 --- a/docs/dev/index.rst +++ b/docs/dev/index.rst @@ -4,14 +4,21 @@ For Developers This section contains information for developers. Read on if you're interested in hacking beets itself or creating plugins for it. -See also the documentation for `MediaFile`_, the library used by beets to read -and write metadata tags in media files. +See also the documentation for the MediaFile_ and Confuse_ libraries. These are +maintained by the beets team and used to read and write metadata tags and manage +configuration files, respectively. -.. _MediaFile: https://mediafile.readthedocs.io/en/latest/ +.. _confuse: https://confuse.readthedocs.io/en/latest/ + +.. _mediafile: https://mediafile.readthedocs.io/en/latest/ .. toctree:: + :maxdepth: 3 + :titlesonly: - plugins + plugins/index library + paths importer cli + ../api/index diff --git a/docs/dev/library.rst b/docs/dev/library.rst index 9740c8b90..8b854937d 100644 --- a/docs/dev/library.rst +++ b/docs/dev/library.rst @@ -4,282 +4,192 @@ Library Database API .. currentmodule:: beets.library This page describes the internal API of beets' core database features. It -doesn't exhaustively document the API, but is aimed at giving an overview of -the architecture to orient anyone who wants to dive into the code. 
+doesn't exhaustively document the API, but is aimed at giving an overview of the +architecture to orient anyone who wants to dive into the code. -The :class:`Library` object is the central repository for data in beets. It -represents a database containing songs, which are :class:`Item` instances, and -groups of items, which are :class:`Album` instances. +The |Library| object is the central repository for data in beets. It represents +a database containing songs, which are |Item| instances, and groups of items, +which are |Album| instances. The Library Class ----------------- -The :class:`Library` is typically instantiated as a singleton. A single -invocation of beets usually has only one :class:`Library`. It's powered by -:class:`dbcore.Database` under the hood, which handles the `SQLite`_ -abstraction, something like a very minimal `ORM`_. The library is also -responsible for handling queries to retrieve stored objects. +The |Library| is typically instantiated as a singleton. A single invocation of +beets usually has only one |Library|. It's powered by :class:`dbcore.Database` +under the hood, which handles the SQLite_ abstraction, something like a very +minimal ORM_. The library is also responsible for handling queries to retrieve +stored objects. -.. autoclass:: Library(path, directory[, path_formats[, replacements]]) +Overview +~~~~~~~~ - .. automethod:: __init__ +You can add new items or albums to the library via the :py:meth:`Library.add` +and :py:meth:`Library.add_album` methods. - You can add new items or albums to the library: +You may also query the library for items and albums using the +:py:meth:`Library.items`, :py:meth:`Library.albums`, :py:meth:`Library.get_item` +and :py:meth:`Library.get_album` methods. - .. automethod:: add +Any modifications to the library must go through a :class:`Transaction` object, +which you can get using the :py:meth:`Library.transaction` context manager. - .. 
automethod:: add_album - - And there are methods for querying the database: - - .. automethod:: items - - .. automethod:: albums - - .. automethod:: get_item - - .. automethod:: get_album - - Any modifications must go through a :class:`Transaction` which you get can - using this method: - - .. automethod:: transaction - -.. _SQLite: https://sqlite.org/index.html -.. _ORM: https://en.wikipedia.org/wiki/Object-relational_mapping +.. _orm: https://en.wikipedia.org/wiki/Object-relational_mapping +.. _sqlite: https://sqlite.org/index.html Model Classes ------------- -The two model entities in beets libraries, :class:`Item` and :class:`Album`, -share a base class, :class:`LibModel`, that provides common functionality. That -class itself specialises :class:`dbcore.Model` which provides an ORM-like -abstraction. +The two model entities in beets libraries, |Item| and |Album|, share a base +class, :class:`LibModel`, that provides common functionality. That class itself +specialises :class:`beets.dbcore.Model` which provides an ORM-like abstraction. To get or change the metadata of a model (an item or album), either access its attributes (e.g., ``print(album.year)`` or ``album.year = 2012``) or use the ``dict``-like interface (e.g. ``item['artist']``). - Model base -'''''''''' +~~~~~~~~~~ -Models use dirty-flags to track when the object's metadata goes out of -sync with the database. The dirty dictionary maps field names to booleans -indicating whether the field has been written since the object was last -synchronized (via load or store) with the database. +Models use dirty-flags to track when the object's metadata goes out of sync with +the database. The dirty dictionary maps field names to booleans indicating +whether the field has been written since the object was last synchronized (via +load or store) with the database. This logic is implemented in the model base +class :class:`LibModel` and is inherited by both |Item| and |Album|. -.. 
autoclass:: LibModel +We provide CRUD-like methods for interacting with the database: - .. automethod:: all_keys +- :py:meth:`LibModel.store` +- :py:meth:`LibModel.load` +- :py:meth:`LibModel.remove` +- :py:meth:`LibModel.add` - .. automethod:: __init__ +The base class :class:`beets.dbcore.Model` has a ``dict``-like interface, so +the normal mapping API is supported: - .. autoattribute:: _types - - .. autoattribute:: _fields - - There are CRUD-like methods for interacting with the database: - - .. automethod:: store - - .. automethod:: load - - .. automethod:: remove - - .. automethod:: add - - The base class :class:`dbcore.Model` has a ``dict``-like interface, so - normal the normal mapping API is supported: - - .. automethod:: keys - - .. automethod:: update - - .. automethod:: items - - .. note:: - The :py:meth:`Album.items` method is not inherited from - :py:meth:`LibModel.items` for historical reasons. - - .. automethod:: get +- :py:meth:`LibModel.keys` +- :py:meth:`LibModel.update` +- :py:meth:`LibModel.items` +- :py:meth:`LibModel.get` Item -'''' +~~~~ -Each :class:`Item` object represents a song or track. (We use the more generic -term item because, one day, beets might support non-music media.) An item can -either be purely abstract, in which case it's just a bag of metadata fields, -or it can have an associated file (indicated by ``item.path``). +Each |Item| object represents a song or track. (We use the more generic term +item because, one day, beets might support non-music media.) An item can either +be purely abstract, in which case it's just a bag of metadata fields, or it can +have an associated file (indicated by ``item.path``). In terms of the underlying SQLite database, items are backed by a single table called items with one column per metadata fields. The metadata fields currently in use are listed in ``library.py`` in ``Item._fields``. -To read and write a file's tags, we use the `MediaFile`_ library.
-To make changes to either the database or the tags on a file, you -update an item's fields (e.g., ``item.title = "Let It Be"``) and then call -``item.write()``. +To read and write a file's tags, we use the MediaFile_ library. To make changes +to either the database or the tags on a file, you update an item's fields (e.g., +``item.title = "Let It Be"``) and then call ``item.write()``. -.. _MediaFile: https://mediafile.readthedocs.io/en/latest/ +.. _mediafile: https://mediafile.readthedocs.io/en/latest/ Items also track their modification times (mtimes) to help detect when they become out of sync with on-disk metadata, mainly to speed up the -:ref:`update-cmd` (which needs to check whether the database is in sync with -the filesystem). This feature turns out to be sort of complicated. +:ref:`update-cmd` (which needs to check whether the database is in sync with the +filesystem). This feature turns out to be sort of complicated. -For any :class:`Item`, there are two mtimes: the on-disk mtime (maintained by -the OS) and the database mtime (maintained by beets). Correspondingly, there is -on-disk metadata (ID3 tags, for example) and DB metadata. The goal with the -mtime is to ensure that the on-disk and DB mtimes match when the on-disk and DB -metadata are in sync; this lets beets do a quick mtime check and avoid -rereading files in some circumstances. +For any |Item|, there are two mtimes: the on-disk mtime (maintained by the OS) +and the database mtime (maintained by beets). Correspondingly, there is on-disk +metadata (ID3 tags, for example) and DB metadata. The goal with the mtime is to +ensure that the on-disk and DB mtimes match when the on-disk and DB metadata are +in sync; this lets beets do a quick mtime check and avoid rereading files in +some circumstances. Specifically, beets attempts to maintain the following invariant: - If the on-disk metadata differs from the DB metadata, then the on-disk - mtime must be greater than the DB mtime. 
+ If the on-disk metadata differs from the DB metadata, then the on-disk mtime + must be greater than the DB mtime. As a result, it is always valid for the DB mtime to be zero (assuming that real -disk mtimes are always positive). However, whenever possible, beets tries to -set ``db_mtime = disk_mtime`` at points where it knows the metadata is -synchronized. When it is possible that the metadata is out of sync, beets can -then just set ``db_mtime = 0`` to return to a consistent state. +disk mtimes are always positive). However, whenever possible, beets tries to set +``db_mtime = disk_mtime`` at points where it knows the metadata is synchronized. +When it is possible that the metadata is out of sync, beets can then just set +``db_mtime = 0`` to return to a consistent state. This leads to the following implementation policy: - * On every write of disk metadata (``Item.write()``), the DB mtime is updated - to match the post-write disk mtime. - * Same for metadata reads (``Item.read()``). - * On every modification to DB metadata (``item.field = ...``), the DB mtime - is reset to zero. - - -.. autoclass:: Item - - .. automethod:: __init__ - - .. automethod:: from_path - - .. automethod:: get_album - - .. automethod:: destination - - .. automethod:: current_mtime - - The methods ``read()`` and ``write()`` are complementary: one reads a - file's tags and updates the item's metadata fields accordingly while the - other takes the item's fields and writes them to the file's tags. - - .. automethod:: read - - .. automethod:: write - - .. automethod:: try_write - - .. automethod:: try_sync - - The :class:`Item` class supplements the normal model interface so that they - interacting with the filesystem as well: - - .. automethod:: move - - .. automethod:: remove + - On every write of disk metadata (``Item.write()``), the DB mtime is + updated to match the post-write disk mtime. + - Same for metadata reads (``Item.read()``). 
+ - On every modification to DB metadata (``item.field = ...``), the DB mtime + is reset to zero. Album -''''' +~~~~~ -An :class:`Album` is a collection of Items in the database. Every item in the -database has either zero or one associated albums (accessible via -``item.album_id``). An item that has no associated album is called a -singleton. -Changing fields on an album (e.g. ``album.year = 2012``) updates the album -itself and also changes the same field in all associated items. +An |Album| is a collection of Items in the database. Every item in the database +has either zero or one associated albums (accessible via ``item.album_id``). An +item that has no associated album is called a singleton. Changing fields on an +album (e.g. ``album.year = 2012``) updates the album itself and also changes the +same field in all associated items. -An :class:`Album` object keeps track of album-level metadata, which is (mostly) -a subset of the track-level metadata. The album-level metadata fields are -listed in ``Album._fields``. -For those fields that are both item-level and album-level (e.g., ``year`` or -``albumartist``), every item in an album should share the same value. Albums -use an SQLite table called ``albums``, in which each column is an album -metadata field. +An |Album| object keeps track of album-level metadata, which is (mostly) a +subset of the track-level metadata. The album-level metadata fields are listed +in ``Album._fields``. For those fields that are both item-level and album-level +(e.g., ``year`` or ``albumartist``), every item in an album should share the +same value. Albums use an SQLite table called ``albums``, in which each column +is an album metadata field. -.. autoclass:: Album +.. note:: - .. automethod:: __init__ - - .. automethod:: item_dir - - .. automethod:: items - - Albums extend the normal model interface to also forward changes to their - items: - - .. autoattribute:: item_keys - - .. automethod:: store - - .. 
automethod:: try_sync - - .. automethod:: move - - .. automethod:: remove - - Albums also manage album art, image files that are associated with each - album: - - .. automethod:: set_art - - .. automethod:: move_art - - .. automethod:: art_destination + The :py:meth:`Album.items` method is not inherited from + :py:meth:`LibModel.items` for historical reasons. Transactions -'''''''''''' +~~~~~~~~~~~~ -The :class:`Library` class provides the basic methods necessary to access and +The |Library| class provides the basic methods necessary to access and manipulate its contents. To perform more complicated operations atomically, or to interact directly with the underlying SQLite database, you must use a -*transaction* (see this `blog post`_ for motivation). For example:: +*transaction* (see this `blog post`_ for motivation). For example: + +.. code-block:: python lib = Library() with lib.transaction() as tx: items = lib.items(query) lib.add_album(list(items)) -.. _blog post: https://beets.io/blog/sqlite-nightmare.html - .. currentmodule:: beets.dbcore.db -.. autoclass:: Transaction - :members: +The :class:`Transaction` class is a context manager that provides a +transactional interface to the underlying SQLite database. It is responsible for +managing the transaction's lifecycle, including beginning, committing, and +rolling back the transaction if an error occurs. +.. _blog post: https://beets.io/blog/sqlite-nightmare.html Queries ------- -To access albums and items in a library, we use :doc:`/reference/query`. -In beets, the :class:`Query` abstract base class represents a criterion that -matches items or albums in the database. -Every subclass of :class:`Query` must implement two methods, which implement -two different ways of identifying matching items/albums. +.. currentmodule:: beets.dbcore.query + +To access albums and items in a library, we use :doc:`/reference/query`.
In +beets, the :class:`Query` abstract base class represents a criterion that +matches items or albums in the database. Every subclass of :class:`Query` must +implement two methods, which implement two different ways of identifying +matching items/albums. The ``clause()`` method should return an SQLite ``WHERE`` clause that matches appropriate albums/items. This allows for efficient batch queries. -Correspondingly, the ``match(item)`` method should take an :class:`Item` object -and return a boolean, indicating whether or not a specific item matches the +Correspondingly, the ``match(item)`` method should take an |Item| object and +return a boolean, indicating whether or not a specific item matches the criterion. This alternate implementation allows clients to determine whether items that have already been fetched from the database match the query. There are many different types of queries. Just as an example, :class:`FieldQuery` determines whether a certain field matches a certain value -(an equality query). -:class:`AndQuery` (like its abstract superclass, :class:`CollectionQuery`) -takes a set of other query objects and bundles them together, matching only -albums/items that match all constituent queries. +(an equality query). :class:`AndQuery` (like its abstract superclass, +:class:`CollectionQuery`) takes a set of other query objects and bundles them +together, matching only albums/items that match all constituent queries. Beets has a human-writable plain-text query syntax that can be parsed into -:class:`Query` objects. Calling ``AndQuery.from_strings`` parses a list of -query parts into a query object that can then be used with :class:`Library` -objects. +:class:`Query` objects. Calling ``AndQuery.from_strings`` parses a list of query +parts into a query object that can then be used with |Library| objects. 
diff --git a/docs/dev/paths.rst b/docs/dev/paths.rst new file mode 100644 index 000000000..a593580f6 --- /dev/null +++ b/docs/dev/paths.rst @@ -0,0 +1,64 @@ +Handling Paths +============== + +``pathlib`` provides a clean, cross-platform API for working with filesystem +paths. + +Use the ``.filepath`` property on ``Item`` and ``Album`` library objects to +access paths as ``pathlib.Path`` objects. This produces a readable, native +representation suitable for printing, logging, or further processing. + +Normalize paths using ``Path(...).expanduser().resolve()``, which expands ``~`` +and resolves symlinks. + +Cross-platform differences—such as path separators, Unicode handling, and +long-path support (Windows)—are automatically managed by ``pathlib``. + +When storing paths in the database, however, convert them to bytes with +``bytestring_path()``. Paths in Beets are currently stored as bytes, although +there are plans to eventually store ``pathlib.Path`` objects directly. To access +media file paths in their stored form, use the ``.path`` property on ``Item`` +and ``Album``. + +Legacy utilities +---------------- + +Historically, Beets used custom utilities to ensure consistent behavior across +Linux, macOS, and Windows before ``pathlib`` became reliable: + +- ``syspath()``: worked around Windows Unicode and long-path limitations by + converting to a system-safe string (adding the ``\\?\`` prefix where needed). +- ``normpath()``: normalized slashes and removed ``./`` or ``..`` parts but did + not expand ``~``. +- ``bytestring_path()``: converted paths to bytes for database storage (still + used for that purpose today). +- ``displayable_path()``: converted byte paths to Unicode for display or + logging. + +These functions remain safe to use in legacy code, but new code should rely +solely on ``pathlib.Path``. + +Examples +-------- + +Old style + +.. code-block:: python + + displayable_path(item.path) + normpath("~/Music/../Artist") + syspath(path) + +New style + +..
code-block:: python + + item.filepath + Path("~/Music/../Artist").expanduser().resolve() + Path(path) + +When storing paths in the database + +.. code-block:: python + + path_bytes = bytestring_path(Path("/some/path/to/file.mp3")) diff --git a/docs/dev/plugins.rst b/docs/dev/plugins.rst deleted file mode 100644 index 0ebff3231..000000000 --- a/docs/dev/plugins.rst +++ /dev/null @@ -1,653 +0,0 @@ -.. _writing-plugins: - -Writing Plugins ---------------- - -A beets plugin is just a Python module or package inside the ``beetsplug`` -namespace package. (Check out `this article`_ and `this Stack Overflow -question`_ if you haven't heard about namespace packages.) So, to make one, -create a directory called ``beetsplug`` and add either your plugin module:: - - beetsplug/ - myawesomeplugin.py - -or your plugin subpackage:: - - beetsplug/ - myawesomeplugin/ - __init__.py - myawesomeplugin.py - -.. attention:: - - You do not anymore need to add a ``__init__.py`` file to the ``beetsplug`` - directory. Python treats your plugin as a namespace package automatically, - thus we do not depend on ``pkgutil``-based setup in the ``__init__.py`` - file anymore. - -The meat of your plugin goes in ``myawesomeplugin.py``. There, you'll have to -import ``BeetsPlugin`` from ``beets.plugins`` and subclass it, for example - -.. code-block:: python - - from beets.plugins import BeetsPlugin - - class MyAwesomePlugin(BeetsPlugin): - pass - -Once you have your ``BeetsPlugin`` subclass, there's a variety of things your -plugin can do. (Read on!) - -To use your new plugin, package your plugin (see how to do this with `poetry`_ -or `setuptools`_, for example) and install it into your ``beets`` virtual -environment. Then, add your plugin to beets configuration - -.. code-block:: yaml - - # config.yaml - plugins: - - myawesomeplugin - -and you're good to go! - -.. _this article: https://realpython.com/python-namespace-package/#setting-up-some-namespace-packages -.. 
_this Stack Overflow question: https://stackoverflow.com/a/27586272/9582674 -.. _poetry: https://python-poetry.org/docs/pyproject/#packages -.. _setuptools: https://setuptools.pypa.io/en/latest/userguide/package_discovery.html#finding-simple-packages - -.. _add_subcommands: - -Add Commands to the CLI -^^^^^^^^^^^^^^^^^^^^^^^ - -Plugins can add new subcommands to the ``beet`` command-line interface. Define -the plugin class' ``commands()`` method to return a list of ``Subcommand`` -objects. (The ``Subcommand`` class is defined in the ``beets.ui`` module.) -Here's an example plugin that adds a simple command:: - - from beets.plugins import BeetsPlugin - from beets.ui import Subcommand - - my_super_command = Subcommand('super', help='do something super') - def say_hi(lib, opts, args): - print "Hello everybody! I'm a plugin!" - my_super_command.func = say_hi - - class SuperPlug(BeetsPlugin): - def commands(self): - return [my_super_command] - -To make a subcommand, invoke the constructor like so: ``Subcommand(name, parser, -help, aliases)``. The ``name`` parameter is the only required one and should -just be the name of your command. ``parser`` can be an `OptionParser instance`_, -but it defaults to an empty parser (you can extend it later). ``help`` is a -description of your command, and ``aliases`` is a list of shorthand versions of -your command name. - -.. _OptionParser instance: https://docs.python.org/library/optparse.html - -You'll need to add a function to your command by saying ``mycommand.func = -myfunction``. This function should take the following parameters: ``lib`` (a -beets ``Library`` object) and ``opts`` and ``args`` (command-line options and -arguments as returned by `OptionParser.parse_args`_). - -.. _OptionParser.parse_args: - https://docs.python.org/library/optparse.html#parsing-arguments - -The function should use any of the utility functions defined in ``beets.ui``. -Try running ``pydoc beets.ui`` to see what's available. 
- -You can add command-line options to your new command using the ``parser`` member -of the ``Subcommand`` class, which is a ``CommonOptionsParser`` instance. Just -use it like you would a normal ``OptionParser`` in an independent script. Note -that it offers several methods to add common options: ``--album``, ``--path`` -and ``--format``. This feature is versatile and extensively documented, try -``pydoc beets.ui.CommonOptionsParser`` for more information. - -.. _plugin_events: - -Listen for Events -^^^^^^^^^^^^^^^^^ - -Event handlers allow plugins to run code whenever something happens in beets' -operation. For instance, a plugin could write a log message every time an album -is successfully autotagged or update MPD's index whenever the database is -changed. - -You can "listen" for events using ``BeetsPlugin.register_listener``. Here's -an example:: - - from beets.plugins import BeetsPlugin - - def loaded(): - print 'Plugin loaded!' - - class SomePlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.register_listener('pluginload', loaded) - -Note that if you want to access an attribute of your plugin (e.g. ``config`` or -``log``) you'll have to define a method and not a function. Here is the usual -registration process in this case:: - - from beets.plugins import BeetsPlugin - - class SomePlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.register_listener('pluginload', self.loaded) - - def loaded(self): - self._log.info('Plugin loaded!') - -The events currently available are: - -* `pluginload`: called after all the plugins have been loaded after the ``beet`` - command starts - -* `import`: called after a ``beet import`` command finishes (the ``lib`` keyword - argument is a Library object; ``paths`` is a list of paths (strings) that were - imported) - -* `album_imported`: called with an ``Album`` object every time the ``import`` - command finishes adding an album to the library. 
Parameters: ``lib``, - ``album`` - -* `album_removed`: called with an ``Album`` object every time an album is - removed from the library (even when its file is not deleted from disk). - -* `item_copied`: called with an ``Item`` object whenever its file is copied. - Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_imported`: called with an ``Item`` object every time the importer adds a - singleton to the library (not called for full-album imports). Parameters: - ``lib``, ``item`` - -* `before_item_moved`: called with an ``Item`` object immediately before its - file is moved. Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_moved`: called with an ``Item`` object whenever its file is moved. - Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_linked`: called with an ``Item`` object whenever a symlink is created - for a file. - Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_hardlinked`: called with an ``Item`` object whenever a hardlink is - created for a file. - Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_reflinked`: called with an ``Item`` object whenever a reflink is - created for a file. - Parameters: ``item``, ``source`` path, ``destination`` path - -* `item_removed`: called with an ``Item`` object every time an item (singleton - or album's part) is removed from the library (even when its file is not - deleted from disk). - -* `write`: called with an ``Item`` object, a ``path``, and a ``tags`` - dictionary just before a file's metadata is written to disk (i.e., - just before the file on disk is opened). Event handlers may change - the ``tags`` dictionary to customize the tags that are written to the - media file. Event handlers may also raise a - ``library.FileOperationError`` exception to abort the write - operation. Beets will catch that exception, print an error message - and continue. 
- -* `after_write`: called with an ``Item`` object after a file's metadata is - written to disk (i.e., just after the file on disk is closed). - -* `import_task_created`: called immediately after an import task is - initialized. Plugins can use this to, for example, change imported files of a - task before anything else happens. It's also possible to replace the task - with another task by returning a list of tasks. This list can contain zero - or more `ImportTask`s. Returning an empty list will stop the task. - Parameters: ``task`` (an `ImportTask`) and ``session`` (an `ImportSession`). - -* `import_task_start`: called when before an import task begins processing. - Parameters: ``task`` and ``session``. - -* `import_task_apply`: called after metadata changes have been applied in an - import task. This is called on the same thread as the UI, so use this - sparingly and only for tasks that can be done quickly. For most plugins, an - import pipeline stage is a better choice (see :ref:`plugin-stage`). - Parameters: ``task`` and ``session``. - -* `import_task_before_choice`: called after candidate search for an import task - before any decision is made about how/if to import or tag. Can be used to - present information about the task or initiate interaction with the user - before importing occurs. Return an importer action to take a specific action. - Only one handler may return a non-None result. - Parameters: ``task`` and ``session`` - -* `import_task_choice`: called after a decision has been made about an import - task. This event can be used to initiate further interaction with the user. - Use ``task.choice_flag`` to determine or change the action to be - taken. Parameters: ``task`` and ``session``. - -* `import_task_files`: called after an import task finishes manipulating the - filesystem (copying and moving files, writing metadata tags). Parameters: - ``task`` and ``session``. 
- -* `library_opened`: called after beets starts up and initializes the main - Library object. Parameter: ``lib``. - -* `database_change`: a modification has been made to the library database. The - change might not be committed yet. Parameters: ``lib`` and ``model``. - -* `cli_exit`: called just before the ``beet`` command-line program exits. - Parameter: ``lib``. - -* `import_begin`: called just before a ``beet import`` session starts up. - Parameter: ``session``. - -* `trackinfo_received`: called after metadata for a track item has been - fetched from a data source, such as MusicBrainz. You can modify the tags - that the rest of the pipeline sees on a ``beet import`` operation or during - later adjustments, such as ``mbsync``. Slow handlers of the event can impact - the operation, since the event is fired for any fetched possible match - `before` the user (or the autotagger machinery) gets to see the match. - Parameter: ``info``. - -* `albuminfo_received`: like `trackinfo_received`, the event indicates new - metadata for album items. The parameter is an ``AlbumInfo`` object instead - of a ``TrackInfo``. - Parameter: ``info``. - -* `before_choose_candidate`: called before the user is prompted for a decision - during a ``beet import`` interactive session. Plugins can use this event for - :ref:`appending choices to the prompt <append_prompt_choices>` by returning a - list of ``PromptChoices``. Parameters: ``task`` and ``session``. - -* `mb_track_extract`: called after the metadata is obtained from - MusicBrainz. The parameter is a ``dict`` containing the tags retrieved from - MusicBrainz for a track. Plugins must return a new (potentially empty) - ``dict`` with additional ``field: value`` pairs, which the autotagger will - apply to the item, as flexible attributes if ``field`` is not a hardcoded - field. Fields already present on the track are overwritten. - Parameter: ``data`` - -* `mb_album_extract`: Like `mb_track_extract`, but for album tags. 
Overwrites - tags set at the track level, if they have the same ``field``. - Parameter: ``data`` - -The included ``mpdupdate`` plugin provides an example use case for event listeners. - -Extend the Autotagger -^^^^^^^^^^^^^^^^^^^^^ - -Plugins can also enhance the functionality of the autotagger. For a -comprehensive example, try looking at the ``chroma`` plugin, which is included -with beets. - -A plugin can extend three parts of the autotagger's process: the track distance -function, the album distance function, and the initial MusicBrainz search. The -distance functions determine how "good" a match is at the track and album -levels; the initial search controls which candidates are presented to the -matching algorithm. Plugins implement these extensions by implementing four -methods on the plugin class: - -* ``track_distance(self, item, info)``: adds a component to the distance - function (i.e., the similarity metric) for individual tracks. ``item`` is the - track to be matched (an Item object) and ``info`` is the TrackInfo object - that is proposed as a match. Should return a ``(dist, dist_max)`` pair - of floats indicating the distance. - -* ``album_distance(self, items, album_info, mapping)``: like the above, but - compares a list of items (representing an album) to an album-level MusicBrainz - entry. ``items`` is a list of Item objects; ``album_info`` is an AlbumInfo - object; and ``mapping`` is a dictionary that maps Items to their corresponding - TrackInfo objects. - -* ``candidates(self, items, artist, album, va_likely)``: given a list of items - comprised by an album to be matched, return a list of ``AlbumInfo`` objects - for candidate albums to be compared and matched. - -* ``item_candidates(self, item, artist, album)``: given a *singleton* item, - return a list of ``TrackInfo`` objects for candidate tracks to be compared and - matched. 
- -* ``album_for_id(self, album_id)``: given an ID from user input or an album's - tags, return a candidate AlbumInfo object (or None). - -* ``track_for_id(self, track_id)``: given an ID from user input or a file's - tags, return a candidate TrackInfo object (or None). - -When implementing these functions, you may want to use the functions from the -``beets.autotag`` and ``beets.autotag.mb`` modules, both of which have -somewhat helpful docstrings. - -Read Configuration Options -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Plugins can configure themselves using the ``config.yaml`` file. You can read -configuration values in two ways. The first is to use `self.config` within -your plugin class. This gives you a view onto the configuration values in a -section with the same name as your plugin's module. For example, if your plugin -is in ``greatplugin.py``, then `self.config` will refer to options under the -``greatplugin:`` section of the config file. - -For example, if you have a configuration value called "foo", then users can put -this in their ``config.yaml``:: - - greatplugin: - foo: bar - -To access this value, say ``self.config['foo'].get()`` at any point in your -plugin's code. The `self.config` object is a *view* as defined by the `Confuse`_ -library. - -.. _Confuse: https://confuse.readthedocs.io/en/latest/ - -If you want to access configuration values *outside* of your plugin's section, -import the `config` object from the `beets` module. That is, just put ``from -beets import config`` at the top of your plugin and access values from there. - -If your plugin provides configuration values for sensitive data (e.g., -passwords, API keys, ...), you should add these to the config so they can be -redacted automatically when users dump their config. 
This can be done by -setting each value's `redact` flag, like so:: - - self.config['password'].redact = True - - -Add Path Format Functions and Fields -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Beets supports *function calls* in its path format syntax (see -:doc:`/reference/pathformat`). Beets includes a few built-in functions, but -plugins can register new functions by adding them to the ``template_funcs`` -dictionary. - -Here's an example:: - - class MyPlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.template_funcs['initial'] = _tmpl_initial - - def _tmpl_initial(text: str) -> str: - if text: - return text[0].upper() - else: - return u'' - -This plugin provides a function ``%initial`` to path templates where -``%initial{$artist}`` expands to the artist's initial (its capitalized first -character). - -Plugins can also add template *fields*, which are computed values referenced -as ``$name`` in templates. To add a new field, add a function that takes an -``Item`` object to the ``template_fields`` dictionary on the plugin object. -Here's an example that adds a ``$disc_and_track`` field:: - - class MyPlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.template_fields['disc_and_track'] = _tmpl_disc_and_track - - def _tmpl_disc_and_track(item: Item) -> str: - """Expand to the disc number and track number if this is a - multi-disc release. Otherwise, just expands to the track - number. - """ - if item.disctotal > 1: - return u'%02i.%02i' % (item.disc, item.track) - else: - return u'%02i' % (item.track) - -With this plugin enabled, templates can reference ``$disc_and_track`` as they -can any standard metadata field. - -This field works for *item* templates. Similarly, you can register *album* -template fields by adding a function accepting an ``Album`` argument to the -``album_template_fields`` dict. 
- -Extend MediaFile -^^^^^^^^^^^^^^^^ - -`MediaFile`_ is the file tag abstraction layer that beets uses to make -cross-format metadata manipulation simple. Plugins can add fields to MediaFile -to extend the kinds of metadata that they can easily manage. - -The ``MediaFile`` class uses ``MediaField`` descriptors to provide -access to file tags. If you have created a descriptor you can add it through -your plugins ``add_media_field()`` method. - -.. automethod:: beets.plugins.BeetsPlugin.add_media_field -.. _MediaFile: https://mediafile.readthedocs.io/en/latest/ - - -Here's an example plugin that provides a meaningless new field "foo":: - - class FooPlugin(BeetsPlugin): - def __init__(self): - field = mediafile.MediaField( - mediafile.MP3DescStorageStyle(u'foo'), - mediafile.StorageStyle(u'foo') - ) - self.add_media_field('foo', field) - - FooPlugin() - item = Item.from_path('/path/to/foo/tag.mp3') - assert item['foo'] == 'spam' - - item['foo'] == 'ham' - item.write() - # The "foo" tag of the file is now "ham" - - -.. _plugin-stage: - -Add Import Pipeline Stages -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Many plugins need to add high-latency operations to the import workflow. For -example, a plugin that fetches lyrics from the Web would, ideally, not block the -progress of the rest of the importer. Beets allows plugins to add stages to the -parallel import pipeline. - -Each stage is run in its own thread. Plugin stages run after metadata changes -have been applied to a unit of music (album or track) and before file -manipulation has occurred (copying and moving files, writing tags to disk). -Multiple stages run in parallel but each stage processes only one task at a time -and each task is processed by only one stage at a time. - -Plugins provide stages as functions that take two arguments: ``config`` and -``task``, which are ``ImportSession`` and ``ImportTask`` objects (both defined in -``beets.importer``). 
Add such a function to the plugin's ``import_stages`` field -to register it:: - - from beets.plugins import BeetsPlugin - class ExamplePlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.import_stages = [self.stage] - def stage(self, session, task): - print('Importing something!') - -It is also possible to request your function to run early in the pipeline by -adding the function to the plugin's ``early_import_stages`` field instead:: - - self.early_import_stages = [self.stage] - -.. _extend-query: - -Extend the Query Syntax -^^^^^^^^^^^^^^^^^^^^^^^ - -You can add new kinds of queries to beets' :doc:`query syntax -</reference/query>`. There are two ways to add custom queries: using a prefix -and using a name. Prefix-based query extension can apply to *any* field, while -named queries are not associated with any field. For example, beets already -supports regular expression queries, which are indicated by a colon -prefix---plugins can do the same. - -For either kind of query extension, define a subclass of the ``Query`` type -from the ``beets.dbcore.query`` module. Then: - -- To define a prefix-based query, define a ``queries`` method in your plugin - class. Return from this method a dictionary mapping prefix strings to query - classes. -- To define a named query, defined dictionaries named either ``item_queries`` - or ``album_queries``. These should map names to query types. So if you - use ``{ "foo": FooQuery }``, then the query ``foo:bar`` will construct a - query like ``FooQuery("bar")``. - -For prefix-based queries, you will want to extend ``FieldQuery``, which -implements string comparisons on fields. To use it, create a subclass -inheriting from that class and override the ``value_match`` class method. -(Remember the ``@classmethod`` decorator!) The following example plugin -declares a query using the ``@`` prefix to delimit exact string matches. 
The -plugin will be used if we issue a command like ``beet ls @something`` or -``beet ls artist:@something``:: - - from beets.plugins import BeetsPlugin - from beets.dbcore import FieldQuery - - class ExactMatchQuery(FieldQuery): - @classmethod - def value_match(self, pattern, val): - return pattern == val - - class ExactMatchPlugin(BeetsPlugin): - def queries(self): - return { - '@': ExactMatchQuery - } - - -Flexible Field Types -^^^^^^^^^^^^^^^^^^^^ - -If your plugin uses flexible fields to store numbers or other -non-string values, you can specify the types of those fields. A rating -plugin, for example, might want to declare that the ``rating`` field -should have an integer type:: - - from beets.plugins import BeetsPlugin - from beets.dbcore import types - - class RatingPlugin(BeetsPlugin): - item_types = {'rating': types.INTEGER} - - @property - def album_types(self): - return {'rating': types.INTEGER} - -A plugin may define two attributes: `item_types` and `album_types`. -Each of those attributes is a dictionary mapping a flexible field name -to a type instance. You can find the built-in types in the -`beets.dbcore.types` and `beets.library` modules or implement your own -type by inheriting from the `Type` class. - -Specifying types has several advantages: - -* Code that accesses the field like ``item['my_field']`` gets the right - type (instead of just a string). - -* You can use advanced queries (like :ref:`ranges <numericquery>`) - from the command line. - -* User input for flexible fields may be validated and converted. - -* Items missing the given field can use an appropriate null value for - querying and sorting purposes. - - -.. _plugin-logging: - -Logging -^^^^^^^ - -Each plugin object has a ``_log`` attribute, which is a ``Logger`` from the -`standard Python logging module`_. The logger is set up to `PEP 3101`_, -str.format-style string formatting. 
So you can write logging calls like this:: - - self._log.debug(u'Processing {0.title} by {0.artist}', item) - -.. _PEP 3101: https://www.python.org/dev/peps/pep-3101/ -.. _standard Python logging module: https://docs.python.org/2/library/logging.html - -When beets is in verbose mode, plugin messages are prefixed with the plugin -name to make them easier to see. - -Which messages will be logged depends on the logging level and the action -performed: - -* Inside import stages and event handlers, the default is ``WARNING`` messages - and above. -* Everywhere else, the default is ``INFO`` or above. - -The verbosity can be increased with ``--verbose`` (``-v``) flags: each flags -lowers the level by a notch. That means that, with a single ``-v`` flag, event -handlers won't have their ``DEBUG`` messages displayed, but command functions -(for example) will. With ``-vv`` on the command line, ``DEBUG`` messages will -be displayed everywhere. - -This addresses a common pattern where plugins need to use the same code for a -command and an import stage, but the command needs to print more messages than -the import stage. (For example, you'll want to log "found lyrics for this song" -when you're run explicitly as a command, but you don't want to noisily -interrupt the importer interface when running automatically.) - -.. _append_prompt_choices: - -Append Prompt Choices -^^^^^^^^^^^^^^^^^^^^^ - -Plugins can also append choices to the prompt presented to the user during -an import session. 
- -To do so, add a listener for the ``before_choose_candidate`` event, and return -a list of ``PromptChoices`` that represent the additional choices that your -plugin shall expose to the user:: - - from beets.plugins import BeetsPlugin - from beets.ui.commands import PromptChoice - - class ExamplePlugin(BeetsPlugin): - def __init__(self): - super().__init__() - self.register_listener('before_choose_candidate', - self.before_choose_candidate_event) - - def before_choose_candidate_event(self, session, task): - return [PromptChoice('p', 'Print foo', self.foo), - PromptChoice('d', 'Do bar', self.bar)] - - def foo(self, session, task): - print('User has chosen "Print foo"!') - - def bar(self, session, task): - print('User has chosen "Do bar"!') - -The previous example modifies the standard prompt:: - - # selection (default 1), Skip, Use as-is, as Tracks, Group albums, - Enter search, enter Id, aBort? - -by appending two additional options (``Print foo`` and ``Do bar``):: - - # selection (default 1), Skip, Use as-is, as Tracks, Group albums, - Enter search, enter Id, aBort, Print foo, Do bar? - -If the user selects a choice, the ``callback`` attribute of the corresponding -``PromptChoice`` will be called. It is the responsibility of the plugin to -check for the status of the import session and decide the choices to be -appended: for example, if a particular choice should only be presented if the -album has no candidates, the relevant checks against ``task.candidates`` should -be performed inside the plugin's ``before_choose_candidate_event`` accordingly. - -Please make sure that the short letter for each of the choices provided by the -plugin is not already in use: the importer will emit a warning and discard -all but one of the choices using the same letter, giving priority to the -core importer prompt choices. 
As a reference, the following characters are used -by the choices on the core importer prompt, and hence should not be used: -``a``, ``s``, ``u``, ``t``, ``g``, ``e``, ``i``, ``b``. - -Additionally, the callback function can optionally specify the next action to -be performed by returning a ``importer.action`` value. It may also return a -``autotag.Proposal`` value to update the set of current proposals to be -considered. diff --git a/docs/dev/plugins/autotagger.rst new file mode 100644 index 000000000..8b6df6fb5 --- /dev/null +++ b/docs/dev/plugins/autotagger.rst @@ -0,0 +1,107 @@ +Extending the Autotagger +======================== + +.. currentmodule:: beets.metadata_plugins + +Beets supports **metadata source plugins**, which allow it to fetch and match +metadata from external services (such as Spotify, Discogs, or Deezer). This +guide explains how to build your own metadata source plugin by extending the +:py:class:`MetadataSourcePlugin`. + +These plugins integrate directly with the autotagger, providing candidate +metadata during lookups. To implement one, you must subclass +:py:class:`MetadataSourcePlugin` and implement its abstract methods. + +Overview +-------- + +Creating a metadata source plugin is very similar to writing a standard plugin +(see :ref:`basic-plugin-setup`). The main difference is that your plugin must: + +1. Subclass :py:class:`MetadataSourcePlugin`. +2. Implement all required abstract methods. + +Here's a minimal example: + +.. code-block:: python + + # beetsplug/myawesomeplugin.py + from typing import Sequence + from beets.autotag.hooks import Item + from beets.metadata_plugins import MetadataSourcePlugin + + + class MyAwesomePlugin(MetadataSourcePlugin): + + def candidates( + self, + items: Sequence[Item], + artist: str, + album: str, + va_likely: bool, + ): ... + + def item_candidates(self, item: Item, artist: str, title: str): ... + + def track_for_id(self, track_id: str): ...
+ + def album_for_id(self, album_id: str): ... + +Each metadata source plugin automatically gets a unique identifier. You can +access this identifier using the :py:meth:`~MetadataSourcePlugin.data_source` +class property to tell plugins apart. + +Metadata lookup +--------------- + +When beets runs the autotagger, it queries **all enabled metadata source +plugins** for potential matches: + +- For **albums**, it calls :py:meth:`~MetadataSourcePlugin.candidates`. +- For **singletons**, it calls :py:meth:`~MetadataSourcePlugin.item_candidates`. + +The results are combined and scored. By default, candidate ranking is handled +automatically by the beets core, but you can customize weighting by overriding: + +- :py:meth:`~MetadataSourcePlugin.album_distance` +- :py:meth:`~MetadataSourcePlugin.track_distance` + +This is optional; if not overridden, both methods return a constant distance of +``0.5``. + +ID-based lookups +---------------- + +Your plugin must also define: + +- :py:meth:`~MetadataSourcePlugin.album_for_id` — fetch album metadata by ID. +- :py:meth:`~MetadataSourcePlugin.track_for_id` — fetch track metadata by ID. + +IDs are expected to be strings. If your source uses specific formats, consider +contributing an extractor regex to the core module: +:py:mod:`beets.util.id_extractors`. + +Best practices +-------------- + +Beets already ships with several metadata source plugins. Studying these +implementations can help you follow conventions and avoid pitfalls. Good +starting points include: + +- ``spotify`` +- ``deezer`` +- ``discogs`` + +Migration guidance +------------------ + +Older metadata plugins that extend |BeetsPlugin| should be migrated to +:py:class:`MetadataSourcePlugin`. Legacy support will be removed in **beets +v3.0.0**. + +..
seealso:: + + - :py:mod:`beets.autotag` + - :py:mod:`beets.metadata_plugins` + - :ref:`autotagger_extensions` + - :ref:`using-the-auto-tagger` diff --git a/docs/dev/plugins/commands.rst b/docs/dev/plugins/commands.rst new file mode 100644 index 000000000..f39578f11 --- /dev/null +++ b/docs/dev/plugins/commands.rst @@ -0,0 +1,54 @@ +.. _add_subcommands: + +Add Commands to the CLI +======================= + +Plugins can add new subcommands to the ``beet`` command-line interface. Define +the plugin class' ``commands()`` method to return a list of ``Subcommand`` +objects. (The ``Subcommand`` class is defined in the ``beets.ui`` module.) +Here's an example plugin that adds a simple command: + +.. code-block:: python + + from beets.plugins import BeetsPlugin + from beets.ui import Subcommand + + my_super_command = Subcommand("super", help="do something super") + + + def say_hi(lib, opts, args): + print("Hello everybody! I'm a plugin!") + + + my_super_command.func = say_hi + + + class SuperPlug(BeetsPlugin): + def commands(self): + return [my_super_command] + +To make a subcommand, invoke the constructor like so: ``Subcommand(name, parser, +help, aliases)``. The ``name`` parameter is the only required one and should +just be the name of your command. ``parser`` can be an `OptionParser instance`_, +but it defaults to an empty parser (you can extend it later). ``help`` is a +description of your command, and ``aliases`` is a list of shorthand versions of +your command name. + +.. _optionparser instance: https://docs.python.org/library/optparse.html + +You'll need to add a function to your command by saying ``mycommand.func = +myfunction``. This function should take the following parameters: ``lib`` (a +beets ``Library`` object) and ``opts`` and ``args`` (command-line options and +arguments as returned by OptionParser.parse_args_). + +.. 
_optionparser.parse_args: https://docs.python.org/library/optparse.html#parsing-arguments + +The function should use any of the utility functions defined in ``beets.ui``. +Try running ``pydoc beets.ui`` to see what's available. + +You can add command-line options to your new command using the ``parser`` member +of the ``Subcommand`` class, which is a ``CommonOptionsParser`` instance. Just +use it like you would a normal ``OptionParser`` in an independent script. Note +that it offers several methods to add common options: ``--album``, ``--path`` +and ``--format``. This feature is versatile and extensively documented, try +``pydoc beets.ui.CommonOptionsParser`` for more information. diff --git a/docs/dev/plugins/events.rst b/docs/dev/plugins/events.rst new file mode 100644 index 000000000..aaab9ccd7 --- /dev/null +++ b/docs/dev/plugins/events.rst @@ -0,0 +1,206 @@ +.. _plugin_events: + +Listen for Events +================= + +.. currentmodule:: beets.plugins + +Event handlers allow plugins to hook into whenever something happens in beets' +operations. For instance, a plugin could write a log message every time an album +is successfully autotagged or update MPD's index whenever the database is +changed. + +You can "listen" for events using :py:meth:`BeetsPlugin.register_listener`. +Here's an example: + +.. code-block:: python + + from beets.plugins import BeetsPlugin + + + def loaded(): + print("Plugin loaded!") + + + class SomePlugin(BeetsPlugin): + def __init__(self): + super().__init__() + self.register_listener("pluginload", loaded) + +Note that if you want to access an attribute of your plugin (e.g. ``config`` or +``log``) you'll have to define a method and not a function. Here is the usual +registration process in this case: + +.. 
code-block:: python + + from beets.plugins import BeetsPlugin + + + class SomePlugin(BeetsPlugin): + def __init__(self): + super().__init__() + self.register_listener("pluginload", self.loaded) + + def loaded(self): + self._log.info("Plugin loaded!") + +.. rubric:: Plugin Events + +``pluginload`` + :Parameters: (none) + :Description: Called after all plugins have been loaded after the ``beet`` + command starts. + +``import`` + :Parameters: ``lib`` (|Library|), ``paths`` (list of path strings) + :Description: Called after the ``import`` command finishes. + +``album_imported`` + :Parameters: ``lib`` (|Library|), ``album`` (|Album|) + :Description: Called every time the importer finishes adding an album to the + library. + +``album_removed`` + :Parameters: ``lib`` (|Library|), ``album`` (|Album|) + :Description: Called every time an album is removed from the library (even + when its files are not deleted from disk). + +``item_copied`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called whenever an item file is copied. + +``item_imported`` + :Parameters: ``lib`` (|Library|), ``item`` (|Item|) + :Description: Called every time the importer adds a singleton to the library + (not called for full-album imports). + +``before_item_imported`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object immediately before it is + imported. + +``before_item_moved`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object immediately before its file is + moved. + +``item_moved`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object whenever its file is moved. 
+ +``item_linked`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object whenever a symlink is created + for a file. + +``item_hardlinked`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object whenever a hardlink is created + for a file. + +``item_reflinked`` + :Parameters: ``item`` (|Item|), ``source`` (path), ``destination`` (path) + :Description: Called with an ``Item`` object whenever a reflink is created + for a file. + +``item_removed`` + :Parameters: ``item`` (|Item|) + :Description: Called with an ``Item`` object every time an item (singleton + or part of an album) is removed from the library (even when its file is + not deleted from disk). + +``write`` + :Parameters: ``item`` (|Item|), ``path`` (path), ``tags`` (dict) + :Description: Called just before a file's metadata is written to disk. + Handlers may modify ``tags`` or raise ``library.FileOperationError`` to + abort. + +``after_write`` + :Parameters: ``item`` (|Item|) + :Description: Called after a file's metadata is written to disk. + +``import_task_created`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called immediately after an import task is initialized. May + return a list (possibly empty) of replacement tasks. + +``import_task_start`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called before an import task begins processing. + +``import_task_apply`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called after metadata changes have been applied in an import + task (on the UI thread; keep fast). Prefer a pipeline stage otherwise + (see :ref:`plugin-stage`). + +``import_task_before_choice`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called after candidate search and before deciding how to + import. 
May return an importer action (only one handler may return + non-None). + +``import_task_choice`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called after a decision has been made about an import task. + Use ``task.choice_flag`` to inspect or change the action. + +``import_task_files`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called after filesystem manipulation (copy/move/write) for an + import task. + +``library_opened`` + :Parameters: ``lib`` (|Library|) + :Description: Called after beets starts and initializes the main Library + object. + +``database_change`` + :Parameters: ``lib`` (|Library|), ``model`` (|Model|) + :Description: A modification has been made to the library database (may not + yet be committed). + +``cli_exit`` + :Parameters: ``lib`` (|Library|) + :Description: Called just before the ``beet`` command-line program exits. + +``import_begin`` + :Parameters: ``session`` (|ImportSession|) + :Description: Called just before a ``beet import`` session starts. + +``trackinfo_received`` + :Parameters: ``info`` (|TrackInfo|) + :Description: Called after metadata for a track is fetched (e.g., from + MusicBrainz). Handlers can modify the tags seen by later pipeline stages + or adjustments (e.g., ``mbsync``). + +``albuminfo_received`` + :Parameters: ``info`` (|AlbumInfo|) + :Description: Like ``trackinfo_received`` but for album-level metadata. + +``album_matched`` + :Parameters: ``match`` (``AlbumMatch``) + :Description: Called after ``Item`` objects from a folder that's being + imported have been matched to an ``AlbumInfo`` and the corresponding + distance has been calculated. Missing and extra tracks, if any, are + included in the match. + +``before_choose_candidate`` + :Parameters: ``task`` (|ImportTask|), ``session`` (|ImportSession|) + :Description: Called before prompting the user during interactive import. 
+ May return a list of ``PromptChoices`` to append to the prompt (see + :ref:`append_prompt_choices`). + +``mb_track_extract`` + :Parameters: ``data`` (dict) + :Description: Called after metadata is obtained from MusicBrainz for a + track. Must return a (possibly empty) dict of additional ``field: + value`` pairs to apply (overwriting existing fields). + +``mb_album_extract`` + :Parameters: ``data`` (dict) + :Description: Like ``mb_track_extract`` but for album tags. Overwrites tags + set at the track level with the same field. + +The included ``mpdupdate`` plugin provides an example use case for event +listeners. diff --git a/docs/dev/plugins/index.rst b/docs/dev/plugins/index.rst new file mode 100644 index 000000000..a8feb32d9 --- /dev/null +++ b/docs/dev/plugins/index.rst @@ -0,0 +1,109 @@ +Plugin Development +================== + +Beets plugins are Python modules or packages that extend the core functionality +of beets. The plugin system is designed to be flexible, allowing developers to +add virtually any type of features to beets. + +For instance you can create plugins that add new commands to the command-line +interface, listen for events in the beets lifecycle or extend the autotagger +with new metadata sources. + +.. _basic-plugin-setup: + +Basic Plugin Setup +------------------ + +A beets plugin is just a Python module or package inside the ``beetsplug`` +namespace [1]_ package. To create the basic plugin layout, create a directory +called ``beetsplug`` and add either your plugin module: + +.. code-block:: shell + + beetsplug/ + └── myawesomeplugin.py + +or your plugin subpackage + +.. code-block:: shell + + beetsplug/ + └── myawesomeplugin/ + ├── __init__.py + └── myawesomeplugin.py + +.. attention:: + + You do not need to add an ``__init__.py`` file to the ``beetsplug`` + directory. Python treats your plugin as a namespace package automatically, + thus we do not depend on ``pkgutil``-based setup in the ``__init__.py`` file + anymore. 
+ +The meat of your plugin goes in ``myawesomeplugin.py``. Every plugin has to +extend the |BeetsPlugin| abstract base class [2]_. For instance, a minimal +plugin without any functionality would look like this: + +.. code-block:: python + + # beetsplug/myawesomeplugin.py + from beets.plugins import BeetsPlugin + + + class MyAwesomePlugin(BeetsPlugin): + pass + +.. attention:: + + If your plugin is composed of intermediate |BeetsPlugin| subclasses, make + sure that your plugin is defined *last* in the namespace. We only load the + last subclass of |BeetsPlugin| we find in your plugin namespace. + +To use your new plugin, you need to package [3]_ your plugin and install it into +your ``beets`` (virtual) environment. To enable your plugin, add it to the +beets configuration + +.. code-block:: yaml + + # config.yaml + plugins: + - myawesomeplugin + +and you're good to go! + +.. [1] Check out `this article`_ and `this Stack Overflow question`_ if you + haven't heard about namespace packages. + +.. [2] Abstract base classes allow us to define a contract which any plugin must + follow. This is a common paradigm in object-oriented programming, and it + helps to ensure that plugins are implemented in a consistent way. For more + information, see for example pep-3119_. + +.. [3] There are a variety of packaging tools available for python, for example + you can use poetry_, setuptools_ or hatchling_. + +.. _hatchling: https://hatch.pypa.io/latest/config/build/#build-system + +.. _pep-3119: https://peps.python.org/pep-3119/#rationale + +.. _poetry: https://python-poetry.org/docs/pyproject/#packages + +.. _setuptools: https://setuptools.pypa.io/en/latest/userguide/package_discovery.html#finding-simple-packages + +.. _this article: https://realpython.com/python-namespace-package/#setting-up-some-namespace-packages + +..
_this stack overflow question: https://stackoverflow.com/a/27586272/9582674 + +More information +---------------- + +For more information on writing plugins, feel free to check out the following +resources: + +.. toctree:: + :maxdepth: 3 + :includehidden: + + commands + events + autotagger + other/index diff --git a/docs/dev/plugins/other/config.rst b/docs/dev/plugins/other/config.rst new file mode 100644 index 000000000..7c529af93 --- /dev/null +++ b/docs/dev/plugins/other/config.rst @@ -0,0 +1,36 @@ +Read Configuration Options +========================== + +Plugins can configure themselves using the ``config.yaml`` file. You can read +configuration values in two ways. The first is to use ``self.config`` within +your plugin class. This gives you a view onto the configuration values in a +section with the same name as your plugin's module. For example, if your plugin +is in ``greatplugin.py``, then ``self.config`` will refer to options under the +``greatplugin:`` section of the config file. + +For example, if you have a configuration value called "foo", then users can put +this in their ``config.yaml``: + +:: + + greatplugin: + foo: bar + +To access this value, say ``self.config['foo'].get()`` at any point in your +plugin's code. The ``self.config`` object is a *view* as defined by the Confuse_ +library. + +.. _confuse: https://confuse.readthedocs.io/en/latest/ + +If you want to access configuration values *outside* of your plugin's section, +import the ``config`` object from the ``beets`` module. That is, just put ``from +beets import config`` at the top of your plugin and access values from there. + +If your plugin provides configuration values for sensitive data (e.g., +passwords, API keys, ...), you should add these to the config so they can be +redacted automatically when users dump their config. 
This can be done by setting +each value's ``redact`` flag, like so: + +:: + + self.config['password'].redact = True diff --git a/docs/dev/plugins/other/fields.rst b/docs/dev/plugins/other/fields.rst new file mode 100644 index 000000000..6ee570043 --- /dev/null +++ b/docs/dev/plugins/other/fields.rst @@ -0,0 +1,35 @@ +Flexible Field Types +==================== + +If your plugin uses flexible fields to store numbers or other non-string values, +you can specify the types of those fields. A rating plugin, for example, might +want to declare that the ``rating`` field should have an integer type: + +.. code-block:: python + + from beets.plugins import BeetsPlugin + from beets.dbcore import types + + + class RatingPlugin(BeetsPlugin): + item_types = {"rating": types.INTEGER} + + @property + def album_types(self): + return {"rating": types.INTEGER} + +A plugin may define two attributes: ``item_types`` and ``album_types``. Each of +those attributes is a dictionary mapping a flexible field name to a type +instance. You can find the built-in types in the ``beets.dbcore.types`` and +``beets.library`` modules or implement your own type by inheriting from the +``Type`` class. + +Specifying types has several advantages: + +- Code that accesses the field like ``item['my_field']`` gets the right type + (instead of just a string). +- You can use advanced queries (like :ref:`ranges <numericquery>`) from the + command line. +- User input for flexible fields may be validated and converted. +- Items missing the given field can use an appropriate null value for querying + and sorting purposes. diff --git a/docs/dev/plugins/other/import.rst b/docs/dev/plugins/other/import.rst new file mode 100644 index 000000000..706a520b7 --- /dev/null +++ b/docs/dev/plugins/other/import.rst @@ -0,0 +1,88 @@ +.. _plugin-stage: + +Add Import Pipeline Stages +========================== + +Many plugins need to add high-latency operations to the import workflow. 
For +example, a plugin that fetches lyrics from the Web would, ideally, not block the +progress of the rest of the importer. Beets allows plugins to add stages to the +parallel import pipeline. + +Each stage is run in its own thread. Plugin stages run after metadata changes +have been applied to a unit of music (album or track) and before file +manipulation has occurred (copying and moving files, writing tags to disk). +Multiple stages run in parallel but each stage processes only one task at a time +and each task is processed by only one stage at a time. + +Plugins provide stages as functions that take two arguments: ``config`` and +``task``, which are ``ImportSession`` and ``ImportTask`` objects (both defined +in ``beets.importer``). Add such a function to the plugin's ``import_stages`` +field to register it: + +.. code-block:: python + + from beets.importer import ImportSession, ImportTask + from beets.plugins import BeetsPlugin + + + class ExamplePlugin(BeetsPlugin): + + def __init__(self): + super().__init__() + self.import_stages = [self.stage] + + def stage(self, session: ImportSession, task: ImportTask): + print("Importing something!") + +It is also possible to request your function to run early in the pipeline by +adding the function to the plugin's ``early_import_stages`` field instead: + +.. code-block:: python + + self.early_import_stages = [self.stage] + +.. _extend-query: + +Extend the Query Syntax +----------------------- + +You can add new kinds of queries to beets' :doc:`query syntax +</reference/query>`. There are two ways to add custom queries: using a prefix +and using a name. Prefix-based query extension can apply to *any* field, while +named queries are not associated with any field. For example, beets already +supports regular expression queries, which are indicated by a colon +prefix---plugins can do the same. + +For either kind of query extension, define a subclass of the ``Query`` type from +the ``beets.dbcore.query`` module. 
Then: + +- To define a prefix-based query, define a ``queries`` method in your plugin + class. Return from this method a dictionary mapping prefix strings to query + classes. +- To define a named query, define dictionaries named either ``item_queries`` or + ``album_queries``. These should map names to query types. So if you use ``{ + "foo": FooQuery }``, then the query ``foo:bar`` will construct a query like + ``FooQuery("bar")``. + +For prefix-based queries, you will want to extend ``FieldQuery``, which +implements string comparisons on fields. To use it, create a subclass inheriting +from that class and override the ``value_match`` class method. (Remember the +``@classmethod`` decorator!) The following example plugin declares a query using +the ``@`` prefix to delimit exact string matches. The plugin will be used if we +issue a command like ``beet ls @something`` or ``beet ls artist:@something``: + +.. code-block:: python + + from beets.plugins import BeetsPlugin + from beets.dbcore import FieldQuery + + + class ExactMatchQuery(FieldQuery): + @classmethod + def value_match(cls, pattern, val): + return pattern == val + + + class ExactMatchPlugin(BeetsPlugin): + def queries(self): + return {"@": ExactMatchQuery} diff --git a/docs/dev/plugins/other/index.rst new file mode 100644 index 000000000..595139042 --- /dev/null +++ b/docs/dev/plugins/other/index.rst @@ -0,0 +1,16 @@ +Further Reading +=============== + +For more information on writing plugins, feel free to check out the following +resources: + +.. toctree:: + :maxdepth: 2 + + config + templates + mediafile + import + fields + logging + prompts diff --git a/docs/dev/plugins/other/logging.rst new file mode 100644 index 000000000..a26f0c4c0 --- /dev/null +++ b/docs/dev/plugins/other/logging.rst @@ -0,0 +1,38 @@ +..
_plugin-logging: + +Logging +======= + +Each plugin object has a ``_log`` attribute, which is a ``Logger`` from the +`standard Python logging module`_. The logger is set up to use `PEP 3101`_, +str.format-style string formatting. So you can write logging calls like this: + +.. code-block:: python + + self._log.debug("Processing {0.title} by {0.artist}", item) + +.. _pep 3101: https://www.python.org/dev/peps/pep-3101/ + +.. _standard python logging module: https://docs.python.org/3/library/logging.html + +When beets is in verbose mode, plugin messages are prefixed with the plugin name +to make them easier to see. + +Which messages will be logged depends on the logging level and the action +performed: + +- Inside import stages and event handlers, the default is ``WARNING`` messages + and above. +- Everywhere else, the default is ``INFO`` or above. + +The verbosity can be increased with ``--verbose`` (``-v``) flags: each flag +lowers the level by a notch. That means that, with a single ``-v`` flag, event +handlers won't have their ``DEBUG`` messages displayed, but command functions +(for example) will. With ``-vv`` on the command line, ``DEBUG`` messages will be +displayed everywhere. + +This addresses a common pattern where plugins need to use the same code for a +command and an import stage, but the command needs to print more messages than +the import stage. (For example, you'll want to log "found lyrics for this song" +when you're run explicitly as a command, but you don't want to noisily interrupt +the importer interface when running automatically.) diff --git a/docs/dev/plugins/other/mediafile.rst new file mode 100644 index 000000000..8fa22ceca --- /dev/null +++ b/docs/dev/plugins/other/mediafile.rst @@ -0,0 +1,32 @@ +Extend MediaFile +================ + +MediaFile_ is the file tag abstraction layer that beets uses to make +cross-format metadata manipulation simple.
Plugins can add fields to MediaFile +to extend the kinds of metadata that they can easily manage. + +The ``MediaFile`` class uses ``MediaField`` descriptors to provide access to +file tags. If you have created a descriptor you can add it through your plugins +:py:meth:`beets.plugins.BeetsPlugin.add_media_field` method. + +.. _mediafile: https://mediafile.readthedocs.io/en/latest/ + +Here's an example plugin that provides a meaningless new field "foo": + +.. code-block:: python + + class FooPlugin(BeetsPlugin): + def __init__(self): + field = mediafile.MediaField( + mediafile.MP3DescStorageStyle("foo"), mediafile.StorageStyle("foo") + ) + self.add_media_field("foo", field) + + + FooPlugin() + item = Item.from_path("/path/to/foo/tag.mp3") + assert item["foo"] == "spam" + + item["foo"] == "ham" + item.write() + # The "foo" tag of the file is now "ham" diff --git a/docs/dev/plugins/other/prompts.rst b/docs/dev/plugins/other/prompts.rst new file mode 100644 index 000000000..29720b922 --- /dev/null +++ b/docs/dev/plugins/other/prompts.rst @@ -0,0 +1,69 @@ +.. _append_prompt_choices: + +Append Prompt Choices +===================== + +Plugins can also append choices to the prompt presented to the user during an +import session. + +To do so, add a listener for the ``before_choose_candidate`` event, and return a +list of ``PromptChoices`` that represent the additional choices that your plugin +shall expose to the user: + +.. 
code-block:: python + + from beets.plugins import BeetsPlugin + from beets.util import PromptChoice + + + class ExamplePlugin(BeetsPlugin): + def __init__(self): + super().__init__() + self.register_listener( + "before_choose_candidate", self.before_choose_candidate_event + ) + + def before_choose_candidate_event(self, session, task): + return [ + PromptChoice("p", "Print foo", self.foo), + PromptChoice("d", "Do bar", self.bar), + ] + + def foo(self, session, task): + print('User has chosen "Print foo"!') + + def bar(self, session, task): + print('User has chosen "Do bar"!') + +The previous example modifies the standard prompt: + +.. code-block:: shell + + # selection (default 1), Skip, Use as-is, as Tracks, Group albums, + Enter search, enter Id, aBort? + +by appending two additional options (``Print foo`` and ``Do bar``): + +.. code-block:: shell + + # selection (default 1), Skip, Use as-is, as Tracks, Group albums, + Enter search, enter Id, aBort, Print foo, Do bar? + +If the user selects a choice, the ``callback`` attribute of the corresponding +``PromptChoice`` will be called. It is the responsibility of the plugin to check +for the status of the import session and decide the choices to be appended: for +example, if a particular choice should only be presented if the album has no +candidates, the relevant checks against ``task.candidates`` should be performed +inside the plugin's ``before_choose_candidate_event`` accordingly. + +Please make sure that the short letter for each of the choices provided by the +plugin is not already in use: the importer will emit a warning and discard all +but one of the choices using the same letter, giving priority to the core +importer prompt choices. As a reference, the following characters are used by +the choices on the core importer prompt, and hence should not be used: ``a``, +``s``, ``u``, ``t``, ``g``, ``e``, ``i``, ``b``. 
+ +Additionally, the callback function can optionally specify the next action to be +performed by returning a ``importer.Action`` value. It may also return a +``autotag.Proposal`` value to update the set of current proposals to be +considered. diff --git a/docs/dev/plugins/other/templates.rst b/docs/dev/plugins/other/templates.rst new file mode 100644 index 000000000..89509dcb7 --- /dev/null +++ b/docs/dev/plugins/other/templates.rst @@ -0,0 +1,57 @@ +Add Path Format Functions and Fields +==================================== + +Beets supports *function calls* in its path format syntax (see +:doc:`/reference/pathformat`). Beets includes a few built-in functions, but +plugins can register new functions by adding them to the ``template_funcs`` +dictionary. + +Here's an example: + +.. code-block:: python + + class MyPlugin(BeetsPlugin): + def __init__(self): + super().__init__() + self.template_funcs["initial"] = _tmpl_initial + + + def _tmpl_initial(text: str) -> str: + if text: + return text[0].upper() + else: + return "" + +This plugin provides a function ``%initial`` to path templates where +``%initial{$artist}`` expands to the artist's initial (its capitalized first +character). + +Plugins can also add template *fields*, which are computed values referenced as +``$name`` in templates. To add a new field, add a function that takes an +``Item`` object to the ``template_fields`` dictionary on the plugin object. +Here's an example that adds a ``$disc_and_track`` field: + +.. code-block:: python + + class MyPlugin(BeetsPlugin): + def __init__(self): + super().__init__() + self.template_fields["disc_and_track"] = _tmpl_disc_and_track + + + def _tmpl_disc_and_track(item: Item) -> str: + """Expand to the disc number and track number if this is a + multi-disc release. Otherwise, just expands to the track + number. 
+ """ + if item.disctotal > 1: + return "%02i.%02i" % (item.disc, item.track) + else: + return "%02i" % (item.track) + +With this plugin enabled, templates can reference ``$disc_and_track`` as they +can any standard metadata field. + +This field works for *item* templates. Similarly, you can register *album* +template fields by adding a function accepting an ``Album`` argument to the +``album_template_fields`` dict. diff --git a/docs/extensions/conf.py b/docs/extensions/conf.py new file mode 100644 index 000000000..308d28be2 --- /dev/null +++ b/docs/extensions/conf.py @@ -0,0 +1,142 @@ +"""Sphinx extension for simple configuration value documentation.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, ClassVar + +from docutils import nodes +from docutils.parsers.rst import directives +from sphinx import addnodes +from sphinx.directives import ObjectDescription +from sphinx.domains import Domain, ObjType +from sphinx.roles import XRefRole +from sphinx.util.nodes import make_refnode + +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + + from docutils.nodes import Element + from docutils.parsers.rst.states import Inliner + from sphinx.addnodes import desc_signature, pending_xref + from sphinx.application import Sphinx + from sphinx.builders import Builder + from sphinx.environment import BuildEnvironment + from sphinx.util.typing import ExtensionMetadata, OptionSpec + + +class Conf(ObjectDescription[str]): + """Directive for documenting a single configuration value.""" + + option_spec: ClassVar[OptionSpec] = { + "default": directives.unchanged, + } + + def handle_signature(self, sig: str, signode: desc_signature) -> str: + """Process the directive signature (the config name).""" + signode += addnodes.desc_name(sig, sig) + + # Add default value if provided + if "default" in self.options: + signode += nodes.Text(" ") + default_container = nodes.inline("", "") + default_container += nodes.Text("(default: ") + 
default_container += nodes.literal("", self.options["default"]) + default_container += nodes.Text(")") + signode += default_container + + return sig + + def add_target_and_index( + self, name: str, sig: str, signode: desc_signature + ) -> None: + """Add cross-reference target and index entry.""" + target = f"conf-{name}" + if target not in self.state.document.ids: + signode["ids"].append(target) + self.state.document.note_explicit_target(signode) + + # A unique full name which includes the document name + index_name = f"{self.env.docname.replace('/', '.')}:{name}" + # Register with the conf domain + domain = self.env.get_domain("conf") + domain.data["objects"][index_name] = (self.env.docname, target) + + # Add to index + self.indexnode["entries"].append( + ("single", f"{name} (configuration value)", target, "", None) + ) + + +class ConfDomain(Domain): + """Domain for simple configuration values.""" + + name = "conf" + label = "Simple Configuration" + object_types = {"conf": ObjType("conf", "conf")} + directives = {"conf": Conf} + roles = {"conf": XRefRole()} + initial_data: dict[str, Any] = {"objects": {}} + + def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]: + """Return an iterable of object tuples for the inventory.""" + for name, (docname, targetname) in self.data["objects"].items(): + # Remove the document name prefix for display + display_name = name.split(":")[-1] + yield (name, display_name, "conf", docname, targetname, 1) + + def resolve_xref( + self, + env: BuildEnvironment, + fromdocname: str, + builder: Builder, + typ: str, + target: str, + node: pending_xref, + contnode: Element, + ) -> Element | None: + if entry := self.data["objects"].get(target): + docname, targetid = entry + return make_refnode( + builder, fromdocname, docname, targetid, contnode + ) + + return None + + +# sphinx.util.typing.RoleFunction +def conf_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner, + /, + options: dict[str, Any] | 
None = None, + content: Sequence[str] = (), +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for referencing configuration values.""" + node = addnodes.pending_xref( + "", + refdomain="conf", + reftype="conf", + reftarget=text, + refwarn=True, + **(options or {}), + ) + node += nodes.literal(text, text.split(":")[-1]) + return [node], [] + + +def setup(app: Sphinx) -> ExtensionMetadata: + app.add_domain(ConfDomain) + + # register a top-level directive so users can use ".. conf:: ..." + app.add_directive("conf", Conf) + + # Register role with short name + app.add_role("conf", conf_role) + return { + "version": "0.1", + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/docs/faq.rst b/docs/faq.rst index ac7818ab2..287dc88af 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -1,57 +1,56 @@ FAQ -### +=== -Here are some answers to frequently-asked questions from IRC and elsewhere. -Got a question that isn't answered here? Try the `discussion board`_, or +Here are some answers to frequently-asked questions from IRC and elsewhere. Got +a question that isn't answered here? Try the `discussion board`_, or :ref:`filing an issue <bugs>` in the bug tracker. -.. _mailing list: https://groups.google.com/group/beets-users .. _discussion board: https://github.com/beetbox/beets/discussions/ +.. _mailing list: https://groups.google.com/group/beets-users + .. contents:: :local: :depth: 2 - How do I… -========= - +--------- .. _move: …rename my files according to a new path format configuration? --------------------------------------------------------------- - -Just run the :ref:`move-cmd` command. Use a :doc:`query </reference/query>` -to rename a subset of your music or leave the query off to rename -everything. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Just run the :ref:`move-cmd` command. Use a :doc:`query </reference/query>` to +rename a subset of your music or leave the query off to rename everything. .. 
_asispostfacto: …find all the albums I imported "as-is"? ----------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Enable the :ref:`import log <import_log>` -to automatically record whenever you skip an album or accept one -"as-is". +Enable the :ref:`import log <import_log>` to automatically record whenever you +skip an album or accept one "as-is". -Alternatively, you can find all the albums in your library that are -missing MBIDs using a command like this:: +Alternatively, you can find all the albums in your library that are missing +MBIDs using a command like this: + +:: beet ls -a mb_albumid::^$ -Assuming your files didn't have MBIDs already, then this will roughly -correspond to those albums that didn't get autotagged. - +Assuming your files didn't have MBIDs already, then this will roughly correspond +to those albums that didn't get autotagged. .. _discdir: …create "Disc N" directories for multi-disc albums? ---------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use the :doc:`/plugins/inline` along -with the ``%if{}`` function to accomplish this:: +Use the :doc:`/plugins/inline` along with the ``%if{}`` function to accomplish +this: + +:: plugins: inline paths: @@ -59,344 +58,315 @@ with the ``%if{}`` function to accomplish this:: item_fields: multidisc: 1 if disctotal > 1 else 0 -This ``paths`` configuration only contains the -``default`` key: it leaves the ``comp`` and ``singleton`` keys as their -default values, as documented in :ref:`path-format-config`. -To create "Disc N" directories for compilations and singletons, you will need -to specify similar templates for those keys as well. - +This ``paths`` configuration only contains the ``default`` key: it leaves the +``comp`` and ``singleton`` keys as their default values, as documented in +:ref:`path-format-config`. 
To create "Disc N" directories for compilations and +singletons, you will need to specify similar templates for those keys as well. .. _multidisc: …import a multi-disc album? ---------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As of 1.0b11, beets tags multi-disc albums as a *single unit*. To get a -good match, it needs to treat all of the album's parts together as a -single release. +As of 1.0b11, beets tags multi-disc albums as a *single unit*. To get a good +match, it needs to treat all of the album's parts together as a single release. To help with this, the importer uses a simple heuristic to guess when a -directory represents a multi-disc album that's been divided into -multiple subdirectories. When it finds a situation like this, it -collapses all of the items in the subdirectories into a single release -for tagging. +directory represents a multi-disc album that's been divided into multiple +subdirectories. When it finds a situation like this, it collapses all of the +items in the subdirectories into a single release for tagging. The heuristic works by looking at the names of directories. If multiple -subdirectories of a common parent directory follow the pattern "(title) -disc (number) (...)" and the *prefix* (everything up to the number) is -the same, the directories are collapsed together. One of the key words -"disc" or "CD" must be present to make this work. - -If you have trouble tagging a multi-disc album, consider the ``--flat`` -flag (which treats a whole tree as a single album) or just putting all -the tracks into a single directory to force them to be tagged together. +subdirectories of a common parent directory follow the pattern "(title) disc +(number) (...)" and the *prefix* (everything up to the number) is the same, the +directories are collapsed together. One of the key words "disc" or "CD" must be +present to make this work. 
+If you have trouble tagging a multi-disc album, consider the ``--flat`` flag +(which treats a whole tree as a single album) or just putting all the tracks +into a single directory to force them to be tagged together. .. _mbid: …enter a MusicBrainz ID? ------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~ An MBID looks like one of these: -- ``https://musicbrainz.org/release/ded77dcf-7279-457e-955d-625bd3801b87`` -- ``d569deba-8c6b-4d08-8c43-d0e5a1b8c7f3`` +- ``https://musicbrainz.org/release/ded77dcf-7279-457e-955d-625bd3801b87`` +- ``d569deba-8c6b-4d08-8c43-d0e5a1b8c7f3`` -Beets can recognize either the hex-with-dashes UUID-style string or the -full URL that contains it (as of 1.0b11). +Beets can recognize either the hex-with-dashes UUID-style string or the full URL +that contains it (as of 1.0b11). -You can get these IDs by `searching on the MusicBrainz web -site <https://musicbrainz.org/>`__ and going to a *release* page (when -tagging full albums) or a *recording* page (when tagging singletons). -Then, copy the URL of the page and paste it into beets. - -Note that MusicBrainz has both "releases" and "release groups," which -link together different versions of the same album. Use *release* IDs -here. +You can get these IDs by `searching on the MusicBrainz web site +<https://musicbrainz.org/>`__ and going to a *release* page (when tagging full +albums) or a *recording* page (when tagging singletons). Then, copy the URL of +the page and paste it into beets. +Note that MusicBrainz has both "releases" and "release groups," which link +together different versions of the same album. Use *release* IDs here. .. _upgrade: …upgrade to the latest version of beets? ----------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Run a command like this:: +Run a command like this: + +:: pip install -U beets -The ``-U`` flag tells `pip`_ to upgrade -beets to the latest version. 
If you want a specific version, you can -specify with using ``==`` like so:: +The ``-U`` flag tells pip_ to upgrade beets to the latest version. If you want a +specific version, you can specify with using ``==`` like so: + +:: pip install beets==1.0rc2 - .. _src: …run the latest source version of beets? ----------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Beets sees regular releases (about every six weeks or so), but sometimes -it's helpful to run on the "bleeding edge". To run the latest source: +Beets sees regular releases (about every six weeks or so), but sometimes it's +helpful to run on the "bleeding edge". To run the latest source: -1. Uninstall beets. If you installed using ``pip``, you can just run - ``pip uninstall beets``. +1. Uninstall beets. If you installed using ``pip``, you can just run ``pip + uninstall beets``. 2. Install from source. Choose one of these methods: - - Directly from GitHub using - ``python -m pip install git+https://github.com/beetbox/beets.git`` - command. Depending on your system, you may need to use ``pip3`` - and ``python3`` instead of ``pip`` and ``python`` respectively. - - Use ``pip`` to install the latest snapshot tarball. Type: - ``pip install https://github.com/beetbox/beets/tarball/master`` - - Use ``pip`` to install an "editable" version of beets based on an - automatic source checkout. For example, run - ``pip install -e git+https://github.com/beetbox/beets#egg=beets`` - to clone beets and install it, allowing you to modify the source - in-place to try out changes. - - Clone source code and install it in editable mode + - Directly from GitHub using ``python -m pip install + git+https://github.com/beetbox/beets.git`` command. Depending on your + system, you may need to use ``pip3`` and ``python3`` instead of ``pip`` and + ``python`` respectively. + - Use ``pip`` to install the latest snapshot tarball. 
Type: ``pip install + https://github.com/beetbox/beets/tarball/master`` + - Use ``pip`` to install an "editable" version of beets based on an automatic + source checkout. For example, run ``pip install -e + git+https://github.com/beetbox/beets#egg=beets`` to clone beets and install + it, allowing you to modify the source in-place to try out changes. + - Clone source code and install it in editable mode - .. code-block:: shell + .. code-block:: shell git clone https://github.com/beetbox/beets.git poetry install - This approach lets you decide where the - source is stored, with any changes immediately reflected in your - environment. - -More details about the beets source are available on the :doc:`developer documentation </dev/index>` -pages. + This approach lets you decide where the source is stored, with any changes + immediately reflected in your environment. +More details about the beets source are available on the :doc:`developer +documentation </dev/index>` pages. .. _bugs: …report a bug in beets? ----------------------- -We use the `issue tracker`_ on GitHub where you can `open a new ticket`_. -Please follow these guidelines when reporting an issue: +We use the `issue tracker`_ on GitHub where you can `open a new ticket`_. Please +follow these guidelines when reporting an issue: -- Most importantly: if beets is crashing, please `include the - traceback <https://imgur.com/jacoj>`__. Tracebacks can be more - readable if you put them in a pastebin (e.g., - `Gist <https://gist.github.com/>`__ or - `Hastebin <https://hastebin.com/>`__), especially when communicating - over IRC or email. -- Turn on beets' debug output (using the -v option: for example, - ``beet -v import ...``) and include that with your bug report. Look - through this verbose output for any red flags that might point to the - problem. -- If you can, try installing the latest beets source code to see if the - bug is fixed in an unreleased version. 
You can also look at the - :doc:`latest changelog entries </changelog>` - for descriptions of the problem you're seeing. -- Try to narrow your problem down to something specific. Is a - particular plugin causing the problem? (You can disable plugins to - see whether the problem goes away.) Is a some music file or a single - album leading to the crash? (Try importing individual albums to - determine which one is causing the problem.) Is some entry in your - configuration file causing it? Et cetera. -- If you do narrow the problem down to a particular audio file or - album, include it with your bug report so the developers can run - tests. - -If you've never reported a bug before, Mozilla has some well-written -`general guidelines for good bug -reports`_. - -.. _issue tracker: https://github.com/beetbox/beets/issues -.. _general guidelines for good bug reports: https://developer.mozilla.org/en-US/docs/Mozilla/QA/Bug_writing_guidelines +- Most importantly: if beets is crashing, please `include the traceback + <https://imgur.com/jacoj>`__. Tracebacks can be more readable if you put them + in a pastebin (e.g., `Gist <https://gist.github.com/>`__ or `Hastebin + <https://hastebin.com/>`__), especially when communicating over IRC. +- Turn on beets' debug output (using the -v option: for example, ``beet -v + import ...``) and include that with your bug report. Look through this verbose + output for any red flags that might point to the problem. +- If you can, try installing the latest beets source code to see if the bug is + fixed in an unreleased version. You can also look at the :doc:`latest + changelog entries </changelog>` for descriptions of the problem you're seeing. +- Try to narrow your problem down to something specific. Is a particular plugin + causing the problem? (You can disable plugins to see whether the problem goes + away.) Is a some music file or a single album leading to the crash? 
(Try + importing individual albums to determine which one is causing the problem.) Is + some entry in your configuration file causing it? Et cetera. +- If you do narrow the problem down to a particular audio file or album, include + it with your bug report so the developers can run tests. +If you've never reported a bug before, Mozilla has some well-written `general +guidelines for good bug reports`_. .. _find-config: +.. _general guidelines for good bug reports: https://developer.mozilla.org/en-US/docs/Mozilla/QA/Bug_writing_guidelines + +.. _issue tracker: https://github.com/beetbox/beets/issues + …find the configuration file (config.yaml)? -------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You create this file yourself; beets just reads it. See :doc:`/reference/config`. - .. _special-chars: …avoid using special characters in my filenames? ------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the ``%asciify{}`` function in your path formats. See :ref:`template-functions`. - .. _move-dir: …point beets at a new music directory? --------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to move your music from one directory to another, the best way is -to let beets do it for you. First, edit your configuration and set the +If you want to move your music from one directory to another, the best way is to +let beets do it for you. First, edit your configuration and set the ``directory`` setting to the new place. Then, type ``beet move`` to have beets move all your files. If you've already moved your music *outside* of beets, you have a few options: - Move the music back (with an ordinary ``mv``) and then use the above steps. -- Delete your database and re-create it from the new paths using ``beet import -AWC``. +- Delete your database and re-create it from the new paths using ``beet import + -AWC``. 
- Resort to manually modifying the SQLite database (not recommended). - Why does beets… -=============== +--------------- .. _nomatch: …complain that it can't find a match? -------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a number of possibilities: -- First, make sure the album is in `the MusicBrainz - database <https://musicbrainz.org/>`__. You - can search on their site to make sure it's cataloged there. (If not, - anyone can edit MusicBrainz---so consider adding the data yourself.) -- If the album in question is a multi-disc release, see the relevant - FAQ answer above. -- The music files' metadata might be insufficient. Try using the "enter - search" or "enter ID" options to help the matching process find the - right MusicBrainz entry. -- If you have a lot of files that are missing metadata, consider using - :doc:`acoustic fingerprinting </plugins/chroma>` or - :doc:`filename-based guesses </plugins/fromfilename>` - for that music. - -If none of these situations apply and you're still having trouble -tagging something, please :ref:`file a bug report <bugs>`. +- First, make sure you have at least one autotagger extension/plugin enabled. + See :ref:`autotagger_extensions` for a list of valid plugins. +- Check that the album is in `the MusicBrainz database + <https://musicbrainz.org/>`__. You can search on their site to make sure it's + cataloged there. (If not, anyone can edit MusicBrainz---so consider adding the + data yourself.) +- If the album in question is a multi-disc release, see the relevant FAQ answer + above. +- The music files' metadata might be insufficient. Try using the "enter search" + or "enter ID" options to help the matching process find the right MusicBrainz + entry. +- If you have a lot of files that are missing metadata, consider using + :doc:`acoustic fingerprinting </plugins/chroma>` or :doc:`filename-based + guesses </plugins/fromfilename>` for that music. 
+If none of these situations apply and you're still having trouble tagging +something, please :ref:`file a bug report <bugs>`. .. _plugins: …appear to be missing some plugins? ------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Please make sure you're using the latest version of beets---you might -be using a version earlier than the one that introduced the plugin. In -many cases, the plugin may be introduced in beets "trunk" (the latest -source version) and might not be released yet. Take a look at :doc:`the -changelog </changelog>` -to see which version added the plugin. (You can type ``beet version`` to -check which version of beets you have installed.) +Please make sure you're using the latest version of beets---you might be using a +version earlier than the one that introduced the plugin. In many cases, the +plugin may be introduced in beets "trunk" (the latest source version) and might +not be released yet. Take a look at :doc:`the changelog </changelog>` to see +which version added the plugin. (You can type ``beet version`` to check which +version of beets you have installed.) -If you want to live on the bleeding edge and use the latest source -version of beets, you can check out the source (see :ref:`the relevant -question <src>`). - -To see the beets documentation for your version (and avoid confusion -with new features in trunk), select your version from the menu in the sidebar. +If you want to live on the bleeding edge and use the latest source version of +beets, you can check out the source (see :ref:`the relevant question <src>`). +To see the beets documentation for your version (and avoid confusion with new +features in trunk), select your version from the menu in the sidebar. .. _kill: …ignore control-C during an import? ------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Typing a ^C (control-C) control sequence will not halt beets' -multithreaded importer while it is waiting at a prompt for user input. 
-Instead, hit "return" (dismissing the prompt) after typing ^C. -Alternatively, just type a "b" for "aBort" at most prompts. Typing ^C -*will* work if the importer interface is between prompts. +Typing a ^C (control-C) control sequence will not halt beets' multithreaded +importer while it is waiting at a prompt for user input. Instead, hit "return" +(dismissing the prompt) after typing ^C. Alternatively, just type a "b" for +"aBort" at most prompts. Typing ^C *will* work if the importer interface is +between prompts. -Also note that beets may take some time to quit after ^C is typed; it -tries to clean up after itself briefly even when canceled. - -(For developers: this is because the UI thread is blocking on -``input`` and cannot be interrupted by the main thread, which is -trying to close all pipeline stages in the exception handler by setting -a flag. There is no simple way to remedy this.) +Also note that beets may take some time to quit after ^C is typed; it tries to +clean up after itself briefly even when canceled. +(For developers: this is because the UI thread is blocking on ``input`` and +cannot be interrupted by the main thread, which is trying to close all pipeline +stages in the exception handler by setting a flag. There is no simple way to +remedy this.) .. _id3v24: …not change my ID3 tags? ------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~ -Beets writes `ID3v2.4`_ tags by default. -Some software, including Windows (i.e., Windows Explorer and Windows -Media Player) and `id3lib/id3v2 <http://id3v2.sourceforge.net/>`__, -don't support v2.4 tags. When using 2.4-unaware software, it might look -like the tags are unmodified or missing completely. +Beets writes ID3v2.4_ tags by default. Some software, including Windows (i.e., +Windows Explorer and Windows Media Player) and `id3lib/id3v2 +<http://id3v2.sourceforge.net/>`__, don't support v2.4 tags. When using +2.4-unaware software, it might look like the tags are unmodified or missing +completely. 
To enable ID3v2.3 tags, enable the :ref:`id3v23` config option. +.. _id3v2.4: https://id3.org/id3v2.4.0-structure .. _invalid: -.. _ID3v2.4: https://id3.org/id3v2.4.0-structure …complain that a file is "unreadable"? --------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Beets will log a message like "unreadable file: /path/to/music.mp3" when -it encounters files that *look* like music files (according to their -extension) but seem to be broken. Most of the time, this is because the -file is corrupted. To check whether the file is intact, try opening it -in another media player (e.g., -`VLC <https://www.videolan.org/vlc/index.html>`__) to see whether it can -read the file. You can also use specialized programs for checking file -integrity---for example, type ``metaflac --list music.flac`` to check -FLAC files. +Beets will log a message like "unreadable file: /path/to/music.mp3" when it +encounters files that *look* like music files (according to their extension) but +seem to be broken. Most of the time, this is because the file is corrupted. To +check whether the file is intact, try opening it in another media player (e.g., +`VLC <https://www.videolan.org/vlc/index.html>`__) to see whether it can read +the file. You can also use specialized programs for checking file +integrity---for example, type ``metaflac --list music.flac`` to check FLAC +files. If beets still complains about a file that seems to be valid, `open a new -ticket`_ and we'll look into it. There's always a possibility that there's -a bug "upstream" in the `Mutagen <https://github.com/quodlibet/mutagen>`__ -library used by beets, in which case we'll forward the bug to that project's -tracker. - +ticket`_ and we'll look into it. There's always a possibility that there's a bug +"upstream" in the `Mutagen <https://github.com/quodlibet/mutagen>`__ library +used by beets, in which case we'll forward the bug to that project's tracker. .. 
_importhang: …seem to "hang" after an import finishes? ------------------------------------------ - -Probably not. Beets uses a *multithreaded importer* that overlaps many -different activities: it can prompt you for decisions while, in the -background, it talks to MusicBrainz and copies files. This means that, -even after you make your last decision, there may be a backlog of files -to be copied into place and tags to be written. (Plugin tasks, like -looking up lyrics and genres, also run at this time.) If beets pauses -after you see all the albums go by, have patience. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Probably not. Beets uses a *multithreaded importer* that overlaps many different +activities: it can prompt you for decisions while, in the background, it talks +to MusicBrainz and copies files. This means that, even after you make your last +decision, there may be a backlog of files to be copied into place and tags to be +written. (Plugin tasks, like looking up lyrics and genres, also run at this +time.) If beets pauses after you see all the albums go by, have patience. .. _replaceq: …put a bunch of underscores in my filenames? --------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When naming files, beets replaces certain characters to avoid causing -problems on the filesystem. For example, leading dots can confusingly -hide files on Unix and several non-alphanumeric characters are forbidden -on Windows. +When naming files, beets replaces certain characters to avoid causing problems +on the filesystem. For example, leading dots can confusingly hide files on Unix +and several non-alphanumeric characters are forbidden on Windows. -The :ref:`replace` config option -controls which replacements are made. By default, beets makes filenames -safe for all known platforms by replacing several patterns with -underscores. 
This means that, even on Unix, filenames are made -Windows-safe so that network filesystems (such as SMB) can be used -safely. - -Most notably, Windows forbids trailing dots, so a folder called "M.I.A." -will be rewritten to "M.I.A\_" by default. Change the ``replace`` config -if you don't want this behavior and don't need Windows-safe names. +The :ref:`replace` config option controls which replacements are made. By +default, beets makes filenames safe for all known platforms by replacing several +patterns with underscores. This means that, even on Unix, filenames are made +Windows-safe so that network filesystems (such as SMB) can be used safely. +Most notably, Windows forbids trailing dots, so a folder called "M.I.A." will be +rewritten to "M.I.A\_" by default. Change the ``replace`` config if you don't +want this behavior and don't need Windows-safe names. .. _pathq: …say "command not found"? -------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~ You need to put the ``beet`` program on your system's search path. If you installed using pip, the command ``pip show -f beets`` can show you where ``beet`` was placed on your system. If you need help extending your ``$PATH``, try `this Super User answer`_. -.. _this Super User answer: https://superuser.com/a/284361/4569 +.. _open a new ticket: https://github.com/beetbox/beets/issues/new?template=bug-report.md + .. _pip: https://pip.pypa.io/en/stable/ -.. _open a new ticket: - https://github.com/beetbox/beets/issues/new?template=bug-report.md + +.. _this super user answer: https://superuser.com/a/284361/4569 diff --git a/docs/guides/advanced.rst b/docs/guides/advanced.rst index 3802c91b1..b3a5aff20 100644 --- a/docs/guides/advanced.rst +++ b/docs/guides/advanced.rst @@ -1,40 +1,40 @@ Advanced Awesomeness ==================== -So you have beets up and running and you've started :doc:`importing your -music </guides/tagger>`. 
There's a lot more that beets can do now that it has +So you have beets up and running and you've started :doc:`importing your music +</guides/tagger>`. There's a lot more that beets can do now that it has cataloged your collection. Here's a few features to get you started. Most of these tips involve :doc:`plugins </plugins/index>` and fiddling with beets' :doc:`configuration </reference/config>`. So use your favorite text editor to create a config file before you continue. - Fetch album art, genres, and lyrics ----------------------------------- -Beets can help you fill in more than just the basic taxonomy metadata that -comes from MusicBrainz. Plugins can provide :doc:`album art -</plugins/fetchart>`, :doc:`lyrics </plugins/lyrics>`, and -:doc:`genres </plugins/lastgenre>` from databases around the Web. +Beets can help you fill in more than just the basic taxonomy metadata that comes +from MusicBrainz. Plugins can provide :doc:`album art </plugins/fetchart>`, +:doc:`lyrics </plugins/lyrics>`, and :doc:`genres </plugins/lastgenre>` from +databases around the Web. If you want beets to get any of this data automatically during the import process, just enable any of the three relevant plugins (see :doc:`/plugins/index`). For example, put this line in your :doc:`config file -</reference/config>` to enable all three:: +</reference/config>` to enable all three: + +:: plugins: fetchart lyrics lastgenre -Each plugin also has a command you can run to fetch data manually. For -example, if you want to get lyrics for all the Beatles tracks in your -collection, just type ``beet lyrics beatles`` after enabling the plugin. +Each plugin also has a command you can run to fetch data manually. For example, +if you want to get lyrics for all the Beatles tracks in your collection, just +type ``beet lyrics beatles`` after enabling the plugin. 
Read more about using each of these plugins: -* :doc:`/plugins/fetchart` (and its accompanying :doc:`/plugins/embedart`) -* :doc:`/plugins/lyrics` -* :doc:`/plugins/lastgenre` - +- :doc:`/plugins/fetchart` (and its accompanying :doc:`/plugins/embedart`) +- :doc:`/plugins/lyrics` +- :doc:`/plugins/lastgenre` Customize your file and folder names ------------------------------------ @@ -42,22 +42,21 @@ Customize your file and folder names Beets uses an extremely flexible template system to name the folders and files that organize your music in your filesystem. Take a look at :ref:`path-format-config` for the basics: use fields like ``$year`` and -``$title`` to build up a naming scheme. But if you need more flexibility, -there are two features you need to know about: +``$title`` to build up a naming scheme. But if you need more flexibility, there +are two features you need to know about: -* :ref:`Template functions <template-functions>` are simple expressions you - can use in your path formats to add logic to your names. For example, you - can get an artist's first initial using ``%upper{%left{$albumartist,1}}``. -* If you need more flexibility, the :doc:`/plugins/inline` lets you write - snippets of Python code that generate parts of your filenames. The - equivalent code for getting an artist initial with the *inline* plugin looks - like ``initial: albumartist[0].upper()``. +- :ref:`Template functions <template-functions>` are simple expressions you can + use in your path formats to add logic to your names. For example, you can get + an artist's first initial using ``%upper{%left{$albumartist,1}}``. +- If you need more flexibility, the :doc:`/plugins/inline` lets you write + snippets of Python code that generate parts of your filenames. The equivalent + code for getting an artist initial with the *inline* plugin looks like + ``initial: albumartist[0].upper()``. 
If you already have music in your library and want to update their names according to a new scheme, just run the :ref:`move-cmd` command to rename everything. - Stream your music to another computer ------------------------------------- @@ -69,8 +68,8 @@ your own personal Spotify. First, enable the ``web`` plugin (see :doc:`/plugins/index`). Run the server by typing ``beet web`` and head to http://localhost:8337 in a browser. You can -browse your collection with queries and, if your browser supports it, play -music using HTML5 audio. +browse your collection with queries and, if your browser supports it, play music +using HTML5 audio. Transcode music files for media players --------------------------------------- @@ -78,9 +77,11 @@ Transcode music files for media players Do you ever find yourself transcoding high-quality rips to a lower-bitrate, lossy format for your phone or music player? Beets can help with that. -You'll first need to install `ffmpeg`_. Then, enable beets' -:doc:`/plugins/convert`. Set a destination directory in your -:doc:`config file </reference/config>` like so:: +You'll first need to install ffmpeg_. Then, enable beets' +:doc:`/plugins/convert`. Set a destination directory in your :doc:`config file +</reference/config>` like so: + +:: convert: dest: ~/converted_music @@ -95,41 +96,44 @@ you like them. Check out :doc:`its documentation </plugins/convert>`. .. _ffmpeg: https://www.ffmpeg.org - Store any data you like ----------------------- The beets database keeps track of a long list of :ref:`built-in fields -<itemfields>`, but you're not limited to just that list. Say, for example, -that you like to categorize your music by the setting where it should be -played. You can invent a new ``context`` attribute to store this. Set the field -using the :ref:`modify-cmd` command:: +<itemfields>`, but you're not limited to just that list. Say, for example, that +you like to categorize your music by the setting where it should be played. 
You +can invent a new ``context`` attribute to store this. Set the field using the +:ref:`modify-cmd` command: + +:: beet modify context=party artist:'beastie boys' By default, beets will show you the changes that are about to be applied and ask if you really want to apply them to all, some or none of the items or albums. -You can type y for "yes", n for "no", or s for "select". If you choose the latter, -the command will prompt you for each individual matching item or album. +You can type y for "yes", n for "no", or s for "select". If you choose the +latter, the command will prompt you for each individual matching item or album. -Then :doc:`query </reference/query>` your music just as you would with any -other field:: +Then :doc:`query </reference/query>` your music just as you would with any other +field: + +:: beet ls context:mope -You can even use these fields in your filenames (see -:ref:`path-format-config`). +You can even use these fields in your filenames (see :ref:`path-format-config`). -And, unlike :ref:`built-in fields <itemfields>`, such fields can be removed:: +And, unlike :ref:`built-in fields <itemfields>`, such fields can be removed: + +:: beet modify context! artist:'beastie boys' -Read more than you ever wanted to know about the *flexible attributes* -feature `on the beets blog`_. +Read more than you ever wanted to know about the *flexible attributes* feature +`on the beets blog`_. .. _on the beets blog: https://beets.io/blog/flexattr.html - Choose a path style manually for some music ------------------------------------------- @@ -139,19 +143,22 @@ like, but keep around to play for friends and family. This is, of course, impossible to determine automatically using metadata from MusicBrainz. 
Instead, use a flexible attribute (see above) to store a flag on the music you -want to categorize, like so:: +want to categorize, like so: + +:: beet modify bad=1 christmas Then, you can query on this field in your path formats to sort this music -differently. Put something like this in your configuration file:: +differently. Put something like this in your configuration file: + +:: paths: bad:1: Bad/$artist/$title -Used together, flexible attributes and path format conditions let you sort -your music by any criteria you can imagine. - +Used together, flexible attributes and path format conditions let you sort your +music by any criteria you can imagine. Automatically add new music to your library ------------------------------------------- @@ -167,19 +174,16 @@ or the like. To use it this way, you might want to use these options in your quiet: yes log: /path/to/log.txt -The :ref:`incremental` option will skip importing any directories that have -been imported in the past. -:ref:`quiet` avoids asking you any questions (since this will be run -automatically, no input is possible). -You might also want to use the :ref:`quiet_fallback` options to configure -what should happen when no near-perfect match is found -- this option depends -on your level of paranoia. +The :ref:`incremental` option will skip importing any directories that have been +imported in the past. :ref:`quiet` avoids asking you any questions (since this +will be run automatically, no input is possible). You might also want to use the +:ref:`quiet_fallback` options to configure what should happen when no +near-perfect match is found -- this option depends on your level of paranoia. Finally, :ref:`import_log` will make beets record its decisions so you can come back later and see what you need to handle manually. -The last step is to set up cron or some other automation system to run -``beet import /path/to/incoming/music``. 
- +The last step is to set up cron or some other automation system to run ``beet +import /path/to/incoming/music``. Useful reports -------------- @@ -187,19 +191,25 @@ Useful reports Since beets has a quite powerful query tool, this list contains some useful and powerful queries to run on your library. -* See a list of all albums which have files which are 128 bit rate:: +- See a list of all albums which have files which are 128 bit rate: + + :: beet list bitrate:128000 -* See a list of all albums with the tracks listed in order of bit rate:: +- See a list of all albums with the tracks listed in order of bit rate: + + :: beet ls -f '$bitrate $artist - $title' bitrate+ -* See a list of albums and their formats:: +- See a list of albums and their formats: + + :: beet ls -f '$albumartist $album $format' | sort | uniq Note that ``beet ls --album -f '... $format'`` doesn't do what you want, - because ``format`` is an item-level field, not an album-level one. - If an album's tracks exist in multiple formats, the album will appear in the - list once for each format. + because ``format`` is an item-level field, not an album-level one. If an + album's tracks exist in multiple formats, the album will appear in the list + once for each format. diff --git a/docs/guides/index.rst b/docs/guides/index.rst index ff538eec6..0695e9ff8 100644 --- a/docs/guides/index.rst +++ b/docs/guides/index.rst @@ -6,8 +6,9 @@ with beets. If you're new to beets, you'll want to begin with the :doc:`main` guide. .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - main - tagger - advanced + main + installation + tagger + advanced diff --git a/docs/guides/installation.rst b/docs/guides/installation.rst new file mode 100644 index 000000000..bd634c4c5 --- /dev/null +++ b/docs/guides/installation.rst @@ -0,0 +1,179 @@ +Installation +============ + +Beets requires `Python 3.10 or later`_. You can install it using pipx_, pip_, +or a system package manager. + +..
_python 3.10 or later: https://python.org/download/ + +Using ``pipx`` or ``pip`` +------------------------- + +We recommend installing with pipx_ as it isolates beets and its dependencies +from your system Python and other Python packages. This helps avoid dependency +conflicts and keeps your system clean. + +.. <!-- start-quick-install --> + +.. tab-set:: + + .. tab-item:: pipx + + .. code-block:: console + + pipx install beets + + .. tab-item:: pip + + .. code-block:: console + + pip install beets + + .. tab-item:: pip (user install) + + .. code-block:: console + + pip install --user beets + +.. <!-- end-quick-install --> + +If you don't have pipx_ installed, you can follow the instructions on the `pipx +installation page`_ to get it set up. + +.. _pip: https://pip.pypa.io/en/ + +.. _pipx: https://pipx.pypa.io/stable + +.. _pipx installation page: https://pipx.pypa.io/stable/installation/ + +Using a Package Manager +----------------------- + +Depending on your operating system, you may be able to install beets using a +package manager. Here are some common options: + +.. attention:: + + Package manager installations may not provide the latest version of beets. + + Release cycles for package managers vary, and they may not always have the + most recent version of beets. If you want the latest features and fixes, + consider using pipx_ or pip_ as described above. + + Additionally, installing external beets plugins may be surprisingly + difficult when using a package manager. + +- On **Debian or Ubuntu**, depending on the version, beets is available as an + official package (`Debian details`_, `Ubuntu details`_), so try typing: + ``apt-get install beets``. But the version in the repositories might lag + behind, so make sure you read the right version of these docs. 
If you want the + latest version, you can get everything you need to install with pip as + described below by running: ``apt-get install python-dev python-pip`` +- On **Arch Linux**, `beets is in [extra] <arch extra_>`_, so just run ``pacman + -S beets``. (There's also a bleeding-edge `dev package <aur_>`_ in the AUR, + which will probably set your computer on fire.) +- On **Alpine Linux**, `beets is in the community repository <alpine package_>`_ + and can be installed with ``apk add beets``. +- On **Void Linux**, `beets is in the official repository <void package_>`_ and + can be installed with ``xbps-install -S beets``. +- For **Gentoo Linux**, beets is in Portage as ``media-sound/beets``. Just run + ``emerge beets`` to install. There are several USE flags available for + optional plugin dependencies. +- On **FreeBSD**, there's a `beets port <freebsd_>`_ at ``audio/beets``. +- On **OpenBSD**, there's a `beets port <openbsd_>`_ that can be installed with + ``pkg_add beets``. +- On **Fedora** 22 or later, there's a `DNF package`_ you can install with + ``sudo dnf install beets beets-plugins beets-doc``. +- On **Solus**, run ``eopkg install beets``. +- On **NixOS**, there's a `package <nixos_>`_ you can install with ``nix-env -i + beets``. +- Using **MacPorts**, run ``port install beets`` or ``port install beets-full`` + to include many third-party plugins. + +.. _alpine package: https://pkgs.alpinelinux.org/package/edge/community/x86_64/beets + +.. _arch extra: https://archlinux.org/packages/extra/any/beets/ + +.. _aur: https://aur.archlinux.org/packages/beets-git/ + +.. _debian details: https://tracker.debian.org/pkg/beets + +.. _dnf package: https://packages.fedoraproject.org/pkgs/beets/ + +.. _freebsd: http://portsmon.freebsd.org/portoverview.py?category=audio&portname=beets + +.. _nixos: https://github.com/NixOS/nixpkgs/tree/master/pkgs/tools/audio/beets + +.. _openbsd: http://openports.se/audio/beets + +..
_ubuntu details: https://launchpad.net/ubuntu/+source/beets + +.. _void package: https://github.com/void-linux/void-packages/tree/master/srcpkgs/beets + +Installation FAQ +---------------- + +MacOS Installation +~~~~~~~~~~~~~~~~~~ + +**Q: I'm getting permission errors on macOS. What should I do?** + +Due to System Integrity Protection on macOS 10.11+, you may need to install for +your user only: + +.. code-block:: console + + pip install --user beets + +You might need to also add ``~/Library/Python/3.x/bin`` to your ``$PATH``. + +Windows Installation +~~~~~~~~~~~~~~~~~~~~ + +**Q: What's the process for installing on Windows?** + +Installing beets on Windows can be tricky. Following these steps might help you +get it right: + +1. `Install Python`_ (check "Add Python to PATH" and skip to step 3) +2. Ensure Python is in your ``PATH`` (add if needed): + + - Settings → System → About → Advanced system settings → Environment + Variables + - Edit "PATH" and add: ``;C:\Python39;C:\Python39\Scripts`` + - *Guide:* `Adding Python to PATH + <https://realpython.com/add-python-to-path/>`__ + +3. Now install beets by running: ``pip install beets`` +4. You're all set! Type ``beet version`` in a new command prompt to verify the + installation. + +**Bonus: Windows Context Menu Integration** + +Windows users may also want to install a context menu item for importing files +into beets. Download the beets.reg_ file and open it in a text editor to make +sure the paths to Python match your system. Then double-click the file to add +the necessary keys to your registry. You can then right-click a directory and +choose "Import with beets". + +.. _beets.reg: https://github.com/beetbox/beets/blob/master/extra/beets.reg + +.. _install pip: https://pip.pypa.io/en/stable/installing/ + +..
_install python: https://python.org/download/ + +ARM Installation +~~~~~~~~~~~~~~~~ + +**Q: Can I run beets on a Raspberry Pi or other ARM device?** + +Yes, but with some considerations: Beets on ARM devices is not recommended for +Linux novices. If you are comfortable with troubleshooting tools like ``pip``, +``make``, and binary dependencies (e.g. ``ffmpeg`` and ``ImageMagick``), you +will be fine. We have `notes for ARM`_ and an `older ARM reference`_. Beets is +generally developed on x86-64 based devices, and most plugins target that +platform as well. + +.. _notes for arm: https://github.com/beetbox/beets/discussions/4910 + +.. _older arm reference: https://discourse.beets.io/t/diary-of-beets-on-arm-odroid-hc4-armbian/1993 diff --git a/docs/guides/main.rst b/docs/guides/main.rst index 255484dde..48b248927 100644 --- a/docs/guides/main.rst +++ b/docs/guides/main.rst @@ -1,300 +1,310 @@ Getting Started =============== -Welcome to `beets`_! This guide will help you begin using it to make your music -collection better. +Welcome to beets_! This guide will help you get started with improving and +organizing your music collection. .. _beets: https://beets.io/ -Installing ----------- +Quick Installation +------------------ -You will need Python. -Beets works on Python 3.8 or later. +Beets is distributed via PyPI_ and can be installed by most users with a single +command: -* **macOS** 11 (Big Sur) includes Python 3.8 out of the box. - You can opt for a more recent Python installing it via `Homebrew`_ - (``brew install python3``). - There's also a `MacPorts`_ port. Run ``port install beets`` or - ``port install beets-full`` to include many third-party plugins. +.. include:: installation.rst + :start-after: <!-- start-quick-install --> + :end-before: <!-- end-quick-install --> -* On **Debian or Ubuntu**, depending on the version, beets is available as an - official package (`Debian details`_, `Ubuntu details`_), so try typing: - ``apt-get install beets``.
But the version in the repositories might lag - behind, so make sure you read the right version of these docs. If you want - the latest version, you can get everything you need to install with pip - as described below by running: - ``apt-get install python-dev python-pip`` +.. admonition:: Need more installation options? -* On **Arch Linux**, `beets is in [extra] <Arch extra_>`_, so just run ``pacman -S - beets``. (There's also a bleeding-edge `dev package <AUR_>`_ in the AUR, which will - probably set your computer on fire.) + Having trouble with the commands above? Looking for package manager + instructions? See the :doc:`complete installation guide + </guides/installation>` for: -* On **Alpine Linux**, `beets is in the community repository <Alpine package_>`_ - and can be installed with ``apk add beets``. + - Operating system specific instructions + - Package manager options + - Troubleshooting help -* For **Gentoo Linux**, beets is in Portage as ``media-sound/beets``. Just run - ``emerge beets`` to install. There are several USE flags available for - optional plugin dependencies. +.. _pypi: https://pypi.org/project/beets/ -* On **FreeBSD**, there's a `beets port <FreeBSD_>`_ at ``audio/beets``. +Basic Configuration +------------------- -* On **OpenBSD**, there's a `beets port <OpenBSD_>`_ can be installed with ``pkg_add beets``. +Before using beets, you'll need a configuration file. This YAML file tells beets +where to store your music and how to organize it. -* For **Slackware**, there's a `SlackBuild`_ available. +While beets is highly configurable, you only need a few basic settings to get +started. -* On **Fedora** 22 or later, there's a `DNF package`_ you can install with ``sudo dnf install beets beets-plugins beets-doc``. +1. **Open the config file:** + .. code-block:: console -* On **Solus**, run ``eopkg install beets``. + beet config -e -* On **NixOS**, there's a `package <NixOS_>`_ you can install with ``nix-env -i beets``. 
+ This creates the file (if needed) and opens it in your default editor. + You can also find its location with ``beet config -p``. +2. **Add required settings:** + In the config file, set the ``directory`` option to the path where you + want beets to store your music files. Set the ``library`` option to the + path where you want beets to store its database file. -.. _DNF package: https://packages.fedoraproject.org/pkgs/beets/ -.. _SlackBuild: https://slackbuilds.org/repository/14.2/multimedia/beets/ -.. _FreeBSD: http://portsmon.freebsd.org/portoverview.py?category=audio&portname=beets -.. _AUR: https://aur.archlinux.org/packages/beets-git/ -.. _Debian details: https://tracker.debian.org/pkg/beets -.. _Ubuntu details: https://launchpad.net/ubuntu/+source/beets -.. _OpenBSD: http://openports.se/audio/beets -.. _Arch extra: https://archlinux.org/packages/extra/any/beets/ -.. _Alpine package: https://pkgs.alpinelinux.org/package/edge/community/x86_64/beets -.. _NixOS: https://github.com/NixOS/nixpkgs/tree/master/pkgs/tools/audio/beets -.. _MacPorts: https://www.macports.org + .. code-block:: yaml -If you have `pip`_, just say ``pip install beets`` (or ``pip install --user -beets`` if you run into permissions problems). + directory: ~/music + library: ~/data/musiclibrary.db +3. **Choose your import style** (pick one): + Beets offers flexible import strategies to match your workflow. Choose + one of the following approaches and put one of the following in your + config file: -To install without pip, download beets from `its PyPI page`_ and run ``python -setup.py install`` in the directory therein. + .. tab-set:: -.. _its PyPI page: https://pypi.org/project/beets/#files -.. _pip: https://pip.pypa.io + .. tab-item:: Copy Files (Default) -The best way to upgrade beets to a new version is by running ``pip install -U -beets``. You may want to follow `@b33ts`_ on Twitter to hear about progress on -new versions. 
+ This is the default configuration and assumes you want to start a new organized music folder (inside ``directory`` above). During import we will *copy* cleaned-up music into that empty folder. -.. _@b33ts: https://twitter.com/b33ts + .. code-block:: yaml -Installing by Hand on macOS 10.11 and Higher -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + import: + copy: yes # Copy files to new location -Starting with version 10.11 (El Capitan), macOS has a new security feature -called `System Integrity Protection`_ (SIP) that prevents you from modifying -some parts of the system. This means that some ``pip`` commands may fail with a -permissions error. (You probably *won't* run into this if you've installed -Python yourself with `Homebrew`_ or otherwise. You can also try `MacPorts`_.) -If this happens, you can install beets for the current user only by typing -``pip install --user beets``. If you do that, you might want to add -``~/Library/Python/3.6/bin`` to your ``$PATH``. + .. tab-item:: Move Files -.. _System Integrity Protection: https://support.apple.com/en-us/HT204899 -.. _Homebrew: https://brew.sh + Start with a new empty directory, but *move* new music in instead of copying it (saving disk space). -Installing on Windows -^^^^^^^^^^^^^^^^^^^^^ + .. code-block:: yaml -Installing beets on Windows can be tricky. Following these steps might help you -get it right: + import: + move: yes # Move files to new location -1. If you don't have it, `install Python`_ (you want at least Python 3.8). The - installer should give you the option to "add Python to PATH." Check this - box. If you do that, you can skip the next step. + .. tab-item:: Use Existing Structure -2. If you haven't done so already, set your ``PATH`` environment variable to - include Python and its scripts. To do so, open the "Settings" application, - then access the "System" screen, then access the "About" tab, and then hit - "Advanced system settings" located on the right side of the screen. 
This - should open the "System Properties" screen, then select the "Advanced" tab, - then hit the "Environmental Variables..." button, and then look for the PATH - variable in the table. Add the following to the end of the variable's value: - ``;C:\Python38;C:\Python38\Scripts``. You may need to adjust these paths to - point to your Python installation. + Keep your current directory structure; importing should never move or copy files but instead just correct the tags on music. Make sure to point ``directory`` at the place where your music is currently stored. -3. Now install beets by running: ``pip install beets`` + .. code-block:: yaml -4. You're all set! Type ``beet`` at the command prompt to make sure everything's - in order. + import: + copy: no # Use files in place -Windows users may also want to install a context menu item for importing files -into beets. Download the `beets.reg`_ file and open it in a text file to make -sure the paths to Python match your system. Then double-click the file add the -necessary keys to your registry. You can then right-click a directory and -choose "Import with beets". + .. tab-item:: Read-Only Mode -Because I don't use Windows myself, I may have missed something. If you have -trouble or you have more detail to contribute here, please direct it to -`the mailing list`_. + Keep everything exactly as-is; only track metadata in database. (Corrected tags will still be stored in beets' database, and you can use them to do renaming or tag changes later.) -.. _install Python: https://python.org/download/ -.. _beets.reg: https://github.com/beetbox/beets/blob/master/extra/beets.reg -.. _install pip: https://pip.pypa.io/en/stable/installing/ -.. _get-pip.py: https://bootstrap.pypa.io/get-pip.py + .. code-block:: yaml -Installing on ARM (Raspberry Pi and similar) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + import: + copy: no # Use files in place + write: no # Don't modify tags +4. 
**Add customization via plugins (optional):** + Beets comes with many plugins that extend its functionality. You can + enable plugins by adding a `plugins` section to your config file. -Beets on ARM devices is not recommended for Linux novices. If you are -comfortable with light troubleshooting in tools like ``pip``, ``make``, -and beets' command-line binary dependencies (e.g. ``ffmpeg`` and -``ImageMagick``), you will probably be okay on ARM devices like the -Raspberry Pi. We have `notes for ARM`_ and an `older ARM reference`_. -Beets is generally developed on x86-64 based devices, and most plugins -target that platform as well. + We recommend adding at least one :ref:`Autotagger Plugin + <autotagger_extensions>` to help with fetching metadata during import. + For getting started, :doc:`MusicBrainz </plugins/musicbrainz>` is a good + choice. -.. _notes for ARM: https://github.com/beetbox/beets/discussions/4910 -.. _older ARM reference: https://discourse.beets.io/t/diary-of-beets-on-arm-odroid-hc4-armbian/1993 + .. code-block:: yaml -Configuring ------------ + plugins: + - musicbrainz # Example plugin for fetching metadata + - ... other plugins you want ... -You'll want to set a few basic options before you start using beets. The -:doc:`configuration </reference/config>` is stored in a text file. You -can show its location by running ``beet config -p``, though it may not -exist yet. Run ``beet config -e`` to edit the configuration in your -favorite text editor. The file will start out empty, but here's good -place to start:: + You can find a list of available plugins in the :doc:`plugins index + </plugins/index>`. - directory: ~/music - library: ~/data/musiclibrary.db +.. _yaml: https://yaml.org/ -Change that first path to a directory where you'd like to keep your music. Then, -for ``library``, choose a good place to keep a database file that keeps an index -of your music. (The config's format is `YAML`_. 
You'll want to configure your -text editor to use spaces, not real tabs, for indentation. Also, ``~`` means -your home directory in these paths, even on Windows.) +To validate that you've set up your configuration and it is valid YAML, you can +type ``beet version`` to see a list of enabled plugins or ``beet config`` to get +a complete listing of your current configuration. -The default configuration assumes you want to start a new organized music folder -(that ``directory`` above) and that you'll *copy* cleaned-up music into that -empty folder using beets' ``import`` command (see below). But you can configure -beets to behave many other ways: +.. dropdown:: Minimal configuration -* Start with a new empty directory, but *move* new music in instead of copying - it (saving disk space). Put this in your config file:: + Here's a sample configuration file that includes the settings mentioned above: + + .. code-block:: yaml + + directory: ~/music + library: ~/data/musiclibrary.db import: - move: yes + move: yes # Move files to new location + # copy: no # Use files in place + # write: no # Don't modify tags -* Keep your current directory structure; importing should never move or copy - files but instead just correct the tags on music. Put the line ``copy: no`` - under the ``import:`` heading in your config file to disable any copying or - renaming. Make sure to point ``directory`` at the place where your music is - currently stored. + plugins: + - musicbrainz # Example plugin for fetching metadata + # - ... other plugins you want ... -* Keep your current directory structure and *do not* correct files' tags: leave - files completely unmodified on your disk. (Corrected tags will still be stored - in beets' database, and you can use them to do renaming or tag changes later.) - Put this in your config file:: + You can copy and paste this into your config file and modify it as needed. - import: - copy: no - write: no +.. admonition:: Ready for more? 
- to disable renaming and tag-writing. + For a complete reference of all configuration options, see the + :doc:`configuration reference </reference/config>`. -There are approximately six million other configuration options you can set -here, including the directory and file naming scheme. See -:doc:`/reference/config` for a full reference. +Importing Your Music +-------------------- -.. _YAML: https://yaml.org/ +Now you're ready to import your music into beets! -To check that you've set up your configuration how you want it, you can type -``beet version`` to see a list of enabled plugins or ``beet config`` to get a -complete listing of your current configuration. +.. important:: + + Importing can modify and move your music files. **Make sure you have a + recent backup** before proceeding. + +Choose Your Import Method +~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are two good ways to bring your *existing* library into the beets database. + +.. tab-set:: + + .. tab-item:: Autotag (Recommended) + + This method uses beets' autotagger to find canonical metadata for every album you import. It may take a while, especially for large libraries, and it's an interactive process. But it ensures all your songs' tags are exactly right from the get-go. + + .. code-block:: console + + beet import /a/chunk/of/my/library + + .. warning:: + + The point about speed bears repeating: using the autotagger on a large library can take a + very long time, and it's an interactive process. So set aside a good chunk of + time if you're going to go that route. + + We also recommend importing smaller batches of music at a time (e.g., a few albums) to make the process more manageable. For more on the interactive tagging + process, see :doc:`tagger`. -Importing Your Library ----------------------- + .. tab-item:: Quick Import -The next step is to import your music files into the beets library database.
-Because this can involve modifying files and moving them around, data loss is -always a possibility, so now would be a good time to make sure you have a -recent backup of all your music. We'll wait. + This method quickly brings all your files with all their current metadata into beets' database without any changes. It's really fast, but it doesn't clean up or correct any tags. -There are two good ways to bring your existing library into beets. You can -either: (a) quickly bring all your files with all their current metadata into -beets' database, or (b) use beets' highly-refined autotagger to find canonical -metadata for every album you import. Option (a) is really fast, but option (b) -makes sure all your songs' tags are exactly right from the get-go. The point -about speed bears repeating: using the autotagger on a large library can take a -very long time, and it's an interactive process. So set aside a good chunk of -time if you're going to go that route. For more on the interactive -tagging process, see :doc:`tagger`. + To use this method, run: -If you've got time and want to tag all your music right once and for all, do -this:: + .. code-block:: console - $ beet import /path/to/my/music + beet import --noautotag /my/huge/mp3/library -(Note that by default, this command will *copy music into the directory you -specified above*. If you want to use your current directory structure, set the -``import.copy`` config option.) To take the fast, -un-autotagged path, just say:: + The ``--noautotag`` / ``-A`` flag skips autotagging and uses your files' current metadata. - $ beet import -A /my/huge/mp3/library +.. admonition:: More Import Options -Note that you just need to add ``-A`` for "don't autotag". + The ``beet import`` command has many options to customize its behavior. For + a full list, type ``beet help import`` or see the :ref:`import command + reference <import-cmd>`. 
-Adding More Music ------------------ +Adding More Music Later +~~~~~~~~~~~~~~~~~~~~~~~ -If you've ripped or... otherwise obtained some new music, you can add it with -the ``beet import`` command, the same way you imported your library. Like so:: +When you acquire new music, use the same ``beet import`` command to add it to +your library: - $ beet import ~/some_great_album +.. code-block:: console -This will attempt to autotag the new album (interactively) and add it to your -library. There are, of course, more options for this command---just type ``beet -help import`` to see what's available. + beet import ~/new_totally_not_ripped_album + +This will apply the same autotagging process to your new additions. For +alternative import behaviors, consult the options mentioned above. Seeing Your Music ----------------- -If you want to query your music library, the ``beet list`` (shortened to ``beet -ls``) command is for you. You give it a :doc:`query string </reference/query>`, -which is formatted something like a Google search, and it gives you a list of -songs. Thus:: +Once you've imported music into beets, you'll want to explore and query your +library. Beets provides several commands for searching, browsing, and getting +statistics about your collection. + +Basic Searching +~~~~~~~~~~~~~~~ + +The ``beet list`` command (shortened to ``beet ls``) lets you search your music +library using :doc:`query string </reference/query>` similar to web searches: + +.. code-block:: console $ beet ls the magnetic fields The Magnetic Fields - Distortion - Three-Way - The Magnetic Fields - Distortion - California Girls + The Magnetic Fields - Dist The Magnetic Fields - Distortion - Old Fools + +.. code-block:: console + $ beet ls hissing gronlandic of Montreal - Hissing Fauna, Are You the Destroyer? - Gronlandic Edit + +.. 
code-block:: console + $ beet ls bird The Knife - The Knife - Bird The Mae Shi - Terrorbird - Revelation Six + +By default, search terms match against :ref:`common attributes <keywordquery>` +of songs, and multiple terms are combined with AND logic (a track must match +*all* criteria). + +Searching Specific Fields +~~~~~~~~~~~~~~~~~~~~~~~~~ + +To narrow a search term to a particular metadata field, prefix the term with the +field name followed by a colon. For example, ``album:bird`` searches for "bird" +only in the "album" field of your songs. For more details, see +:doc:`/reference/query/`. + +.. code-block:: console + $ beet ls album:bird The Mae Shi - Terrorbird - Revelation Six -By default, a search term will match any of a handful of :ref:`common -attributes <keywordquery>` of songs. -(They're -also implicitly joined by ANDs: a track must match *all* criteria in order to -match the query.) To narrow a search term to a particular metadata field, just -put the field before the term, separated by a : character. So ``album:bird`` -only looks for ``bird`` in the "album" field of your songs. (Need to know more? -:doc:`/reference/query/` will answer all your questions.) +This searches only the ``album`` field for the term ``bird``. -The ``beet list`` command also has an ``-a`` option, which searches for albums instead of songs:: +Searching for Albums +~~~~~~~~~~~~~~~~~~~~ + +The ``beet list`` command also has an ``-a`` option, which searches for albums +instead of songs: + +.. code-block:: console $ beet ls -a forever Bon Iver - For Emma, Forever Ago Freezepop - Freezepop Forever -There's also an ``-f`` option (for *format*) that lets you specify what gets displayed in the results of a search:: +Custom Output Formatting +~~~~~~~~~~~~~~~~~~~~~~~~ + +There's also an ``-f`` option (for *format*) that lets you specify what gets +displayed in the results of a search: + +.. 
code-block:: console $ beet ls -a forever -f "[$format] $album ($year) - $artist - $title" [MP3] For Emma, Forever Ago (2009) - Bon Iver - Flume [AAC] Freezepop Forever (2011) - Freezepop - Harebrained Scheme -In the format option, field references like `$format` and `$year` are filled -in with data from each result. You can see a full list of available fields by -running ``beet fields``. +In the format string, field references like ``$format``, ``$year``, ``$album``, +etc., are replaced with data from each result. -Beets also has a ``stats`` command, just in case you want to see how much music -you have:: +.. dropdown:: Available fields for formatting + + To see all available fields you can use in custom formats, run: + + .. code-block:: console + + beet fields + + This will display a comprehensive list of metadata fields available for your music. + +Library Statistics +~~~~~~~~~~~~~~~~~~ + +Beets can also show you statistics about your music collection: + +.. code-block:: console $ beet stats Tracks: 13019 @@ -303,29 +313,107 @@ you have:: Artists: 548 Albums: 1094 +.. admonition:: Ready for more advanced queries? + + The ``beet list`` command has many additional options for sorting, limiting + results, and more complex queries. For a complete reference, run: + + .. code-block:: console + + beet help list + + Or see the :ref:`list command reference <list-cmd>`. + Keep Playing ------------ -This is only the beginning of your long and prosperous journey with beets. To -keep learning, take a look at :doc:`advanced` for a sampling of what else -is possible. You'll also want to glance over the :doc:`/reference/cli` page -for a more detailed description of all of beets' functionality. (Like -deleting music! That's important.) +Congratulations! You've now mastered the basics of beets. But this is only the +beginning; beets has many more powerful features to explore. -Also, check out :doc:`beets' plugins </plugins/index>`.
The -real power of beets is in its extensibility---with plugins, beets can do almost -anything for your music collection. +Continue Your Learning Journey +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can always get help using the ``beet help`` command. The plain ``beet help`` -command lists all the available commands; then, for example, ``beet help -import`` gives more specific help about the ``import`` command. +*I was there to push people beyond what's expected of them.* -If you need more of a walkthrough, you can read an illustrated one `on the -beets blog <https://beets.io/blog/walkthrough.html>`_. +.. grid:: 2 + :gutter: 3 -Please let us know what you think of beets via `the discussion board`_ or -`Mastodon`_. + .. grid-item-card:: :octicon:`zap` Advanced Techniques + :link: advanced + :link-type: doc -.. _the mailing list: https://groups.google.com/group/beets-users -.. _the discussion board: https://github.com/beetbox/beets/discussions -.. _mastodon: https://fosstodon.org/@beets + Explore sophisticated beets workflows including: + + - Advanced tagging strategies + - Complex import scenarios + - Custom metadata management + - Workflow automation + + .. grid-item-card:: :octicon:`terminal` Command Reference + :link: /reference/cli + :link-type: doc + + Comprehensive guide to all beets commands: + + - Complete command syntax + - All available options + - Usage examples + - **Important operations like deleting music** + + .. grid-item-card:: :octicon:`plug` Plugin Ecosystem + :link: /plugins/index + :link-type: doc + + Discover beets' true power through plugins: + + - Metadata fetching from multiple sources + - Audio analysis and processing + - Streaming service integration + - Custom export formats + + .. 
grid-item-card:: :octicon:`question` Illustrated Walkthrough + :link: https://beets.io/blog/walkthrough.html + :link-type: url + + Visual, step-by-step guide covering: + + - Real-world import examples + - Screenshots of interactive tagging + - Common workflow patterns + - Troubleshooting tips + +.. admonition:: Need Help? + + Remember you can always use ``beet help`` to see all available commands, or + ``beet help [command]`` for detailed help on specific commands. + +Join the Community +~~~~~~~~~~~~~~~~~~ + +We'd love to hear about your experience with beets! + +.. grid:: 2 + :gutter: 2 + + .. grid-item-card:: :octicon:`comment-discussion` Discussion Board + :link: https://github.com/beetbox/beets/discussions + :link-type: url + + - Ask questions + - Share tips and tricks + - Discuss feature ideas + - Get help from other users + + .. grid-item-card:: :octicon:`git-pull-request` Developer Resources + :link: /dev/index + :link-type: doc + + - Contribute code + - Report issues + - Review pull requests + - Join development discussions + +.. admonition:: Found a Bug? + + If you encounter any issues, please report them on our `GitHub Issues page + <https://github.com/beetbox/beets/issues>`_. diff --git a/docs/guides/tagger.rst b/docs/guides/tagger.rst index bf1ecbd8a..f43c1608c 100644 --- a/docs/guides/tagger.rst +++ b/docs/guides/tagger.rst @@ -1,3 +1,5 @@ +.. _using-the-auto-tagger: + Using the Auto-Tagger ===================== @@ -39,7 +41,7 @@ directory and it imports the files into your library, tagging them as it goes beets currently makes about the music you import. In time, we'd like to remove all of these limitations. -* Your music should be organized by album into directories. That is, the tagger +- Your music should be organized by album into directories. That is, the tagger assumes that each album is in a single directory. 
These directories can be arbitrarily deep (like ``music/2010/hiphop/seattle/freshespresso/glamour``), but any directory with music files in it is interpreted as a separate album. @@ -48,36 +50,35 @@ all of these limitations. First, directories that look like separate parts of a *multi-disc album* are tagged together as a single release. If two adjacent albums have a common - prefix, followed by "disc," "disk," or "CD" and then a number, they are - tagged together. + prefix, followed by "disc," "disk," or "CD" and then a number, they are tagged + together. Second, if you have jumbled directories containing more than one album, you - can ask beets to split them apart for you based on their metadata. Use - either the ``--group-albums`` command-line flag or the *G* interactive - option described below. + can ask beets to split them apart for you based on their metadata. Use either + the ``--group-albums`` command-line flag or the *G* interactive option + described below. -* The music may have bad tags, but it's not completely untagged. This is - because beets by default infers tags based on existing metadata. But this is - not a hard and fast rule---there are a few ways to tag metadata-poor music: +- The music may have bad tags, but it's not completely untagged. This is because + beets by default infers tags based on existing metadata. But this is not a + hard and fast rule---there are a few ways to tag metadata-poor music: - * You can use the *E* or *I* options described below to search in - MusicBrainz for a specific album or song. - * The :doc:`Acoustid plugin </plugins/chroma>` extends the autotagger to - use acoustic fingerprinting to find information for arbitrary audio. - Install that plugin if you're willing to spend a little more CPU power - to get tags for unidentified albums. (But be aware that it does slow - down the process.) - * The :doc:`FromFilename plugin </plugins/fromfilename>` adds the ability - to guess tags from the filenames. 
Use this plugin if your tracks have - useful names (like "03 Call Me Maybe.mp3") but their tags don't reflect - that. + - You can use the *E* or *I* options described below to search in + MusicBrainz for a specific album or song. + - The :doc:`Acoustid plugin </plugins/chroma>` extends the autotagger to + use acoustic fingerprinting to find information for arbitrary audio. + Install that plugin if you're willing to spend a little more CPU power + to get tags for unidentified albums. (But be aware that it does slow + down the process.) + - The :doc:`FromFilename plugin </plugins/fromfilename>` adds the ability + to guess tags from the filenames. Use this plugin if your tracks have + useful names (like "03 Call Me Maybe.mp3") but their tags don't reflect + that. -* Currently, MP3, AAC, FLAC, ALAC, Ogg Vorbis, Monkey's Audio, WavPack, - Musepack, Windows Media, Opus, and AIFF files are supported. (Do you use - some other format? Please `file a feature request`_!) +- Currently, MP3, AAC, FLAC, ALAC, Ogg Vorbis, Monkey's Audio, WavPack, + Musepack, Windows Media, Opus, and AIFF files are supported. (Do you use some + other format? Please `file a feature request`_!) -.. _file a feature request: - https://github.com/beetbox/beets/issues/new?template=feature-request.md +.. _file a feature request: https://github.com/beetbox/beets/issues/new?template=feature-request.md Now that that's out of the way, let's tag some music. @@ -89,44 +90,35 @@ Options To import music, just say ``beet import MUSICDIR``. 
There are, of course, a few command-line options you should know: -* ``beet import -A``: don't try to autotag anything; just import files (this +- ``beet import -A``: don't try to autotag anything; just import files (this goes much faster than with autotagging enabled) - -* ``beet import -W``: when autotagging, don't write new tags to the files +- ``beet import -W``: when autotagging, don't write new tags to the files themselves (just keep the new metadata in beets' database) - -* ``beet import -C``: don't copy imported files to your music directory; leave +- ``beet import -C``: don't copy imported files to your music directory; leave them where they are - -* ``beet import -m``: move imported files to your music directory (overrides - the ``-c`` option) - -* ``beet import -l LOGFILE``: write a message to ``LOGFILE`` every time you skip +- ``beet import -m``: move imported files to your music directory (overrides the + ``-c`` option) +- ``beet import -l LOGFILE``: write a message to ``LOGFILE`` every time you skip an album or choose to take its tags "as-is" (see below) or the album is skipped as a duplicate; this lets you come back later and reexamine albums that weren't tagged successfully. Run ``beet import --from-logfile=LOGFILE`` rerun the importer on such paths from the logfile. - -* ``beet import -q``: quiet mode. Never prompt for input and, instead, +- ``beet import -q``: quiet mode. Never prompt for input and, instead, conservatively skip any albums that need your opinion. The ``-ql`` combination is recommended. - -* ``beet import -t``: timid mode, which is sort of the opposite of "quiet." The +- ``beet import -t``: timid mode, which is sort of the opposite of "quiet." The importer will ask your permission for everything it does, confirming even very good matches with a prompt. - -* ``beet import -p``: automatically resume an interrupted import. The importer +- ``beet import -p``: automatically resume an interrupted import. 
The importer keeps track of imports that don't finish completely (either due to a crash or because you stop them halfway through) and, by default, prompts you to decide whether to resume them. The ``-p`` flag automatically says "yes" to this question. Relatedly, ``-P`` flag automatically says "no." - -* ``beet import -s``: run in *singleton* mode, tagging individual tracks instead - of whole albums at a time. See the "as Tracks" choice below. This means you +- ``beet import -s``: run in *singleton* mode, tagging individual tracks instead + of whole albums at a time. See the "as Tracks" choice below. This means you can use ``beet import -AC`` to quickly add a bunch of files to your library without doing anything to them. - -* ``beet import -g``: assume there are multiple albums contained in each +- ``beet import -g``: assume there are multiple albums contained in each directory. The tracks contained a directory are grouped by album artist and album name and you will be asked to import each of these groups separately. See the "Group albums" choice below. @@ -134,7 +126,9 @@ command-line options you should know: Similarity ---------- -So you import an album into your beets library. It goes like this:: +So you import an album into your beets library. It goes like this: + +:: $ beet imp witchinghour Tagging: @@ -161,7 +155,9 @@ better at making the call than a computer. So it occasionally asks for help. Choices ------- -When beets needs your input about a match, it says something like this:: +When beets needs your input about a match, it says something like this: + +:: Tagging: Beirut - Lon Gisland @@ -173,36 +169,28 @@ When beets asks you this question, it wants you to enter one of the capital letters: A, M, S, U, T, G, E, I or B. That is, you can choose one of the following: -* *A*: Apply the suggested changes shown and move on. - -* *M*: Show more options. (See the Candidates section, below.) - -* *S*: Skip this album entirely and move on to the next one. 
- -* *U*: Import the album without changing any tags. This is a good option for +- *A*: Apply the suggested changes shown and move on. +- *M*: Show more options. (See the Candidates section, below.) +- *S*: Skip this album entirely and move on to the next one. +- *U*: Import the album without changing any tags. This is a good option for albums that aren't in the MusicBrainz database, like your friend's operatic faux-goth solo record that's only on two CD-Rs in the universe. - -* *T*: Import the directory as *singleton* tracks, not as an album. Choose this +- *T*: Import the directory as *singleton* tracks, not as an album. Choose this if the tracks don't form a real release---you just have one or more loner tracks that aren't a full album. This will temporarily flip the tagger into *singleton* mode, which attempts to match each track individually. - -* *G*: Group tracks in this directory by *album artist* and *album* and import +- *G*: Group tracks in this directory by *album artist* and *album* and import groups as albums. If the album artist for a track is not set then the artist is used to group that track. For each group importing proceeds as for directories. This is helpful if a directory contains multiple albums. - -* *E*: Enter an artist and album to use as a search in the database. Use this +- *E*: Enter an artist and album to use as a search in the database. Use this option if beets hasn't found any good options because the album is mistagged or untagged. - -* *I*: Enter a metadata backend ID to use as search in the database. Use this +- *I*: Enter a metadata backend ID to use as search in the database. Use this option to specify a backend entity (for example, a MusicBrainz release or recording) directly, by pasting its ID or the full URL. You can also specify several IDs by separating them by a space. - -* *B*: Cancel this import task altogether. No further albums will be tagged; +- *B*: Cancel this import task altogether. 
No further albums will be tagged; beets shuts down immediately. The next time you attempt to import the same directory, though, beets will ask you if you want to resume tagging where you left off. @@ -215,7 +203,9 @@ Candidates If you choose the M option, or if beets isn't very confident about any of the choices it found, it will present you with a list of choices (called -candidates), like so:: +candidates), like so: + +:: Finding tags for "Panther - Panther". Candidates: @@ -225,9 +215,9 @@ candidates), like so:: Here, you have many of the same options as before, but you can also enter a number to choose one of the options that beets has found. Don't worry about -guessing---beets will show you the proposed changes and ask you to confirm -them, just like the earlier example. As the prompt suggests, you can just hit -return to select the first candidate. +guessing---beets will show you the proposed changes and ask you to confirm them, +just like the earlier example. As the prompt suggests, you can just hit return +to select the first candidate. .. _guide-duplicates: @@ -235,7 +225,9 @@ Duplicates ---------- If beets finds an album or item in your library that seems to be the same as the -one you're importing, you may see a prompt like this:: +one you're importing, you may see a prompt like this: + +:: This album is already in the library! [S]kip new, Keep all, Remove old, Merge all? @@ -243,19 +235,17 @@ one you're importing, you may see a prompt like this:: Beets wants to keep you safe from duplicates, which can be a real pain, so you have four choices in this situation. You can skip importing the new music, choosing to keep the stuff you already have in your library; you can keep both -the old and the new music; you can remove the existing music and choose the -new stuff; or you can merge all the new and old tracks into a single album. 
-If you choose that "remove" option, any duplicates will be -removed from your library database---and, if the corresponding files are located -inside of your beets library directory, the files themselves will be deleted as -well. +the old and the new music; you can remove the existing music and choose the new +stuff; or you can merge all the new and old tracks into a single album. If you +choose that "remove" option, any duplicates will be removed from your library +database---and, if the corresponding files are located inside of your beets +library directory, the files themselves will be deleted as well. If you choose "merge", beets will try re-importing the existing and new tracks -as one bundle together. -This is particularly helpful when you have an album that's missing some tracks -and then want to import the remaining songs. -The importer will ask you the same questions as it would if you were importing -all tracks at once. +as one bundle together. This is particularly helpful when you have an album +that's missing some tracks and then want to import the remaining songs. The +importer will ask you the same questions as it would if you were importing all +tracks at once. If you choose to keep two identically-named albums, beets can avoid storing both in the same directory. See :ref:`aunique` for details. @@ -268,15 +258,16 @@ files, but can get confused when files don't have any metadata (or have wildly incorrect metadata). In this case, you need *acoustic fingerprinting*, a technology that identifies songs from the audio itself. With fingerprinting, beets can autotag files that have very bad or missing tags. The :doc:`"chroma" -plugin </plugins/chroma>`, distributed with beets, uses the `Chromaprint`_ open-source fingerprinting technology, but it's disabled by default. That's because -it's sort of tricky to install. See the :doc:`/plugins/chroma` page for a guide -to getting it set up. 
+plugin </plugins/chroma>`, distributed with beets, uses the Chromaprint_ +open-source fingerprinting technology, but it's disabled by default. That's +because it's sort of tricky to install. See the :doc:`/plugins/chroma` page for +a guide to getting it set up. Before you jump into acoustic fingerprinting with both feet, though, give beets a try without it. You may be surprised at how well metadata-based matching works. -.. _Chromaprint: https://acoustid.org/chromaprint +.. _chromaprint: https://acoustid.org/chromaprint Album Art, Lyrics, Genres and Such ---------------------------------- @@ -292,22 +283,31 @@ Missing Albums? --------------- If you're having trouble tagging a particular album with beets, check to make -sure the album is present in `the MusicBrainz database`_. You can search on +sure the album is present in `the MusicBrainz database`_. You can search on their site to make sure it's cataloged there. If not, anyone can edit MusicBrainz---so consider adding the data yourself. -.. _the MusicBrainz database: https://musicbrainz.org/ +.. _the musicbrainz database: https://musicbrainz.org/ + +If you receive a "No matching release found" message from the Auto-Tagger for an +album you know is present in MusicBrainz, check that musicbrainz is in the +plugin list. Until version v2.4.0_, the default metadata source for the +Auto-Tagger, the :doc:`musicbrainz plugin </plugins/musicbrainz>`, had to be +manually disabled. At present, if the plugin list is changed, musicbrainz needs +to be added to the plugin list in order to continue contributing results to +the Auto-Tagger. If you think beets is ignoring an album that's listed in MusicBrainz, please `file a bug report`_. .. _file a bug report: https://github.com/beetbox/beets/issues +.. _v2.4.0: https://github.com/beetbox/beets/releases/tag/v2.4.0 + I Hope That Makes Sense ----------------------- -If we haven't made the process clear, please post on `the discussion -board`_ and we'll try to improve this guide.
+If we haven't made the process clear, please post on `the discussion board`_ and +we'll try to improve this guide. -.. _the mailing list: https://groups.google.com/group/beets-users .. _the discussion board: https://github.com/beetbox/beets/discussions/ diff --git a/docs/index.rst b/docs/index.rst index 3ec408fd2..e9dd3b34f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,8 +1,8 @@ beets: the music geek's media organizer ======================================= -Welcome to the documentation for `beets`_, the media library management system -for obsessive music geeks. +Welcome to the documentation for beets_, the media library management system for +obsessive music geeks. If you're new to beets, begin with the :doc:`guides/main` guide. That guide walks you through installing beets, setting it up how you like it, and starting @@ -13,31 +13,31 @@ Then you can get a more detailed look at beets' features in the be interested in exploring the :doc:`plugins </plugins/index>`. If you still need help, you can drop by the ``#beets`` IRC channel on -Libera.Chat, drop by `the discussion board`_, send email to -`the mailing list`_, or `file a bug`_ in the issue tracker. Please let us know -where you think this documentation can be improved. +Libera.Chat, drop by `the discussion board`_ or `file a bug`_ in the issue +tracker. Please let us know where you think this documentation can be improved. .. _beets: https://beets.io/ -.. _the mailing list: https://groups.google.com/group/beets-users + .. _file a bug: https://github.com/beetbox/beets/issues + .. _the discussion board: https://github.com/beetbox/beets/discussions/ Contents -------- .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - guides/index - reference/index - plugins/index - faq - team - contributing - code_of_conduct - dev/index + guides/index + reference/index + plugins/index + faq + team + contributing + code_of_conduct + dev/index .. 
toctree:: - :maxdepth: 1 + :maxdepth: 1 - changelog + changelog diff --git a/docs/plugins/absubmit.rst b/docs/plugins/absubmit.rst index 25c176e51..262977c4f 100644 --- a/docs/plugins/absubmit.rst +++ b/docs/plugins/absubmit.rst @@ -2,10 +2,10 @@ AcousticBrainz Submit Plugin ============================ The ``absubmit`` plugin lets you submit acoustic analysis results to an -`AcousticBrainz`_ server. This plugin is now deprecated since the -AcousicBrainz project has been shut down. +AcousticBrainz_ server. This plugin is now deprecated since the AcousicBrainz +project has been shut down. -As an alternative the `beets-xtractor`_ plugin can be used. +As an alternative the beets-xtractor_ plugin can be used. Warning ------- @@ -16,37 +16,37 @@ The AcousticBrainz project has shut down. To use this plugin you must set the Installation ------------ -The ``absubmit`` plugin requires the `streaming_extractor_music`_ program -to run. Its source can be found on `GitHub`_, and while it is possible to -compile the extractor from source, AcousticBrainz would prefer if you used -their binary (see the AcousticBrainz `FAQ`_). +The ``absubmit`` plugin requires the streaming_extractor_music_ program to run. +Its source can be found on GitHub_, and while it is possible to compile the +extractor from source, AcousticBrainz would prefer if you used their binary (see +the AcousticBrainz FAQ_). Then, install ``beets`` with ``absubmit`` extra pip install "beets[absubmit]" -Lastly, enable the plugin in your configuration (see :ref:`using-plugins`). - +Lastly, enable the plugin in your configuration (see :ref:`using-plugins`). 
Submitting Data --------------- -To run the analysis program and upload its results, type:: +To run the analysis program and upload its results, type: + +:: beet absubmit [-f] [-d] [QUERY] By default, the command will only look for AcousticBrainz data when the tracks -don't already have it; the ``-f`` or ``--force`` switch makes it refetch -data even when it already exists. You can use the ``-d`` or ``--dry`` switch -to check which files will be analyzed, before you start a longer period -of processing. +don't already have it; the ``-f`` or ``--force`` switch makes it refetch data +even when it already exists. You can use the ``-d`` or ``--dry`` switch to check +which files will be analyzed, before you start a longer period of processing. -The plugin works on music with a MusicBrainz track ID attached. The plugin -will also skip music that the analysis tool doesn't support. -`streaming_extractor_music`_ currently supports files with the extensions -``mp3``, ``ogg``, ``oga``, ``flac``, ``mp4``, ``m4a``, ``m4r``, ``m4b``, -``m4p``, ``aac``, ``wma``, ``asf``, ``mpc``, ``wv``, ``spx``, ``tta``, -``3g2``, ``aif``, ``aiff`` and ``ape``. +The plugin works on music with a MusicBrainz track ID attached. The plugin will +also skip music that the analysis tool doesn't support. +streaming_extractor_music_ currently supports files with the extensions ``mp3``, +``ogg``, ``oga``, ``flac``, ``mp4``, ``m4a``, ``m4r``, ``m4b``, ``m4p``, +``aac``, ``wma``, ``asf``, ``mpc``, ``wv``, ``spx``, ``tta``, ``3g2``, ``aif``, +``aiff`` and ``ape``. Configuration ------------- @@ -54,25 +54,27 @@ Configuration To configure the plugin, make a ``absubmit:`` section in your configuration file. The available options are: -- **auto**: Analyze every file on import. Otherwise, you need to use the - ``beet absubmit`` command explicitly. - Default: ``no`` -- **extractor**: The absolute path to the `streaming_extractor_music`_ binary. +- **auto**: Analyze every file on import. 
Otherwise, you need to use the ``beet + absubmit`` command explicitly. Default: ``no`` +- **extractor**: The absolute path to the streaming_extractor_music_ binary. Default: search for the program in your ``$PATH`` - **force**: Analyze items and submit of AcousticBrainz data even for tracks - that already have it. - Default: ``no``. + that already have it. Default: ``no``. - **pretend**: Do not analyze and submit of AcousticBrainz data but print out - the items which would be processed. - Default: ``no``. + the items which would be processed. Default: ``no``. - **base_url**: The base URL of the AcousticBrainz server. The plugin has no - function if this option is not set. - Default: None + function if this option is not set. Default: None + +.. _acousticbrainz: https://acousticbrainz.org + +.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor + +.. _faq: https://acousticbrainz.org/faq + +.. _github: https://github.com/MTG/essentia + +.. _pip: https://pip.pypa.io + +.. _requests: https://requests.readthedocs.io/en/master/ .. _streaming_extractor_music: https://essentia.upf.edu/ -.. _FAQ: https://acousticbrainz.org/faq -.. _pip: https://pip.pypa.io -.. _requests: https://requests.readthedocs.io/en/master/ -.. _github: https://github.com/MTG/essentia -.. _AcousticBrainz: https://acousticbrainz.org -.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor diff --git a/docs/plugins/acousticbrainz.rst b/docs/plugins/acousticbrainz.rst index 3a053e123..7f2f9a534 100644 --- a/docs/plugins/acousticbrainz.rst +++ b/docs/plugins/acousticbrainz.rst @@ -2,51 +2,54 @@ AcousticBrainz Plugin ===================== The ``acousticbrainz`` plugin gets acoustic-analysis information from the -`AcousticBrainz`_ project. This plugin is now deprecated since the -AcousicBrainz project has been shut down. +AcousticBrainz_ project. This plugin is now deprecated since the AcousicBrainz +project has been shut down. 
-As an alternative the `beets-xtractor`_ plugin can be used. +As an alternative the beets-xtractor_ plugin can be used. + +.. _acousticbrainz: https://acousticbrainz.org/ -.. _AcousticBrainz: https://acousticbrainz.org/ .. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor -Enable the ``acousticbrainz`` plugin in your configuration (see :ref:`using-plugins`) and run it by typing:: +Enable the ``acousticbrainz`` plugin in your configuration (see +:ref:`using-plugins`) and run it by typing: + +:: $ beet acousticbrainz [-f] [QUERY] By default, the command will only look for AcousticBrainz data when the tracks doesn't already have it; the ``-f`` or ``--force`` switch makes it re-download data even when it already exists. If you specify a query, only matching tracks -will be processed; otherwise, the command processes every track in your -library. +will be processed; otherwise, the command processes every track in your library. -For all tracks with a MusicBrainz recording ID, the plugin currently sets -these fields: +For all tracks with a MusicBrainz recording ID, the plugin currently sets these +fields: -* ``average_loudness`` -* ``bpm`` -* ``chords_changes_rate`` -* ``chords_key`` -* ``chords_number_rate`` -* ``chords_scale`` -* ``danceable`` -* ``gender`` -* ``genre_rosamerica`` -* ``initial_key`` (This is a built-in beets field, which can also be provided - by :doc:`/plugins/keyfinder`.) -* ``key_strength`` -* ``mood_acoustic`` -* ``mood_aggressive`` -* ``mood_electronic`` -* ``mood_happy`` -* ``mood_party`` -* ``mood_relaxed`` -* ``mood_sad`` -* ``moods_mirex`` -* ``rhythm`` -* ``timbre`` -* ``tonal`` -* ``voice_instrumental`` +- ``average_loudness`` +- ``bpm`` +- ``chords_changes_rate`` +- ``chords_key`` +- ``chords_number_rate`` +- ``chords_scale`` +- ``danceable`` +- ``gender`` +- ``genre_rosamerica`` +- ``initial_key`` (This is a built-in beets field, which can also be provided by + :doc:`/plugins/keyfinder`.) 
+- ``key_strength`` +- ``mood_acoustic`` +- ``mood_aggressive`` +- ``mood_electronic`` +- ``mood_happy`` +- ``mood_party`` +- ``mood_relaxed`` +- ``mood_sad`` +- ``moods_mirex`` +- ``rhythm`` +- ``timbre`` +- ``tonal`` +- ``voice_instrumental`` Warning ------- @@ -57,10 +60,10 @@ The AcousticBrainz project has shut down. To use this plugin you must set the Automatic Tagging ----------------- -To automatically tag files using AcousticBrainz data during import, just -enable the ``acousticbrainz`` plugin (see :ref:`using-plugins`). When importing -new files, beets will query the AcousticBrainz API using MBID and -set the appropriate metadata. +To automatically tag files using AcousticBrainz data during import, just enable +the ``acousticbrainz`` plugin (see :ref:`using-plugins`). When importing new +files, beets will query the AcousticBrainz API using MBID and set the +appropriate metadata. Configuration ------------- @@ -68,13 +71,10 @@ Configuration To configure the plugin, make a ``acousticbrainz:`` section in your configuration file. The available options are: -- **auto**: Enable AcousticBrainz during ``beet import``. - Default: ``yes``. -- **force**: Download AcousticBrainz data even for tracks that already have - it. +- **auto**: Enable AcousticBrainz during ``beet import``. Default: ``yes``. +- **force**: Download AcousticBrainz data even for tracks that already have it. Default: ``no``. -- **tags**: Which tags from the list above to set on your files. - Default: [] (all). +- **tags**: Which tags from the list above to set on your files. Default: [] + (all). - **base_url**: The base URL of the AcousticBrainz server. The plugin has no - function if this option is not set. - Default: None + function if this option is not set. 
Default: None diff --git a/docs/plugins/advancedrewrite.rst b/docs/plugins/advancedrewrite.rst index e244be44b..d796d7ee9 100644 --- a/docs/plugins/advancedrewrite.rst +++ b/docs/plugins/advancedrewrite.rst @@ -1,37 +1,38 @@ Advanced Rewrite Plugin ======================= -The ``advancedrewrite`` plugin lets you easily substitute values -in your templates and path formats, similarly to the :doc:`/plugins/rewrite`. -It's recommended to read the documentation of that plugin first. +The ``advancedrewrite`` plugin lets you easily substitute values in your +templates and path formats, similarly to the :doc:`/plugins/rewrite`. It's +recommended to read the documentation of that plugin first. -The *advanced* rewrite plugin does not only support the simple rule format -of the ``rewrite`` plugin, but also an advanced format: -there, the plugin doesn't consider the value of the rewritten field, -but instead checks if the given item matches a :doc:`query </reference/query>`. -Only then, the field is replaced with the given value. -It's also possible to replace multiple fields at once, -and even supports multi-valued fields. +The *advanced* rewrite plugin does not only support the simple rule format of +the ``rewrite`` plugin, but also an advanced format: there, the plugin doesn't +consider the value of the rewritten field, but instead checks if the given item +matches a :doc:`query </reference/query>`. Only then, the field is replaced with +the given value. It's also possible to replace multiple fields at once, and even +supports multi-valued fields. To use advanced field rewriting, first enable the ``advancedrewrite`` plugin -(see :ref:`using-plugins`). -Then, make a ``advancedrewrite:`` section in your config file to contain -your rewrite rules. +(see :ref:`using-plugins`). Then, make a ``advancedrewrite:`` section in your +config file to contain your rewrite rules. 
In contrast to the normal ``rewrite`` plugin, you need to provide a list of -replacement rule objects, which can have a different syntax depending on -the rule complexity. +replacement rule objects, which can have a different syntax depending on the +rule complexity. -The simple syntax is the same as the one of the rewrite plugin and allows -to replace a single field:: +The simple syntax is the same as the one of the rewrite plugin and allows to +replace a single field: + +:: advancedrewrite: - artist ODD EYE CIRCLE: 이달의 소녀 오드아이써클 -The advanced syntax consists of a query to match against, as well as a map -of replacements to apply. -For example, to credit all songs of ODD EYE CIRCLE before 2023 -to their original group name, you can use the following rule:: +The advanced syntax consists of a query to match against, as well as a map of +replacements to apply. For example, to credit all songs of ODD EYE CIRCLE before +2023 to their original group name, you can use the following rule: + +:: advancedrewrite: - match: "mb_artistid:dec0f331-cb08-4c8e-9c9f-aeb1f0f6d88c year:..2022" @@ -39,10 +40,12 @@ to their original group name, you can use the following rule:: artist: 이달의 소녀 오드아이써클 artist_sort: LOONA / ODD EYE CIRCLE -Note how the sort name is also rewritten within the same rule. -You can specify as many fields as you'd like in the replacements map. +Note how the sort name is also rewritten within the same rule. You can specify +as many fields as you'd like in the replacements map. -If you need to work with multi-valued fields, you can use the following syntax:: +If you need to work with multi-valued fields, you can use the following syntax: + +:: advancedrewrite: - match: "artist:배유빈 feat. 김미현" @@ -55,10 +58,12 @@ As a convenience, the plugin applies patterns for the ``artist`` field to the ``albumartist`` field as well. (Otherwise, you would probably want to duplicate every rule for ``artist`` and ``albumartist``.) 
-Make sure to properly quote your query strings if they contain spaces, -otherwise they might not do what you expect, or even cause beets to crash. +Make sure to properly quote your query strings if they contain spaces, otherwise +they might not do what you expect, or even cause beets to crash. -Take the following example:: +Take the following example: + +:: advancedrewrite: # BAD, DON'T DO THIS! @@ -69,10 +74,12 @@ Take the following example:: On the first sight, this might look sane, and replace the artist of the album *THE ALBUM* with *New artist*. However, due to the space and missing quotes, this query will evaluate to ``album:THE`` and match ``ALBUM`` on any field, -including ``artist``. As ``artist`` is the field being replaced, -this query will result in infinite recursion and ultimately crash beets. +including ``artist``. As ``artist`` is the field being replaced, this query will +result in infinite recursion and ultimately crash beets. -Instead, you should use the following rule:: +Instead, you should use the following rule: + +:: advancedrewrite: # Note the quotes around the query string! @@ -81,11 +88,11 @@ Instead, you should use the following rule:: artist: New artist A word of warning: This plugin theoretically only applies to templates and path -formats; it initially does not modify files' metadata tags or the values -tracked by beets' library database, but since it *rewrites all field lookups*, -it modifies the file's metadata anyway. See comments in issue :bug:`2786`. +formats; it initially does not modify files' metadata tags or the values tracked +by beets' library database, but since it *rewrites all field lookups*, it +modifies the file's metadata anyway. See comments in issue :bug:`2786`. As an alternative to this plugin the simpler but less powerful -:doc:`/plugins/rewrite` can be used. -If you don't want to modify the item's metadata and only replace values -in file paths, you can check out the :doc:`/plugins/substitute`. 
+:doc:`/plugins/rewrite` can be used. If you don't want to modify the item's +metadata and only replace values in file paths, you can check out the +:doc:`/plugins/substitute`. diff --git a/docs/plugins/albumtypes.rst b/docs/plugins/albumtypes.rst index bf736abca..f3b1b7587 100644 --- a/docs/plugins/albumtypes.rst +++ b/docs/plugins/albumtypes.rst @@ -2,19 +2,19 @@ AlbumTypes Plugin ================= The ``albumtypes`` plugin adds the ability to format and output album types, -such as "Album", "EP", "Single", etc. For the list of available album types, -see the `MusicBrainz documentation`_. +such as "Album", "EP", "Single", etc. For the list of available album types, see +the `MusicBrainz documentation`_. -To use the ``albumtypes`` plugin, enable it in your configuration -(see :ref:`using-plugins`). The plugin defines a new field ``$atypes``, which -you can use in your path formats or elsewhere. +To use the ``albumtypes`` plugin, enable it in your configuration (see +:ref:`using-plugins`). The plugin defines a new field ``$atypes``, which you can +use in your path formats or elsewhere. -.. _MusicBrainz documentation: https://musicbrainz.org/doc/Release_Group/Type +.. _musicbrainz documentation: https://musicbrainz.org/doc/Release_Group/Type A bug introduced in beets 1.6.0 could have possibly imported broken data into -the ``albumtypes`` library field. Please follow the instructions `described -here <https://github.com/beetbox/beets/pull/4582#issuecomment-1445023493>`_ for -a sanity check and potential fix. :bug:`4528` +the ``albumtypes`` library field. Please follow the instructions `described here +<https://github.com/beetbox/beets/pull/4582#issuecomment-1445023493>`_ for a +sanity check and potential fix. :bug:`4528` Configuration ------------- @@ -30,7 +30,9 @@ file. The available options are: are often compilations. - **bracket**: Defines the brackets to enclose each album type in the output. 
-The default configuration looks like this:: +The default configuration looks like this: + +:: albumtypes: types: @@ -45,18 +47,22 @@ The default configuration looks like this:: Examples -------- -With path formats configured like:: + +With path formats configured like: + +:: paths: default: $albumartist/[$year]$atypes $album/... albumtype:soundtrack: Various Artists/$album [$year]$atypes/... comp: Various Artists/$album [$year]$atypes/... +The default plugin configuration generates paths that look like this, for +example: -The default plugin configuration generates paths that look like this, for example:: +:: Aphex Twin/[1993][EP][Remix] On Remixes Pink Floyd/[1995][Live] p·u·l·s·e Various Artists/20th Century Lullabies [1999] Various Artists/Ocean's Eleven [2001][OST] - diff --git a/docs/plugins/aura.rst b/docs/plugins/aura.rst index 49e2649b6..a29fa1952 100644 --- a/docs/plugins/aura.rst +++ b/docs/plugins/aura.rst @@ -1,13 +1,14 @@ AURA Plugin =========== -This plugin is a server implementation of the `AURA`_ specification using the -`Flask`_ framework. AURA is still a work in progress and doesn't yet have a -stable version, but this server should be kept up to date. You are advised to -read the :ref:`aura-issues` section. +This plugin is a server implementation of the AURA_ specification using the +Flask_ framework. AURA is still a work in progress and doesn't yet have a stable +version, but this server should be kept up to date. You are advised to read the +:ref:`aura-issues` section. -.. _AURA: https://auraspec.readthedocs.io -.. _Flask: https://palletsprojects.com/p/flask/ +.. _aura: https://auraspec.readthedocs.io + +.. _flask: https://palletsprojects.com/p/flask/ Install ------- @@ -17,18 +18,16 @@ To use the ``aura`` plugin, first enable it in your configuration (see pip install "beets[aura]" - Usage ----- -Use ``beet aura`` to start the AURA server. 
-By default Flask's built-in server is used, which will give a warning about -using it in a production environment. It is safe to ignore this warning if the -server will have only a few users. +Use ``beet aura`` to start the AURA server. By default Flask's built-in server +is used, which will give a warning about using it in a production environment. +It is safe to ignore this warning if the server will have only a few users. -Alternatively, you can use ``beet aura -d`` to start the server in -`development mode`_, which will reload the server every time the AURA plugin -file is changed. +Alternatively, you can use ``beet aura -d`` to start the server in `development +mode <https://flask.palletsprojects.com/en/1.1.x/server>`__, which will reload +the server every time the AURA plugin file is changed. You can specify the hostname and port number used by the server in your :doc:`configuration file </reference/config>`. For more detail see the @@ -39,50 +38,48 @@ then see :ref:`aura-external-server`. AURA is designed to separate the client and server functionality. This plugin provides the server but not the client, so unless you like looking at JSON you -will need a separate client. Currently the only client is `AURA Web Client`_. -In order to use a local browser client with ``file:///`` see :ref:`aura-cors`. +will need a separate client. Currently the only client is `AURA Web Client`_. In +order to use a local browser client with ``file:///`` see :ref:`aura-cors`. By default the API is served under http://127.0.0.1:8337/aura/. For example information about the track with an id of 3 can be obtained at http://127.0.0.1:8337/aura/tracks/3. -**Note the absence of a trailing slash**: -http://127.0.0.1:8337/aura/tracks/3/ returns a ``404 Not Found`` error. - -.. _development mode: https://flask.palletsprojects.com/en/1.1.x/server -.. 
_AURA Web Client: https://sr.ht/~callum/aura-web-client/ +**Note the absence of a trailing slash**: http://127.0.0.1:8337/aura/tracks/3/ +returns a ``404 Not Found`` error. +.. _aura web client: https://sr.ht/~callum/aura-web-client/ .. _configuration: Configuration ------------- -To configure the plugin, make an ``aura:`` section in your -configuration file. The available options are: +To configure the plugin, make an ``aura:`` section in your configuration file. +The available options are: - **host**: The server hostname. Set this to ``0.0.0.0`` to bind to all interfaces. Default: ``127.0.0.1``. -- **port**: The server port. - Default: ``8337``. +- **port**: The server port. Default: ``8337``. - **cors**: A YAML list of origins to allow CORS requests from (see - :ref:`aura-cors`, below). - Default: disabled. + :ref:`aura-cors`, below). Default: disabled. - **cors_supports_credentials**: Allow authenticated requests when using CORS. Default: disabled. - **page_limit**: The number of items responses should be truncated to if the client does not specify. Default ``500``. - .. _aura-cors: Cross-Origin Resource Sharing (CORS) ------------------------------------ -`CORS`_ allows browser clients to make requests to the AURA server. You should -set the ``cors`` configuration option to a YAML list of allowed origins. +`CORS <https://en.wikipedia.org/wiki/Cross-origin_resource_sharing>`__ allows +browser clients to make requests to the AURA server. You should set the ``cors`` +configuration option to a YAML list of allowed origins. -For example:: +For example: + +:: aura: cors: @@ -91,24 +88,21 @@ For example:: In order to use the plugin with a local browser client accessed using ``file:///`` you must include ``'null'`` in the list of allowed origins -(including quote marks):: +(including quote marks): + +:: aura: cors: - 'null' -Alternatively you use ``'*'`` to enable access from all origins. 
-Note that there are security implications if you set the origin to ``'*'``, -so please research this before using it. Note the use of quote marks when -allowing all origins. - -If the server is behind a proxy that uses credentials, you might want to set -the ``cors_supports_credentials`` configuration option to true to let -in-browser clients log in. Note that this option has not been tested, so it -may not work. - -.. _CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing +Alternatively you use ``'*'`` to enable access from all origins. Note that there +are security implications if you set the origin to ``'*'``, so please research +this before using it. Note the use of quote marks when allowing all origins. +If the server is behind a proxy that uses credentials, you might want to set the +``cors_supports_credentials`` configuration option to true to let in-browser +clients log in. Note that this option has not been tested, so it may not work. .. _aura-external-server: @@ -119,16 +113,16 @@ If you would like to use a different WSGI server (not Flask's built-in one), then you can! The ``beetsplug.aura`` module provides a WSGI callable called ``create_app()`` which can be used by many WSGI servers. -For example to run the AURA server using `gunicorn`_ use -``gunicorn 'beetsplug.aura:create_app()'``, or for `uWSGI`_ use -``uwsgi --http :8337 --module 'beetsplug.aura:create_app()'``. -Note that these commands just show how to use the AURA app and you would -probably use something a bit different in a production environment. Read the -relevant server's documentation to figure out what you need. +For example to run the AURA server using gunicorn_ use ``gunicorn +'beetsplug.aura:create_app()'``, or for uWSGI_ use ``uwsgi --http :8337 --module +'beetsplug.aura:create_app()'``. Note that these commands just show how to use +the AURA app and you would probably use something a bit different in a +production environment. 
Read the relevant server's documentation to figure out +what you need. .. _gunicorn: https://gunicorn.org -.. _uWSGI: https://uwsgi-docs.readthedocs.io +.. _uwsgi: https://uwsgi-docs.readthedocs.io Reverse Proxy Support --------------------- @@ -137,6 +131,8 @@ The plugin should work behind a reverse proxy without further configuration, however this has not been tested extensively. For details of what headers must be rewritten and a sample NGINX configuration see `Flask proxy setups`_. +.. _flask proxy setups: https://flask.palletsprojects.com/en/1.1.x/deploying/wsgi-standalone/#proxy-setups + It is (reportedly) possible to run the application under a URL prefix (for example so you could have ``/foo/aura/server`` rather than ``/aura/server``), but you'll have to work it out for yourself :-) @@ -145,9 +141,6 @@ If using NGINX, do **not** add a trailing slash (``/``) to the URL where the application is running, otherwise you will get a 404. However if you are using Apache then you **should** add a trailing slash. -.. _Flask proxy setups: https://flask.palletsprojects.com/en/1.1.x/deploying/wsgi-standalone/#proxy-setups - - .. _aura-issues: Issues @@ -160,26 +153,26 @@ implementation: multiple ``filter`` parameters as AND. See `issue #19`_ for discussion. - The ``bitrate`` parameter used for content negotiation is not supported. Adding support for this is doable, but the way Flask handles acceptable MIME - types means it's a lot easier not to bother with it. This means an error - could be returned even if no transcoding was required. + types means it's a lot easier not to bother with it. This means an error could + be returned even if no transcoding was required. It is possible that some attributes required by AURA could be absent from the server's response if beets does not have a saved value for them. However, this has not happened so far. 
-Beets fields (including flexible fields) that do not have an AURA equivalent -are not provided in any resource's attributes section, however these fields may -be used for filtering. +Beets fields (including flexible fields) that do not have an AURA equivalent are +not provided in any resource's attributes section, however these fields may be +used for filtering. The ``mimetype`` and ``framecount`` attributes for track resources are not -supported. The first is due to beets storing the file type (e.g. ``MP3``), so -it is hard to filter by MIME type. The second is because there is no -corresponding beets field. +supported. The first is due to beets storing the file type (e.g. ``MP3``), so it +is hard to filter by MIME type. The second is because there is no corresponding +beets field. Artists are defined by the ``artist`` field on beets Items, which means some -albums have no ``artists`` relationship. Albums only have related artists -when their beets ``albumartist`` field is the same as the ``artist`` field on -at least one of it's constituent tracks. +albums have no ``artists`` relationship. Albums only have related artists when +their beets ``albumartist`` field is the same as the ``artist`` field on at +least one of it's constituent tracks. The only art tracked by beets is a single cover image, so only albums have related images at the moment. This could be expanded to looking in the same diff --git a/docs/plugins/autobpm.rst b/docs/plugins/autobpm.rst index 53908c517..00f02d6a8 100644 --- a/docs/plugins/autobpm.rst +++ b/docs/plugins/autobpm.rst @@ -1,10 +1,10 @@ AutoBPM Plugin ============== -The `autobpm` plugin uses the `Librosa`_ library to calculate the BPM -of a track from its audio data and store it in the `bpm` field of your -database. It does so automatically when importing music or through -the ``beet autobpm [QUERY]`` command. 
+The ``autobpm`` plugin uses the Librosa_ library to calculate the BPM of a track +from its audio data and store it in the ``bpm`` field of your database. It does +so automatically when importing music or through the ``beet autobpm [QUERY]`` +command. Install ------- @@ -19,17 +19,15 @@ To use the ``autobpm`` plugin, first enable it in your configuration (see Configuration ------------- -To configure the plugin, make a ``autobpm:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``autobpm:`` section in your configuration file. +The available options are: -- **auto**: Analyze every file on import. - Otherwise, you need to use the ``beet autobpm`` command explicitly. - Default: ``yes`` -- **overwrite**: Calculate a BPM even for files that already have a - `bpm` value. - Default: ``no``. +- **auto**: Analyze every file on import. Otherwise, you need to use the ``beet + autobpm`` command explicitly. Default: ``yes`` +- **overwrite**: Calculate a BPM even for files that already have a ``bpm`` + value. Default: ``no``. - **beat_track_kwargs**: Any extra keyword arguments that you would like to - provide to librosa's `beat_track`_ function call, for example: + provide to librosa's beat_track_ function call, for example: .. code-block:: yaml @@ -37,5 +35,6 @@ configuration file. The available options are: beat_track_kwargs: start_bpm: 160 -.. _Librosa: https://github.com/librosa/librosa/ .. _beat_track: https://librosa.org/doc/latest/generated/librosa.beat.beat_track.html + +.. _librosa: https://github.com/librosa/librosa/ diff --git a/docs/plugins/badfiles.rst b/docs/plugins/badfiles.rst index 796f991e1..8f496cfce 100644 --- a/docs/plugins/badfiles.rst +++ b/docs/plugins/badfiles.rst @@ -11,10 +11,12 @@ First, enable the ``badfiles`` plugin (see :ref:`using-plugins`). 
The default configuration defines the following default checkers, which you may need to install yourself: -* `mp3val`_ for MP3 files -* `FLAC`_ command-line tools for FLAC files +- mp3val_ for MP3 files +- FLAC_ command-line tools for FLAC files -You can also add custom commands for a specific extension, like this:: +You can also add custom commands for a specific extension, like this: + +:: badfiles: check_on_import: yes @@ -26,26 +28,33 @@ Custom commands will be run once for each file of the specified type, with the path to the file as the last argument. Commands must return a status code greater than zero for a file to be considered corrupt. -You can run the checkers when importing files by using the `check_on_import` +You can run the checkers when importing files by using the ``check_on_import`` option. When on, checkers will be run against every imported file and warnings and errors will be presented when selecting a tagging option. -.. _mp3val: http://mp3val.sourceforge.net/ .. _flac: https://xiph.org/flac/ +.. _mp3val: http://mp3val.sourceforge.net/ + Using ----- Type ``beet bad`` with a query according to beets' usual query syntax. For -instance, this will run a check on all songs containing the word "wolf":: +instance, this will run a check on all songs containing the word "wolf": + +:: beet bad wolf -This one will run checks on a specific album:: +This one will run checks on a specific album: + +:: beet bad album_id:1234 -Here is an example where the FLAC decoder signals a corrupt file:: +Here is an example where the FLAC decoder signals a corrupt file: + +:: beet bad title::^$ /tank/Music/__/00.flac: command exited with status 1 @@ -54,10 +63,10 @@ Here is an example where the FLAC decoder signals a corrupt file:: state = FLAC__STREAM_DECODER_READ_FRAME Note that the default ``mp3val`` checker is a bit verbose and can output a lot -of "stream error" messages, even for files that play perfectly well. 
-Generally, if more than one stream error happens, or if a stream error happens -in the middle of a file, this is a bad sign. +of "stream error" messages, even for files that play perfectly well. Generally, +if more than one stream error happens, or if a stream error happens in the +middle of a file, this is a bad sign. By default, only errors for the bad files will be shown. In order for the -results for all of the checked files to be seen, including the uncorrupted -ones, use the ``-v`` or ``--verbose`` option. +results for all of the checked files to be seen, including the uncorrupted ones, +use the ``-v`` or ``--verbose`` option. diff --git a/docs/plugins/bareasc.rst b/docs/plugins/bareasc.rst index 0c8d6636c..0a9c75fad 100644 --- a/docs/plugins/bareasc.rst +++ b/docs/plugins/bareasc.rst @@ -1,15 +1,17 @@ Bare-ASCII Search Plugin ======================== -The ``bareasc`` plugin provides a prefixed query that searches your library using -simple ASCII character matching, with accented characters folded to their base -ASCII character. This can be useful if you want to find a track with accented -characters in the title or artist, particularly if you are not confident -you have the accents correct. It is also not unknown for the accents +The ``bareasc`` plugin provides a prefixed query that searches your library +using simple ASCII character matching, with accented characters folded to their +base ASCII character. This can be useful if you want to find a track with +accented characters in the title or artist, particularly if you are not +confident you have the accents correct. It is also not unknown for the accents to not be correct in the database entry or wrong in the CD information. -First, enable the plugin named ``bareasc`` (see :ref:`using-plugins`). -You'll then be able to use the ``#`` prefix to use bare-ASCII matching:: +First, enable the plugin named ``bareasc`` (see :ref:`using-plugins`). 
You'll +then be able to use the ``#`` prefix to use bare-ASCII matching: + +:: $ beet ls '#dvorak' István Kertész - REQUIEM - Dvořàk: Requiem, op.89 - Confutatis maledictis @@ -17,13 +19,16 @@ You'll then be able to use the ``#`` prefix to use bare-ASCII matching:: Command ------- -In addition to the query prefix, the plugin provides a utility ``bareasc`` command. -This command is **exactly** the same as the ``beet list`` command except that -the output is passed through the bare-ASCII transformation before being printed. -This allows you to easily check what the library data looks like in bare ASCII, -which can be useful if you are trying to work out why a query is not matching. +In addition to the query prefix, the plugin provides a utility ``bareasc`` +command. This command is **exactly** the same as the ``beet list`` command +except that the output is passed through the bare-ASCII transformation before +being printed. This allows you to easily check what the library data looks like +in bare ASCII, which can be useful if you are trying to work out why a query is +not matching. -Using the same example track as above:: +Using the same example track as above: + +:: $ beet bareasc 'Dvořàk' Istvan Kertesz - REQUIEM - Dvorak: Requiem, op.89 - Confutatis maledictis @@ -37,33 +42,34 @@ Notes If the query string is all in lower case, the comparison ignores case as well as accents. -The default ``bareasc`` prefix (``#``) is used as a comment character in some shells -so may need to be protected (for example in quotes) when typed into the command line. +The default ``bareasc`` prefix (``#``) is used as a comment character in some +shells so may need to be protected (for example in quotes) when typed into the +command line. -The bare ASCII transliteration is quite simple. It may not give the expected output -for all languages. For example, German u-umlaut ``ü`` is transformed into ASCII ``u``, -not into ``ue``. +The bare ASCII transliteration is quite simple. 
It may not give the expected +output for all languages. For example, German u-umlaut ``ü`` is transformed into +ASCII ``u``, not into ``ue``. -The bare ASCII transformation also changes Unicode punctuation like double quotes, -apostrophes and even some hyphens. It is often best to leave out punctuation -in the queries. Note that the punctuation changes are often not even visible -with normal terminal fonts. You can always use the ``bareasc`` command to print the -transformed entries and use a command like ``diff`` to compare with the output -from the ``list`` command. +The bare ASCII transformation also changes Unicode punctuation like double +quotes, apostrophes and even some hyphens. It is often best to leave out +punctuation in the queries. Note that the punctuation changes are often not even +visible with normal terminal fonts. You can always use the ``bareasc`` command +to print the transformed entries and use a command like ``diff`` to compare with +the output from the ``list`` command. Configuration ------------- -To configure the plugin, make a ``bareasc:`` section in your configuration -file. The only available option is: +To configure the plugin, make a ``bareasc:`` section in your configuration file. +The only available option is: -- **prefix**: The character used to designate bare-ASCII queries. - Default: ``#``, which may need to be escaped in some shells. +- **prefix**: The character used to designate bare-ASCII queries. Default: + ``#``, which may need to be escaped in some shells. Credits ------- -The hard work in this plugin is done in Sean Burke's -`Unidecode <https://pypi.org/project/Unidecode/>`__ library. -Thanks are due to Sean and to all the people who created the Python -version and the beets extensible query architecture. +The hard work in this plugin is done in Sean Burke's `Unidecode +<https://pypi.org/project/Unidecode/>`__ library. 
Thanks are due to Sean and to +all the people who created the Python version and the beets extensible query +architecture. diff --git a/docs/plugins/beatport.rst b/docs/plugins/beatport.rst index bc10f02a4..4e7569a1b 100644 --- a/docs/plugins/beatport.rst +++ b/docs/plugins/beatport.rst @@ -1,15 +1,16 @@ Beatport Plugin =============== -The ``beatport`` plugin adds support for querying the `Beatport`_ catalogue -during the autotagging process. This can potentially be helpful for users -whose collection includes a lot of diverse electronic music releases, for which -both MusicBrainz and (to a lesser degree) `Discogs`_ show no matches. +The ``beatport`` plugin adds support for querying the Beatport_ catalogue during +the autotagging process. This can potentially be helpful for users whose +collection includes a lot of diverse electronic music releases, for which both +MusicBrainz and (to a lesser degree) Discogs_ show no matches. -.. _Discogs: https://discogs.com +.. _discogs: https://discogs.com Installation ------------ + To use the ``beatport`` plugin, first enable it in your configuration (see :ref:`using-plugins`). Then, install ``beets`` with ``beatport`` extra @@ -17,27 +18,29 @@ To use the ``beatport`` plugin, first enable it in your configuration (see pip install "beets[beatport]" -You will also need to register for a `Beatport`_ account. The first time you -run the :ref:`import-cmd` command after enabling the plugin, it will ask you -to authorize with Beatport by visiting the site in a browser. On the site -you will be asked to enter your username and password to authorize beets -to query the Beatport API. You will then be displayed with a single line of -text that you should paste as a whole into your terminal. This will store the -authentication data for subsequent runs and you will not be required to repeat -the above steps. +You will also need to register for a Beatport_ account. 
The first time you run +the :ref:`import-cmd` command after enabling the plugin, it will ask you to +authorize with Beatport by visiting the site in a browser. On the site you will +be asked to enter your username and password to authorize beets to query the +Beatport API. You will then be displayed with a single line of text that you +should paste as a whole into your terminal. This will store the authentication +data for subsequent runs and you will not be required to repeat the above steps. -Matches from Beatport should now show up alongside matches -from MusicBrainz and other sources. +Matches from Beatport should now show up alongside matches from MusicBrainz and +other sources. If you have a Beatport ID or a URL for a release or track you want to tag, you can just enter one of the two at the "enter Id" prompt in the importer. You can -also search for an id like so:: +also search for an id like so: + +:: beet import path/to/music/library --search-id id Configuration ------------- -This plugin can be configured like other metadata source plugins as described in :ref:`metadata-source-plugin-configuration`. +This plugin can be configured like other metadata source plugins as described in +:ref:`metadata-source-plugin-configuration`. -.. _Beatport: https://www.beatport.com/ +.. _beatport: https://www.beatport.com/ diff --git a/docs/plugins/bpd.rst b/docs/plugins/bpd.rst index c7ac350df..dc0bd92b2 100644 --- a/docs/plugins/bpd.rst +++ b/docs/plugins/bpd.rst @@ -3,23 +3,25 @@ BPD Plugin BPD is a music player using music from a beets library. It runs as a daemon and implements the MPD protocol, so it's compatible with all the great MPD clients -out there. I'm using `Theremin`_, `gmpc`_, `Sonata`_, and `Ario`_ successfully. +out there. I'm using Theremin_, gmpc_, Sonata_, and Ario_ successfully. + +.. _ario: http://ario-player.sourceforge.net/ -.. _Theremin: https://github.com/TheStalwart/Theremin .. _gmpc: https://gmpc.wikia.com/wiki/Gnome_Music_Player_Client -.. 
_Sonata: http://sonata.berlios.de/ -.. _Ario: http://ario-player.sourceforge.net/ + +.. _sonata: http://sonata.berlios.de/ + +.. _theremin: https://github.com/TheStalwart/Theremin Dependencies ------------ -Before you can use BPD, you'll need the media library called `GStreamer`_ (along +Before you can use BPD, you'll need the media library called GStreamer_ (along with its Python bindings) on your system. -* On Mac OS X, you can use `Homebrew`_. Run ``brew install gstreamer +- On Mac OS X, you can use Homebrew_. Run ``brew install gstreamer gst-plugins-base pygobject3``. - -* On Linux, you need to install GStreamer 1.0 and the GObject bindings for +- On Linux, you need to install GStreamer 1.0 and the GObject bindings for python. Under Ubuntu, they are called ``python-gi`` and ``gstreamer1.0``. You will also need the various GStreamer plugin packages to make everything @@ -33,15 +35,17 @@ extra which installs Python bindings for ``GStreamer``: pip install "beets[bpd]" -.. _GStreamer: https://gstreamer.freedesktop.org/download -.. _Homebrew: https://brew.sh +.. _gstreamer: https://gstreamer.freedesktop.org/download + +.. _homebrew: https://brew.sh Usage ----- To use the ``bpd`` plugin, first enable it in your configuration (see -:ref:`using-plugins`). -Then, you can run BPD by invoking:: +:ref:`using-plugins`). Then, you can run BPD by invoking: + +:: $ beet bpd @@ -50,15 +54,12 @@ long list of available clients`_. Here are my favorites: .. _a long list of available clients: https://mpd.wikia.com/wiki/Clients -* Linux: `gmpc`_, `Sonata`_ +- Linux: gmpc_, Sonata_ +- Mac: Theremin_ +- Windows: I don't know. Get in touch if you have a recommendation. +- iPhone/iPod touch: Rigelian_ -* Mac: `Theremin`_ - -* Windows: I don't know. Get in touch if you have a recommendation. - -* iPhone/iPod touch: `Rigelian`_ - -.. _Rigelian: https://www.rigelian.net/ +.. 
_rigelian: https://www.rigelian.net/ One nice thing about MPD's (and thus BPD's) client-server architecture is that the client can just as easily on a different computer from the server as it can @@ -68,21 +69,18 @@ on your headless server box. Rad! Configuration ------------- -To configure the plugin, make a ``bpd:`` section in your configuration file. -The available options are: +To configure the plugin, make a ``bpd:`` section in your configuration file. The +available options are: -- **host**: - Default: Bind to all interfaces. -- **port**: - Default: 6600 -- **password**: - Default: No password. -- **volume**: Initial volume, as a percentage. - Default: 100 -- **control_port**: Port for the internal control socket. - Default: 6601 +- **host**: Default: Bind to all interfaces. +- **port**: Default: 6600 +- **password**: Default: No password. +- **volume**: Initial volume, as a percentage. Default: 100 +- **control_port**: Port for the internal control socket. Default: 6601 -Here's an example:: +Here's an example: + +:: bpd: host: 127.0.0.1 @@ -93,49 +91,49 @@ Here's an example:: Implementation Notes -------------------- -In the real MPD, the user can browse a music directory as it appears on disk. -In beets, we like to abstract away from the directory structure. Therefore, BPD +In the real MPD, the user can browse a music directory as it appears on disk. In +beets, we like to abstract away from the directory structure. Therefore, BPD creates a "virtual" directory structure (artist/album/track) to present to -clients. This is static for now and cannot be reconfigured like the real -on-disk directory structure can. (Note that an obvious solution to this is just -string matching on items' destination, but this requires examining the entire -library Python-side for every query.) +clients. This is static for now and cannot be reconfigured like the real on-disk +directory structure can. 
(Note that an obvious solution to this is just string +matching on items' destination, but this requires examining the entire library +Python-side for every query.) -BPD plays music using GStreamer's ``playbin`` player, which has a simple API -but doesn't support many advanced playback features. +BPD plays music using GStreamer's ``playbin`` player, which has a simple API but +doesn't support many advanced playback features. Differences from the real MPD ----------------------------- BPD currently supports version 0.16 of `the MPD protocol`_, but several of the commands and features are "pretend" implementations or have slightly different -behaviour to their MPD equivalents. BPD aims to look enough like MPD that it -can interact with the ecosystem of clients, but doesn't try to be -a fully-fledged MPD replacement in terms of its playback capabilities. +behaviour to their MPD equivalents. BPD aims to look enough like MPD that it can +interact with the ecosystem of clients, but doesn't try to be a fully-fledged +MPD replacement in terms of its playback capabilities. -.. _the MPD protocol: https://www.musicpd.org/doc/protocol/ +.. _the mpd protocol: https://www.musicpd.org/doc/protocol/ These are some of the known differences between BPD and MPD: -* BPD doesn't currently support versioned playlists. Many clients, however, use +- BPD doesn't currently support versioned playlists. Many clients, however, use plchanges instead of playlistinfo to get the current playlist, so plchanges contains a dummy implementation that just calls playlistinfo. -* Stored playlists aren't supported (BPD understands the commands though). -* The ``stats`` command always send zero for ``playtime``, which is supposed to +- Stored playlists aren't supported (BPD understands the commands though). +- The ``stats`` command always sends zero for ``playtime``, which is supposed to indicate the amount of time the server has spent playing music. BPD doesn't currently keep track of this. 
-* The ``update`` command regenerates the directory tree from the beets database +- The ``update`` command regenerates the directory tree from the beets database synchronously, whereas MPD does this in the background. -* Advanced playback features like cross-fade, ReplayGain and MixRamp are not +- Advanced playback features like cross-fade, ReplayGain and MixRamp are not supported due to BPD's simple audio player backend. -* Advanced query syntax is not currently supported. -* Clients can't use the ``tagtypes`` mask to hide fields. -* BPD's ``random`` mode is not deterministic and doesn't support priorities. -* Mounts and streams are not supported. BPD can only play files from disk. -* Stickers are not supported (although this is basically a flexattr in beets +- Advanced query syntax is not currently supported. +- Clients can't use the ``tagtypes`` mask to hide fields. +- BPD's ``random`` mode is not deterministic and doesn't support priorities. +- Mounts and streams are not supported. BPD can only play files from disk. +- Stickers are not supported (although this is basically a flexattr in beets nomenclature so this is feasible to add). -* There is only a single password, and is enabled it grants access to all +- There is only a single password, and if enabled it grants access to all features rather than having permissions-based granularity. -* Partitions and alternative outputs are not supported; BPD can only play one +- Partitions and alternative outputs are not supported; BPD can only play one song at a time. -* Client channels are not implemented. +- Client channels are not implemented. diff --git a/docs/plugins/bpm.rst b/docs/plugins/bpm.rst index 012c3903c..249f8f767 100644 --- a/docs/plugins/bpm.rst +++ b/docs/plugins/bpm.rst @@ -10,17 +10,21 @@ Usage ----- To use the ``bpm`` plugin, first enable it in your configuration (see :ref:`using-plugins`). 
-Then, play a song you want to measure in your favorite media player and type:: +Then, play a song you want to measure in your favorite media player and type: - beet bpm <song> +:: + + beet bpm <song> You'll be prompted to press Enter three times to the rhythm. This typically allows to determine the BPM within 5% accuracy. -The plugin works best if you wrap it in a script that gets the playing song. -for instance, with ``mpc`` you can do something like:: +The plugin works best if you wrap it in a script that gets the playing song. For +instance, with ``mpc`` you can do something like: - beet bpm $(mpc |head -1|tr -d "-") +:: + + beet bpm $(mpc |head -1|tr -d "-") If :ref:`import.write <config-import-write>` is ``yes``, the song's tags are written to disk. @@ -28,14 +32,12 @@ written to disk. Configuration ------------- -To configure the plugin, make a ``bpm:`` section in your configuration file. -The available options are: +To configure the plugin, make a ``bpm:`` section in your configuration file. The +available options are: - **max_strokes**: The maximum number of strokes to accept when tapping out the - BPM. - Default: 3. -- **overwrite**: Overwrite the track's existing BPM. - Default: ``yes``. + BPM. Default: 3. +- **overwrite**: Overwrite the track's existing BPM. Default: ``yes``. Credit ------ diff --git a/docs/plugins/bpsync.rst b/docs/plugins/bpsync.rst index 29cbd08e3..6c420b61e 100644 --- a/docs/plugins/bpsync.rst +++ b/docs/plugins/bpsync.rst @@ -1,15 +1,13 @@ BPSync Plugin ============= -This plugin provides the ``bpsync`` command, which lets you fetch metadata -from Beatport for albums and tracks that already have Beatport IDs. -This plugin works similarly to :doc:`/plugins/mbsync`. - -If you have downloaded music from Beatport, this can speed -up the initial import if you just import "as-is" and then use ``bpsync`` to -get up-to-date tags that are written to the files according to your beets -configuration. 
+This plugin provides the ``bpsync`` command, which lets you fetch metadata from +Beatport for albums and tracks that already have Beatport IDs. This plugin works +similarly to :doc:`/plugins/mbsync`. +If you have downloaded music from Beatport, this can speed up the initial import +if you just import "as-is" and then use ``bpsync`` to get up-to-date tags that +are written to the files according to your beets configuration. Usage ----- @@ -18,17 +16,17 @@ Enable the ``bpsync`` plugin in your configuration (see :ref:`using-plugins`) and then run ``beet bpsync QUERY`` to fetch updated metadata for a part of your collection (or omit the query to run over your whole library). -This plugin treats albums and singletons (non-album tracks) separately. It -first processes all matching singletons and then proceeds on to full albums. -The same query is used to search for both kinds of entities. +This plugin treats albums and singletons (non-album tracks) separately. It first +processes all matching singletons and then proceeds on to full albums. The same +query is used to search for both kinds of entities. The command has a few command-line options: -* To preview the changes that would be made without applying them, use the +- To preview the changes that would be made without applying them, use the ``-p`` (``--pretend``) flag. -* By default, files will be moved (renamed) according to their metadata if - they are inside your beets library directory. To disable this, use the - ``-M`` (``--nomove``) command-line option. -* If you have the ``import.write`` configuration option enabled, then this - plugin will write new metadata to files' tags. To disable this, use the - ``-W`` (``--nowrite``) option. +- By default, files will be moved (renamed) according to their metadata if they + are inside your beets library directory. To disable this, use the ``-M`` + (``--nomove``) command-line option. 
+- If you have the ``import.write`` configuration option enabled, then this + plugin will write new metadata to files' tags. To disable this, use the ``-W`` + (``--nowrite``) option. diff --git a/docs/plugins/bucket.rst b/docs/plugins/bucket.rst index ee1857777..7d2061991 100644 --- a/docs/plugins/bucket.rst +++ b/docs/plugins/bucket.rst @@ -8,14 +8,17 @@ smaller subfolders by grouping albums or artists alphabetically (e.g. *A-F*, *G-M*, *N-Z*). To use the ``bucket`` plugin, first enable it in your configuration (see -:ref:`using-plugins`). -The plugin provides a :ref:`template function -<template-functions>` called ``%bucket`` for use in path format expressions:: +:ref:`using-plugins`). The plugin provides a :ref:`template function +<template-functions>` called ``%bucket`` for use in path format expressions: + +:: paths: default: /%bucket{$year}/%bucket{$artist}/$albumartist-$album-$year -Then, define your ranges in the ``bucket:`` section of the config file:: +Then, define your ranges in the ``bucket:`` section of the config file: + +:: bucket: bucket_alpha: ['A-F', 'G-M', 'N-Z'] @@ -30,17 +33,16 @@ The definition of a range is somewhat loose, and multiple formats are allowed: alphanumeric characters in the string you provide. For example, ``ABCD``, ``A-D``, ``A->D``, and ``[AD]`` are all equivalent. - For year ranges: digits characters are extracted and the two extreme years - define the range. For example, ``1975-77``, ``1975,76,77`` and ``1975-1977`` are - equivalent. If no upper bound is given, the range is extended to current year - (unless a later range is defined). For example, ``1975`` encompasses all years - from 1975 until now. + define the range. For example, ``1975-77``, ``1975,76,77`` and ``1975-1977`` + are equivalent. If no upper bound is given, the range is extended to current + year (unless a later range is defined). For example, ``1975`` encompasses all + years from 1975 until now. 
The ``%bucket`` template function guesses whether to use alpha- or year-style buckets depending on the text it receives. It can guess wrong if, for example, an artist or album happens to begin with four digits. Provide ``alpha`` as the -second argument to the template to avoid this automatic detection: for -example, use ``%bucket{$artist,alpha}``. - +second argument to the template to avoid this automatic detection: for example, +use ``%bucket{$artist,alpha}``. Configuration ------------- @@ -49,29 +51,28 @@ To configure the plugin, make a ``bucket:`` section in your configuration file. The available options are: - **bucket_alpha**: Ranges to use for all substitutions occurring on textual - fields. - Default: none. + fields. Default: none. - **bucket_alpha_regex**: A ``range: regex`` mapping (one per line) where - ``range`` is one of the `bucket_alpha` ranges and ``value`` is a regex that - overrides original range definition. - Default: none. + ``range`` is one of the ``bucket_alpha`` ranges and ``value`` is a regex that + overrides original range definition. Default: none. - **bucket_year**: Ranges to use for all substitutions occurring on the - ``$year`` field. - Default: none. + ``$year`` field. Default: none. - **extrapolate**: Enable this if you want to group your files into multiple year ranges without enumerating them all. This option will generate year - bucket names by reproducing characteristics of declared buckets. - Default: ``no`` + bucket names by reproducing characteristics of declared buckets. Default: + ``no`` -Here's an example:: +Here's an example: - bucket: - bucket_year: ['2000-05'] - extrapolate: true - bucket_alpha: ['A - D', 'E - L', 'M - R', 'S - Z'] - bucket_alpha_regex: - 'A - D': ^[0-9a-dA-D…äÄ] +:: -This configuration creates five-year ranges for any input year. -The `A - D` bucket now matches also all artists starting with ä or Ä and 0 to 9 -and … (ellipsis). The other alpha buckets work as ranges. 
+ bucket: + bucket_year: ['2000-05'] + extrapolate: true + bucket_alpha: ['A - D', 'E - L', 'M - R', 'S - Z'] + bucket_alpha_regex: + 'A - D': ^[0-9a-dA-D…äÄ] + +This configuration creates five-year ranges for any input year. The ``A - D`` +bucket now matches also all artists starting with ä or Ä and 0 to 9 and … +(ellipsis). The other alpha buckets work as ranges. diff --git a/docs/plugins/chroma.rst b/docs/plugins/chroma.rst index 30d57939f..4e333ab99 100644 --- a/docs/plugins/chroma.rst +++ b/docs/plugins/chroma.rst @@ -4,18 +4,19 @@ Chromaprint/Acoustid Plugin Acoustic fingerprinting is a technique for identifying songs from the way they "sound" rather from their existing metadata. That means that beets' autotagger can theoretically use fingerprinting to tag files that don't have any ID3 -information at all (or have completely incorrect data). This plugin uses an -open-source fingerprinting technology called `Chromaprint`_ and its associated -Web service, called `Acoustid`_. +information at all (or have completely incorrect data). This plugin uses an +open-source fingerprinting technology called Chromaprint_ and its associated Web +service, called Acoustid_. -.. _Chromaprint: https://acoustid.org/chromaprint .. _acoustid: https://acoustid.org/ +.. _chromaprint: https://acoustid.org/chromaprint + Turning on fingerprinting can increase the accuracy of the autotagger---especially on files with very poor metadata---but it comes at a cost. First, it can be trickier to set up than beets itself (you need to set up the native fingerprinting library, whereas all of the beets core is written in -pure Python). Also, fingerprinting takes significantly more CPU and memory than +pure Python). Also, fingerprinting takes significantly more CPU and memory than ordinary tagging---which means that imports will go substantially slower. If you're willing to pay the performance cost for fingerprinting, read on! 
@@ -23,80 +24,83 @@ If you're willing to pay the performance cost for fingerprinting, read on! Installing Dependencies ----------------------- -To get fingerprinting working, you'll need to install three things: +To get fingerprinting working, you'll need to install three things: -1. `pyacoustid`_ Python library (version 0.6 or later). You can install it by +1. pyacoustid_ Python library (version 0.6 or later). You can install it by installing ``beets`` with ``chroma`` extra .. code-block:: bash pip install "beets[chroma]" -2. the `Chromaprint`_ library_ or |command-line-tool|_ +2. the Chromaprint_ library_ or |command-line-tool|_ 3. an |audio-decoder|_ .. |command-line-tool| replace:: command line tool -.. |audio-decoder| replace:: audio decoder +.. |audio-decoder| replace:: audio decoder .. _command-line-tool: Installing the Binary Command-Line Tool -''''''''''''''''''''''''''''''''''''''' +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The simplest way to get up and running, especially on Windows, is to -`download`_ the appropriate Chromaprint binary package and place the -``fpcalc`` (or ``fpcalc.exe``) on your shell search path. On Windows, this -means something like ``C:\\Program Files``. On OS X or Linux, put the -executable somewhere like ``/usr/local/bin``. +The simplest way to get up and running, especially on Windows, is to download_ +the appropriate Chromaprint binary package and place the ``fpcalc`` (or +``fpcalc.exe``) on your shell search path. On Windows, this means something like +``C:\\Program Files``. On OS X or Linux, put the executable somewhere like +``/usr/local/bin``. .. _download: https://acoustid.org/chromaprint .. _library: Installing the Library -'''''''''''''''''''''' +~~~~~~~~~~~~~~~~~~~~~~ -On OS X and Linux, you can also use a library installed by your package -manager, which has some advantages (automatic upgrades, etc.). The Chromaprint -site has links to packages for major Linux distributions. 
If you use -`Homebrew`_ on Mac OS X, you can install the library with ``brew install -chromaprint``. - -.. _Homebrew: https://brew.sh/ +On OS X and Linux, you can also use a library installed by your package manager, +which has some advantages (automatic upgrades, etc.). The Chromaprint site has +links to packages for major Linux distributions. If you use Homebrew_ on Mac OS +X, you can install the library with ``brew install chromaprint``. .. _audio-decoder: +.. _homebrew: https://brew.sh/ + Audio Decoder -''''''''''''' +~~~~~~~~~~~~~ You will also need a mechanism for decoding audio files supported by the -`audioread`_ library: +audioread_ library: -* OS X has a number of decoders already built into Core Audio, so there's no +- OS X has a number of decoders already built into Core Audio, so there's no need to install anything. - -* On Linux, you can install `GStreamer`_ with `PyGObject`_, `FFmpeg`_, or - `MAD`_ with `pymad`_. How you install these will depend on your - distribution. - For example, on Ubuntu, run ``apt-get install gstreamer1.0 python-gi``. On - Arch Linux, you want ``pacman -S gstreamer python2-gobject``. If you use - GStreamer, be sure to install its codec plugins also (``gst-plugins-good``, - etc.). +- On Linux, you can install GStreamer_ with PyGObject_, FFmpeg_, or MAD_ with + pymad_. How you install these will depend on your distribution. For example, + on Ubuntu, run ``apt-get install gstreamer1.0 python-gi``. On Arch Linux, you + want ``pacman -S gstreamer python2-gobject``. If you use GStreamer, be sure to + install its codec plugins also (``gst-plugins-good``, etc.). Note that if you install beets in a virtualenv, you'll need it to have ``--system-site-packages`` enabled for Python to see the GStreamer bindings. -* On Windows, builds are provided by `GStreamer`_ +- On Windows, builds are provided by GStreamer_ .. _audioread: https://github.com/beetbox/audioread + +.. 
_core audio: https://developer.apple.com/technologies/mac/audio-and-video.html + +.. _ffmpeg: https://ffmpeg.org/ + +.. _gstreamer: https://gstreamer.freedesktop.org/ + +.. _mad: https://www.underbit.com/products/mad/ + .. _pyacoustid: https://github.com/beetbox/pyacoustid -.. _FFmpeg: https://ffmpeg.org/ + +.. _pygobject: https://wiki.gnome.org/Projects/PyGObject + .. _pymad: https://spacepants.org/src/pymad/ -.. _MAD: https://www.underbit.com/products/mad/ -.. _Core Audio: https://developer.apple.com/technologies/mac/audio-and-video.html -.. _Gstreamer: https://gstreamer.freedesktop.org/ -.. _PyGObject: https://wiki.gnome.org/Projects/PyGObject To decode audio formats (MP3, FLAC, etc.) with GStreamer, you'll need the standard set of Gstreamer plugins. For example, on Ubuntu, install the packages @@ -107,16 +111,16 @@ Usage ----- Once you have all the dependencies sorted out, enable the ``chroma`` plugin in -your configuration (see :ref:`using-plugins`) to benefit from fingerprinting -the next time you run ``beet import``. (The plugin doesn't produce any obvious -output by default. If you want to confirm that it's enabled, you can try -running in verbose mode once with ``beet -v import``.) +your configuration (see :ref:`using-plugins`) to benefit from fingerprinting the +next time you run ``beet import``. (The plugin doesn't produce any obvious +output by default. If you want to confirm that it's enabled, you can try running +in verbose mode once with ``beet -v import``.) You can also use the ``beet fingerprint`` command to generate fingerprints for items already in your library. (Provide a query to fingerprint a subset of your -library.) The generated fingerprints will be stored in the library database. -If you have the ``import.write`` config option enabled, they will also be -written to files' metadata. +library.) The generated fingerprints will be stored in the library database. 
If +you have the ``import.write`` config option enabled, they will also be written +to files' metadata. .. _submitfp: @@ -125,7 +129,9 @@ Configuration There is one configuration option in the ``chroma:`` section, ``auto``, which controls whether to fingerprint files during the import process. To disable -fingerprint-based autotagging, set it to ``no``, like so:: +fingerprint-based autotagging, set it to ``no``, like so: + +:: chroma: auto: no @@ -133,11 +139,13 @@ fingerprint-based autotagging, set it to ``no``, like so:: Submitting Fingerprints ----------------------- -You can help expand the `Acoustid`_ database by submitting fingerprints for the +You can help expand the Acoustid_ database by submitting fingerprints for the music in your collection. To do this, first `get an API key`_ from the Acoustid service. Just use an OpenID or MusicBrainz account to log in and you'll get a -short token string. Then, add the key to your ``config.yaml`` as the -value ``apikey`` in a section called ``acoustid`` like so:: +short token string. Then, add the key to your ``config.yaml`` as the value +``apikey`` in a section called ``acoustid`` like so: + +:: acoustid: apikey: AbCd1234 @@ -146,4 +154,4 @@ Then, run ``beet submit``. (You can also provide a query to submit a subset of your library.) The command will use stored fingerprints if they're available; otherwise it will fingerprint each file before submitting it. -.. _get an API key: https://acoustid.org/api-key +.. _get an api key: https://acoustid.org/api-key diff --git a/docs/plugins/convert.rst b/docs/plugins/convert.rst index a41e6c529..14b545b28 100644 --- a/docs/plugins/convert.rst +++ b/docs/plugins/convert.rst @@ -1,44 +1,40 @@ Convert Plugin ============== -The ``convert`` plugin lets you convert parts of your collection to a -directory of your choice, transcoding audio and embedding album art along the -way. It can transcode to and from any format using a configurable command -line. 
Optionally an m3u playlist file containing all the converted files can be -saved to the destination path. - +The ``convert`` plugin lets you convert parts of your collection to a directory +of your choice, transcoding audio and embedding album art along the way. It can +transcode to and from any format using a configurable command line. Optionally +an m3u playlist file containing all the converted files can be saved to the +destination path. Installation ------------ To use the ``convert`` plugin, first enable it in your configuration (see -:ref:`using-plugins`). By default, the plugin depends on `FFmpeg`_ to -transcode the audio, so you might want to install it. - -.. _FFmpeg: https://ffmpeg.org +:ref:`using-plugins`). By default, the plugin depends on FFmpeg_ to transcode +the audio, so you might want to install it. +.. _ffmpeg: https://ffmpeg.org Usage ----- -To convert a part of your collection, run ``beet convert QUERY``. The -command will transcode all the files matching the query to the -destination directory given by the ``-d`` (``--dest``) option or the -``dest`` configuration. The path layout mirrors that of your library, -but it may be customized through the ``paths`` configuration. Files -that have been previously converted---and thus already exist in the -destination directory---will be skipped. +To convert a part of your collection, run ``beet convert QUERY``. The command +will transcode all the files matching the query to the destination directory +given by the ``-d`` (``--dest``) option or the ``dest`` configuration. The path +layout mirrors that of your library, but it may be customized through the +``paths`` configuration. Files that have been previously converted---and thus +already exist in the destination directory---will be skipped. -The plugin uses a command-line program to transcode the audio. 
With the -``-f`` (``--format``) option you can choose the transcoding command -and customize the available commands -:ref:`through the configuration <convert-format-config>`. +The plugin uses a command-line program to transcode the audio. With the ``-f`` +(``--format``) option you can choose the transcoding command and customize the +available commands :ref:`through the configuration <convert-format-config>`. -Unless the ``-y`` (``--yes``) flag is set, the command will list all -the items to be converted and ask for your confirmation. +Unless the ``-y`` (``--yes``) flag is set, the command will list all the items +to be converted and ask for your confirmation. -The ``-a`` (or ``--album``) option causes the command -to match albums instead of tracks. +The ``-a`` (or ``--album``) option causes the command to match albums instead of +tracks. By default, the command places converted files into the destination directory and leaves your library pristine. To instead back up your original files into @@ -51,18 +47,23 @@ them. By default, files that do not need to be transcoded will be copied to their destination. Passing the ``-l`` (``--link``) flag creates symbolic links -instead, passing ``-H`` (``--hardlink``) creates hard links. -Note that album art embedding is disabled for files that are linked. -Refer to the ``link`` and ``hardlink`` options below. +instead, passing ``-H`` (``--hardlink``) creates hard links. Note that album art +embedding is disabled for files that are linked. Refer to the ``link`` and +``hardlink`` options below. + +The ``-F`` (or ``--force``) option forces transcoding even when safety options +such as ``no_convert``, ``never_convert_lossy_files``, or ``max_bitrate`` would +normally cause a file to be copied or skipped instead. This can be combined with +``--format`` to explicitly transcode lossy inputs to a chosen target format. 
The ``-m`` (or ``--playlist``) option enables the plugin to create an m3u8 playlist file in the destination folder given by the ``-d`` (``--dest``) option or the ``dest`` configuration. The path to the playlist file can either be absolute or relative to the ``dest`` directory. The contents will always be relative paths to media files, which tries to ensure compatibility when read -from external drives or on computers other than the one used for the -conversion. There is one caveat though: A list generated on Unix/macOS can't be -read on Windows and vice versa. +from external drives or on computers other than the one used for the conversion. +There is one caveat though: A list generated on Unix/macOS can't be read on +Windows and vice versa. Depending on the beets user's settings a generated playlist potentially could contain unicode characters. This is supported, playlists are written in `M3U8 @@ -71,96 +72,101 @@ format`_. Configuration ------------- -To configure the plugin, make a ``convert:`` section in your configuration -file. The available options are: +To configure the plugin, make a ``convert:`` section in your configuration file. +The available options are: - **auto**: Import transcoded versions of your files automatically during imports. With this option enabled, the importer will transcode all (in the default configuration) non-MP3 files over the maximum bitrate before adding - them to your library. - Default: ``no``. + them to your library. Default: ``no``. - **auto_keep**: Convert your files automatically on import to **dest** but - import the non transcoded version. It uses the default format you have - defined in your config file. - Default: ``no``. + import the non transcoded version. It uses the default format you have defined + in your config file. Default: ``no``. - .. note:: You probably want to use only one of the `auto` and `auto_keep` - options, not both. Enabling both will convert your files twice on import, - which you probably don't want. 
+ .. note:: + + You probably want to use only one of the ``auto`` and ``auto_keep`` + options, not both. Enabling both will convert your files twice on import, + which you probably don't want. - **tmpdir**: The directory where temporary files will be stored during import. Default: none (system default), - **copy_album_art**: Copy album art when copying or transcoding albums matched using the ``-a`` option. Default: ``no``. - **album_art_maxwidth**: Downscale album art if it's too big. The resize - operation reduces image width to at most ``maxwidth`` pixels while - preserving the aspect ratio. The specified image size will apply to both - embedded album art and external image files. + operation reduces image width to at most ``maxwidth`` pixels while preserving + the aspect ratio. The specified image size will apply to both embedded album + art and external image files. - **dest**: The directory where the files will be converted (or copied) to. Default: none. - **embed**: Embed album art in converted items. Default: ``yes``. - **id3v23**: Can be used to override the global ``id3v23`` option. Default: ``inherit``. +- **write_metadata**: Can be used to disable writing metadata to converted + files. Default: ``true``. - **max_bitrate**: By default, the plugin does not transcode files that are already in the destination format. This option instead also transcodes files - with high bitrates, even if they are already in the same format as the - output. Note that this does not guarantee that all converted files will have - a lower bitrate---that depends on the encoder and its configuration. - Default: none. + with high bitrates, even if they are already in the same format as the output. + Note that this does not guarantee that all converted files will have a lower + bitrate---that depends on the encoder and its configuration. Default: none. 
+ This option will be overridden by the ``--force`` flag - **no_convert**: Does not transcode items matching the query string provided - (see :doc:`/reference/query`). For example, to not convert AAC or WMA formats, you can use ``format:AAC, format:WMA`` or - ``path::\.(m4a|wma)$``. If you only want to transcode WMA format, you can use a negative query, e.g., ``^path::\.(wma)$``, to not convert any other format except WMA. + (see :doc:`/reference/query`). For example, to not convert AAC or WMA formats, + you can use ``format:AAC, format:WMA`` or ``path::\.(m4a|wma)$``. If you only + want to transcode WMA format, you can use a negative query, e.g., + ``^path::\.(wma)$``, to not convert any other format except WMA. This option + will be overridden by the ``--force`` flag - **never_convert_lossy_files**: Cross-conversions between lossy codecs---such as mp3, ogg vorbis, etc.---makes little sense as they will decrease quality - even further. If set to ``yes``, lossy files are always copied. - Default: ``no``. -- **paths**: The directory structure and naming scheme for the converted - files. Uses the same format as the top-level ``paths`` section (see - :ref:`path-format-config`). - Default: Reuse your top-level path format settings. + even further. If set to ``yes``, lossy files are always copied. Default: + ``no``. When ``never_convert_lossy_files`` is enabled, lossy source files (for + example MP3 or Ogg Vorbis) are normally not transcoded and are instead copied + or linked as-is. To explicitly transcode lossy files in spite of this, use the + ``--force`` option with the ``convert`` command (optionally together with + ``--format`` to choose a target format) +- **paths**: The directory structure and naming scheme for the converted files. + Uses the same format as the top-level ``paths`` section (see + :ref:`path-format-config`). Default: Reuse your top-level path format + settings. - **quiet**: Prevent the plugin from announcing every file it processes. 
Default: ``false``.
-- **threads**: The number of threads to use for parallel encoding.
-  By default, the plugin will detect the number of processors available and use
-  them all.
+- **threads**: The number of threads to use for parallel encoding. By default,
+  the plugin will detect the number of processors available and use them all.
 - **link**: By default, files that do not need to be transcoded will be copied
   to their destination. This option creates symbolic links instead. Note that
   options such as ``embed`` that modify the output files after the transcoding
   step will cause the original files to be modified as well if ``link`` is
-  enabled. For this reason, album-art embedding is disabled
-  for files that are linked.
-  Default: ``false``.
-- **hardlink**: This options works similar to ``link``, but it creates
-  hard links instead of symlinks.
-  This option overrides ``link``. Only works when converting to a directory
-  on the same filesystem as the library.
-  Default: ``false``.
-- **delete_originals**: Transcoded files will be copied or moved to their destination, depending on the import configuration. By default, the original files are not modified by the plugin. This option deletes the original files after the transcoding step has completed.
-  Default: ``false``.
+  enabled. For this reason, album-art embedding is disabled for files that are
+  linked. Default: ``false``.
+- **hardlink**: This option works similarly to ``link``, but it creates hard
+  links instead of symlinks. This option overrides ``link``. Only works when
+  converting to a directory on the same filesystem as the library. Default:
+  ``false``.
+- **delete_originals**: Transcoded files will be copied or moved to their
+  destination, depending on the import configuration. By default, the original
+  files are not modified by the plugin. This option deletes the original files
+  after the transcoding step has completed. Default: ``false``.
- **playlist**: The name of a playlist file that should be written on each run - of the plugin. A relative file path (e.g `playlists/mylist.m3u8`) is allowed - as well. The final destination of the playlist file will always be relative - to the destination path (``dest``, ``--dest``, ``-d``). This configuration is - overridden by the ``-m`` (``--playlist``) command line option. - Default: none. + of the plugin. A relative file path (e.g ``playlists/mylist.m3u8``) is allowed + as well. The final destination of the playlist file will always be relative to + the destination path (``dest``, ``--dest``, ``-d``). This configuration is + overridden by the ``-m`` (``--playlist``) command line option. Default: none. -You can also configure the format to use for transcoding (see the next -section): +You can also configure the format to use for transcoding (see the next section): - **format**: The name of the format to transcode to when none is specified on - the command line. - Default: ``mp3``. + the command line. Default: ``mp3``. - **formats**: A set of formats and associated command lines for transcoding each. .. _convert-format-config: Configuring the transcoding command -``````````````````````````````````` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can customize the transcoding command through the ``formats`` map -and select a command with the ``--format`` command-line option or the -``format`` configuration. +You can customize the transcoding command through the ``formats`` map and select +a command with the ``--format`` command-line option or the ``format`` +configuration. :: @@ -172,25 +178,25 @@ and select a command with the ``--format`` command-line option or the extension: spx wav: ffmpeg -i $source -y -acodec pcm_s16le $dest -In this example ``beet convert`` will use the *speex* command by -default. To convert the audio to `wav`, run ``beet convert -f wav``. -This will also use the format key (``wav``) as the file extension. 
+In this example ``beet convert`` will use the *speex* command by default. To
+convert the audio to ``wav``, run ``beet convert -f wav``. This will also use
+the format key (``wav``) as the file extension.

-Each entry in the ``formats`` map consists of a key (the name of the
-format) as well as the command and optionally the file extension.
-``extension`` is the filename extension to be used for newly transcoded
-files. If only the command is given as a string or the extension is not
-provided, the file extension defaults to the format's name. ``command`` is the
-command to use to transcode audio. The tokens ``$source`` and ``$dest`` in the
-command are replaced with the paths to the existing and new file.
+Each entry in the ``formats`` map consists of a key (the name of the format) as
+well as the command and optionally the file extension. ``extension`` is the
+filename extension to be used for newly transcoded files. If only the command is
+given as a string or the extension is not provided, the file extension defaults
+to the format's name. ``command`` is the command to use to transcode audio. The
+tokens ``$source`` and ``$dest`` in the command are replaced with the paths to
+the existing and new file.

-The plugin in comes with default commands for the most common audio
-formats: `mp3`, `alac`, `flac`, `aac`, `opus`, `ogg`, `wma`. For
-details have a look at the output of ``beet config -d``.
+The plugin comes with default commands for the most common audio formats:
+``mp3``, ``alac``, ``flac``, ``aac``, ``opus``, ``ogg``, ``wma``. For details
+have a look at the output of ``beet config -d``.

 For a one-command-fits-all solution use the ``convert.command`` and
-``convert.extension`` options. If these are set, the formats are ignored
-and the given command is used for all conversions.
+``convert.extension`` options. If these are set, the formats are ignored and the
+given command is used for all conversions.
:: @@ -198,31 +204,38 @@ and the given command is used for all conversions. command: ffmpeg -i $source -y -vn -aq 2 $dest extension: mp3 - Gapless MP3 encoding -```````````````````` +~~~~~~~~~~~~~~~~~~~~ -While FFmpeg cannot produce "`gapless`_" MP3s by itself, you can create them -by using `LAME`_ directly. Use a shell script like this to pipe the output of -FFmpeg into the LAME tool:: +While FFmpeg cannot produce "gapless_" MP3s by itself, you can create them by +using LAME_ directly. Use a shell script like this to pipe the output of FFmpeg +into the LAME tool: + +:: #!/bin/sh ffmpeg -i "$1" -f wav - | lame -V 2 --noreplaygain - "$2" -Then configure the ``convert`` plugin to use the script:: +Then configure the ``convert`` plugin to use the script: + +:: convert: command: /path/to/script.sh $source $dest extension: mp3 This strategy configures FFmpeg to produce a WAV file with an accurate length -header for LAME to use. Using ``--noreplaygain`` disables gain analysis; you -can use the :doc:`/plugins/replaygain` to do this analysis. See the LAME -`documentation`_ and the `HydrogenAudio wiki`_ for other LAME configuration +header for LAME to use. Using ``--noreplaygain`` disables gain analysis; you can +use the :doc:`/plugins/replaygain` to do this analysis. See the LAME +documentation_ and the `HydrogenAudio wiki`_ for other LAME configuration options and a thorough discussion of MP3 encoding. .. _documentation: https://lame.sourceforge.io/index.php -.. _HydrogenAudio wiki: https://wiki.hydrogenaud.io/index.php?title=LAME + .. _gapless: https://wiki.hydrogenaud.io/index.php?title=Gapless_playback -.. _LAME: https://lame.sourceforge.io/index.php -.. _M3U8 format: https://en.wikipedia.org/wiki/M3U#M3U8 + +.. _hydrogenaudio wiki: https://wiki.hydrogenaud.io/index.php?title=LAME + +.. _lame: https://lame.sourceforge.io/index.php + +.. 
_m3u8 format: https://en.wikipedia.org/wiki/M3U#M3U8 diff --git a/docs/plugins/deezer.rst b/docs/plugins/deezer.rst index 9f8da41fd..d44a565ce 100644 --- a/docs/plugins/deezer.rst +++ b/docs/plugins/deezer.rst @@ -1,20 +1,24 @@ Deezer Plugin -============== +============= The ``deezer`` plugin provides metadata matches for the importer using the -`Deezer`_ `Album`_ and `Track`_ APIs. +Deezer_ Album_ and Track_ APIs. -.. _Deezer: https://www.deezer.com -.. _Album: https://developers.deezer.com/api/album -.. _Track: https://developers.deezer.com/api/track +.. _album: https://developers.deezer.com/api/album + +.. _deezer: https://www.deezer.com + +.. _track: https://developers.deezer.com/api/track Basic Usage ----------- First, enable the ``deezer`` plugin (see :ref:`using-plugins`). -You can enter the URL for an album or song on Deezer at the ``enter Id`` -prompt during import:: +You can enter the URL for an album or song on Deezer at the ``enter Id`` prompt +during import: + +:: Enter search, enter Id, aBort, eDit, edit Candidates, plaY? i Enter release ID: https://www.deezer.com/en/album/572261 @@ -22,6 +26,34 @@ prompt during import:: Configuration ------------- -This plugin can be configured like other metadata source plugins as described in :ref:`metadata-source-plugin-configuration`. +This plugin can be configured like other metadata source plugins as described in +:ref:`metadata-source-plugin-configuration`. -The ``deezer`` plugin provides an additional command ``deezerupdate`` to update the ``rank`` information from Deezer. The ``rank`` (ranges from 0 to 1M) is a global indicator of a song's popularity on Deezer that is updated daily based on streams. The higher the ``rank``, the more popular the track is. +Default +~~~~~~~ + +.. code-block:: yaml + + deezer: + search_query_ascii: no + data_source_mismatch_penalty: 0.5 + search_limit: 5 + +.. 
conf:: search_query_ascii + :default: no + + If enabled, the search query will be converted to ASCII before being sent to + Deezer. Converting searches to ASCII can enhance search results in some cases, + but in general, it is not recommended. For instance, ``artist:deadmau5 + album:4×4`` will be converted to ``artist:deadmau5 album:4x4`` (notice + ``×!=x``). + +.. include:: ./shared_metadata_source_config.rst + +Commands +-------- + +The ``deezer`` plugin provides an additional command ``deezerupdate`` to update +the ``rank`` information from Deezer. The ``rank`` (ranges from 0 to 1M) is a +global indicator of a song's popularity on Deezer that is updated daily based on +streams. The higher the ``rank``, the more popular the track is. diff --git a/docs/plugins/discogs.rst b/docs/plugins/discogs.rst index ac67f2d0a..780042026 100644 --- a/docs/plugins/discogs.rst +++ b/docs/plugins/discogs.rst @@ -1,15 +1,15 @@ Discogs Plugin ============== -The ``discogs`` plugin extends the autotagger's search capabilities to -include matches from the `Discogs`_ database. +The ``discogs`` plugin extends the autotagger's search capabilities to include +matches from the Discogs_ database. -Files can be imported as albums or as singletons. Since `Discogs`_ matches are -always based on `Discogs`_ releases, the album tag is written even to -singletons. This enhances the importers results when reimporting as (full or -partial) albums later on. +Files can be imported as albums or as singletons. Since Discogs_ matches are +always based on Discogs_ releases, the album tag is written even to singletons. +This enhances the importers results when reimporting as (full or partial) albums +later on. -.. _Discogs: https://discogs.com +.. 
_discogs: https://discogs.com Installation ------------ @@ -21,7 +21,7 @@ To use the ``discogs`` plugin, first enable it in your configuration (see pip install "beets[discogs]" -You will also need to register for a `Discogs`_ account, and provide +You will also need to register for a Discogs_ account, and provide authentication credentials via a personal access token or an OAuth2 authorization. @@ -29,84 +29,139 @@ Matches from Discogs will now show up during import alongside matches from MusicBrainz. The search terms sent to the Discogs API are based on the artist and album tags of your tracks. If those are empty no query will be issued. -If you have a Discogs ID for an album you want to tag, you can also enter it -at the "enter Id" prompt in the importer. +If you have a Discogs ID for an album you want to tag, you can also enter it at +the "enter Id" prompt in the importer. OAuth Authorization -``````````````````` +~~~~~~~~~~~~~~~~~~~ The first time you run the :ref:`import-cmd` command after enabling the plugin, it will ask you to authorize with Discogs by visiting the site in a browser. Subsequent runs will not require re-authorization. Authentication via Personal Access Token -```````````````````````````````````````` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As an alternative to OAuth, you can get a token from Discogs and add it to -your configuration. -To get a personal access token (called a "user token" in the `python3-discogs-client`_ -documentation): +As an alternative to OAuth, you can get a token from Discogs and add it to your +configuration. To get a personal access token (called a "user token" in the +python3-discogs-client_ documentation): -#. login to `Discogs`_; -#. visit the `Developer settings page <https://www.discogs.com/settings/developers>`_; -#. press the *Generate new token* button; -#. copy the generated token; -#. place it in your configuration in the ``discogs`` section as the ``user_token`` option: +1. login to Discogs_; +2. 
visit the `Developer settings page + <https://www.discogs.com/settings/developers>`_; +3. press the *Generate new token* button; +4. copy the generated token; +5. place it in your configuration in the ``discogs`` section as the + ``user_token`` option: .. code-block:: yaml - discogs: - user_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - + discogs: + user_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" Configuration ------------- -This plugin can be configured like other metadata source plugins as described in :ref:`metadata-source-plugin-configuration`. +This plugin can be configured like other metadata source plugins as described in +:ref:`metadata-source-plugin-configuration`. -There is one additional option in the ``discogs:`` section, ``index_tracks``. -Index tracks (see the `Discogs guidelines -<https://support.discogs.com/hc/en-us/articles/360005055373-Database-Guidelines-12-Tracklisting#Index_Tracks_And_Headings>`_), -along with headers, mark divisions between distinct works on the same release -or within works. When ``index_tracks`` is enabled: +Default +~~~~~~~ .. code-block:: yaml discogs: - index_tracks: yes + apikey: REDACTED + apisecret: REDACTED + tokenfile: discogs_token.json + user_token: + index_tracks: no + append_style_genre: no + separator: ', ' + strip_disambiguation: yes + featured_string: Feat. + anv: + artist_credit: yes + artist: no + album_artist: no + data_source_mismatch_penalty: 0.5 + search_limit: 5 -beets will incorporate the names of the divisions containing each track into -the imported track's title. +.. conf:: index_tracks + :default: no -For example, importing -`this album -<https://www.discogs.com/Handel-Sutherland-Kirkby-Kwella-Nelson-Watkinson-Bowman-Rolfe-Johnson-Elliott-Partridge-Thomas-The-A/release/2026070>`_ -would result in track names like: + Index tracks (see the `Discogs guidelines`_) along with headers, mark divisions + between distinct works on the same release or within works. 
When enabled, + beets will incorporate the names of the divisions containing each track into the + imported track's title. -.. code-block:: text + For example, importing `divisions album`_ would result in track names like: - Messiah, Part I: No.1: Sinfony - Messiah, Part II: No.22: Chorus- Behold The Lamb Of God - Athalia, Act I, Scene I: Sinfonia + .. code-block:: text -whereas with ``index_tracks`` disabled you'd get: + Messiah, Part I: No.1: Sinfony + Messiah, Part II: No.22: Chorus- Behold The Lamb Of God + Athalia, Act I, Scene I: Sinfonia -.. code-block:: text + whereas with ``index_tracks`` disabled you'd get: - No.1: Sinfony - No.22: Chorus- Behold The Lamb Of God - Sinfonia + .. code-block:: text -This option is useful when importing classical music. + No.1: Sinfony + No.22: Chorus- Behold The Lamb Of God + Sinfonia -Other configurations available under ``discogs:`` are: + This option is useful when importing classical music. -- **append_style_genre**: Appends the Discogs style (if found) to the genre tag. This can be useful if you want more granular genres to categorize your music. - For example, a release in Discogs might have a genre of "Electronic" and a style of "Techno": enabling this setting would set the genre to be "Electronic, Techno" (assuming default separator of ``", "``) instead of just "Electronic". - Default: ``False`` -- **separator**: How to join multiple genre and style values from Discogs into a string. - Default: ``", "`` +.. conf:: append_style_genre + :default: no + Appends the Discogs style (if found) to the genre tag. This can be useful if + you want more granular genres to categorize your music. For example, + a release in Discogs might have a genre of "Electronic" and a style of + "Techno": enabling this setting would set the genre to be "Electronic, + Techno" (assuming default separator of ``", "``) instead of just + "Electronic". + +.. 
conf:: separator + :default: ", " + + How to join multiple genre and style values from Discogs into a string. + +.. conf:: strip_disambiguation + :default: yes + + Discogs uses strings like ``"(4)"`` to mark distinct artists and labels with + the same name. If you'd like to use the Discogs disambiguation in your tags, + you can disable this option. + +.. conf:: featured_string + :default: Feat. + + Configure the string used for noting featured artists. Useful if you prefer ``Featuring`` or ``ft.``. + +.. conf:: anv + + This configuration option is dedicated to handling Artist Name + Variations (ANVs). Sometimes a release credits artists differently compared to + the majority of their work. For example, "Basement Jaxx" may be credited as + "Tha Jaxx" or "The Basement Jaxx". You can select any combination of these + config options to control where beets writes and stores the variation credit. + The default, shown below, writes variations to the artist_credit field. + + .. code-block:: yaml + + discogs: + anv: + artist_credit: yes + artist: no + album_artist: no + +.. include:: ./shared_metadata_source_config.rst + +.. _discogs guidelines: https://support.discogs.com/hc/en-us/articles/360005055373-Database-Guidelines-12-Tracklisting#Index_Tracks_And_Headings + +.. _divisions album: https://www.discogs.com/Handel-Sutherland-Kirkby-Kwella-Nelson-Watkinson-Bowman-Rolfe-Johnson-Elliott-Partridge-Thomas-The-A/release/2026070 Troubleshooting --------------- @@ -117,12 +172,13 @@ please start by searching for `a similar issue on the repo Here are two things you can try: -* Try deleting the token file (``~/.config/beets/discogs_token.json`` by +- Try deleting the token file (``~/.config/beets/discogs_token.json`` by default) to force re-authorization. -* Make sure that your system clock is accurate. The Discogs servers can reject +- Make sure that your system clock is accurate. The Discogs servers can reject your request if your clock is too out of sync. 
Matching tracks by Discogs ID is not yet supported. The ``--group-albums`` -option in album import mode provides an alternative to singleton mode for autotagging tracks that are not in album-related folders. +option in album import mode provides an alternative to singleton mode for +autotagging tracks that are not in album-related folders. .. _python3-discogs-client: https://github.com/joalla/discogs_client diff --git a/docs/plugins/duplicates.rst b/docs/plugins/duplicates.rst index 8b11b6661..4580343de 100644 --- a/docs/plugins/duplicates.rst +++ b/docs/plugins/duplicates.rst @@ -1,8 +1,8 @@ Duplicates Plugin ================= -This plugin adds a new command, ``duplicates`` or ``dup``, which finds -and lists duplicate tracks or albums in your collection. +This plugin adds a new command, ``duplicates`` or ``dup``, which finds and lists +duplicate tracks or albums in your collection. Usage ----- @@ -10,30 +10,31 @@ Usage To use the ``duplicates`` plugin, first enable it in your configuration (see :ref:`using-plugins`). -By default, the ``beet duplicates`` command lists the names of tracks -in your library that are duplicates. It assumes that Musicbrainz track -and album ids are unique to each track or album. That is, it lists -every track or album with an ID that has been seen before in the -library. -You can customize the output format, count the number of duplicate -tracks or albums, and list all tracks that have duplicates or just the -duplicates themselves via command-line switches :: +By default, the ``beet duplicates`` command lists the names of tracks in your +library that are duplicates. It assumes that Musicbrainz track and album ids are +unique to each track or album. That is, it lists every track or album with an ID +that has been seen before in the library. 
You can customize the output format, +count the number of duplicate tracks or albums, and list all tracks that have +duplicates or just the duplicates themselves via command-line switches - -h, --help show this help message and exit - -f FMT, --format=FMT print with custom format - -a, --album show duplicate albums instead of tracks - -c, --count count duplicate tracks or albums - -C PROG, --checksum=PROG - report duplicates based on arbitrary command - -d, --delete delete items from library and disk - -F, --full show all versions of duplicate tracks or albums - -s, --strict report duplicates only if all attributes are set - -k, --key report duplicates based on keys (can be used multiple times) - -M, --merge merge duplicate items - -m DEST, --move=DEST move items to dest - -o DEST, --copy=DEST copy items to dest - -p, --path print paths for matched items or albums - -t TAG, --tag=TAG tag matched items with 'k=v' attribute +:: + + -h, --help show this help message and exit + -f FMT, --format=FMT print with custom format + -a, --album show duplicate albums instead of tracks + -c, --count count duplicate tracks or albums + -C PROG, --checksum=PROG + report duplicates based on arbitrary command + -d, --delete delete items from library and disk + -F, --full show all versions of duplicate tracks or albums + -s, --strict report duplicates only if all attributes are set + -k, --key report duplicates based on keys (can be used multiple times) + -M, --merge merge duplicate items + -m DEST, --move=DEST move items to dest + -o DEST, --copy=DEST copy items to dest + -p, --path print paths for matched items or albums + -t TAG, --tag=TAG tag matched items with 'k=v' attribute + -r, --remove remove items from library Configuration ------------- @@ -41,115 +42,132 @@ Configuration To configure the plugin, make a ``duplicates:`` section in your configuration file. The available options mirror the command-line options: -- **album**: List duplicate albums instead of tracks. 
- Default: ``no``. -- **checksum**: Use an arbitrary command to compute a checksum - of items. This overrides the ``keys`` option the first time it is run; - however, because it caches the resulting checksum as ``flexattrs`` in the - database, you can use ``--key=name_of_the_checksumming_program - --key=any_other_keys`` (or set the ``keys`` configuration option) the second - time around. - Default: ``ffmpeg -i {file} -f crc -``. -- **copy**: A destination base directory into which to copy matched - items. +- **album**: List duplicate albums instead of tracks. Default: ``no``. +- **checksum**: Use an arbitrary command to compute a checksum of items. This + overrides the ``keys`` option the first time it is run; however, because it + caches the resulting checksum as ``flexattrs`` in the database, you can use + ``--key=name_of_the_checksumming_program --key=any_other_keys`` (or set the + ``keys`` configuration option) the second time around. Default: ``ffmpeg -i + {file} -f crc -``. +- **copy**: A destination base directory into which to copy matched items. Default: none (disabled). - **count**: Print a count of duplicate tracks or albums in the format ``$albumartist - $album - $title: $count`` (for tracks) or ``$albumartist - - $album: $count`` (for albums). - Default: ``no``. -- **delete**: Removes matched items from the library and from the disk. - Default: ``no`` -- **format**: A specific format with which to print every track - or album. This uses the same template syntax as beets' - :doc:`path formats</reference/pathformat>`. The usage is inspired by, and - therefore similar to, the :ref:`list <list-cmd>` command. - Default: :ref:`format_item` + $album: $count`` (for albums). Default: ``no``. +- **delete**: Remove matched items from the library and from the disk. Default: + ``no`` +- **format**: A specific format with which to print every track or album. This + uses the same template syntax as beets' :doc:`path + formats</reference/pathformat>`. 
The usage is inspired by, and therefore + similar to, the :ref:`list <list-cmd>` command. Default: :ref:`format_item` - **full**: List every track or album that has duplicates, not just the - duplicates themselves. - Default: ``no`` -- **keys**: Define in which track or album fields duplicates are to be - searched. By default, the plugin uses the musicbrainz track and album IDs for - this purpose. Using the ``keys`` option (as a YAML list in the configuration - file, or as space-delimited strings in the command-line), you can extend this - behavior to consider other attributes. - Default: ``[mb_trackid, mb_albumid]`` -- **merge**: Merge duplicate items by consolidating tracks and-or - metadata where possible. -- **move**: A destination base directory into which it will move matched - items. + duplicates themselves. Default: ``no`` +- **keys**: Define in which track or album fields duplicates are to be searched. + By default, the plugin uses the musicbrainz track and album IDs for this + purpose. Using the ``keys`` option (as a YAML list in the configuration file, + or as space-delimited strings in the command-line), you can extend this + behavior to consider other attributes. Default: ``[mb_trackid, mb_albumid]`` +- **merge**: Merge duplicate items by consolidating tracks and-or metadata where + possible. +- **move**: A destination base directory into which it will move matched items. Default: none (disabled). - **path**: Output the path instead of metadata when listing duplicates. Default: ``no``. -- **strict**: Do not report duplicate matches if some of the - attributes are not defined (ie. null or empty). - Default: ``no`` +- **strict**: Do not report duplicate matches if some of the attributes are not + defined (ie. null or empty). Default: ``no`` - **tag**: A ``key=value`` pair. The plugin will add a new ``key`` attribute with ``value`` value as a flexattr to the database for duplicate items. Default: ``no``. 
-- **tiebreak**: Dictionary of lists of attributes keyed by ``items`` - or ``albums`` to use when choosing duplicates. By default, the - tie-breaking procedure favors the most complete metadata attribute - set. If you would like to consider the lower bitrates as duplicates, - for example, set ``tiebreak: items: [bitrate]``. - Default: ``{}``. +- **tiebreak**: Dictionary of lists of attributes keyed by ``items`` or + ``albums`` to use when choosing duplicates. By default, the tie-breaking + procedure favors the most complete metadata attribute set. If you would like + to consider the lower bitrates as duplicates, for example, set ``tiebreak: + items: [bitrate]``. Default: ``{}``. +- **remove**: Remove matched items from the library, but not from the disk. + Default: ``no``. Examples -------- -List all duplicate tracks in your collection:: +List all duplicate tracks in your collection: - beet duplicates +:: -List all duplicate tracks from 2008:: + beet duplicates - beet duplicates year:2008 +List all duplicate tracks from 2008: -Print out a unicode histogram of duplicate track years using `spark`_:: +:: - beet duplicates -f '$year' | spark - ▆▁▆█▄▇▇▄▇▇▁█▇▆▇▂▄█▁██▂█▁▁██▁█▂▇▆▂▇█▇▇█▆▆▇█▇█▇▆██▂▇ + beet duplicates year:2008 -Print out a listing of all albums with duplicate tracks, and respective -counts:: +Print out a unicode histogram of duplicate track years using spark_: - beet duplicates -ac +:: -The same as the above but include the original album, and show the path:: + beet duplicates -f '$year' | spark + ▆▁▆█▄▇▇▄▇▇▁█▇▆▇▂▄█▁██▂█▁▁██▁█▂▇▆▂▇█▇▇█▆▆▇█▇█▇▆██▂▇ - beet duplicates -acf '$path' +Print out a listing of all albums with duplicate tracks, and respective counts: -Get tracks with the same title, artist, and album:: +:: - beet duplicates -k title -k albumartist -k album + beet duplicates -ac -Compute Adler CRC32 or MD5 checksums, storing them as flexattrs, and report -back duplicates based on those values:: +The same as the above but include the original album, and 
show the path: - beet dup -C 'ffmpeg -i {file} -f crc -' - beet dup -C 'md5sum {file}' +:: -Copy highly danceable items to ``party`` directory:: + beet duplicates -acf '$path' - beet dup --copy /tmp/party +Get tracks with the same title, artist, and album: -Move likely duplicates to ``trash`` directory:: +:: - beet dup --move ${HOME}/.Trash + beet duplicates -k title -k albumartist -k album -Delete items (careful!), if they're Nickelback:: +Compute Adler CRC32 or MD5 checksums, storing them as flexattrs, and report back +duplicates based on those values: - beet duplicates --delete -k albumartist -k albumartist:nickelback +:: -Tag duplicate items with some flag:: + beet dup -C 'ffmpeg -i {file} -f crc -' + beet dup -C 'md5sum {file}' - beet duplicates --tag dup=1 +Copy highly danceable items to ``party`` directory: -Ignore items with undefined keys:: +:: - beet duplicates --strict + beet dup --copy /tmp/party -Merge and delete duplicate albums with different missing tracks:: +Move likely duplicates to ``trash`` directory: - beet duplicates --album --merge --delete +:: + + beet dup --move ${HOME}/.Trash + +Delete items (careful!), if they're Nickelback: + +:: + + beet duplicates --delete -k albumartist -k albumartist:nickelback + +Tag duplicate items with some flag: + +:: + + beet duplicates --tag dup=1 + +Ignore items with undefined keys: + +:: + + beet duplicates --strict + +Merge and delete duplicate albums with different missing tracks: + +:: + + beet duplicates --album --merge --delete .. _spark: https://github.com/holman/spark diff --git a/docs/plugins/edit.rst b/docs/plugins/edit.rst index fe5e348d6..ab38dd169 100644 --- a/docs/plugins/edit.rst +++ b/docs/plugins/edit.rst @@ -5,9 +5,11 @@ The ``edit`` plugin lets you modify music metadata using your favorite text editor. 
Enable the ``edit`` plugin in your configuration (see :ref:`using-plugins`) and -then type:: +then type: - beet edit QUERY +:: + + beet edit QUERY Your text editor (i.e., the command in your ``$VISUAL`` or ``$EDITOR`` environment variable) will open with a list of tracks to edit. Make your changes @@ -19,15 +21,17 @@ Command-Line Options The ``edit`` command has these command-line options: - ``-a`` or ``--album``: Edit albums instead of individual items. -- ``-f FIELD`` or ``--field FIELD``: Specify an additional field to edit - (in addition to the defaults set in the configuration). +- ``-f FIELD`` or ``--field FIELD``: Specify an additional field to edit (in + addition to the defaults set in the configuration). - ``--all``: Edit *all* available fields. Interactive Usage ----------------- The ``edit`` plugin can also be invoked during an import session. If enabled, it -adds two new options to the user prompt:: +adds two new options to the user prompt: + +:: [A]pply, More candidates, Skip, Use as-is, as Tracks, Group albums, Enter search, enter Id, aBort, eDit, edit Candidates? @@ -38,8 +42,8 @@ adds two new options to the user prompt:: Please note that currently the interactive usage of the plugin will only allow you to change the item-level fields. In case you need to edit the album-level -fields, the recommended approach is to invoke the plugin via the command line -in album mode (``beet edit -a QUERY``) after the import. +fields, the recommended approach is to invoke the plugin via the command line in +album mode (``beet edit -a QUERY``) after the import. Also, please be aware that the ``edit Candidates`` choice can only be used with the matches found during the initial search (and currently not supporting the @@ -50,11 +54,10 @@ cases where you already have a specific candidate ID that you want to edit. Configuration ------------- -To configure the plugin, make an ``edit:`` section in your configuration -file. 
The available options are: +To configure the plugin, make an ``edit:`` section in your configuration file. +The available options are: -- **itemfields**: A space-separated list of item fields to include in the - editor by default. - Default: ``track title artist album`` +- **itemfields**: A space-separated list of item fields to include in the editor + by default. Default: ``track title artist album`` - **albumfields**: The same when editing albums (with the ``-a`` option). Default: ``album albumartist`` diff --git a/docs/plugins/embedart.rst b/docs/plugins/embedart.rst index 9a91055f4..abbe2460d 100644 --- a/docs/plugins/embedart.rst +++ b/docs/plugins/embedart.rst @@ -25,15 +25,14 @@ This behavior can be disabled with the ``auto`` config option (see below). .. _image-similarity-check: Image Similarity -'''''''''''''''' +~~~~~~~~~~~~~~~~ When importing a lot of files with the ``auto`` option, one may be reluctant to overwrite existing embedded art for all of them. You can tell beets to avoid embedding images that are too different from the -existing ones. -This works by computing the perceptual hashes (`PHASH`_) of the two images and -checking that the difference between the two does not exceed a +existing ones. This works by computing the perceptual hashes (PHASH_) of the two +images and checking that the difference between the two does not exceed a threshold. You can set the threshold with the ``compare_threshold`` option. A threshold of 0 (the default) disables similarity checking and always embeds @@ -41,7 +40,7 @@ new images. Set the threshold to another number---we recommend between 10 and 100---to adjust the sensitivity of the comparison. The smaller the threshold number, the more similar the images must be. -This feature requires `ImageMagick`_. +This feature requires ImageMagick_. Configuration ------------- @@ -49,40 +48,37 @@ Configuration To configure the plugin, make an ``embedart:`` section in your configuration file. 
The available options are: -- **auto**: Enable automatic album art embedding. - Default: ``yes``. -- **compare_threshold**: How similar candidate art must be to - existing art to be written to the file (see :ref:`image-similarity-check`). - Default: 0 (disabled). +- **auto**: Enable automatic album art embedding. Default: ``yes``. +- **compare_threshold**: How similar candidate art must be to existing art to be + written to the file (see :ref:`image-similarity-check`). Default: 0 + (disabled). - **ifempty**: Avoid embedding album art for files that already have art - embedded. - Default: ``no``. -- **maxwidth**: A maximum width to downscale images before embedding - them (the original image file is not altered). The resize operation reduces - image width to at most ``maxwidth`` pixels. The height is recomputed so that - the aspect ratio is preserved. See also :ref:`image-resizing` for further - caveats about image resizing. - Default: 0 (disabled). + embedded. Default: ``no``. +- **maxwidth**: A maximum width to downscale images before embedding them (the + original image file is not altered). The resize operation reduces image width + to at most ``maxwidth`` pixels. The height is recomputed so that the aspect + ratio is preserved. See also :ref:`image-resizing` for further caveats about + image resizing. Default: 0 (disabled). - **quality**: The JPEG quality level to use when compressing images (when - ``maxwidth`` is set). This should be either a number from 1 to 100 or 0 to - use the default quality. 65–75 is usually a good starting point. The default + ``maxwidth`` is set). This should be either a number from 1 to 100 or 0 to use + the default quality. 65–75 is usually a good starting point. The default behavior depends on the imaging tool used for scaling: ImageMagick tries to estimate the input image quality and uses 92 if it cannot be determined, and - PIL defaults to 75. - Default: 0 (disabled) + PIL defaults to 75. 
Default: 0 (disabled) - **remove_art_file**: Automatically remove the album art file for the album after it has been embedded. This option is best used alongside the :doc:`FetchArt </plugins/fetchart>` plugin to download art with the purpose of - directly embedding it into the file's metadata without an "intermediate" - album art file. - Default: ``no``. + directly embedding it into the file's metadata without an "intermediate" album + art file. Default: ``no``. -Note: ``compare_threshold`` option requires `ImageMagick`_, and ``maxwidth`` -requires either `ImageMagick`_ or `Pillow`_. +Note: ``compare_threshold`` option requires ImageMagick_, and ``maxwidth`` +requires either ImageMagick_ or Pillow_. -.. _Pillow: https://github.com/python-pillow/Pillow -.. _ImageMagick: https://www.imagemagick.org/ -.. _PHASH: http://www.fmwconcepts.com/misc_tests/perceptual_hash_test_results_510/ +.. _imagemagick: https://www.imagemagick.org/ + +.. _phash: http://www.fmwconcepts.com/misc_tests/perceptual_hash_test_results_510/ + +.. _pillow: https://github.com/python-pillow/Pillow Manually Embedding and Extracting Art ------------------------------------- @@ -90,29 +86,28 @@ Manually Embedding and Extracting Art The ``embedart`` plugin provides a couple of commands for manually managing embedded album art: -* ``beet embedart [-f IMAGE] QUERY``: embed images in every track of the - albums matching the query. If the ``-f`` (``--file``) option is given, then - use a specific image file from the filesystem; otherwise, each album embeds - its own currently associated album art. The command prompts for confirmation - before making the change unless you specify the ``-y`` (``--yes``) option. - -* ``beet embedart [-u IMAGE_URL] QUERY``: embed image specified in the URL - into every track of the albums matching the query. The ``-u`` (``--url``) option can be used to specify the URL of the image to be used. 
The command prompts for confirmation before making the change unless you specify the ``-y`` (``--yes``) option. - -* ``beet extractart [-a] [-n FILE] QUERY``: extracts the images for all albums +- ``beet embedart [-f IMAGE] QUERY``: embed images in every track of the albums + matching the query. If the ``-f`` (``--file``) option is given, then use a + specific image file from the filesystem; otherwise, each album embeds its own + currently associated album art. The command prompts for confirmation before + making the change unless you specify the ``-y`` (``--yes``) option. +- ``beet embedart [-u IMAGE_URL] QUERY``: embed image specified in the URL into + every track of the albums matching the query. The ``-u`` (``--url``) option + can be used to specify the URL of the image to be used. The command prompts + for confirmation before making the change unless you specify the ``-y`` + (``--yes``) option. +- ``beet extractart [-a] [-n FILE] QUERY``: extracts the images for all albums matching the query. The images are placed inside the album folder. You can specify the destination file name using the ``-n`` option, but leave off the extension: it will be chosen automatically. The destination filename is specified using the ``art_filename`` configuration option. It defaults to - ``cover`` if it's not specified via ``-o`` nor the config. - Using ``-a``, the extracted image files are automatically associated with the - corresponding album. - -* ``beet extractart -o FILE QUERY``: extracts the image from an item matching + ``cover`` if it's not specified via ``-o`` nor the config. Using ``-a``, the + extracted image files are automatically associated with the corresponding + album. +- ``beet extractart -o FILE QUERY``: extracts the image from an item matching the query and stores it in a file. You have to specify the destination file using the ``-o`` option, but leave off the extension: it will be chosen automatically. 
- -* ``beet clearart QUERY``: removes all embedded images from all items matching +- ``beet clearart QUERY``: removes all embedded images from all items matching the query. The command prompts for confirmation before making the change unless you specify the ``-y`` (``--yes``) option. diff --git a/docs/plugins/embyupdate.rst b/docs/plugins/embyupdate.rst index 1c6fc61e4..5bb69cca0 100644 --- a/docs/plugins/embyupdate.rst +++ b/docs/plugins/embyupdate.rst @@ -1,11 +1,11 @@ EmbyUpdate Plugin ================= -``embyupdate`` is a plugin that lets you automatically update `Emby`_'s library +``embyupdate`` is a plugin that lets you automatically update Emby_'s library whenever you change your beets library. -To use it, first enable the your configuration (see :ref:`using-plugins`). -Then, install ``beets`` with ``embyupdate`` extra +To use it, first enable it in your configuration (see :ref:`using-plugins`). Then, +install ``beets`` with ``embyupdate`` extra .. code-block:: bash @@ -22,26 +22,27 @@ that using an ``emby`` section in your ``config.yaml`` username: user apikey: apikey -With that all in place, you'll see beets send the "update" command to your Emby server every time you change your beets library. +With that all in place, you'll see beets send the "update" command to your Emby +server every time you change your beets library. -.. _Emby: https://emby.media/ +.. _emby: https://emby.media/ Configuration ------------- The available options under the ``emby:`` section are: -- **host**: The Emby server host. You also can include ``http://`` or ``https://``. - Default: ``localhost`` -- **port**: The Emby server port. - Default: 8096 -- **username**: A username of an Emby user that is allowed to refresh the library. +- **host**: The Emby server host. You also can include ``http://`` or + ``https://``. Default: ``localhost`` +- **port**: The Emby server port. Default: 8096 +- **username**: A username of an Emby user that is allowed to refresh the + library. 
- **userid**: A user ID of an Emby user that is allowed to refresh the library. (This is only necessary for private users i.e. when the user is hidden from login screens) - **apikey**: An Emby API key for the user. -- **password**: The password for the user. (This is only necessary if no API - key is provided.) +- **password**: The password for the user. (This is only necessary if no API key + is provided.) You can choose to authenticate either with ``apikey`` or ``password``, but only one of those two is required. diff --git a/docs/plugins/export.rst b/docs/plugins/export.rst index bca9d1e5a..b8e14ef22 100644 --- a/docs/plugins/export.rst +++ b/docs/plugins/export.rst @@ -1,54 +1,56 @@ Export Plugin ============= -The ``export`` plugin lets you get data from the items and export the content -as `JSON`_, `CSV`_, or `XML`_. +The ``export`` plugin lets you get data from the items and export the content as +JSON_, CSV_, or XML_. -.. _JSON: https://www.json.org -.. _CSV: https://fileinfo.com/extension/csv -.. _XML: https://fileinfo.com/extension/xml +.. _csv: https://fileinfo.com/extension/csv -Enable the ``export`` plugin (see :ref:`using-plugins` for help). Then, type ``beet export`` followed by a :doc:`query </reference/query>` to get the data from -your library. For example, run this:: +.. _json: https://www.json.org + +.. _xml: https://fileinfo.com/extension/xml + +Enable the ``export`` plugin (see :ref:`using-plugins` for help). Then, type +``beet export`` followed by a :doc:`query </reference/query>` to get the data +from your library. For example, run this: + +:: $ beet export beatles to print a JSON file containing information about your Beatles tracks. - Command-Line Options -------------------- The ``export`` command has these command-line options: -* ``--include-keys`` or ``-i``: Choose the properties to include in the output +- ``--include-keys`` or ``-i``: Choose the properties to include in the output data. 
The argument is a comma-separated list of simple glob patterns where - ``*`` matches any string. For example:: + ``*`` matches any string. For example: + + :: $ beet export -i 'title,mb*' beatles - will include the ``title`` property and all properties starting with - ``mb``. You can add the ``-i`` option multiple times to the command - line. + will include the ``title`` property and all properties starting with ``mb``. + You can add the ``-i`` option multiple times to the command line. -* ``--library`` or ``-l``: Show data from the library database instead of the +- ``--library`` or ``-l``: Show data from the library database instead of the files' tags. - -* ``--album`` or ``-a``: Show data from albums instead of tracks (implies +- ``--album`` or ``-a``: Show data from albums instead of tracks (implies ``--library``). - -* ``--output`` or ``-o``: Path for an output file. If not informed, will print +- ``--output`` or ``-o``: Path for an output file. If not informed, will print the data in the console. - -* ``--append``: Appends the data to the file instead of writing. - -* ``--format`` or ``-f``: Specifies the format the data will be exported as. If not informed, JSON will be used by default. The format options include csv, json, `jsonlines <https://jsonlines.org/>`_ and xml. +- ``--append``: Appends the data to the file instead of writing. +- ``--format`` or ``-f``: Specifies the format the data will be exported as. If + not informed, JSON will be used by default. The format options include csv, + json, `jsonlines <https://jsonlines.org/>`_ and xml. Configuration ------------- -To configure the plugin, make a ``export:`` section in your configuration -file. +To configure the plugin, make a ``export:`` section in your configuration file. 
For JSON export, these options are available under the ``json`` and ``jsonlines`` keys: @@ -57,19 +59,22 @@ For JSON export, these options are available under the ``json`` and - **separators**: A ``[item_separator, dict_separator]`` tuple. - **sort_keys**: Sorts the keys in JSON dictionaries. -Those options match the options from the `Python json module`_. -Similarly, these options are available for the CSV format under the ``csv`` -key: +Those options match the options from the `Python json module`_. Similarly, these +options are available for the CSV format under the ``csv`` key: -- **delimiter**: Used as the separating character between fields. The default value is a comma (,). -- **dialect**: The kind of CSV file to produce. The default is `excel`. +- **delimiter**: Used as the separating character between fields. The default + value is a comma (,). +- **dialect**: The kind of CSV file to produce. The default is ``excel``. These options match the options from the `Python csv module`_. -.. _Python json module: https://docs.python.org/2/library/json.html#basic-usage -.. _Python csv module: https://docs.python.org/3/library/csv.html#csv-fmt-params +.. _python csv module: https://docs.python.org/3/library/csv.html#csv-fmt-params -The default options look like this:: +.. _python json module: https://docs.python.org/3/library/json.html#basic-usage + +The default options look like this: + +:: export: json: diff --git a/docs/plugins/fetchart.rst b/docs/plugins/fetchart.rst index 1da92a493..fd578212a 100644 --- a/docs/plugins/fetchart.rst +++ b/docs/plugins/fetchart.rst @@ -28,57 +28,53 @@ Configuration To configure the plugin, make a ``fetchart:`` section in your configuration file. The available options are: -- **auto**: Enable automatic album art fetching during import. - Default: ``yes``. +- **auto**: Enable automatic album art fetching during import. Default: ``yes``. 
- **cautious**: Pick only trusted album art by ignoring filenames that do not - contain one of the keywords in ``cover_names``. - Default: ``no``. -- **cover_names**: Prioritize images containing words in this list. - Default: ``cover front art album folder``. + contain one of the keywords in ``cover_names``. Default: ``no``. +- **cover_names**: Prioritize images containing words in this list. Default: + ``cover front art album folder``. +- **fallback**: Path to a fallback album art file if no album art was found + otherwise. Default: ``None`` (disabled). - **minwidth**: Only images with a width bigger or equal to ``minwidth`` are considered as valid album art candidates. Default: 0. - **maxwidth**: A maximum image width to downscale fetched images if they are too big. The resize operation reduces image width to at most ``maxwidth`` pixels. The height is recomputed so that the aspect ratio is preserved. See the section on :ref:`cover-art-archive-maxwidth` below for additional - information regarding the Cover Art Archive source. - Default: 0 (no maximum is enforced). + information regarding the Cover Art Archive source. Default: 0 (no maximum is + enforced). - **quality**: The JPEG quality level to use when compressing images (when - ``maxwidth`` is set). This should be either a number from 1 to 100 or 0 to - use the default quality. 65–75 is usually a good starting point. The default + ``maxwidth`` is set). This should be either a number from 1 to 100 or 0 to use + the default quality. 65–75 is usually a good starting point. The default behavior depends on the imaging tool used for scaling: ImageMagick tries to estimate the input image quality and uses 92 if it cannot be determined, and - PIL defaults to 75. - Default: 0 (disabled) + PIL defaults to 75. Default: 0 (disabled) - **max_filesize**: The maximum size of a target piece of cover art in bytes. - When using an ImageMagick backend this sets - ``-define jpeg:extent=max_filesize``. 
Using PIL this will reduce JPG quality - by up to 50% to attempt to reach the target filesize. Neither method is - *guaranteed* to reach the target size, however in most cases it should - succeed. - Default: 0 (disabled) -- **enforce_ratio**: Only images with a width:height ratio of 1:1 are - considered as valid album art candidates if set to ``yes``. - It is also possible to specify a certain deviation to the exact ratio to - still be considered valid. This can be done either in pixels - (``enforce_ratio: 10px``) or as a percentage of the longer edge - (``enforce_ratio: 0.5%``). Default: ``no``. -- **sources**: List of sources to search for images. An asterisk `*` expands - to all available sources. - Default: ``filesystem coverart itunes amazon albumart``, i.e., everything but - ``wikipedia``, ``google``, ``fanarttv`` and ``lastfm``. Enable those sources - for more matches at the cost of some speed. They are searched in the given - order, thus in the default config, no remote (Web) art source are queried if - local art is found in the filesystem. To use a local image as fallback, - move it to the end of the list. For even more fine-grained control over - the search order, see the section on :ref:`album-art-sources` below. + When using an ImageMagick backend this sets ``-define + jpeg:extent=max_filesize``. Using PIL this will reduce JPG quality by up to + 50% to attempt to reach the target filesize. Neither method is *guaranteed* to + reach the target size, however in most cases it should succeed. Default: 0 + (disabled) +- **enforce_ratio**: Only images with a width:height ratio of 1:1 are considered + as valid album art candidates if set to ``yes``. It is also possible to + specify a certain deviation to the exact ratio to still be considered valid. + This can be done either in pixels (``enforce_ratio: 10px``) or as a percentage + of the longer edge (``enforce_ratio: 0.5%``). Default: ``no``. +- **sources**: List of sources to search for images. 
An asterisk ``*`` expands + to all available sources. Default: ``filesystem coverart itunes amazon + albumart``, i.e., everything but ``wikipedia``, ``google``, ``fanarttv`` and + ``lastfm``. Enable those sources for more matches at the cost of some speed. + They are searched in the given order, thus in the default config, no remote + (Web) art sources are queried if local art is found in the filesystem. To use a + local image as fallback, move it to the end of the list. For even more + fine-grained control over the search order, see the section on + :ref:`album-art-sources` below. - **google_key**: Your Google API key (to enable the Google Custom Search - backend). - Default: None. -- **google_engine**: The custom search engine to use. - Default: The `beets custom search engine`_, which searches the entire web. -- **fanarttv_key**: The personal API key for requesting art from - fanart.tv. See below. + backend). Default: None. +- **google_engine**: The custom search engine to use. Default: The `beets custom + search engine`_, which searches the entire web. +- **fanarttv_key**: The personal API key for requesting art from fanart.tv. See + below. - **lastfm_key**: The personal API key for requesting art from Last.fm. See below. - **store_source**: If enabled, fetchart stores the artwork's source in a @@ -87,64 +83,70 @@ file. The available options are: - **high_resolution**: If enabled, fetchart retrieves artwork in the highest resolution it can find (warning: image files can sometimes reach >20MB). Default: ``no``.
- **cover_format**: If enabled, forced the cover image into the specified - format. Most often, this will be either ``JPEG`` or ``PNG`` [#imgformats]_. - Also respects ``deinterlace``. - Default: None (leave unchanged). + format. Most often, this will be either ``JPEG`` or ``PNG`` (see + image-formats_). Also respects ``deinterlace``. Default: None (leave + unchanged). -Note: ``maxwidth`` and ``enforce_ratio`` options require either `ImageMagick`_ -or `Pillow`_. +Note: ``maxwidth`` and ``enforce_ratio`` options require either ImageMagick_ or +Pillow_. .. note:: - Previously, there was a ``remote_priority`` option to specify when to - look for art on the filesystem. This is - still respected, but a deprecation message will be shown until you - replace this configuration with the new ``filesystem`` value in the - ``sources`` array. + Previously, there was a ``remote_priority`` option to specify when to look + for art on the filesystem. This is still respected, but a deprecation + message will be shown until you replace this configuration with the new + ``filesystem`` value in the ``sources`` array. + +.. _image-formats: + +.. admonition:: Image formats + + Other image formats are available, though the full list depends on your + system and what backend you are using. If you're using the ImageMagick + backend, you can use ``magick identify -list format`` to get a full list of + all supported formats, and you can use the Python function + PIL.features.pilinfo() to print a list of all supported formats in Pillow + (``python3 -c 'import PIL.features as f; f.pilinfo()'``). .. _beets custom search engine: https://cse.google.com.au:443/cse/publicurl?cx=001442825323518660753:hrh5ch1gjzm -.. _Pillow: https://github.com/python-pillow/Pillow -.. _ImageMagick: https://www.imagemagick.org/ -.. [#imgformats] Other image formats are available, though the full list - depends on your system and what backend you are using. 
If you're using the - ImageMagick backend, you can use ``magick identify -list format`` to get a - full list of all supported formats, and you can use the Python function - PIL.features.pilinfo() to print a list of all supported formats in Pillow - (``python3 -c 'import PIL.features as f; f.pilinfo()'``). Here's an example that makes plugin select only images that contain ``front`` or ``back`` keywords in their filenames and prioritizes the iTunes source over -others:: +others: + +:: fetchart: cautious: true cover_names: front back sources: itunes * - Manually Fetching Album Art --------------------------- Use the ``fetchart`` command to download album art after albums have already -been imported:: +been imported: + +:: $ beet fetchart [-f] [query] By default, the command will only look for album art when the album doesn't -already have it; the ``-f`` or ``--force`` switch makes it search for art -in Web databases regardless. If you specify a query, only matching albums will -be processed; otherwise, the command processes every album in your library. +already have it; the ``-f`` or ``--force`` switch makes it search for art in Web +databases regardless. If you specify a query, only matching albums will be +processed; otherwise, the command processes every album in your library. Display Only Missing Album Art ------------------------------ -Use the ``fetchart`` command with the ``-q`` switch in order to display only missing -art:: +Use the ``fetchart`` command with the ``-q`` switch in order to display only +missing art: + +:: $ beet fetchart [-q] [query] @@ -157,7 +159,7 @@ fetched, or for which artwork could not be found will be printed. Image Resizing -------------- -Beets can resize images using `Pillow`_, `ImageMagick`_, or a server-side resizing +Beets can resize images using Pillow_, ImageMagick_, or a server-side resizing proxy. If either Pillow or ImageMagick is installed, beets will use those; otherwise, it falls back to the resizing proxy. 
If the resizing proxy is used, no resizing is performed for album art found on the filesystem---only downloaded @@ -169,9 +171,6 @@ On some versions of Windows, the program can be shadowed by a system-provided ``convert.exe``. On these systems, you may need to modify your ``%PATH%`` environment variable so that ImageMagick comes first or use Pillow instead. -.. _Pillow: https://github.com/python-pillow/Pillow -.. _ImageMagick: https://www.imagemagick.org/ - .. _album-art-sources: Album Art Sources @@ -179,9 +178,8 @@ Album Art Sources By default, this plugin searches for art in the local filesystem as well as on the Cover Art Archive, the iTunes Store, Amazon, and AlbumArt.org, in that -order. -You can reorder the sources or remove -some to speed up the process using the ``sources`` configuration option. +order. You can reorder the sources or remove some to speed up the process using +the ``sources`` configuration option. When looking for local album art, beets checks for image files located in the same folder as the music files you're importing. Beets prefers to use an image @@ -190,8 +188,10 @@ the absence of well-known names, it will use any image file in the same folder as your music files. For some of the art sources, the backend service can match artwork by various -criteria. If you want finer control over the search order in such cases, you -can use this alternative syntax for the ``sources`` option:: +criteria. If you want finer control over the search order in such cases, you can +use this alternative syntax for the ``sources`` option: + +:: fetchart: sources: @@ -203,73 +203,72 @@ can use this alternative syntax for the ``sources`` option:: where listing a source without matching criteria will default to trying all available strategies. Entries of the forms ``coverart: release releasegroup`` -and ``coverart: *`` are also valid. 
-Currently, only the ``coverart`` source supports multiple criteria: -namely, ``release`` and ``releasegroup``, which refer to the -respective MusicBrainz IDs. +and ``coverart: *`` are also valid. Currently, only the ``coverart`` source +supports multiple criteria: namely, ``release`` and ``releasegroup``, which +refer to the respective MusicBrainz IDs. When you choose to apply changes during an import, beets will search for art as -described above. For "as-is" imports (and non-autotagged imports using the +described above. For "as-is" imports (and non-autotagged imports using the ``-A`` flag), beets only looks for art on the local filesystem. Google custom search -'''''''''''''''''''' +~~~~~~~~~~~~~~~~~~~~ -To use the google image search backend you need to -`register for a Google API key`_. Set the ``google_key`` configuration -option to your key, then add ``google`` to the list of sources in your -configuration. +To use the google image search backend you need to `register for a Google API +key`_. Set the ``google_key`` configuration option to your key, then add +``google`` to the list of sources in your configuration. -.. _register for a Google API key: https://console.developers.google.com. +.. _register for a google api key: https://console.developers.google.com. Optionally, you can `define a custom search engine`_. Get your search engine's -token and use it for your ``google_engine`` configuration option. The -default engine searches the entire web for cover art. +token and use it for your ``google_engine`` configuration option. The default +engine searches the entire web for cover art. .. _define a custom search engine: https://www.google.com/cse/all -Note that the Google custom search API is limited to 100 queries per day. -After that, the fetchart plugin will fall back on other declared data sources. +Note that the Google custom search API is limited to 100 queries per day. After +that, the fetchart plugin will fall back on other declared data sources. 
Fanart.tv -''''''''' +~~~~~~~~~ -Although not strictly necessary right now, you might think about -`registering a personal fanart.tv API key`_. Set the ``fanarttv_key`` -configuration option to your key, then add ``fanarttv`` to the list of sources -in your configuration. +Although not strictly necessary right now, you might think about `registering a +personal fanart.tv API key`_. Set the ``fanarttv_key`` configuration option to +your key, then add ``fanarttv`` to the list of sources in your configuration. -.. _registering a personal fanart.tv API key: https://fanart.tv/get-an-api-key/ +.. _registering a personal fanart.tv api key: https://fanart.tv/get-an-api-key/ More detailed information can be found `on their Wiki`_. Specifically, the personal key will give you earlier access to new art. -.. _on their Wiki: https://wiki.fanart.tv/General/personal%20api/ +.. _on their wiki: https://wiki.fanart.tv/General/personal%20api/ Last.fm -''''''' +~~~~~~~ To use the Last.fm backend, you need to `register for a Last.fm API key`_. Set the ``lastfm_key`` configuration option to your API key, then add ``lastfm`` to the list of sources in your configuration. -.. _register for a Last.fm API key: https://www.last.fm/api/account/create +.. _register for a last.fm api key: https://www.last.fm/api/account/create Spotify -''''''' +~~~~~~~ -Spotify backend is enabled by default and will update album art if a valid Spotify album id is found. +Spotify backend is enabled by default and will update album art if a valid +Spotify album id is found. + +.. _beautifulsoup: https://www.crummy.com/software/BeautifulSoup/bs4/doc/ .. _pip: https://pip.pypa.io -.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/bs4/doc/ Cover Art URL -''''''''''''' +~~~~~~~~~~~~~ -The `fetchart` plugin can also use a flexible attribute field ``cover_art_url`` -where you can manually specify the image URL to be used as cover art. 
Any custom -plugin can use this field to provide the cover art and ``fetchart`` will use it -as a source. +The ``fetchart`` plugin can also use a flexible attribute field +``cover_art_url`` where you can manually specify the image URL to be used as +cover art. Any custom plugin can use this field to provide the cover art and +``fetchart`` will use it as a source. .. _cover-art-archive-maxwidth: @@ -277,20 +276,24 @@ Cover Art Archive Pre-sized Thumbnails -------------------------------------- The CAA provides pre-sized thumbnails of width 250, 500, and 1200 pixels. If you -set the `maxwidth` option to one of these values, the corresponding image will -be downloaded, saving `beets` the need to scale down the image. It can also +set the ``maxwidth`` option to one of these values, the corresponding image will +be downloaded, saving ``beets`` the need to scale down the image. It can also speed up the downloading process, as some cover arts can sometimes be very large. Storing the Artwork's Source ---------------------------- -Storing the current artwork's source might be used to narrow down -``fetchart`` commands. For example, if some albums have artwork placed -manually in their directories that should not be replaced by a forced -album art fetch, you could do +Storing the current artwork's source might be used to narrow down ``fetchart`` +commands. For example, if some albums have artwork placed manually in their +directories that should not be replaced by a forced album art fetch, you could +do ``beet fetchart -f ^art_source:filesystem`` The values written to ``art_source`` are the same names used in the ``sources`` configuration value. + +.. _imagemagick: https://www.imagemagick.org/ + +.. 
_pillow: https://github.com/python-pillow/Pillow diff --git a/docs/plugins/filefilter.rst b/docs/plugins/filefilter.rst index 21600aca7..f56d14553 100644 --- a/docs/plugins/filefilter.rst +++ b/docs/plugins/filefilter.rst @@ -1,8 +1,8 @@ FileFilter Plugin ================= -The ``filefilter`` plugin allows you to skip files during import using -regular expressions. +The ``filefilter`` plugin allows you to skip files during import using regular +expressions. To use the ``filefilter`` plugin, enable it in your configuration (see :ref:`using-plugins`). @@ -10,18 +10,20 @@ To use the ``filefilter`` plugin, enable it in your configuration (see Configuration ------------- -To configure the plugin, make a ``filefilter:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``filefilter:`` section in your configuration +file. The available options are: - **path**: A regular expression to filter files based on their path and name. Default: ``.*`` (import everything) - **album_path** and **singleton_path**: You may specify different regular expressions used for imports of albums and singletons. This way, you can automatically skip singletons when importing albums if the names (and paths) - of the files are distinguishable via a regex. The regexes defined here - take precedence over the global ``path`` option. + of the files are distinguishable via a regex. The regexes defined here take + precedence over the global ``path`` option. -Here's an example:: +Here's an example: + +:: filefilter: path: .*\d\d[^/]+$ diff --git a/docs/plugins/fish.rst b/docs/plugins/fish.rst index 0c89576c5..c1ae4f990 100644 --- a/docs/plugins/fish.rst +++ b/docs/plugins/fish.rst @@ -2,10 +2,10 @@ Fish Plugin =========== The ``fish`` plugin adds a ``beet fish`` command that creates a `Fish shell`_ -tab-completion file named ``beet.fish`` in ``~/.config/fish/completions``. -This enables tab-completion of ``beet`` commands for the `Fish shell`_. 
+tab-completion file named ``beet.fish`` in ``~/.config/fish/completions``. This +enables tab-completion of ``beet`` commands for the `Fish shell`_. -.. _Fish shell: https://fishshell.com/ +.. _fish shell: https://fishshell.com/ Configuration ------------- @@ -24,11 +24,11 @@ For users not accustomed to tab completion… After you type ``beet`` followed b a space in your shell prompt and then the ``TAB`` key, you should see a list of the beets commands (and their abbreviated versions) that can be invoked in your current environment. Similarly, typing ``beet -<TAB>`` will show you all the -option flags available to you, which also applies to subcommands such as -``beet import -<TAB>``. If you type ``beet ls`` followed by a space and then the -and the ``TAB`` key, you will see a list of all the album/track fields that can -be used in beets queries. For example, typing ``beet ls ge<TAB>`` will complete -to ``genre:`` and leave you ready to type the rest of your query. +option flags available to you, which also applies to subcommands such as ``beet +import -<TAB>``. If you type ``beet ls`` followed by a space and then +the ``TAB`` key, you will see a list of all the album/track fields that can be +used in beets queries. For example, typing ``beet ls ge<TAB>`` will complete to +``genre:`` and leave you ready to type the rest of your query. Options ------- @@ -41,17 +41,16 @@ commands and option flags. If you want generated completions to also contain album/track field *values* for the items in your library, you can use the ``-e`` or ``--extravalues`` option. -For example: ``beet fish -e genre`` or ``beet fish -e genre -e albumartist`` -In the latter case, subsequently typing ``beet list genre: <TAB>`` will display -a list of all the genres in your library and ``beet list albumartist: <TAB>`` -will show a list of the album artists in your library. 
Keep in mind that all of -these values will be put into the generated completions file, so use this option -with care when specified fields contain a large number of values. Libraries with, -for example, very large numbers of genres/artists may result in higher memory +For example: ``beet fish -e genre`` or ``beet fish -e genre -e albumartist``. In +the latter case, subsequently typing ``beet list genre: <TAB>`` will display a +list of all the genres in your library and ``beet list albumartist: <TAB>`` will +show a list of the album artists in your library. Keep in mind that all of these +values will be put into the generated completions file, so use this option with +care when specified fields contain a large number of values. Libraries with, for +example, very large numbers of genres/artists may result in higher memory utilization, completion latency, et cetera. This option is not meant to replace database queries altogether. By default, the completion file will be generated at -``~/.config/fish/completions/``. -If you want to save it somewhere else, you can use the ``-o`` or ``--output`` -option. +``~/.config/fish/completions/``. If you want to save it somewhere else, you can +use the ``-o`` or ``--output`` option. diff --git a/docs/plugins/freedesktop.rst b/docs/plugins/freedesktop.rst index 0368cc5da..c584fd08e 100644 --- a/docs/plugins/freedesktop.rst +++ b/docs/plugins/freedesktop.rst @@ -1,6 +1,6 @@ Freedesktop Plugin ================== -The ``freedesktop`` plugin created .directory files in your album folders. -This plugin is now deprecated and replaced by the :doc:`/plugins/thumbnails` -with the ``dolphin`` option enabled. +The ``freedesktop`` plugin created .directory files in your album folders. This +plugin is now deprecated and replaced by the :doc:`/plugins/thumbnails` with the +``dolphin`` option enabled. 
diff --git a/docs/plugins/fromfilename.rst b/docs/plugins/fromfilename.rst index 5cb6ccb76..e78677b86 100644 --- a/docs/plugins/fromfilename.rst +++ b/docs/plugins/fromfilename.rst @@ -1,13 +1,12 @@ FromFilename Plugin =================== -The ``fromfilename`` plugin helps to tag albums that are missing tags -altogether but where the filenames contain useful information like the artist -and title. +The ``fromfilename`` plugin helps to tag albums that are missing tags altogether +but where the filenames contain useful information like the artist and title. -When you attempt to import a track that's missing a title, this plugin will -look at the track's filename and guess its track number, title, and artist. -These will be used to search in MusicBrainz and match track ordering. +When you attempt to import a track that's missing a title, this plugin will look +at the track's filename and guess its track number, title, and artist. These +will be used to search in MusicBrainz and match track ordering. -To use the ``fromfilename`` plugin, enable it in your configuration -(see :ref:`using-plugins`). +To use the ``fromfilename`` plugin, enable it in your configuration (see +:ref:`using-plugins`). diff --git a/docs/plugins/ftintitle.rst b/docs/plugins/ftintitle.rst index 63d023dc9..7daea5582 100644 --- a/docs/plugins/ftintitle.rst +++ b/docs/plugins/ftintitle.rst @@ -10,8 +10,8 @@ tracks in your library like "Tellin' Me Things" by the artist "Blakroc feat. RZA". If you prefer to tag this as "Tellin' Me Things feat. RZA" by "Blakroc", then this plugin is for you. -To use the ``ftintitle`` plugin, enable it in your configuration -(see :ref:`using-plugins`). +To use the ``ftintitle`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Configuration ------------- @@ -19,23 +19,46 @@ Configuration To configure the plugin, make a ``ftintitle:`` section in your configuration file. The available options are: -- **auto**: Enable metadata rewriting during import. 
- Default: ``yes``. -- **drop**: Remove featured artists entirely instead of adding them to the - title field. - Default: ``no``. -- **format**: Defines the format for the featuring X part of the new title field. - In this format the ``{0}`` is used to define where the featured artists are placed. - Default: ``feat. {0}`` -- **keep_in_artist**: Keep the featuring X part in the artist field. This can - be useful if you still want to be able to search for features in the artist - field. - Default: ``no``. +- **auto**: Enable metadata rewriting during import. Default: ``yes``. +- **drop**: Remove featured artists entirely instead of adding them to the title + field. Default: ``no``. +- **format**: Defines the format for the featuring X part of the new title + field. In this format the ``{0}`` is used to define where the featured artists + are placed. Default: ``feat. {0}`` +- **keep_in_artist**: Keep the featuring X part in the artist field. This can be + useful if you still want to be able to search for features in the artist + field. Default: ``no``. +- **preserve_album_artist**: If the artist and the album artist are the same, + skip the ftintitle processing. Default: ``yes``. +- **custom_words**: List of additional words that will be treated as a marker + for artist features. Default: ``[]``. +- **bracket_keywords**: Controls where the featuring text is inserted when the + title includes bracketed qualifiers such as ``(Remix)`` or ``[Live]``. + FtInTitle inserts the new text before the first bracket whose contents match + any of these keywords. Supply a list of words to fine-tune the behavior or set + the list to ``[]`` to match *any* bracket regardless of its contents. 
Default: + + :: + + ["abridged", "acapella", "club", "demo", "edit", "edition", "extended", + "instrumental", "live", "mix", "radio", "release", "remaster", + "remastered", "remix", "rmx", "unabridged", "unreleased", + "version", "vip"] + +Path Template Values +-------------------- + +This plugin provides the ``album_artist_no_feat`` :ref:`template value +<templ_plugins>` that you can use in your :ref:`path-format-config` in +``paths.default``. Any ``custom_words`` in the configuration are taken into +account. Running Manually ---------------- -From the command line, type:: +From the command line, type: + +:: $ beet ftintitle [QUERY] @@ -45,4 +68,4 @@ your entire collection. Use the ``-d`` flag to remove featured artists (equivalent of the ``drop`` config option). -.. _MusicBrainz style: https://musicbrainz.org/doc/Style +.. _musicbrainz style: https://musicbrainz.org/doc/Style diff --git a/docs/plugins/fuzzy.rst b/docs/plugins/fuzzy.rst index 6b013b9f5..4a65a8827 100644 --- a/docs/plugins/fuzzy.rst +++ b/docs/plugins/fuzzy.rst @@ -5,8 +5,10 @@ The ``fuzzy`` plugin provides a prefixed query that searches your library using fuzzy pattern matching. This can be useful if you want to find a track with complicated characters in the title. -First, enable the plugin named ``fuzzy`` (see :ref:`using-plugins`). -You'll then be able to use the ``~`` prefix to use fuzzy matching:: +First, enable the plugin named ``fuzzy`` (see :ref:`using-plugins`). You'll then +be able to use the ``~`` prefix to use fuzzy matching: + +:: $ beet ls '~Vareoldur' Sigur Rós - Valtari - Varðeldur @@ -14,11 +16,10 @@ You'll then be able to use the ``~`` prefix to use fuzzy matching:: Configuration ------------- -To configure the plugin, make a ``fuzzy:`` section in your configuration -file. The available options are: +To configure the plugin, make a ``fuzzy:`` section in your configuration file. +The available options are: -- **threshold**: The "sensitivity" of the fuzzy match. 
A value of 1.0 will - show only perfect matches and a value of 0.0 will match everything. - Default: 0.7. -- **prefix**: The character used to designate fuzzy queries. - Default: ``~``, which may need to be escaped in some shells. +- **threshold**: The "sensitivity" of the fuzzy match. A value of 1.0 will show + only perfect matches and a value of 0.0 will match everything. Default: 0.7. +- **prefix**: The character used to designate fuzzy queries. Default: ``~``, + which may need to be escaped in some shells. diff --git a/docs/plugins/gmusic.rst b/docs/plugins/gmusic.rst deleted file mode 100644 index 412978bd6..000000000 --- a/docs/plugins/gmusic.rst +++ /dev/null @@ -1,5 +0,0 @@ -Gmusic Plugin -============= - -The ``gmusic`` plugin interfaced beets to Google Play Music. It has been -removed after the shutdown of this service. diff --git a/docs/plugins/hook.rst b/docs/plugins/hook.rst index 2c1dfec25..f3e847aa3 100644 --- a/docs/plugins/hook.rst +++ b/docs/plugins/hook.rst @@ -3,43 +3,42 @@ Hook Plugin Internally, beets uses *events* to tell plugins when something happens. For example, one event fires when the importer finishes processes a song, and -another triggers just before the ``beet`` command exits. -The ``hook`` plugin lets you run commands in response to these events. +another triggers just before the ``beet`` command exits. The ``hook`` plugin +lets you run commands in response to these events. .. _hook-configuration: Configuration ------------- -To configure the plugin, make a ``hook`` section in your configuration -file. The available options are: +To configure the plugin, make a ``hook`` section in your configuration file. The +available options are: -- **hooks**: A list of events and the commands to run - (see :ref:`individual-hook-configuration`). Default: Empty. +- **hooks**: A list of events and the commands to run (see + :ref:`individual-hook-configuration`). Default: Empty. .. 
_individual-hook-configuration: Configuring Each Hook -''''''''''''''''''''' +~~~~~~~~~~~~~~~~~~~~~ Each element under ``hooks`` should have these keys: -- **event**: The name of the event that will trigger this hook. - See the :ref:`plugin events <plugin_events>` documentation for a list - of possible values. +- **event**: The name of the event that will trigger this hook. See the + :ref:`plugin events <plugin_events>` documentation for a list of possible + values. - **command**: The command to run when this hook executes. .. _command-substitution: Command Substitution -'''''''''''''''''''' +~~~~~~~~~~~~~~~~~~~~ -Commands can access the parameters of events using `Python string -formatting`_. Use ``{name}`` in your command and the plugin will substitute it -with the named value. The name can also refer to a field, as in -``{album.path}``. +Commands can access the parameters of events using `Python string formatting`_. +Use ``{name}`` in your command and the plugin will substitute it with the named +value. The name can also refer to a field, as in ``{album.path}``. -.. _Python string formatting: https://www.python.org/dev/peps/pep-3101/ +.. _python string formatting: https://www.python.org/dev/peps/pep-3101/ You can find a list of all available events and their arguments in the :ref:`plugin events <plugin_events>` documentation. diff --git a/docs/plugins/ihate.rst b/docs/plugins/ihate.rst index f9cde39eb..47e679dbd 100644 --- a/docs/plugins/ihate.rst +++ b/docs/plugins/ihate.rst @@ -2,9 +2,8 @@ IHate Plugin ============ The ``ihate`` plugin allows you to automatically skip things you hate during -import or warn you about them. You specify queries (see -:doc:`/reference/query`) and the plugin skips (or warns about) albums or items -that match any query. +import or warn you about them. You specify queries (see :doc:`/reference/query`) +and the plugin skips (or warns about) albums or items that match any query. 
To use the ``ihate`` plugin, enable it in your configuration (see :ref:`using-plugins`). @@ -12,15 +11,17 @@ To use the ``ihate`` plugin, enable it in your configuration (see Configuration ------------- -To configure the plugin, make an ``ihate:`` section in your configuration -file. The available options are: +To configure the plugin, make an ``ihate:`` section in your configuration file. +The available options are: - **skip**: Never import items and albums that match a query in this list. Default: ``[]`` (empty list). - **warn**: Print a warning message for matches in this list of queries. Default: ``[]``. -Here's an example:: +Here's an example: + +:: ihate: warn: diff --git a/docs/plugins/importadded.rst b/docs/plugins/importadded.rst index 2a2e8ea29..8a6f92277 100644 --- a/docs/plugins/importadded.rst +++ b/docs/plugins/importadded.rst @@ -1,8 +1,8 @@ ImportAdded Plugin ================== -The ``importadded`` plugin is useful when an existing collection is imported -and the time when albums and items were added should be preserved. +The ``importadded`` plugin is useful when an existing collection is imported and +the time when albums and items were added should be preserved. To use the ``importadded`` plugin, enable it in your configuration (see :ref:`using-plugins`). @@ -11,30 +11,29 @@ Usage ----- The :abbr:`mtime (modification time)` of files that are imported into the -library are assumed to represent the time when the items were originally -added. +library are assumed to represent the time when the items were originally added. The ``item.added`` field is populated as follows: -* For singleton items with no album, ``item.added`` is set to the item's file +- For singleton items with no album, ``item.added`` is set to the item's file mtime before it was imported. 
-* For items that are part of an album, ``album.added`` and ``item.added`` are
+- For items that are part of an album, ``album.added`` and ``item.added`` are
   set to the oldest mtime of the files in the album before they were imported.
   The mtime of album directories is ignored.
 
-This plugin can optionally be configured to also preserve mtimes at
-import using the ``preserve_mtimes`` option.
+This plugin can optionally be configured to also preserve mtimes at import using
+the ``preserve_mtimes`` option.
 
-When ``preserve_write_mtimes`` option is set, this plugin preserves
-mtimes after each write to files using the ``item.added`` attribute.
+When ``preserve_write_mtimes`` option is set, this plugin preserves mtimes after
+each write to files using the ``item.added`` attribute.
 
 File modification times are preserved as follows:
 
-* For all items:
+- For all items:
 
-  * ``item.mtime`` is set to the mtime of the file
-    from which the item is imported from.
-  * The mtime of the file ``item.path`` is set to ``item.mtime``.
+  - ``item.mtime`` is set to the mtime of the file from which the item is
+    imported.
+  - The mtime of the file ``item.path`` is set to ``item.mtime``.
 
 Note that there is no ``album.mtime`` field in the database and that the mtime
 of album directories on disk aren't preserved.
@@ -42,16 +41,13 @@ of album directories on disk aren't preserved.
 Configuration
 -------------
 
-To configure the plugin, make an ``importadded:`` section in your
-configuration file. There are two options available:
+To configure the plugin, make an ``importadded:`` section in your configuration
+file. There are two options available:
 
 - **preserve_mtimes**: After importing files, re-set their mtimes to their
-  original value.
-  Default: ``no``.
-
+  original value. Default: ``no``.
 - **preserve_write_mtimes**: After writing files, re-set their mtimes to their
-  original value.
-  Default: ``no``.
+  original value. Default: ``no``.
Reimport -------- diff --git a/docs/plugins/importfeeds.rst b/docs/plugins/importfeeds.rst index 5f108db86..5246f2bc7 100644 --- a/docs/plugins/importfeeds.rst +++ b/docs/plugins/importfeeds.rst @@ -3,45 +3,43 @@ ImportFeeds Plugin This plugin helps you keep track of newly imported music in your library. -To use the ``importfeeds`` plugin, enable it in your configuration -(see :ref:`using-plugins`). +To use the ``importfeeds`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Configuration ------------- -To configure the plugin, make an ``importfeeds:`` section in your -configuration file. The available options are: +To configure the plugin, make an ``importfeeds:`` section in your configuration +file. The available options are: - **absolute_path**: Use absolute paths instead of relative paths. Some - applications may need this to work properly. - Default: ``no``. -- **dir**: The output directory. - Default: Your beets library directory. + applications may need this to work properly. Default: ``no``. +- **dir**: The output directory. Default: Your beets library directory. - **formats**: Select the kind of output. Use one or more of: - - **m3u**: Catalog the imports in a centralized playlist. - - **m3u_multi**: Create a new playlist for each import (uniquely named by - appending the date and track/album name). - - **m3u_session**: Create a new playlist for each import session. The file - is named as ``m3u_name`` appending the date and time the import session - was started. - - **link**: Create a symlink for each imported item. This is the - recommended setting to propagate beets imports to your iTunes library: - just drag and drop the ``dir`` folder on the iTunes dock icon. - - **echo**: Do not write a playlist file at all, but echo a list of new - file paths to the terminal. + - **m3u**: Catalog the imports in a centralized playlist. 
+ - **m3u_multi**: Create a new playlist for each import (uniquely named by + appending the date and track/album name). + - **m3u_session**: Create a new playlist for each import session. The file + is named as ``m3u_name`` appending the date and time the import session + was started. + - **link**: Create a symlink for each imported item. This is the + recommended setting to propagate beets imports to your iTunes library: + just drag and drop the ``dir`` folder on the iTunes dock icon. + - **echo**: Do not write a playlist file at all, but echo a list of new + file paths to the terminal. Default: None. -- **m3u_name**: Playlist name used by the ``m3u`` format and as a prefix used - by the ``m3u_session`` format. - Default: ``imported.m3u``. -- **relative_to**: Make the m3u paths relative to another - folder than where the playlist is being written. If you're using importfeeds - to generate a playlist for MPD, you should set this to the root of your music - library. - Default: None. -Here's an example configuration for this plugin:: +- **m3u_name**: Playlist name used by the ``m3u`` format and as a prefix used by + the ``m3u_session`` format. Default: ``imported.m3u``. +- **relative_to**: Make the m3u paths relative to another folder than where the + playlist is being written. If you're using importfeeds to generate a playlist + for MPD, you should set this to the root of your music library. Default: None. + +Here's an example configuration for this plugin: + +:: importfeeds: formats: m3u link diff --git a/docs/plugins/importsource.rst b/docs/plugins/importsource.rst new file mode 100644 index 000000000..dda2d5e08 --- /dev/null +++ b/docs/plugins/importsource.rst @@ -0,0 +1,80 @@ +ImportSource Plugin +=================== + +The ``importsource`` plugin adds a ``source_path`` field to every item imported +to the library which stores the original media files' paths. 
Using this plugin +makes most sense when the general importing workflow is using ``beet import +--copy``. Additionally the plugin interactively suggests deletion of original +source files whenever items are removed from the Beets library. + +To enable it, add ``importsource`` to the list of plugins in your configuration +(see :ref:`using-plugins`). + +Tracking Source Paths +--------------------- + +The primary use case for the plugin is tracking the original location of +imported files using the ``source_path`` field. Consider this scenario: you've +imported all directories in your current working directory using: + +.. code-block:: bash + + beet import --flat --copy */ + +Later, for instance if the import didn't complete successfully, you'll need to +rerun the import but don't want Beets to re-process the already successfully +imported directories. You can view which files were successfully imported using: + +.. code-block:: bash + + beet ls source_path:$PWD --format='$source_path' + +To extract just the directory names, pipe the output to standard UNIX utilities: + +.. code-block:: bash + + beet ls source_path:$PWD --format='$source_path' | awk -F / '{print $(NF-1)}' | sort -u + +This might help to find out what's left to be imported. + +Removal Suggestion +------------------ + +Another feature of the plugin is suggesting removal of original source files +when items are deleted from your library. Consider this scenario: you imported +an album using: + +.. code-block:: bash + + beet import --copy --flat ~/Desktop/interesting-album-to-check/ + +After listening to that album and deciding it wasn't good, you want to delete it +from your library as well as from your ``~/Desktop``, so you run: + +.. code-block:: bash + + beet remove --delete source_path:$HOME/Desktop/interesting-album-to-check + +After approving the deletion, the plugin will prompt: + +.. 
code-block:: text + + The item: + <music-library>/Interesting Album/01 Interesting Song.flac + is originated from: + <HOME>/Desktop/interesting-album-to-check/01-interesting-song.flac + What would you like to do? + Delete the item's source, Recursively delete the source's directory, + do Nothing, + do nothing and Stop suggesting to delete items from this album? + +Configuration +------------- + +To configure the plugin, make an ``importsource:`` section in your configuration +file. There is one option available: + +- **suggest_removal**: By default ``importsource`` suggests to remove the + original directories / files from which the items were imported whenever + library items (and files) are removed. To disable these prompts set this + option to ``no``. Default: ``yes``. diff --git a/docs/plugins/index.rst b/docs/plugins/index.rst index bd7ece200..1583ac5ab 100644 --- a/docs/plugins/index.rst +++ b/docs/plugins/index.rst @@ -5,7 +5,7 @@ Plugins extend beets' core functionality. They add new commands, fetch additional data during import, provide new metadata sources, and much more. If beets by itself doesn't do what you want it to, you may just need to enable a plugin---or, if you want to do something new, :doc:`writing a plugin -</dev/plugins>` is easy if you know a little Python. +</dev/plugins/index>` is easy if you know a little Python. .. _using-plugins: @@ -13,125 +13,125 @@ Using Plugins ------------- To use one of the plugins included with beets (see the rest of this page for a -list), just use the ``plugins`` option in your :doc:`config.yaml </reference/config>` file, like so:: +list), just use the ``plugins`` option in your :doc:`config.yaml +</reference/config>` file: - plugins: inline convert web +.. code-block:: sh + + plugins: musicbrainz inline convert web The value for ``plugins`` can be a space-separated list of plugin names or a YAML list like ``[foo, bar]``. You can see which plugins are currently enabled by typing ``beet version``. 
-Each plugin has its own set of options that can be defined in a section bearing its name:: +Each plugin has its own set of options that can be defined in a section bearing +its name: - plugins: inline convert web +.. code-block:: yaml + + plugins: musicbrainz inline convert web convert: auto: true Some plugins have special dependencies that you'll need to install. The -documentation page for each plugin will list them in the setup instructions. -For some, you can use ``pip``'s "extras" feature to install the dependencies, -like this:: +documentation page for each plugin will list them in the setup instructions. For +some, you can use ``pip``'s "extras" feature to install the dependencies: - pip install beets[fetchart,lyrics,lastgenre] +.. code-block:: sh + + pip install "beets[fetchart,lyrics,lastgenre]" .. _metadata-source-plugin-configuration: Using Metadata Source Plugins ----------------------------- -Some plugins provide sources for metadata in addition to MusicBrainz. These -plugins share the following configuration option: - -- **source_weight**: Penalty applied to matches during import. Set to 0.0 to - disable. - Default: ``0.5``. - -For example, to equally consider matches from Discogs and MusicBrainz add the -following to your configuration:: - - plugins: discogs - - discogs: - source_weight: 0.0 +We provide several :ref:`autotagger_extensions` that fetch metadata from online +databases. They share the following configuration options: +.. include:: ./shared_metadata_source_config.rst .. 
toctree:: - :hidden: + :hidden: - absubmit - acousticbrainz - advancedrewrite - albumtypes - aura - autobpm - badfiles - bareasc - beatport - bpd - bpm - bpsync - bucket - chroma - convert - deezer - discogs - duplicates - edit - embedart - embyupdate - export - fetchart - filefilter - fish - freedesktop - fromfilename - ftintitle - fuzzy - gmusic - hook - ihate - importadded - importfeeds - info - inline - ipfs - keyfinder - kodiupdate - lastgenre - lastimport - limit - listenbrainz - loadext - lyrics - mbcollection - mbsubmit - mbsync - metasync - missing - mpdstats - mpdupdate - parentwork - permissions - play - playlist - plexupdate - random - replaygain - rewrite - scrub - smartplaylist - sonosupdate - spotify - subsonicplaylist - subsonicupdate - substitute - the - thumbnails - types - unimported - web - zero + absubmit + acousticbrainz + advancedrewrite + albumtypes + aura + autobpm + badfiles + bareasc + beatport + bpd + bpm + bpsync + bucket + chroma + convert + deezer + discogs + duplicates + edit + embedart + embyupdate + export + fetchart + filefilter + fish + freedesktop + fromfilename + ftintitle + fuzzy + hook + ihate + importadded + importsource + importfeeds + info + inline + ipfs + keyfinder + kodiupdate + lastgenre + lastimport + limit + listenbrainz + loadext + lyrics + mbcollection + mbpseudo + mbsubmit + mbsync + metasync + missing + mpdstats + mpdupdate + musicbrainz + parentwork + permissions + play + playlist + plexupdate + random + replace + replaygain + rewrite + scrub + smartplaylist + sonosupdate + spotify + subsonicplaylist + subsonicupdate + substitute + the + thumbnails + titlecase + types + unimported + web + zero .. _autotagger_extensions: @@ -139,254 +139,266 @@ Autotagger Extensions --------------------- :doc:`chroma <chroma>` - Use acoustic fingerprinting to identify audio files with - missing or incorrect metadata. - -:doc:`discogs <discogs>` - Search for releases in the `Discogs`_ database. 
-
-:doc:`spotify <spotify>`
-   Search for releases in the `Spotify`_ database.
+    Use acoustic fingerprinting to identify audio files with missing or
+    incorrect metadata.
 
 :doc:`deezer <deezer>`
-   Search for releases in the `Deezer`_ database.
+    Search for releases in the Deezer_ database.
+
+:doc:`discogs <discogs>`
+    Search for releases in the Discogs_ database.
 
 :doc:`fromfilename <fromfilename>`
-   Guess metadata for untagged tracks from their filenames.
+    Guess metadata for untagged tracks from their filenames.
 
-.. _Discogs: https://www.discogs.com/
-.. _Spotify: https://www.spotify.com
-.. _Deezer: https://www.deezer.com/
+:doc:`musicbrainz <musicbrainz>`
+    Search for releases in the MusicBrainz_ database.
+
+:doc:`mbpseudo <mbpseudo>`
+    Search for releases and pseudo-releases in the MusicBrainz_ database.
+
+:doc:`spotify <spotify>`
+    Search for releases in the Spotify_ database.
+
+.. _deezer: https://www.deezer.com
+
+.. _discogs: https://www.discogs.com
+
+.. _musicbrainz: https://musicbrainz.org
+
+.. _spotify: https://www.spotify.com
 
 Metadata
 --------
 
 :doc:`absubmit <absubmit>`
-   Analyse audio with the `streaming_extractor_music`_ program and submit the metadata to an AcousticBrainz server
+    Analyse audio with the streaming_extractor_music_ program and submit the
+    metadata to an AcousticBrainz server
 
 :doc:`acousticbrainz <acousticbrainz>`
-   Fetch various AcousticBrainz metadata
+    Fetch various AcousticBrainz metadata
 
 :doc:`autobpm <autobpm>`
-   Use `Librosa`_ to calculate the BPM from the audio.
+    Use Librosa_ to calculate the BPM from the audio.
 
 :doc:`bpm <bpm>`
-   Measure tempo using keystrokes.
+    Measure tempo using keystrokes.
 
 :doc:`bpsync <bpsync>`
-   Fetch updated metadata from Beatport.
+    Fetch updated metadata from Beatport.
 
 :doc:`edit <edit>`
-   Edit metadata from a text editor.
+    Edit metadata from a text editor.
 
 :doc:`embedart <embedart>`
-   Embed album art images into files' metadata.
+    Embed album art images into files' metadata.
:doc:`fetchart <fetchart>` - Fetch album cover art from various sources. + Fetch album cover art from various sources. :doc:`ftintitle <ftintitle>` - Move "featured" artists from the artist field to the title - field. + Move "featured" artists from the artist field to the title field. :doc:`keyfinder <keyfinder>` - Use the `KeyFinder`_ program to detect the musical - key from the audio. + Use the KeyFinder_ program to detect the musical key from the audio. :doc:`importadded <importadded>` - Use file modification times for guessing the value for - the `added` field in the database. + Use file modification times for guessing the value for the ``added`` field + in the database. :doc:`lastgenre <lastgenre>` - Fetch genres based on Last.fm tags. + Fetch genres based on Last.fm tags. :doc:`lastimport <lastimport>` - Collect play counts from Last.fm. + Collect play counts from Last.fm. :doc:`lyrics <lyrics>` - Automatically fetch song lyrics. + Automatically fetch song lyrics. :doc:`mbsync <mbsync>` - Fetch updated metadata from MusicBrainz. + Fetch updated metadata from MusicBrainz. :doc:`metasync <metasync>` - Fetch metadata from local or remote sources + Fetch metadata from local or remote sources :doc:`mpdstats <mpdstats>` - Connect to `MPD`_ and update the beets library with play - statistics (last_played, play_count, skip_count, rating). + Connect to MPD_ and update the beets library with play statistics + (last_played, play_count, skip_count, rating). :doc:`parentwork <parentwork>` - Fetch work titles and works they are part of. + Fetch work titles and works they are part of. :doc:`replaygain <replaygain>` - Calculate volume normalization for players that support it. + Calculate volume normalization for players that support it. :doc:`scrub <scrub>` - Clean extraneous metadata from music files. + Clean extraneous metadata from music files. :doc:`zero <zero>` - Nullify fields by pattern or unconditionally. + Nullify fields by pattern or unconditionally. + +.. 
_keyfinder: http://www.ibrahimshaath.co.uk/keyfinder/ + +.. _librosa: https://github.com/librosa/librosa/ -.. _Librosa: https://github.com/librosa/librosa/ -.. _KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/ .. _streaming_extractor_music: https://acousticbrainz.org/download Path Formats ------------ :doc:`albumtypes <albumtypes>` - Format album type in path formats. + Format album type in path formats. :doc:`bucket <bucket>` - Group your files into bucket directories that cover different - field values ranges. + Group your files into bucket directories that cover different field values + ranges. :doc:`inline <inline>` - Use Python snippets to customize path format strings. + Use Python snippets to customize path format strings. :doc:`rewrite <rewrite>` - Substitute values in path formats. + Substitute values in path formats. :doc:`advancedrewrite <advancedrewrite>` - Substitute field values for items matching a query. + Substitute field values for items matching a query. :doc:`substitute <substitute>` - As an alternative to :doc:`rewrite <rewrite>`, use this plugin. The main - difference between them is that this plugin never modifies the files - metadata. + As an alternative to :doc:`rewrite <rewrite>`, use this plugin. The main + difference between them is that this plugin never modifies the files + metadata. :doc:`the <the>` - Move patterns in path formats (i.e., move "a" and "the" to the - end). + Move patterns in path formats (i.e., move "a" and "the" to the end). Interoperability ---------------- :doc:`aura <aura>` - A server implementation of the `AURA`_ specification. + A server implementation of the AURA_ specification. :doc:`badfiles <badfiles>` - Check audio file integrity. + Check audio file integrity. :doc:`embyupdate <embyupdate>` - Automatically notifies `Emby`_ whenever the beets library changes. + Automatically notifies Emby_ whenever the beets library changes. :doc:`fish <fish>` - Adds `Fish shell`_ tab autocompletion to ``beet`` commands. 
+ Adds `Fish shell`_ tab autocompletion to ``beet`` commands. :doc:`importfeeds <importfeeds>` - Keep track of imported files via ``.m3u`` playlist file(s) or symlinks. + Keep track of imported files via ``.m3u`` playlist file(s) or symlinks. :doc:`ipfs <ipfs>` - Import libraries from friends and get albums from them via ipfs. + Import libraries from friends and get albums from them via ipfs. :doc:`kodiupdate <kodiupdate>` - Automatically notifies `Kodi`_ whenever the beets library - changes. + Automatically notifies Kodi_ whenever the beets library changes. :doc:`mpdupdate <mpdupdate>` - Automatically notifies `MPD`_ whenever the beets library - changes. + Automatically notifies MPD_ whenever the beets library changes. :doc:`play <play>` - Play beets queries in your music player. + Play beets queries in your music player. :doc:`playlist <playlist>` - Use M3U playlists to query the beets library. + Use M3U playlists to query the beets library. :doc:`plexupdate <plexupdate>` - Automatically notifies `Plex`_ whenever the beets library - changes. + Automatically notifies Plex_ whenever the beets library changes. :doc:`smartplaylist <smartplaylist>` - Generate smart playlists based on beets queries. + Generate smart playlists based on beets queries. :doc:`sonosupdate <sonosupdate>` - Automatically notifies `Sonos`_ whenever the beets library - changes. + Automatically notifies Sonos_ whenever the beets library changes. :doc:`thumbnails <thumbnails>` - Get thumbnails with the cover art on your album folders. + Get thumbnails with the cover art on your album folders. :doc:`subsonicupdate <subsonicupdate>` - Automatically notifies `Subsonic`_ whenever the beets - library changes. + Automatically notifies Subsonic_ whenever the beets library changes. +.. _aura: https://auraspec.readthedocs.io -.. _AURA: https://auraspec.readthedocs.io -.. _Emby: https://emby.media -.. _Fish shell: https://fishshell.com/ -.. _Plex: https://plex.tv -.. _Kodi: https://kodi.tv -.. 
_Sonos: https://sonos.com -.. _Subsonic: http://www.subsonic.org/ +.. _emby: https://emby.media + +.. _fish shell: https://fishshell.com/ + +.. _kodi: https://kodi.tv + +.. _plex: https://plex.tv + +.. _sonos: https://sonos.com + +.. _subsonic: http://www.subsonic.org/ Miscellaneous ------------- :doc:`bareasc <bareasc>` - Search albums and tracks with bare ASCII string matching. + Search albums and tracks with bare ASCII string matching. :doc:`bpd <bpd>` - A music player for your beets library that emulates `MPD`_ and is - compatible with `MPD clients`_. + A music player for your beets library that emulates MPD_ and is compatible + with `MPD clients`_. :doc:`convert <convert>` - Transcode music and embed album art while exporting to - a different directory. + Transcode music and embed album art while exporting to a different + directory. :doc:`duplicates <duplicates>` - List duplicate tracks or albums. + List duplicate tracks or albums. :doc:`export <export>` - Export data from queries to a format. + Export data from queries to a format. :doc:`filefilter <filefilter>` - Automatically skip files during the import process based - on regular expressions. + Automatically skip files during the import process based on regular + expressions. :doc:`fuzzy <fuzzy>` - Search albums and tracks with fuzzy string matching. + Search albums and tracks with fuzzy string matching. :doc:`hook <hook>` - Run a command when an event is emitted by beets. + Run a command when an event is emitted by beets. :doc:`ihate <ihate>` - Automatically skip albums and tracks during the import process. + Automatically skip albums and tracks during the import process. :doc:`info <info>` - Print music files' tags to the console. + Print music files' tags to the console. :doc:`loadext <loadext>` - Load SQLite extensions. + Load SQLite extensions. :doc:`mbcollection <mbcollection>` - Maintain your MusicBrainz collection list. + Maintain your MusicBrainz collection list. 
:doc:`mbsubmit <mbsubmit>` - Print an album's tracks in a MusicBrainz-friendly format. + Print an album's tracks in a MusicBrainz-friendly format. :doc:`missing <missing>` - List missing tracks. + List missing tracks. -`mstream`_ - A music streaming server + webapp that can be used alongside beets. +mstream_ + A music streaming server + webapp that can be used alongside beets. :doc:`random <random>` - Randomly choose albums and tracks from your library. + Randomly choose albums and tracks from your library. :doc:`spotify <spotify>` - Create Spotify playlists from the Beets library. + Create Spotify playlists from the Beets library. :doc:`types <types>` - Declare types for flexible attributes. + Declare types for flexible attributes. :doc:`web <web>` - An experimental Web-based GUI for beets. + An experimental Web-based GUI for beets. + +.. _mpd: https://www.musicpd.org/ + +.. _mpd clients: https://mpd.wikia.com/wiki/Clients -.. _MPD: https://www.musicpd.org/ -.. _MPD clients: https://mpd.wikia.com/wiki/Clients .. _mstream: https://github.com/IrosTheBeggar/mStream .. _other-plugins: @@ -394,199 +406,248 @@ Miscellaneous Other Plugins ------------- -In addition to the plugins that come with beets, there are several plugins -that are maintained by the beets community. To use an external plugin, there -are two options for installation: +In addition to the plugins that come with beets, there are several plugins that +are maintained by the beets community. To use an external plugin, there are two +options for installation: -* Make sure it's in the Python path (known as ``sys.path`` to developers). This +- Make sure it's in the Python path (known as ``sys.path`` to developers). This just means the plugin has to be installed on your system (e.g., with a ``setup.py`` script or a command like ``pip`` or ``easy_install``). - -* Set the ``pluginpath`` config variable to point to the directory containing the - plugin. (See :doc:`/reference/config`.) 
+- Set the ``pluginpath`` config variable to point to the directory containing + the plugin. (See :doc:`/reference/config`.) Once the plugin is installed, enable it by placing its name on the ``plugins`` line in your config file. Here are a few of the plugins written by the beets community: -`beets-alternatives`_ - Manages external files. +beets-alternatives_ + Manages external files. -`beet-amazon`_ - Adds Amazon.com as a tagger data source. +beet-amazon_ + Adds Amazon.com as a tagger data source. -`beets-artistcountry`_ - Fetches the artist's country of origin from MusicBrainz. +beets-artistcountry_ + Fetches the artist's country of origin from MusicBrainz. -`beets-autofix`_ - Automates repetitive tasks to keep your library in order. +beets-autofix_ + Automates repetitive tasks to keep your library in order. -`beets-autogenre`_ - Assigns genres to your library items using the :doc:`lastgenre <lastgenre>` - and `beets-xtractor`_ plugins as well as additional rules. +beets-autogenre_ + Assigns genres to your library items using the :doc:`lastgenre <lastgenre>` + and beets-xtractor_ plugins as well as additional rules. -`beets-audible`_ - Adds Audible as a tagger data source and provides - other features for managing audiobook collections. +beets-audible_ + Adds Audible as a tagger data source and provides other features for + managing audiobook collections. -`beets-barcode`_ - Lets you scan or enter barcodes for physical media to - search for their metadata. +beets-barcode_ + Lets you scan or enter barcodes for physical media to search for their + metadata. -`beetcamp`_ - Enables **bandcamp.com** autotagger with a fairly extensive amount of metadata. +beetcamp_ + Enables **bandcamp.com** autotagger with a fairly extensive amount of + metadata. 
-`beetstream`_ - Server implementation of the `Subsonic API`_ specification, serving the - beets library and (:doc:`smartplaylist <smartplaylist>` plugin generated) - M3U playlists, allowing you to stream your music on a multitude of clients. +beetstream_ + Server implementation of the `Subsonic API`_ specification, serving the + beets library and (:doc:`smartplaylist <smartplaylist>` plugin generated) + M3U playlists, allowing you to stream your music on a multitude of clients. -`beets-bpmanalyser`_ - Analyses songs and calculates their tempo (BPM). +beets-bpmanalyser_ + Analyses songs and calculates their tempo (BPM). -`beets-check`_ - Automatically checksums your files to detect corruption. +beets-check_ + Automatically checksums your files to detect corruption. `A cmus plugin`_ - Integrates with the `cmus`_ console music player. + Integrates with the cmus_ console music player. -`beets-copyartifacts`_ - Helps bring non-music files along during import. +beets-copyartifacts_ + Helps bring non-music files along during import. -`beets-describe`_ - Gives you the full picture of a single attribute of your library items. +beets-describe_ + Gives you the full picture of a single attribute of your library items. -`drop2beets`_ - Automatically imports singles as soon as they are dropped in a - folder (using Linux's ``inotify``). You can also set a sub-folders - hierarchy to set flexible attributes by the way. +drop2beets_ + Automatically imports singles as soon as they are dropped in a folder (using + Linux's ``inotify``). You can also set a sub-folders hierarchy to set + flexible attributes by the way. -`dsedivec`_ - Has two plugins: ``edit`` and ``moveall``. +dsedivec_ + Has two plugins: ``edit`` and ``moveall``. -`beets-follow`_ - Lets you check for new albums from artists you like. +beets-filetote_ + Helps bring non-music extra files, attachments, and artifacts during imports + and CLI file manipulation actions (``beet move``, etc.). 
-`beetFs`_ - Is a FUSE filesystem for browsing the music in your beets library. - (Might be out of date.) +beets-follow_ + Lets you check for new albums from artists you like. -`beets-goingrunning`_ - Generates playlists to go with your running sessions. +beetFs_ + Is a FUSE filesystem for browsing the music in your beets library. (Might be + out of date.) -`beets-ibroadcast`_ - Uploads tracks to the `iBroadcast`_ cloud service. +beets-goingrunning_ + Generates playlists to go with your running sessions. -`beets-id3extract`_ - Maps arbitrary ID3 tags to beets custom fields. +beets-ibroadcast_ + Uploads tracks to the iBroadcast_ cloud service. -`beets-importreplace`_ - Lets you perform regex replacements on incoming - metadata. +beets-id3extract_ + Maps arbitrary ID3 tags to beets custom fields. -`beets-jiosaavn`_ - Adds JioSaavn.com as a tagger data source. +beets-importreplace_ + Lets you perform regex replacements on incoming metadata. -`beets-more`_ - Finds versions of indexed releases with more tracks, like deluxe and anniversary editions. +beets-jiosaavn_ + Adds JioSaavn.com as a tagger data source. -`beets-mosaic`_ - Generates a montage of a mosaic from cover art. +beets-more_ + Finds versions of indexed releases with more tracks, like deluxe and + anniversary editions. -`beets-mpd-utils`_ - Plugins to interface with `MPD`_. Comes with ``mpd_tracker`` (track play/skip counts from MPD) and ``mpd_dj`` (auto-add songs to your queue.) +beets-mosaic_ + Generates a montage of a mosaic from cover art. -`beets-noimport`_ - Adds and removes directories from the incremental import skip list. +beets-mpd-utils_ + Plugins to interface with MPD_. Comes with ``mpd_tracker`` (track play/skip + counts from MPD) and ``mpd_dj`` (auto-add songs to your queue.) -`beets-originquery`_ - Augments MusicBrainz queries with locally-sourced data - to improve autotagger results. +beets-noimport_ + Adds and removes directories from the incremental import skip list. 
-`beets-plexsync`_ - Allows you to sync your Plex library with your beets library, create smart playlists in Plex, and import online playlists (from services like Spotify) into Plex. +beets-originquery_ + Augments MusicBrainz queries with locally-sourced data to improve autotagger + results. -`beets-setlister`_ - Generate playlists from the setlists of a given artist. +beets-plexsync_ + Allows you to sync your Plex library with your beets library, create smart + playlists in Plex, and import online playlists (from services like Spotify) + into Plex. -`beet-summarize`_ - Can compute lots of counts and statistics about your music - library. +beets-setlister_ + Generate playlists from the setlists of a given artist. -`beets-usertag`_ - Lets you use keywords to tag and organize your music. +beet-summarize_ + Can compute lots of counts and statistics about your music library. -`beets-webm3u`_ - Serves the (:doc:`smartplaylist <smartplaylist>` plugin generated) M3U - playlists via HTTP. +beets-usertag_ + Lets you use keywords to tag and organize your music. -`beets-webrouter`_ - Serves multiple beets webapps (e.g. :doc:`web <web>`, `beets-webm3u`_, - `beetstream`_, :doc:`aura <aura>`) using a single command/process/host/port, - each under a different path. +beets-webm3u_ + Serves the (:doc:`smartplaylist <smartplaylist>` plugin generated) M3U + playlists via HTTP. -`whatlastgenre`_ - Fetches genres from various music sites. +beets-webrouter_ + Serves multiple beets webapps (e.g. :doc:`web <web>`, beets-webm3u_, + beetstream_, :doc:`aura <aura>`) using a single command/process/host/port, + each under a different path. -`beets-xtractor`_ - Extracts low- and high-level musical information from your songs. +whatlastgenre_ + Fetches genres from various music sites. -`beets-ydl`_ - Downloads audio from youtube-dl sources and import into beets. +beets-xtractor_ + Extracts low- and high-level musical information from your songs. 
-`beets-ytimport`_ - Download and import your liked songs from YouTube into beets. +beets-ydl_ + Downloads audio from youtube-dl sources and import into beets. -`beets-yearfixer`_ - Attempts to fix all missing ``original_year`` and ``year`` fields. +beets-ytimport_ + Download and import your liked songs from YouTube into beets. -`beets-youtube`_ - Adds YouTube Music as a tagger data source. +beets-yearfixer_ + Attempts to fix all missing ``original_year`` and ``year`` fields. + +beets-youtube_ + Adds YouTube Music as a tagger data source. + +.. _a cmus plugin: https://github.com/coolkehon/beets/blob/master/beetsplug/cmus.py + +.. _beet-amazon: https://github.com/jmwatte/beet-amazon + +.. _beet-musicbrainz-collection: https://github.com/jeffayle/Beet-MusicBrainz-Collection/ + +.. _beet-summarize: https://github.com/steven-murray/beet-summarize + +.. _beetcamp: https://github.com/snejus/beetcamp + +.. _beetfs: https://github.com/jbaiter/beetfs + +.. _beets-alternatives: https://github.com/geigerzaehler/beets-alternatives + +.. _beets-artistcountry: https://github.com/agrausem/beets-artistcountry + +.. _beets-audible: https://github.com/Neurrone/beets-audible + +.. _beets-autofix: https://github.com/adamjakab/BeetsPluginAutofix + +.. _beets-autogenre: https://github.com/mgoltzsche/beets-autogenre .. _beets-barcode: https://github.com/8h2a/beets-barcode -.. _beetcamp: https://github.com/snejus/beetcamp -.. _beetstream: https://github.com/BinaryBrain/Beetstream -.. _Subsonic API: http://www.subsonic.org/pages/api.jsp -.. _beets-check: https://github.com/geigerzaehler/beets-check -.. _beets-copyartifacts: https://github.com/adammillerio/beets-copyartifacts -.. _dsedivec: https://github.com/dsedivec/beets-plugins -.. _beets-artistcountry: https://github.com/agrausem/beets-artistcountry -.. _beetFs: https://github.com/jbaiter/beetfs -.. _Beet-MusicBrainz-Collection: - https://github.com/jeffayle/Beet-MusicBrainz-Collection/ -.. 
_A cmus plugin: - https://github.com/coolkehon/beets/blob/master/beetsplug/cmus.py -.. _cmus: http://cmus.sourceforge.net/ -.. _beet-amazon: https://github.com/jmwatte/beet-amazon -.. _beets-alternatives: https://github.com/geigerzaehler/beets-alternatives -.. _beets-follow: https://github.com/nolsto/beets-follow -.. _beets-ibroadcast: https://github.com/ctrueden/beets-ibroadcast -.. _iBroadcast: https://ibroadcast.com/ -.. _beets-id3extract: https://github.com/bcotton/beets-id3extract -.. _beets-importreplace: https://github.com/edgars-supe/beets-importreplace -.. _beets-setlister: https://github.com/tomjaspers/beets-setlister -.. _beets-noimport: https://gitlab.com/tiago.dias/beets-noimport -.. _whatlastgenre: https://github.com/YetAnotherNerd/whatlastgenre/tree/master/plugin/beets -.. _beets-usertag: https://github.com/edgars-supe/beets-usertag -.. _beets-plexsync: https://github.com/arsaboo/beets-plexsync -.. _beets-jiosaavn: https://github.com/arsaboo/beets-jiosaavn -.. _beets-youtube: https://github.com/arsaboo/beets-youtube -.. _beets-ydl: https://github.com/vmassuchetto/beets-ydl -.. _beets-ytimport: https://github.com/mgoltzsche/beets-ytimport -.. _beet-summarize: https://github.com/steven-murray/beet-summarize -.. _beets-mosaic: https://github.com/SusannaMaria/beets-mosaic -.. _beets-goingrunning: https://pypi.org/project/beets-goingrunning -.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor -.. _beets-yearfixer: https://github.com/adamjakab/BeetsPluginYearFixer -.. _beets-autofix: https://github.com/adamjakab/BeetsPluginAutofix -.. _beets-describe: https://github.com/adamjakab/BeetsPluginDescribe + .. _beets-bpmanalyser: https://github.com/adamjakab/BeetsPluginBpmAnalyser -.. _beets-originquery: https://github.com/x1ppy/beets-originquery -.. _drop2beets: https://github.com/martinkirch/drop2beets -.. _beets-audible: https://github.com/Neurrone/beets-audible + +.. _beets-check: https://github.com/geigerzaehler/beets-check + +.. 
_beets-copyartifacts: https://github.com/adammillerio/beets-copyartifacts + +.. _beets-describe: https://github.com/adamjakab/BeetsPluginDescribe + +.. _beets-filetote: https://github.com/gtronset/beets-filetote + +.. _beets-follow: https://github.com/nolsto/beets-follow + +.. _beets-goingrunning: https://pypi.org/project/beets-goingrunning + +.. _beets-ibroadcast: https://github.com/ctrueden/beets-ibroadcast + +.. _beets-id3extract: https://github.com/bcotton/beets-id3extract + +.. _beets-importreplace: https://github.com/edgars-supe/beets-importreplace + +.. _beets-jiosaavn: https://github.com/arsaboo/beets-jiosaavn + .. _beets-more: https://forgejo.sny.sh/sun/beetsplug/src/branch/main/more + +.. _beets-mosaic: https://github.com/SusannaMaria/beets-mosaic + .. _beets-mpd-utils: https://github.com/thekakkun/beets-mpd-utils + +.. _beets-noimport: https://gitlab.com/tiago.dias/beets-noimport + +.. _beets-originquery: https://github.com/x1ppy/beets-originquery + +.. _beets-plexsync: https://github.com/arsaboo/beets-plexsync + +.. _beets-setlister: https://github.com/tomjaspers/beets-setlister + +.. _beets-usertag: https://github.com/edgars-supe/beets-usertag + .. _beets-webm3u: https://github.com/mgoltzsche/beets-webm3u + .. _beets-webrouter: https://github.com/mgoltzsche/beets-webrouter -.. _beets-autogenre: https://github.com/mgoltzsche/beets-autogenre + +.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor + +.. _beets-ydl: https://github.com/vmassuchetto/beets-ydl + +.. _beets-yearfixer: https://github.com/adamjakab/BeetsPluginYearFixer + +.. _beets-youtube: https://github.com/arsaboo/beets-youtube + +.. _beets-ytimport: https://github.com/mgoltzsche/beets-ytimport + +.. _beetstream: https://github.com/BinaryBrain/Beetstream + +.. _cmus: http://cmus.sourceforge.net/ + +.. _drop2beets: https://github.com/martinkirch/drop2beets + +.. _dsedivec: https://github.com/dsedivec/beets-plugins + +.. _ibroadcast: https://ibroadcast.com/ + +.. 
_subsonic api: http://www.subsonic.org/pages/api.jsp + +.. _whatlastgenre: https://github.com/YetAnotherNerd/whatlastgenre/tree/master/plugin/beets diff --git a/docs/plugins/info.rst b/docs/plugins/info.rst index 1ed7582af..051b081ef 100644 --- a/docs/plugins/info.rst +++ b/docs/plugins/info.rst @@ -1,45 +1,52 @@ Info Plugin =========== -The ``info`` plugin provides a command that dumps the current tag values for -any file format supported by beets. It works like a supercharged version of -`mp3info`_ or `id3v2`_. +The ``info`` plugin provides a command that dumps the current tag values for any +file format supported by beets. It works like a supercharged version of mp3info_ +or id3v2_. Enable the ``info`` plugin in your configuration (see :ref:`using-plugins`) and -then type:: +then type: + +:: $ beet info /path/to/music.flac and the plugin will enumerate all the tags in the specified file. It also accepts multiple filenames in a single command-line. -You can also enter a :doc:`query </reference/query>` to inspect music from -your library:: +You can also enter a :doc:`query </reference/query>` to inspect music from your +library: + +:: $ beet info beatles -If you just want to see specific properties you can use the -``--include-keys`` option to filter them. The argument is a -comma-separated list of field names. For example:: +If you just want to see specific properties you can use the ``--include-keys`` +option to filter them. The argument is a comma-separated list of field names. +For example: + +:: $ beet info -i 'title,mb_artistid' beatles -Will only show the ``title`` and ``mb_artistid`` properties. You can add the +Will only show the ``title`` and ``mb_artistid`` properties. You can add the ``-i`` option multiple times to the command line. Additional command-line options include: -* ``--library`` or ``-l``: Show data from the library database instead of the +- ``--library`` or ``-l``: Show data from the library database instead of the files' tags. 
-* ``--album`` or ``-a``: Show data from albums instead of tracks (implies +- ``--album`` or ``-a``: Show data from albums instead of tracks (implies ``--library``). -* ``--summarize`` or ``-s``: Merge all the information from multiple files - into a single list of values. If the tags differ across the files, print +- ``--summarize`` or ``-s``: Merge all the information from multiple files into + a single list of values. If the tags differ across the files, print ``[various]``. -* ``--format`` or ``-f``: Specify a specific format with which to print every +- ``--format`` or ``-f``: Specify a specific format with which to print every item. This uses the same template syntax as beets’ :doc:`path formats </reference/pathformat>`. -* ``--keys-only`` or ``-k``: Show the name of the tags without the values. +- ``--keys-only`` or ``-k``: Show the name of the tags without the values. .. _id3v2: http://id3v2.sourceforge.net + .. _mp3info: https://www.ibiblio.org/mp3info/ diff --git a/docs/plugins/inline.rst b/docs/plugins/inline.rst index 4dfca261d..d653b6d52 100644 --- a/docs/plugins/inline.rst +++ b/docs/plugins/inline.rst @@ -2,42 +2,45 @@ Inline Plugin ============= The ``inline`` plugin lets you use Python to customize your path formats. Using -it, you can define template fields in your beets configuration file and refer -to them from your template strings in the ``paths:`` section (see +it, you can define template fields in your beets configuration file and refer to +them from your template strings in the ``paths:`` section (see :doc:`/reference/config/`). -To use the ``inline`` plugin, enable it in your configuration -(see :ref:`using-plugins`). -Then, make a ``item_fields:`` block in your config file. Under this key, every line defines a -new template field; the key is the name of the field (you'll use the name to -refer to the field in your templates) and the value is a Python expression or -function body. 
The Python code has all of a track's fields in scope, so you can -refer to any normal attributes (such as ``artist`` or ``title``) as Python -variables. +To use the ``inline`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Then, make a ``item_fields:`` block in your config file. +Under this key, every line defines a new template field; the key is the name of +the field (you'll use the name to refer to the field in your templates) and the +value is a Python expression or function body. The Python code has all of a +track's fields in scope, so you can refer to any normal attributes (such as +``artist`` or ``title``) as Python variables. -Here are a couple of examples of expressions:: +Here are a couple of examples of expressions: + +:: item_fields: initial: albumartist[0].upper() + u'.' - disc_and_track: u'%02i.%02i' % (disc, track) if - disctotal > 1 else u'%02i' % (track) + disc_and_track: f"{disc:02d}.{track:02d}" if disctotal > 1 else f"{track:02d}" Note that YAML syntax allows newlines in values if the subsequent lines are indented. These examples define ``$initial`` and ``$disc_and_track`` fields that can be -referenced in path templates like so:: +referenced in path templates like so: + +:: paths: default: $initial/$artist/$album%aunique{}/$disc_and_track $title - Block Definitions ----------------- If you need to use statements like ``import``, you can write a Python function -body instead of a single expression. In this case, you'll need to ``return`` -a result for the value of the path field, like so:: +body instead of a single expression. In this case, you'll need to ``return`` a +result for the value of the path field, like so: + +:: item_fields: filename: | @@ -48,17 +51,18 @@ a result for the value of the path field, like so:: You might want to use the YAML syntax for "block literals," in which a leading ``|`` character indicates a multi-line block of text. 
- Album Fields ------------ The above examples define fields for *item* templates, but you can also define -fields for *album* templates. Use the ``album_fields`` configuration section. -In this context, all existing album fields are available as variables along -with ``items``, which is a list of items in the album. +fields for *album* templates. Use the ``album_fields`` configuration section. In +this context, all existing album fields are available as variables along with +``items``, which is a list of items in the album. This example defines a ``$bitrate`` field for albums as the average of the -tracks' fields:: +tracks' fields: + +:: album_fields: bitrate: | diff --git a/docs/plugins/ipfs.rst b/docs/plugins/ipfs.rst index 5bf8ca906..6f8144087 100644 --- a/docs/plugins/ipfs.rst +++ b/docs/plugins/ipfs.rst @@ -2,15 +2,15 @@ IPFS Plugin =========== The ``ipfs`` plugin makes it easy to share your library and music with friends. -The plugin uses `ipfs`_ for storing the library and file content. +The plugin uses ipfs_ for storing the library and file content. .. _ipfs: https://ipfs.io/ Installation ------------ -This plugin requires `go-ipfs`_ to be running as a daemon and that the -associated ``ipfs`` command is on the user's ``$PATH``. +This plugin requires go-ipfs_ to be running as a daemon and that the associated +``ipfs`` command is on the user's ``$PATH``. .. _go-ipfs: https://github.com/ipfs/go-ipfs @@ -24,51 +24,54 @@ This plugin can store and retrieve music individually, or it can share entire library databases. Adding -'''''' +~~~~~~ -To add albums to ipfs, making them shareable, use the ``-a`` or ``--add`` -flag. If used without arguments it will add all albums in the local library. -When added, all items and albums will get an "ipfs" field in the database -containing the hash of that specific file/folder. Newly imported albums will -be added automatically to ipfs by default (see below). 
+To add albums to ipfs, making them shareable, use the ``-a`` or ``--add`` flag. +If used without arguments it will add all albums in the local library. When +added, all items and albums will get an "ipfs" field in the database containing +the hash of that specific file/folder. Newly imported albums will be added +automatically to ipfs by default (see below). Retrieving -'''''''''' +~~~~~~~~~~ You can give the ipfs hash for some music to a friend. They can get that album -from ipfs, and import it into beets, using the ``-g`` or ``--get`` flag. If -the argument passed to the ``-g`` flag isn't an ipfs hash, it will be used as -a query instead, getting all albums matching the query. +from ipfs, and import it into beets, using the ``-g`` or ``--get`` flag. If the +argument passed to the ``-g`` flag isn't an ipfs hash, it will be used as a +query instead, getting all albums matching the query. Sharing Libraries -''''''''''''''''' +~~~~~~~~~~~~~~~~~ Using the ``-p`` or ``--publish`` flag, a copy of the local library will be published to ipfs. Only albums/items with ipfs records in the database will published, and local paths will be stripped from the library. A hash of the library will be returned to the user. -A friend can then import this remote library by using the ``-i`` or -``--import`` flag. To tag an imported library with a specific name by passing -a name as the second argument to ``-i,`` after the hash. The content of all -remote libraries will be combined into an additional library as long as the -content doesn't already exist in the joined library. +A friend can then import this remote library by using the ``-i`` or ``--import`` +flag. To tag an imported library with a specific name, pass a name as the +second argument to ``-i``, after the hash. The content of all remote libraries +will be combined into an additional library as long as the content doesn't +already exist in the joined library. 
-When remote libraries has been imported you can search them by using the -``-l`` or ``--list`` flag. The hash of albums matching the query will be -returned, and this can then be used with ``-g`` to fetch and import the album -to the local library. +When remote libraries have been imported you can search them by using the ``-l`` +or ``--list`` flag. The hash of albums matching the query will be returned, and +this can then be used with ``-g`` to fetch and import the album to the local +library. Ipfs can be mounted as a FUSE file system. This means that music in a remote library can be streamed directly, without importing them to the local library -first. If the ``/ipfs`` folder is mounted then matching queries will be sent -to the :doc:`/plugins/play` using the ``-m`` or ``--play`` flag. +first. If the ``/ipfs`` folder is mounted then matching queries will be sent to +the :doc:`/plugins/play` using the ``-m`` or ``--play`` flag. Configuration ------------- The ipfs plugin will automatically add imported albums to ipfs and add those -hashes to the database. This can be turned off by setting the ``auto`` option -in the ``ipfs:`` section of the config to ``no``. +hashes to the database. This can be turned off by setting the ``auto`` option in +the ``ipfs:`` section of the config to ``no``. -If the setting ``nocopy`` is true (defaults false) then the plugin will pass the ``--nocopy`` option when adding things to ipfs. If the filestore option of ipfs is enabled this will mean files are neither removed from beets nor copied somewhere else. +If the setting ``nocopy`` is true (defaults false) then the plugin will pass the +``--nocopy`` option when adding things to ipfs. If the filestore option of ipfs +is enabled this will mean files are neither removed from beets nor copied +somewhere else. 
diff --git a/docs/plugins/keyfinder.rst b/docs/plugins/keyfinder.rst index a5c64d39c..c692c5407 100644 --- a/docs/plugins/keyfinder.rst +++ b/docs/plugins/keyfinder.rst @@ -1,11 +1,10 @@ Key Finder Plugin ================= -The `keyfinder` plugin uses either the `KeyFinder`_ or `keyfinder-cli`_ -program to detect the musical key of a track from its audio data and store -it in the `initial_key` field of your database. It does so -automatically when importing music or through the ``beet keyfinder -[QUERY]`` command. +The ``keyfinder`` plugin uses either the KeyFinder_ or keyfinder-cli_ program to +detect the musical key of a track from its audio data and store it in the +``initial_key`` field of your database. It does so automatically when importing +music or through the ``beet keyfinder [QUERY]`` command. To use the ``keyfinder`` plugin, enable it in your configuration (see :ref:`using-plugins`). @@ -13,23 +12,20 @@ To use the ``keyfinder`` plugin, enable it in your configuration (see Configuration ------------- -To configure the plugin, make a ``keyfinder:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``keyfinder:`` section in your configuration +file. The available options are: -- **auto**: Analyze every file on - import. Otherwise, you need to use the ``beet keyfinder`` command - explicitly. - Default: ``yes`` +- **auto**: Analyze every file on import. Otherwise, you need to use the ``beet + keyfinder`` command explicitly. Default: ``yes`` - **bin**: The name of the program use for key analysis. You can use either - `KeyFinder`_ or `keyfinder-cli`_. - If you installed the KeyFinder GUI on a Mac, for example, you want - something like - ``/Applications/KeyFinder.app/Contents/MacOS/KeyFinder``. - If using `keyfinder-cli`_, the binary must be named ``keyfinder-cli``. - Default: ``KeyFinder`` (i.e., search for the program in your ``$PATH``).. + KeyFinder_ or keyfinder-cli_. 
If you installed the KeyFinder GUI on a Mac, for + example, you want something like + ``/Applications/KeyFinder.app/Contents/MacOS/KeyFinder``. If using + keyfinder-cli_, the binary must be named ``keyfinder-cli``. Default: + ``KeyFinder`` (i.e., search for the program in your ``$PATH``).. - **overwrite**: Calculate a key even for files that already have an - `initial_key` value. - Default: ``no``. + ``initial_key`` value. Default: ``no``. + +.. _keyfinder: http://www.ibrahimshaath.co.uk/keyfinder/ -.. _KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/ .. _keyfinder-cli: https://github.com/EvanPurkhiser/keyfinder-cli/ diff --git a/docs/plugins/kodiupdate.rst b/docs/plugins/kodiupdate.rst index 90b33d9c1..20bc38b0f 100644 --- a/docs/plugins/kodiupdate.rst +++ b/docs/plugins/kodiupdate.rst @@ -1,14 +1,15 @@ KodiUpdate Plugin ================= -The ``kodiupdate`` plugin lets you automatically update `Kodi`_'s music -library whenever you change your beets library. +The ``kodiupdate`` plugin lets you automatically update Kodi_'s music library +whenever you change your beets library. -To use ``kodiupdate`` plugin, enable it in your configuration -(see :ref:`using-plugins`). -Then, you'll want to configure the specifics of your Kodi host. -You can do that using a ``kodi:`` section in your ``config.yaml``, -which looks like this:: +To use ``kodiupdate`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Then, you'll want to configure the specifics of your Kodi +host. 
You can do that using a ``kodi:`` section in your ``config.yaml``, which +looks like this: + +:: kodi: host: localhost @@ -16,7 +17,9 @@ which looks like this:: user: kodi pwd: kodi -To update multiple Kodi instances, specify them as an array:: +To update multiple Kodi instances, specify them as an array: + +:: kodi: - host: x.x.x.x @@ -28,7 +31,6 @@ To update multiple Kodi instances, specify them as an array:: user: kodi2 pwd: kodi2 - To use the ``kodiupdate`` plugin, first enable it in your configuration (see :ref:`using-plugins`). Then, install ``beets`` with ``kodiupdate`` extra @@ -38,23 +40,20 @@ To use the ``kodiupdate`` plugin, first enable it in your configuration (see You'll also need to enable JSON-RPC in Kodi. -In Kodi's interface, navigate to System/Settings/Network/Services and choose "Allow control of Kodi via HTTP." +In Kodi's interface, navigate to System/Settings/Network/Services and choose +"Allow control of Kodi via HTTP." With that all in place, you'll see beets send the "update" command to your Kodi host every time you change your beets library. -.. _Kodi: https://kodi.tv/ +.. _kodi: https://kodi.tv/ Configuration ------------- The available options under the ``kodi:`` section are: -- **host**: The Kodi host name. - Default: ``localhost`` -- **port**: The Kodi host port. - Default: 8080 -- **user**: The Kodi host user. - Default: ``kodi`` -- **pwd**: The Kodi host password. - Default: ``kodi`` +- **host**: The Kodi host name. Default: ``localhost`` +- **port**: The Kodi host port. Default: 8080 +- **user**: The Kodi host user. Default: ``kodi`` +- **pwd**: The Kodi host password. 
Default: ``kodi`` diff --git a/docs/plugins/lastgenre.rst b/docs/plugins/lastgenre.rst index a48cd3074..ace7caaf0 100644 --- a/docs/plugins/lastgenre.rst +++ b/docs/plugins/lastgenre.rst @@ -1,11 +1,10 @@ LastGenre Plugin ================ - -The ``lastgenre`` plugin fetches *tags* from `Last.fm`_ and assigns them as genres +The ``lastgenre`` plugin fetches *tags* from Last.fm_ and assigns them as genres to your albums and items. -.. _Last.fm: https://last.fm/ +.. _last.fm: https://last.fm/ Installation ------------ @@ -20,30 +19,33 @@ To use the ``lastgenre`` plugin, first enable it in your configuration (see Usage ----- -The plugin chooses genres based on a *whitelist*, meaning that only certain -tags can be considered genres. This way, tags like "my favorite music" or "seen -live" won't be considered genres. The plugin ships with a fairly extensive -`internal whitelist`_, but you can set your own in the config file using the -``whitelist`` configuration value or forgo a whitelist altogether by setting -the option to ``no``. +The plugin chooses genres based on a *whitelist*, meaning that only certain tags +can be considered genres. This way, tags like "my favorite music" or "seen live" +won't be considered genres. The plugin ships with a fairly extensive `internal +whitelist`_, but you can set your own in the config file using the ``whitelist`` +configuration value or forgo a whitelist altogether by setting the option to +``no``. The genre list file should contain one genre per line. Blank lines are ignored. For the curious, the default genre list is generated by a `script that scrapes Wikipedia`_. -.. _script that scrapes Wikipedia: https://gist.github.com/1241307 .. _internal whitelist: https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres.txt +.. 
_script that scrapes wikipedia: https://gist.github.com/1241307 + + Canonicalization -^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~ The plugin can also *canonicalize* genres, meaning that more obscure genres can be turned into coarser-grained ones that are present in the whitelist. This -works using a `tree of nested genre names`_, represented using `YAML`_, where the +works using a `tree of nested genre names`_, represented using YAML_, where the leaves of the tree represent the most specific genres. The most common way to use this would be with a custom whitelist containing only -a desired subset of genres. Consider for a example this minimal whitelist:: +a desired subset of genres. Consider for example this minimal whitelist: + +:: rock heavy metal @@ -54,7 +56,9 @@ as *viking metal* would actually be tagged as *heavy metal* because neither *viking metal* nor its parent *black metal* are in the whitelist. It always tries to use the most specific genre that's available in the whitelist. -The relevant subtree path in the default tree looks like this:: +The relevant subtree path in the default tree looks like this: + +:: - rock: - heavy metal: @@ -66,63 +70,61 @@ contains about any genre contained in the tree) with canonicalization because nothing would ever be matched to a more generic node since all the specific subgenres are in the whitelist to begin with. - -.. _YAML: https://yaml.org/ .. _tree of nested genre names: https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres-tree.yaml +.. _yaml: https://yaml.org/ Genre Source -^^^^^^^^^^^^ +~~~~~~~~~~~~ When looking up genres for albums or individual tracks, you can choose whether to use Last.fm tags on the album, the artist, or the track. For example, you -might want all the albums for a certain artist to carry the same genre. -The default is "album". When set to "track", the plugin will fetch *both* +might want all the albums for a certain artist to carry the same genre. The +default is "album". 
When set to "track", the plugin will fetch *both* album-level and track-level genres for your music when importing albums. - Multiple Genres -^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~ By default, the plugin chooses the most popular tag on Last.fm as a genre. If -you prefer to use a *list* of popular genre tags, you can increase the number -of the ``count`` config option. +you prefer to use a *list* of popular genre tags, you can increase the number of +the ``count`` config option. Lists of up to *count* genres will then be used instead of single genres. The genres are separated by commas by default, but you can change this with the ``separator`` config option. -`Last.fm`_ provides a popularity factor, a.k.a. *weight*, for each tag ranging -from 100 for the most popular tag down to 0 for the least popular. -The plugin uses this weight to discard unpopular tags. The default is to -ignore tags with a weight less then 10. You can change this by setting -the ``min_weight`` config option. +Last.fm_ provides a popularity factor, a.k.a. *weight*, for each tag ranging +from 100 for the most popular tag down to 0 for the least popular. The plugin +uses this weight to discard unpopular tags. The default is to ignore tags with a +weight less than 10. You can change this by setting the ``min_weight`` config +option. Specific vs. Popular Genres -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, the plugin sorts genres by popularity. However, you can use the -``prefer_specific`` option to override this behavior and instead sort genres -by specificity, as determined by your whitelist and canonicalization tree. +``prefer_specific`` option to override this behavior and instead sort genres by +specificity, as determined by your whitelist and canonicalization tree. -For instance, say you have both ``folk`` and ``americana`` in your whitelist -and canonicalization tree and ``americana`` is a leaf within ``folk``. 
If -Last.fm returns both of those tags, lastgenre is going to use the most -popular, which is often the most generic (in this case ``folk``). By setting -``prefer_specific`` to true, lastgenre would use ``americana`` instead. +For instance, say you have both ``folk`` and ``americana`` in your whitelist and +canonicalization tree and ``americana`` is a leaf within ``folk``. If Last.fm +returns both of those tags, lastgenre is going to use the most popular, which is +often the most generic (in this case ``folk``). By setting ``prefer_specific`` +to true, lastgenre would use ``americana`` instead. Handling pre-populated tags -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``force``, ``keep_existing`` and ``whitelist`` options control how pre-existing genres are handled. As you would assume, setting ``force: no`` **won't touch pre-existing genre tags** and will only **fetch new genres for empty tags**. When ``force`` is -``yes`` the setting of the ``whitelist`` option (as documented in `Usage`_) +``yes`` the setting of the ``whitelist`` option (as documented in Usage_) applies to any existing or newly fetched genres. -The follwing configurations are possible: +The following configurations are possible: **Setup 1** (default) @@ -145,8 +147,9 @@ Add new last.fm genres when **empty**. Any present tags stay **untouched**. **Setup 3** **Combine** genres in present tags with new ones (be aware of that with an -enabled ``whitelist`` setting, of course some genres might get cleaned up. To -make sure any existing genres remain, set ``whitelist: no``). +enabled ``whitelist`` setting, of course some genres might get cleaned up - +existing genres take precedence over new ones though. To make sure any existing +genres remain, set ``whitelist: no``). .. code-block:: yaml @@ -154,73 +157,70 @@ make sure any existing genres remain, set ``whitelist: no``). keep_existing: yes .. 
attention:: - If ``force`` is disabled the ``keep_existing`` option is simply ignored (since ``force: - no`` means `not touching` existing tags anyway). - + If ``force`` is disabled the ``keep_existing`` option is simply ignored + (since ``force: no`` means ``not touching`` existing tags anyway). Configuration ------------- -To configure the plugin, make a ``lastgenre:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``lastgenre:`` section in your configuration +file. The available options are: -- **auto**: Fetch genres automatically during import. - Default: ``yes``. -- **canonical**: Use a canonicalization tree. Setting this to ``yes`` will use - a built-in tree. You can also set it to a path, like the ``whitelist`` - config value, to use your own tree. - Default: ``no`` (disabled). -- **count**: Number of genres to fetch. - Default: 1 -- **fallback**: A string to use as a fallback genre when no genre is found `or` - the original genre is not desired to be kept (``keep_existing: no``). You can - use the empty string ``''`` to reset the genre. - Default: None. +- **auto**: Fetch genres automatically during import. Default: ``yes``. +- **canonical**: Use a canonicalization tree. Setting this to ``yes`` will use a + built-in tree. You can also set it to a path, like the ``whitelist`` config + value, to use your own tree. Default: ``no`` (disabled). +- **count**: Number of genres to fetch. Default: 1 +- **fallback**: A string to use as a fallback genre when no genre is found + ``or`` the original genre is not desired to be kept (``keep_existing: no``). + You can use the empty string ``''`` to reset the genre. Default: None. - **force**: By default, lastgenre will fetch new genres for empty tags only, enable this option to always try to fetch new last.fm genres. Enable the ``keep_existing`` option to combine existing and new genres. (see `Handling - pre-populated tags`_). - Default: ``no``. 
-- **keep_existing**: This option alters the ``force`` behavior. - If both ``force`` and ``keep_existing`` are enabled, existing genres are - combined with new ones. Depending on the ``whitelist`` setting, existing and - new genres are filtered accordingly. To ensure only fresh last.fm genres, - disable this option. (see `Handling pre-populated tags`_) - Default: ``no``. + pre-populated tags`_). Default: ``no``. +- **keep_existing**: This option alters the ``force`` behavior. If both + ``force`` and ``keep_existing`` are enabled, existing genres are combined with + new ones. Depending on the ``whitelist`` setting, existing and new genres are + filtered accordingly. To ensure only fresh last.fm genres, disable this + option. (see `Handling pre-populated tags`_) Default: ``no``. - **min_weight**: Minimum popularity factor below which genres are discarded. Default: 10. - **prefer_specific**: Sort genres by the most to least specific, rather than most to least popular. Note that this option requires a ``canonical`` tree, and if not configured it will automatically enable and use the built-in tree. Default: ``no``. -- **source**: Which entity to look up in Last.fm. Can be - either ``artist``, ``album`` or ``track``. - Default: ``album``. -- **separator**: A separator for multiple genres. - Default: ``', '``. -- **whitelist**: The filename of a custom genre list, ``yes`` to use - the internal whitelist, or ``no`` to consider all genres valid. - Default: ``yes``. -- **title_case**: Convert the new tags to TitleCase before saving. - Default: ``yes``. -- **extended_debug**: Add additional debug logging messages that show what - last.fm tags were fetched for tracks, albums and artists. This is done before - any canonicalization and whitelist filtering is applied. It's useful for - tuning the plugin's settings and understanding how it works, but it can be - quite verbose. - Default: ``no``. +- **source**: Which entity to look up in Last.fm. 
Can be either ``artist``, + ``album`` or ``track``. Default: ``album``. +- **separator**: A separator for multiple genres. Default: ``', '``. +- **whitelist**: The filename of a custom genre list, ``yes`` to use the + internal whitelist, or ``no`` to consider all genres valid. Default: ``yes``. +- **title_case**: Convert the new tags to TitleCase before saving. Default: + ``yes``. Running Manually ---------------- -In addition to running automatically on import, the plugin can also be run manually -from the command line. Use the command ``beet lastgenre [QUERY]`` to fetch -genres for albums or items matching a certain query. +In addition to running automatically on import, the plugin can also be run +manually from the command line. Use the command ``beet lastgenre [QUERY]`` to +fetch genres for albums or items matching a certain query. -By default, ``beet lastgenre`` matches albums. To match -individual tracks or singletons, use the ``-A`` switch: -``beet lastgenre -A [QUERY]``. +By default, ``beet lastgenre`` matches albums. To match individual tracks or +singletons, use the ``-A`` switch: ``beet lastgenre -A [QUERY]``. -To disable automatic genre fetching on import, set the ``auto`` config option -to false. +To preview the changes that would be made without applying them, use the ``-p`` +or ``--pretend`` flag. This shows which genres would be set but does not write +or store any changes. + +To disable automatic genre fetching on import, set the ``auto`` config option to +false. + +Tuning Logs +----------- + +To enable tuning logs, run ``beet -vvv lastgenre ...`` or ``beet -vvv import +...``. This enables additional messages at the ``DEBUG`` log level, showing for +example what data was received from last.fm at each stage of genre fetching +(artist, album, and track levels) before any canonicalization or whitelist +filtering is applied. Tuning logs are useful for adjusting the plugin’s settings +and understanding its behavior, though they can be quite verbose. 
diff --git a/docs/plugins/lastimport.rst b/docs/plugins/lastimport.rst index 5fc7e4b4c..61fadc506 100644 --- a/docs/plugins/lastimport.rst +++ b/docs/plugins/lastimport.rst @@ -1,12 +1,12 @@ LastImport Plugin ================= -The ``lastimport`` plugin downloads play-count data from your `Last.fm`_ -library into beets' database. You can later create :doc:`smart playlists -</plugins/smartplaylist>` by querying ``play_count`` and do other fun stuff -with this field. +The ``lastimport`` plugin downloads play-count data from your Last.fm_ library +into beets' database. You can later create :doc:`smart playlists +</plugins/smartplaylist>` by querying ``play_count`` and do other fun stuff with +this field. -.. _Last.fm: https://last.fm +.. _last.fm: https://last.fm Installation ------------ @@ -18,7 +18,9 @@ To use the ``lastimport`` plugin, first enable it in your configuration (see pip install "beets[lastimport]" -Next, add your Last.fm username to your beets configuration file:: +Next, add your Last.fm username to your beets configuration file: + +:: lastfm: user: beetsfanatic @@ -27,11 +29,13 @@ Importing Play Counts --------------------- Simply run ``beet lastimport`` and wait for the plugin to request tracks from -Last.fm and match them to your beets library. (You will be notified of tracks -in your Last.fm profile that do not match any songs in your library.) +Last.fm and match them to your beets library. (You will be notified of tracks in +your Last.fm profile that do not match any songs in your library.) -Then, your matched tracks will be populated with the ``play_count`` field, -which you can use in any query or template. For example:: +Then, your matched tracks will be populated with the ``play_count`` field, which +you can use in any query or template. For example: + +:: $ beet ls -f '$title: $play_count' play_count:5.. 
Eple (Melody A.M.): 60 @@ -45,14 +49,15 @@ Configuration Aside from the required ``lastfm.user`` field, this plugin has some specific options under the ``lastimport:`` section: -* **per_page**: The number of tracks to request from the API at once. - Default: 500. -* **retry_limit**: How many times should we re-send requests to Last.fm on - failure? - Default: 3. +- **per_page**: The number of tracks to request from the API at once. Default: + 500. +- **retry_limit**: How many times should we re-send requests to Last.fm on + failure? Default: 3. By default, the plugin will use beets's own Last.fm API key. You can also -override it with your own API key:: +override it with your own API key: + +:: lastfm: api_key: your_api_key diff --git a/docs/plugins/limit.rst b/docs/plugins/limit.rst index cd89a5579..64ed89ed2 100644 --- a/docs/plugins/limit.rst +++ b/docs/plugins/limit.rst @@ -1,58 +1,63 @@ Limit Query Plugin ================== -``limit`` is a plugin to limit a query to the first or last set of -results. We also provide a query prefix ``'<n'`` to inline the same -behavior in the ``list`` command. They are analogous to piping results: +``limit`` is a plugin to limit a query to the first or last set of results. We +also provide a query prefix ``'<n'`` to inline the same behavior in the ``list`` +command. They are analogous to piping results: $ beet [list|ls] [QUERY] | [head|tail] -n n There are two provided interfaces: -1. ``beet lslimit [--head n | --tail n] [QUERY]`` returns the head or -tail of a query +1. ``beet lslimit [--head n | --tail n] [QUERY]`` returns the head or tail of a +query 2. ``beet [list|ls] [QUERY] '<n'`` returns the head of a query -There are two differences in behavior: +There are two differences in behavior: 1. The query prefix does not support tail. -2. The query prefix could appear anywhere in the query but will only -have the same behavior as the ``lslimit`` command and piping to ``head`` -when it appears last. +2. 
The query prefix could appear anywhere in the query but will only have the +same behavior as the ``lslimit`` command and piping to ``head`` when it appears +last. -Performance for the query previx is much worse due to the current -singleton-based implementation. +Performance for the query prefix is much worse +singleton-based implementation. -So why does the query prefix exist? Because it composes with any other -query-based API or plugin (see :doc:`/reference/query`). For example, -you can use the query prefix in ``smartplaylist`` -(see :doc:`/plugins/smartplaylist`) to limit the number of tracks in a smart -playlist for applications like most played and recently added. +So why does the query prefix exist? Because it composes with any other +query-based API or plugin (see :doc:`/reference/query`). For example, you can +use the query prefix in ``smartplaylist`` (see :doc:`/plugins/smartplaylist`) to +limit the number of tracks in a smart playlist for applications like most played +and recently added. Configuration ------------- -Enable the ``limit`` plugin in your configuration (see -:ref:`using-plugins`). +Enable the ``limit`` plugin in your configuration (see :ref:`using-plugins`). Examples -------- First 10 tracks +.. code-block:: sh + $ beet ls | head -n 10 $ beet lslimit --head 10 $ beet ls '<10' Last 10 tracks +.. code-block:: sh + $ beet ls | tail -n 10 $ beet lslimit --tail 10 100 mostly recently released tracks +.. code-block:: sh + $ beet lslimit --head 100 year- month- day- $ beet ls year- month- day- '<100' $ beet lslimit --tail 100 year+ month+ day+ diff --git a/docs/plugins/listenbrainz.rst b/docs/plugins/listenbrainz.rst index 037ccd685..ceff0e800 100644 --- a/docs/plugins/listenbrainz.rst +++ b/docs/plugins/listenbrainz.rst @@ -3,30 +3,35 @@ ListenBrainz Plugin =================== -The ListenBrainz plugin for beets allows you to interact with the ListenBrainz service. 
+The ListenBrainz plugin for beets allows you to interact with the ListenBrainz +service. -Installation ------------- +Configuration +------------- -To enable the ListenBrainz plugin, add the following to your beets configuration file (`config.yaml`_): +To enable the ListenBrainz plugin, add the following to your beets configuration +file (config.yaml_): .. code-block:: yaml - plugins: - - listenbrainz + plugins: + - listenbrainz -You can then configure the plugin by providing your Listenbrainz token (see intructions `here`_) and username:: +You can then configure the plugin by providing your Listenbrainz token (see +instructions here_) and username: + +:: listenbrainz: token: TOKEN username: LISTENBRAINZ_USERNAME - Usage ----- -Once the plugin is enabled, you can import the listening history using the `lbimport` command in beets. +Once the plugin is enabled, you can import the listening history using the +``lbimport`` command in beets. +.. _config.yaml: ../reference/config.rst .. _here: https://listenbrainz.readthedocs.io/en/latest/users/api/index.html#get-the-user-token -.. _config.yaml: ../reference/config.rst diff --git a/docs/plugins/loadext.rst b/docs/plugins/loadext.rst index 5acd10ec7..f0012b9b7 100644 --- a/docs/plugins/loadext.rst +++ b/docs/plugins/loadext.rst @@ -1,21 +1,21 @@ Load Extension Plugin ===================== -Beets uses an SQLite database to store and query library information, which -has support for extensions to extend its functionality. The ``loadext`` plugin -lets you enable these SQLite extensions within beets. +Beets uses an SQLite database to store and query library information, which has +support for extensions to extend its functionality. The ``loadext`` plugin lets +you enable these SQLite extensions within beets. One of the primary uses of this within beets is with the `"ICU" extension`_, which adds support for case insensitive querying of non-ASCII characters. -.. 
_"ICU" extension: https://www.sqlite.org/src/dir?ci=7461d2e120f21493&name=ext/icu +.. _"icu" extension: https://www.sqlite.org/src/dir?ci=7461d2e120f21493&name=ext/icu Configuration ------------- -To configure the plugin, make a ``loadext`` section in your configuration -file. The section must consist of a list of paths to extensions to load, which -looks like this: +To configure the plugin, make a ``loadext`` section in your configuration file. +The section must consist of a list of paths to extensions to load, which looks +like this: .. code-block:: yaml @@ -25,21 +25,22 @@ looks like this: If a relative path is specified, it is resolved relative to the beets configuration directory. -If no file extension is specified, the default dynamic library extension for -the current platform will be used. +If no file extension is specified, the default dynamic library extension for the +current platform will be used. Building the ICU extension -------------------------- + This section is for **advanced** users only, and is not an in-depth guide on building the extension. 
To compile the ICU extension, you will need a few dependencies: - - gcc - - icu-devtools - - libicu - - libicu-dev - - libsqlite3-dev + - gcc + - icu-devtools + - libicu + - libicu-dev + - libsqlite3-dev Here's roughly how to download, build and install the extension (although the specifics may vary from system to system): @@ -49,5 +50,5 @@ specifics may vary from system to system): $ wget https://sqlite.org/2019/sqlite-src-3280000.zip $ unzip sqlite-src-3280000.zip $ cd sqlite-src-3280000/ext/icu - $ gcc -shared -fPIC icu.c `icu-config --ldflags` -o libicu.so + $ gcc -shared -fPIC icu.c $(icu-config --ldflags) -o libicu.so $ cp libicu.so ~/.config/beets diff --git a/docs/plugins/lyrics.rst b/docs/plugins/lyrics.rst index a20f97faf..7984fcb6c 100644 --- a/docs/plugins/lyrics.rst +++ b/docs/plugins/lyrics.rst @@ -2,13 +2,14 @@ Lyrics Plugin ============= The ``lyrics`` plugin fetches and stores song lyrics from databases on the Web. -Namely, the current version of the plugin uses `Genius.com`_, `Tekstowo.pl`_, -`LRCLIB`_ and, optionally, the Google Custom Search API. +Namely, the current version of the plugin uses Genius.com_, Tekstowo.pl_, +LRCLIB_ and, optionally, the Google Custom Search API. -.. _Genius.com: https://genius.com/ -.. _Tekstowo.pl: https://www.tekstowo.pl/ -.. _LRCLIB: https://lrclib.net/ +.. _genius.com: https://genius.com/ +.. _lrclib: https://lrclib.net/ + +.. _tekstowo.pl: https://www.tekstowo.pl/ Install ------- @@ -62,20 +63,20 @@ The available options are: ``translate_to`` are translated. Use a list of language codes to restrict them. - **to_language**: Language code to translate lyrics to. -- **dist_thresh**: The maximum distance between the artist and title - combination of the music file and lyrics candidate to consider them a match. - Lower values will make the plugin more strict, higher values will make it - more lenient. This does not apply to the ``lrclib`` backend as it matches - durations. 
+ +- **dist_thresh**: The maximum distance between the artist and title combination + of the music file and lyrics candidate to consider them a match. Lower values + will make the plugin more strict, higher values will make it more lenient. + This does not apply to the ``lrclib`` backend as it matches durations. - **fallback**: By default, the file will be left unchanged when no lyrics are found. Use the empty string ``''`` to reset the lyrics in such a case. - **force**: By default, beets won't fetch lyrics if the files already have ones. To instead always fetch lyrics, set the ``force`` option to ``yes``. - **google_API_key**: Your Google API key (to enable the Google Custom Search backend). -- **google_engine_ID**: The custom search engine to use. - Default: The `beets custom search engine`_, which gathers an updated list of - sources known to be scrapeable. +- **google_engine_ID**: The custom search engine to use. Default: The `beets + custom search engine`_, which gathers an updated list of sources known to be + scrapeable. - **print**: Print lyrics to the console. - **sources**: List of sources to search for lyrics. An asterisk ``*`` expands to all available sources. The ``google`` source will be automatically @@ -109,61 +110,60 @@ Rendering Lyrics into Other Formats ----------------------------------- The ``-r directory, --write-rest directory`` option renders all lyrics as -`reStructuredText`_ (ReST) documents in ``directory``. That directory, in turn, -can be parsed by tools like `Sphinx`_ to generate HTML, ePUB, or PDF documents. +reStructuredText_ (ReST) documents in ``directory``. That directory, in turn, +can be parsed by tools like Sphinx_ to generate HTML, ePUB, or PDF documents. Minimal ``conf.py`` and ``index.rst`` files are created the first time the command is run. They are not overwritten on subsequent runs, so you can safely modify these files to customize the output. 
-Sphinx supports various `builders`_, see a few suggestions: - +Sphinx supports various builders_, see a few suggestions: .. admonition:: Build an HTML version - :: + :: - sphinx-build -b html <dir> <dir>/html + sphinx-build -b html <dir> <dir>/html .. admonition:: Build an ePUB3 formatted file, usable on ebook readers - :: + :: - sphinx-build -b epub3 <dir> <dir>/epub + sphinx-build -b epub3 <dir> <dir>/epub .. admonition:: Build a PDF file, which incidentally also builds a LaTeX file - :: + :: - sphinx-build -b latex <dir> <dir>/latex && make -C <dir>/latex all-pdf + sphinx-build -b latex <dir> <dir>/latex && make -C <dir>/latex all-pdf - -.. _Sphinx: https://www.sphinx-doc.org/ -.. _reStructuredText: http://docutils.sourceforge.net/rst.html .. _builders: https://www.sphinx-doc.org/en/stable/builders.html -Activate Google Custom Search ------------------------------- +.. _restructuredtext: http://docutils.sourceforge.net/rst.html -You need to `register for a Google API key`_. Set the ``google_API_key`` +.. _sphinx: https://www.sphinx-doc.org/ + +Activate Google Custom Search +----------------------------- + +You need to `register for a Google API key +<https://console.developers.google.com/>`__. Set the ``google_API_key`` configuration option to your key. -Then add ``google`` to the list of sources in your configuration (or use -default list, which includes it as long as you have an API key). -If you use default ``google_engine_ID``, we recommend limiting the sources to -``google`` as the other sources are already included in the Google results. +Then add ``google`` to the list of sources in your configuration (or use default +list, which includes it as long as you have an API key). If you use default +``google_engine_ID``, we recommend limiting the sources to ``google`` as the +other sources are already included in the Google results. Optionally, you can `define a custom search engine`_. 
Get your search engine's -token and use it for your ``google_engine_ID`` configuration option. By -default, beets use a list of sources known to be scrapeable. +token and use it for your ``google_engine_ID`` configuration option. By default, +beets use a list of sources known to be scrapeable. -Note that the Google custom search API is limited to 100 queries per day. -After that, the lyrics plugin will fall back on other declared data sources. +Note that the Google custom search API is limited to 100 queries per day. After +that, the lyrics plugin will fall back on other declared data sources. -.. _register for a Google API key: https://console.developers.google.com/ .. _define a custom search engine: https://www.google.com/cse/all - .. _lyrics-translation: Activate On-the-Fly Translation @@ -177,20 +177,22 @@ follow these steps: 3. Add the API key to your configuration as ``translate.api_key``. 4. Configure your target language using the ``translate.to_language`` option. - For example, with the following configuration .. code-block:: yaml - lyrics: - translate: - api_key: YOUR_TRANSLATOR_API_KEY - to_language: de + lyrics: + translate: + api_key: YOUR_TRANSLATOR_API_KEY + to_language: de -You should expect lyrics like this:: +You should expect lyrics like this: - Original verse / Ursprünglicher Vers - Some other verse / Ein anderer Vers +:: -.. _create a Translator resource: https://learn.microsoft.com/en-us/azure/ai-services/translator/create-translator-resource -.. _obtain its API key: https://learn.microsoft.com/en-us/python/api/overview/azure/ai-translation-text-readme?view=azure-python&preserve-view=true#get-an-api-key + Original verse / Ursprünglicher Vers + Some other verse / Ein anderer Vers + +.. _create a translator resource: https://learn.microsoft.com/en-us/azure/ai-services/translator/create-translator-resource + +.. 
_obtain its api key: https://learn.microsoft.com/en-us/python/api/overview/azure/ai-translation-text-readme?view=azure-python&preserve-view=true#get-an-api-key diff --git a/docs/plugins/mbcollection.rst b/docs/plugins/mbcollection.rst index 00acd4604..87efcd6d5 100644 --- a/docs/plugins/mbcollection.rst +++ b/docs/plugins/mbcollection.rst @@ -6,10 +6,11 @@ maintain your `music collection`_ list there. .. _music collection: https://musicbrainz.org/doc/Collections -To begin, just enable the ``mbcollection`` plugin in your -configuration (see :ref:`using-plugins`). -Then, add your MusicBrainz username and password to your -:doc:`configuration file </reference/config>` under a ``musicbrainz`` section:: +To begin, just enable the ``mbcollection`` plugin in your configuration (see +:ref:`using-plugins`). Then, add your MusicBrainz username and password to your +:doc:`configuration file </reference/config>` under a ``musicbrainz`` section: + +:: musicbrainz: user: you @@ -22,21 +23,18 @@ profile first. The command has one command-line option: -* To remove albums from the collection which are no longer present in - the beets database, use the ``-r`` (``--remove``) flag. - +- To remove albums from the collection which are no longer present in the beets + database, use the ``-r`` (``--remove``) flag. Configuration ------------- -To configure the plugin, make a ``mbcollection:`` section in your -configuration file. There is one option available: +To configure the plugin, make a ``mbcollection:`` section in your configuration +file. There is one option available: -- **auto**: Automatically amend your MusicBrainz collection whenever you - import a new album. - Default: ``no``. -- **collection**: The MBID of which MusicBrainz collection to update. - Default: ``None``. -- **remove**: Remove albums from collections which are no longer - present in the beets database. - Default: ``no``. +- **auto**: Automatically amend your MusicBrainz collection whenever you import + a new album. 
Default: ``no``. +- **collection**: The MBID of which MusicBrainz collection to update. Default: + ``None``. +- **remove**: Remove albums from collections which are no longer present in the + beets database. Default: ``no``. diff --git a/docs/plugins/mbpseudo.rst b/docs/plugins/mbpseudo.rst new file mode 100644 index 000000000..56658db26 --- /dev/null +++ b/docs/plugins/mbpseudo.rst @@ -0,0 +1,103 @@ +MusicBrainz Pseudo-Release Plugin +================================= + +The `mbpseudo` plugin can be used *instead of* the `musicbrainz` plugin to +search for MusicBrainz pseudo-releases_ during the import process, which are +added to the normal candidates from the MusicBrainz search. + +.. _pseudo-releases: https://musicbrainz.org/doc/Style/Specific_types_of_releases/Pseudo-Releases + +This is useful for releases whose title and track titles are written with a +script_ that can be translated or transliterated into a different one. + +.. _script: https://en.wikipedia.org/wiki/ISO_15924 + +Pseudo-releases will only be included if the initial search in MusicBrainz +returns releases whose script is *not* desired and whose relationships include +pseudo-releases with desired scripts. + +Configuration +------------- + +Since this plugin first searches for official releases from MusicBrainz, all +options from the `musicbrainz` plugin's :ref:`musicbrainz-config` are supported, +but they must be specified under `mbpseudo` in the configuration file. +Additionally, the configuration expects an array of scripts that are desired for +the pseudo-releases. For ``artist`` in particular, keep in mind that even +pseudo-releases might specify it with the original script, so you should also +configure import :ref:`languages` to give artist aliases more priority. +Therefore, the minimum configuration for this plugin looks like this: + +.. 
code-block:: yaml + + plugins: mbpseudo # remove musicbrainz + + import: + languages: en + + mbpseudo: + scripts: + - Latn + +Note that the `search_limit` configuration applies to the initial search for +official releases, and that the `data_source` in the database will be +"MusicBrainz". Nevertheless, `data_source_mismatch_penalty` must also be +specified under `mbpseudo` if desired (see also +:ref:`metadata-source-plugin-configuration`). An example with multiple data +sources may look like this: + +.. code-block:: yaml + + plugins: mbpseudo deezer + + import: + languages: en + + mbpseudo: + data_source_mismatch_penalty: 0 + scripts: + - Latn + + deezer: + data_source_mismatch_penalty: 0.2 + +By default, the data from the pseudo-release will be used to create a proposal +that is independent from the official release and sets all properties in its +metadata. It's possible to change the configuration so that some information +from the pseudo-release is instead added as custom tags, keeping the metadata +from the official release: + +.. code-block:: yaml + + mbpseudo: + # other config not shown + custom_tags_only: yes + +The default custom tags with this configuration are specified as mappings where +the keys define the tag names and the values define the pseudo-release property +that will be used to set the tag's value: + +.. code-block:: yaml + + mbpseudo: + album_custom_tags: + album_transl: album + album_artist_transl: artist + track_custom_tags: + title_transl: title + artist_transl: artist + +Note that the information for each set of custom tags corresponds to different +metadata levels (album or track level), which is why ``artist`` appears twice +even though it effectively references album artist and track artist +respectively. 
+ +If you want to modify any mapping under ``album_custom_tags`` or +``track_custom_tags``, you must specify *everything* for that set of tags in +your configuration file because any customization replaces the whole dictionary +of mappings for that level. + +.. note:: + + These custom tags are also added to the music files, not only to the + database. diff --git a/docs/plugins/mbsubmit.rst b/docs/plugins/mbsubmit.rst index 0e86ddc69..12e9cd208 100644 --- a/docs/plugins/mbsubmit.rst +++ b/docs/plugins/mbsubmit.rst @@ -8,28 +8,32 @@ that is parseable by MusicBrainz's `track parser`_. The prompt choices are: - Print the tracks to stdout in a format suitable for MusicBrainz's `track parser`_. +- Open the program Picard_ with the unmatched folder as an input, allowing you + to start submitting the unmatched release to MusicBrainz with many input + fields already filled in, thanks to Picard reading the preexisting tags of the + files. -- Open the program `Picard`_ with the unmatched folder as an input, allowing - you to start submitting the unmatched release to MusicBrainz with many input - fields already filled in, thanks to Picard reading the preexisting tags of - the files. - -For the last option, `Picard`_ is assumed to be installed and available on the +For the last option, Picard_ is assumed to be installed and available on the machine including a ``picard`` executable. Picard developers list `download options`_. `other GNU/Linux distributions`_ may distribute Picard via their package manager as well. -.. _track parser: https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings -.. _Picard: https://picard.musicbrainz.org/ .. _download options: https://picard.musicbrainz.org/downloads/ -.. _other GNU/Linux distributions: https://repology.org/project/picard-tagger/versions + +.. _other gnu/linux distributions: https://repology.org/project/picard-tagger/versions + +.. _picard: https://picard.musicbrainz.org/ + +.. 
_track parser: https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings Usage ----- Enable the ``mbsubmit`` plugin in your configuration (see :ref:`using-plugins`) and select one of the options mentioned above. Here the option ``Print tracks`` -choice is demonstrated:: +choice is demonstrated: + +:: No matching release found for 3 tracks. For help, see: https://beets.readthedocs.org/en/latest/faq.html#nomatch @@ -44,7 +48,10 @@ choice is demonstrated:: [U]se as-is, as Tracks, Group albums, Skip, Enter search, enter Id, aBort, Print tracks? -You can also run ``beet mbsubmit QUERY`` to print the track information for any album:: +You can also run ``beet mbsubmit QUERY`` to print the track information for any +album: + +:: $ beet mbsubmit album:"An Obscure Album" 01. An Obscure Track - An Obscure Artist (3:37) @@ -53,8 +60,8 @@ You can also run ``beet mbsubmit QUERY`` to print the track information for any As MusicBrainz currently does not support submitting albums programmatically, the recommended workflow is to copy the output of the ``Print tracks`` choice -and paste it into the parser that can be found by clicking on the -"Track Parser" button on MusicBrainz "Tracklist" tab. +and paste it into the parser that can be found by clicking on the "Track Parser" +button on MusicBrainz "Tracklist" tab. Configuration ------------- @@ -62,22 +69,22 @@ Configuration To configure the plugin, make a ``mbsubmit:`` section in your configuration file. The following options are available: -- **format**: The format used for printing the tracks, defined using the - same template syntax as beets’ :doc:`path formats </reference/pathformat>`. +- **format**: The format used for printing the tracks, defined using the same + template syntax as beets’ :doc:`path formats </reference/pathformat>`. Default: ``$track. $title - $artist ($length)``. 
-- **threshold**: The minimum strength of the autotagger recommendation that - will cause the ``Print tracks`` choice to be displayed on the prompt. - Default: ``medium`` (causing the choice to be displayed for all albums that - have a recommendation of medium strength or lower). Valid values: ``none``, - ``low``, ``medium``, ``strong``. +- **threshold**: The minimum strength of the autotagger recommendation that will + cause the ``Print tracks`` choice to be displayed on the prompt. Default: + ``medium`` (causing the choice to be displayed for all albums that have a + recommendation of medium strength or lower). Valid values: ``none``, ``low``, + ``medium``, ``strong``. - **picard_path**: The path to the ``picard`` executable. Could be an absolute path, and if not, ``$PATH`` is consulted. The default value is simply ``picard``. Windows users will have to find and specify the absolute path to - their ``picard.exe``. That would probably be: - ``C:\Program Files\MusicBrainz Picard\picard.exe``. + their ``picard.exe``. That would probably be: ``C:\Program Files\MusicBrainz + Picard\picard.exe``. Please note that some values of the ``threshold`` configuration option might require other ``beets`` command line switches to be enabled in order to work as -intended. In particular, setting a threshold of ``strong`` will only display -the prompt if ``timid`` mode is enabled. You can find more information about -how the recommendation system works at :ref:`match-config`. +intended. In particular, setting a threshold of ``strong`` will only display the +prompt if ``timid`` mode is enabled. You can find more information about how the +recommendation system works at :ref:`match-config`. 
diff --git a/docs/plugins/mbsync.rst b/docs/plugins/mbsync.rst index 647ff4df8..d2f80d1f8 100644 --- a/docs/plugins/mbsync.rst +++ b/docs/plugins/mbsync.rst @@ -1,8 +1,8 @@ MBSync Plugin ============= -This plugin provides the ``mbsync`` command, which lets you synchronize -metadata for albums and tracks that have external data source IDs. +This plugin provides the ``mbsync`` command, which lets you synchronize metadata +for albums and tracks that have external data source IDs. This is useful for syncing your library with online data or when changing configuration options that affect tag writing. If your music library already @@ -10,7 +10,6 @@ contains correct tags, you can speed up the initial import by importing files "as-is" and then using ``mbsync`` to write tags according to your beets configuration. - Usage ----- @@ -18,20 +17,20 @@ Enable the ``mbsync`` plugin in your configuration (see :ref:`using-plugins`) and then run ``beet mbsync QUERY`` to fetch updated metadata for a part of your collection (or omit the query to run over your whole library). -This plugin treats albums and singletons (non-album tracks) separately. It -first processes all matching singletons and then proceeds on to full albums. -The same query is used to search for both kinds of entities. +This plugin treats albums and singletons (non-album tracks) separately. It first +processes all matching singletons and then proceeds on to full albums. The same +query is used to search for both kinds of entities. The command has a few command-line options: -* To preview the changes that would be made without applying them, use the +- To preview the changes that would be made without applying them, use the ``-p`` (``--pretend``) flag. -* By default, files will be moved (renamed) according to their metadata if - they are inside your beets library directory. To disable this, use the - ``-M`` (``--nomove``) command-line option. 
-* If you have the ``import.write`` configuration option enabled, then this - plugin will write new metadata to files' tags. To disable this, use the - ``-W`` (``--nowrite``) option. -* To customize the output of unrecognized items, use the ``-f`` - (``--format``) option. The default output is ``format_item`` or - ``format_album`` for items and albums, respectively. +- By default, files will be moved (renamed) according to their metadata if they + are inside your beets library directory. To disable this, use the ``-M`` + (``--nomove``) command-line option. +- If you have the ``import.write`` configuration option enabled, then this + plugin will write new metadata to files' tags. To disable this, use the ``-W`` + (``--nowrite``) option. +- To customize the output of unrecognized items, use the ``-f`` (``--format``) + option. The default output is ``format_item`` or ``format_album`` for items + and albums, respectively. diff --git a/docs/plugins/metasync.rst b/docs/plugins/metasync.rst index 8d9dac5a3..617281c6b 100644 --- a/docs/plugins/metasync.rst +++ b/docs/plugins/metasync.rst @@ -4,23 +4,21 @@ MetaSync Plugin This plugin provides the ``metasync`` command, which lets you fetch certain metadata from other sources: for example, your favorite audio player. -Currently, the plugin supports synchronizing with the `Amarok`_ music player, -and with `iTunes`_. -It can fetch the rating, score, first-played date, last-played date, play -count, and track uid from Amarok. +Currently, the plugin supports synchronizing with the Amarok_ music player, and +with iTunes_. It can fetch the rating, score, first-played date, last-played +date, play count, and track uid from Amarok. -.. _Amarok: https://amarok.kde.org/ -.. _iTunes: https://www.apple.com/itunes/ +.. _amarok: https://amarok.kde.org/ +.. _itunes: https://www.apple.com/itunes/ Installation ------------ -Enable the ``metasync`` plugin in your configuration (see -:ref:`using-plugins`). 
+Enable the ``metasync`` plugin in your configuration (see :ref:`using-plugins`). -To synchronize with Amarok, you'll need the `dbus-python`_ library. In such -case, install ``beets`` with ``metasync`` extra +To synchronize with Amarok, you'll need the dbus-python_ library. In such case, +install ``beets`` with ``metasync`` extra .. code-block:: bash @@ -28,23 +26,24 @@ case, install ``beets`` with ``metasync`` extra .. _dbus-python: https://dbus.freedesktop.org/releases/dbus-python/ - Configuration ------------- To configure the plugin, make a ``metasync:`` section in your configuration file. The available options are: -- **source**: A list of comma-separated sources to fetch metadata from. - Set this to "amarok" or "itunes" to enable synchronization with that player. - Default: empty +- **source**: A list of comma-separated sources to fetch metadata from. Set this + to "amarok" or "itunes" to enable synchronization with that player. Default: + empty The follow subsections describe additional configure required for some players. itunes -'''''' +~~~~~~ -The path to your iTunes library **xml** file has to be configured, e.g.:: +The path to your iTunes library **xml** file has to be configured, e.g.: + +:: metasync: source: itunes @@ -61,7 +60,7 @@ sources. The command has a few command-line options: -* To preview the changes that would be made without applying them, use the +- To preview the changes that would be made without applying them, use the ``-p`` (``--pretend``) flag. -* To specify temporary sources to fetch metadata from, use the ``-s`` +- To specify temporary sources to fetch metadata from, use the ``-s`` (``--source``) flag with a comma-separated list of a sources. diff --git a/docs/plugins/missing.rst b/docs/plugins/missing.rst index 9cd3fde71..d286e43cc 100644 --- a/docs/plugins/missing.rst +++ b/docs/plugins/missing.rst @@ -8,7 +8,6 @@ call to album data source. Usage ----- -Add the ``missing`` plugin to your configuration (see :ref:`using-plugins`). 
The ``beet missing`` command fetches album information from the origin data source and lists names of the **tracks** that are missing from your library. @@ -17,13 +16,15 @@ is limited to albums from the MusicBrainz data source only. You can customize the output format, show missing counts instead of track titles, or display the total number of missing entities across your entire -library:: +library: - -f FORMAT, --format=FORMAT - print with custom FORMAT - -c, --count count missing tracks per album - -t, --total count totals across the entire library - -a, --album show missing albums for artist instead of tracks for album +:: + + -f FORMAT, --format=FORMAT + print with custom FORMAT + -c, --count count missing tracks per album + -t, --total count totals across the entire library + -a, --album show missing albums for artist instead of tracks for album …or by editing the corresponding configuration options. @@ -34,63 +35,83 @@ library:: Configuration ------------- -To configure the plugin, make a ``missing:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``missing:`` section in your configuration file. +The available options are: -- **count**: Print a count of missing tracks per album, with ``format`` - defaulting to ``$albumartist - $album: $missing``. - Default: ``no``. -- **format**: A specific format with which to print every - track. This uses the same template syntax as beets' - :doc:`path formats </reference/pathformat>`. The usage is inspired by, and - therefore similar to, the :ref:`list <list-cmd>` command. - Default: :ref:`format_item`. -- **total**: Print a single count of missing tracks in all albums. - Default: ``no``. +- **count**: Print a count of missing tracks per album, with the global + ``format_album`` used for formatting. Default: ``no``. +- **total**: Print a single count of missing tracks in all albums. Default: + ``no``. 
-Here's an example :: +Formatting +~~~~~~~~~~ +- This plugin uses global formatting options from the main configuration; see + :ref:`format_item` and :ref:`format_album`: +- :ref:`format_item`: Used when listing missing tracks (default item format). +- :ref:`format_album`: Used when showing counts (``-c``) or missing albums + (``-a``). + +Here's an example + +:: + + format_album: $albumartist - $album + format_item: $artist - $album - $title missing: - format: $albumartist - $album - $title count: no total: no Template Fields --------------- -With this plugin enabled, the ``$missing`` template field expands to the -number of tracks missing from each album. +With this plugin enabled, the ``$missing`` template field expands to the number +of tracks missing from each album. Examples -------- -List all missing tracks in your collection:: +List all missing tracks in your collection: - beet missing +:: -List all missing albums in your collection:: + beet missing - beet missing -a +List all missing albums in your collection: -List all missing tracks from 2008:: +:: - beet missing year:2008 + beet missing -a -Print out a unicode histogram of the missing track years using `spark`_:: +List all missing tracks from 2008: - beet missing -f '$year' | spark - ▆▁▆█▄▇▇▄▇▇▁█▇▆▇▂▄█▁██▂█▁▁██▁█▂▇▆▂▇█▇▇█▆▆▇█▇█▇▆██▂▇ +:: -Print out a listing of all albums with missing tracks, and respective counts:: + beet missing year:2008 - beet missing -c +Print out a unicode histogram of the missing track years using spark_: -Print out a count of the total number of missing tracks:: +:: - beet missing -t + beet missing -f '$year' | spark + ▆▁▆█▄▇▇▄▇▇▁█▇▆▇▂▄█▁██▂█▁▁██▁█▂▇▆▂▇█▇▇█▆▆▇█▇█▇▆██▂▇ -Call this plugin from other beet commands:: +Print out a listing of all albums with missing tracks, and respective counts: - beet ls -a -f '$albumartist - $album: $missing' +:: + + beet missing -c + +Print out a count of the total number of missing tracks: + +:: + + beet missing -t + +Call this plugin from other 
beet commands: + +:: + + beet ls -a -f '$albumartist - $album: $missing' .. _spark: https://github.com/holman/spark diff --git a/docs/plugins/mpdstats.rst b/docs/plugins/mpdstats.rst index cb2cf1606..276b069e3 100644 --- a/docs/plugins/mpdstats.rst +++ b/docs/plugins/mpdstats.rst @@ -1,25 +1,24 @@ MPDStats Plugin -================ +=============== ``mpdstats`` is a plugin for beets that collects statistics about your listening -habits from `MPD`_. It collects the following information about tracks: +habits from MPD_. It collects the following information about tracks: -* ``play_count``: The number of times you *fully* listened to this track. -* ``skip_count``: The number of times you *skipped* this track. -* ``last_played``: UNIX timestamp when you last played this track. -* ``rating``: A rating based on ``play_count`` and ``skip_count``. +- ``play_count``: The number of times you *fully* listened to this track. +- ``skip_count``: The number of times you *skipped* this track. +- ``last_played``: UNIX timestamp when you last played this track. +- ``rating``: A rating based on ``play_count`` and ``skip_count``. -To gather these statistics it runs as an MPD client and watches the current state -of MPD. This means that ``mpdstats`` needs to be running continuously for it to -work. +To gather these statistics it runs as an MPD client and watches the current +state of MPD. This means that ``mpdstats`` needs to be running continuously for +it to work. -.. _MPD: https://www.musicpd.org/ +.. _mpd: https://www.musicpd.org/ Installing Dependencies ----------------------- -This plugin requires the python-mpd2 library in order to talk to the MPD -server. +This plugin requires the python-mpd2 library in order to talk to the MPD server. To use the ``mpdstats`` plugin, first enable it in your configuration (see :ref:`using-plugins`). 
Then, install ``beets`` with ``mpdstats`` extra @@ -29,76 +28,77 @@ To use the ``mpdstats`` plugin, first enable it in your configuration (see Usage ----- -Use the ``mpdstats`` command to fire it up:: +Use the ``mpdstats`` command to fire it up: + +:: $ beet mpdstats Configuration ------------- -To configure the plugin, make an ``mpd:`` section in your -configuration file. The available options are: +To configure the plugin, make an ``mpd:`` section in your configuration file. +The available options are: -- **host**: The MPD server hostname. - Default: The ``$MPD_HOST`` environment variable if set, - falling back to ``localhost`` otherwise. -- **port**: The MPD server port. - Default: The ``$MPD_PORT`` environment variable if set, - falling back to 6600 otherwise. -- **password**: The MPD server password. - Default: None. +- **host**: The MPD server hostname. Default: The ``$MPD_HOST`` environment + variable if set, falling back to ``localhost`` otherwise. +- **port**: The MPD server port. Default: The ``$MPD_PORT`` environment variable + if set, falling back to 6600 otherwise. +- **password**: The MPD server password. Default: None. - **music_directory**: If your MPD library is at a different location from the beets library (e.g., because one is mounted on a NFS share), specify the path here. -- **strip_path**: If your MPD library contains local path, specify the part to remove - here. Combining this with **music_directory** you can mangle MPD path to match the - beets library one. - Default: The beets library directory. -- **rating**: Enable rating updates. - Default: ``yes``. -- **rating_mix**: Tune the way rating is calculated (see below). - Default: 0.75. +- **strip_path**: If your MPD library contains local path, specify the part to + remove here. Combining this with **music_directory** you can mangle MPD path + to match the beets library one. Default: The beets library directory. +- **rating**: Enable rating updates. Default: ``yes``. 
+- **rating_mix**: Tune the way rating is calculated (see below). Default: 0.75. +- **played_ratio_threshold**: If a song was played for less than this percentage + of its duration it will be considered a skip. Default: 0.85 A Word on Ratings ----------------- Ratings are calculated based on the *play_count*, *skip_count* and the last -*action* (play or skip). It consists in one part of a *stable_rating* and in -another part on a *rolling_rating*. The *stable_rating* is calculated like -this:: +*action* (play or skip). It consists in one part of a *stable_rating* and in +another part of a *rolling_rating*. The *stable_rating* is calculated like this: + +:: stable_rating = (play_count + 1.0) / (play_count + skip_count + 2.0) So if the *play_count* equals the *skip_count*, the *stable_rating* is always -0.5. More *play_counts* adjust the rating up to 1.0. More *skip_counts* -adjust it down to 0.0. One of the disadvantages of this rating system, is -that it doesn't really cover *recent developments*. e.g. a song that you -loved last year and played over 50 times will keep a high rating even if you -skipped it the last 10 times. That's were the *rolling_rating* comes in. +0.5. More *play_counts* adjust the rating up to 1.0. More *skip_counts* adjust +it down to 0.0. One of the disadvantages of this rating system, is that it +doesn't really cover *recent developments*. e.g. a song that you loved last year +and played over 50 times will keep a high rating even if you skipped it the last +10 times. That's where the *rolling_rating* comes in. -If a song has been fully played, the *rolling_rating* is calculated like -this:: +If a song has been fully played, the *rolling_rating* is calculated like this: + +:: rolling_rating = old_rating + (1.0 - old_rating) / 2.0 -If a song has been skipped, like this:: +If a song has been skipped, like this: + +:: rolling_rating = old_rating - old_rating / 2.0 -So *rolling_rating* adapts pretty fast to *recent developments*. 
But it's too -fast. Taking the example from above, your old favorite with 50 plays will get -a negative rating (<0.5) the first time you skip it. Also not good. +So *rolling_rating* adapts pretty fast to *recent developments*. But it's too +fast. Taking the example from above, your old favorite with 50 plays will get a +negative rating (<0.5) the first time you skip it. Also not good. To take the best of both worlds, we mix the ratings together with the -``rating_mix`` factor. A ``rating_mix`` of 0.0 means all -*rolling* and 1.0 means all *stable*. We found 0.75 to be a good compromise, -but fell free to play with that. - +``rating_mix`` factor. A ``rating_mix`` of 0.0 means all *rolling* and 1.0 means +all *stable*. We found 0.75 to be a good compromise, but feel free to play with +that. Warning ------- -This has only been tested with MPD versions >= 0.16. It may not work -on older versions. If that is the case, please report an `issue`_. +This has only been tested with MPD versions >= 0.16. It may not work on older +versions. If that is the case, please report an issue_. .. _issue: https://github.com/beetbox/beets/issues diff --git a/docs/plugins/mpdupdate.rst b/docs/plugins/mpdupdate.rst index 01a6a9fe7..9ac011ff5 100644 --- a/docs/plugins/mpdupdate.rst +++ b/docs/plugins/mpdupdate.rst @@ -2,15 +2,16 @@ MPDUpdate Plugin ================ ``mpdupdate`` is a very simple plugin for beets that lets you automatically -update `MPD`_'s index whenever you change your beets library. +update MPD_'s index whenever you change your beets library. -.. _MPD: https://www.musicpd.org/ +.. _mpd: https://www.musicpd.org/ -To use ``mpdupdate`` plugin, enable it in your configuration -(see :ref:`using-plugins`). -Then, you'll probably want to configure the specifics of your MPD server. -You can do that using an ``mpd:`` section in your ``config.yaml``, -which looks like this:: +To use ``mpdupdate`` plugin, enable it in your configuration (see +:ref:`using-plugins`). 
Then, you'll probably want to configure the specifics of +your MPD server. You can do that using an ``mpd:`` section in your +``config.yaml``, which looks like this: + +:: mpd: host: localhost @@ -20,9 +21,9 @@ which looks like this:: With that all in place, you'll see beets send the "update" command to your MPD server every time you change your beets library. -If you want to communicate with MPD over a Unix domain socket instead over -TCP, just give the path to the socket in the filesystem for the ``host`` -setting. (Any ``host`` value starting with a slash or a tilde is interpreted as a domain +If you want to communicate with MPD over a Unix domain socket instead of over TCP, +just give the path to the socket in the filesystem for the ``host`` setting. +(Any ``host`` value starting with a slash or a tilde is interpreted as a domain socket.) Configuration @@ -30,10 +31,8 @@ Configuration The available options under the ``mpd:`` section are: -- **host**: The MPD server name. - Default: The ``$MPD_HOST`` environment variable if set, falling back to ``localhost`` otherwise. -- **port**: The MPD server port. - Default: The ``$MPD_PORT`` environment variable if set, falling back to 6600 - otherwise. -- **password**: The MPD server password. - Default: None. +- **host**: The MPD server name. Default: The ``$MPD_HOST`` environment variable + if set, falling back to ``localhost`` otherwise. +- **port**: The MPD server port. Default: The ``$MPD_PORT`` environment variable + if set, falling back to 6600 otherwise. +- **password**: The MPD server password. Default: None. diff --git a/docs/plugins/musicbrainz.rst b/docs/plugins/musicbrainz.rst new file mode 100644 index 000000000..60c3bc4a2 --- /dev/null +++ b/docs/plugins/musicbrainz.rst @@ -0,0 +1,151 @@ +MusicBrainz Plugin +================== + +The ``musicbrainz`` plugin extends the autotagger's search capabilities to +include matches from the MusicBrainz_ database. + +.. 
_musicbrainz: https://musicbrainz.org/ + +Installation +------------ + +To use the ``musicbrainz`` plugin, enable it in your configuration (see +:ref:`using-plugins`) + +.. _musicbrainz-config: + +Configuration +------------- + +This plugin can be configured like other metadata source plugins as described in +:ref:`metadata-source-plugin-configuration`. + +Default +~~~~~~~ + +.. code-block:: yaml + + musicbrainz: + host: musicbrainz.org + https: no + ratelimit: 1 + ratelimit_interval: 1.0 + extra_tags: [] + genres: no + genres_tag: genre + external_ids: + discogs: no + bandcamp: no + spotify: no + deezer: no + beatport: no + tidal: no + data_source_mismatch_penalty: 0.5 + search_limit: 5 + +.. conf:: host + :default: musicbrainz.org + + The Web server hostname (and port, optionally) that will be contacted by beets. + You can use this to configure beets to use `your own MusicBrainz database + <https://musicbrainz.org/doc/MusicBrainz_Server/Setup>`__ instead of the + `main server`_. + + The server must have search indices enabled (see `Building search indexes`_). + + Example: + + .. code-block:: yaml + + musicbrainz: + host: localhost:5000 + +.. conf:: https + :default: no + + Makes the client use HTTPS instead of HTTP. This setting applies only to custom + servers. The official MusicBrainz server always uses HTTPS. + +.. conf:: ratelimit + :default: 1 + + Controls the number of Web service requests per second. This setting applies only + to custom servers. The official MusicBrainz server enforces a rate limit of 1 + request per second. + +.. conf:: ratelimit_interval + :default: 1.0 + + The time interval (in seconds) for the rate limit. Only applies to custom servers. + +.. conf:: enabled + :default: yes + + .. deprecated:: 2.4 Add ``musicbrainz`` to the ``plugins`` list instead. + +.. conf:: extra_tags + :default: [] + + By default, beets will use only the artist, album, and track count to query + MusicBrainz. 
Additional tags to be queried can be supplied with the + ``extra_tags`` setting. + + This setting should improve the autotagger results if the metadata with the + given tags match the metadata returned by MusicBrainz. + + Note that the only tags supported by this setting are: ``barcode``, + ``catalognum``, ``country``, ``label``, ``media``, and ``year``. + + Example: + + .. code-block:: yaml + + musicbrainz: + extra_tags: [barcode, catalognum, country, label, media, year] + +.. conf:: genres + :default: no + + Use MusicBrainz genre tags to populate (and replace if it's already set) the + ``genre`` tag. This will make it a list of all the genres tagged for the release + and the release-group on MusicBrainz, separated by "; " and sorted by the total + number of votes. + +.. conf:: external_ids + + **Default** + + .. code-block:: yaml + + musicbrainz: + external_ids: + discogs: no + spotify: no + bandcamp: no + beatport: no + deezer: no + tidal: no + + Set any of the ``external_ids`` options to ``yes`` to enable the MusicBrainz + importer to look for links to related metadata sources. If such a link is + available the release ID will be extracted from the URL provided and imported to + the beets library. + + The library fields of the corresponding :ref:`autotagger_extensions` are used to + save the data as flexible attributes (``discogs_album_id``, ``bandcamp_album_id``, ``spotify_album_id``, + ``beatport_album_id``, ``deezer_album_id``, ``tidal_album_id``). On re-imports + existing data will be overwritten. + +.. conf:: genres_tag + :default: genre + + Either ``genre`` or ``tag``. Specify ``genre`` to use just musicbrainz genre and + ``tag`` to use all user-supplied musicbrainz tags. + +.. include:: ./shared_metadata_source_config.rst + +.. _building search indexes: https://musicbrainz.org/doc/Development/Search_server_setup + +.. _limited: https://musicbrainz.org/doc/XML_Web_Service/Rate_Limiting + +.. 
_main server: https://musicbrainz.org/ diff --git a/docs/plugins/parentwork.rst b/docs/plugins/parentwork.rst index fb15af9f1..21b774120 100644 --- a/docs/plugins/parentwork.rst +++ b/docs/plugins/parentwork.rst @@ -1,30 +1,28 @@ ParentWork Plugin ================= -The ``parentwork`` plugin fetches the work title, parent work title and -parent work composer from MusicBrainz. +The ``parentwork`` plugin fetches the work title, parent work title and parent +work composer from MusicBrainz. -In the MusicBrainz database, a recording can be associated with a work. A -work can itself be associated with another work, for example one being part -of the other (what we call the *direct parent*). This plugin looks the work id -from the library and then looks up the direct parent, then the direct parent -of the direct parent and so on until it reaches the top. The work at the top -is what we call the *parent work*. +In the MusicBrainz database, a recording can be associated with a work. A work +can itself be associated with another work, for example one being part of the +other (what we call the *direct parent*). This plugin looks up the work id from the +library and then looks up the direct parent, then the direct parent of the +direct parent and so on until it reaches the top. The work at the top is what we +call the *parent work*. -This plugin is especially designed for -classical music. For classical music, just fetching the work title as in -MusicBrainz is not satisfying, because MusicBrainz has separate works for, for -example, all the movements of a symphony. This plugin aims to solve this -problem by also fetching the parent work, which would be the whole symphony in -this example. +This plugin is especially designed for classical music. For classical music, +just fetching the work title as in MusicBrainz is not satisfying, because +MusicBrainz has separate works for, for example, all the movements of a +symphony. 
This plugin aims to solve this problem by also fetching the parent +work, which would be the whole symphony in this example. The plugin can detect changes in ``mb_workid`` so it knows when to re-fetch other metadata, such as ``parentwork``. To do this, when it runs, it stores a -copy of ``mb_workid`` in the bookkeeping field ``parentwork_workid_current``. -At any later run of ``beet parentwork`` it will check if the tags -``mb_workid`` and ``parentwork_workid_current`` are still identical. If it is -not the case, it means the work has changed and all the tags need to be -fetched again. +copy of ``mb_workid`` in the bookkeeping field ``parentwork_workid_current``. At +any later run of ``beet parentwork`` it will check if the tags ``mb_workid`` and +``parentwork_workid_current`` are still identical. If it is not the case, it +means the work has changed and all the tags need to be fetched again. This plugin adds seven tags: @@ -33,28 +31,23 @@ This plugin adds seven tags: - **parentwork_disambig**: The disambiguation of the parent work title. - **parent_composer**: The composer of the parent work. - **parent_composer_sort**: The sort name of the parent work composer. -- **work_date**: The composition date of the work, or the first parent work - that has a composition date. Format: yyyy-mm-dd. +- **work_date**: The composition date of the work, or the first parent work that + has a composition date. Format: yyyy-mm-dd. - **parentwork_workid_current**: The MusicBrainz id of the work as it was when the parentwork was retrieved. This tag exists only for internal bookkeeping, - to keep track of recordings whose works have changed. + to keep track of recordings whose works have changed. - **parentwork_date**: The composition date of the parent work. -To use the ``parentwork`` plugin, enable it in your configuration (see -:ref:`using-plugins`). - Configuration ------------- -To configure the plugin, make a ``parentwork:`` section in your -configuration file. 
The available options are: - -- **force**: As a default, ``parentwork`` only fetches work info for - recordings that do not already have a ``parentwork`` tag or where - ``mb_workid`` differs from ``parentwork_workid_current``. If ``force`` - is enabled, it fetches it for all recordings. - Default: ``no`` +To configure the plugin, make a ``parentwork:`` section in your configuration +file. The available options are: +- **force**: As a default, ``parentwork`` only fetches work info for recordings + that do not already have a ``parentwork`` tag or where ``mb_workid`` differs + from ``parentwork_workid_current``. If ``force`` is enabled, it fetches it for + all recordings. Default: ``no`` - **auto**: If enabled, automatically fetches works at import. It takes quite some time, because beets is restricted to one MusicBrainz query per second. Default: ``no`` diff --git a/docs/plugins/permissions.rst b/docs/plugins/permissions.rst index 9c4cdc0aa..33841d8d9 100644 --- a/docs/plugins/permissions.rst +++ b/docs/plugins/permissions.rst @@ -1,8 +1,8 @@ Permissions Plugin ================== -The ``permissions`` plugin allows you to set file permissions for imported -music files and its directories. +The ``permissions`` plugin allows you to set file permissions for imported music +files and its directories. To use the ``permissions`` plugin, enable it in your configuration (see :ref:`using-plugins`). Permissions will be adjusted automatically on import. @@ -12,9 +12,12 @@ Configuration To configure the plugin, make an ``permissions:`` section in your configuration file. The ``file`` config value therein uses **octal modes** to specify the -desired permissions. The default flags for files are octal 644 and 755 for directories. +desired permissions. The default flags for files are octal 644 and 755 for +directories. 
-Here's an example:: +Here's an example: + +:: permissions: file: 644 diff --git a/docs/plugins/play.rst b/docs/plugins/play.rst index d72ec4e0d..f06eb4cb3 100644 --- a/docs/plugins/play.rst +++ b/docs/plugins/play.rst @@ -1,70 +1,72 @@ Play Plugin =========== -The ``play`` plugin allows you to pass the results of a query to a music -player in the form of an m3u playlist or paths on the command line. +The ``play`` plugin allows you to pass the results of a query to a music player +in the form of an m3u playlist or paths on the command line. Command Line Usage ------------------ To use the ``play`` plugin, enable it in your configuration (see -:ref:`using-plugins`). Then use it by invoking the ``beet play`` command with -a query. The command will create a temporary m3u file and open it using an -appropriate application. You can query albums instead of tracks using the -``-a`` option. +:ref:`using-plugins`). Then use it by invoking the ``beet play`` command with a +query. The command will create a temporary m3u file and open it using an +appropriate application. You can query albums instead of tracks using the ``-a`` +option. By default, the playlist is opened using the ``open`` command on OS X, ``xdg-open`` on other Unixes, and ``start`` on Windows. To configure the -command, you can use a ``play:`` section in your configuration file:: +command, you can use a ``play:`` section in your configuration file: + +:: play: command: /Applications/VLC.app/Contents/MacOS/VLC You can also specify additional space-separated options to command (like you -would on the command-line):: +would on the command-line): + +:: play: command: /usr/bin/command --option1 --option2 some_other_option -While playing you'll be able to interact with the player if it is a -command-line oriented, and you'll get its output in real time. +While playing you'll be able to interact with the player if it is a command-line +oriented, and you'll get its output in real time. 
Interactive Usage ----------------- The ``play`` plugin can also be invoked during an import. If enabled, the plugin -adds a ``plaY`` option to the prompt, so pressing ``y`` will execute the configured -command and play the items currently being imported. +adds a ``plaY`` option to the prompt, so pressing ``y`` will execute the +configured command and play the items currently being imported. -Once the configured command exits, you will be returned to the import -decision prompt. If your player is configured to run in the background (in a +Once the configured command exits, you will be returned to the import decision +prompt. If your player is configured to run in the background (in a client/server setup), the music will play until you choose to stop it, and the import operation continues immediately. Configuration ------------- -To configure the plugin, make a ``play:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``play:`` section in your configuration file. +The available options are: -- **command**: The command used to open the playlist. - Default: ``open`` on OS X, ``xdg-open`` on other Unixes and ``start`` on - Windows. Insert ``$args`` to use the ``--args`` feature. -- **relative_to**: If set, emit paths relative to this directory. - Default: None. -- **use_folders**: When using the ``-a`` option, the m3u will contain the - paths to each track on the matched albums. Enable this option to - store paths to folders instead. - Default: ``no``. +- **command**: The command used to open the playlist. Default: ``open`` on OS X, + ``xdg-open`` on other Unixes and ``start`` on Windows. Insert ``$args`` to use + the ``--args`` feature. +- **relative_to**: If set, emit paths relative to this directory. Default: None. +- **use_folders**: When using the ``-a`` option, the m3u will contain the paths + to each track on the matched albums. Enable this option to store paths to + folders instead. Default: ``no``. 
- **raw**: Instead of creating a temporary m3u playlist and then opening it, simply call the command with the paths returned by the query as arguments. Default: ``no``. - **warning_threshold**: Set the minimum number of files to play which will trigger a warning to be emitted. If set to ``no``, warning are never issued. Default: 100. -- **bom**: Set whether or not a UTF-8 Byte Order Mark should be emitted into - the m3u file. If you're using foobar2000 or Winamp, this is needed. - Default: ``no``. +- **bom**: Set whether or not a UTF-8 Byte Order Mark should be emitted into the + m3u file. If you're using foobar2000 or Winamp, this is needed. Default: + ``no``. Optional Arguments ------------------ @@ -73,24 +75,31 @@ The ``--args`` (or ``-A``) flag to the ``play`` command lets you specify additional arguments for your player command. Options are inserted after the configured ``command`` string and before the playlist filename. -For example, if you have the plugin configured like this:: +For example, if you have the plugin configured like this: + +:: play: command: mplayer -quiet -and you occasionally want to shuffle the songs you play, you can type:: +and you occasionally want to shuffle the songs you play, you can type: + +:: $ beet play --args -shuffle -to get beets to execute this command:: +to get beets to execute this command: + +:: mplayer -quiet -shuffle /path/to/playlist.m3u instead of the default. -If you need to insert arguments somewhere other than the end of the -``command`` string, use ``$args`` to indicate where to insert them. For -example:: +If you need to insert arguments somewhere other than the end of the ``command`` +string, use ``$args`` to indicate where to insert them. For example: + +:: play: command: mpv $args --playlist @@ -98,9 +107,18 @@ example:: indicates that you need to insert extra arguments before specifying the playlist. +Some players require a different syntax. 
For example, with ``mpv`` the optional +``$playlist`` variable can be used to match the syntax of the ``--playlist`` +option: + +:: + + play: + command: mpv $args --playlist=$playlist + The ``--yes`` (or ``-y``) flag to the ``play`` command will skip the warning -message if you choose to play more items than the **warning_threshold** -value usually allows. +message if you choose to play more items than the **warning_threshold** value +usually allows. Note on the Leakage of the Generated Playlists ---------------------------------------------- @@ -109,9 +127,9 @@ Because the command that will open the generated ``.m3u`` files can be arbitrarily configured by the user, beets won't try to delete those files. For this reason, using this plugin will leave one or several playlist(s) in the directory selected to create temporary files (Most likely ``/tmp/`` on Unix-like -systems. See `tempfile.tempdir`_ in the Python docs.). Leaking those playlists until -they are externally wiped could be an issue for privacy or storage reasons. If -this is the case for you, you might want to use the ``raw`` config option -described above. +systems. See tempfile.tempdir_ in the Python docs.). Leaking those playlists +until they are externally wiped could be an issue for privacy or storage +reasons. If this is the case for you, you might want to use the ``raw`` config +option described above. -.. _tempfile.tempdir: https://docs.python.org/2/library/tempfile.html#tempfile.tempdir +.. _tempfile.tempdir: https://docs.python.org/3/library/tempfile.html#tempfile.tempdir diff --git a/docs/plugins/playlist.rst b/docs/plugins/playlist.rst index 9737874b0..e89c880ad 100644 --- a/docs/plugins/playlist.rst +++ b/docs/plugins/playlist.rst @@ -3,9 +3,10 @@ Playlist Plugin ``playlist`` is a plugin to use playlists in m3u format. -To use it, enable the ``playlist`` plugin in your configuration -(see :ref:`using-plugins`). 
-Then configure your playlists like this:: +To use it, enable the ``playlist`` plugin in your configuration (see +:ref:`using-plugins`). Then configure your playlists like this: + +:: playlist: auto: no @@ -14,21 +15,26 @@ Then configure your playlists like this:: forward_slash: no It is possible to query the library based on a playlist by specifying its -absolute path:: +absolute path: + +:: $ beet ls playlist:/path/to/someplaylist.m3u The plugin also supports referencing playlists by name. The playlist is then -searched in the playlist_dir and the ".m3u" extension is appended to the -name:: +searched in the playlist_dir and the ".m3u" extension is appended to the name: + +:: $ beet ls playlist:anotherplaylist -A playlist query will use the paths found in the playlist file to match items -in the beets library. ``playlist:`` submits a regular beets -:ref:`query<queries>` similar to a :ref:`specific fields query<fieldsquery>`. -If you want the list in any particular order, you can use the standard beets -query syntax for :ref:`sorting<query-sort>`:: +A playlist query will use the paths found in the playlist file to match items in +the beets library. ``playlist:`` submits a regular beets :ref:`query<queries>` +similar to a :ref:`specific fields query<fieldsquery>`. If you want the list in +any particular order, you can use the standard beets query syntax for +:ref:`sorting<query-sort>`: + +:: $ beet ls playlist:/path/to/someplaylist.m3u artist+ year+ @@ -41,22 +47,19 @@ configuration option. Configuration ------------- -To configure the plugin, make a ``playlist:`` section in your -configuration file. In addition to the ``playlists`` described above, the -other configuration options are: +To configure the plugin, make a ``playlist:`` section in your configuration +file. 
In addition to the ``playlists`` described above, the other configuration +options are: - **auto**: If this is set to ``yes``, then anytime an item in the library is - moved or removed, the plugin will update all playlists in the - ``playlist_dir`` directory that contain that item to reflect the change. - Default: ``no`` -- **playlist_dir**: Where to read playlist files from. - Default: The current working directory (i.e., ``'.'``). + moved or removed, the plugin will update all playlists in the ``playlist_dir`` + directory that contain that item to reflect the change. Default: ``no`` +- **playlist_dir**: Where to read playlist files from. Default: The current + working directory (i.e., ``'.'``). - **relative_to**: Interpret paths in the playlist files relative to a base - directory. Instead of setting it to a fixed path, it is also possible to - set it to ``playlist`` to use the playlist's parent directory or to - ``library`` to use the library directory. - Default: ``library`` -- **forward_slash**: Forces forward slashes in the generated playlist files. - If you intend to use this plugin to generate playlists for MPD on - Windows, set this to yes. - Default: Use system separator. + directory. Instead of setting it to a fixed path, it is also possible to set + it to ``playlist`` to use the playlist's parent directory or to ``library`` to + use the library directory. Default: ``library`` +- **forward_slash**: Forces forward slashes in the generated playlist files. If + you intend to use this plugin to generate playlists for MPD on Windows, set + this to yes. Default: Use system separator. diff --git a/docs/plugins/plexupdate.rst b/docs/plugins/plexupdate.rst index 3ca9cbfab..a3aecae11 100644 --- a/docs/plugins/plexupdate.rst +++ b/docs/plugins/plexupdate.rst @@ -2,7 +2,7 @@ PlexUpdate Plugin ================= ``plexupdate`` is a very simple plugin for beets that lets you automatically -update `Plex`_'s music library whenever you change your beets library. 
+update Plex_'s music library whenever you change your beets library. Firstly, install ``beets`` with ``plexupdate`` extra @@ -10,39 +10,36 @@ Firstly, install ``beets`` with ``plexupdate`` extra pip install "beets[plexupdate]" -Then, enable ``plexupdate`` plugin it in your configuration (see :ref:`using-plugins`). -Optionally, configure the specifics of your Plex server. You can do this using -a ``plex:`` section in your ``config.yaml``: +Then, enable the ``plexupdate`` plugin in your configuration (see +:ref:`using-plugins`). Optionally, configure the specifics of your Plex server. +You can do this using a ``plex:`` section in your ``config.yaml``: .. code-block:: yaml - plex: - host: "localhost" - port: 32400 - token: "TOKEN" + plex: + host: "localhost" + port: 32400 + token: "TOKEN" -The ``token`` key is optional: you'll need to use it when in a Plex Home (see Plex's own `documentation about tokens`_). +The ``token`` key is optional: you'll need to use it when in a Plex Home (see +Plex's own `documentation about tokens`_). With that all in place, you'll see beets send the "update" command to your Plex server every time you change your beets library. -.. _Plex: https://plex.tv/ .. _documentation about tokens: https://support.plex.tv/hc/en-us/articles/204059436-Finding-your-account-token-X-Plex-Token +.. _plex: https://plex.tv/ + Configuration ------------- The available options under the ``plex:`` section are: -- **host**: The Plex server name. - Default: ``localhost``. -- **port**: The Plex server port. - Default: 32400. -- **token**: The Plex Home token. - Default: Empty. -- **library_name**: The name of the Plex library to update. - Default: ``Music`` -- **secure**: Use secure connections to the Plex server. - Default: ``False`` -- **ignore_cert_errors**: Ignore TLS certificate errors when using secure connections. - Default: ``False`` +- **host**: The Plex server name. Default: ``localhost``. +- **port**: The Plex server port. Default: 32400. 
+- **token**: The Plex Home token. Default: Empty. +- **library_name**: The name of the Plex library to update. Default: ``Music`` +- **secure**: Use secure connections to the Plex server. Default: ``False`` +- **ignore_cert_errors**: Ignore TLS certificate errors when using secure + connections. Default: ``False`` diff --git a/docs/plugins/random.rst b/docs/plugins/random.rst index b0c437819..ca227c4b8 100644 --- a/docs/plugins/random.rst +++ b/docs/plugins/random.rst @@ -6,7 +6,9 @@ from your library. This can be helpful if you need some help deciding what to listen to. First, enable the plugin named ``random`` (see :ref:`using-plugins`). You'll -then be able to use the ``beet random`` command:: +then be able to use the ``beet random`` command: + +:: $ beet random Aesop Rock - None Shall Pass - The Harbor Is Yours @@ -16,14 +18,14 @@ command (see :doc:`/reference/cli`). To choose an album instead of a single track, use ``-a``; to print paths to items instead of metadata, use ``-p``; and to use a custom format for printing, use ``-f FORMAT``. -If the ``-e`` option is passed, the random choice will be even among -artists (the albumartist field). This makes sure that your anthology -of Bob Dylan won't make you listen to Bob Dylan 50% of the time. +If the ``-e`` option is passed, the random choice will be even among artists +(the albumartist field). This makes sure that your anthology of Bob Dylan won't +make you listen to Bob Dylan 50% of the time. The ``-n NUMBER`` option controls the number of objects that are selected and printed (default 1). To select 5 tracks from your library, type ``beet random -n5``. As an alternative, you can use ``-t MINUTES`` to choose a set of music with a -given play time. To select tracks that total one hour, for example, type -``beet random -t60``. +given play time. To select tracks that total one hour, for example, type ``beet +random -t60``. 
diff --git a/docs/plugins/replace.rst b/docs/plugins/replace.rst new file mode 100644 index 000000000..7216f8399 --- /dev/null +++ b/docs/plugins/replace.rst @@ -0,0 +1,19 @@ +Replace Plugin +============== + +The ``replace`` plugin provides a command that replaces the audio file of a +track, while keeping the name and tags intact. It should save some time when you +get the wrong version of a song. + +Enable the ``replace`` plugin in your configuration (see :ref:`using-plugins`) +and then type: + +:: + + $ beet replace <query> <path> + +The plugin will show you a list of files for you to pick from, and then ask for +confirmation. + +Consider using the ``replaygain`` command from the :doc:`/plugins/replaygain` +plugin, if you usually use it during imports. diff --git a/docs/plugins/replaygain.rst b/docs/plugins/replaygain.rst index 900f6f8c4..c7e51d25d 100644 --- a/docs/plugins/replaygain.rst +++ b/docs/plugins/replaygain.rst @@ -1,11 +1,10 @@ ReplayGain Plugin ================= -This plugin adds support for `ReplayGain`_, a technique for normalizing audio +This plugin adds support for ReplayGain_, a technique for normalizing audio playback levels. -.. _ReplayGain: https://wiki.hydrogenaudio.org/index.php?title=ReplayGain - +.. _replaygain: https://wiki.hydrogenaudio.org/index.php?title=ReplayGain Installation ------------ @@ -20,21 +19,22 @@ can be a slow process; to instead analyze after the fact, disable automatic analysis and use the ``beet replaygain`` command (see below). To speed up analysis with some of the available backends, this plugin processes -tracks or albums (when using the ``-a`` option) in parallel. By default, -a single thread is used per logical core of your CPU. +tracks or albums (when using the ``-a`` option) in parallel. By default, a +single thread is used per logical core of your CPU. 
GStreamer -````````` +~~~~~~~~~ -To use `GStreamer`_ for ReplayGain analysis, you will of course need to -install GStreamer and plugins for compatibility with your audio files. -You will need at least GStreamer 1.0 and `PyGObject 3.x`_ (a.k.a. ``python-gi``). +To use GStreamer_ for ReplayGain analysis, you will of course need to install +GStreamer and plugins for compatibility with your audio files. You will need at +least GStreamer 1.0 and `PyGObject 3.x`_ (a.k.a. ``python-gi``). -.. _PyGObject 3.x: https://pygobject.readthedocs.io/en/latest/ -.. _GStreamer: https://gstreamer.freedesktop.org/ +.. _gstreamer: https://gstreamer.freedesktop.org/ -Then, install ``beets`` with ``replaygain`` extra which installs -``GStreamer`` bindings for Python +.. _pygobject 3.x: https://pygobject.readthedocs.io/en/latest/ + +Then, install ``beets`` with ``replaygain`` extra which installs ``GStreamer`` +bindings for Python .. code-block:: bash @@ -42,7 +42,9 @@ Then, install ``beets`` with ``replaygain`` extra which installs Lastly, enable the ``replaygain`` plugin in your configuration (see :ref:`using-plugins`) and specify the GStreamer backend by adding this to your -configuration file:: +configuration file: + +:: replaygain: backend: gstreamer @@ -50,116 +52,118 @@ configuration file:: The GStreamer backend does not support parallel analysis. mp3gain and aacgain -``````````````````` +~~~~~~~~~~~~~~~~~~~ -In order to use this backend, you will need to install the `mp3gain`_ -command-line tool or the `aacgain`_ fork thereof. Here are some hints: +In order to use this backend, you will need to install the mp3gain_ command-line +tool or the aacgain_ fork thereof. Here are some hints: -* On Mac OS X, you can use `Homebrew`_. Type ``brew install aacgain``. -* On Linux, `mp3gain`_ is probably in your repositories. On Debian or Ubuntu, - for example, you can run ``apt-get install mp3gain``. -* On Windows, download and install the original `mp3gain`_. 
+- On Mac OS X, you can use Homebrew_. Type ``brew install aacgain``. +- On Linux, mp3gain_ is probably in your repositories. On Debian or Ubuntu, for + example, you can run ``apt-get install mp3gain``. +- On Windows, download and install the original mp3gain_. + +.. _aacgain: https://aacgain.altosdesign.com + +.. _homebrew: https://brew.sh .. _mp3gain: http://mp3gain.sourceforge.net/download.php -.. _aacgain: https://aacgain.altosdesign.com -.. _Homebrew: https://brew.sh Then, enable the plugin (see :ref:`using-plugins`) and specify the "command" -backend in your configuration file:: +backend in your configuration file: + +:: replaygain: backend: command If beets doesn't automatically find the ``mp3gain`` or ``aacgain`` executable, -you can configure the path explicitly like so:: +you can configure the path explicitly like so: + +:: replaygain: command: /Applications/MacMP3Gain.app/Contents/Resources/aacgain Python Audio Tools -`````````````````` +~~~~~~~~~~~~~~~~~~ -This backend uses the `Python Audio Tools`_ package to compute ReplayGain for -a range of different file formats. The package is not available via PyPI; it -must be installed manually (only versions preceding 3.x are compatible). +This backend uses the `Python Audio Tools`_ package to compute ReplayGain for a +range of different file formats. The package is not available via PyPI; it must +be installed manually (only versions preceding 3.x are compatible). -On OS X, most of the dependencies can be installed with `Homebrew`_:: +On OS X, most of the dependencies can be installed with Homebrew_: + +:: brew install mpg123 mp3gain vorbisgain faad2 libvorbis The Python Audio Tools backend does not support parallel analysis. -.. _Python Audio Tools: http://audiotools.sourceforge.net +.. _python audio tools: http://audiotools.sourceforge.net ffmpeg -`````` +~~~~~~ -This backend uses ffmpeg to calculate EBU R128 gain values. 
-To use it, install the `ffmpeg`_ command-line tool and select the -``ffmpeg`` backend in your config file. +This backend uses ffmpeg to calculate EBU R128 gain values. To use it, install +the ffmpeg_ command-line tool and select the ``ffmpeg`` backend in your config +file. .. _ffmpeg: https://ffmpeg.org Configuration ------------- -To configure the plugin, make a ``replaygain:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``replaygain:`` section in your configuration +file. The available options are: -- **auto**: Enable ReplayGain analysis during import. - Default: ``yes``. +- **auto**: Enable ReplayGain analysis during import. Default: ``yes``. - **threads**: The number of parallel threads to run the analysis in. Overridden - by ``--threads`` at the command line. - Default: # of logical CPU cores -- **parallel_on_import**: Whether to enable parallel analysis during import. - As of now this ReplayGain data is not written to files properly, so this option - is disabled by default. - If you wish to enable it, remember to run ``beet write`` after importing to - actually write to the imported files. - Default: ``no`` -- **backend**: The analysis backend; either ``gstreamer``, ``command``, ``audiotools`` - or ``ffmpeg``. - Default: ``command``. + by ``--threads`` at the command line. Default: # of logical CPU cores +- **parallel_on_import**: Whether to enable parallel analysis during import. As + of now this ReplayGain data is not written to files properly, so this option + is disabled by default. If you wish to enable it, remember to run ``beet + write`` after importing to actually write to the imported files. Default: + ``no`` +- **backend**: The analysis backend; either ``gstreamer``, ``command``, + ``audiotools`` or ``ffmpeg``. Default: ``command``. - **overwrite**: On import, re-analyze files that already have ReplayGain tags. 
Note that, for historical reasons, the name of this option is somewhat unfortunate: It does not decide whether tags are written to the files (which is controlled by the :ref:`import.write <config-import-write>` option). Default: ``no``. - **targetlevel**: A number of decibels for the target loudness level for files - using ``REPLAYGAIN_`` tags. - Default: ``89``. -- **r128_targetlevel**: The target loudness level in decibels (i.e. - ``<loudness in LUFS> + 107``) for files using ``R128_`` tags. - Default: 84 (Use ``83`` for ATSC A/85, ``84`` for EBU R128 or ``89`` for - ReplayGain 2.0.) + using ``REPLAYGAIN_`` tags. Default: ``89``. +- **r128_targetlevel**: The target loudness level in decibels (i.e. ``<loudness + in LUFS> + 107``) for files using ``R128_`` tags. Default: 84 (Use ``83`` for + ATSC A/85, ``84`` for EBU R128 or ``89`` for ReplayGain 2.0.) - **r128**: A space separated list of formats that will use ``R128_`` tags with integer values instead of the common ``REPLAYGAIN_`` tags with floating point - values. Requires the "ffmpeg" backend. - Default: ``Opus``. + values. Requires the "ffmpeg" backend. Default: ``Opus``. - **per_disc**: Calculate album ReplayGain on disc level instead of album level. Default: ``no`` These options only work with the "command" backend: - **command**: The path to the ``mp3gain`` or ``aacgain`` executable (if beets - cannot find it by itself). - For example: ``/Applications/MacMP3Gain.app/Contents/Resources/aacgain``. - Default: Search in your ``$PATH``. + cannot find it by itself). For example: + ``/Applications/MacMP3Gain.app/Contents/Resources/aacgain``. Default: Search + in your ``$PATH``. - **noclip**: Reduce the amount of ReplayGain adjustment to whatever amount - would keep clipping from occurring. - Default: ``yes``. + would keep clipping from occurring. Default: ``yes``. This option only works with the "ffmpeg" backend: -- **peak**: Either ``true`` (the default) or ``sample``. ``true`` is - more accurate but slower. 
+- **peak**: Either ``true`` (the default) or ``sample``. ``true`` is more + accurate but slower. Manual Analysis --------------- By default, the plugin will analyze all items an albums as they are implemented. However, you can also manually analyze files that are already in your library. -Use the ``beet replaygain`` command:: +Use the ``beet replaygain`` command: + +:: $ beet replaygain [-Waf] [QUERY] @@ -167,19 +171,23 @@ The ``-a`` flag analyzes whole albums instead of individual tracks. Provide a query (see :doc:`/reference/query`) to indicate which items or albums to analyze. Files that already have ReplayGain values are skipped unless ``-f`` is supplied. Use ``-w`` (write tags) or ``-W`` (don't write tags) to control -whether ReplayGain tags are written into the music files, or stored in the -beets database only (the default is to use :ref:`the importer's configuration +whether ReplayGain tags are written into the music files, or stored in the beets +database only (the default is to use :ref:`the importer's configuration <config-import-write>`). -To execute with a different number of threads, call ``beet replaygain --threads N``:: +To execute with a different number of threads, call ``beet replaygain --threads +N``: + +:: $ beet replaygain --threads N [-Waf] [QUERY] with N any integer. To disable parallelism, use ``--threads 0``. ReplayGain analysis is not fast, so you may want to disable it during import. -Use the ``auto`` config option to control this:: +Use the ``auto`` config option to control this: + +:: replaygain: auto: no - diff --git a/docs/plugins/rewrite.rst b/docs/plugins/rewrite.rst index 41cd454bf..87a124cc6 100644 --- a/docs/plugins/rewrite.rst +++ b/docs/plugins/rewrite.rst @@ -2,16 +2,18 @@ Rewrite Plugin ============== The ``rewrite`` plugin lets you easily substitute values in your templates and -path formats. 
Specifically, it is intended to let you *canonicalize* names -such as artists: for example, perhaps you want albums from The Jimi Hendrix +path formats. Specifically, it is intended to let you *canonicalize* names such +as artists: for example, perhaps you want albums from The Jimi Hendrix Experience to be sorted into the same folder as solo Hendrix albums. -To use field rewriting, first enable the ``rewrite`` plugin -(see :ref:`using-plugins`). -Then, make a ``rewrite:`` section in your config file to contain your rewrite -rules. Each rule consists of a field name, a regular expression pattern, and a -replacement value. Rules are written ``fieldname regex: replacement``. -For example, this line implements the Jimi Hendrix example above:: +To use field rewriting, first enable the ``rewrite`` plugin (see +:ref:`using-plugins`). Then, make a ``rewrite:`` section in your config file to +contain your rewrite rules. Each rule consists of a field name, a regular +expression pattern, and a replacement value. Rules are written ``fieldname +regex: replacement``. For example, this line implements the Jimi Hendrix example +above: + +:: rewrite: artist The Jimi Hendrix Experience: Jimi Hendrix @@ -21,7 +23,9 @@ would otherwise be "The Jimi Hendrix Experience". The pattern is a case-insensitive regular expression. This means you can use ordinary regular expression syntax to match multiple artists. For example, you -might use:: +might use: + +:: rewrite: artist .*jimi hendrix.*: Jimi Hendrix @@ -31,8 +35,8 @@ As a convenience, the plugin applies patterns for the ``artist`` field to the every rule for ``artist`` and ``albumartist``.) A word of warning: This plugin theoretically only applies to templates and path -formats; it initially does not modify files' metadata tags or the values -tracked by beets' library database, but since it *rewrites all field lookups*, -it modifies the file's metadata anyway. See comments in issue :bug:`2786`. 
+formats; it initially does not modify files' metadata tags or the values tracked +by beets' library database, but since it *rewrites all field lookups*, it +modifies the file's metadata anyway. See comments in issue :bug:`2786`. As an alternative to this plugin the :doc:`/plugins/substitute` could be used. diff --git a/docs/plugins/scrub.rst b/docs/plugins/scrub.rst index 73ee01645..77e3dc696 100644 --- a/docs/plugins/scrub.rst +++ b/docs/plugins/scrub.rst @@ -1,5 +1,5 @@ Scrub Plugin -============= +============ The ``scrub`` plugin lets you remove extraneous metadata from files' tags. If you'd prefer never to see crufty tags that come from other tools, the plugin can @@ -40,8 +40,7 @@ whatsoever. Configuration ------------- -To configure the plugin, make a ``scrub:`` section in your -configuration file. There is one option: +To configure the plugin, make a ``scrub:`` section in your configuration file. +There is one option: -- **auto**: Enable metadata stripping during import. - Default: ``yes``. +- **auto**: Enable metadata stripping during import. Default: ``yes``. diff --git a/docs/plugins/shared_metadata_source_config.rst b/docs/plugins/shared_metadata_source_config.rst new file mode 100644 index 000000000..609c7afd2 --- /dev/null +++ b/docs/plugins/shared_metadata_source_config.rst @@ -0,0 +1,65 @@ +.. _data_source_mismatch_penalty: + +.. conf:: data_source_mismatch_penalty + :default: 0.5 + + Penalty applied when the data source of a + match candidate differs from the original source of your existing tracks. Any + decimal number between 0.0 and 1.0 + + This setting controls how much to penalize matches from different metadata + sources during import. The penalty is applied when beets detects that a match + candidate comes from a different data source than what appears to be the + original source of your music collection. + + **Example configurations:** + + .. 
code-block:: yaml + + # Prefer MusicBrainz over Discogs when sources don't match + plugins: musicbrainz discogs + + musicbrainz: + data_source_mismatch_penalty: 0.3 # Lower penalty = preferred + discogs: + data_source_mismatch_penalty: 0.8 # Higher penalty = less preferred + + .. code-block:: yaml + + # Do not penalise candidates from Discogs at all + plugins: musicbrainz discogs + + musicbrainz: + data_source_mismatch_penalty: 0.5 + discogs: + data_source_mismatch_penalty: 0.0 + + .. code-block:: yaml + + # Disable cross-source penalties entirely + plugins: musicbrainz discogs + + musicbrainz: + data_source_mismatch_penalty: 0.0 + discogs: + data_source_mismatch_penalty: 0.0 + + .. tip:: + + The last configuration is equivalent to setting: + + .. code-block:: yaml + + match: + distance_weights: + data_source: 0.0 # Disable data source matching + +.. conf:: source_weight + :default: 0.5 + + .. deprecated:: 2.5 Use `data_source_mismatch_penalty`_ instead. + +.. conf:: search_limit + :default: 5 + + Maximum number of search results to return. diff --git a/docs/plugins/smartplaylist.rst b/docs/plugins/smartplaylist.rst index cb697f762..f227559a8 100644 --- a/docs/plugins/smartplaylist.rst +++ b/docs/plugins/smartplaylist.rst @@ -5,11 +5,13 @@ Smart Playlist Plugin beets queries every time your library changes. This plugin is specifically created to work well with `MPD's`_ playlist functionality. -.. _MPD's: https://www.musicpd.org/ +.. _mpd's: https://www.musicpd.org/ -To use it, enable the ``smartplaylist`` plugin in your configuration -(see :ref:`using-plugins`). -Then configure your smart playlists like the following example:: +To use it, enable the ``smartplaylist`` plugin in your configuration (see +:ref:`using-plugins`). 
Then configure your smart playlists like the following +example: + +:: smartplaylist: relative_to: ~/Music @@ -23,15 +25,17 @@ Then configure your smart playlists like the following example:: query: 'artist:Beatles' You can generate as many playlists as you want by adding them to the -``playlists`` section, using beets query syntax (see -:doc:`/reference/query`) for ``query`` and the file name to be generated for -``name``. The query will be split using shell-like syntax, so if you need to -use spaces in the query, be sure to quote them (e.g., ``artist:"The Beatles"``). -If you have existing files with the same names, you should back them up---they -will be overwritten when the plugin runs. +``playlists`` section, using beets query syntax (see :doc:`/reference/query`) +for ``query`` and the file name to be generated for ``name``. The query will be +split using shell-like syntax, so if you need to use spaces in the query, be +sure to quote them (e.g., ``artist:"The Beatles"``). If you have existing files +with the same names, you should back them up---they will be overwritten when the +plugin runs. For more advanced usage, you can use template syntax (see -:doc:`/reference/pathformat/`) in the ``name`` field. For example:: +:doc:`/reference/pathformat/`) in the ``name`` field. For example: + +:: - name: 'ReleasedIn$year.m3u' query: 'year::201(0|1)' @@ -40,13 +44,17 @@ This will query all the songs in 2010 and 2011 and generate the two playlist files ``ReleasedIn2010.m3u`` and ``ReleasedIn2011.m3u`` using those songs. You can also gather the results of several queries by putting them in a list. -(Items that match both queries are not included twice.) For example:: +(Items that match both queries are not included twice.) 
For example: + +:: - name: 'BeatlesUniverse.m3u' query: ['artist:beatles', 'genre:"beatles cover"'] Note that since beets query syntax is in effect, you can also use sorting -directives:: +directives: + +:: - name: 'Chronological Beatles' query: 'artist:Beatles year+' @@ -57,47 +65,55 @@ The former case behaves as expected, however please note that in the latter the sorts will be merged: ``year+ bitrate+`` will apply to both the Beatles and Led Zeppelin. If that bothers you, please get in touch. -For querying albums instead of items (mainly useful with extensible fields), -use the ``album_query`` field. ``query`` and ``album_query`` can be used at the -same time. The following example gathers single items but also items belonging -to albums that have a ``for_travel`` extensible field set to 1:: +For querying albums instead of items (mainly useful with extensible fields), use +the ``album_query`` field. ``query`` and ``album_query`` can be used at the same +time. The following example gathers single items but also items belonging to +albums that have a ``for_travel`` extensible field set to 1: + +:: - name: 'MyTravelPlaylist.m3u' album_query: 'for_travel:1' query: 'for_travel:1' -By default, each playlist is automatically regenerated at the end of the -session if an item or album it matches changed in the library database. To -force regeneration, you can invoke it manually from the command line:: +By default, each playlist is automatically regenerated at the end of the session +if an item or album it matches changed in the library database. To force +regeneration, you can invoke it manually from the command line: + +:: $ beet splupdate This will regenerate all smart playlists. 
You can also specify which ones you -want to regenerate:: +want to regenerate: + +:: $ beet splupdate BeatlesUniverse.m3u MyTravelPlaylist You can also use this plugin together with the :doc:`mpdupdate`, in order to -automatically notify MPD of the playlist change, by adding ``mpdupdate`` to -the ``plugins`` line in your config file *after* the ``smartplaylist`` -plugin. +automatically notify MPD of the playlist change, by adding ``mpdupdate`` to the +``plugins`` line in your config file *after* the ``smartplaylist`` plugin. While changing existing playlists in the beets configuration it can help to use the ``--pretend`` option to find out if the edits work as expected. The results of the queries will be printed out instead of being written to the playlist file. +:: + $ beet splupdate --pretend BeatlesUniverse.m3u The ``pretend_paths`` configuration option sets whether the items should be -displayed as per the user's ``format_item`` setting or what the file -paths as they would be written to the m3u file look like. +displayed as per the user's ``format_item`` setting or what the file paths as +they would be written to the m3u file look like. In case you want to export additional fields from the beets database into the generated playlists, you can do so by specifying them within the ``fields`` -configuration option and setting the ``output`` option to ``extm3u``. -For instance the following configuration exports the ``id`` and ``genre`` -fields:: +configuration option and setting the ``output`` option to ``extm3u``. For +instance the following configuration exports the ``id`` and ``genre`` fields: + +:: smartplaylist: playlist_dir: /data/playlists @@ -110,58 +126,56 @@ fields:: - name: all.m3u query: '' -Values of additional fields are URL-encoded. -A resulting ``all.m3u`` file could look as follows:: +Values of additional fields are URL-encoded. 
A resulting ``all.m3u`` file could +look as follows: + +:: #EXTM3U #EXTINF:805 id="1931" genre="Progressive%20Rock",Led Zeppelin - Stairway to Heaven ../music/singles/Led Zeppelin/Stairway to Heaven.mp3 -To give a usage example, the `webm3u`_ and `Beetstream`_ plugins read the -exported ``id`` field, allowing you to serve your local m3u playlists via HTTP. +To give a usage example, the webm3u_ and Beetstream_ plugins read the exported +``id`` field, allowing you to serve your local m3u playlists via HTTP. + +.. _beetstream: https://github.com/BinaryBrain/Beetstream -.. _Beetstream: https://github.com/BinaryBrain/Beetstream .. _webm3u: https://github.com/mgoltzsche/beets-webm3u Configuration ------------- -To configure the plugin, make a ``smartplaylist:`` section in your -configuration file. In addition to the ``playlists`` described above, the -other configuration options are: +To configure the plugin, make a ``smartplaylist:`` section in your configuration +file. In addition to the ``playlists`` described above, the other configuration +options are: -- **auto**: Regenerate the playlist after every database change. - Default: ``yes``. -- **playlist_dir**: Where to put the generated playlist files. - Default: The current working directory (i.e., ``'.'``). +- **auto**: Regenerate the playlist after every database change. Default: + ``yes``. +- **playlist_dir**: Where to put the generated playlist files. Default: The + current working directory (i.e., ``'.'``). - **relative_to**: Generate paths in the playlist files relative to a base directory. If you intend to use this plugin to generate playlists for MPD, - point this to your MPD music directory. - Default: Use absolute paths. -- **forward_slash**: Forces forward slashes in the generated playlist files. - If you intend to use this plugin to generate playlists for MPD on - Windows, set this to yes. - Default: Use system separator. + point this to your MPD music directory. Default: Use absolute paths. 
+- **forward_slash**: Forces forward slashes in the generated playlist files. If + you intend to use this plugin to generate playlists for MPD on Windows, set + this to yes. Default: Use system separator. - **prefix**: Prepend this string to every path in the playlist file. For example, you could use the URL for a server where the music is stored. Default: empty string. - **urlencode**: URL-encode all paths. Default: ``no``. -- **pretend_paths**: When running with ``--pretend``, show the actual file - paths that will be written to the m3u file. Default: ``false``. -- **uri_format**: Template with an ``$id`` placeholder used generate a - playlist item URI, e.g. ``http://beets:8337/item/$id/file``. - When this option is specified, the local path-related options ``prefix``, - ``relative_to``, ``forward_slash`` and ``urlencode`` are ignored. +- **pretend_paths**: When running with ``--pretend``, show the actual file paths + that will be written to the m3u file. Default: ``false``. +- **uri_format**: Template with an ``$id`` placeholder used to generate a playlist + item URI, e.g. ``http://beets:8337/item/$id/file``. When this option is + specified, the local path-related options ``prefix``, ``relative_to``, + ``forward_slash`` and ``urlencode`` are ignored. - **output**: Specify the playlist format: m3u|extm3u. Default ``m3u``. -- **fields**: Specify the names of the additional item fields to export into - the playlist. This allows using e.g. the ``id`` field within other tools such - as the `webm3u`_ and `Beetstream`_ plugins. - To use this option, you must set the ``output`` option to ``extm3u``. - -.. _Beetstream: https://github.com/BinaryBrain/Beetstream -.. _webm3u: https://github.com/mgoltzsche/beets-webm3u +- **fields**: Specify the names of the additional item fields to export into the + playlist. This allows using e.g. the ``id`` field within other tools such as + the webm3u_ and Beetstream_ plugins. 
To use this option, you must set the + ``output`` option to ``extm3u``. For many configuration options, there is a corresponding CLI option, e.g. ``--playlist-dir``, ``--relative-to``, ``--prefix``, ``--forward-slash``, -``--urlencode``, ``--uri-format``, ``--output``, ``--pretend-paths``. -CLI options take precedence over those specified within the configuration file. +``--urlencode``, ``--uri-format``, ``--output``, ``--pretend-paths``. CLI +options take precedence over those specified within the configuration file. diff --git a/docs/plugins/sonosupdate.rst b/docs/plugins/sonosupdate.rst index 6076590e3..956a26a2a 100644 --- a/docs/plugins/sonosupdate.rst +++ b/docs/plugins/sonosupdate.rst @@ -1,11 +1,11 @@ SonosUpdate Plugin ================== -The ``sonosupdate`` plugin lets you automatically update `Sonos`_'s music -library whenever you change your beets library. +The ``sonosupdate`` plugin lets you automatically update Sonos_'s music library +whenever you change your beets library. -To use ``sonosupdate`` plugin, enable it in your configuration -(see :ref:`using-plugins`). +To use ``sonosupdate`` plugin, enable it in your configuration (see +:ref:`using-plugins`). To use the ``sonosupdate`` plugin, first enable it in your configuration (see :ref:`using-plugins`). Then, install ``beets`` with ``sonosupdate`` extra @@ -15,4 +15,4 @@ To use the ``sonosupdate`` plugin, first enable it in your configuration (see With that all in place, you'll see beets send the "update" command to your Sonos controller every time you change your beets library. -.. _Sonos: https://sonos.com/ +.. _sonos: https://sonos.com/ diff --git a/docs/plugins/spotify.rst b/docs/plugins/spotify.rst index 233d00726..f0d6ac2ef 100644 --- a/docs/plugins/spotify.rst +++ b/docs/plugins/spotify.rst @@ -1,34 +1,43 @@ Spotify Plugin ============== -The ``spotify`` plugin generates `Spotify`_ playlists from tracks in your -library with the ``beet spotify`` command using the `Spotify Search API`_. 
+The ``spotify`` plugin generates Spotify_ playlists from tracks in your library +with the ``beet spotify`` command using the `Spotify Search API`_. -Also, the plugin can use the Spotify `Album`_ and `Track`_ APIs to provide -metadata matches for the importer. +Also, the plugin can use the Spotify Album_ and Track_ APIs to provide metadata +matches for the importer. -.. _Spotify: https://www.spotify.com/ -.. _Spotify Search API: https://developer.spotify.com/documentation/web-api/reference/#/operations/search -.. _Album: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-an-album -.. _Track: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-track +.. _album: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-an-album + +.. _spotify: https://www.spotify.com/ + +.. _spotify search api: https://developer.spotify.com/documentation/web-api/reference/#/operations/search + +.. _track: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-track Why Use This Plugin? -------------------- -* You're a Beets user and Spotify user already. -* You have playlists or albums you'd like to make available in Spotify from Beets without having to search for each artist/album/track. -* You want to check which tracks in your library are available on Spotify. -* You want to autotag music with metadata from the Spotify API. -* You want to obtain track popularity and audio features (e.g., danceability) +- You're a Beets user and Spotify user already. +- You have playlists or albums you'd like to make available in Spotify from + Beets without having to search for each artist/album/track. +- You want to check which tracks in your library are available on Spotify. +- You want to autotag music with metadata from the Spotify API. 
+- You want to obtain track popularity and audio features (e.g., danceability) Basic Usage ----------- -First, enable the ``spotify`` plugin (see :ref:`using-plugins`). -Then, use the ``spotify`` command with a beets query:: + +First, enable the ``spotify`` plugin (see :ref:`using-plugins`). Then, use the +``spotify`` command with a beets query: + +:: beet spotify [OPTIONS...] QUERY -Here's an example:: +Here's an example: + +:: $ beet spotify "In The Lonely Hour" Processing 14 tracks... @@ -38,14 +47,16 @@ Here's an example:: Command-line options include: -* ``-m MODE`` or ``--mode=MODE`` where ``MODE`` is either "list" or "open" - controls whether to print out the playlist (for copying and pasting) or - open it in the Spotify app. (See below.) -* ``--show-failures`` or ``-f``: List the tracks that did not match a Spotify +- ``-m MODE`` or ``--mode=MODE`` where ``MODE`` is either "list" or "open" + controls whether to print out the playlist (for copying and pasting) or open + it in the Spotify app. (See below.) +- ``--show-failures`` or ``-f``: List the tracks that did not match a Spotify ID. -You can enter the URL for an album or song on Spotify at the ``enter Id`` -prompt during import:: +You can enter the URL for an album or song on Spotify at the ``enter Id`` prompt +during import: + +:: Enter search, enter Id, aBort, eDit, edit Candidates, plaY? i Enter release ID: https://open.spotify.com/album/2rFYTHFBLQN3AYlrymBPPA @@ -53,73 +64,100 @@ prompt during import:: Configuration ------------- -This plugin can be configured like other metadata source plugins as described in :ref:`metadata-source-plugin-configuration`. In addition, the following -configuration options are provided. +This plugin can be configured like other metadata source plugins as described in +:ref:`metadata-source-plugin-configuration`. 
-The default options should work as-is, but there are some options you can put -in config.yaml under the ``spotify:`` section: +Default +~~~~~~~ -- **mode**: One of the following: - - - ``list``: Print out the playlist as a list of links. This list can then - be pasted in to a new or existing Spotify playlist. - - ``open``: This mode actually sends a link to your default browser with - instructions to open Spotify with the playlist you created. Until this - has been tested on all platforms, it will remain optional. - - Default: ``list``. -- **region_filter**: A two-character country abbreviation, to limit results - to that market. - Default: None. -- **show_failures**: List each lookup that does not return a Spotify ID (and - therefore cannot be added to a playlist). - Default: ``no``. -- **tiebreak**: How to choose the track if there is more than one identical - result. For example, there might be multiple releases of the same album. - The options are ``popularity`` and ``first`` (to just choose the first match - returned). - Default: ``popularity``. -- **regex**: An array of regex transformations to perform on the - track/album/artist fields before sending them to Spotify. Can be useful for - changing certain abbreviations, like ft. -> feat. See the examples below. - Default: None. - -Here's an example:: +.. code-block:: yaml spotify: - source_weight: 0.7 - mode: open - region_filter: US - show_failures: on - tiebreak: first + mode: list + region_filter: + show_failures: no + tiebreak: popularity + regex: [] + search_query_ascii: no + client_id: REDACTED + client_secret: REDACTED + tokenfile: spotify_token.json + data_source_mismatch_penalty: 0.5 + search_limit: 5 - regex: [ - { - field: "albumartist", # Field in the item object to regex. - search: "Something", # String to look for. - replace: "Replaced" # Replacement value. - }, - { - field: "title", - search: "Something Else", - replace: "AlsoReplaced" - } - ] +.. 
conf:: mode + :default: list + + Controls how the playlist is output: + + - ``list``: Print out the playlist as a list of links. This list can then + be pasted in to a new or existing Spotify playlist. + - ``open``: This mode actually sends a link to your default browser with + instructions to open Spotify with the playlist you created. Until this + has been tested on all platforms, it will remain optional. + +.. conf:: region_filter + :default: + + A two-character country abbreviation, to limit results to that market. + +.. conf:: show_failures + :default: no + + List each lookup that does not return a Spotify ID (and therefore cannot be + added to a playlist). + +.. conf:: tiebreak + :default: popularity + + How to choose the candidate if there is more than one identical result. For + example, there might be multiple releases of the same album. + + - ``popularity``: pick the more popular candidate + - ``first``: pick the first candidate + +.. conf:: regex + :default: [] + + An array of regex transformations to perform on the track/album/artist fields + before sending them to Spotify. Can be useful for changing certain + abbreviations, like ft. -> feat. For example: + + .. code-block:: yaml + + regex: + - field: albumartist + search: Something + replace: Replaced + - field: title + search: Something Else + replace: AlsoReplaced + +.. conf:: search_query_ascii + :default: no + + If enabled, the search query will be converted to ASCII before being sent to + Spotify. Converting searches to ASCII can enhance search results in some + cases, but in general, it is not recommended. For instance, + ``artist:deadmau5 album:4×4`` will be converted to ``artist:deadmau5 + album:4x4`` (notice ``×!=x``). + +.. 
include:: ./shared_metadata_source_config.rst Obtaining Track Popularity and Audio Features from Spotify ---------------------------------------------------------- -Spotify provides information on track `popularity`_ and audio `features`_ that -can be used for music discovery. - -.. _popularity: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-track +Spotify provides information on track popularity_ and audio features_ that can +be used for music discovery. .. _features: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-audio-features +.. _popularity: https://developer.spotify.com/documentation/web-api/reference/#/operations/get-track + The ``spotify`` plugin provides an additional command ``spotifysync`` to obtain these track attributes from Spotify: -* ``beet spotifysync [-f]``: obtain popularity and audio features information +- ``beet spotifysync [-f]``: obtain popularity and audio features information for every track in the library. By default, ``spotifysync`` will skip tracks that already have this information populated. Using the ``-f`` or ``-force`` option will download the data even for tracks that already have it. 
Please @@ -131,15 +169,15 @@ these track attributes from Spotify: In addition to ``popularity``, the command currently sets these audio features for all tracks with a Spotify track ID: - * ``acousticness`` - * ``danceability`` - * ``energy`` - * ``instrumentalness`` - * ``key`` - * ``liveness`` - * ``loudness`` - * ``mode`` - * ``speechiness`` - * ``tempo`` - * ``time_signature`` - * ``valence`` + - ``acousticness`` + - ``danceability`` + - ``energy`` + - ``instrumentalness`` + - ``key`` + - ``liveness`` + - ``loudness`` + - ``mode`` + - ``speechiness`` + - ``tempo`` + - ``time_signature`` + - ``valence`` diff --git a/docs/plugins/subsonicplaylist.rst b/docs/plugins/subsonicplaylist.rst index 98c83ebe1..484a9ca8a 100644 --- a/docs/plugins/subsonicplaylist.rst +++ b/docs/plugins/subsonicplaylist.rst @@ -1,36 +1,44 @@ Subsonic Playlist Plugin ======================== -The ``subsonicplaylist`` plugin allows to import playlists from a subsonic server. -This is done by retrieving the track info from the subsonic server, searching -for them in the beets library, and adding the playlist names to the -`subsonic_playlist` tag of the found items. The content of the tag has the format: +The ``subsonicplaylist`` plugin allows to import playlists from a subsonic +server. This is done by retrieving the track info from the subsonic server, +searching for them in the beets library, and adding the playlist names to the +``subsonic_playlist`` tag of the found items. The content of the tag has the +format: subsonic_playlist: ";first playlist;second playlist;" -To get all items in a playlist use the query `;playlist name;`. +To get all items in a playlist use the query ``;playlist name;``. Command Line Usage ------------------ To use the ``subsonicplaylist`` plugin, enable it in your configuration (see :ref:`using-plugins`). Then use it by invoking the ``subsonicplaylist`` command. 
-Next, configure the plugin to connect to your Subsonic server, like this:: +Next, configure the plugin to connect to your Subsonic server, like this: + +:: subsonicplaylist: base_url: http://subsonic.example.com username: someUser password: somePassword -After this you can import your playlists by invoking the `subsonicplaylist` command. +After this you can import your playlists by invoking the ``subsonicplaylist`` +command. -By default only the tags of the items found for playlists will be updated. -This means that, if one imported a playlist, then delete one song from it and +By default only the tags of the items found for playlists will be updated. This +means that, if one imported a playlist, then delete one song from it and imported the playlist again, the deleted song will still have the playlist set -in its `subsonic_playlist` tag. To solve this problem one can use the `-d/--delete` -flag. This resets all `subsonic_playlist` tag before importing playlists. +in its ``subsonic_playlist`` tag. To solve this problem one can use the +``-d/--delete`` flag. This resets all ``subsonic_playlist`` tag before importing +playlists. -Here's an example configuration with all the available options and their default values:: +Here's an example configuration with all the available options and their default +values: + +:: subsonicplaylist: base_url: "https://your.subsonic.server" @@ -40,4 +48,4 @@ Here's an example configuration with all the available options and their default username: '' password: '' -The `base_url`, `username`, and `password` options are required. +The ``base_url``, ``username``, and ``password`` options are required. 
diff --git a/docs/plugins/subsonicupdate.rst b/docs/plugins/subsonicupdate.rst index fc7e0019e..27ee925dc 100644 --- a/docs/plugins/subsonicupdate.rst +++ b/docs/plugins/subsonicupdate.rst @@ -2,15 +2,16 @@ SubsonicUpdate Plugin ===================== ``subsonicupdate`` is a very simple plugin for beets that lets you automatically -update `Subsonic`_'s index whenever you change your beets library. +update Subsonic_'s index whenever you change your beets library. -.. _Subsonic: http://www.subsonic.org/pages/index.jsp +.. _subsonic: http://www.subsonic.org/pages/index.jsp -To use ``subsonicupdate`` plugin, enable it in your configuration -(see :ref:`using-plugins`). -Then, you'll probably want to configure the specifics of your Subsonic server. -You can do that using a ``subsonic:`` section in your ``config.yaml``, -which looks like this:: +To use ``subsonicupdate`` plugin, enable it in your configuration (see +:ref:`using-plugins`). Then, you'll probably want to configure the specifics of +your Subsonic server. You can do that using a ``subsonic:`` section in your +``config.yaml``, which looks like this: + +:: subsonic: url: https://example.com:443/subsonic @@ -19,17 +20,17 @@ which looks like this:: auth: token With that all in place, this plugin will send a REST API call to your Subsonic -server every time you change your beets library. Due to a current limitation -of the API, all libraries visible to that user will be scanned. +server every time you change your beets library. Due to a current limitation of +the API, all libraries visible to that user will be scanned. -If the :doc:`/plugins/smartplaylist` is used, creating or changing any -playlist will trigger a Subsonic update as well. +If the :doc:`/plugins/smartplaylist` is used, creating or changing any playlist +will trigger a Subsonic update as well. 
This plugin requires Subsonic with an active Premium license (or active trial) or any other `Subsonic API compatible`_ server implementing the ``startScan`` endpoint. -.. _Subsonic API compatible: http://www.subsonic.org/pages/api.jsp +.. _subsonic api compatible: http://www.subsonic.org/pages/api.jsp Configuration ------------- @@ -41,5 +42,5 @@ The available options under the ``subsonic:`` section are: - **pass**: The Subsonic user password. (This may either be a clear-text password or hex-encoded with the prefix ``enc:``.) Default: ``admin`` - **auth**: The authentication method. Possible choices are ``token`` or - ``password``. ``token`` authentication is preferred to avoid sending - cleartext password. + ``password``. ``token`` authentication is preferred to avoid sending cleartext + password. diff --git a/docs/plugins/substitute.rst b/docs/plugins/substitute.rst index 87ee2ad45..292314101 100644 --- a/docs/plugins/substitute.rst +++ b/docs/plugins/substitute.rst @@ -1,28 +1,29 @@ Substitute Plugin ================= -The ``substitute`` plugin lets you easily substitute values in your templates and -path formats. Specifically, it is intended to let you *canonicalize* names +The ``substitute`` plugin lets you easily substitute values in your templates +and path formats. Specifically, it is intended to let you *canonicalize* names such as artists: For example, perhaps you want albums from The Jimi Hendrix Experience to be sorted into the same folder as solo Hendrix albums. -This plugin is intended as a replacement for the ``rewrite`` plugin. While -the ``rewrite`` plugin modifies the metadata, this plugin does not. +This plugin is intended as a replacement for the ``rewrite`` plugin. While the +``rewrite`` plugin modifies the metadata, this plugin does not. -Enable the ``substitute`` plugin (see :ref:`using-plugins`), then make a ``substitute:`` section in your config file to contain your rules. 
-Each rule consists of a case-insensitive regular expression pattern, and a -replacement string. For example, you might use: +Enable the ``substitute`` plugin (see :ref:`using-plugins`), then make a +``substitute:`` section in your config file to contain your rules. Each rule +consists of a case-insensitive regular expression pattern, and a replacement +string. For example, you might use: .. code-block:: yaml substitute: .*jimi hendrix.*: Jimi Hendrix -The replacement can be an expression utilising the matched regex, allowing us -to create more general rules. Say for example, we want to sort all albums by +The replacement can be an expression utilising the matched regex, allowing us to +create more general rules. Say for example, we want to sort all albums by multiple artists into the directory of the first artist. We can thus capture -everything before the first ``,``, `` &`` or `` and``, and use this capture -group in the output, discarding the rest of the string. +everything before the first ``,``, ``&`` or ``and``, and use this capture group +in the output, discarding the rest of the string. .. code-block:: yaml @@ -31,12 +32,13 @@ group in the output, discarding the rest of the string. This would handle all the below cases in a single rule: - Bob Dylan and The Band -> Bob Dylan - Neil Young & Crazy Horse -> Neil Young - James Yorkston, Nina Persson & The Second Hand Orchestra -> James Yorkston + | Bob Dylan and The Band -> Bob Dylan + | Neil Young & Crazy Horse -> Neil Young + | James Yorkston, Nina Persson & The Second Hand Orchestra -> James + Yorkston - -To apply the substitution, you have to call the function ``%substitute{}`` in the paths section. For example: +To apply the substitution, you have to call the function ``%substitute{}`` in +the paths section. For example: .. 
code-block:: yaml diff --git a/docs/plugins/the.rst b/docs/plugins/the.rst index 5de0f5e54..b7a880b53 100644 --- a/docs/plugins/the.rst +++ b/docs/plugins/the.rst @@ -3,16 +3,20 @@ The Plugin The ``the`` plugin allows you to move patterns in path formats. It's suitable, for example, for moving articles from string start to the end. This is useful -for quick search on filesystems and generally looks good. Plugin does not -change tags. By default plugin supports English "the, a, an", but custom -regexp patterns can be added by user. How it works:: +for quick search on filesystems and generally looks good. Plugin does not change +tags. By default plugin supports English "the, a, an", but custom regexp +patterns can be added by user. How it works: + +:: The Something -> Something, The A Band -> Band, A An Orchestra -> Orchestra, An -To use the ``the`` plugin, enable it (see :doc:`/plugins/index`) and then use -a template function called ``%the`` in path format expressions:: +To use the ``the`` plugin, enable it (see :doc:`/plugins/index`) and then use a +template function called ``%the`` in path format expressions: + +:: paths: default: %the{$albumartist}/($year) $album/$track $title @@ -23,21 +27,17 @@ but you can override these defaults to make more complex changes. Configuration ------------- -To configure the plugin, make a ``the:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``the:`` section in your configuration file. The +available options are: -- **a**: Handle "A/An" moves. - Default: ``yes``. -- **the**: handle "The" moves. - Default: ``yes``. +- **a**: Handle "A/An" moves. Default: ``yes``. +- **the**: handle "The" moves. Default: ``yes``. - **patterns**: Custom regexp patterns, space-separated. Custom patterns are case-insensitive regular expressions. Patterns can be matched anywhere in the string (not just the beginning), so use ``^`` if you intend to match leading - words. - Default: ``[]``. + words. 
Default: ``[]``. - **strip**: Remove the article altogether instead of moving it to the end. Default: ``no``. -- **format**: A Python format string for the output. Use ``{0}`` to indicate - the part without the article and ``{1}`` for the article. - Spaces are already trimmed from ends of both parts. - Default: ``'{0}, {1}'``. +- **format**: A Python format string for the output. Use ``{0}`` to indicate the + part without the article and ``{1}`` for the article. Spaces are already + trimmed from ends of both parts. Default: ``'{0}, {1}'``. diff --git a/docs/plugins/thumbnails.rst b/docs/plugins/thumbnails.rst index c5cc3f5e5..4eeeb74be 100644 --- a/docs/plugins/thumbnails.rst +++ b/docs/plugins/thumbnails.rst @@ -1,5 +1,5 @@ Thumbnails Plugin -================== +================= The ``thumbnails`` plugin creates thumbnails for your album folders with the album cover. This works on freedesktop.org-compliant file managers such as @@ -14,10 +14,11 @@ install ``beets`` with ``thumbnails`` and ``fetchart`` extras pip install "beets[fetchart,thumbnails]" ``thumbnails`` need to resize the covers, and therefore requires either -`ImageMagick`_ or `Pillow`_. +ImageMagick_ or Pillow_. -.. _Pillow: https://github.com/python-pillow/Pillow -.. _ImageMagick: https://www.imagemagick.org/ +.. _imagemagick: https://www.imagemagick.org/ + +.. _pillow: https://github.com/python-pillow/Pillow Configuration ------------- @@ -28,12 +29,10 @@ file. The available options are - **auto**: Whether the thumbnail should be automatically set on import. Default: ``yes``. - **force**: Generate the thumbnail even when there's one that seems fine (more - recent than the cover art). - Default: ``no``. + recent than the cover art). Default: ``no``. - **dolphin**: Generate dolphin-compatible thumbnails. Dolphin (KDE file explorer) does not respect freedesktop.org's standard on thumbnails. 
This - functionality replaces the :doc:`/plugins/freedesktop` - Default: ``no`` + functionality replaces the :doc:`/plugins/freedesktop` Default: ``no`` Usage ----- diff --git a/docs/plugins/titlecase.rst b/docs/plugins/titlecase.rst new file mode 100644 index 000000000..e2861f0ac --- /dev/null +++ b/docs/plugins/titlecase.rst @@ -0,0 +1,215 @@ +Titlecase Plugin +================ + +The ``titlecase`` plugin lets you format tags and paths in accordance with the +titlecase guidelines in the `New York Times Manual of Style`_ and uses the +`python titlecase library`_. + +Motivation for this plugin comes from a desire to resolve differences in style +between databases sources. For example, `MusicBrainz style`_ follows standard +title case rules, except in the case of terms that are deemed generic, like +"mix" and "remix". On the other hand, `Discogs guidelines`_ recommend +capitalizing the first letter of each word, even for small words like "of" and +"a". This plugin aims to achieve a middle ground between disparate approaches to +casing, and bring more consistency to titles in your library. + +.. _discogs guidelines: https://support.discogs.com/hc/en-us/articles/360005006334-Database-Guidelines-1-General-Rules#Capitalization_And_Grammar + +.. _musicbrainz style: https://musicbrainz.org/doc/Style + +.. _new york times manual of style: https://search.worldcat.org/en/title/946964415 + +.. _python titlecase library: https://pypi.org/project/titlecase/ + +Installation +------------ + +To use the ``titlecase`` plugin, first enable it in your configuration (see +:ref:`using-plugins`). Then, install ``beets`` with ``titlecase`` extra: + +.. code-block:: bash + + pip install "beets[titlecase]" + +If you'd like to just use the path format expression, call ``%titlecase`` in +your path formatter, and set ``auto`` to ``no`` in the configuration. 
+ +:: + + paths: + default: %titlecase{$albumartist}/%titlecase{$album}/$track $title + +You can now configure ``titlecase`` to your preference. + +Configuration +------------- + +This plugin offers several configuration options to tune its function to your +preference. + +Default +~~~~~~~ + +.. code-block:: yaml + + titlecase: + auto: yes + fields: [] + preserve: [] + replace: [] + separators: [] + force_lowercase: no + small_first_last: yes + the_artist: yes + all_lowercase: no + all_caps: no + after_choice: no + +.. conf:: auto + :default: yes + + Whether to automatically apply titlecase to new imports. + +.. conf:: fields + :default: [] + + A list of fields to apply the titlecase logic to. You must specify the fields + you want to have modified in order for titlecase to apply changes to metadata. + + A good starting point is below, which will titlecase album titles, track titles, and all artist fields. + +.. code-block:: yaml + + titlecase: + fields: + - album + - title + - albumartist + - albumartist_credit + - albumartist_sort + - albumartists + - albumartists_credit + - albumartists_sort + - artist + - artist_credit + - artist_sort + - artists + - artists_credit + - artists_sort + +.. conf:: preserve + :default: [] + + List of words and phrases to preserve the case of. Without specifying ``DJ`` on + the list, titlecase will format it as ``Dj``, or specify ``The Beatles`` to make sure + ``With The Beatles`` is not capitalized as ``With the Beatles``. + +.. conf:: replace + :default: [] + + The replace function takes place before any titlecasing occurs, and is intended to + help normalize differences in punctuation styles. It accepts a list of tuples, with + the first being the target, and the second being the replacement. + + An example configuration that enforces one style of quotation mark is below. + +.. code-block:: yaml + + titlecase: + replace: + - "’": "'" + - "‘": "'" + - "“": '"' + - "”": '"' + +.. 
conf:: separators + :default: [] + + A list of characters to treat as markers of new sentences. Helpful for split titles + that might otherwise have a lowercase letter at the start of the second string. + +.. conf:: force_lowercase + :default: no + + Force all strings to lowercase before applying titlecase, but can cause + problems with all caps acronyms titlecase would otherwise recognize. + +.. conf:: small_first_last + :default: yes + + An option from the base titlecase library. Controls capitalizing small words at the start + of a sentence. With this turned off ``a`` and similar words will not be capitalized + under any circumstance. + +.. conf:: the_artist + :default: yes + + If a field name contains ``artist``, then any lowercase ``the`` will be + capitalized. Useful for bands with `The` as part of the proper name, + like ``Amyl and The Sniffers``. + +.. conf:: all_caps + :default: no + + If the letters a-Z in a string are all caps, do not modify the string. Useful + if you encounter a lot of acronyms. + +.. conf:: all_lowercase + :default: no + + If the letters a-Z in a string are all lowercase, do not modify the string. + Useful if you encounter a lot of stylized lowercase spellings, but otherwise + want titlecase applied. + +.. conf:: after_choice + :default: no + + By default, titlecase runs on the candidates that are received, adjusting them before + you make your selection and creating different weight calculations. If you'd rather + see the data as recieved from the database, set this to true to run after you make + your tag choice. + +Dangerous Fields +~~~~~~~~~~~~~~~~ + +``titlecase`` only ever modifies string fields, however, this doesn't prevent +you from selecting a case sensitive field that another plugin or feature may +rely on. + +In particular, including any of the following in your configuration could lead +to unintended behavior: + +.. 
code-block:: bash + + acoustid_fingerprint + acoustid_id + artists_ids + asin + deezer_track_id + format + id + isrc + mb_workid + mb_trackid + mb_albumid + mb_artistid + mb_artistids + mb_albumartistid + mb_albumartistids + mb_releasetrackid + mb_releasegroupid + bitrate_mode + encoder_info + encoder_settings + +Running Manually +---------------- + +From the command line, type: + +:: + + $ beet titlecase [QUERY] + +Configuration is drawn from the config file. Without a query the operation will +be applied to the entire collection. diff --git a/docs/plugins/types.rst b/docs/plugins/types.rst index 9847fec44..713664ff5 100644 --- a/docs/plugins/types.rst +++ b/docs/plugins/types.rst @@ -2,22 +2,26 @@ Types Plugin ============ The ``types`` plugin lets you declare types for attributes you use in your -library. For example, you can declare that a ``rating`` field is numeric so -that you can query it with ranges---which isn't possible when the field is -considered a string (the default). +library. For example, you can declare that a ``rating`` field is numeric so that +you can query it with ranges---which isn't possible when the field is considered +a string (the default). -Enable the ``types`` plugin as described in :doc:`/plugins/index` and then add -a ``types`` section to your :doc:`configuration file </reference/config>`. The +Enable the ``types`` plugin as described in :doc:`/plugins/index` and then add a +``types`` section to your :doc:`configuration file </reference/config>`. The configuration section should map field name to one of ``int``, ``float``, ``bool``, or ``date``. 
-Here's an example:: +Here's an example: + +:: types: rating: int Now you can assign numeric ratings to tracks and albums and use :ref:`range -queries <numericquery>` to filter them.:: +queries <numericquery>` to filter them.: + +:: beet modify "My favorite track" rating=5 beet ls rating:4..5 diff --git a/docs/plugins/unimported.rst b/docs/plugins/unimported.rst index 80ee8004b..1673c9d54 100644 --- a/docs/plugins/unimported.rst +++ b/docs/plugins/unimported.rst @@ -1,15 +1,19 @@ Unimported Plugin ================= -The ``unimported`` plugin allows one to list all files in the library folder which are not listed in the beets library database, including art files. +The ``unimported`` plugin allows one to list all files in the library folder +which are not listed in the beets library database, including art files. Command Line Usage ------------------ To use the ``unimported`` plugin, enable it in your configuration (see :ref:`using-plugins`). Then use it by invoking the ``beet unimported`` command. -The command will list all files in the library folder which are not imported. You can -exclude file extensions or entire subdirectories using the configuration file:: +The command will list all files in the library folder which are not imported. +You can exclude file extensions or entire subdirectories using the configuration +file: + +:: unimported: ignore_extensions: jpg png diff --git a/docs/plugins/web.rst b/docs/plugins/web.rst index 15719e119..74e2cf03e 100644 --- a/docs/plugins/web.rst +++ b/docs/plugins/web.rst @@ -46,45 +46,43 @@ HTML5 Audio. Configuration ------------- -To configure the plugin, make a ``web:`` section in your -configuration file. The available options are: +To configure the plugin, make a ``web:`` section in your configuration file. The +available options are: - **host**: The server hostname. Set this to 0.0.0.0 to bind to all interfaces. Default: Bind to 127.0.0.1. -- **port**: The server port. - Default: 8337. 
-- **cors**: The CORS allowed origin (see :ref:`web-cors`, below). - Default: CORS is disabled. -- **cors_supports_credentials**: Support credentials when using CORS (see :ref:`web-cors`, below). - Default: CORS_SUPPORTS_CREDENTIALS is disabled. +- **port**: The server port. Default: 8337. +- **cors**: The CORS allowed origin (see :ref:`web-cors`, below). Default: CORS + is disabled. +- **cors_supports_credentials**: Support credentials when using CORS (see + :ref:`web-cors`, below). Default: CORS_SUPPORTS_CREDENTIALS is disabled. - **reverse_proxy**: If true, enable reverse proxy support (see - :ref:`reverse-proxy`, below). - Default: false. -- **include_paths**: If true, includes paths in item objects. - Default: false. -- **readonly**: If true, DELETE and PATCH operations are not allowed. Only GET is permitted. - Default: true. + :ref:`reverse-proxy`, below). Default: false. +- **include_paths**: If true, includes paths in item objects. Default: false. +- **readonly**: If true, DELETE and PATCH operations are not allowed. Only GET + is permitted. Default: true. Implementation -------------- -The Web backend is built using a simple REST+JSON API with the excellent -`Flask`_ library. The frontend is a single-page application written with -`Backbone.js`_. This allows future non-Web clients to use the same backend API. +The Web backend is built using a simple REST+JSON API with the excellent Flask_ +library. The frontend is a single-page application written with Backbone.js_. +This allows future non-Web clients to use the same backend API. - -.. _Backbone.js: https://backbonejs.org +.. _backbone.js: https://backbonejs.org Eventually, to make the Web player really viable, we should use a Flash fallback for unsupported formats/browsers. There are a number of options for this: -* `audio.js`_ -* `html5media`_ -* `MediaElement.js`_ +- audio.js_ +- html5media_ +- MediaElement.js_ .. _audio.js: https://kolber.github.io/audiojs/ + .. 
_html5media: https://html5media.info/ -.. _MediaElement.js: https://www.mediaelementjs.com/ + +.. _mediaelement.js: https://www.mediaelementjs.com/ .. _web-cors: @@ -92,45 +90,50 @@ Cross-Origin Resource Sharing (CORS) ------------------------------------ The ``web`` plugin's API can be used as a backend for an in-browser client. By -default, browsers will only allow access from clients running on the same -server as the API. (You will get an arcane error about ``XMLHttpRequest`` -otherwise.) A technology called `CORS`_ lets you relax this restriction. +default, browsers will only allow access from clients running on the same server +as the API. (You will get an arcane error about ``XMLHttpRequest`` otherwise.) A +technology called CORS_ lets you relax this restriction. If you want to use an in-browser client hosted elsewhere (or running from a -different server on your machine), set the ``cors`` configuration option to -the "origin" (protocol, host, and optional port number) where the client is -served. Or set it to ``'*'`` to enable access from all origins. Note that there -are security implications if you set the origin to ``'*'``, so please research -this before using it. +different server on your machine), set the ``cors`` configuration option to the +"origin" (protocol, host, and optional port number) where the client is served. +Or set it to ``'*'`` to enable access from all origins. Note that there are +security implications if you set the origin to ``'*'``, so please research this +before using it. -If the ``web`` server is behind a proxy that uses credentials, you might want -to set the ``cors_supports_credentials`` configuration option to true to let +If the ``web`` server is behind a proxy that uses credentials, you might want to +set the ``cors_supports_credentials`` configuration option to true to let in-browser clients log in. -For example:: +For example: + +:: web: host: 0.0.0.0 cors: 'http://example.com' -.. 
_CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing +.. _cors: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing + .. _reverse-proxy: Reverse Proxy Support --------------------- When the server is running behind a reverse proxy, you can tell the plugin to -respect forwarded headers. Specifically, this can help when you host the -plugin at a base URL other than the root ``/`` or when you use the proxy to -handle secure connections. Enable the ``reverse_proxy`` configuration option -if you do this. +respect forwarded headers. Specifically, this can help when you host the plugin +at a base URL other than the root ``/`` or when you use the proxy to handle +secure connections. Enable the ``reverse_proxy`` configuration option if you do +this. Technically, this option lets the proxy provide ``X-Script-Name`` and ``X-Scheme`` HTTP headers to control the plugin's the ``SCRIPT_NAME`` and its ``wsgi.url_scheme`` parameter. -Here's a sample `Nginx`_ configuration that serves the web plugin under the -/beets directory:: +Here's a sample Nginx_ configuration that serves the web plugin under the /beets +directory: + +:: location /beets { proxy_pass http://127.0.0.1:8080; @@ -140,15 +143,17 @@ Here's a sample `Nginx`_ configuration that serves the web plugin under the proxy_set_header X-Script-Name /beets; } -.. _Nginx: https://www.nginx.com +.. _nginx: https://www.nginx.com JSON API -------- ``GET /item/`` -++++++++++++++ +~~~~~~~~~~~~~~ -Responds with a list of all tracks in the beets library. :: +Responds with a list of all tracks in the beets library. + +:: { "items": [ @@ -165,12 +170,13 @@ Responds with a list of all tracks in the beets library. :: ] } - ``GET /item/6`` -+++++++++++++++ +~~~~~~~~~~~~~~~ Looks for an item with id *6* in the beets library and responds with its JSON -representation. :: +representation. + +:: { "id": 6, @@ -178,24 +184,25 @@ representation. :: ... } -If there is no item with that id responds with a *404* status -code. 
+If there is no item with that id, the server responds with a *404* status code.
If it corresponds to a track, return the track in the same format as ``/item/*``. @@ -225,12 +232,13 @@ a track, return the track in the same format as ``/item/*``. If the server runs UNIX, you'll need to include an extra leading slash: ``http://localhost:8337/item/path//Users/beets/Music/Foo/Bar/Baz.mp3`` - ``GET /item/query/querystring`` -+++++++++++++++++++++++++++++++ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Returns a list of tracks matching the query. The *querystring* must be a -valid query as described in :doc:`/reference/query`. :: +Returns a list of tracks matching the query. The *querystring* must be a valid +query as described in :doc:`/reference/query`. + +:: { "results": [ @@ -240,57 +248,51 @@ valid query as described in :doc:`/reference/query`. :: } Path elements are joined as parts of a query. For example, -``/item/query/foo/bar`` will be converted to the query ``foo,bar``. -To specify literal path separators in a query, use a backslash instead of a -slash. +``/item/query/foo/bar`` will be converted to the query ``foo,bar``. To specify +literal path separators in a query, use a backslash instead of a slash. -This endpoint also supports *DELETE* and *PATCH* methods as above, to operate on all -items returned by the query. +This endpoint also supports *DELETE* and *PATCH* methods as above, to operate on +all items returned by the query. ``GET /item/6/file`` -++++++++++++++++++++ - -Sends the media file for the track. If the item or its corresponding file do -not exist a *404* status code is returned. +~~~~~~~~~~~~~~~~~~~~ +Sends the media file for the track. If the item or its corresponding file do not +exist a *404* status code is returned. 
Albums -++++++ +~~~~~~ For albums, the following endpoints are provided: -* ``GET /album/`` - -* ``GET /album/5`` - -* ``GET /album/5/art`` - -* ``DELETE /album/5`` - -* ``GET /album/5,7`` - -* ``DELETE /album/5,7`` - -* ``GET /album/query/querystring`` - -* ``DELETE /album/query/querystring`` +- ``GET /album/`` +- ``GET /album/5`` +- ``GET /album/5/art`` +- ``DELETE /album/5`` +- ``GET /album/5,7`` +- ``DELETE /album/5,7`` +- ``GET /album/query/querystring`` +- ``DELETE /album/query/querystring`` The interface and response format is similar to the item API, except replacing the encapsulation key ``"items"`` with ``"albums"`` when requesting ``/album/`` or ``/album/5,7``. In addition we can request the cover art of an album with -``GET /album/5/art``. -You can also add the '?expand' flag to get the individual items of an album. +``GET /album/5/art``. You can also add the '?expand' flag to get the individual +items of an album. -``DELETE`` is only allowed if ``readonly`` configuration option is set to ``no``. +``DELETE`` is only allowed if ``readonly`` configuration option is set to +``no``. ``GET /stats`` -++++++++++++++ +~~~~~~~~~~~~~~ -Responds with the number of tracks and albums in the database. :: +Responds with the number of tracks and albums in the database. + +:: { "items": 5, "albums": 3 } -.. _Flask: https://flask.palletsprojects.com/en/1.1.x/ +.. _flask: https://flask.palletsprojects.com/en/1.1.x/ diff --git a/docs/plugins/zero.rst b/docs/plugins/zero.rst index e3d717dfd..bf134e664 100644 --- a/docs/plugins/zero.rst +++ b/docs/plugins/zero.rst @@ -7,33 +7,40 @@ the plugin can strip useless comments like "ripped by MyGreatRipper." The plugin can work in one of two modes: -* ``fields``: A blacklist, where you choose the tags you want to remove (used by default). -* ``keep_fields``: A whitelist, where you instead specify the tags you want to keep. +- ``fields``: A blacklist, where you choose the tags you want to remove (used by + default). 
+- ``keep_fields``: A whitelist, where you instead specify the tags you want to + keep. -To use the ``zero`` plugin, enable the plugin in your configuration -(see :ref:`using-plugins`). +To use the ``zero`` plugin, enable the plugin in your configuration (see +:ref:`using-plugins`). Configuration ------------- -Make a ``zero:`` section in your configuration file. You can specify the -fields to nullify and the conditions for nullifying them: +Make a ``zero:`` section in your configuration file. You can specify the fields +to nullify and the conditions for nullifying them: -* Set ``auto`` to ``yes`` to null fields automatically on import. - Default: ``yes``. -* Set ``fields`` to a whitespace-separated list of fields to remove. You can - get the list of all available fields by running ``beet fields``. In - addition, the ``images`` field allows you to remove any images - embedded in the media file. -* Set ``keep_fields`` to *invert* the logic of the plugin. Only these fields - will be kept; other fields will be removed. Remember to set only - ``fields`` or ``keep_fields``---not both! -* To conditionally filter a field, use ``field: [regexp, regexp]`` to specify +- Set ``auto`` to ``yes`` to null fields automatically on import. Default: + ``yes``. +- Set ``fields`` to a whitespace-separated list of fields to remove. You can get + the list of all available fields by running ``beet fields``. In addition, the + ``images`` field allows you to remove any images embedded in the media file. +- Set ``keep_fields`` to *invert* the logic of the plugin. Only these fields + will be kept; other fields will be removed. Remember to set only ``fields`` or + ``keep_fields``---not both! +- To conditionally filter a field, use ``field: [regexp, regexp]`` to specify regular expressions. -* By default this plugin only affects files' tags; the beets database is left - unchanged. To update the tags in the database, set the ``update_database`` option to true. 
+- Set ``omit_single_disc`` to ``True`` to omit writing the ``disc`` number for + albums with only a single disc (``disctotal == 1``). By default, beets will + number the disc even if the album contains only one disc in total. +- By default this plugin only affects files' tags; the beets database is left + unchanged. To update the tags in the database, set the ``update_database`` + option to true. -For example:: +For example: + +:: zero: fields: month day genre genres comments @@ -56,9 +63,11 @@ in your library. Preserving Album Art -------------------- -If you use the ``keep_fields`` option, the plugin will remove embedded album -art from files' tags unless you tell it not to. To keep the album art, include -the special field ``images`` in the list. For example:: +If you use the ``keep_fields`` option, the plugin will remove embedded album art +from files' tags unless you tell it not to. To keep the album art, include the +special field ``images`` in the list. For example: + +:: zero: keep_fields: title artist album year track genre genres images diff --git a/docs/reference/cli.rst b/docs/reference/cli.rst index 456059c6c..c0274553a 100644 --- a/docs/reference/cli.rst +++ b/docs/reference/cli.rst @@ -26,9 +26,6 @@ Command-Line Interface command; for zsh, see the accompanying `completion script`_ for the ``beet`` command. - - - Commands -------- @@ -45,7 +42,8 @@ Commands .. _import-cmd: import -`````` +~~~~~~ + :: beet import [-CWAPRqst] [-l LOGPATH] PATH... @@ -54,120 +52,104 @@ import Add music to your library, attempting to get correct tags for it from MusicBrainz. -Point the command at some music: directories, single files, or -compressed archives. The music will be copied to a configurable -directory structure and added to a library database. The command is -interactive and will try to get you to verify MusicBrainz tags that it -thinks are suspect. See the :doc:`autotagging guide </guides/tagger>` -for detail on how to use the interactive tag-correction flow. 
+Point the command at some music: directories, single files, or compressed +archives. The music will be copied to a configurable directory structure and +added to a library database. The command is interactive and will try to get you +to verify MusicBrainz tags that it thinks are suspect. See the :doc:`autotagging +guide </guides/tagger>` for detail on how to use the interactive tag-correction +flow. -Directories passed to the import command can contain either a single -album or many, in which case the leaf directories will be considered -albums (the latter case is true of typical Artist/Album organizations -and many people's "downloads" folders). The path can also be a single -song or an archive. Beets supports `zip` and `tar` archives out of the -box. To extract `rar` files, install the `rarfile`_ package and the -`unrar` command. To extract `7z` files, install the `py7zr`_ package. +Directories passed to the import command can contain either a single album or +many, in which case the leaf directories will be considered albums (the latter +case is true of typical Artist/Album organizations and many people's "downloads" +folders). The path can also be a single song or an archive. Beets supports +``zip`` and ``tar`` archives out of the box. To extract ``rar`` files, install +the rarfile_ package and the ``unrar`` command. To extract ``7z`` files, install +the py7zr_ package. Optional command flags: -* By default, the command copies files to your library directory and - updates the ID3 tags on your music. In order to move the files, instead of - copying, use the ``-m`` (move) option. If you'd like to leave your music - files untouched, try the ``-C`` (don't copy) and ``-W`` (don't write tags) - options. You can also disable this behavior by default in the - configuration file (below). - -* Also, you can disable the autotagging behavior entirely using ``-A`` - (don't autotag)---then your music will be imported with its existing - metadata. 
- -* During a long tagging import, it can be useful to keep track of albums - that weren't tagged successfully---either because they're not in the - MusicBrainz database or because something's wrong with the files. Use the - ``-l`` option to specify a filename to log every time you skip an album - or import it "as-is" or an album gets skipped as a duplicate. You can later - review the file manually or import skipped paths from the logfile - automatically by using the ``--from-logfile LOGFILE`` argument. - -* Relatedly, the ``-q`` (quiet) option can help with large imports by - autotagging without ever bothering to ask for user input. Whenever the - normal autotagger mode would ask for confirmation, the quiet mode - performs a fallback action that can be configured using the - ``quiet_fallback`` configuration or ``--quiet-fallback`` CLI option. - By default it pessimistically ``skip``s the file. +- By default, the command copies files to your library directory and updates the + ID3 tags on your music. In order to move the files, instead of copying, use + the ``-m`` (move) option. If you'd like to leave your music files untouched, + try the ``-C`` (don't copy) and ``-W`` (don't write tags) options. You can + also disable this behavior by default in the configuration file (below). +- Also, you can disable the autotagging behavior entirely using ``-A`` (don't + autotag)---then your music will be imported with its existing metadata. +- During a long tagging import, it can be useful to keep track of albums that + weren't tagged successfully---either because they're not in the MusicBrainz + database or because something's wrong with the files. Use the ``-l`` option to + specify a filename to log every time you skip an album or import it "as-is" or + an album gets skipped as a duplicate. You can later review the file manually + or import skipped paths from the logfile automatically by using the + ``--from-logfile LOGFILE`` argument. 
+- Relatedly, the ``-q`` (quiet) option can help with large imports by + autotagging without ever bothering to ask for user input. Whenever the normal + autotagger mode would ask for confirmation, the quiet mode performs a fallback + action that can be configured using the ``quiet_fallback`` configuration or + ``--quiet-fallback`` CLI option. By default it pessimistically skips the file. Alternatively, it can be used as is, by configuring ``asis``. - -* Speaking of resuming interrupted imports, the tagger will prompt you if it - seems like the last import of the directory was interrupted (by you or by - a crash). If you want to skip this prompt, you can say "yes" automatically - by providing ``-p`` or "no" using ``-P``. The resuming feature can be - disabled by default using a configuration option (see below). - -* If you want to import only the *new* stuff from a directory, use the - ``-i`` - option to run an *incremental* import. With this flag, beets will keep - track of every directory it ever imports and avoid importing them again. - This is useful if you have an "incoming" directory that you periodically - add things to. - To get this to work correctly, you'll need to use an incremental import *every - time* you run an import on the directory in question---including the first - time, when no subdirectories will be skipped. So consider enabling the +- Speaking of resuming interrupted imports, the tagger will prompt you if it + seems like the last import of the directory was interrupted (by you or by a + crash). If you want to skip this prompt, you can say "yes" automatically by + providing ``-p`` or "no" using ``-P``. The resuming feature can be disabled by + default using a configuration option (see below). +- If you want to import only the *new* stuff from a directory, use the ``-i`` + option to run an *incremental* import. With this flag, beets will keep track + of every directory it ever imports and avoid importing them again. 
This is + useful if you have an "incoming" directory that you periodically add things + to. To get this to work correctly, you'll need to use an incremental import + *every time* you run an import on the directory in question---including the + first time, when no subdirectories will be skipped. So consider enabling the ``incremental`` configuration option. - -* If you don't want to record skipped files during an *incremental* import, use - the ``--incremental-skip-later`` flag which corresponds to the - ``incremental_skip_later`` configuration option. - Setting the flag prevents beets from persisting skip decisions during a - non-interactive import so that a user can make a decision regarding - previously skipped files during a subsequent interactive import run. - To record skipped files during incremental import explicitly, use the - ``--noincremental-skip-later`` option. - -* When beets applies metadata to your music, it will retain the value of any +- If you don't want to record skipped files during an *incremental* import, use + the ``--incremental-skip-later`` flag which corresponds to the + ``incremental_skip_later`` configuration option. Setting the flag prevents + beets from persisting skip decisions during a non-interactive import so that a + user can make a decision regarding previously skipped files during a + subsequent interactive import run. To record skipped files during incremental + import explicitly, use the ``--noincremental-skip-later`` option. +- When beets applies metadata to your music, it will retain the value of any existing tags that weren't overwritten, and import them into the database. You may prefer to only use existing metadata for finding matches, and to erase it completely when new metadata is applied. You can enforce this behavior with the ``--from-scratch`` option, or the ``from_scratch`` configuration option. - -* By default, beets will proceed without asking if it finds a very close - metadata match. 
To disable this and have the importer ask you every time, - use the ``-t`` (for *timid*) option. - -* The importer typically works in a whole-album-at-a-time mode. If you - instead want to import individual, non-album tracks, use the *singleton* - mode by supplying the ``-s`` option. - -* If you have an album that's split across several directories under a common - top directory, use the ``--flat`` option. This takes all the music files - under the directory (recursively) and treats them as a single large album - instead of as one album per directory. This can help with your more stubborn - multi-disc albums. - -* Similarly, if you have one directory that contains multiple albums, use the +- By default, beets will proceed without asking if it finds a very close + metadata match. To disable this and have the importer ask you every time, use + the ``-t`` (for *timid*) option. +- The importer typically works in a whole-album-at-a-time mode. If you instead + want to import individual, non-album tracks, use the *singleton* mode by + supplying the ``-s`` option. +- If you have an album that's split across several directories under a common + top directory, use the ``--flat`` option. This takes all the music files under + the directory (recursively) and treats them as a single large album instead of + as one album per directory. This can help with your more stubborn multi-disc + albums. +- Similarly, if you have one directory that contains multiple albums, use the ``--group-albums`` option to split the files based on their metadata before matching them as separate albums. - -* If you want to preview which files would be imported, use the ``--pretend`` - option. If set, beets will just print a list of files that it would - otherwise import. - -* If you already have a metadata backend ID that matches the items to be +- If you want to preview which files would be imported, use the ``--pretend`` + option. 
If set, beets will just print a list of files that it would otherwise + import. +- If you already have a metadata backend ID that matches the items to be imported, you can instruct beets to restrict the search to that ID instead of searching for other candidates by using the ``--search-id SEARCH_ID`` option. Multiple IDs can be specified by simply repeating the option several times. +- You can supply ``--set field=value`` to assign ``field`` to ``value`` on + import. Values support the same template syntax as beets' :doc:`path formats + <pathformat>`. -* You can supply ``--set field=value`` to assign `field` to `value` on import. - Values support the same template syntax as beets' - :doc:`path formats <pathformat>`. These assignments will merge with (and possibly override) the :ref:`set_fields` configuration dictionary. You can use the option multiple - times on the command line, like so:: + times on the command line, like so: - beet import --set genre="Alternative Rock" --set mood="emotional" + :: + + beet import --set genre="Alternative Rock" --set mood="emotional" + +.. _py7zr: https://pypi.org/project/py7zr/ .. _rarfile: https://pypi.python.org/pypi/rarfile/ -.. _py7zr: https://pypi.org/project/py7zr/ .. only:: html @@ -206,7 +188,8 @@ Optional command flags: .. _list-cmd: list -```` +~~~~ + :: beet list [-apf] QUERY @@ -214,9 +197,9 @@ list :doc:`Queries <query>` the database for music. Want to search for "Gronlandic Edit" by of Montreal? Try ``beet list -gronlandic``. Maybe you want to see everything released in 2009 with -"vegetables" in the title? Try ``beet list year:2009 title:vegetables``. You -can also specify the sort order. (Read more in :doc:`query`.) +gronlandic``. Maybe you want to see everything released in 2009 with +"vegetables" in the title? Try ``beet list year:2009 title:vegetables``. You can +also specify the sort order. (Read more in :doc:`query`.) You can use the ``-a`` switch to search for albums instead of individual items. 
In this case, the queries you use are restricted to album-level fields: for @@ -225,20 +208,19 @@ like ``title:foo`` will be ignored. Remember that ``artist`` is an item-level field; ``albumartist`` is the corresponding album field. The ``-p`` option makes beets print out filenames of matched items, which might -be useful for piping into other Unix commands (such as `xargs`_). Similarly, the -``-f`` option lets you specify a specific format with which to print every album -or track. This uses the same template syntax as beets' :doc:`path formats -<pathformat>`. For example, the command ``beet ls -af '$album: $albumtotal' -beatles`` prints out the number of tracks on each Beatles album. In Unix shells, -remember to enclose the template argument in single quotes to avoid environment -variable expansion. - -.. _xargs: https://en.wikipedia.org/wiki/Xargs +be useful for piping into other Unix commands (such as `xargs +<https://en.wikipedia.org/wiki/Xargs>`__). Similarly, the ``-f`` option lets you +specify a specific format with which to print every album or track. This uses +the same template syntax as beets' :doc:`path formats <pathformat>`. For +example, the command ``beet ls -af '$album: $albumtotal' beatles`` prints out +the number of tracks on each Beatles album. In Unix shells, remember to enclose +the template argument in single quotes to avoid environment variable expansion. .. _remove-cmd: remove -`````` +~~~~~~ + :: beet remove [-adf] QUERY @@ -246,26 +228,28 @@ remove Remove music from your library. This command uses the same :doc:`query <query>` syntax as the ``list`` command. -By default, it just removes entries from the library database; it doesn't -touch the files on disk. To actually delete the files, use the ``-d`` flag. -When the ``-a`` flag is given, the command operates on albums instead of -individual tracks. +By default, it just removes entries from the library database; it doesn't touch +the files on disk. 
To actually delete the files, use the ``-d`` flag. When the +``-a`` flag is given, the command operates on albums instead of individual +tracks. + +When you run the ``remove`` command, it prints a list of all affected items in +the library and asks for your permission before removing them. You can then +choose to abort (type ``n``), confirm (``y``), or interactively choose some of +the items (``s``). In the latter case, the command will prompt you for every +matching item or album and invite you to type ``y`` to remove the item/album, +``n`` to keep it or ``q`` to exit and only remove the items/albums selected up +to this point. -When you run the ``remove`` command, it prints a list of all -affected items in the library and asks for your permission before removing -them. You can then choose to abort (type `n`), confirm (`y`), or interactively -choose some of the items (`s`). In the latter case, the command will prompt you -for every matching item or album and invite you to type `y` to remove the -item/album, `n` to keep it or `q` to exit and only remove the items/albums -selected up to this point. This option lets you choose precisely which tracks/albums to remove without -spending too much time to carefully craft a query. -If you do not want to be prompted at all, use the ``-f`` option. +spending too much time to carefully craft a query. If you do not want to be +prompted at all, use the ``-f`` option. .. _modify-cmd: modify -`````` +~~~~~~ + :: beet modify [-IMWay] [-f FORMAT] QUERY [FIELD=VALUE...] [FIELD!...] @@ -278,44 +262,42 @@ artist="Tom Tom Club"`` will change the artist for the track "Genius of Love." To remove fields (which is only possible for flexible attributes), follow a field name with an exclamation point: ``field!``. -Values can also be *templates*, using the same syntax as -:doc:`path formats <pathformat>`. 
-For example, ``beet modify artist='$artist_sort'`` will copy the artist sort -name into the artist field for all your tracks, -and ``beet modify title='$track $title'`` will add track numbers to their -title metadata. +Values can also be *templates*, using the same syntax as :doc:`path formats +<pathformat>`. For example, ``beet modify artist='$artist_sort'`` will copy the +artist sort name into the artist field for all your tracks, and ``beet modify +title='$track $title'`` will add track numbers to their title metadata. The ``-a`` option changes to querying album fields instead of track fields and -also enables to operate on albums in addition to the individual tracks. -Without this flag, the command will only change *track-level* data, even if all -the tracks belong to the same album. If you want to change an *album-level* -field, such as ``year`` or ``albumartist``, you'll want to use the ``-a`` flag -to avoid a confusing situation where the data for individual tracks conflicts -with the data for the whole album. +also enables operating on albums in addition to the individual tracks. Without +this flag, the command will only change *track-level* data, even if all the +tracks belong to the same album. If you want to change an *album-level* field, +such as ``year`` or ``albumartist``, you'll want to use the ``-a`` flag to avoid +a confusing situation where the data for individual tracks conflicts with the +data for the whole album. Modifications issued using ``-a`` by default cascade to individual tracks. To prevent this behavior, use ``-I``/``--noinherit``. Items will automatically be moved around when necessary if they're in your -library directory, but you can disable that with ``-M``. 
Tags will be written to +the files according to the settings you have for imports, but these can be +overridden with ``-w`` (write tags, the default) and ``-W`` (don't write tags). -When you run the ``modify`` command, it prints a list of all -affected items in the library and asks for your permission before making any -changes. You can then choose to abort the change (type `n`), confirm -(`y`), or interactively choose some of the items (`s`). In the latter case, -the command will prompt you for every matching item or album and invite you to -type `y` to apply the changes, `n` to discard them or `q` to exit and apply -the selected changes. This option lets you choose precisely which data to -change without spending too much time to carefully craft a query. To skip the -prompts entirely, use the ``-y`` option. +When you run the ``modify`` command, it prints a list of all affected items in +the library and asks for your permission before making any changes. You can then +choose to abort the change (type ``n``), confirm (``y``), or interactively +choose some of the items (``s``). In the latter case, the command will prompt +you for every matching item or album and invite you to type ``y`` to apply the +changes, ``n`` to discard them or ``q`` to exit and apply the selected changes. +This option lets you choose precisely which data to change without spending too +much time to carefully craft a query. To skip the prompts entirely, use the +``-y`` option. .. _move-cmd: move -```` +~~~~ + :: beet move [-capt] [-d DIR] QUERY @@ -329,18 +311,19 @@ anywhere in your filesystem. The ``-c`` option copies files instead of moving them. As with other commands, the ``-a`` option matches albums instead of items. The ``-e`` flag (for "export") copies files without changing the database. -To perform a "dry run", just use the ``-p`` (for "pretend") flag. This will -show you a list of files that would be moved but won't actually change anything -on disk. 
The ``-t`` option sets the timid mode which will ask again -before really moving or copying the files. +To perform a "dry run", just use the ``-p`` (for "pretend") flag. This will show +you a list of files that would be moved but won't actually change anything on +disk. The ``-t`` option sets the timid mode which will ask again before really +moving or copying the files. .. _update-cmd: update -`````` +~~~~~~ + :: - beet update [-F] FIELD [-e] EXCLUDE_FIELD [-aM] QUERY + beet update [-F] FIELD [-e] EXCLUDE_FIELD [-aMp] QUERY Update the library (and, by default, move files) to reflect out-of-band metadata changes and file deletions. @@ -356,78 +339,80 @@ To perform a "dry run" of an update, just use the ``-p`` (for "pretend") flag. This will show you all the proposed changes but won't actually change anything on disk. -By default, all the changed metadata will be populated back to the database. -If you only want certain fields to be written, specify them with the ```-F``` -flags (which can be used multiple times). Alternatively, specify fields to *not* -write with ```-e``` flags (which can be used multiple times). For the list of -supported fields, please see ```beet fields```. +By default, all the changed metadata will be populated back to the database. If +you only want certain fields to be written, specify them with the ``-F`` flags +(which can be used multiple times). Alternatively, specify fields to *not* write +with ``-e`` flags (which can be used multiple times). For the list of supported +fields, please see ``beet fields``. When an updated track is part of an album, the album-level fields of *all* tracks from the album are also updated. (Specifically, the command copies -album-level data from the first track on the album and applies it to the -rest of the tracks.) This means that, if album-level fields aren't identical -within an album, some changes shown by the ``update`` command may be -overridden by data from other tracks on the same album. 
This means that -running the ``update`` command multiple times may show the same changes being -applied. - +album-level data from the first track on the album and applies it to the rest of +the tracks.) This means that, if album-level fields aren't identical within an +album, some changes shown by the ``update`` command may be overridden by data +from other tracks on the same album. This means that running the ``update`` +command multiple times may show the same changes being applied. .. _write-cmd: write -````` +~~~~~ + :: beet write [-pf] [QUERY] Write metadata from the database into files' tags. -When you make changes to the metadata stored in beets' library database -(during import or with the :ref:`modify-cmd` command, for example), you often -have the option of storing changes only in the database, leaving your files -untouched. The ``write`` command lets you later change your mind and write the -contents of the database into the files. By default, this writes the changes only if there is a difference between the database and the tags in the file. +When you make changes to the metadata stored in beets' library database (during +import or with the :ref:`modify-cmd` command, for example), you often have the +option of storing changes only in the database, leaving your files untouched. +The ``write`` command lets you later change your mind and write the contents of +the database into the files. By default, this writes the changes only if there +is a difference between the database and the tags in the file. You can think of this command as the opposite of :ref:`update-cmd`. The ``-p`` option previews metadata changes without actually applying them. -The ``-f`` option forces a write to the file, even if the file tags match the database. This is useful for making sure that enabled plugins that run on write (e.g., the Scrub and Zero plugins) are run on the file. - - +The ``-f`` option forces a write to the file, even if the file tags match the +database. 
This is useful for making sure that enabled plugins that run on write +(e.g., the Scrub and Zero plugins) are run on the file. .. _stats-cmd: stats -````` +~~~~~ + :: beet stats [-e] [QUERY] -Show some statistics on your entire library (if you don't provide a -:doc:`query <query>`) or the matched items (if you do). +Show some statistics on your entire library (if you don't provide a :doc:`query +<query>`) or the matched items (if you do). -By default, the command calculates file sizes using their bitrate and -duration. The ``-e`` (``--exact``) option reads the exact sizes of each file -(but is slower). The exact mode also outputs the exact duration in seconds. +By default, the command calculates file sizes using their bitrate and duration. +The ``-e`` (``--exact``) option reads the exact sizes of each file (but is +slower). The exact mode also outputs the exact duration in seconds. .. _fields-cmd: fields -`````` +~~~~~~ + :: beet fields Show the item and album metadata fields available for use in :doc:`query` and -:doc:`pathformat`. The listing includes any template fields provided by -plugins and any flexible attributes you've manually assigned to your items and -albums. +:doc:`pathformat`. The listing includes any template fields provided by plugins +and any flexible attributes you've manually assigned to your items and albums. .. _config-cmd: config -`````` +~~~~~~ + :: beet config [-pdc] @@ -435,20 +420,19 @@ config Show or edit the user configuration. This command does one of three things: -* With no options, print a YAML representation of the current user - configuration. With the ``--default`` option, beets' default options are - also included in the dump. -* The ``--path`` option instead shows the path to your configuration file. - This can be combined with the ``--default`` flag to show where beets keeps - its internal defaults. 
-* By default, sensitive information like passwords is removed when dumping the +- With no options, print a YAML representation of the current user + configuration. With the ``--default`` option, beets' default options are also + included in the dump. +- The ``--path`` option instead shows the path to your configuration file. This + can be combined with the ``--default`` flag to show where beets keeps its + internal defaults. +- By default, sensitive information like passwords is removed when dumping the configuration. The ``--clear`` option includes this sensitive data. -* With the ``--edit`` option, beets attempts to open your config file for +- With the ``--edit`` option, beets attempts to open your config file for editing. It first tries the ``$EDITOR`` environment variable, followed by ``$EDITOR`` and then a fallback option depending on your platform: ``open`` on OS X, ``xdg-open`` on Unix, and direct invocation on Windows. - .. _global-flags: Global Flags @@ -458,27 +442,26 @@ Beets has a few "global" flags that affect all commands. These must appear between the executable name (``beet``) and the command---for example, ``beet -v import ...``. -* ``-l LIBPATH``: specify the library database file to use. -* ``-d DIRECTORY``: specify the library root directory. -* ``-v``: verbose mode; prints out a deluge of debugging information. Please use +- ``-l LIBPATH``: specify the library database file to use. +- ``-d DIRECTORY``: specify the library root directory. +- ``-v``: verbose mode; prints out a deluge of debugging information. Please use this flag when reporting bugs. You can use it twice, as in ``-vv``, to make beets even more verbose. -* ``-c FILE``: read a specified YAML :doc:`configuration file <config>`. This +- ``-c FILE``: read a specified YAML :doc:`configuration file <config>`. This configuration works as an overlay: rather than replacing your normal configuration options entirely, the two are merged. 
Any individual options set in this config file will override the corresponding settings in your base configuration. -* ``-p plugins``: specify a comma-separated list of plugins to enable. If - specified, the plugin list in your configuration is ignored. The long form - of this argument also allows specifying no plugins, effectively disabling - all plugins: ``--plugins=``. -* ``-P plugins``: specify a comma-separated list of plugins to disable in a - specific beets run. This will overwrite ``-p`` if used with it. To disable all plugins, use - ``--plugins=`` instead. - -Beets also uses the ``BEETSDIR`` environment variable to look for -configuration and data. +- ``-p plugins``: specify a comma-separated list of plugins to enable. If + specified, the plugin list in your configuration is ignored. The long form of + this argument also allows specifying no plugins, effectively disabling all + plugins: ``--plugins=``. +- ``-P plugins``: specify a comma-separated list of plugins to disable in a + specific beets run. This will overwrite ``-p`` if used with it. To disable all + plugins, use ``--plugins=`` instead. +Beets also uses the ``BEETSDIR`` environment variable to look for configuration +and data. .. _completion: @@ -486,48 +469,55 @@ Shell Completion ---------------- Beets includes support for shell command completion. The command ``beet -completion`` prints out a `bash`_ 3.2 script; to enable completion put a line -like this into your ``.bashrc`` or similar file:: +completion`` prints out a bash_ 3.2 script; to enable completion put a line like +this into your ``.bashrc`` or similar file: + +:: eval "$(beet completion)" Or, to avoid slowing down your shell startup time, you can pipe the ``beet completion`` output to a file and source that instead. -You will also need to source the `bash-completion`_ script, which is probably +You will also need to source the bash-completion_ script, which is probably available via your package manager. 
On OS X, you can install it via Homebrew with ``brew install bash-completion``; Homebrew will give you instructions for sourcing the script. -.. _bash-completion: https://github.com/scop/bash-completion .. _bash: https://www.gnu.org/software/bash/ -The completion script suggests names of subcommands and (after typing -``-``) options of the given command. If you are using a command that -accepts a query, the script will also complete field names. :: +.. _bash-completion: https://github.com/scop/bash-completion + +The completion script suggests names of subcommands and (after typing ``-``) +options of the given command. If you are using a command that accepts a query, +the script will also complete field names. + +:: beet list ar[TAB] # artist: artist_credit: artist_sort: artpath: beet list artp[TAB] beet list artpath\: -(Don't worry about the slash in front of the colon: this is a escape -sequence for the shell and won't be seen by beets.) +(Don't worry about the slash in front of the colon: this is a escape sequence +for the shell and won't be seen by beets.) -Completion of plugin commands only works for those plugins -that were enabled when running ``beet completion``. If you add a plugin -later on you will want to re-generate the script. +Completion of plugin commands only works for those plugins that were enabled +when running ``beet completion``. If you add a plugin later on you will want to +re-generate the script. zsh -``` +~~~ If you use zsh, take a look at the included `completion script`_. The script -should be placed in a directory that is part of your ``fpath``, and `not` +should be placed in a directory that is part of your ``fpath``, and ``not`` sourced in your ``.zshrc``. Running ``echo $fpath`` will give you a list of valid directories. Another approach is to use zsh's bash completion compatibility. 
This snippet -defines some bash-specific functions to make this work without errors:: +defines some bash-specific functions to make this work without errors: + +:: autoload bashcompinit bashcompinit @@ -538,7 +528,6 @@ defines some bash-specific functions to make this work without errors:: .. _completion script: https://github.com/beetbox/beets/blob/master/extra/_beet - .. only:: man See Also diff --git a/docs/reference/config.rst b/docs/reference/config.rst index 234185e79..b4874416c 100644 --- a/docs/reference/config.rst +++ b/docs/reference/config.rst @@ -1,25 +1,27 @@ Configuration ============= -Beets has an extensive configuration system that lets you customize nearly -every aspect of its operation. To configure beets, you create a file called +Beets has an extensive configuration system that lets you customize nearly every +aspect of its operation. To configure beets, you create a file called ``config.yaml``. The location of the file depends on your platform (type ``beet config -p`` to see the path on your system): -* On Unix-like OSes, write ``~/.config/beets/config.yaml``. -* On Windows, use ``%APPDATA%\beets\config.yaml``. This is usually in a +- On Unix-like OSes, write ``~/.config/beets/config.yaml``. +- On Windows, use ``%APPDATA%\beets\config.yaml``. This is usually in a directory like ``C:\Users\You\AppData\Roaming``. -* On OS X, you can use either the Unix location or ``~/Library/Application +- On OS X, you can use either the Unix location or ``~/Library/Application Support/beets/config.yaml``. -You can launch your text editor to create or update your configuration by -typing ``beet config -e``. (See the :ref:`config-cmd` command for details.) It -is also possible to customize the location of the configuration file and even -use multiple layers of configuration. See `Configuration Location`_, below. +You can launch your text editor to create or update your configuration by typing +``beet config -e``. (See the :ref:`config-cmd` command for details.) 
It is also +possible to customize the location of the configuration file and even use +multiple layers of configuration. See `Configuration Location`_, below. -The config file uses `YAML`_ syntax. You can use the full power of YAML, but -most configuration options are simple key/value pairs. This means your config -file will look like this:: +The config file uses YAML_ syntax. You can use the full power of YAML, but most +configuration options are simple key/value pairs. This means your config file +will look like this: + +:: option: value another_option: foo @@ -28,14 +30,14 @@ file will look like this:: foo: bar In YAML, you will need to use spaces (not tabs!) to indent some lines. If you -have questions about more sophisticated syntax, take a look at the `YAML`_ +have questions about more sophisticated syntax, take a look at the YAML_ documentation. -.. _YAML: https://yaml.org/ +.. _yaml: https://yaml.org/ The rest of this page enumerates the dizzying litany of configuration options -available in beets. You might also want to see an -:ref:`example <config-example>`. +available in beets. You might also want to see an :ref:`example +<config-example>`. .. contents:: :local: @@ -58,26 +60,29 @@ directory The directory to which files will be copied/moved when adding them to the library. Defaults to a folder called ``Music`` in your home directory. +.. _plugins-config: + plugins ~~~~~~~ -A space-separated list of plugin module names to load. See -:ref:`using-plugins`. +A space-separated list of plugin module names to load. See :ref:`using-plugins`. include ~~~~~~~ -A space-separated list of extra configuration files to include. -Filenames are relative to the directory containing ``config.yaml``. +A space-separated list of extra configuration files to include. Filenames are +relative to the directory containing ``config.yaml``. pluginpath ~~~~~~~~~~ -Directories to search for plugins. 
Each Python file or directory in a plugin -path represents a plugin and should define a subclass of :class:`BeetsPlugin`. -A plugin can then be loaded by adding the filename to the `plugins` configuration. +Directories to search for plugins. Each Python file or directory in a plugin +path represents a plugin and should define a subclass of |BeetsPlugin|. A plugin +can then be loaded by adding the plugin name to the ``plugins`` configuration. The plugin path can either be a single string or a list of strings---so, if you -have multiple paths, format them as a YAML list like so:: +have multiple paths, format them as a YAML list like so: + +:: pluginpath: - /path/one @@ -89,7 +94,7 @@ ignore ~~~~~~ A list of glob patterns specifying file and directory names to be ignored when -importing. By default, this consists of ``.*``, ``*~``, ``System Volume +importing. By default, this consists of ``.*``, ``*~``, ``System Volume Information``, ``lost+found`` (i.e., beets ignores Unix-style hidden files, backup files, and directories that appears at the root of some Linux and Windows filesystems). @@ -119,9 +124,11 @@ replacement strings. For example, ``[xy]: z`` will make beets replace all instances of the characters ``x`` or ``y`` with the character ``z``. If you do change this value, be certain that you include at least enough -substitutions to avoid causing errors on your operating system. Here are -the default substitutions used by beets, which are sufficient to avoid -unexpected behavior on all popular platforms:: +substitutions to avoid causing errors on your operating system. Here are the +default substitutions used by beets, which are sufficient to avoid unexpected +behavior on all popular platforms: + +:: replace: '[\\/]': _ @@ -133,40 +140,39 @@ unexpected behavior on all popular platforms:: '^\s+': '' '^-': _ -These substitutions remove forward and back slashes, leading dots, and -control characters—all of which is a good idea on any OS. 
The fourth line -removes the Windows "reserved characters" (useful even on Unix for -compatibility with Windows-influenced network filesystems like Samba). -Trailing dots and trailing whitespace, which can cause problems on Windows -clients, are also removed. +These substitutions remove forward and back slashes, leading dots, and control +characters—all of which is a good idea on any OS. The fourth line removes the +Windows "reserved characters" (useful even on Unix for compatibility with +Windows-influenced network filesystems like Samba). Trailing dots and trailing +whitespace, which can cause problems on Windows clients, are also removed. When replacements other than the defaults are used, it is possible that they will increase the length of the path. In the scenario where this leads to a -conflict with the maximum filename length, the default replacements will be -used to resolve the conflict and beets will display a warning. +conflict with the maximum filename length, the default replacements will be used +to resolve the conflict and beets will display a warning. -Note that paths might contain special characters such as typographical -quotes (``“”``). With the configuration above, those will not be -replaced as they don't match the typewriter quote (``"``). To also strip these -special characters, you can either add them to the replacement list or use the -:ref:`asciify-paths` configuration option below. +Note that paths might contain special characters such as typographical quotes +(``“”``). With the configuration above, those will not be replaced as they don't +match the typewriter quote (``"``). To also strip these special characters, you +can either add them to the replacement list or use the :ref:`asciify-paths` +configuration option below. .. 
_path-sep-replace: path_sep_replace ~~~~~~~~~~~~~~~~ -A string that replaces the path separator (for example, the forward slash -``/`` on Linux and MacOS, and the backward slash ``\\`` on Windows) when -generating filenames with beets. -This option is related to :ref:`replace`, but is distinct from it for -technical reasons. +A string that replaces the path separator (for example, the forward slash ``/`` +on Linux and MacOS, and the backward slash ``\\`` on Windows) when generating +filenames with beets. This option is related to :ref:`replace`, but is distinct +from it for technical reasons. .. warning:: - Changing this option is potentially dangerous. For example, setting - it to the actual path separator could create directories in unexpected - locations. Use caution when changing it and always try it out on a small - number of files before applying it to your whole library. + + Changing this option is potentially dangerous. For example, setting it to + the actual path separator could create directories in unexpected locations. + Use caution when changing it and always try it out on a small number of + files before applying it to your whole library. Default: ``_``. @@ -177,22 +183,20 @@ asciify_paths Convert all non-ASCII characters in paths to ASCII equivalents. -For example, if your path template for -singletons is ``singletons/$title`` and the title of a track is "Café", -then the track will be saved as ``singletons/Cafe.mp3``. The changes -take place before applying the :ref:`replace` configuration and are roughly -equivalent to wrapping all your path templates in the ``%asciify{}`` -:ref:`template function <template-functions>`. +For example, if your path template for singletons is ``singletons/$title`` and +the title of a track is "Café", then the track will be saved as +``singletons/Cafe.mp3``. 
The changes take place before applying the +:ref:`replace` configuration and are roughly equivalent to wrapping all your +path templates in the ``%asciify{}`` :ref:`template function +<template-functions>`. -This uses the `unidecode module`_ which is language agnostic, so some -characters may be transliterated from a different language than expected. -For example, Japanese kanji will usually use their Chinese readings. +This uses the `unidecode module <https://pypi.org/project/Unidecode>`__ which is +language agnostic, so some characters may be transliterated from a different +language than expected. For example, Japanese kanji will usually use their +Chinese readings. Default: ``no``. -.. _unidecode module: https://pypi.org/project/Unidecode - - .. _art-filename: art_filename @@ -207,38 +211,37 @@ album's directory). threaded ~~~~~~~~ -Either ``yes`` or ``no``, indicating whether the autotagger should use -multiple threads. This makes things substantially faster by overlapping work: -for example, it can copy files for one album in parallel with looking up data -in MusicBrainz for a different album. You may want to disable this when -debugging problems with the autotagger. -Defaults to ``yes``. +Either ``yes`` or ``no``, indicating whether the autotagger should use multiple +threads. This makes things substantially faster by overlapping work: for +example, it can copy files for one album in parallel with looking up data in +MusicBrainz for a different album. You may want to disable this when debugging +problems with the autotagger. Defaults to ``yes``. +.. _format_item: .. _list_format_item: -.. _format_item: format_item ~~~~~~~~~~~ -Format to use when listing *individual items* with the :ref:`list-cmd` -command and other commands that need to print out items. Defaults to -``$artist - $album - $title``. The ``-f`` command-line option overrides -this setting. 
+Format to use when listing *individual items* with the :ref:`list-cmd` command +and other commands that need to print out items. Defaults to ``$artist - $album +- $title``. The ``-f`` command-line option overrides this setting. -It used to be named `list_format_item`. +It used to be named ``list_format_item``. + +.. _format_album: .. _list_format_album: -.. _format_album: format_album ~~~~~~~~~~~~ -Format to use when listing *albums* with :ref:`list-cmd` and other -commands. Defaults to ``$albumartist - $album``. The ``-f`` command-line -option overrides this setting. +Format to use when listing *albums* with :ref:`list-cmd` and other commands. +Defaults to ``$albumartist - $album``. The ``-f`` command-line option overrides +this setting. -It used to be named `list_format_album`. +It used to be named ``list_format_album``. .. _sort_item: @@ -260,10 +263,11 @@ Default sort order to use when fetching albums from the database. Defaults to sort_case_insensitive ~~~~~~~~~~~~~~~~~~~~~ + Either ``yes`` or ``no``, indicating whether the case should be ignored when sorting lexicographic fields. When set to ``no``, lower-case values will be -placed after upper-case values (e.g., *Bar Qux foo*), while ``yes`` would -result in the more expected *Bar foo Qux*. Default: ``yes``. +placed after upper-case values (e.g., *Bar Qux foo*), while ``yes`` would result +in the more expected *Bar foo Qux*. Default: ``yes``. .. _original_date: @@ -281,11 +285,13 @@ That is, if this option is turned on, then ``year`` will always equal overwrite_null ~~~~~~~~~~~~~~ -This confusingly-named option indicates which fields have meaningful `null` values. If -an album or track field is in the corresponding list, then an existing value for this -field in an item in the database can be overwritten with `null`. By default, however, -`null` is interpreted as information about the field being unavailable, so it would not -overwrite existing values. 
For example:: +This confusingly-named option indicates which fields have meaningful ``null`` +values. If an album or track field is in the corresponding list, then an +existing value for this field in an item in the database can be overwritten with +``null``. By default, however, ``null`` is interpreted as information about the +field being unavailable, so it would not overwrite existing values. For example: + +:: overwrite_null: album: ["albumid"] @@ -314,15 +320,16 @@ first (non-pregap) track on each disc always has track number 1. If you enable ``per_disc_numbering``, you will likely want to change your :ref:`path-format-config` also to include ``$disc`` before ``$track`` to make filenames sort correctly in album directories. For example, you might want to -use a path format like this:: +use a path format like this: + +:: paths: default: $albumartist/$album%aunique{}/$disc-$track $title -When this option is off (the default), even "pregap" hidden tracks are -numbered from one, not zero, so other track numbers may appear to be bumped up -by one. When it is on, the pregap track for each disc can be numbered zero. - +When this option is off (the default), even "pregap" hidden tracks are numbered +from one, not zero, so other track numbers may appear to be bumped up by one. +When it is on, the pregap track for each disc can be numbered zero. .. _config-aunique: @@ -332,7 +339,9 @@ aunique These options are used to generate a string that is guaranteed to be unique among all albums in the library who share the same set of keys. -The defaults look like this:: +The defaults look like this: + +:: aunique: keys: albumartist album @@ -341,7 +350,6 @@ The defaults look like this:: See :ref:`aunique` for more details. - .. _config-sunique: sunique @@ -351,7 +359,9 @@ Like :ref:`config-aunique` above for albums, these options control the generation of a unique string to disambiguate *singletons* that share similar metadata. 
-The defaults look like this:: +The defaults look like this: + +:: sunique: keys: artist title @@ -360,18 +370,16 @@ The defaults look like this:: See :ref:`sunique` for more details. - .. _terminal_encoding: terminal_encoding ~~~~~~~~~~~~~~~~~ -The text encoding, as `known to Python`_, to use for messages printed to the -standard output. It's also used to read messages from the standard input. -By default, this is determined automatically from the locale -environment variables. - -.. _known to python: https://docs.python.org/2/library/codecs.html#standard-encodings +The text encoding, as `known to Python +<https://docs.python.org/3/library/codecs.html#standard-encodings>`__, to use +for messages printed to the standard output. It's also used to read messages +from the standard input. By default, this is determined automatically from the +locale environment variables. .. _clutter: @@ -380,7 +388,7 @@ clutter When beets imports all the files in a directory, it tries to remove the directory if it's empty. A directory is considered empty if it only contains -files whose names match the glob patterns in `clutter`, which should be a list +files whose names match the glob patterns in ``clutter``, which should be a list of strings. The default list consists of "Thumbs.DB" and ".DS_Store". The importer only removes recursively searched subdirectories---the top-level @@ -400,9 +408,9 @@ maximum. id3v23 ~~~~~~ -By default, beets writes MP3 tags using the ID3v2.4 standard, the latest -version of ID3. Enable this option to instead use the older ID3v2.3 standard, -which is preferred by certain older software such as Windows Media Player. +By default, beets writes MP3 tags using the ID3v2.4 standard, the latest version +of ID3. Enable this option to instead use the older ID3v2.3 standard, which is +preferred by certain older software such as Windows Media Player. .. _va_name: @@ -418,23 +426,14 @@ Artists'`` (the MusicBrainz standard). 
Affects other sources, such as UI Options ---------- -The options that allow for customization of the visual appearance -of the console interface. - -These options are available in this section: +The options that allow for customization of the visual appearance of the console +interface. color ~~~~~ -Either ``yes`` or ``no``; whether to use color in console output (currently -only in the ``import`` command). Turn this off if your terminal doesn't -support ANSI colors. - -.. note:: - - The `color` option was previously a top-level configuration. This is - still respected, but a deprecation message will be shown until your - top-level `color` configuration has been nested under `ui`. +Either ``yes`` or ``no``; whether to use color in console output. Turn this off +if your terminal doesn't support ANSI colors. .. _colors: @@ -442,8 +441,9 @@ colors ~~~~~~ The colors that are used throughout the user interface. These are only used if -the ``color`` option is set to ``yes``. For example, you might have a section -in your configuration file that looks like this:: +the ``color`` option is set to ``yes``. See the default configuration: + +.. 
code-block:: yaml ui: colors: @@ -455,35 +455,35 @@ in your configuration file that looks like this:: action_default: ['bold', 'cyan'] action: ['bold', 'cyan'] # New colors after UI overhaul - text: ['normal'] text_faint: ['faint'] import_path: ['bold', 'blue'] import_path_items: ['bold', 'blue'] - added: ['green'] - removed: ['red'] changed: ['yellow'] - added_highlight: ['bold', 'green'] - removed_highlight: ['bold', 'red'] - changed_highlight: ['bold', 'yellow'] - text_diff_added: ['bold', 'red'] + text_diff_added: ['bold', 'green'] text_diff_removed: ['bold', 'red'] - text_diff_changed: ['bold', 'red'] action_description: ['white'] -Available colors: black, darkred, darkgreen, brown (darkyellow), darkblue, -purple (darkmagenta), teal (darkcyan), lightgray, darkgray, red, green, -yellow, blue, fuchsia (magenta), turquoise (cyan), white +Available attributes: -Legacy UI colors config directive used strings. If any colors value is still a -string instead of a list, it will be translated to list automatically. For -example ``blue`` will become ``['blue']``. +Foreground colors + ``black``, ``red``, ``green``, ``yellow``, ``blue``, ``magenta``, ``cyan``, + ``white`` + +Background colors + ``bg_black``, ``bg_red``, ``bg_green``, ``bg_yellow``, ``bg_blue``, + ``bg_magenta``, ``bg_cyan``, ``bg_white`` + +Text styles + ``normal``, ``bold``, ``faint``, ``underline``, ``reverse`` terminal_width ~~~~~~~~~~~~~~ Controls line wrapping on non-Unix systems. On Unix systems, the width of the terminal is detected automatically. If this fails, or on non-Unix systems, the -specified value is used as a fallback. Defaults to ``80`` characters:: +specified value is used as a fallback. Defaults to ``80`` characters: + +.. code-block:: yaml ui: terminal_width: 80 @@ -495,9 +495,11 @@ Beets compares the length of the imported track with the length the metadata source provides. If any tracks differ by at least ``length_diff_thresh`` seconds, they will be colored with ``text_highlight``. 
Below this threshold, different track lengths are colored with ``text_highlight_minor``. -``length_diff_thresh`` does not impact which releases are selected in -autotagger matching or distance score calculation (see :ref:`match-config`, -``distance_weights`` and :ref:`colors`):: +``length_diff_thresh`` does not impact which releases are selected in autotagger +matching or distance score calculation (see :ref:`match-config`, +``distance_weights`` and :ref:`colors`): + +.. code-block:: yaml ui: length_diff_thresh: 10.0 @@ -506,26 +508,30 @@ import ~~~~~~ When importing, beets will read several options to configure the visuals of the -import dialogue. There are two layouts controlling how horizontal space and -line wrapping is dealt with: ``column`` and ``newline``. The indentation of the -respective elements of the import UI can also be configured. For example -setting ``4`` for ``match_header`` will indent the very first block of a -proposed match by five characters in the terminal:: +import dialogue. There are two layouts controlling how horizontal space and line +wrapping is dealt with: ``column`` and ``newline``. The indentation of the +respective elements of the import UI can also be configured. For example setting +``2`` for ``match_header`` will indent the very first block of a proposed match +by two characters in the terminal: + +.. code-block:: yaml ui: import: indentation: - match_header: 4 - match_details: 4 - match_tracklist: 7 - layout: newline + match_header: 2 + match_details: 2 + match_tracklist: 5 + layout: column Importer Options ---------------- The options that control the :ref:`import-cmd` command are indented under the ``import:`` key. For example, you might have a section in your configuration -file that looks like this:: +file that looks like this: + +:: import: write: yes @@ -540,39 +546,38 @@ write ~~~~~ Either ``yes`` or ``no``, controlling whether metadata (e.g., ID3) tags are -written to files when using ``beet import``. 
Defaults to ``yes``. The ``-w`` -and ``-W`` command-line options override this setting. +written to files when using ``beet import``. Defaults to ``yes``. The ``-w`` and +``-W`` command-line options override this setting. .. _config-import-copy: copy ~~~~ -Either ``yes`` or ``no``, indicating whether to **copy** files into the -library directory when using ``beet import``. Defaults to ``yes``. Can be -overridden with the ``-c`` and ``-C`` command-line options. +Either ``yes`` or ``no``, indicating whether to **copy** files into the library +directory when using ``beet import``. Defaults to ``yes``. Can be overridden +with the ``-c`` and ``-C`` command-line options. -The option is ignored if ``move`` is enabled (i.e., beets can move or -copy files but it doesn't make sense to do both). +The option is ignored if ``move`` is enabled (i.e., beets can move or copy files +but it doesn't make sense to do both). .. _config-import-move: move ~~~~ -Either ``yes`` or ``no``, indicating whether to **move** files into the -library directory when using ``beet import``. -Defaults to ``no``. +Either ``yes`` or ``no``, indicating whether to **move** files into the library +directory when using ``beet import``. Defaults to ``no``. -The effect is similar to the ``copy`` option but you end up with only -one copy of the imported file. ("Moving" works even across filesystems; if -necessary, beets will copy and then delete when a simple rename is -impossible.) Moving files can be risky—it's a good idea to keep a backup in -case beets doesn't do what you expect with your files. +The effect is similar to the ``copy`` option but you end up with only one copy +of the imported file. ("Moving" works even across filesystems; if necessary, +beets will copy and then delete when a simple rename is impossible.) Moving +files can be risky—it's a good idea to keep a backup in case beets doesn't do +what you expect with your files. 
-This option *overrides* ``copy``, so enabling it will always move -(and not copy) files. The ``-c`` switch to the ``beet import`` command, -however, still takes precedence. +This option *overrides* ``copy``, so enabling it will always move (and not copy) +files. The ``-c`` switch to the ``beet import`` command, however, still takes +precedence. .. _link: @@ -595,11 +600,11 @@ hardlink ~~~~~~~~ Either ``yes`` or ``no``, indicating whether to use hard links instead of -moving, copying, or symlinking files. (It conflicts with the ``move``, -``copy``, and ``link`` options.) Defaults to ``no``. +moving, copying, or symlinking files. (It conflicts with the ``move``, ``copy``, +and ``link`` options.) Defaults to ``no``. As with symbolic links (see :ref:`link`, above), this will not work on Windows -and you will want to set ``write`` to ``no``. Otherwise, metadata on the +and you will want to set ``write`` to ``no``. Otherwise, metadata on the original file will be modified. .. _reflink: @@ -608,51 +613,49 @@ reflink ~~~~~~~ Either ``yes``, ``no``, or ``auto``, indicating whether to use copy-on-write -`file clones`_ (a.k.a. "reflinks") instead of copying or moving files. -The ``auto`` option uses reflinks when possible and falls back to plain -copying when necessary. -Defaults to ``no``. +`file clones`_ (a.k.a. "reflinks") instead of copying or moving files. The +``auto`` option uses reflinks when possible and falls back to plain copying when +necessary. Defaults to ``no``. -This kind of clone is only available on certain filesystems: for example, -btrfs and APFS. For more details on filesystem support, see the `pyreflink`_ +This kind of clone is only available on certain filesystems: for example, btrfs +and APFS. For more details on filesystem support, see the pyreflink_ documentation. Note that you need to install ``pyreflink``, either through ``python -m pip install beets[reflink]`` or ``python -m pip install reflink``. 
-The option is ignored if ``move`` is enabled (i.e., beets can move or -copy files but it doesn't make sense to do both). +The option is ignored if ``move`` is enabled (i.e., beets can move or copy files +but it doesn't make sense to do both). .. _file clones: https://en.wikipedia.org/wiki/Copy-on-write + .. _pyreflink: https://reflink.readthedocs.io/en/latest/ resume ~~~~~~ -Either ``yes``, ``no``, or ``ask``. Controls whether interrupted imports -should be resumed. "Yes" means that imports are always resumed when -possible; "no" means resuming is disabled entirely; "ask" (the default) -means that the user should be prompted when resuming is possible. The ``-p`` -and ``-P`` flags correspond to the "yes" and "no" settings and override this -option. +Either ``yes``, ``no``, or ``ask``. Controls whether interrupted imports should +be resumed. "Yes" means that imports are always resumed when possible; "no" +means resuming is disabled entirely; "ask" (the default) means that the user +should be prompted when resuming is possible. The ``-p`` and ``-P`` flags +correspond to the "yes" and "no" settings and override this option. .. _incremental: incremental ~~~~~~~~~~~ -Either ``yes`` or ``no``, controlling whether imported directories are -recorded and whether these recorded directories are skipped. This -corresponds to the ``-i`` flag to ``beet import``. +Either ``yes`` or ``no``, controlling whether imported directories are recorded +and whether these recorded directories are skipped. This corresponds to the +``-i`` flag to ``beet import``. .. _incremental_skip_later: incremental_skip_later ~~~~~~~~~~~~~~~~~~~~~~ -Either ``yes`` or ``no``, controlling whether skipped directories are -recorded in the incremental list. When set to ``yes``, skipped -directories won't be recorded, and beets will try to import them again -later. When set to ``no``, skipped directories will be recorded, and -skipped later. Defaults to ``no``. 
+Either ``yes`` or ``no``, controlling whether skipped directories are recorded +in the incremental list. When set to ``yes``, skipped directories won't be +recorded, and beets will try to import them again later. When set to ``no``, +skipped directories will be recorded, and skipped later. Defaults to ``no``. .. _from_scratch: @@ -677,9 +680,9 @@ corresponds to the ``--quiet`` flag to ``beet import``. quiet_fallback ~~~~~~~~~~~~~~ -Either ``skip`` (default) or ``asis``, specifying what should happen in -quiet mode (see the ``-q`` flag to ``import``, above) when there is no -strong recommendation. +Either ``skip`` (default) or ``asis``, specifying what should happen in quiet +mode (see the ``-q`` flag to ``import``, above) when there is no strong +recommendation. .. _none_rec_action: @@ -694,19 +697,18 @@ interactively. timid ~~~~~ -Either ``yes`` or ``no``, controlling whether the importer runs in *timid* -mode, in which it asks for confirmation on every autotagging match, even the -ones that seem very close. Defaults to ``no``. The ``-t`` command-line flag -controls the same setting. +Either ``yes`` or ``no``, controlling whether the importer runs in *timid* mode, +in which it asks for confirmation on every autotagging match, even the ones that +seem very close. Defaults to ``no``. The ``-t`` command-line flag controls the +same setting. .. _import_log: log ~~~ -Specifies a filename where the importer's log should be kept. By default, -no log is written. This can be overridden with the ``-l`` flag to -``import``. +Specifies a filename where the importer's log should be kept. By default, no log +is written. This can be overridden with the ``-l`` flag to ``import``. .. _default_action: @@ -726,8 +728,8 @@ languages A list of locale names to search for preferred aliases. For example, setting this to ``en`` uses the transliterated artist name "Pyotr Ilyich Tchaikovsky" instead of the Cyrillic script for the composer's name when tagging from -MusicBrainz. 
You can use a space-separated list of language abbreviations, like -``en jp es``, to specify a preference order. Defaults to an empty list, meaning +MusicBrainz. You can use a space-separated list of language abbreviations, like +``en jp es``, to specify a preference order. Defaults to an empty list, meaning that no language is preferred. .. _ignored_alias_types: @@ -737,7 +739,7 @@ ignored_alias_types A list of alias types to be ignored when importing new items. -See the `MusicBrainz Documentation` for more information on aliases. +See the ``MusicBrainz Documentation`` for more information on aliases. .._MusicBrainz Documentation: https://musicbrainz.org/doc/Aliases @@ -758,8 +760,8 @@ group_albums By default, the beets importer groups tracks into albums based on the directories they reside in. This option instead uses files' metadata to -partition albums. Enable this option if you have directories that contain -tracks from many albums mixed together. +partition albums. Enable this option if you have directories that contain tracks +from many albums mixed together. The ``--group-albums`` or ``-g`` option to the :ref:`import-cmd` command is equivalent, and the *G* interactive option invokes the same workflow. @@ -771,10 +773,10 @@ Default: ``no``. autotag ~~~~~~~ -By default, the beets importer always attempts to autotag new music. If -most of your collection consists of obscure music, you may be interested in -disabling autotagging by setting this option to ``no``. (You can re-enable it -with the ``-a`` flag to the :ref:`import-cmd` command.) +By default, the beets importer always attempts to autotag new music. If most of +your collection consists of obscure music, you may be interested in disabling +autotagging by setting this option to ``no``. (You can re-enable it with the +``-a`` flag to the :ref:`import-cmd` command.) Default: ``yes``. @@ -783,13 +785,14 @@ Default: ``yes``. duplicate_keys ~~~~~~~~~~~~~~ -The fields used to find duplicates when importing. 
-There are two sub-values here: ``album`` and ``item``.
-Each one is a list of field names; if an existing object (album or item) in
-the library matches the new object on all of these fields, the importer will
-consider it a duplicate.
+The fields used to find duplicates when importing. There are two sub-values
+here: ``album`` and ``item``. Each one is a list of field names; if an existing
+object (album or item) in the library matches the new object on all of these
+fields, the importer will consider it a duplicate.
 
-Default::
+Default:
+
+::
 
     album: albumartist album
     item: artist title
@@ -799,12 +802,11 @@ Default::
 duplicate_action
 ~~~~~~~~~~~~~~~~
 
-Either ``skip``, ``keep``, ``remove``, ``merge`` or ``ask``.
-Controls how duplicates are treated in import task.
-"skip" means that new item(album or track) will be skipped;
-"keep" means keep both old and new items; "remove" means remove old
-item; "merge" means merge into one album; "ask" means the user
-should be prompted for the action each time. The default is ``ask``.
+Either ``skip``, ``keep``, ``remove``, ``merge`` or ``ask``. Controls how
+duplicates are treated in an import task. "skip" means that new item (album or
+track) will be skipped; "keep" means keep both old and new items; "remove" means
+remove old item; "merge" means merge into one album; "ask" means the user should
+be prompted for the action each time. The default is ``ask``.
 
 .. _duplicate_verbose_prompt:
 
@@ -813,9 +815,8 @@ duplicate_verbose_prompt
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
 Usually when duplicates are detected during import, information about the
 existing and the newly imported album is summarized. Enabling this option also
-lists details on individual tracks. The :ref:`format_item setting
-<format_item>` is applied, which would, considering the default, look like
-this:
+lists details on individual tracks. The :ref:`format_item setting <format_item>`
+is applied, which would, considering the default, look like this:
 
 .. 
code-block:: console
@@ -843,8 +844,10 @@ Default: ``no``.
 set_fields
 ~~~~~~~~~~
 
-A dictionary indicating fields to set to values for newly imported music.
-Here's an example::
+A dictionary indicating fields to set to values for newly imported music. Here's
+an example:
+
+::
 
     set_fields:
       genre: 'To Listen'
@@ -853,11 +856,11 @@ Here's an example::
 Other field/value pairs supplied via the ``--set`` option on the command-line
 override any settings here for fields with the same name.
 
-Values support the same template syntax as beets'
-:doc:`path formats <pathformat>`.
+Values support the same template syntax as beets' :doc:`path formats
+<pathformat>`.
 
-Fields are set on both the album and each individual track of the album.
-Fields are persisted to the media files of each track.
+Fields are set on both the album and each individual track of the album. Fields
+are persisted to the media files of each track.
 
 Default: ``{}`` (empty).
 
@@ -866,123 +869,14 @@ Default: ``{}`` (empty).
 singleton_album_disambig
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
-During singleton imports and if the metadata source provides it, album names
-are appended to the disambiguation string of matching track candidates. For
-example: ``The Artist - The Title (Discogs, Index 3, Track B1, [The Album]``.
-This feature is currently supported by the :doc:`/plugins/discogs` and the
+During singleton imports and if the metadata source provides it, album names are
+appended to the disambiguation string of matching track candidates. For example:
+``The Artist - The Title (Discogs, Index 3, Track B1, [The Album])``. This
+feature is currently supported by the :doc:`/plugins/discogs` and the
 :doc:`/plugins/spotify`.
 
 Default: ``yes``.
 
-.. _musicbrainz-config:
-
-MusicBrainz Options
--------------------
-
-You can instruct beets to use `your own MusicBrainz database`_ instead of
-the `main server`_. 
Use the ``host``, ``https`` and ``ratelimit`` options -under a ``musicbrainz:`` header, like so:: - - musicbrainz: - host: localhost:5000 - https: no - ratelimit: 100 - -The ``host`` key, of course, controls the Web server hostname (and port, -optionally) that will be contacted by beets (default: musicbrainz.org). -The ``https`` key makes the client use HTTPS instead of HTTP. This setting applies -only to custom servers. The official MusicBrainz server always uses HTTPS. (Default: no.) -The server must have search indices enabled (see `Building search indexes`_). - -The ``ratelimit`` option, an integer, controls the number of Web service requests -per second (default: 1). **Do not change the rate limit setting** if you're -using the main MusicBrainz server---on this public server, you're `limited`_ -to one request per second. - -.. _your own MusicBrainz database: https://musicbrainz.org/doc/MusicBrainz_Server/Setup -.. _main server: https://musicbrainz.org/ -.. _limited: https://musicbrainz.org/doc/XML_Web_Service/Rate_Limiting -.. _Building search indexes: https://musicbrainz.org/doc/Development/Search_server_setup - -.. _musicbrainz.enabled: - -enabled -~~~~~~~ - -This option allows you to disable using MusicBrainz as a metadata source. This applies -if you use plugins that fetch data from alternative sources and should make the import -process quicker. - -Default: ``yes``. - -.. _searchlimit: - -searchlimit -~~~~~~~~~~~ - -The number of matches returned when sending search queries to the -MusicBrainz server. - -Default: ``5``. - -.. _extra_tags: - -extra_tags -~~~~~~~~~~ - -By default, beets will use only the artist, album, and track count to query -MusicBrainz. Additional tags to be queried can be supplied with the -``extra_tags`` setting. For example:: - - musicbrainz: - extra_tags: [year, catalognum, country, media, label] - -This setting should improve the autotagger results if the metadata with the -given tags match the metadata returned by MusicBrainz. 
- -Note that the only tags supported by this setting are the ones listed in the -above example. - -Default: ``[]`` - -.. _genres: - -genres -~~~~~~ - -Use MusicBrainz genre tags to populate (and replace if it's already set) the -``genre`` tag. This will make it a list of all the genres tagged for the -release and the release-group on MusicBrainz, separated by "; " and sorted by -the total number of votes. -Default: ``no`` - -.. _musicbrainz.external_ids: - -external_ids -~~~~~~~~~~~~ - -Set any of the ``external_ids`` options to ``yes`` to enable the MusicBrainz -importer to look for links to related metadata sources. If such a link is -available the release ID will be extracted from the URL provided and imported -to the beets library:: - - musicbrainz: - external_ids: - discogs: yes - spotify: yes - bandcamp: yes - beatport: yes - deezer: yes - tidal: yes - - -The library fields of the corresponding :ref:`autotagger_extensions` are used -to save the data (``discogs_albumid``, ``bandcamp_album_id``, -``spotify_album_id``, ``beatport_album_id``, ``deezer_album_id``, -``tidal_album_id``). On re-imports existing data will be overwritten. - -The default of all options is ``no``. - .. _match-config: Autotagger Matching Options @@ -992,13 +886,15 @@ You can configure some aspects of the logic beets uses when automatically matching MusicBrainz results under the ``match:`` section. To control how *tolerant* the autotagger is of differences, use the ``strong_rec_thresh`` option, which reflects the distance threshold below which beets will make a -"strong recommendation" that the metadata be used. Strong recommendations -are accepted automatically (except in "timid" mode), so you can use this to -make beets ask your opinion more or less often. +"strong recommendation" that the metadata be used. Strong recommendations are +accepted automatically (except in "timid" mode), so you can use this to make +beets ask your opinion more or less often. 
The threshold is a *distance* value between 0.0 and 1.0, so you can think of it as the opposite of a *similarity* value. For example, if you want to -automatically accept any matches above 90% similarity, use:: +automatically accept any matches above 90% similarity, use: + +:: match: strong_rec_thresh: 0.10 @@ -1018,13 +914,15 @@ max_rec As mentioned above, autotagger matches have *recommendations* that control how the UI behaves for a certain quality of match. The recommendation for a certain -match is based on the overall distance calculation. But you can also control -the recommendation when a specific distance penalty is applied by defining -*maximum* recommendations for each field: +match is based on the overall distance calculation. But you can also control the +recommendation when a specific distance penalty is applied by defining *maximum* +recommendations for each field: To define maxima, use keys under ``max_rec:`` in the ``match`` section. The defaults are "medium" for missing and unmatched tracks and "strong" (i.e., no -maximum) for everything else:: +maximum) for everything else: + +:: match: max_rec: @@ -1032,30 +930,30 @@ maximum) for everything else:: unmatched_tracks: medium If a recommendation is higher than the configured maximum and the indicated -penalty is applied, the recommendation is downgraded. The setting for -each field can be one of ``none``, ``low``, ``medium`` or ``strong``. When the -maximum recommendation is ``strong``, no "downgrading" occurs. The available -penalty names here are: +penalty is applied, the recommendation is downgraded. The setting for each field +can be one of ``none``, ``low``, ``medium`` or ``strong``. When the maximum +recommendation is ``strong``, no "downgrading" occurs. 
The available penalty +names here are: -* source -* artist -* album -* media -* mediums -* year -* country -* label -* catalognum -* albumdisambig -* album_id -* tracks -* missing_tracks -* unmatched_tracks -* track_title -* track_artist -* track_index -* track_length -* track_id +- data_source +- artist +- album +- media +- mediums +- year +- country +- label +- catalognum +- albumdisambig +- album_id +- tracks +- missing_tracks +- unmatched_tracks +- track_title +- track_artist +- track_index +- track_length +- track_id .. _preferred: @@ -1069,13 +967,15 @@ media types. A distance penalty will be applied if the country or media type from the match metadata doesn't match. The specified values are preferred in descending order (i.e., the first item will be most preferred). Each item may be a regular -expression, and will be matched case insensitively. The number of media will -be stripped when matching preferred media (e.g. "2x" in "2xCD"). +expression, and will be matched case insensitively. The number of media will be +stripped when matching preferred media (e.g. "2x" in "2xCD"). You can also tell the autotagger to prefer matches that have a release year closest to the original year for an album. -Here's an example:: +Here's an example: + +:: match: preferred: @@ -1091,14 +991,20 @@ ignored ~~~~~~~ You can completely avoid matches that have certain penalties applied by adding -the penalty name to the ``ignored`` setting:: +the penalty name to the ``ignored`` setting: + +:: match: ignored: missing_tracks unmatched_tracks The available penalties are the same as those for the :ref:`max_rec` setting. -For example, setting ``ignored: missing_tracks`` will skip any album matches where your audio files are missing some of the tracks. The importer will not attempt to display these matches. It does not ignore the fact that the album is missing tracks, which would allow these matches to apply more easily. To do that, you'll want to adjust the penalty for missing tracks. 
+For example, setting ``ignored: missing_tracks`` will skip any album matches +where your audio files are missing some of the tracks. The importer will not +attempt to display these matches. It does not ignore the fact that the album is +missing tracks, which would allow these matches to apply more easily. To do +that, you'll want to adjust the penalty for missing tracks. .. _required: @@ -1106,7 +1012,9 @@ required ~~~~~~~~ You can avoid matches that lack certain required information. Add the tags you -want to enforce to the ``required`` setting:: +want to enforce to the ``required`` setting: + +:: match: required: year label catalognum country @@ -1120,7 +1028,9 @@ ignored_media A list of media (i.e., formats) in metadata databases to ignore when matching music. You can use this to ignore all media that usually contain video instead -of audio, for example:: +of audio, for example: + +:: match: ignored_media: ['Data CD', 'DVD', 'DVD-Video', 'Blu-ray', 'HD-DVD', @@ -1128,11 +1038,10 @@ of audio, for example:: No formats are ignored by default. - .. _ignore_data_tracks: ignore_data_tracks -~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~ By default, audio files contained in data tracks within a release are included in the album's tracklist. If you want them to be included, set it ``no``. @@ -1155,27 +1064,31 @@ Default: ``yes``. Path Format Configuration ------------------------- -You can also configure the directory hierarchy beets uses to store music. -These settings appear under the ``paths:`` key. Each string is a template -string that can refer to metadata fields like ``$artist`` or ``$title``. The -filename extension is added automatically. At the moment, you can specify three -special paths: ``default`` for most releases, ``comp`` for "various artist" -releases with no dominant artist, and ``singleton`` for non-album tracks. The -defaults look like this:: +You can also configure the directory hierarchy beets uses to store music. 
These +settings appear under the ``paths:`` key. Each string is a template string that +can refer to metadata fields like ``$artist`` or ``$title``. The filename +extension is added automatically. At the moment, you can specify three special +paths: ``default`` for most releases, ``comp`` for "various artist" releases +with no dominant artist, and ``singleton`` for non-album tracks. The defaults +look like this: + +:: paths: default: $albumartist/$album%aunique{}/$track $title singleton: Non-Album/$artist/$title comp: Compilations/$album%aunique{}/$track $title -Note the use of ``$albumartist`` instead of ``$artist``; this ensures that albums -will be well-organized. For more about these format strings, see +Note the use of ``$albumartist`` instead of ``$artist``; this ensures that +albums will be well-organized. For more about these format strings, see :doc:`pathformat`. The ``aunique{}`` function ensures that identically-named albums are placed in different directories; see :ref:`aunique` for details. In addition to ``default``, ``comp``, and ``singleton``, you can condition path queries based on beets queries (see :doc:`/reference/query`). This means that a -config file like this:: +config file like this: + +:: paths: albumtype:soundtrack: Soundtracks/$album/$track $title @@ -1189,7 +1102,6 @@ fact, just shorthand for the explicit queries ``singleton:true`` and ``comp:true``. In contrast, ``default`` is special and has no query equivalent: the ``default`` format is only used if no queries match. - Configuration Location ---------------------- @@ -1211,35 +1123,36 @@ Command-Line Option ~~~~~~~~~~~~~~~~~~~ Alternatively, you can use the ``--config`` command-line option to indicate a -YAML file containing options that will then be merged with your existing -options (from ``BEETSDIR`` or the default locations). This is useful if you -want to keep your configuration mostly the same but modify a few options as a -batch. 
For example, you might have different strategies for importing files, -each with a different set of importer options. +YAML file containing options that will then be merged with your existing options +(from ``BEETSDIR`` or the default locations). This is useful if you want to keep +your configuration mostly the same but modify a few options as a batch. For +example, you might have different strategies for importing files, each with a +different set of importer options. Default Location ~~~~~~~~~~~~~~~~ -In the absence of a ``BEETSDIR`` variable, beets searches a few places for -your configuration, depending on the platform: +In the absence of a ``BEETSDIR`` variable, beets searches a few places for your +configuration, depending on the platform: - On Unix platforms, including OS X:``~/.config/beets`` and then ``$XDG_CONFIG_DIR/beets``, if the environment variable is set. - On OS X, we also search ``~/Library/Application Support/beets`` before the Unixy locations. -- On Windows: ``~\AppData\Roaming\beets``, and then ``%APPDATA%\beets``, if - the environment variable is set. +- On Windows: ``~\AppData\Roaming\beets``, and then ``%APPDATA%\beets``, if the + environment variable is set. Beets uses the first directory in your platform's list that contains ``config.yaml``. If no config file exists, the last path in the list is used. - .. _config-example: Example ------- -Here's an example file:: +Here's an example file: + +:: directory: /var/mp3 import: diff --git a/docs/reference/index.rst b/docs/reference/index.rst index 42600dc93..1f0436659 100644 --- a/docs/reference/index.rst +++ b/docs/reference/index.rst @@ -6,9 +6,9 @@ started with beets as a new user, though, you may want to read the :doc:`/guides/main` guide first. .. 
toctree:: - :maxdepth: 2 + :maxdepth: 2 - cli - config - pathformat - query + cli + config + pathformat + query diff --git a/docs/reference/pathformat.rst b/docs/reference/pathformat.rst index d80bdec34..10dd3ae05 100644 --- a/docs/reference/pathformat.rst +++ b/docs/reference/pathformat.rst @@ -1,30 +1,27 @@ Path Formats ============ -The ``paths:`` section of the config file (see :doc:`config`) lets -you specify the directory and file naming scheme for your music library. -Templates substitute symbols like ``$title`` (any field value prefixed by ``$``) -with the appropriate value from the track's metadata. Beets adds the filename -extension automatically. +The ``paths:`` section of the config file (see :doc:`config`) lets you specify +the directory and file naming scheme for your music library. Templates +substitute symbols like ``$title`` (any field value prefixed by ``$``) with the +appropriate value from the track's metadata. Beets adds the filename extension +automatically. -For example, consider this path format string: -``$albumartist/$album/$track $title`` +For example, consider this path format string: ``$albumartist/$album/$track +$title`` Here are some paths this format will generate: -* ``Yeah Yeah Yeahs/It's Blitz!/01 Zero.mp3`` - -* ``Spank Rock/YoYoYoYoYo/11 Competition.mp3`` - -* ``The Magnetic Fields/Realism/01 You Must Be Out of Your Mind.mp3`` +- ``Yeah Yeah Yeahs/It's Blitz!/01 Zero.mp3`` +- ``Spank Rock/YoYoYoYoYo/11 Competition.mp3`` +- ``The Magnetic Fields/Realism/01 You Must Be Out of Your Mind.mp3`` Because ``$`` is used to delineate a field reference, you can use ``$$`` to emit a dollars sign. As with `Python template strings`_, ``${title}`` is equivalent to ``$title``; you can use this if you need to separate a field name from the text that follows it. -.. _Python template strings: https://docs.python.org/library/string.html#template-strings - +.. 
_python template strings: https://docs.python.org/library/string.html#template-strings A Note About Artists -------------------- @@ -37,11 +34,10 @@ Continuing with the Stop Making Sense example, you'll end up with most of the tracks in a "Talking Heads" directory and one in a "Tom Tom Club" directory. You probably don't want that! So use ``$albumartist``. -.. _Stop Making Sense: - https://musicbrainz.org/release/798dcaab-0f1a-4f02-a9cb-61d5b0ddfd36.html - -As a convenience, however, beets allows ``$albumartist`` to fall back to the value for ``$artist`` and vice-versa if one tag is present but the other is not. +.. _stop making sense: https://musicbrainz.org/release/798dcaab-0f1a-4f02-a9cb-61d5b0ddfd36.html +As a convenience, however, beets allows ``$albumartist`` to fall back to the +value for ``$artist`` and vice-versa if one tag is present but the other is not. .. _template-functions: @@ -58,47 +54,46 @@ track's artists. These functions are built in to beets: -* ``%lower{text}``: Convert ``text`` to lowercase. -* ``%upper{text}``: Convert ``text`` to UPPERCASE. -* ``%capitalize{text}``: Make the first letter of ``text`` UPPERCASE and the rest lowercase. -* ``%title{text}``: Convert ``text`` to Title Case. -* ``%left{text,n}``: Return the first ``n`` characters of ``text``. -* ``%right{text,n}``: Return the last ``n`` characters of ``text``. -* ``%if{condition,text}`` or ``%if{condition,truetext,falsetext}``: If - ``condition`` is nonempty (or nonzero, if it's a number), then returns - the second argument. Otherwise, returns the third argument if specified (or +- ``%lower{text}``: Convert ``text`` to lowercase. +- ``%upper{text}``: Convert ``text`` to UPPERCASE. +- ``%capitalize{text}``: Make the first letter of ``text`` UPPERCASE and the + rest lowercase. +- ``%title{text}``: Convert ``text`` to Title Case. +- ``%left{text,n}``: Return the first ``n`` characters of ``text``. +- ``%right{text,n}``: Return the last ``n`` characters of ``text``. 
+- ``%if{condition,text}`` or ``%if{condition,truetext,falsetext}``: If + ``condition`` is nonempty (or nonzero, if it's a number), then returns the + second argument. Otherwise, returns the third argument if specified (or nothing if ``falsetext`` is left off). -* ``%asciify{text}``: Convert non-ASCII characters to their ASCII equivalents. +- ``%asciify{text}``: Convert non-ASCII characters to their ASCII equivalents. For example, "café" becomes "cafe". Uses the mapping provided by the - `unidecode module`_. See the :ref:`asciify-paths` configuration - option. -* ``%aunique{identifiers,disambiguators,brackets}``: Provides a unique string - to disambiguate similar albums in the database. See :ref:`aunique`, below. -* ``%sunique{identifiers,disambiguators,brackets}``: Similarly, a unique string + `unidecode module`_. See the :ref:`asciify-paths` configuration option. +- ``%aunique{identifiers,disambiguators,brackets}``: Provides a unique string to + disambiguate similar albums in the database. See :ref:`aunique`, below. +- ``%sunique{identifiers,disambiguators,brackets}``: Similarly, a unique string to disambiguate similar singletons in the database. See :ref:`sunique`, below. -* ``%time{date_time,format}``: Return the date and time in any format accepted - by `strftime`_. For example, to get the year some music was added to your +- ``%time{date_time,format}``: Return the date and time in any format accepted + by strftime_. For example, to get the year some music was added to your library, use ``%time{$added,%Y}``. -* ``%first{text}``: Returns the first item, separated by ``;`` (a semicolon - followed by a space). - You can use ``%first{text,count,skip}``, where ``count`` is the number of - items (default 1) and ``skip`` is number to skip (default 0). You can also use - ``%first{text,count,skip,sep,join}`` where ``sep`` is the separator, like - ``;`` or ``/`` and join is the text to concatenate the items. 
-* ``%ifdef{field}``, ``%ifdef{field,truetext}`` or +- ``%first{text}``: Returns the first item, separated by ``;`` (a semicolon + followed by a space). You can use ``%first{text,count,skip}``, where ``count`` + is the number of items (default 1) and ``skip`` is number to skip (default 0). + You can also use ``%first{text,count,skip,sep,join}`` where ``sep`` is the + separator, like ``;`` or ``/`` and join is the text to concatenate the items. +- ``%ifdef{field}``, ``%ifdef{field,truetext}`` or ``%ifdef{field,truetext,falsetext}``: Checks if an flexible attribute ``field`` is defined. If it exists, then return ``truetext`` or ``field`` (default). Otherwise, returns ``falsetext``. The ``field`` should be entered without ``$``. Note that this doesn't work with built-in :ref:`itemfields`, as they are always defined. -.. _unidecode module: https://pypi.org/project/Unidecode .. _strftime: https://docs.python.org/3/library/time.html#time.strftime +.. _unidecode module: https://pypi.org/project/Unidecode + Plugins can extend beets with more template functions (see :ref:`templ_plugins`). - .. _aunique: Album Disambiguation @@ -113,32 +108,32 @@ disk. The ``aunique`` function detects situations where two albums have some identical fields and emits text from additional fields to disambiguate the albums. For example, if you have both Crystal Castles albums in your library, ``%aunique{}`` -will expand to "[2008]" for one album and "[2010]" for the other. The -function detects that you have two albums with the same artist and title but -that they have different release years. +will expand to "[2008]" for one album and "[2010]" for the other. The function +detects that you have two albums with the same artist and title but that they +have different release years. -For full flexibility, the ``%aunique`` function takes three arguments. The -first two are whitespace-separated lists of album field names: a set of -*identifiers* and a set of *disambiguators*. 
The third argument is a pair of -characters used to surround the disambiguator. +For full flexibility, the ``%aunique`` function takes three arguments. The first +two are whitespace-separated lists of album field names: a set of *identifiers* +and a set of *disambiguators*. The third argument is a pair of characters used +to surround the disambiguator. Any group of albums with identical values for all the identifiers will be considered "duplicates". Then, the function tries each disambiguator field, -looking for one that distinguishes each of the duplicate albums from each -other. The first such field is used as the result for ``%aunique``. If no field +looking for one that distinguishes each of the duplicate albums from each other. +The first such field is used as the result for ``%aunique``. If no field suffices, an arbitrary number is used to distinguish the two albums. -The default identifiers are ``albumartist album`` and the default -disambiguators are ``albumtype year label catalognum albumdisambig -releasegroupdisambig``. So you can get reasonable disambiguation -behavior if you just use ``%aunique{}`` with no parameters in your -path forms (as in the default path formats), but you can customize the -disambiguation if, for example, you include the year by default in -path formats. +The default identifiers are ``albumartist album`` and the default disambiguators +are ``albumtype year label catalognum albumdisambig releasegroupdisambig``. So +you can get reasonable disambiguation behavior if you just use ``%aunique{}`` +with no parameters in your path forms (as in the default path formats), but you +can customize the disambiguation if, for example, you include the year by +default in path formats. The default characters used as brackets are ``[]``. To change this, provide a -third argument to the ``%aunique`` function consisting of two characters: the left -and right brackets. Or, to turn off bracketing entirely, leave argument blank. 
+third argument to the ``%aunique`` function consisting of two characters: the +left and right brackets. Or, to turn off bracketing entirely, leave argument +blank. One caveat: When you import an album that is named identically to one already in your library, the *first* album—the one already in your library— will not @@ -154,12 +149,12 @@ Singleton Disambiguation ------------------------ It is also possible to have singleton tracks with the same name and the same -artist. Beets provides the ``%sunique{}`` template to avoid giving these -tracks the same file path. +artist. Beets provides the ``%sunique{}`` template to avoid giving these tracks +the same file path. -It has the same arguments as the :ref:`%aunique <aunique>` template, but the default -values are different. The default identifiers are ``artist title`` and the -default disambiguators are ``year trackdisambig``. +It has the same arguments as the :ref:`%aunique <aunique>` template, but the +default values are different. The default identifiers are ``artist title`` and +the default disambiguators are ``year trackdisambig``. Syntax Details -------------- @@ -168,16 +163,16 @@ The characters ``$``, ``%``, ``{``, ``}``, and ``,`` are "special" in the path template syntax. This means that, for example, if you want a ``%`` character to appear in your paths, you'll need to be careful that you don't accidentally write a function call. To escape any of these characters (except ``{``, and -``,`` outside a function argument), prefix it with a ``$``. For example, -``$$`` becomes ``$``; ``$%`` becomes ``%``, etc. The only exceptions are: +``,`` outside a function argument), prefix it with a ``$``. For example, ``$$`` +becomes ``$``; ``$%`` becomes ``%``, etc. The only exceptions are: -* ``${``, which is ambiguous with the variable reference syntax (like +- ``${``, which is ambiguous with the variable reference syntax (like ``${title}``). To insert a ``{`` alone, it's always sufficient to just type - ``{``. 
-* commas are used as argument separators in function calls. Inside of a + ``{``. You do, however, need to use ``$`` to escape a closing brace ``$}``. +- commas are used as argument separators in function calls. Inside of a function's argument, use ``$,`` to get a literal ``,`` character. Outside of - any function argument, escaping is not necessary: ``,`` by itself will - produce ``,`` in the output. + any function argument, escaping is not necessary: ``,`` by itself will produce + ``,`` in the output. If a value or function is undefined, the syntax is simply left unreplaced. For example, if you write ``$foo`` in a path template, this will yield ``$foo`` in @@ -191,7 +186,6 @@ your template. For example, the second parameter to ``%left`` must be an integer; if you write ``%left{foo,bar}``, this will be expanded to something like ``<ValueError: invalid literal for int()>``. - .. _itemfields: Available Values @@ -204,95 +198,97 @@ plugins can add new (or replace existing) template values (see Ordinary metadata: -* title -* artist -* artist_sort: The "sort name" of the track artist (e.g., "Beatles, The" or +- title +- artist +- artist_sort: The "sort name" of the track artist (e.g., "Beatles, The" or "White, Jack"). -* artist_credit: The track-specific `artist credit`_ name, which may be a +- artist_credit: The track-specific `artist credit`_ name, which may be a variation of the artist's "canonical" name. -* album -* albumartist: The artist for the entire album, which may be different from the +- album +- albumartist: The artist for the entire album, which may be different from the artists for the individual tracks. -* albumartist_sort -* albumartist_credit -* genre -* composer -* grouping -* year, month, day: The release date of the specific release. 
+- original_year, original_month, original_day: The release date of the original version of the album. -* track -* tracktotal -* disc -* disctotal -* lyrics -* comments -* bpm -* comp: Compilation flag. -* albumtype: The MusicBrainz album type; the MusicBrainz wiki has a `list of +- track +- tracktotal +- disc +- disctotal +- lyrics +- comments +- bpm +- comp: Compilation flag. +- albumtype: The MusicBrainz album type; the MusicBrainz wiki has a `list of type names`_. -* label -* asin -* catalognum -* script -* language -* country -* albumstatus -* media -* albumdisambig -* disctitle -* encoder +- label +- asin +- catalognum +- script +- language +- country +- albumstatus +- media +- albumdisambig +- disctitle +- encoder .. _artist credit: https://wiki.musicbrainz.org/Artist_Credit + .. _list of type names: https://musicbrainz.org/doc/Release_Group/Type Audio information: -* length (in seconds) -* bitrate (in kilobits per second, with units: e.g., "192kbps") -* bitrate_mode (e.g., "CBR", "VBR" or "ABR", only available for the MP3 format) -* encoder_info (e.g., "LAME 3.97.0", only available for some formats) -* encoder_settings (e.g., "-V2", only available for the MP3 format) -* format (e.g., "MP3" or "FLAC") -* channels -* bitdepth (only available for some formats) -* samplerate (in kilohertz, with units: e.g., "48kHz") +- length (in seconds) +- bitrate (in kilobits per second, with units: e.g., "192kbps") +- bitrate_mode (e.g., "CBR", "VBR" or "ABR", only available for the MP3 format) +- encoder_info (e.g., "LAME 3.97.0", only available for some formats) +- encoder_settings (e.g., "-V2", only available for the MP3 format) +- format (e.g., "MP3" or "FLAC") +- channels +- bitdepth (only available for some formats) +- samplerate (in kilohertz, with units: e.g., "48kHz") MusicBrainz and fingerprint information: -* mb_trackid -* mb_releasetrackid -* mb_albumid -* mb_artistid -* mb_albumartistid -* mb_releasegroupid -* acoustid_fingerprint -* acoustid_id +- mb_trackid 
+- mb_releasetrackid +- mb_albumid +- mb_artistid +- mb_albumartistid +- mb_releasegroupid +- acoustid_fingerprint +- acoustid_id Library metadata: -* mtime: The modification time of the audio file. -* added: The date and time that the music was added to your library. -* path: The item's filename. - +- mtime: The modification time of the audio file. +- added: The date and time that the music was added to your library. +- path: The item's filename. .. _templ_plugins: Template functions and values provided by plugins ------------------------------------------------- -Beets plugins can provide additional fields and functions to templates. See -the :doc:`/plugins/index` page for a full list of plugins. Some plugin-provided +Beets plugins can provide additional fields and functions to templates. See the +:doc:`/plugins/index` page for a full list of plugins. Some plugin-provided constructs include: -* ``$missing`` by :doc:`/plugins/missing`: The number of missing tracks per +- ``$missing`` by :doc:`/plugins/missing`: The number of missing tracks per album. -* ``%bucket{text}`` by :doc:`/plugins/bucket`: Substitute a string by the - range it belongs to. -* ``%the{text}`` by :doc:`/plugins/the`: Moves English articles to ends of +- ``$album_artist_no_feat`` by :doc:`/plugins/ftintitle`: The album artist + without any featured artists +- ``%bucket{text}`` by :doc:`/plugins/bucket`: Substitute a string by the range + it belongs to. +- ``%the{text}`` by :doc:`/plugins/the`: Moves English articles to ends of strings. The :doc:`/plugins/inline` lets you define template fields in your beets -configuration file using Python snippets. And for more advanced processing, -you can go all-in and write a dedicated plugin to register your own fields and -functions (see :ref:`writing-plugins`). +configuration file using Python snippets. 
And for more advanced processing, you +can go all-in and write a dedicated plugin to register your own fields and +functions (see :ref:`basic-plugin-setup`). diff --git a/docs/reference/query.rst b/docs/reference/query.rst index eaa2d6701..a8d2c4487 100644 --- a/docs/reference/query.rst +++ b/docs/reference/query.rst @@ -13,7 +13,9 @@ search engines. Keyword ------- -This command:: +This command: + +:: $ beet list love @@ -21,7 +23,9 @@ will show all tracks matching the query string ``love``. By default any unadorned word like this matches in a track's title, artist, album name, album artist, genre and comments. See below on how to search other fields. -For example, this is what I might see when I run the command above:: +For example, this is what I might see when I run the command above: + +:: Against Me! - Reinventing Axl Rose - I Still Love You Julie Air - Love 2 - Do the Joy @@ -36,7 +40,9 @@ Combining Keywords Multiple keywords are implicitly joined with a Boolean "and." That is, if a query has two keywords, it only matches tracks that contain *both* keywords. For -example, this command:: +example, this command: + +:: $ beet ls magnetic tomorrow @@ -45,8 +51,10 @@ my library. It *doesn't* match other songs by the Magnetic Fields, nor does it match "Tomorrowland" by Walter Meego---those songs only have *one* of the two keywords I specified. -Keywords can also be joined with a Boolean "or" using a comma. For example, -the command:: +Keywords can also be joined with a Boolean "or" using a comma. For example, the +command: + +:: $ beet ls magnetic tomorrow , beatles yesterday @@ -65,16 +73,22 @@ Just say ``field:value``, where ``field`` is the name of the thing you're trying to match (such as ``artist``, ``album``, or ``title``) and ``value`` is the keyword you're searching for. 
-For example, while this query:: +For example, while this query: + +:: $ beet list dream -matches a lot of songs in my library, this more-specific query:: +matches a lot of songs in my library, this more-specific query: + +:: $ beet list artist:dream only matches songs by the artist The-Dream. One query I especially appreciate is -one that matches albums by year:: +one that matches albums by year: + +:: $ beet list -a year:2012 @@ -85,11 +99,15 @@ For multi-valued tags (such as ``artists`` or ``albumartists``), a regular expression search must be used to search for a single value within the multi-valued tag. -Note that you can filter albums by querying tracks fields and vice versa:: +Note that you can filter albums by querying tracks fields and vice versa: + +:: $ beet list -a title:love -and vice versa:: +and vice versa: + +:: $ beet list art_path::love @@ -97,11 +115,15 @@ Phrases ------- You can query for strings with spaces in them by quoting or escaping them using -your shell's argument syntax. For example, this command:: +your shell's argument syntax. For example, this command: + +:: $ beet list the rebel -shows several tracks in my library, but these (equivalent) commands:: +shows several tracks in my library, but these (equivalent) commands: + +:: $ beet list "the rebel" $ beet list the\ rebel @@ -118,7 +140,9 @@ Exact Matches While ordinary queries perform *substring* matches, beets can also match whole strings by adding either ``=`` (case-sensitive) or ``=~`` (ignore case) after -the field name's colon and before the expression:: +the field name's colon and before the expression: + +:: $ beet list artist:air $ beet list artist:=~air @@ -130,7 +154,9 @@ case-insensitive match for the entire expression, but does not return anything by Air Supply. The third query, which requires a case-sensitive exact match, returns tracks by AIR only. 
-Exact matches may be performed on phrases as well:: +Exact matches may be performed on phrases as well: + +:: $ beet list artist:=~"dave matthews" $ beet list artist:="Dave Matthews" @@ -139,7 +165,9 @@ Both of these queries return tracks by Dave Matthews, but not by Dave Matthews Band. To search for exact matches across *all* fields, just prefix the expression with -a single ``=`` or ``=~``:: +a single ``=`` or ``=~``: + +:: $ beet list =~crash $ beet list ="American Football" @@ -151,58 +179,65 @@ Regular Expressions In addition to simple substring and exact matches, beets also supports regular expression matching for more advanced queries. To run a regex query, use an -additional ``:`` between the field name and the expression:: +additional ``:`` between the field name and the expression: + +:: $ beet list "artist::Ann(a|ie)" That query finds songs by Anna Calvi and Annie but not Annuals. Similarly, this -query prints the path to any file in my library that's missing a track title:: +query prints the path to any file in my library that's missing a track title: + +:: $ beet list -p title::^$ To search *all* fields using a regular expression, just prefix the expression -with a single ``:``, like so:: +with a single ``:``, like so: + +:: $ beet list ":Ho[pm]eless" Regular expressions are case-sensitive and build on `Python's built-in -implementation`_. See Python's documentation for specifics on regex syntax. +implementation <https://docs.python.org/library/re.html>`__. See Python's +documentation for specifics on regex syntax. Most command-line shells will try to interpret common characters in regular -expressions, such as ``()[]|``. To type those characters, you'll need to -escape them (e.g., with backslashes or quotation marks, depending on your -shell). - -.. _Python's built-in implementation: https://docs.python.org/library/re.html - +expressions, such as ``()[]|``. 
To type those characters, you'll need to escape +them (e.g., with backslashes or quotation marks, depending on your shell). .. _numericquery: Numeric Range Queries --------------------- -For numeric fields, such as year, bitrate, and track, you can query using one- -or two-sided intervals. That is, you can find music that falls within a -*range* of values. To use ranges, write a query that has two dots (``..``) at -the beginning, middle, or end of a string of numbers. Dots in the beginning -let you specify a maximum (e.g., ``..7``); dots at the end mean a minimum -(``4..``); dots in the middle mean a range (``4..7``). +For numeric fields, such as year, bitrate, and track, you can query using one- or +two-sided intervals. That is, you can find music that falls within a *range* of +values. To use ranges, write a query that has two dots (``..``) at the +beginning, middle, or end of a string of numbers. Dots in the beginning let you +specify a maximum (e.g., ``..7``); dots at the end mean a minimum (``4..``); +dots in the middle mean a range (``4..7``). -For example, this command finds all your albums that were released in the -'90s:: +For example, this command finds all your albums that were released in the '90s: + +:: $ beet list -a year:1990..1999 -and this command finds MP3 files with bitrates of 128k or lower:: +and this command finds MP3 files with bitrates of 128k or lower: + +:: $ beet list format:MP3 bitrate:..128000 -The ``length`` field also lets you use a "M:SS" format. For example, this -query finds tracks that are less than four and a half minutes in length:: +The ``length`` field also lets you use a "M:SS" format. For example, this query +finds tracks that are less than four and a half minutes in length: + +:: $ beet list length:..4:30 - .. _datequery: Date and Date Range Queries @@ -218,51 +253,66 @@ matches for the whole month. Date *intervals*, like the numeric intervals described above, are separated by two dots (``..``). 
You can specify a start, an end, or both. -Here is an example that finds all the albums added in 2008:: +Here is an example that finds all the albums added in 2008: + +:: $ beet ls -a 'added:2008' -Find all items added in the years 2008, 2009 and 2010:: +Find all items added in the years 2008, 2009 and 2010: + +:: $ beet ls 'added:2008..2010' -Find all items added before the year 2010:: +Find all items added before the year 2010: + +:: $ beet ls 'added:..2009' -Find all items added on or after 2008-12-01 but before 2009-10-12:: +Find all items added on or after 2008-12-01 but before 2009-10-12: + +:: $ beet ls 'added:2008-12..2009-10-11' -Find all items with a file modification time between 2008-12-01 and -2008-12-03:: +Find all items with a file modification time between 2008-12-01 and 2008-12-03: + +:: $ beet ls 'mtime:2008-12-01..2008-12-02' You can also add an optional time value to date queries, specifying hours, minutes, and seconds. -Times are separated from dates by a space, an uppercase 'T' or a lowercase -'t', for example: ``2008-12-01T23:59:59``. If you specify a time, then the -date must contain a year, month, and day. The minutes and seconds are -optional. +Times are separated from dates by a space, an uppercase 'T' or a lowercase 't', +for example: ``2008-12-01T23:59:59``. If you specify a time, then the date must +contain a year, month, and day. The minutes and seconds are optional. Here is an example that finds all items added on 2008-12-01 at or after 22:00 -but before 23:00:: +but before 23:00: + +:: $ beet ls 'added:2008-12-01T22' -To find all items added on or after 2008-12-01 at 22:45:: +To find all items added on or after 2008-12-01 at 22:45: + +:: $ beet ls 'added:2008-12-01T22:45..' 
-To find all items added on 2008-12-01, at or after 22:45:20 but before -22:45:41:: +To find all items added on 2008-12-01, at or after 22:45:20 but before 22:45:41: + +:: $ beet ls 'added:2008-12-01T22:45:20..2008-12-01T22:45:40' Here are example of the three ways to separate dates from times. All of these -queries do the same thing:: +queries do the same thing: + +:: $ beet ls 'added:2008-12-01T22:45:20' $ beet ls 'added:2008-12-01t22:45:20' @@ -278,12 +328,16 @@ and ``+4d`` means four days in the future. A relative date has three parts: weeks, months or years. (A "month" is always 30 days and a "year" is always 365 days.) -Here's an example that finds all the albums added since last week:: +Here's an example that finds all the albums added since last week: + +:: $ beet ls -a 'added:-1w..' -And here's an example that lists items added in a two-week period starting -four weeks ago:: +And here's an example that lists items added in a two-week period starting four +weeks ago: + +:: $ beet ls 'added:-6w..-4w' @@ -292,9 +346,11 @@ four weeks ago:: Query Term Negation ------------------- -Query terms can also be negated, acting like a Boolean "not," by prefixing -them with ``-`` or ``^``. This has the effect of returning all the items that -do **not** match the query term. For example, this command:: +Query terms can also be negated, acting like a Boolean "not," by prefixing them +with ``-`` or ``^``. This has the effect of returning all the items that do +**not** match the query term. For example, this command: + +:: $ beet list ^love @@ -302,7 +358,9 @@ matches all the songs in the library that do not have "love" in any of their fields. Negation can be combined with the rest of the query mechanisms, so you can -negate specific fields, regular expressions, etc. For example, this command:: +negate specific fields, regular expressions, etc. For example, this command: + +:: $ beet list -a artist:dylan ^year:1980..1989 "^album::the(y)?" 
@@ -311,7 +369,9 @@ released in the eighties and those that have "the" or "they" on the title. The syntax supports both ``^`` and ``-`` as synonyms because the latter indicates flags on the command line. To use a minus sign in a command-line -query, use a double dash ``--`` to separate the options from the query:: +query, use a double dash ``--`` to separate the options from the query: + +:: $ beet list -a -- artist:dylan -year:1980..1990 "-album::the(y)?" @@ -321,13 +381,17 @@ Path Queries ------------ Sometimes it's useful to find all the items in your library that are -(recursively) inside a certain directory. Use the ``path:`` field to do this:: +(recursively) inside a certain directory. Use the ``path:`` field to do this: + +:: $ beet list path:/my/music/directory In fact, beets automatically recognizes any query term containing a path separator (``/`` on POSIX systems) as a path query if that path exists, so this -command is equivalent as long as ``/my/music/directory`` exist:: +command is equivalent as long as ``/my/music/directory`` exist: + +:: $ beet list /my/music/directory @@ -343,14 +407,18 @@ filesystem. Sort Order ---------- -Queries can specify a sort order. Use the name of the `field` you want to sort -on, followed by a ``+`` or ``-`` sign to indicate ascending or descending -sort. For example, this command:: +Queries can specify a sort order. Use the name of the ``field`` you want to sort +on, followed by a ``+`` or ``-`` sign to indicate ascending or descending sort. +For example, this command: + +:: $ beet list -a year+ will list all albums in chronological order. You can also specify several sort -orders, which will be used in the same order as they appear in your query:: +orders, which will be used in the same order as they appear in your query: + +:: $ beet list -a genre+ year+ @@ -364,8 +432,8 @@ transparently (but fall back to the ordinary fields when those are empty). 
Lexicographic sorts are case insensitive by default, resulting in the following sort order: ``Bar foo Qux``. This behavior can be changed with the :ref:`sort_case_insensitive` configuration option. Case sensitive sort will -result in lower-case values being placed after upper-case values, e.g., -``Bar Qux foo``. +result in lower-case values being placed after upper-case values, e.g., ``Bar +Qux foo``. Note that when sorting by fields that are not present on all items (such as flexible fields, or those defined by plugins) in *ascending* order, the items diff --git a/docs/team.rst b/docs/team.rst index eae3ef532..b23b125ce 100644 --- a/docs/team.rst +++ b/docs/team.rst @@ -1,89 +1,90 @@ Team -#### +==== This is an introduction of beets' core-team members, collaborators and frequent -contributors. Refer to this list to find out who to ask about your -collaboration idea, discuss a usage-question, request a review of your open PR. -Beets is a huge project and not everyone involved, knows everything. We hope -this helps to point you in the right direction in the first place and should -give you an idea of what you can expect from these *knowledge owners*. +contributors. Refer to this list to find out who to ask about your collaboration +idea, discuss a usage-question, request a review of your open PR. Beets is a +huge project and not everyone involved, knows everything. We hope this helps to +point you in the right direction in the first place and should give you an idea +of what you can expect from these *knowledge owners*. 
@arsaboo -======== +-------- -* The master of the Spotify plugin -* Testing out new contributions -* beets as a music discovery tool +- The master of the Spotify plugin +- Testing out new contributions +- beets as a music discovery tool @bal-e -====== +------ -* Documentation -* The Fish plugin -* Type annotations +- Documentation +- The Fish plugin +- Type annotations @govynnus -========= +--------- -* The AURA plugin -* The AURA specification -* The web plugin -* The plugin ecosystem -* The library database API and its documentation +- The AURA plugin +- The AURA specification +- The web plugin +- The plugin ecosystem +- The library database API and its documentation @jackwilsdon -============ -* Broad knowledge around beets' configuration and plugins -* Assists in discussion forums frequently -* Knows internals of beets and puts new contributors into the right direction +------------ + +- Broad knowledge around beets' configuration and plugins +- Assists in discussion forums frequently +- Knows internals of beets and puts new contributors into the right direction @joj0 -===== +----- -* The Discogs plugin -* Other metadata source plugins -* Generalization of source plugin logic (The MetaDataSourcePlugin abstract +- The Discogs plugin +- Other metadata source plugins +- Generalization of source plugin logic (The MetaDataSourcePlugin abstract class) -* Good documentation throughout the project -* The smartplaylist plugin -* Things around m3u and other playlist formats +- Good documentation throughout the project +- The smartplaylist plugin +- Things around m3u and other playlist formats @RollingStar -============ +------------ -* Data visualization -* ListenBrainz / Last.fm -* Smart playlists -* Library reports -* MusicBrainz fields and searching -* Project organization and roadmap +- Data visualization +- ListenBrainz / Last.fm +- Smart playlists +- Library reports +- MusicBrainz fields and searching +- Project organization and roadmap @sampsyo -======== +-------- 
-* The founder -* Knows almost everything ;-) +- The founder +- Knows almost everything ;-) @serene-arc -=========== +----------- -* Good documentation throughout the project -* Experienced Python developer -* Experienced in test-driven-development -* Code quality -* Typing +- Good documentation throughout the project +- Experienced Python developer +- Experienced in test-driven-development +- Code quality +- Typing @wisp3rwind -=========== +----------- -* Mr. Tidy - Keeping the code in shape -* Focus on improving core things rather than implementing new features +- Mr. Tidy - Keeping the code in shape +- Focus on improving core things rather than implementing new features @ybnd -===== +----- -* The replaygain plugin -* Improving the general parallelism of plugins -* Experienced with web scrapers -* Experienced with Flask and JavaScript integration -* The web plugin +- The replaygain plugin +- Improving the general parallelism of plugins +- Experienced with web scrapers +- Experienced with Flask and JavaScript integration +- The web plugin diff --git a/extra/release.py b/extra/release.py index 1891f17f2..0c11415a9 100755 --- a/extra/release.py +++ b/extra/release.py @@ -6,18 +6,20 @@ from __future__ import annotations import re import subprocess +from collections.abc import Callable from contextlib import redirect_stdout from datetime import datetime, timezone from functools import partial from io import StringIO from pathlib import Path -from typing import Callable, NamedTuple +from typing import NamedTuple, TypeAlias import click import tomli from packaging.version import Version, parse from sphinx.ext import intersphinx -from typing_extensions import TypeAlias + +from docs.conf import rst_epilog BASE = Path(__file__).parent.parent.absolute() PYPROJECT = BASE / "pyproject.toml" @@ -104,15 +106,21 @@ def create_rst_replacements() -> list[Replacement]: plugins = "|".join( r.split("/")[-1] for r in refs if r.startswith("plugins/") ) + explicit_replacements = dict( + 
line.removeprefix(".. ").split(" replace:: ") + for line in filter(None, rst_epilog.splitlines()) + ) return [ - # Fix nested bullet points indent: use 2 spaces consistently - (r"(?<=\n) {3,4}(?=\*)", " "), - # Fix nested text indent: use 4 spaces consistently - (r"(?<=\n) {5,6}(?=[\w:`])", " "), - # Replace Sphinx :ref: and :doc: directives by documentation URLs + # Replace explicitly defined substitutions from rst_epilog + # |BeetsPlugin| -> :class:`beets.plugins.BeetsPlugin` + ( + r"\|\w[^ ]*\|", + lambda m: explicit_replacements.get(m[0], m[0]), + ), + # Replace Sphinx directives by documentation URLs, e.g., # :ref:`/plugins/autobpm` -> [AutoBPM Plugin](DOCS/plugins/autobpm.html) ( - r":(?:ref|doc):`+(?:([^`<]+)<)?/?([\w./_-]+)>?`+", + r":(?:ref|doc|class|conf):`+(?:([^`<]+)<)?/?([\w.:/_-]+)>?`+", lambda m: make_ref_link(m[2], m[1]), ), # Convert command references to documentation URLs @@ -124,9 +132,6 @@ def create_rst_replacements() -> list[Replacement]: # Convert plugin references to documentation URLs # `fetchart` plugin -> [fetchart](DOCS/plugins/fetchart.html) (rf"`+({plugins})`+", lambda m: make_ref_link(f"plugins/{m[1]}")), - # Add additional backticks around existing backticked text to ensure it - # is rendered as inline code in Markdown - (r"(?<=[\s])(`[^`]+`)(?!_)", r"`\1`"), # Convert bug references to GitHub issue links (r":bug:`(\d+)`", r":bug: (#\1)"), # Convert user references to GitHub @mentions @@ -135,13 +140,12 @@ def create_rst_replacements() -> list[Replacement]: MD_REPLACEMENTS: list[Replacement] = [ - (r"<span[^>]+>([^<]+)</span>", r"_\1"), # remove a couple of wild span refs (r"^(\w[^\n]{,80}):(?=\n\n[^ ])", r"### \1"), # format section headers (r"^(\w[^\n]{81,}):(?=\n\n[^ ])", r"**\1**"), # and bolden too long ones (r"### [^\n]+\n+(?=### )", ""), # remove empty sections ] order_bullet_points = partial( - re.compile("(\n- .*?(?=\n(?! *- )|$))", flags=re.DOTALL).sub, + re.compile(r"(\n- .*?(?=\n(?! *(-|\d\.) 
)|$))", flags=re.DOTALL).sub, lambda m: "\n- ".join(sorted(m.group().split("\n- "))), ) @@ -170,7 +174,7 @@ For packagers: Other changes: {new_header} -{'-' * len(new_header)} +{"-" * len(new_header)} """, text, ) diff --git a/poetry.lock b/poetry.lock index 8fa603a13..5a0832399 100644 --- a/poetry.lock +++ b/poetry.lock @@ -20,36 +20,70 @@ tests = ["hypothesis", "pytest"] [[package]] name = "alabaster" -version = "0.7.16" +version = "1.0.0" description = "A light, configurable Sphinx theme" -optional = true -python-versions = ">=3.9" +optional = false +python-versions = ">=3.10" files = [ - {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, - {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, + {file = "alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b"}, + {file = "alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e"}, ] [[package]] name = "anyio" -version = "4.6.2.post1" -description = "High level compatibility layer for multiple asynchronous event loop implementations" +version = "4.11.0" +description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, - {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, + {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, + {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < 
\"3.11\""} idna = ">=2.8" sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] -trio = ["trio (>=0.26.1)"] +trio = ["trio (>=0.31.0)"] + +[[package]] +name = "apeye" +version = "1.4.1" +description = "Handy tools for working with URLs and APIs." +optional = true +python-versions = ">=3.6.1" +files = [ + {file = "apeye-1.4.1-py3-none-any.whl", hash = "sha256:44e58a9104ec189bf42e76b3a7fe91e2b2879d96d48e9a77e5e32ff699c9204e"}, + {file = "apeye-1.4.1.tar.gz", hash = "sha256:14ea542fad689e3bfdbda2189a354a4908e90aee4bf84c15ab75d68453d76a36"}, +] + +[package.dependencies] +apeye-core = ">=1.0.0b2" +domdf-python-tools = ">=2.6.0" +platformdirs = ">=2.3.0" +requests = ">=2.24.0" + +[package.extras] +all = ["cachecontrol[filecache] (>=0.12.6)", "lockfile (>=0.12.2)"] +limiter = ["cachecontrol[filecache] (>=0.12.6)", "lockfile (>=0.12.2)"] + +[[package]] +name = "apeye-core" +version = "1.1.5" +description = "Core (offline) functionality for the apeye library." +optional = true +python-versions = ">=3.6.1" +files = [ + {file = "apeye_core-1.1.5-py3-none-any.whl", hash = "sha256:dc27a93f8c9e246b3b238c5ea51edf6115ab2618ef029b9f2d9a190ec8228fbf"}, + {file = "apeye_core-1.1.5.tar.gz", hash = "sha256:5de72ed3d00cc9b20fea55e54b7ab8f5ef8500eb33a5368bc162a5585e238a55"}, +] + +[package.dependencies] +domdf-python-tools = ">=2.6.0" +idna = ">=2.5" [[package]] name = "appdirs" @@ -63,46 +97,125 @@ files = [ ] [[package]] -name = "audioread" -version = "3.0.1" -description = "Multi-library, cross-platform audio decoding." 
+name = "audioop-lts" +version = "0.2.2" +description = "LTS Port of Python audioop" optional = true -python-versions = ">=3.6" +python-versions = ">=3.13" files = [ - {file = "audioread-3.0.1-py3-none-any.whl", hash = "sha256:4cdce70b8adc0da0a3c9e0d85fb10b3ace30fbdf8d1670fd443929b61d117c33"}, - {file = "audioread-3.0.1.tar.gz", hash = "sha256:ac5460a5498c48bdf2e8e767402583a4dcd13f4414d286f42ce4379e8b35066d"}, + {file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd3d4602dc64914d462924a08c1a9816435a2155d74f325853c1f1ac3b2d9800"}, + {file = "audioop_lts-0.2.2-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:550c114a8df0aafe9a05442a1162dfc8fec37e9af1d625ae6060fed6e756f303"}, + {file = "audioop_lts-0.2.2-cp313-abi3-macosx_11_0_arm64.whl", hash = "sha256:9a13dc409f2564de15dd68be65b462ba0dde01b19663720c68c1140c782d1d75"}, + {file = "audioop_lts-0.2.2-cp313-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:51c916108c56aa6e426ce611946f901badac950ee2ddaf302b7ed35d9958970d"}, + {file = "audioop_lts-0.2.2-cp313-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47eba38322370347b1c47024defbd36374a211e8dd5b0dcbce7b34fdb6f8847b"}, + {file = "audioop_lts-0.2.2-cp313-abi3-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba7c3a7e5f23e215cb271516197030c32aef2e754252c4c70a50aaff7031a2c8"}, + {file = "audioop_lts-0.2.2-cp313-abi3-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:def246fe9e180626731b26e89816e79aae2276f825420a07b4a647abaa84becc"}, + {file = "audioop_lts-0.2.2-cp313-abi3-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e160bf9df356d841bb6c180eeeea1834085464626dc1b68fa4e1d59070affdc3"}, + {file = "audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4b4cd51a57b698b2d06cb9993b7ac8dfe89a3b2878e96bc7948e9f19ff51dba6"}, + {file = 
"audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_ppc64le.whl", hash = "sha256:4a53aa7c16a60a6857e6b0b165261436396ef7293f8b5c9c828a3a203147ed4a"}, + {file = "audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_riscv64.whl", hash = "sha256:3fc38008969796f0f689f1453722a0f463da1b8a6fbee11987830bfbb664f623"}, + {file = "audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_s390x.whl", hash = "sha256:15ab25dd3e620790f40e9ead897f91e79c0d3ce65fe193c8ed6c26cffdd24be7"}, + {file = "audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:03f061a1915538fd96272bac9551841859dbb2e3bf73ebe4a23ef043766f5449"}, + {file = "audioop_lts-0.2.2-cp313-abi3-win32.whl", hash = "sha256:3bcddaaf6cc5935a300a8387c99f7a7fbbe212a11568ec6cf6e4bc458c048636"}, + {file = "audioop_lts-0.2.2-cp313-abi3-win_amd64.whl", hash = "sha256:a2c2a947fae7d1062ef08c4e369e0ba2086049a5e598fda41122535557012e9e"}, + {file = "audioop_lts-0.2.2-cp313-abi3-win_arm64.whl", hash = "sha256:5f93a5db13927a37d2d09637ccca4b2b6b48c19cd9eda7b17a2e9f77edee6a6f"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:73f80bf4cd5d2ca7814da30a120de1f9408ee0619cc75da87d0641273d202a09"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:106753a83a25ee4d6f473f2be6b0966fc1c9af7e0017192f5531a3e7463dce58"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fbdd522624141e40948ab3e8cdae6e04c748d78710e9f0f8d4dae2750831de19"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:143fad0311e8209ece30a8dbddab3b65ab419cbe8c0dde6e8828da25999be911"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dfbbc74ec68a0fd08cfec1f4b5e8cca3d3cd7de5501b01c4b5d209995033cde9"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:cfcac6aa6f42397471e4943e0feb2244549db5c5d01efcd02725b96af417f3fe"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:752d76472d9804ac60f0078c79cdae8b956f293177acd2316cd1e15149aee132"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:83c381767e2cc10e93e40281a04852facc4cd9334550e0f392f72d1c0a9c5753"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c0022283e9556e0f3643b7c3c03f05063ca72b3063291834cca43234f20c60bb"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a2d4f1513d63c795e82948e1305f31a6d530626e5f9f2605408b300ae6095093"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:c9c8e68d8b4a56fda8c025e538e639f8c5953f5073886b596c93ec9b620055e7"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:96f19de485a2925314f5020e85911fb447ff5fbef56e8c7c6927851b95533a1c"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e541c3ef484852ef36545f66209444c48b28661e864ccadb29daddb6a4b8e5f5"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-win32.whl", hash = "sha256:d5e73fa573e273e4f2e5ff96f9043858a5e9311e94ffefd88a3186a910c70917"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9191d68659eda01e448188f60364c7763a7ca6653ed3f87ebb165822153a8547"}, + {file = "audioop_lts-0.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:c174e322bb5783c099aaf87faeb240c8d210686b04bd61dfd05a8e5a83d88969"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f9ee9b52f5f857fbaf9d605a360884f034c92c1c23021fb90b2e39b8e64bede6"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:49ee1a41738a23e98d98b937a0638357a2477bc99e61b0f768a8f654f45d9b7a"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-macosx_11_0_arm64.whl", 
hash = "sha256:5b00be98ccd0fc123dcfad31d50030d25fcf31488cde9e61692029cd7394733b"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a6d2e0f9f7a69403e388894d4ca5ada5c47230716a03f2847cfc7bd1ecb589d6"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9b0b8a03ef474f56d1a842af1a2e01398b8f7654009823c6d9e0ecff4d5cfbf"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2b267b70747d82125f1a021506565bdc5609a2b24bcb4773c16d79d2bb260bbd"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0337d658f9b81f4cd0fdb1f47635070cc084871a3d4646d9de74fdf4e7c3d24a"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:167d3b62586faef8b6b2275c3218796b12621a60e43f7e9d5845d627b9c9b80e"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0d9385e96f9f6da847f4d571ce3cb15b5091140edf3db97276872647ce37efd7"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:48159d96962674eccdca9a3df280e864e8ac75e40a577cc97c5c42667ffabfc5"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:8fefe5868cd082db1186f2837d64cfbfa78b548ea0d0543e9b28935ccce81ce9"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:58cf54380c3884fb49fdd37dfb7a772632b6701d28edd3e2904743c5e1773602"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:088327f00488cdeed296edd9215ca159f3a5a5034741465789cad403fcf4bec0"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-win32.whl", hash = "sha256:068aa17a38b4e0e7de771c62c60bbca2455924b67a8814f3b0dee92b5820c0b3"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-win_amd64.whl", 
hash = "sha256:a5bf613e96f49712073de86f20dbdd4014ca18efd4d34ed18c75bd808337851b"}, + {file = "audioop_lts-0.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:b492c3b040153e68b9fdaff5913305aaaba5bb433d8a7f73d5cf6a64ed3cc1dd"}, + {file = "audioop_lts-0.2.2.tar.gz", hash = "sha256:64d0c62d88e67b98a1a5e71987b7aa7b5bcffc7dcee65b635823dbdd0a8dbbd0"}, ] +[[package]] +name = "audioread" +version = "3.1.0" +description = "Multi-library, cross-platform audio decoding." +optional = true +python-versions = ">=3.9" +files = [ + {file = "audioread-3.1.0-py3-none-any.whl", hash = "sha256:b30d1df6c5d3de5dcef0fb0e256f6ea17bdcf5f979408df0297d8a408e2971b4"}, + {file = "audioread-3.1.0.tar.gz", hash = "sha256:1c4ab2f2972764c896a8ac61ac53e261c8d29f0c6ccd652f84e18f08a4cab190"}, +] + +[package.dependencies] +standard-aifc = {version = "*", markers = "python_version >= \"3.13\""} +standard-sunau = {version = "*", markers = "python_version >= \"3.13\""} + [package.extras] -test = ["tox"] +gi = ["pygobject (>=3.54.2,<4.0.0)"] +mad = ["pymad[mad] (>=0.11.3,<0.12.0)"] +test = ["pytest (>=8.4.2)", "pytest-cov (>=7.0.0)"] + +[[package]] +name = "autodocsumm" +version = "0.2.14" +description = "Extended sphinx autodoc including automatic autosummaries" +optional = true +python-versions = ">=3.7" +files = [ + {file = "autodocsumm-0.2.14-py3-none-any.whl", hash = "sha256:3bad8717fc5190802c60392a7ab04b9f3c97aa9efa8b3780b3d81d615bfe5dc0"}, + {file = "autodocsumm-0.2.14.tar.gz", hash = "sha256:2839a9d4facc3c4eccd306c08695540911042b46eeafcdc3203e6d0bab40bc77"}, +] + +[package.dependencies] +Sphinx = ">=4.0,<9.0" [[package]] name = "babel" -version = "2.16.0" +version = "2.17.0" description = "Internationalization utilities" -optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, - {file = "babel-2.16.0.tar.gz", hash = 
"sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, ] [package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"] [[package]] name = "beautifulsoup4" -version = "4.12.3" +version = "4.14.2" description = "Screen-scraping library" optional = false -python-versions = ">=3.6.0" +python-versions = ">=3.7.0" files = [ - {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, - {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, + {file = "beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515"}, + {file = "beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e"}, ] [package.dependencies] soupsieve = ">1.2" +typing-extensions = ">=4.0.0" [package.extras] cchardet = ["cchardet"] @@ -111,6 +224,53 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "black" +version = "25.9.0" +description = "The uncompromising code formatter." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "black-25.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce41ed2614b706fd55fd0b4a6909d06b5bab344ffbfadc6ef34ae50adba3d4f7"}, + {file = "black-25.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ab0ce111ef026790e9b13bd216fa7bc48edd934ffc4cbf78808b235793cbc92"}, + {file = "black-25.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f96b6726d690c96c60ba682955199f8c39abc1ae0c3a494a9c62c0184049a713"}, + {file = "black-25.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:d119957b37cc641596063cd7db2656c5be3752ac17877017b2ffcdb9dfc4d2b1"}, + {file = "black-25.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:456386fe87bad41b806d53c062e2974615825c7a52159cde7ccaeb0695fa28fa"}, + {file = "black-25.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a16b14a44c1af60a210d8da28e108e13e75a284bf21a9afa6b4571f96ab8bb9d"}, + {file = "black-25.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aaf319612536d502fdd0e88ce52d8f1352b2c0a955cc2798f79eeca9d3af0608"}, + {file = "black-25.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:c0372a93e16b3954208417bfe448e09b0de5cc721d521866cd9e0acac3c04a1f"}, + {file = "black-25.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1b9dc70c21ef8b43248f1d86aedd2aaf75ae110b958a7909ad8463c4aa0880b0"}, + {file = "black-25.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8e46eecf65a095fa62e53245ae2795c90bdecabd53b50c448d0a8bcd0d2e74c4"}, + {file = "black-25.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9101ee58ddc2442199a25cb648d46ba22cd580b00ca4b44234a324e3ec7a0f7e"}, + {file = "black-25.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:77e7060a00c5ec4b3367c55f39cf9b06e68965a4f2e61cecacd6d0d9b7ec945a"}, + {file = "black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175"}, + {file = "black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f"}, + {file = "black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831"}, + {file = "black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357"}, + {file = "black-25.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef69351df3c84485a8beb6f7b8f9721e2009e20ef80a8d619e2d1788b7816d47"}, + {file = "black-25.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e3c1f4cd5e93842774d9ee4ef6cd8d17790e65f44f7cdbaab5f2cf8ccf22a823"}, + {file = "black-25.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:154b06d618233fe468236ba1f0e40823d4eb08b26f5e9261526fde34916b9140"}, + {file = "black-25.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e593466de7b998374ea2585a471ba90553283fb9beefcfa430d84a2651ed5933"}, + {file = "black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae"}, + {file = "black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +pytokens = ">=0.1.10" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "blinker" version = "1.9.0" @@ -295,219 +455,265 @@ files = [ [package.dependencies] cffi = 
">=1.0.0" +[[package]] +name = "cachecontrol" +version = "0.14.4" +description = "httplib2 caching for requests" +optional = true +python-versions = ">=3.10" +files = [ + {file = "cachecontrol-0.14.4-py3-none-any.whl", hash = "sha256:b7ac014ff72ee199b5f8af1de29d60239954f223e948196fa3d84adaffc71d2b"}, + {file = "cachecontrol-0.14.4.tar.gz", hash = "sha256:e6220afafa4c22a47dd0badb319f84475d79108100d04e26e8542ef7d3ab05a1"}, +] + +[package.dependencies] +filelock = {version = ">=3.8.0", optional = true, markers = "extra == \"filecache\""} +msgpack = ">=0.5.2,<2.0.0" +requests = ">=2.16.0" + +[package.extras] +dev = ["cachecontrol[filecache,redis]", "cheroot (>=11.1.2)", "cherrypy", "codespell", "furo", "mypy", "pytest", "pytest-cov", "ruff", "sphinx", "sphinx-copybutton", "types-redis", "types-requests"] +filecache = ["filelock (>=3.8.0)"] +redis = ["redis (>=2.10.5)"] + [[package]] name = "certifi" -version = "2024.8.30" +version = "2025.10.5" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"}, + {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"}, ] [[package]] name = "cffi" -version = "1.17.1" +version = "2.0.0" description = "Foreign Function Interface for Python calling C code." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", 
hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - 
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = 
"sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + 
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + 
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, ] [package.dependencies] -pycparser = "*" +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.4.4" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = 
"charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, 
- {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = 
"charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = 
"charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = 
"charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, + {file = 
"charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = 
"sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = 
"sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"}, + {file = 
"charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"}, + {file = 
"charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"}, + {file = 
"charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"}, + {file = 
"charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"}, + {file = 
"charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = 
"sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"}, + {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"}, + {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, ] [[package]] name = "click" -version = "8.1.7" +version = "8.3.0" description = "Composable command line interface toolkit" optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc"}, + {file = "click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4"}, ] [package.dependencies] @@ -541,13 +747,13 @@ files = [ [[package]] 
name = "confuse" -version = "2.0.1" -description = "Painless YAML configuration." +version = "2.1.0" +description = "Painless YAML config files" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" files = [ - {file = "confuse-2.0.1-py3-none-any.whl", hash = "sha256:9b9e5bbc70e2cb9b318bcab14d917ec88e21bf1b724365e3815eb16e37aabd2a"}, - {file = "confuse-2.0.1.tar.gz", hash = "sha256:7379a2ad49aaa862b79600cc070260c1b7974d349f4fa5e01f9afa6c4dd0611f"}, + {file = "confuse-2.1.0-py3-none-any.whl", hash = "sha256:502be1299aa6bf7c48f7719f56795720c073fb28550c0c7a37394366c9d30316"}, + {file = "confuse-2.1.0.tar.gz", hash = "sha256:abb9674a99c7a6efaef84e2fc84403ecd2dd304503073ff76ea18ed4176e218d"}, ] [package.dependencies] @@ -555,73 +761,103 @@ pyyaml = "*" [[package]] name = "coverage" -version = "7.6.8" +version = "7.11.0" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "coverage-7.6.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b39e6011cd06822eb964d038d5dff5da5d98652b81f5ecd439277b32361a3a50"}, - {file = "coverage-7.6.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:63c19702db10ad79151a059d2d6336fe0c470f2e18d0d4d1a57f7f9713875dcf"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3985b9be361d8fb6b2d1adc9924d01dec575a1d7453a14cccd73225cb79243ee"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:644ec81edec0f4ad17d51c838a7d01e42811054543b76d4ba2c5d6af741ce2a6"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f188a2402f8359cf0c4b1fe89eea40dc13b52e7b4fd4812450da9fcd210181d"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:e19122296822deafce89a0c5e8685704c067ae65d45e79718c92df7b3ec3d331"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:13618bed0c38acc418896005732e565b317aa9e98d855a0e9f211a7ffc2d6638"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:193e3bffca48ad74b8c764fb4492dd875038a2f9925530cb094db92bb5e47bed"}, - {file = "coverage-7.6.8-cp310-cp310-win32.whl", hash = "sha256:3988665ee376abce49613701336544041f2117de7b7fbfe91b93d8ff8b151c8e"}, - {file = "coverage-7.6.8-cp310-cp310-win_amd64.whl", hash = "sha256:f56f49b2553d7dd85fd86e029515a221e5c1f8cb3d9c38b470bc38bde7b8445a"}, - {file = "coverage-7.6.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:86cffe9c6dfcfe22e28027069725c7f57f4b868a3f86e81d1c62462764dc46d4"}, - {file = "coverage-7.6.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d82ab6816c3277dc962cfcdc85b1efa0e5f50fb2c449432deaf2398a2928ab94"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13690e923a3932e4fad4c0ebfb9cb5988e03d9dcb4c5150b5fcbf58fd8bddfc4"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4be32da0c3827ac9132bb488d331cb32e8d9638dd41a0557c5569d57cf22c9c1"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44e6c85bbdc809383b509d732b06419fb4544dca29ebe18480379633623baafb"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:768939f7c4353c0fac2f7c37897e10b1414b571fd85dd9fc49e6a87e37a2e0d8"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e44961e36cb13c495806d4cac67640ac2866cb99044e210895b506c26ee63d3a"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ea8bb1ab9558374c0ab591783808511d135a833c3ca64a18ec927f20c4030f0"}, - {file = 
"coverage-7.6.8-cp311-cp311-win32.whl", hash = "sha256:629a1ba2115dce8bf75a5cce9f2486ae483cb89c0145795603d6554bdc83e801"}, - {file = "coverage-7.6.8-cp311-cp311-win_amd64.whl", hash = "sha256:fb9fc32399dca861584d96eccd6c980b69bbcd7c228d06fb74fe53e007aa8ef9"}, - {file = "coverage-7.6.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e683e6ecc587643f8cde8f5da6768e9d165cd31edf39ee90ed7034f9ca0eefee"}, - {file = "coverage-7.6.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1defe91d41ce1bd44b40fabf071e6a01a5aa14de4a31b986aa9dfd1b3e3e414a"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7ad66e8e50225ebf4236368cc43c37f59d5e6728f15f6e258c8639fa0dd8e6d"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fe47da3e4fda5f1abb5709c156eca207eacf8007304ce3019eb001e7a7204cb"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:202a2d645c5a46b84992f55b0a3affe4f0ba6b4c611abec32ee88358db4bb649"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4674f0daa1823c295845b6a740d98a840d7a1c11df00d1fd62614545c1583787"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:74610105ebd6f33d7c10f8907afed696e79c59e3043c5f20eaa3a46fddf33b4c"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37cda8712145917105e07aab96388ae76e787270ec04bcb9d5cc786d7cbb8443"}, - {file = "coverage-7.6.8-cp312-cp312-win32.whl", hash = "sha256:9e89d5c8509fbd6c03d0dd1972925b22f50db0792ce06324ba069f10787429ad"}, - {file = "coverage-7.6.8-cp312-cp312-win_amd64.whl", hash = "sha256:379c111d3558272a2cae3d8e57e6b6e6f4fe652905692d54bad5ea0ca37c5ad4"}, - {file = "coverage-7.6.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b0c69f4f724c64dfbfe79f5dfb503b42fe6127b8d479b2677f2b227478db2eb"}, - 
{file = "coverage-7.6.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c15b32a7aca8038ed7644f854bf17b663bc38e1671b5d6f43f9a2b2bd0c46f63"}, - {file = "coverage-7.6.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63068a11171e4276f6ece913bde059e77c713b48c3a848814a6537f35afb8365"}, - {file = "coverage-7.6.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f4548c5ead23ad13fb7a2c8ea541357474ec13c2b736feb02e19a3085fac002"}, - {file = "coverage-7.6.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b4b4299dd0d2c67caaaf286d58aef5e75b125b95615dda4542561a5a566a1e3"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9ebfb2507751f7196995142f057d1324afdab56db1d9743aab7f50289abd022"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c1b4474beee02ede1eef86c25ad4600a424fe36cff01a6103cb4533c6bf0169e"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9fd2547e6decdbf985d579cf3fc78e4c1d662b9b0ff7cc7862baaab71c9cc5b"}, - {file = "coverage-7.6.8-cp313-cp313-win32.whl", hash = "sha256:8aae5aea53cbfe024919715eca696b1a3201886ce83790537d1c3668459c7146"}, - {file = "coverage-7.6.8-cp313-cp313-win_amd64.whl", hash = "sha256:ae270e79f7e169ccfe23284ff5ea2d52a6f401dc01b337efb54b3783e2ce3f28"}, - {file = "coverage-7.6.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:de38add67a0af869b0d79c525d3e4588ac1ffa92f39116dbe0ed9753f26eba7d"}, - {file = "coverage-7.6.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b07c25d52b1c16ce5de088046cd2432b30f9ad5e224ff17c8f496d9cb7d1d451"}, - {file = "coverage-7.6.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a66ff235e4c2e37ed3b6104d8b478d767ff73838d1222132a7a026aa548764"}, - {file = 
"coverage-7.6.8-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b9f848b28081e7b975a3626e9081574a7b9196cde26604540582da60235fdf"}, - {file = "coverage-7.6.8-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:093896e530c38c8e9c996901858ac63f3d4171268db2c9c8b373a228f459bbc5"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a7b8ac36fd688c8361cbc7bf1cb5866977ece6e0b17c34aa0df58bda4fa18a4"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:38c51297b35b3ed91670e1e4efb702b790002e3245a28c76e627478aa3c10d83"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2e4e0f60cb4bd7396108823548e82fdab72d4d8a65e58e2c19bbbc2f1e2bfa4b"}, - {file = "coverage-7.6.8-cp313-cp313t-win32.whl", hash = "sha256:6535d996f6537ecb298b4e287a855f37deaf64ff007162ec0afb9ab8ba3b8b71"}, - {file = "coverage-7.6.8-cp313-cp313t-win_amd64.whl", hash = "sha256:c79c0685f142ca53256722a384540832420dff4ab15fec1863d7e5bc8691bdcc"}, - {file = "coverage-7.6.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ac47fa29d8d41059ea3df65bd3ade92f97ee4910ed638e87075b8e8ce69599e"}, - {file = "coverage-7.6.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:24eda3a24a38157eee639ca9afe45eefa8d2420d49468819ac5f88b10de84f4c"}, - {file = "coverage-7.6.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4c81ed2820b9023a9a90717020315e63b17b18c274a332e3b6437d7ff70abe0"}, - {file = "coverage-7.6.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd55f8fc8fa494958772a2a7302b0354ab16e0b9272b3c3d83cdb5bec5bd1779"}, - {file = "coverage-7.6.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f39e2f3530ed1626c66e7493be7a8423b023ca852aacdc91fb30162c350d2a92"}, - {file = 
"coverage-7.6.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:716a78a342679cd1177bc8c2fe957e0ab91405bd43a17094324845200b2fddf4"}, - {file = "coverage-7.6.8-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:177f01eeaa3aee4a5ffb0d1439c5952b53d5010f86e9d2667963e632e30082cc"}, - {file = "coverage-7.6.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:912e95017ff51dc3d7b6e2be158dedc889d9a5cc3382445589ce554f1a34c0ea"}, - {file = "coverage-7.6.8-cp39-cp39-win32.whl", hash = "sha256:4db3ed6a907b555e57cc2e6f14dc3a4c2458cdad8919e40b5357ab9b6db6c43e"}, - {file = "coverage-7.6.8-cp39-cp39-win_amd64.whl", hash = "sha256:428ac484592f780e8cd7b6b14eb568f7c85460c92e2a37cb0c0e5186e1a0d076"}, - {file = "coverage-7.6.8-pp39.pp310-none-any.whl", hash = "sha256:5c52a036535d12590c32c49209e79cabaad9f9ad8aa4cbd875b68c4d67a9cbce"}, - {file = "coverage-7.6.8.tar.gz", hash = "sha256:8b2b8503edb06822c86d82fa64a4a5cb0760bb8f31f26e138ec743f422f37cfc"}, + {file = "coverage-7.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eb53f1e8adeeb2e78962bade0c08bfdc461853c7969706ed901821e009b35e31"}, + {file = "coverage-7.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9a03ec6cb9f40a5c360f138b88266fd8f58408d71e89f536b4f91d85721d075"}, + {file = "coverage-7.11.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0d7f0616c557cbc3d1c2090334eddcbb70e1ae3a40b07222d62b3aa47f608fab"}, + {file = "coverage-7.11.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e44a86a47bbdf83b0a3ea4d7df5410d6b1a0de984fbd805fa5101f3624b9abe0"}, + {file = "coverage-7.11.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:596763d2f9a0ee7eec6e643e29660def2eef297e1de0d334c78c08706f1cb785"}, + {file = "coverage-7.11.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ef55537ff511b5e0a43edb4c50a7bf7ba1c3eea20b4f49b1490f1e8e0e42c591"}, + {file = 
"coverage-7.11.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cbabd8f4d0d3dc571d77ae5bdbfa6afe5061e679a9d74b6797c48d143307088"}, + {file = "coverage-7.11.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e24045453384e0ae2a587d562df2a04d852672eb63051d16096d3f08aa4c7c2f"}, + {file = "coverage-7.11.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:7161edd3426c8d19bdccde7d49e6f27f748f3c31cc350c5de7c633fea445d866"}, + {file = "coverage-7.11.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d4ed4de17e692ba6415b0587bc7f12bc80915031fc9db46a23ce70fc88c9841"}, + {file = "coverage-7.11.0-cp310-cp310-win32.whl", hash = "sha256:765c0bc8fe46f48e341ef737c91c715bd2a53a12792592296a095f0c237e09cf"}, + {file = "coverage-7.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:24d6f3128f1b2d20d84b24f4074475457faedc3d4613a7e66b5e769939c7d969"}, + {file = "coverage-7.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d58ecaa865c5b9fa56e35efc51d1014d4c0d22838815b9fce57a27dd9576847"}, + {file = "coverage-7.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b679e171f1c104a5668550ada700e3c4937110dbdd153b7ef9055c4f1a1ee3cc"}, + {file = "coverage-7.11.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ca61691ba8c5b6797deb221a0d09d7470364733ea9c69425a640f1f01b7c5bf0"}, + {file = "coverage-7.11.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:aef1747ede4bd8ca9cfc04cc3011516500c6891f1b33a94add3253f6f876b7b7"}, + {file = "coverage-7.11.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1839d08406e4cba2953dcc0ffb312252f14d7c4c96919f70167611f4dee2623"}, + {file = "coverage-7.11.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e0eb0a2dcc62478eb5b4cbb80b97bdee852d7e280b90e81f11b407d0b81c4287"}, + {file = "coverage-7.11.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:bc1fbea96343b53f65d5351d8fd3b34fd415a2670d7c300b06d3e14a5af4f552"}, + {file = "coverage-7.11.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:214b622259dd0cf435f10241f1333d32caa64dbc27f8790ab693428a141723de"}, + {file = "coverage-7.11.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:258d9967520cca899695d4eb7ea38be03f06951d6ca2f21fb48b1235f791e601"}, + {file = "coverage-7.11.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cf9e6ff4ca908ca15c157c409d608da77a56a09877b97c889b98fb2c32b6465e"}, + {file = "coverage-7.11.0-cp311-cp311-win32.whl", hash = "sha256:fcc15fc462707b0680cff6242c48625da7f9a16a28a41bb8fd7a4280920e676c"}, + {file = "coverage-7.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:865965bf955d92790f1facd64fe7ff73551bd2c1e7e6b26443934e9701ba30b9"}, + {file = "coverage-7.11.0-cp311-cp311-win_arm64.whl", hash = "sha256:5693e57a065760dcbeb292d60cc4d0231a6d4b6b6f6a3191561e1d5e8820b745"}, + {file = "coverage-7.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9c49e77811cf9d024b95faf86c3f059b11c0c9be0b0d61bc598f453703bd6fd1"}, + {file = "coverage-7.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a61e37a403a778e2cda2a6a39abcc895f1d984071942a41074b5c7ee31642007"}, + {file = "coverage-7.11.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c79cae102bb3b1801e2ef1511fb50e91ec83a1ce466b2c7c25010d884336de46"}, + {file = "coverage-7.11.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:16ce17ceb5d211f320b62df002fa7016b7442ea0fd260c11cec8ce7730954893"}, + {file = "coverage-7.11.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80027673e9d0bd6aef86134b0771845e2da85755cf686e7c7c59566cf5a89115"}, + {file = "coverage-7.11.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d3ffa07a08657306cd2215b0da53761c4d73cb54d9143b9303a6481ec0cd415"}, + {file = 
"coverage-7.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a3b6a5f8b2524fd6c1066bc85bfd97e78709bb5e37b5b94911a6506b65f47186"}, + {file = "coverage-7.11.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fcc0a4aa589de34bc56e1a80a740ee0f8c47611bdfb28cd1849de60660f3799d"}, + {file = "coverage-7.11.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dba82204769d78c3fd31b35c3d5f46e06511936c5019c39f98320e05b08f794d"}, + {file = "coverage-7.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:81b335f03ba67309a95210caf3eb43bd6fe75a4e22ba653ef97b4696c56c7ec2"}, + {file = "coverage-7.11.0-cp312-cp312-win32.whl", hash = "sha256:037b2d064c2f8cc8716fe4d39cb705779af3fbf1ba318dc96a1af858888c7bb5"}, + {file = "coverage-7.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:d66c0104aec3b75e5fd897e7940188ea1892ca1d0235316bf89286d6a22568c0"}, + {file = "coverage-7.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:d91ebeac603812a09cf6a886ba6e464f3bbb367411904ae3790dfe28311b15ad"}, + {file = "coverage-7.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cc3f49e65ea6e0d5d9bd60368684fe52a704d46f9e7fc413918f18d046ec40e1"}, + {file = "coverage-7.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f39ae2f63f37472c17b4990f794035c9890418b1b8cca75c01193f3c8d3e01be"}, + {file = "coverage-7.11.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7db53b5cdd2917b6eaadd0b1251cf4e7d96f4a8d24e174bdbdf2f65b5ea7994d"}, + {file = "coverage-7.11.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10ad04ac3a122048688387828b4537bc9cf60c0bf4869c1e9989c46e45690b82"}, + {file = "coverage-7.11.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4036cc9c7983a2b1f2556d574d2eb2154ac6ed55114761685657e38782b23f52"}, + {file = "coverage-7.11.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:7ab934dd13b1c5e94b692b1e01bd87e4488cb746e3a50f798cb9464fd128374b"}, + {file = "coverage-7.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59a6e5a265f7cfc05f76e3bb53eca2e0dfe90f05e07e849930fecd6abb8f40b4"}, + {file = "coverage-7.11.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:df01d6c4c81e15a7c88337b795bb7595a8596e92310266b5072c7e301168efbd"}, + {file = "coverage-7.11.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:8c934bd088eed6174210942761e38ee81d28c46de0132ebb1801dbe36a390dcc"}, + {file = "coverage-7.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a03eaf7ec24078ad64a07f02e30060aaf22b91dedf31a6b24d0d98d2bba7f48"}, + {file = "coverage-7.11.0-cp313-cp313-win32.whl", hash = "sha256:695340f698a5f56f795b2836abe6fb576e7c53d48cd155ad2f80fd24bc63a040"}, + {file = "coverage-7.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2727d47fce3ee2bac648528e41455d1b0c46395a087a229deac75e9f88ba5a05"}, + {file = "coverage-7.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:0efa742f431529699712b92ecdf22de8ff198df41e43aeaaadf69973eb93f17a"}, + {file = "coverage-7.11.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:587c38849b853b157706407e9ebdca8fd12f45869edb56defbef2daa5fb0812b"}, + {file = "coverage-7.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b971bdefdd75096163dd4261c74be813c4508477e39ff7b92191dea19f24cd37"}, + {file = "coverage-7.11.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:269bfe913b7d5be12ab13a95f3a76da23cf147be7fa043933320ba5625f0a8de"}, + {file = "coverage-7.11.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:dadbcce51a10c07b7c72b0ce4a25e4b6dcb0c0372846afb8e5b6307a121eb99f"}, + {file = "coverage-7.11.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9ed43fa22c6436f7957df036331f8fe4efa7af132054e1844918866cd228af6c"}, + {file = 
"coverage-7.11.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9516add7256b6713ec08359b7b05aeff8850c98d357784c7205b2e60aa2513fa"}, + {file = "coverage-7.11.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb92e47c92fcbcdc692f428da67db33337fa213756f7adb6a011f7b5a7a20740"}, + {file = "coverage-7.11.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d06f4fc7acf3cabd6d74941d53329e06bab00a8fe10e4df2714f0b134bfc64ef"}, + {file = "coverage-7.11.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:6fbcee1a8f056af07ecd344482f711f563a9eb1c2cad192e87df00338ec3cdb0"}, + {file = "coverage-7.11.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dbbf012be5f32533a490709ad597ad8a8ff80c582a95adc8d62af664e532f9ca"}, + {file = "coverage-7.11.0-cp313-cp313t-win32.whl", hash = "sha256:cee6291bb4fed184f1c2b663606a115c743df98a537c969c3c64b49989da96c2"}, + {file = "coverage-7.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a386c1061bf98e7ea4758e4313c0ab5ecf57af341ef0f43a0bf26c2477b5c268"}, + {file = "coverage-7.11.0-cp313-cp313t-win_arm64.whl", hash = "sha256:f9ea02ef40bb83823b2b04964459d281688fe173e20643870bb5d2edf68bc836"}, + {file = "coverage-7.11.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c770885b28fb399aaf2a65bbd1c12bf6f307ffd112d6a76c5231a94276f0c497"}, + {file = "coverage-7.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a3d0e2087dba64c86a6b254f43e12d264b636a39e88c5cc0a01a7c71bcfdab7e"}, + {file = "coverage-7.11.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:73feb83bb41c32811973b8565f3705caf01d928d972b72042b44e97c71fd70d1"}, + {file = "coverage-7.11.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c6f31f281012235ad08f9a560976cc2fc9c95c17604ff3ab20120fe480169bca"}, + {file = "coverage-7.11.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:e9570ad567f880ef675673992222746a124b9595506826b210fbe0ce3f0499cd"}, + {file = "coverage-7.11.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8badf70446042553a773547a61fecaa734b55dc738cacf20c56ab04b77425e43"}, + {file = "coverage-7.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a09c1211959903a479e389685b7feb8a17f59ec5a4ef9afde7650bd5eabc2777"}, + {file = "coverage-7.11.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:5ef83b107f50db3f9ae40f69e34b3bd9337456c5a7fe3461c7abf8b75dd666a2"}, + {file = "coverage-7.11.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:f91f927a3215b8907e214af77200250bb6aae36eca3f760f89780d13e495388d"}, + {file = "coverage-7.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:cdbcd376716d6b7fbfeedd687a6c4be019c5a5671b35f804ba76a4c0a778cba4"}, + {file = "coverage-7.11.0-cp314-cp314-win32.whl", hash = "sha256:bab7ec4bb501743edc63609320aaec8cd9188b396354f482f4de4d40a9d10721"}, + {file = "coverage-7.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d4ba9a449e9364a936a27322b20d32d8b166553bfe63059bd21527e681e2fad"}, + {file = "coverage-7.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:ce37f215223af94ef0f75ac68ea096f9f8e8c8ec7d6e8c346ee45c0d363f0479"}, + {file = "coverage-7.11.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:f413ce6e07e0d0dc9c433228727b619871532674b45165abafe201f200cc215f"}, + {file = "coverage-7.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:05791e528a18f7072bf5998ba772fe29db4da1234c45c2087866b5ba4dea710e"}, + {file = "coverage-7.11.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cacb29f420cfeb9283b803263c3b9a068924474ff19ca126ba9103e1278dfa44"}, + {file = "coverage-7.11.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:314c24e700d7027ae3ab0d95fbf8d53544fca1f20345fd30cd219b737c6e58d3"}, + {file = 
"coverage-7.11.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:630d0bd7a293ad2fc8b4b94e5758c8b2536fdf36c05f1681270203e463cbfa9b"}, + {file = "coverage-7.11.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e89641f5175d65e2dbb44db15fe4ea48fade5d5bbb9868fdc2b4fce22f4a469d"}, + {file = "coverage-7.11.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c9f08ea03114a637dab06cedb2e914da9dc67fa52c6015c018ff43fdde25b9c2"}, + {file = "coverage-7.11.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ce9f3bde4e9b031eaf1eb61df95c1401427029ea1bfddb8621c1161dcb0fa02e"}, + {file = "coverage-7.11.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:e4dc07e95495923d6fd4d6c27bf70769425b71c89053083843fd78f378558996"}, + {file = "coverage-7.11.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:424538266794db2861db4922b05d729ade0940ee69dcf0591ce8f69784db0e11"}, + {file = "coverage-7.11.0-cp314-cp314t-win32.whl", hash = "sha256:4c1eeb3fb8eb9e0190bebafd0462936f75717687117339f708f395fe455acc73"}, + {file = "coverage-7.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b56efee146c98dbf2cf5cffc61b9829d1e94442df4d7398b26892a53992d3547"}, + {file = "coverage-7.11.0-cp314-cp314t-win_arm64.whl", hash = "sha256:b5c2705afa83f49bd91962a4094b6b082f94aef7626365ab3f8f4bd159c5acf3"}, + {file = "coverage-7.11.0-py3-none-any.whl", hash = "sha256:4b7589765348d78fb4e5fb6ea35d07564e387da2fc5efff62e0222971f155f68"}, + {file = "coverage-7.11.0.tar.gz", hash = "sha256:167bd504ac1ca2af7ff3b81d245dfea0292c5032ebef9d66cc08a7d28c1b8050"}, ] [package.dependencies] @@ -630,14 +866,32 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli"] +[[package]] +name = "cssutils" +version = "2.11.1" +description = "A CSS Cascading Style Sheets library for Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = 
"cssutils-2.11.1-py3-none-any.whl", hash = "sha256:a67bfdfdff4f3867fab43698ec4897c1a828eca5973f4073321b3bccaf1199b1"}, + {file = "cssutils-2.11.1.tar.gz", hash = "sha256:0563a76513b6af6eebbe788c3bf3d01c920e46b3f90c8416738c5cfc773ff8e2"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["cssselect", "importlib-resources", "jaraco.test (>=5.1)", "lxml", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + [[package]] name = "dbus-python" -version = "1.3.2" +version = "1.4.0" description = "Python bindings for libdbus" optional = true python-versions = ">=3.7" files = [ - {file = "dbus-python-1.3.2.tar.gz", hash = "sha256:ad67819308618b5069537be237f8e68ca1c7fcc95ee4a121fe6845b1418248f8"}, + {file = "dbus-python-1.4.0.tar.gz", hash = "sha256:991666e498f60dbf3e49b8b7678f5559b8a65034fdf61aae62cdecdb7d89c770"}, ] [package.extras] @@ -646,40 +900,117 @@ test = ["tap.py"] [[package]] name = "decorator" -version = "5.1.1" +version = "5.2.1" description = "Decorators for Humans" optional = true -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, ] +[[package]] +name = "dict2css" +version = "0.3.0.post1" +description = "A μ-library for constructing cascading style sheets from Python dictionaries." 
+optional = true +python-versions = ">=3.6" +files = [ + {file = "dict2css-0.3.0.post1-py3-none-any.whl", hash = "sha256:f006a6b774c3e31869015122ae82c491fd25e7de4a75607a62aa3e798f837e0d"}, + {file = "dict2css-0.3.0.post1.tar.gz", hash = "sha256:89c544c21c4ca7472c3fffb9d37d3d926f606329afdb751dc1de67a411b70719"}, +] + +[package.dependencies] +cssutils = ">=2.2.0" +domdf-python-tools = ">=2.2.0" + +[[package]] +name = "docstrfmt" +version = "1.11.1" +description = "docstrfmt: A formatter for Sphinx flavored reStructuredText." +optional = false +python-versions = ">=3.9" +files = [ + {file = "docstrfmt-1.11.1-py3-none-any.whl", hash = "sha256:6782d8663321c3a7c40be08a36fbcb1ea9e46d1efba85411ba807d97f384871a"}, + {file = "docstrfmt-1.11.1.tar.gz", hash = "sha256:d41e19d6c5d524cc7f8ff6cbfecb8762d77e696b9fe4f5057269051fb966fc80"}, +] + +[package.dependencies] +black = ">=24" +click = ">=8" +docutils = ">=0.20" +libcst = ">=1" +platformdirs = ">=4" +roman = "*" +sphinx = ">=7" +tabulate = ">=0.9" +toml = {version = ">=0.10", markers = "python_version < \"3.11\""} + +[package.extras] +ci = ["coveralls"] +d = ["aiohttp (>=3)"] +dev = ["docstrfmt[lint]", "docstrfmt[test]", "packaging"] +lint = ["pre-commit", "ruff (>=0.0.292)"] +test = ["pytest", "pytest-aiohttp"] + [[package]] name = "docutils" version = "0.21.2" description = "Docutils -- Python Documentation Utilities" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, ] +[[package]] +name = "domdf-python-tools" +version = "3.10.0" +description = "Helpful functions for Python 🐍 🛠️" +optional = true +python-versions = ">=3.6" +files = [ + {file = "domdf_python_tools-3.10.0-py3-none-any.whl", hash = 
"sha256:5e71c1be71bbcc1f881d690c8984b60e64298ec256903b3147f068bc33090c36"}, + {file = "domdf_python_tools-3.10.0.tar.gz", hash = "sha256:2ae308d2f4f1e9145f5f4ba57f840fbfd1c2983ee26e4824347789649d3ae298"}, +] + +[package.dependencies] +natsort = ">=7.0.1" +typing-extensions = ">=3.7.4.1" + +[package.extras] +all = ["pytz (>=2019.1)"] +dates = ["pytz (>=2019.1)"] + [[package]] name = "exceptiongroup" -version = "1.2.2" +version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "filelock" +version = "3.20.2" +description = "A platform independent file lock." +optional = true +python-versions = ">=3.10" +files = [ + {file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"}, + {file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"}, +] + [[package]] name = "filetype" version = "1.2.0" @@ -693,22 +1024,22 @@ files = [ [[package]] name = "flask" -version = "3.1.0" +version = "3.1.2" description = "A simple framework for building complex web applications." 
optional = false python-versions = ">=3.9" files = [ - {file = "flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136"}, - {file = "flask-3.1.0.tar.gz", hash = "sha256:5f873c5184c897c8d9d1b05df1e3d01b14910ce69607a117bd3277098a5836ac"}, + {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, + {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, ] [package.dependencies] -blinker = ">=1.9" +blinker = ">=1.9.0" click = ">=8.1.3" -importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} -itsdangerous = ">=2.2" -Jinja2 = ">=3.1.2" -Werkzeug = ">=3.1" +itsdangerous = ">=2.2.0" +jinja2 = ">=3.1.2" +markupsafe = ">=2.1.1" +werkzeug = ">=3.1.0" [package.extras] async = ["asgiref (>=3.2)"] @@ -716,43 +1047,65 @@ dotenv = ["python-dotenv"] [[package]] name = "flask-cors" -version = "5.0.0" -description = "A Flask extension adding a decorator for CORS support" +version = "6.0.1" +description = "A Flask extension simplifying CORS support" optional = true -python-versions = "*" +python-versions = "<4.0,>=3.9" files = [ - {file = "Flask_Cors-5.0.0-py2.py3-none-any.whl", hash = "sha256:b9e307d082a9261c100d8fb0ba909eec6a228ed1b60a8315fd85f783d61910bc"}, - {file = "flask_cors-5.0.0.tar.gz", hash = "sha256:5aadb4b950c4e93745034594d9f3ea6591f734bb3662e16e255ffbf5e89c88ef"}, + {file = "flask_cors-6.0.1-py3-none-any.whl", hash = "sha256:c7b2cbfb1a31aa0d2e5341eea03a6805349f7a61647daee1a15c46bbe981494c"}, + {file = "flask_cors-6.0.1.tar.gz", hash = "sha256:d81bcb31f07b0985be7f48406247e9243aced229b7747219160a0559edd678db"}, ] [package.dependencies] -Flask = ">=0.9" +flask = ">=0.9" +Werkzeug = ">=0.7" [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false -python-versions = ">=3.7" 
+python-versions = ">=3.8" files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, ] +[[package]] +name = "html5lib" +version = "1.1" +description = "HTML parser based on the WHATWG HTML specification" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"}, + {file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"}, +] + +[package.dependencies] +six = ">=1.9" +webencodings = "*" + +[package.extras] +all = ["chardet (>=2.2)", "genshi", "lxml"] +chardet = ["chardet (>=2.2)"] +genshi = ["genshi"] +lxml = ["lxml"] + [[package]] name = "httpcore" -version = "1.0.7" +version = "1.0.9" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, - {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, ] [package.dependencies] certifi = "*" -h11 = ">=0.13,<0.15" +h11 = ">=0.16" [package.extras] asyncio = ["anyio (>=4.0,<5.0)"] @@ -762,13 +1115,13 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.28.0" +version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.28.0-py3-none-any.whl", hash = "sha256:dc0b419a0cfeb6e8b34e85167c0da2671206f5095f1baa9663d23bcfd6b535fc"}, - {file = "httpx-0.28.0.tar.gz", hash = "sha256:0858d3bab51ba7e386637f22a61d8ccddaeec5f3fe4209da3a6168dbb91573e0"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -786,13 +1139,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.10" +version = "3.11" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, + {file = "idna-3.11-py3-none-any.whl", hash = 
"sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, ] [package.extras] @@ -813,112 +1166,77 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, ] -[[package]] -name = "importlib-metadata" -version = "8.5.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, - {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, -] - -[package.dependencies] -zipp = ">=3.20" - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] - [[package]] name = "inflate64" -version = "1.0.0" +version = "1.0.1" description = "deflate64 compression/decompression library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "inflate64-1.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:a90c0bdf4a7ecddd8a64cc977181810036e35807f56b0bcacee9abb0fcfd18dc"}, - {file = "inflate64-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:57fe7c14aebf1c5a74fc3b70d355be1280a011521a76aa3895486e62454f4242"}, - {file = "inflate64-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d90730165f471d61a1a694a5e354f3ffa938227e8dcecb62d5d728e8069cee94"}, - {file = "inflate64-1.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:543f400201f5c101141af3c79c82059e1aa6ef4f1584a7f1fa035fb2e465097f"}, - {file = "inflate64-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ceca14f7ec19fb44b047f56c50efb7521b389d222bba2b0a10286a0caeb03fa"}, - {file = "inflate64-1.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b559937a42f0c175b4d2dfc7eb53b97bdc87efa9add15ed5549c6abc1e89d02f"}, - {file = "inflate64-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5ff8bd2a562343fcbc4eea26fdc368904a3b5f6bb8262344274d3d74a1de15bb"}, - {file = "inflate64-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:0fe481f31695d35a433c3044ac8fd5d9f5069aaad03a0c04b570eb258ce655aa"}, - {file = "inflate64-1.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a45f6979ad5874d4d4898c2fc770b136e61b96b850118fdaec5a5af1b9123a"}, - {file = "inflate64-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:022ca1cc928e7365a05f7371ff06af143c6c667144965e2cf9a9236a2ae1c291"}, - {file = "inflate64-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46792ecf3565d64fd2c519b0a780c03a57e195613c9954ef94e739a057b3fd06"}, - {file = "inflate64-1.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a70ea2e456c15f7aa7c74b8ab8f20b4f8940ec657604c9f0a9de3342f280fff"}, - {file = "inflate64-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e243ea9bd36a035059f2365bd6d156ff59717fbafb0255cb0c75bf151bf6904"}, - {file = 
"inflate64-1.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4dc392dec1cd11cacda3d2637214ca45e38202e8a4f31d4a4e566d6e90625fc4"}, - {file = "inflate64-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8b402a50eda7ee75f342fc346d33a41bca58edc222a4b17f9be0db1daed459fa"}, - {file = "inflate64-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:f5924499dc8800928c0ee4580fa8eb4ffa880b2cce4431537d0390e503a9c9ee"}, - {file = "inflate64-1.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0c644bf7208e20825ca3bbb5fb1f7f495cfcb49eb01a5f67338796d44a42f2bf"}, - {file = "inflate64-1.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9964a4eaf26a9d36f82a1d9b12c28e35800dd3d99eb340453ed12ac90c2976a8"}, - {file = "inflate64-1.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2cccded63865640d03253897be7232b2bbac295fe43914c61f86a57aa23bb61d"}, - {file = "inflate64-1.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d491f104fb3701926ebd82b8c9250dfba0ddcab584504e26f1e4adb26730378d"}, - {file = "inflate64-1.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ebad4a6cd2a2c1d81be0b09d4006479f3b258803c49a9224ef8ca0b649072fa"}, - {file = "inflate64-1.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6823b2c0cff3a8159140f3b17ec64fb8ec0e663b45a6593618ecdde8aeecb5b2"}, - {file = "inflate64-1.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:228d504239d27958e71fc77e3119a6ac4528127df38468a0c95a5bd3927204b8"}, - {file = "inflate64-1.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae2572e06bcfe15e3bbf77d4e4a6d6c55e2a70d6abceaaf60c5c3653ddb96dfd"}, - {file = "inflate64-1.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c10ca61212a753bbce6d341e7cfa779c161b839281f1f9fdc15cf1f324ce7c5b"}, - {file = "inflate64-1.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a982dc93920f9450da4d4f25c5e5c1288ef053b1d618cedc91adb67e035e35f5"}, - {file = 
"inflate64-1.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ca0310b2c55bc40394c5371db2a22f705fd594226cc09432e1eb04d3aed83930"}, - {file = "inflate64-1.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e95044ae55a161144445527a2efad550851fecc699066423d24b2634a6a83710"}, - {file = "inflate64-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34de6902c39d9225459583d5034182d371fc694bc3cfd6c0fc89aa62e9809faf"}, - {file = "inflate64-1.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ebafbd813213dc470719cd0a2bcb53aab89d9059f4e75386048b4c4dcdb2fd99"}, - {file = "inflate64-1.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75448c7b414dadaeeb11dab9f75e022aa1e0ee19b00f570e9f58e933603d71ac"}, - {file = "inflate64-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:2be4e01c1b04761874cb44b35b6103ca5846bc36c18fc3ff5e8cbcd8bfc15e9f"}, - {file = "inflate64-1.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bf2981b95c1f26242bb084d9a07f3feb0cfe3d6d0a8d90f42389803bc1252c4a"}, - {file = "inflate64-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9373ccf0661cc72ac84a0ad622634144da5ce7d57c9572ed0723d67a149feed2"}, - {file = "inflate64-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4650c6f65011ec57cf5cd96b92d5b7c6f59e502930c86eb8227c93cf02dc270"}, - {file = "inflate64-1.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a475e8822f1a74c873e60b8f270773757ade024097ca39e43402d47c049c67d4"}, - {file = "inflate64-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4367480733ac8daf368f6fc704b7c9db85521ee745eb5bd443f4b97d2051acc"}, - {file = "inflate64-1.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c5775c91f94f5eced9160fb0af12a09f3e030194f91a6a46e706a79350bd056"}, - {file = "inflate64-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d76d205b844d78ce04768060084ef20e64dcc63a3e9166674f857acaf4d140ed"}, - {file = 
"inflate64-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:92f0dc6af0e8e97324981178dc442956cbff1247a56d1e201af8d865244653f8"}, - {file = "inflate64-1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f79542478e49e471e8b23556700e6f688a40dc93e9a746f77a546c13251b59b1"}, - {file = "inflate64-1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a270be6b10cde01258c0097a663a307c62d12c78eb8f62f8e29f205335942c9"}, - {file = "inflate64-1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1616a87ff04f583e9558cc247ec0b72a30d540ee0c17cc77823be175c0ec92f0"}, - {file = "inflate64-1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:137ca6b315f0157a786c3a755a09395ca69aed8bcf42ad3437cb349f5ebc86d2"}, - {file = "inflate64-1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8140942d1614bdeb5a9ddd7559348c5c77f884a42424aef7ccf149ccfb93aa08"}, - {file = "inflate64-1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe3f9051338bb7d07b5e7d88420d666b5109f33ae39aa55ecd1a053c0f22b1b"}, - {file = "inflate64-1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36342338e957c790fc630d4afcdcc3926beb2ecaea0b302336079e8fa37e57a0"}, - {file = "inflate64-1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:9b65cc701ef33ab20dbfd1d64088ffd89a8c265b356d2c21ba0ec565661645ef"}, - {file = "inflate64-1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dd6d3e7d47df43210a995fd1f5989602b64de3f2a17cf4cbff553518b3577fd4"}, - {file = "inflate64-1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f033b2879696b855200cde5ca4e293132c7499df790acb2c0dacb336d5e83b1"}, - {file = "inflate64-1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f816d1c8a0593375c289e285c96deaee9c2d8742cb0edbd26ee05588a9ae657"}, - {file = "inflate64-1.0.0-pp39-pypy39_pp73-win_amd64.whl", 
hash = "sha256:1facd35319b6a391ee4c3d709c7c650bcada8cd7141d86cd8c2257287f45e6e6"}, - {file = "inflate64-1.0.0.tar.gz", hash = "sha256:3278827b803cf006a1df251f3e13374c7d26db779e5a33329cc11789b804bc2d"}, + {file = "inflate64-1.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5122a188995e47a735ab969edc9129d42bbd97b993df5a3f0819b87205ce81b4"}, + {file = "inflate64-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:975ed694c680e46a5c0bb872380a9c9da271a91f9c0646561c58e8f3714347d4"}, + {file = "inflate64-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8bcaf445d9cda5f7358e0c2b78144641560f8ce9e3e4351099754c49d26a34e8"}, + {file = "inflate64-1.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daede09baba24117279109b30fdf935195e91957e31b995b86f8dd01711376ee"}, + {file = "inflate64-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df40eaaba4fb8379d5c4fa5f56cc24741c4f1a91d4aef66438207473351ceaa"}, + {file = "inflate64-1.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ef90855ff63d53c8fd3bfbf85b5280b22f82b9ab2e21a7eee45b8a19d9866c42"}, + {file = "inflate64-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5daa4566c0b009c9ab8a6bf18ce407d14f5dbbb0d3068f3a43af939a17e117a7"}, + {file = "inflate64-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:d58a360b59685561a8feacee743479a9d7cc17c8d210aa1f2ae221f2513973cb"}, + {file = "inflate64-1.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31198c5f156806cee05b69b149074042b7b7d39274ff4c259b898e617294ac17"}, + {file = "inflate64-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4ab693bb1cd92573a997f8fe7b90a2ec1e17a507884598f5640656257b95ef49"}, + {file = "inflate64-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:95b6a60e305e6e759e37d6c36691fcb87678922c56b3ddc2df06cd56e04f41f6"}, + {file = "inflate64-1.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:711ef889bdb3b3b296881d1e49830a3a896938fba7033c4287f1aed9b9a20111"}, + {file = "inflate64-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3178495970ecb5c6a32167a8b57fdeef3bf4e2843eaf8f2d8f816f523741e36"}, + {file = "inflate64-1.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e8373b7feedf10236eb56d21598a19a3eb51077c3702d0ce3456b827374025e1"}, + {file = "inflate64-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf026d5c885f2d2bbf233e9a0c8c6d046ec727e2467024ffe0ac76b5be308258"}, + {file = "inflate64-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:3aa7489241e6c6f6d34b9561efdf06031c35305b864267a5b8f406abcd3e85c5"}, + {file = "inflate64-1.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b81b3d373190ecd82901f42afd90b7127e9bdef341032a94db381c750ed3ddb2"}, + {file = "inflate64-1.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbfddc5dac975227c20997f0ac515917a15421767c6bff0c209ac6ff9d7b17cc"}, + {file = "inflate64-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2adeabe79cc2f90bca832673520c8cbad7370f86353e151293add7ca529bed34"}, + {file = "inflate64-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b235c97a05dbe2f92f0f057426e4d05a449e1fccf8e9aa88075ea9c6a06a182"}, + {file = "inflate64-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19b74e30734dca5f1c83ca07074e1f25bf7b63f4a5ee7e074d9a4cb05af65cd5"}, + {file = "inflate64-1.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b298feb85204b5ef148ccf807744c836fffed7c1ed3ec8bc9b4e323a03163291"}, + {file = "inflate64-1.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8a4c75241bc442267f79b8242135f2ded29405662c44b9353d34fbd4fa6e56b3"}, + {file = "inflate64-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:7b210392f0830ab27371e36478592f47757f5ea6c09ddb96e2125847b309eb5e"}, + {file = "inflate64-1.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:8dd58aa1adc4f98bf9b52baffa8f2ddf589e071a90db2f2bec9024328d4608cf"}, + {file = "inflate64-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c108be2b87e88c966570f84f839eb37f489b45dc3fa3046dc228327af6e921bb"}, + {file = "inflate64-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63971c6b096c0d533c0e38b4257f5a7748501a8bc04d00cf239bd06467888703"}, + {file = "inflate64-1.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d0077edb6b1cabfa2223b71a4a725e5755148f551a7a396c7d5698e45fb8828"}, + {file = "inflate64-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f05b5f2a6f1bf2f70e9c20d997261711cbc1ae477379662b05b36911da60a67"}, + {file = "inflate64-1.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f3c7402165f7e15789caa0787e5a349465d9a454105d0c3a0ccf2e9cdfb8117"}, + {file = "inflate64-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:39bced168822e4bf2f545d1b6dbeded6db01c32629d9e4549ef2cd1604a12e1b"}, + {file = "inflate64-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:70bb6a22d300d8ca25c26bc60afb5662c5a96d97a801962874d0461568512789"}, + {file = "inflate64-1.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f3d5ea758358a1cc50f9e8e41de2134e9b5c5ca8bbcd88d1cd135d0e953d0fa8"}, + {file = "inflate64-1.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fa102c834314c3d7edbf249d1be0bce5d12a9e122228a7ac3f861ee82c3dc5c"}, + {file = "inflate64-1.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c2ae56a34e6cc2a712418ac82332e5d550ef8599e0ffb64c19b86d63a7df0c5"}, + {file = "inflate64-1.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9808ae50b5db661770992566e51e648cac286c32bd80892b151e7b1eca81afe8"}, + {file = "inflate64-1.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:04b2788c6a26e1e525f53cc3d8c58782d41f18bef8d2a34a3d58beaaf0bfdd3b"}, + {file = 
"inflate64-1.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67fd5b1f9e433b0abab8cb91f4da94d16223a5241008268a57f4729fdbfc4dbc"}, + {file = "inflate64-1.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6f3b00c17ae365e82fc3d48ff9a7a566820a6c8c55b4e16c6cfbcbd46505a72"}, + {file = "inflate64-1.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:91c0c1d41c1655fb0189630baaa894a3b778d77062bb90ca11db878422948395"}, + {file = "inflate64-1.0.1.tar.gz", hash = "sha256:3b1c83c22651b5942b35829df526e89602e494192bf021e0d7d0b600e76c429d"}, ] [package.extras] -check = ["check-manifest", "flake8", "flake8-black", "flake8-deprecated", "isort (>=5.0.3)", "mypy (>=0.940)", "mypy-extensions (>=0.4.1)", "pygments", "readme-renderer", "twine"] +check = ["check-manifest", "flake8", "flake8-black", "flake8-deprecated", "flake8-isort", "mypy (>=1.10.0)", "mypy_extensions (>=0.4.1)", "pygments", "readme-renderer", "twine"] docs = ["docutils", "sphinx (>=5.0)"] -test = ["pyannotate", "pytest"] +test = ["pytest"] [[package]] name = "iniconfig" -version = "2.0.0" +version = "2.3.0" description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] [[package]] @@ -934,103 +1252,107 @@ files = [ [[package]] name = "jellyfish" -version = "1.1.0" -description = "Approximate and phonetic matching of strings." 
+version = "1.2.1" +description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "jellyfish-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:feb1fa5838f2bb6dbc9f6d07dabf4b9d91e130b289d72bd70dc33b651667688f"}, - {file = "jellyfish-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:623fa58cca9b8e594a46e7b9cf3af629588a202439d97580a153d6af24736a1b"}, - {file = "jellyfish-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a87e4a17006f7cdd7027a053aeeaacfb0b3366955e242cd5b74bbf882bafe022"}, - {file = "jellyfish-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f10fa36491840bda29f2164cc49e61244ea27c5db5a66aaa437724f5626f5610"}, - {file = "jellyfish-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24f91daaa515284cdb691b1e01b0f91f9c9e51e685420725a1ded4f54d5376ff"}, - {file = "jellyfish-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:65e58350618ebb1488246998a7356a8c9a7c839ec3ecfe936df55be6776fc173"}, - {file = "jellyfish-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5c5ed62b23093b11de130c3fe1b381a2d3bfaf086757fa21341ac6f30a353e92"}, - {file = "jellyfish-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c42aa02e791d3e5a8fc6a96bec9f64ebbb2afef27b01eca201b56132e3d0c64e"}, - {file = "jellyfish-1.1.0-cp310-none-win32.whl", hash = "sha256:84680353261161c627cbdd622ea4243e3d3da75894bfacc2f3fcbbe56e8e59d4"}, - {file = "jellyfish-1.1.0-cp310-none-win_amd64.whl", hash = "sha256:017c794b89d827d0306cb056fc5fbd040ff558a90ff0e68a6b60d6e6ba661fe3"}, - {file = "jellyfish-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:fed2e4ecf9b4995d2aa771453d0a0fdf47a5e1b13dbd74b98a30cb0070ede30c"}, - {file = "jellyfish-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61a382ba8a3d3cd0bd50029062d54d3a0726679be248789fef6a3901eee47a60"}, - {file = 
"jellyfish-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a4b526ed2080b97431454075c46c19baddc944e95cc605248e32a2a07be231e"}, - {file = "jellyfish-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0fa7450c3217724b73099cb18ee594926fcbc1cc4d9964350f31a4c1dc267b35"}, - {file = "jellyfish-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33ebb6e9647d5d52f4d461a163449f6d1c73f1a80ccbe98bb17efac0062a6423"}, - {file = "jellyfish-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:759172602343115f910d7c63b39239051e32425115bc31ab4dafdaf6177f880c"}, - {file = "jellyfish-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:273fdc362ccdb09259eec9bc4abdc2467d9a54bd94d05ae22e71423dd1357255"}, - {file = "jellyfish-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bd5c335f8d762447691dc0572f4eaf0cfdfbfffb6dce740341425ab1b32134ff"}, - {file = "jellyfish-1.1.0-cp311-none-win32.whl", hash = "sha256:cc16a60a42f1541ad9c13c72c797107388227f01189aa3c0ec7ee9b939e57ea8"}, - {file = "jellyfish-1.1.0-cp311-none-win_amd64.whl", hash = "sha256:95dfe61eabf360a92e6d76d1c4dbafa29bcb3f70e2ad7354de2661141fcce038"}, - {file = "jellyfish-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:828a7000d369cbd4d812b88510c01fdab20b73dc54c63cdbe03bdff67ab362d0"}, - {file = "jellyfish-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e250dc1074d730a03c96ac9dfce44716cf45e0e2825cbddaf32a015cdf9cf594"}, - {file = "jellyfish-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87dc2a82c45b773a579fb695a5956a54106c1187f27c9ccee8508726d2e59cfc"}, - {file = "jellyfish-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e41677ec860454da5977c698fc64fed73b4054a92c5c62ba7d1af535f8082ac7"}, - {file = "jellyfish-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e9d4002d01252f18eb26f28b66f6c9ce0696221804d8769553c5912b2f221a18"}, - {file = "jellyfish-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:936df26c10ca6cd6b4f0fb97753087354c568e2129c197cbb4e0f0172db7511f"}, - {file = "jellyfish-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:684c2093fa0d68a91146e15a1e9ca859259b19d3bc36ec4d60948d86751f744e"}, - {file = "jellyfish-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2fcaefebe9d67f282d89d3a66646b77184a42b3eca2771636789b2dc1288c003"}, - {file = "jellyfish-1.1.0-cp312-none-win32.whl", hash = "sha256:e512c99941a257541ffd9f75c7a5c4689de0206841b72f1eb015599d17fed2c3"}, - {file = "jellyfish-1.1.0-cp312-none-win_amd64.whl", hash = "sha256:2b928bad2887c662783a4d9b5828ed1fa0e943f680589f7fc002c456fc02e184"}, - {file = "jellyfish-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5d510b04e2a39f27aef391ca18bf527ec5d9a2438a63731b87faada83996cb92"}, - {file = "jellyfish-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:57d005cc5daa4d0a8d88341d86b1dce24e3f1d7721da75326c0b7af598a4f58c"}, - {file = "jellyfish-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889edab0fb2a29d29c148c9327752df525c9bdaef03eef01d1bd9c1f90b47ebf"}, - {file = "jellyfish-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:937b657aacba8fe8482ebc5fea5ba1aee987ecb9da0f037bfb8a1a9045d05746"}, - {file = "jellyfish-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cb5088436ce1fdabcb46aed3a3cc215f0432313596f4e5abe5300ed833b697c"}, - {file = "jellyfish-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:af74156301a0ff05a22e8cf46250678e23fa447279ba6dffbf9feff01128f51d"}, - {file = "jellyfish-1.1.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3f978bc430bbed4df3c10b2a66be7b5bddd09e6c2856c7a17fa2298fb193d4d4"}, - {file = "jellyfish-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:b460f0bbde533f6f8624c1d7439e7f511b227ca18a58781e7f38f21961bd3f09"}, - {file = "jellyfish-1.1.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:7cd4b706cb6c4739846d78a398c67996cb451b09a732a625793cfe8d4f37af1b"}, - {file = "jellyfish-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:61cded25b47fe6b4c2ea9478c0a5a7531845218525a1b2627c67907ee9fe9b15"}, - {file = "jellyfish-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04bf33577059afba33227977e4a2c08ccb954eb77c849fde564af3e31ee509d9"}, - {file = "jellyfish-1.1.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:327496501a44fbdfe0602fdc6a7d4317a7598202f1f652c9c4f0a49529a385cd"}, - {file = "jellyfish-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0d1e6bac549cc2919b83d0ebe26566404ae3dfef5ef86229d1d826e3aeaba4b"}, - {file = "jellyfish-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b5fec525f15b39687dbfd75589333df4e6f6d15d3b1e0ada02bf206363dfd2af"}, - {file = "jellyfish-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8b2faf015e86a9efd5679b3abde83cbd8f3104b9e89445aa76b8481b206b3e67"}, - {file = "jellyfish-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b73efda07d52a1583afb8915a5f9feb017d0b60ae6d03071b21cc4f0a8a08ec1"}, - {file = "jellyfish-1.1.0-cp38-none-win32.whl", hash = "sha256:4a5199583a956d313be825972d7c14a0d9e455884acd12c03d05e4272c6c3bb8"}, - {file = "jellyfish-1.1.0-cp38-none-win_amd64.whl", hash = "sha256:755b68920a839f9e2b4813f0990a8dadcc9a24980bb29839f636ab5e36aaa256"}, - {file = "jellyfish-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e965241e54f9cb9be6fe8f7a1376b6cc61ff831de017bde9150156771820f669"}, - {file = "jellyfish-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e59a4c3bf0847dfff44195a4c250bc9e281b1c403f6212534ee36fc7c913dc1"}, - {file = "jellyfish-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:84fa4e72b7754060d352604e07ea89af98403b0436caad443276ae46135b7fd7"}, - {file = "jellyfish-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:125e9bfd1cc2c053eae3afa04fa142bbc8b3c1290a40a3416271b221f7e6bc87"}, - {file = "jellyfish-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4a8fff36462bf1bdaa339d58fadd7e79a63690902e6d7ddd65a84efc3a4cc6d"}, - {file = "jellyfish-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6b438b3d7f970cfd8f77b30b05694537a54c08f3775b35debae45ff5a469f1a5"}, - {file = "jellyfish-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:cf8d26c3735b5c2764cc53482dec14bb9b794ba829db3cd4c9a29d194a61cada"}, - {file = "jellyfish-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f341d0582ecac0aa73f380056dc8d25d8a60104f94debe8bf3f924a32a11588d"}, - {file = "jellyfish-1.1.0-cp39-none-win32.whl", hash = "sha256:49f2be59573b22d0adb615585ff66ca050198ec1f9f6784eec168bcd8137caf5"}, - {file = "jellyfish-1.1.0-cp39-none-win_amd64.whl", hash = "sha256:c58988138666b1cd860004c1afc7a09bb402e71e16e1f324be5c5d2b85fdfa3e"}, - {file = "jellyfish-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54effec80c7a5013bea8e2ea6cd87fdd35a2c5b35f86ccf69ec33f4212245f25"}, - {file = "jellyfish-1.1.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:12ae67e9016c9a173453023fd7b400ec002bbc106c12722d914c53951acfa190"}, - {file = "jellyfish-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd342f9d4fb0ead8a3c30fe26e442308fb665ca37f4aa97baf448d814469bf1"}, - {file = "jellyfish-1.1.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b0dc9f1bb335b6caa412c3d27028e25d315ef2bc993d425db93e451d7bc28056"}, - {file = "jellyfish-1.1.0-pp310-pypy310_pp73-musllinux_1_1_i686.whl", hash = "sha256:3f12cb59b3266e37ec47bd7c2c37faadc74ae8ccdc0190444daeafda3bd93da2"}, - {file = 
"jellyfish-1.1.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c7ea99734b7767243b5b98eca953f0d719b48b0d630af3965638699728ef7523"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1a90889fdb96ca27fc176e19a472c736e044d7190c924d9b7cfb0444881f921c"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c01cdf0d52d07e07fb0dfa2b3c03ca3b5a07088f08b38b06376ed228d842e501"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a4678a2623cc83fde7ff683ba78d308edf7e54a1c81dd295cdf525761b9fcc1"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b557b8e1fdad4a36f467ee44f5532a4a13e5300b93b2b5e70ff75d0d16458132"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5c34d12730d912bafab9f6daaa7fb2c6fa6afc0a8fc2c4cdc017df485d8d843"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d977a1e0fa3814d517b16d58a39a16e449bbd900b966dd921e770d0fd67bfa45"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-musllinux_1_1_i686.whl", hash = "sha256:6662152bf510cc7daef18965dd80cfa98710b479bda87a3170c86c4e0a6dc1ab"}, - {file = "jellyfish-1.1.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e447e3807c73aeda7b592919c105bf98ce0297a228aff68aafe4fe70a39b9a78"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca252e6088c6afe5f8138ce9f557157ad0329f0610914ba50729c641d57cd662"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b2512ab6a1625a168796faaa159e1d1b8847cb3d0cc2b1b09ae77ff0623e7d10"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b868da3186306efb48fbd8a8dee0a742a5c8bc9c4c74aa5003914a8600435ba8"}, - {file = 
"jellyfish-1.1.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bcc2cb1f007ddfad2f9175a8c1f934a8a0a6cc73187e2339fe1a4b3fd90b263e"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e17885647f3a0faf1518cf6b319865b2e84439cfc16a3ea14468513c0fba227"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:84ea543d05e6b7a7a704d45ebd9c753e2425da01fc5000ddc149031be541c4d5"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-musllinux_1_1_i686.whl", hash = "sha256:065a59ab0d02969d45e5ab4b0315ed6f5977a4eb8eaef24f2589e25b85822d18"}, - {file = "jellyfish-1.1.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f747f34071e1558151b342a2bf96b813e04b5384024ba7c50f3c907fbaab484f"}, - {file = "jellyfish-1.1.0.tar.gz", hash = "sha256:2a2eec494c81dc1eb23dfef543110dad1873538eccaffabea8520bdac8aecbc1"}, + {file = "jellyfish-1.2.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b35d4b5b688f759ffd075190a9850b04671bad14c5b37124eb43e99306ec16ea"}, + {file = "jellyfish-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b37b76ea338c4a473c34a9b9e1e033a78aafb9040a8c0eea579fc5805d8e4b46"}, + {file = "jellyfish-1.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:137cfcc26396d0f2e1265ac61f800bb921921ea722a43dd897e58190f767c474"}, + {file = "jellyfish-1.2.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab1bfea271ce4bda09d975080d5465cf5a8b127e7c0ea61ea3f972417a7a2193"}, + {file = "jellyfish-1.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2348f698f9c1d72023afc8d39939045421a01da9b7e3078e3029227e35f28419"}, + {file = "jellyfish-1.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4072e21ad4036af41bd57b447b1dda64fe60aa679cfa8854ba0a0338152439f1"}, + {file = "jellyfish-1.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:cf6cd68921f2bacc547ba1cf64ad0e76bc1727f3bab13bba2e5f5869aba038b1"}, + {file = "jellyfish-1.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:01647c12261bc1f7b102e918e7665497176d87f6fc96271439c8855872bc2606"}, + {file = "jellyfish-1.2.1-cp310-cp310-win32.whl", hash = "sha256:ddf05ea471da2808d77ecfa425d8884124b4754f4d483afa7703b6655530cf5c"}, + {file = "jellyfish-1.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:e4a210a960f3917da757b0581750b6e0a8db9acef68dafbc1b6e2ae39e847ba8"}, + {file = "jellyfish-1.2.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9913789a98ccf49213fbb1dabc597847a0ec33d3b0e151689498f4b38ba9be0f"}, + {file = "jellyfish-1.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4e36d9000d4f7e1a35689a74ec7749d27a216dfa6c47cac2e5ad3de8a523bd69"}, + {file = "jellyfish-1.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7853d2ed7d6929c029312ec849410f1ea7ae76ce72ad1140fb73f6e8a1e6aa4f"}, + {file = "jellyfish-1.2.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68080af234256ef943f0add6fc79816b0c643d8df291c17a85c1b6e45bdfbb96"}, + {file = "jellyfish-1.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c5acb213aa75a61bcfc176566e20f2503069667e760d83d403b59e115fef0dd"}, + {file = "jellyfish-1.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4b28fcefc0c3534277ff0306e6c10672fb050f4784b5f3be7037e80801569fb5"}, + {file = "jellyfish-1.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f69aeb08659a6c81d559bbe319075e3417434ae5b3a5e4a758d1c4055a03497a"}, + {file = "jellyfish-1.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:63770120cc3386dcc13bcc4df508ab281a6b14c3b2c0e33586439a6c40ee122f"}, + {file = "jellyfish-1.2.1-cp311-cp311-win32.whl", hash = "sha256:ecf62d4aad0baa8832ab60f96e7baedbe6558bd292597503d927e9c5bce745d8"}, + {file = "jellyfish-1.2.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:bd186c041d9be86c4fa5e2490943ce5d7f05b472f45d7f49426f259f3dd20bc4"}, + {file = "jellyfish-1.2.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:32a85b752cb51463face13e2b1797cfa617cd7fb7073f15feaa4020a86a346ce"}, + {file = "jellyfish-1.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:675ab43840488944899ca87f02d4813c1e32107e56afaba7489705a70214e8aa"}, + {file = "jellyfish-1.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c888f624d03e55e501bc438906505c79fb307d8da37a6dda18dd1ac2e6d5ea9c"}, + {file = "jellyfish-1.2.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2b56a1fd2c5126c4a3362ec4470291cdd3c7daa22f583da67e75e30dc425ce6"}, + {file = "jellyfish-1.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a3ccff843822e7f3ad6f91662488a3630724c8587976bce114f3c7238e8ffa1"}, + {file = "jellyfish-1.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:10da696747e2de0336180fd5ba77ef769a7c80f9743123545f7fc0251efbbcec"}, + {file = "jellyfish-1.2.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:c3c18f13175a9c90f3abd8805720b0eb3e10eca1d5d4e0cf57722b2a62d62016"}, + {file = "jellyfish-1.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0368596e176bf548b3be2979ff33e274fb6d5e13b2cebe85137b8b698b002a85"}, + {file = "jellyfish-1.2.1-cp312-cp312-win32.whl", hash = "sha256:451ddf4094e108e33d3b86d7817a7e20a2c5e6812d08c34ee22f6a595f38dcca"}, + {file = "jellyfish-1.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:15318c13070fe6d9caeb7e10f9cdf89ff47c9d20f05a9a2c0d3b5cb8062a7033"}, + {file = "jellyfish-1.2.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4b3e3223aaad74e18aacc74775e01815e68af810258ceea6fa6a81b19f384312"}, + {file = "jellyfish-1.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e967e67058b78189d2b20a9586c7720a05ec4a580d6a98c796cd5cd2b7b11303"}, + {file = "jellyfish-1.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:32581c50b34a09889b2d96796170e53da313a1e7fde32be63c82e50e7e791e3c"}, + {file = "jellyfish-1.2.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07b022412ebece96759006cb015d46b8218d7f896d8b327c6bbee784ddf38ed9"}, + {file = "jellyfish-1.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a49eb817eaa6591f43a31e5c93d79904de62537f029907ef88c050d781a638"}, + {file = "jellyfish-1.2.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e1b990fb15985571616f7f40a12d6fa062897b19fb5359b6dec3cd811d802c24"}, + {file = "jellyfish-1.2.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:dd895cf63fac0a9f11b524fff810d9a6081dcf3c518b34172ac8684eb504dd43"}, + {file = "jellyfish-1.2.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:6d2bac5982d7a08759ea487bfa00149e6aa8a3be7cd43c4ed1be1e3505425c69"}, + {file = "jellyfish-1.2.1-cp313-cp313-win32.whl", hash = "sha256:509355ebedec69a8bf0cc113a6bf9c01820d12fe2eea44f47dfa809faf2d5463"}, + {file = "jellyfish-1.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:9c747ae5c0fb4bd519f6abbfe4bd704b2f1c63fd4dd3dbb8d8864478974e1571"}, + {file = "jellyfish-1.2.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:212aaf177236192a735bbbf5938717aa8518d14a25b08b015e47e783e70be060"}, + {file = "jellyfish-1.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b8986d9768daddd5e87abf513ae168ea0afe690a444d4c82d5b1b14b0d045820"}, + {file = "jellyfish-1.2.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa0ba0946f3c274f6a87aaa3c631dc70a363bd46cceea828ce777e8db653b6f"}, + {file = "jellyfish-1.2.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6e76b23431a667cd485fb562428d1ad29bae9fdd0fcdfb5a51cc8087bae0e88c"}, + {file = "jellyfish-1.2.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a058f4c6a591d5e5a47569f5648a26303ba19c76a960fef7e0beba2aa959e52e"}, + {file = 
"jellyfish-1.2.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:6a49ce2a580edd3b16b69421137deef464e2f8907f9ef906d49950b1a52908c1"}, + {file = "jellyfish-1.2.1-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:c85aa2bc76a36d92a3197f406f86636664d5b323727dfec4fa2842a8a24a06ae"}, + {file = "jellyfish-1.2.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:29cfa8bfb72aacf2d611a3313b358ed4d4140fa3d3efcffea750c8e7f8acb1aa"}, + {file = "jellyfish-1.2.1-cp314-cp314-win32.whl", hash = "sha256:f121218dc33fb318c34ddd889dc7362606ce1316af2bb63b73cc1df81523ca34"}, + {file = "jellyfish-1.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:9a73b5c6425a70ebd440579a677eb4f03b327b2f59090db34e6c937aeea5aabd"}, + {file = "jellyfish-1.2.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5335f622458aa105289a8e358bc32ecd1b9634b6ffec3e77ea3577e49c297171"}, + {file = "jellyfish-1.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c51e565f85ce38cf9388c4f916d53888b0fa34788fcebe3aff3db24948e0960"}, + {file = "jellyfish-1.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14bbb30d988dec1d12183cf5d4621c908f98add2009c72a185e8c3e8d00b804f"}, + {file = "jellyfish-1.2.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9930e20f0e9f65ad1d57d98290c2be3abd75812d058815605f44a56056fb9a66"}, + {file = "jellyfish-1.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0028857c5381c9d55e21cc6cb0d7f9545c3a9a7bb7dbca3960fe0a898c691ac2"}, + {file = "jellyfish-1.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56da7632e029912af25e25422fae3b6df318400297d552791f4b21da6d815ed6"}, + {file = "jellyfish-1.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a3cab91020e3ff7565e55a611ec3e3257c093ac950d55778a48bfc8c57562b6e"}, + {file = "jellyfish-1.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0b21c1596ce283fd7ee954eb0eeb007d59e480364324bcd91ad55146e91f3936"}, + {file = "jellyfish-1.2.1-cp39-cp39-win32.whl", 
hash = "sha256:1098ce1f84ae3f147f0a18a6803ffb09b9c8cd5fedce42465643ca0b5c9d0224"}, + {file = "jellyfish-1.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:4b013876109d91fa6fc871ffa4e0dbfda11820c33dc4ad0e2967b3fc1187f804"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c499ea3a134130797c50e367687a6a46a12653c59af381bee92c41a5ab0bd55d"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:91cad49a4fb731b726afc5ae385a3217a7016ed88a04da40c131cff8136a5db5"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bda2275f31a64adf3483e39f7a4e2107f7dfe3a3f85f0d2c0cb6ae5fbe4a443"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98a133b40dc00cfda6609e1b0cb0ab0b77796fc2719aae886a12009514f73499"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa30c7b59bd1c5e105693108a6d7a98f3e7a1a59e23e15bc5897b91fd5849f5"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:db97d873f23b0c15b4ed911ece10e5cc0bb96cdc53666d5c3788bd0af81807f1"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-musllinux_1_1_i686.whl", hash = "sha256:393f609fd6139ce782e747e22c399483ffc58341009e6a97e39ffe5f5b2c674c"}, + {file = "jellyfish-1.2.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fb3c6e537cb4605c22895a8d4a10cdb26611ba2bbfc7f0b4c1d06bb9d8aad648"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:748dc45a0394fbe9120b8b3b9a39fab0967c7e2d6ecdd5304af018e774f80f96"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:13f1ac9caba22af10bfe42f674822643c0266009f882e0fe652079706dc5d13a"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1ffeeb6c78c45fbb6d2a22b0173fb8a6af849001d6c26fab49c525136dbd9734"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1354b558a0a16597b6032dd0af64bebd24994f7e7484cf14993320eb764b06cb"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5977810972c6f0b2e61252c4758fd5aee21abf663ff309881195a99d37daa94"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:536c80d8d4ec7f39cbb10b85d926ff96cef3cde4a83ca0991c07cd9835d5dc13"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-musllinux_1_1_i686.whl", hash = "sha256:21baa92d4a5112167721156f6d061c2ae105f2995b3a5e19cec6662928f0c439"}, + {file = "jellyfish-1.2.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68ea3ddd4dae1152a7f7155ef02a7bfad919611158d71b301f9aa167685819af"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d7be8021658b46b22500a77f1707901bd98fc210f185c229b81c74efd3c1baf2"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:bcdcd603a7737cd3f5a2ab10ce9b49844329deb81c2daafcd8131e54fc730205"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c28a4ae3e201e1c1b7bacacd40e2e76c4068b90c9ae3a0d525e0ac98206f1cc"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bebccd0652ac1c7e438ae1f451edefde63d14b3af6f6daa30c599919dcb92886"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05be396aebe3dce7a8cb2f97727ecdf99e86457c48e97190775dce33f8b7e39d"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:9d4448c874959ae012cda0f6d570ac0bd7f0fcf12007714eaebf86b86919b66f"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-musllinux_1_1_i686.whl", hash = 
"sha256:4a21d7eda5e6996772055f798e3fe1de1b33b3edad7f6cf0567097a21585a812"}, + {file = "jellyfish-1.2.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:a0ef6f0ecc085c1f8fddb048f538c8bb89989e5d470eab45d4e9bd48ee73a40d"}, + {file = "jellyfish-1.2.1.tar.gz", hash = "sha256:72d2fda61b23babe862018729be73c8b0dc12e3e6601f36f6e65d905e249f4db"}, ] [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, ] [package.dependencies] @@ -1041,20 +1363,20 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "joblib" -version = "1.4.2" +version = "1.5.2" description = "Lightweight pipelining with Python functions" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, - {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, + {file = "joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241"}, + {file = "joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55"}, ] [[package]] name = "langdetect" version = "1.0.9" description = "Language detection library ported from Google's language-detection." 
-optional = true +optional = false python-versions = "*" files = [ {file = "langdetect-1.0.9-py2-none-any.whl", hash = "sha256:7cbc0746252f19e76f77c0b1690aadf01963be835ef0cd4b56dddf2a8f1dfc2a"}, @@ -1149,289 +1471,401 @@ dev = ["changelist (==0.5)"] lint = ["pre-commit (==3.7.0)"] test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] +[[package]] +name = "libcst" +version = "1.8.6" +description = "A concrete syntax tree with AST-like properties for Python 3.0 through 3.14 programs." +optional = false +python-versions = ">=3.9" +files = [ + {file = "libcst-1.8.6-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a20c5182af04332cc94d8520792befda06d73daf2865e6dddc5161c72ea92cb9"}, + {file = "libcst-1.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36473e47cb199b7e6531d653ee6ffed057de1d179301e6c67f651f3af0b499d6"}, + {file = "libcst-1.8.6-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:06fc56335a45d61b7c1b856bfab4587b84cfe31e9d6368f60bb3c9129d900f58"}, + {file = "libcst-1.8.6-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6b23d14a7fc0addd9795795763af26b185deb7c456b1e7cc4d5228e69dab5ce8"}, + {file = "libcst-1.8.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:16cfe0cfca5fd840e1fb2c30afb628b023d3085b30c3484a79b61eae9d6fe7ba"}, + {file = "libcst-1.8.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:455f49a93aea4070132c30ebb6c07c2dea0ba6c1fde5ffde59fc45dbb9cfbe4b"}, + {file = "libcst-1.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:72cca15800ffc00ba25788e4626189fe0bc5fe2a0c1cb4294bce2e4df21cc073"}, + {file = "libcst-1.8.6-cp310-cp310-win_arm64.whl", hash = "sha256:6cad63e3a26556b020b634d25a8703b605c0e0b491426b3e6b9e12ed20f09100"}, + {file = "libcst-1.8.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3649a813660fbffd7bc24d3f810b1f75ac98bd40d9d6f56d1f0ee38579021073"}, + {file = "libcst-1.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0cbe17067055829607c5ba4afa46bfa4d0dd554c0b5a583546e690b7367a29b6"}, + {file = 
"libcst-1.8.6-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:59a7e388c57d21d63722018978a8ddba7b176e3a99bd34b9b84a576ed53f2978"}, + {file = "libcst-1.8.6-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:b6c1248cc62952a3a005792b10cdef2a4e130847be9c74f33a7d617486f7e532"}, + {file = "libcst-1.8.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6421a930b028c5ef4a943b32a5a78b7f1bf15138214525a2088f11acbb7d3d64"}, + {file = "libcst-1.8.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6d8b67874f2188399a71a71731e1ba2d1a2c3173b7565d1cc7ffb32e8fbaba5b"}, + {file = "libcst-1.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:b0d8c364c44ae343937f474b2e492c1040df96d94530377c2f9263fb77096e4f"}, + {file = "libcst-1.8.6-cp311-cp311-win_arm64.whl", hash = "sha256:5dcaaebc835dfe5755bc85f9b186fb7e2895dda78e805e577fef1011d51d5a5c"}, + {file = "libcst-1.8.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0c13d5bd3d8414a129e9dccaf0e5785108a4441e9b266e1e5e9d1f82d1b943c9"}, + {file = "libcst-1.8.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1472eeafd67cdb22544e59cf3bfc25d23dc94058a68cf41f6654ff4fcb92e09"}, + {file = "libcst-1.8.6-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:089c58e75cb142ec33738a1a4ea7760a28b40c078ab2fd26b270dac7d2633a4d"}, + {file = "libcst-1.8.6-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c9d7aeafb1b07d25a964b148c0dda9451efb47bbbf67756e16eeae65004b0eb5"}, + {file = "libcst-1.8.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:207481197afd328aa91d02670c15b48d0256e676ce1ad4bafb6dc2b593cc58f1"}, + {file = "libcst-1.8.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:375965f34cc6f09f5f809244d3ff9bd4f6cb6699f571121cebce53622e7e0b86"}, + {file = "libcst-1.8.6-cp312-cp312-win_amd64.whl", hash = "sha256:da95b38693b989eaa8d32e452e8261cfa77fe5babfef1d8d2ac25af8c4aa7e6d"}, + {file = "libcst-1.8.6-cp312-cp312-win_arm64.whl", hash = "sha256:bff00e1c766658adbd09a175267f8b2f7616e5ee70ce45db3d7c4ce6d9f6bec7"}, + 
{file = "libcst-1.8.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7445479ebe7d1aff0ee094ab5a1c7718e1ad78d33e3241e1a1ec65dcdbc22ffb"}, + {file = "libcst-1.8.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4fc3fef8a2c983e7abf5d633e1884c5dd6fa0dcb8f6e32035abd3d3803a3a196"}, + {file = "libcst-1.8.6-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:1a3a5e4ee870907aa85a4076c914ae69066715a2741b821d9bf16f9579de1105"}, + {file = "libcst-1.8.6-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6609291c41f7ad0bac570bfca5af8fea1f4a27987d30a1fa8b67fe5e67e6c78d"}, + {file = "libcst-1.8.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25eaeae6567091443b5374b4c7d33a33636a2d58f5eda02135e96fc6c8807786"}, + {file = "libcst-1.8.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04030ea4d39d69a65873b1d4d877def1c3951a7ada1824242539e399b8763d30"}, + {file = "libcst-1.8.6-cp313-cp313-win_amd64.whl", hash = "sha256:8066f1b70f21a2961e96bedf48649f27dfd5ea68be5cd1bed3742b047f14acde"}, + {file = "libcst-1.8.6-cp313-cp313-win_arm64.whl", hash = "sha256:c188d06b583900e662cd791a3f962a8c96d3dfc9b36ea315be39e0a4c4792ebf"}, + {file = "libcst-1.8.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c41c76e034a1094afed7057023b1d8967f968782433f7299cd170eaa01ec033e"}, + {file = "libcst-1.8.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5432e785322aba3170352f6e72b32bea58d28abd141ac37cc9b0bf6b7c778f58"}, + {file = "libcst-1.8.6-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:85b7025795b796dea5284d290ff69de5089fc8e989b25d6f6f15b6800be7167f"}, + {file = "libcst-1.8.6-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:536567441182a62fb706e7aa954aca034827b19746832205953b2c725d254a93"}, + {file = "libcst-1.8.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2f04d3672bde1704f383a19e8f8331521abdbc1ed13abb349325a02ac56e5012"}, + {file = "libcst-1.8.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:7f04febcd70e1e67917be7de513c8d4749d2e09206798558d7fe632134426ea4"}, + {file = "libcst-1.8.6-cp313-cp313t-win_amd64.whl", hash = "sha256:1dc3b897c8b0f7323412da3f4ad12b16b909150efc42238e19cbf19b561cc330"}, + {file = "libcst-1.8.6-cp313-cp313t-win_arm64.whl", hash = "sha256:44f38139fa95e488db0f8976f9c7ca39a64d6bc09f2eceef260aa1f6da6a2e42"}, + {file = "libcst-1.8.6-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:b188e626ce61de5ad1f95161b8557beb39253de4ec74fc9b1f25593324a0279c"}, + {file = "libcst-1.8.6-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:87e74f7d7dfcba9efa91127081e22331d7c42515f0a0ac6e81d4cf2c3ed14661"}, + {file = "libcst-1.8.6-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:3a926a4b42015ee24ddfc8ae940c97bd99483d286b315b3ce82f3bafd9f53474"}, + {file = "libcst-1.8.6-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:3f4fbb7f569e69fd9e89d9d9caa57ca42c577c28ed05062f96a8c207594e75b8"}, + {file = "libcst-1.8.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:08bd63a8ce674be431260649e70fca1d43f1554f1591eac657f403ff8ef82c7a"}, + {file = "libcst-1.8.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e00e275d4ba95d4963431ea3e409aa407566a74ee2bf309a402f84fc744abe47"}, + {file = "libcst-1.8.6-cp314-cp314-win_amd64.whl", hash = "sha256:fea5c7fa26556eedf277d4f72779c5ede45ac3018650721edd77fd37ccd4a2d4"}, + {file = "libcst-1.8.6-cp314-cp314-win_arm64.whl", hash = "sha256:bb9b4077bdf8857b2483879cbbf70f1073bc255b057ec5aac8a70d901bb838e9"}, + {file = "libcst-1.8.6-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:55ec021a296960c92e5a33b8d93e8ad4182b0eab657021f45262510a58223de1"}, + {file = "libcst-1.8.6-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ba9ab2b012fbd53b36cafd8f4440a6b60e7e487cd8b87428e57336b7f38409a4"}, + {file = "libcst-1.8.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c0a0cc80aebd8aa15609dd4d330611cbc05e9b4216bcaeabba7189f99ef07c28"}, + {file = "libcst-1.8.6-cp314-cp314t-manylinux_2_28_x86_64.whl", 
hash = "sha256:42a4f68121e2e9c29f49c97f6154e8527cd31021809cc4a941c7270aa64f41aa"}, + {file = "libcst-1.8.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8a434c521fadaf9680788b50d5c21f4048fa85ed19d7d70bd40549fbaeeecab1"}, + {file = "libcst-1.8.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6a65f844d813ab4ef351443badffa0ae358f98821561d19e18b3190f59e71996"}, + {file = "libcst-1.8.6-cp314-cp314t-win_amd64.whl", hash = "sha256:bdb14bc4d4d83a57062fed2c5da93ecb426ff65b0dc02ddf3481040f5f074a82"}, + {file = "libcst-1.8.6-cp314-cp314t-win_arm64.whl", hash = "sha256:819c8081e2948635cab60c603e1bbdceccdfe19104a242530ad38a36222cb88f"}, + {file = "libcst-1.8.6-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cb2679ef532f9fa5be5c5a283b6357cb6e9888a8dd889c4bb2b01845a29d8c0b"}, + {file = "libcst-1.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:203ec2a83f259baf686b9526268cd23d048d38be5589594ef143aee50a4faf7e"}, + {file = "libcst-1.8.6-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6366ab2107425bf934b0c83311177f2a371bfc757ee8c6ad4a602d7cbcc2f363"}, + {file = "libcst-1.8.6-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:6aa11df6c58812f731172b593fcb485d7ba09ccc3b52fea6c7f26a43377dc748"}, + {file = "libcst-1.8.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:351ab879c2fd20d9cb2844ed1ea3e617ed72854d3d1e2b0880ede9c3eea43ba8"}, + {file = "libcst-1.8.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:98fa1ca321c81fb1f02e5c43f956ca543968cc1a30b264fd8e0a2e1b0b0bf106"}, + {file = "libcst-1.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:25fc7a1303cad7639ad45ec38c06789b4540b7258e9a108924aaa2c132af4aca"}, + {file = "libcst-1.8.6-cp39-cp39-win_arm64.whl", hash = "sha256:4d7bbdd35f3abdfb5ac5d1a674923572dab892b126a58da81ff2726102d6ec2e"}, + {file = "libcst-1.8.6.tar.gz", hash = "sha256:f729c37c9317126da9475bdd06a7208eb52fcbd180a6341648b45a56b4ba708b"}, +] + +[package.dependencies] +pyyaml = [ + {version = ">=5.2", markers = "python_version < \"3.13\""}, 
+ {version = ">=6.0.3", markers = "python_version >= \"3.14\""}, +] +pyyaml-ft = {version = ">=8.0.0", markers = "python_version == \"3.13\""} + [[package]] name = "librosa" -version = "0.10.2.post1" +version = "0.11.0" description = "Python module for audio and music processing" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "librosa-0.10.2.post1-py3-none-any.whl", hash = "sha256:dc882750e8b577a63039f25661b7e39ec4cfbacc99c1cffba666cd664fb0a7a0"}, - {file = "librosa-0.10.2.post1.tar.gz", hash = "sha256:cd99f16717cbcd1e0983e37308d1db46a6f7dfc2e396e5a9e61e6821e44bd2e7"}, + {file = "librosa-0.11.0-py3-none-any.whl", hash = "sha256:0b6415c4fd68bff4c29288abe67c6d80b587e0e1e2cfb0aad23e4559504a7fa1"}, + {file = "librosa-0.11.0.tar.gz", hash = "sha256:f5ed951ca189b375bbe2e33b2abd7e040ceeee302b9bbaeeffdfddb8d0ace908"}, ] [package.dependencies] audioread = ">=2.1.9" decorator = ">=4.3.0" -joblib = ">=0.14" -lazy-loader = ">=0.1" +joblib = ">=1.0" +lazy_loader = ">=0.1" msgpack = ">=1.0" numba = ">=0.51.0" -numpy = ">=1.20.3,<1.22.0 || >1.22.0,<1.22.1 || >1.22.1,<1.22.2 || >1.22.2" +numpy = ">=1.22.3" pooch = ">=1.1" -scikit-learn = ">=0.20.0" -scipy = ">=1.2.0" +scikit-learn = ">=1.1.0" +scipy = ">=1.6.0" soundfile = ">=0.12.1" soxr = ">=0.3.2" -typing-extensions = ">=4.1.1" +standard-aifc = {version = "*", markers = "python_version >= \"3.13\""} +standard-sunau = {version = "*", markers = "python_version >= \"3.13\""} +typing_extensions = ">=4.1.1" [package.extras] display = ["matplotlib (>=3.5.0)"] -docs = ["ipython (>=7.0)", "matplotlib (>=3.5.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (>=1.2.0)", "sphinxcontrib-svg2pdfconverter"] +docs = ["ipython (>=7.0)", "matplotlib (>=3.5.0)", "mir_eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-copybutton 
(>=0.5.2)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx_rtd_theme (>=1.2.0)", "sphinxcontrib-googleanalytics (>=0.4)", "sphinxcontrib-svg2pdfconverter"] tests = ["matplotlib (>=3.5.0)", "packaging (>=20.0)", "pytest", "pytest-cov", "pytest-mpl", "resampy (>=0.2.2)", "samplerate", "types-decorator"] [[package]] name = "llvmlite" -version = "0.43.0" +version = "0.45.1" description = "lightweight wrapper around basic LLVM functionality" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761"}, - {file = "llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc"}, - {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead"}, - {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a"}, - {file = "llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed"}, - {file = "llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98"}, - {file = "llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57"}, - {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2"}, - {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749"}, - {file = 
"llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91"}, - {file = "llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7"}, - {file = "llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7"}, - {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f"}, - {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844"}, - {file = "llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9"}, - {file = "llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c"}, - {file = "llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8"}, - {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a"}, - {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867"}, - {file = "llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4"}, - {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"}, + {file = "llvmlite-0.45.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:1b1af0c910af0978aa55fa4f60bbb3e9f39b41e97c2a6d94d199897be62ba07a"}, + {file = "llvmlite-0.45.1-cp310-cp310-macosx_11_0_arm64.whl", 
hash = "sha256:02a164db2d79088bbd6e0d9633b4fe4021d6379d7e4ac7cc85ed5f44b06a30c5"}, + {file = "llvmlite-0.45.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f2d47f34e4029e6df3395de34cc1c66440a8d72712993a6e6168db228686711b"}, + {file = "llvmlite-0.45.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f7319e5f9f90720578a7f56fbc805bdfb4bc071b507c7611f170d631c3c0f1e0"}, + {file = "llvmlite-0.45.1-cp310-cp310-win_amd64.whl", hash = "sha256:4edb62e685867799e336723cb9787ec6598d51d0b1ed9af0f38e692aa757e898"}, + {file = "llvmlite-0.45.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:60f92868d5d3af30b4239b50e1717cb4e4e54f6ac1c361a27903b318d0f07f42"}, + {file = "llvmlite-0.45.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98baab513e19beb210f1ef39066288784839a44cd504e24fff5d17f1b3cf0860"}, + {file = "llvmlite-0.45.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3adc2355694d6a6fbcc024d59bb756677e7de506037c878022d7b877e7613a36"}, + {file = "llvmlite-0.45.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2f3377a6db40f563058c9515dedcc8a3e562d8693a106a28f2ddccf2c8fcf6ca"}, + {file = "llvmlite-0.45.1-cp311-cp311-win_amd64.whl", hash = "sha256:f9c272682d91e0d57f2a76c6d9ebdfccc603a01828cdbe3d15273bdca0c3363a"}, + {file = "llvmlite-0.45.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:28e763aba92fe9c72296911e040231d486447c01d4f90027c8e893d89d49b20e"}, + {file = "llvmlite-0.45.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1a53f4b74ee9fd30cb3d27d904dadece67a7575198bd80e687ee76474620735f"}, + {file = "llvmlite-0.45.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b3796b1b1e1c14dcae34285d2f4ea488402fbd2c400ccf7137603ca3800864f"}, + {file = "llvmlite-0.45.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:779e2f2ceefef0f4368548685f0b4adde34e5f4b457e90391f570a10b348d433"}, + {file = 
"llvmlite-0.45.1-cp312-cp312-win_amd64.whl", hash = "sha256:9e6c9949baf25d9aa9cd7cf0f6d011b9ca660dd17f5ba2b23bdbdb77cc86b116"}, + {file = "llvmlite-0.45.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:d9ea9e6f17569a4253515cc01dade70aba536476e3d750b2e18d81d7e670eb15"}, + {file = "llvmlite-0.45.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:c9f3cadee1630ce4ac18ea38adebf2a4f57a89bd2740ce83746876797f6e0bfb"}, + {file = "llvmlite-0.45.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:57c48bf2e1083eedbc9406fb83c4e6483017879714916fe8be8a72a9672c995a"}, + {file = "llvmlite-0.45.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3aa3dfceda4219ae39cf18806c60eeb518c1680ff834b8b311bd784160b9ce40"}, + {file = "llvmlite-0.45.1-cp313-cp313-win_amd64.whl", hash = "sha256:080e6f8d0778a8239cd47686d402cb66eb165e421efa9391366a9b7e5810a38b"}, + {file = "llvmlite-0.45.1.tar.gz", hash = "sha256:09430bb9d0bb58fc45a45a57c7eae912850bedc095cd0810a57de109c69e1c32"}, ] [[package]] name = "lxml" -version = "5.3.0" +version = "6.0.2" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
optional = true -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, - {file = 
"lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, - {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, - {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, - {file = 
"lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, - {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, - {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, - {file = 
"lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, - {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, - {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, - {file = 
"lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, - {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, - {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, - {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, - {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, - {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, - {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, - {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, - {file = 
"lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, - {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, - {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, - {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, - {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, - {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, - {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, - {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, - {file = 
"lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, - {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, + {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, + {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f952dacaa552f3bb8834908dddd500ba7d508e6ea6eb8c52eb2d28f48ca06a31"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:71695772df6acea9f3c0e59e44ba8ac50c4f125217e84aab21074a1a55e7e5c9"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:17f68764f35fd78d7c4cc4ef209a184c38b65440378013d24b8aecd327c3e0c8"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:058027e261afed589eddcfe530fcc6f3402d7fd7e89bfd0532df82ebc1563dba"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8ffaeec5dfea5881d4c9d8913a32d10cfe3923495386106e4a24d45300ef79c"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:f2e3b1a6bb38de0bc713edd4d612969dd250ca8b724be8d460001a387507021c"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d6690ec5ec1cce0385cb20896b16be35247ac8c2046e493d03232f1c2414d321"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2a50c3c1d11cad0ebebbac357a97b26aa79d2bcaf46f256551152aa85d3a4d1"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3efe1b21c7801ffa29a1112fab3b0f643628c30472d507f39544fd48e9549e34"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:59c45e125140b2c4b33920d21d83681940ca29f0b83f8629ea1a2196dc8cfe6a"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:452b899faa64f1805943ec1c0c9ebeaece01a1af83e130b69cdefeda180bb42c"}, + {file = "lxml-6.0.2-cp310-cp310-win32.whl", hash = "sha256:1e786a464c191ca43b133906c6903a7e4d56bef376b75d97ccbb8ec5cf1f0a4b"}, + {file = "lxml-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:dacf3c64ef3f7440e3167aa4b49aa9e0fb99e0aa4f9ff03795640bf94531bcb0"}, + {file = "lxml-6.0.2-cp310-cp310-win_arm64.whl", hash = "sha256:45f93e6f75123f88d7f0cfd90f2d05f441b808562bf0bc01070a00f53f5028b5"}, + {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:13e35cbc684aadf05d8711a5d1b5857c92e5e580efa9a0d2be197199c8def607"}, + {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b1675e096e17c6fe9c0e8c81434f5736c0739ff9ac6123c87c2d452f48fc938"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8ac6e5811ae2870953390452e3476694196f98d447573234592d30488147404d"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5aa0fc67ae19d7a64c3fe725dc9a1bb11f80e01f78289d05c6f62545affec438"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de496365750cc472b4e7902a485d3f152ecf57bd3ba03ddd5578ed8ceb4c5964"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:200069a593c5e40b8f6fc0d84d86d970ba43138c3e68619ffa234bc9bb806a4d"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d2de809c2ee3b888b59f995625385f74629707c9355e0ff856445cdcae682b7"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:b2c3da8d93cf5db60e8858c17684c47d01fee6405e554fb55018dd85fc23b178"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:442de7530296ef5e188373a1ea5789a46ce90c4847e597856570439621d9c553"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2593c77efde7bfea7f6389f1ab249b15ed4aa5bc5cb5131faa3b843c429fbedb"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:3e3cb08855967a20f553ff32d147e14329b3ae70ced6edc2f282b94afbc74b2a"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ed6c667fcbb8c19c6791bbf40b7268ef8ddf5a96940ba9404b9f9a304832f6c"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b8f18914faec94132e5b91e69d76a5c1d7b0c73e2489ea8929c4aaa10b76bbf7"}, + {file = "lxml-6.0.2-cp311-cp311-win32.whl", hash = 
"sha256:6605c604e6daa9e0d7f0a2137bdc47a2e93b59c60a65466353e37f8272f47c46"}, + {file = "lxml-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e5867f2651016a3afd8dd2c8238baa66f1e2802f44bc17e236f547ace6647078"}, + {file = "lxml-6.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:4197fb2534ee05fd3e7afaab5d8bfd6c2e186f65ea7f9cd6a82809c887bd1285"}, + {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456"}, + {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092"}, + {file = 
"lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322"}, + {file = "lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849"}, + {file = "lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f"}, + {file = "lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6"}, + {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9b33d21594afab46f37ae58dfadd06636f154923c4e8a4d754b0127554eb2e77"}, + {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c8963287d7a4c5c9a432ff487c52e9c5618667179c18a204bdedb27310f022f"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1941354d92699fb5ffe6ed7b32f9649e43c2feb4b97205f75866f7d21aa91452"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb2f6ca0ae2d983ded09357b84af659c954722bbf04dea98030064996d156048"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a12d704f180a902d7fa778c6d71f36ceb7b0d317f34cdc76a5d05aa1dd1df"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = 
"sha256:6ec0e3f745021bfed19c456647f0298d60a24c9ff86d9d051f52b509663feeb1"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:846ae9a12d54e368933b9759052d6206a9e8b250291109c48e350c1f1f49d916"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef9266d2aa545d7374938fb5c484531ef5a2ec7f2d573e62f8ce722c735685fd"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:4077b7c79f31755df33b795dc12119cb557a0106bfdab0d2c2d97bd3cf3dffa6"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a7c5d5e5f1081955358533be077166ee97ed2571d6a66bdba6ec2f609a715d1a"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8f8d0cbd0674ee89863a523e6994ac25fd5be9c8486acfc3e5ccea679bad2679"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2cbcbf6d6e924c28f04a43f3b6f6e272312a090f269eff68a2982e13e5d57659"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfb874cfa53340009af6bdd7e54ebc0d21012a60a4e65d927c2e477112e63484"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fb8dae0b6b8b7f9e96c26fdd8121522ce5de9bb5538010870bd538683d30e9a2"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:358d9adae670b63e95bc59747c72f4dc97c9ec58881d4627fe0120da0f90d314"}, + {file = "lxml-6.0.2-cp313-cp313-win32.whl", hash = "sha256:e8cd2415f372e7e5a789d743d133ae474290a90b9023197fd78f32e2dc6873e2"}, + {file = "lxml-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:b30d46379644fbfc3ab81f8f82ae4de55179414651f110a1514f0b1f8f6cb2d7"}, + {file = "lxml-6.0.2-cp313-cp313-win_arm64.whl", hash = "sha256:13dcecc9946dca97b11b7c40d29fba63b55ab4170d3c0cf8c0c164343b9bfdcf"}, + {file = "lxml-6.0.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:b0c732aa23de8f8aec23f4b580d1e52905ef468afb4abeafd3fec77042abb6fe"}, + {file 
= "lxml-6.0.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4468e3b83e10e0317a89a33d28f7aeba1caa4d1a6fd457d115dd4ffe90c5931d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:abd44571493973bad4598a3be7e1d807ed45aa2adaf7ab92ab7c62609569b17d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:370cd78d5855cfbffd57c422851f7d3864e6ae72d0da615fca4dad8c45d375a5"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:901e3b4219fa04ef766885fb40fa516a71662a4c61b80c94d25336b4934b71c0"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:a4bf42d2e4cf52c28cc1812d62426b9503cdb0c87a6de81442626aa7d69707ba"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2c7fdaa4d7c3d886a42534adec7cfac73860b89b4e5298752f60aa5984641a0"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98a5e1660dc7de2200b00d53fa00bcd3c35a3608c305d45a7bbcaf29fa16e83d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:dc051506c30b609238d79eda75ee9cab3e520570ec8219844a72a46020901e37"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8799481bbdd212470d17513a54d568f44416db01250f49449647b5ab5b5dccb9"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9261bb77c2dab42f3ecd9103951aeca2c40277701eb7e912c545c1b16e0e4917"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:65ac4a01aba353cfa6d5725b95d7aed6356ddc0a3cd734de00124d285b04b64f"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b22a07cbb82fea98f8a2fd814f3d1811ff9ed76d0fc6abc84eb21527596e7cc8"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = 
"sha256:d759cdd7f3e055d6bc8d9bec3ad905227b2e4c785dc16c372eb5b5e83123f48a"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:945da35a48d193d27c188037a05fec5492937f66fb1958c24fc761fb9d40d43c"}, + {file = "lxml-6.0.2-cp314-cp314-win32.whl", hash = "sha256:be3aaa60da67e6153eb15715cc2e19091af5dc75faef8b8a585aea372507384b"}, + {file = "lxml-6.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa25afbadead523f7001caf0c2382afd272c315a033a7b06336da2637d92d6ed"}, + {file = "lxml-6.0.2-cp314-cp314-win_arm64.whl", hash = "sha256:063eccf89df5b24e361b123e257e437f9e9878f425ee9aae3144c77faf6da6d8"}, + {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:6162a86d86893d63084faaf4ff937b3daea233e3682fb4474db07395794fa80d"}, + {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:414aaa94e974e23a3e92e7ca5b97d10c0cf37b6481f50911032c69eeb3991bba"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48461bd21625458dd01e14e2c38dd0aea69addc3c4f960c30d9f59d7f93be601"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:25fcc59afc57d527cfc78a58f40ab4c9b8fd096a9a3f964d2781ffb6eb33f4ed"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5179c60288204e6ddde3f774a93350177e08876eaf3ab78aa3a3649d43eb7d37"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:967aab75434de148ec80597b75062d8123cadf2943fb4281f385141e18b21338"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d100fcc8930d697c6561156c6810ab4a508fb264c8b6779e6e61e2ed5e7558f9"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ca59e7e13e5981175b8b3e4ab84d7da57993eeff53c07764dcebda0d0e64ecd"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = 
"sha256:957448ac63a42e2e49531b9d6c0fa449a1970dbc32467aaad46f11545be9af1d"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b7fc49c37f1786284b12af63152fe1d0990722497e2d5817acfe7a877522f9a9"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e19e0643cc936a22e837f79d01a550678da8377d7d801a14487c10c34ee49c7e"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:1db01e5cf14345628e0cbe71067204db658e2fb8e51e7f33631f5f4735fefd8d"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:875c6b5ab39ad5291588aed6925fac99d0097af0dd62f33c7b43736043d4a2ec"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:cdcbed9ad19da81c480dfd6dd161886db6096083c9938ead313d94b30aadf272"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:80dadc234ebc532e09be1975ff538d154a7fa61ea5031c03d25178855544728f"}, + {file = "lxml-6.0.2-cp314-cp314t-win32.whl", hash = "sha256:da08e7bb297b04e893d91087df19638dc7a6bb858a954b0cc2b9f5053c922312"}, + {file = "lxml-6.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:252a22982dca42f6155125ac76d3432e548a7625d56f5a273ee78a5057216eca"}, + {file = "lxml-6.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:bb4c1847b303835d89d785a18801a883436cdfd5dc3d62947f9c49e24f0f5a2c"}, + {file = "lxml-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a656ca105115f6b766bba324f23a67914d9c728dafec57638e2b92a9dcd76c62"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c54d83a2188a10ebdba573f16bd97135d06c9ef60c3dc495315c7a28c80a263f"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:1ea99340b3c729beea786f78c38f60f4795622f36e305d9c9be402201efdc3b7"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:af85529ae8d2a453feee4c780d9406a5e3b17cee0dd75c18bd31adcd584debc3"}, + {file = "lxml-6.0.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fe659f6b5d10fb5a17f00a50eb903eb277a71ee35df4615db573c069bcf967ac"}, + {file = "lxml-6.0.2-cp38-cp38-win32.whl", hash = "sha256:5921d924aa5468c939d95c9814fa9f9b5935a6ff4e679e26aaf2951f74043512"}, + {file = "lxml-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:0aa7070978f893954008ab73bb9e3c24a7c56c054e00566a21b553dc18105fca"}, + {file = "lxml-6.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2c8458c2cdd29589a8367c09c8f030f1d202be673f0ca224ec18590b3b9fb694"}, + {file = "lxml-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fee0851639d06276e6b387f1c190eb9d7f06f7f53514e966b26bae46481ec90"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b2142a376b40b6736dfc214fd2902409e9e3857eff554fed2d3c60f097e62a62"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6b5b39cc7e2998f968f05309e666103b53e2edd01df8dc51b90d734c0825444"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4aec24d6b72ee457ec665344a29acb2d35937d5192faebe429ea02633151aad"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:b42f4d86b451c2f9d06ffb4f8bbc776e04df3ba070b9fe2657804b1b40277c48"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cdaefac66e8b8f30e37a9b4768a391e1f8a16a7526d5bc77a7928408ef68e93"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:b738f7e648735714bbb82bdfd030203360cfeab7f6e8a34772b3c8c8b820568c"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:daf42de090d59db025af61ce6bdb2521f0f102ea0e6ea310f13c17610a97da4c"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:66328dabea70b5ba7e53d94aa774b733cf66686535f3bc9250a7aab53a91caaf"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:e237b807d68a61fc3b1e845407e27e5eb8ef69bc93fe8505337c1acb4ee300b6"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:ac02dc29fd397608f8eb15ac1610ae2f2f0154b03f631e6d724d9e2ad4ee2c84"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:817ef43a0c0b4a77bd166dc9a09a555394105ff3374777ad41f453526e37f9cb"}, + {file = "lxml-6.0.2-cp39-cp39-win32.whl", hash = "sha256:bc532422ff26b304cfb62b328826bd995c96154ffd2bac4544f37dbb95ecaa8f"}, + {file = "lxml-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:995e783eb0374c120f528f807443ad5a83a656a8624c467ea73781fc5f8a8304"}, + {file = "lxml-6.0.2-cp39-cp39-win_arm64.whl", hash = "sha256:08b9d5e803c2e4725ae9e8559ee880e5328ed61aa0935244e0515d7d9dbec0aa"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e748d4cf8fef2526bb2a589a417eba0c8674e29ffcb570ce2ceca44f1e567bf6"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4ddb1049fa0579d0cbd00503ad8c58b9ab34d1254c77bc6a5576d96ec7853dba"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cb233f9c95f83707dae461b12b720c1af9c28c2d19208e1be03387222151daf5"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc456d04db0515ce3320d714a1eac7a97774ff0849e7718b492d957da4631dd4"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2613e67de13d619fd283d58bda40bff0ee07739f624ffee8b13b631abf33083d"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:24a8e756c982c001ca8d59e87c80c4d9dcd4d9b44a4cbeb8d9be4482c514d41d"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:1c06035eafa8404b5cf475bb37a9f6088b0aca288d4ccc9d69389750d5543700"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c7d13103045de1bdd6fe5d61802565f1a3537d70cd3abf596aa0af62761921ee"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a3c150a95fbe5ac91de323aa756219ef9cf7fde5a3f00e2281e30f33fa5fa4f"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60fa43be34f78bebb27812ed90f1925ec99560b0fa1decdb7d12b84d857d31e9"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:21c73b476d3cfe836be731225ec3421fa2f048d84f6df6a8e70433dff1376d5a"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e"}, + {file = "lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] -html-clean = ["lxml-html-clean"] +html-clean = ["lxml_html_clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.11)"] [[package]] name = "markupsafe" -version = "3.0.2" +version = "3.0.3" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" files = [ - {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, - {file = 
"MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, - {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", 
hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, - {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, 
+ {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = 
"markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = 
"markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = 
"sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = 
"markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] [[package]] @@ -1453,92 +1887,85 @@ mutagen 
= ">=1.46" test = ["tox"] [[package]] -name = "mock" -version = "5.1.0" -description = "Rolling backport of unittest.mock for all Pythons" -optional = false -python-versions = ">=3.6" +name = "more-itertools" +version = "10.8.0" +description = "More routines for operating on iterables, beyond itertools" +optional = true +python-versions = ">=3.9" files = [ - {file = "mock-5.1.0-py3-none-any.whl", hash = "sha256:18c694e5ae8a208cdb3d2c20a993ca1a7b0efa258c247a1e565150f477f83744"}, - {file = "mock-5.1.0.tar.gz", hash = "sha256:5e96aad5ccda4718e0a229ed94b2024df75cc2d55575ba5762d31f5767b8767d"}, + {file = "more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b"}, + {file = "more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd"}, ] -[package.extras] -build = ["blurb", "twine", "wheel"] -docs = ["sphinx"] -test = ["pytest", "pytest-cov"] - [[package]] name = "msgpack" -version = "1.1.0" +version = "1.1.2" description = "MessagePack serializer" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, - {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, - {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, - {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, - {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, - {file = 
"msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, - {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, - {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, - {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, - {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, - {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, - {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, - {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, - {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, - {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, - {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, - {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, - {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, - {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, - {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, - {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, - {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, - {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, - {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, - {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, - {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, - {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, - {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, - {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, - {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, - {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, - {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, - {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, - {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, - {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, - {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, - {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, - {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, - {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, - {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, - {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, - {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, - {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, - {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, - {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, - {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, - {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, - {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, - {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, - {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, - {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, - {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, - {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, - {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, - {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, - {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, - {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, - {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, - {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, - {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, - {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, - {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, - {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, - {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, + {file = "msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2"}, + {file = "msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f"}, + {file = "msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9"}, + {file = "msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e"}, + {file = "msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e"}, + {file = "msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68"}, + {file = "msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620"}, + {file = "msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029"}, + {file = "msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b"}, + {file = "msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e"}, + {file = "msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162"}, + {file = 
"msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794"}, + {file = "msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c"}, + {file = "msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9"}, + {file = "msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2"}, + {file = "msgpack-1.1.2-cp314-cp314-win32.whl", hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717"}, + {file = "msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b"}, + {file = "msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27"}, + {file = "msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ea5405c46e690122a76531ab97a079e184c0daf491e588592d6a23d3e32af99e"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fba231af7a933400238cb357ecccf8ab5d51535ea95d94fc35b7806218ff844"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a8f6e7d30253714751aa0b0c84ae28948e852ee7fb0524082e6716769124bc23"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94fd7dc7d8cb0a54432f296f2246bc39474e017204ca6f4ff345941d4ed285a7"}, + {file = 
"msgpack-1.1.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:350ad5353a467d9e3b126d8d1b90fe05ad081e2e1cef5753f8c345217c37e7b8"}, + {file = "msgpack-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6bde749afe671dc44893f8d08e83bf475a1a14570d67c4bb5cec5573463c8833"}, + {file = "msgpack-1.1.2-cp39-cp39-win32.whl", hash = "sha256:ad09b984828d6b7bb52d1d1d0c9be68ad781fa004ca39216c8a1e63c0f34ba3c"}, + {file = "msgpack-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:67016ae8c8965124fdede9d3769528ad8284f14d635337ffa6a713a580f6c030"}, + {file = "msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e"}, ] [[package]] @@ -1557,17 +1984,6 @@ check = ["check-manifest", "flake8", "flake8-black", "isort (>=5.0.3)", "pygment test = ["coverage[toml] (>=5.2)", "coveralls (>=2.1.1)", "hypothesis", "pyannotate", "pytest", "pytest-cov"] type = ["mypy", "mypy-extensions"] -[[package]] -name = "musicbrainzngs" -version = "0.7.1" -description = "Python bindings for the MusicBrainz NGS and the Cover Art Archive webservices" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "musicbrainzngs-0.7.1-py2.py3-none-any.whl", hash = "sha256:e841a8f975104c0a72290b09f59326050194081a5ae62ee512f41915090e1a10"}, - {file = "musicbrainzngs-0.7.1.tar.gz", hash = "sha256:ab1c0100fd0b305852e65f2ed4113c6de12e68afd55186987b8ed97e0f98e627"}, -] - [[package]] name = "mutagen" version = "1.47.0" @@ -1581,49 +1997,56 @@ files = [ [[package]] name = "mypy" -version = "1.13.0" +version = "1.18.2" description = "Optional static typing for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, - {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, 
- {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, - {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, - {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, - {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, - {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, - {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, - {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, - {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, - {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, - {file = 
"mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, - {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, - {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, - {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, - {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, - {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, - {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, - {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, - {file = 
"mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, - {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, - {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, - {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, + {file = "mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c"}, + {file = "mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e"}, + {file = "mypy-1.18.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448acd386266989ef11662ce3c8011fd2a7b632e0ec7d61a98edd8e27472225b"}, + {file = "mypy-1.18.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f9e171c465ad3901dc652643ee4bffa8e9fef4d7d0eece23b428908c77a76a66"}, + {file = "mypy-1.18.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:592ec214750bc00741af1f80cbf96b5013d81486b7bb24cb052382c19e40b428"}, + {file = "mypy-1.18.2-cp310-cp310-win_amd64.whl", hash = "sha256:7fb95f97199ea11769ebe3638c29b550b5221e997c63b14ef93d2e971606ebed"}, + {file = "mypy-1.18.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:807d9315ab9d464125aa9fcf6d84fde6e1dc67da0b6f80e7405506b8ac72bc7f"}, + {file = "mypy-1.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:776bb00de1778caf4db739c6e83919c1d85a448f71979b6a0edd774ea8399341"}, + {file = "mypy-1.18.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1379451880512ffce14505493bd9fe469e0697543717298242574882cf8cdb8d"}, + {file = 
"mypy-1.18.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1331eb7fd110d60c24999893320967594ff84c38ac6d19e0a76c5fd809a84c86"}, + {file = "mypy-1.18.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ca30b50a51e7ba93b00422e486cbb124f1c56a535e20eff7b2d6ab72b3b2e37"}, + {file = "mypy-1.18.2-cp311-cp311-win_amd64.whl", hash = "sha256:664dc726e67fa54e14536f6e1224bcfce1d9e5ac02426d2326e2bb4e081d1ce8"}, + {file = "mypy-1.18.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33eca32dd124b29400c31d7cf784e795b050ace0e1f91b8dc035672725617e34"}, + {file = "mypy-1.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3c47adf30d65e89b2dcd2fa32f3aeb5e94ca970d2c15fcb25e297871c8e4764"}, + {file = "mypy-1.18.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d6c838e831a062f5f29d11c9057c6009f60cb294fea33a98422688181fe2893"}, + {file = "mypy-1.18.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01199871b6110a2ce984bde85acd481232d17413868c9807e95c1b0739a58914"}, + {file = "mypy-1.18.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a2afc0fa0b0e91b4599ddfe0f91e2c26c2b5a5ab263737e998d6817874c5f7c8"}, + {file = "mypy-1.18.2-cp312-cp312-win_amd64.whl", hash = "sha256:d8068d0afe682c7c4897c0f7ce84ea77f6de953262b12d07038f4d296d547074"}, + {file = "mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc"}, + {file = "mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e"}, + {file = "mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986"}, + {file = "mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d"}, + {file = "mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba"}, + {file = "mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544"}, + {file = "mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce"}, + {file = "mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d"}, + {file = "mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c"}, + {file = "mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb"}, + {file = "mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075"}, + {file = "mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf"}, + {file = "mypy-1.18.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25a9c8fb67b00599f839cf472713f54249a62efd53a54b565eb61956a7e3296b"}, + {file = "mypy-1.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2b9c7e284ee20e7598d6f42e13ca40b4928e6957ed6813d1ab6348aa3f47133"}, + {file = "mypy-1.18.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6985ed057513e344e43a26cc1cd815c7a94602fb6a3130a34798625bc2f07b6"}, + {file = "mypy-1.18.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f27105f1525ec024b5c630c0b9f36d5c1cc4d447d61fe51ff4bd60633f47ac"}, + {file = 
"mypy-1.18.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:030c52d0ea8144e721e49b1f68391e39553d7451f0c3f8a7565b59e19fcb608b"}, + {file = "mypy-1.18.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa5e07ac1a60a253445797e42b8b2963c9675563a94f11291ab40718b016a7a0"}, + {file = "mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e"}, + {file = "mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -1634,112 +2057,220 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "1.0.0" +version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, ] +[[package]] +name = "natsort" +version = "8.4.0" +description = "Simple yet flexible natural sorting in Python." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, + {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, +] + +[package.extras] +fast = ["fastnumbers (>=2.0.0)"] +icu = ["PyICU (>=1.0.0)"] + [[package]] name = "numba" -version = "0.60.0" +version = "0.62.1" description = "compiling Python code using LLVM" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651"}, - {file = "numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b"}, - {file = "numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781"}, - {file = "numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e"}, - {file = "numba-0.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198"}, - {file = "numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8"}, - {file = "numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b"}, - {file = "numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703"}, - {file = "numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8"}, - {file = 
"numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2"}, - {file = "numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404"}, - {file = "numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c"}, - {file = "numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e"}, - {file = "numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d"}, - {file = "numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347"}, - {file = "numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74"}, - {file = "numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449"}, - {file = "numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b"}, - {file = "numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25"}, - {file = "numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab"}, - {file = "numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16"}, + {file = "numba-0.62.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a323df9d36a0da1ca9c592a6baaddd0176d9f417ef49a65bb81951dce69d941a"}, + {file = "numba-0.62.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:e1e1f4781d3f9f7c23f16eb04e76ca10b5a3516e959634bd226fc48d5d8e7a0a"}, + {file = "numba-0.62.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:14432af305ea68627a084cd702124fd5d0c1f5b8a413b05f4e14757202d1cf6c"}, + {file = "numba-0.62.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f180922adf159ae36c2fe79fb94ffaa74cf5cb3688cb72dba0a904b91e978507"}, + {file = "numba-0.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:f41834909d411b4b8d1c68f745144136f21416547009c1e860cc2098754b4ca7"}, + {file = "numba-0.62.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:f43e24b057714e480fe44bc6031de499e7cf8150c63eb461192caa6cc8530bc8"}, + {file = "numba-0.62.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:57cbddc53b9ee02830b828a8428757f5c218831ccc96490a314ef569d8342b7b"}, + {file = "numba-0.62.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:604059730c637c7885386521bb1b0ddcbc91fd56131a6dcc54163d6f1804c872"}, + {file = "numba-0.62.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6c540880170bee817011757dc9049dba5a29db0c09b4d2349295991fe3ee55f"}, + {file = "numba-0.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:03de6d691d6b6e2b76660ba0f38f37b81ece8b2cc524a62f2a0cfae2bfb6f9da"}, + {file = "numba-0.62.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:1b743b32f8fa5fff22e19c2e906db2f0a340782caf024477b97801b918cf0494"}, + {file = "numba-0.62.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90fa21b0142bcf08ad8e32a97d25d0b84b1e921bc9423f8dda07d3652860eef6"}, + {file = "numba-0.62.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ef84d0ac19f1bf80431347b6f4ce3c39b7ec13f48f233a48c01e2ec06ecbc59"}, + {file = "numba-0.62.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9315cc5e441300e0ca07c828a627d92a6802bcbf27c5487f31ae73783c58da53"}, + {file = "numba-0.62.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:44e3aa6228039992f058f5ebfcfd372c83798e9464297bdad8cc79febcf7891e"}, + {file = "numba-0.62.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:b72489ba8411cc9fdcaa2458d8f7677751e94f0109eeb53e5becfdc818c64afb"}, + {file = "numba-0.62.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:44a1412095534a26fb5da2717bc755b57da5f3053965128fe3dc286652cc6a92"}, + {file = "numba-0.62.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8c9460b9e936c5bd2f0570e20a0a5909ee6e8b694fd958b210e3bde3a6dba2d7"}, + {file = "numba-0.62.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:728f91a874192df22d74e3fd42c12900b7ce7190b1aad3574c6c61b08313e4c5"}, + {file = "numba-0.62.1-cp313-cp313-win_amd64.whl", hash = "sha256:bbf3f88b461514287df66bc8d0307e949b09f2b6f67da92265094e8fa1282dd8"}, + {file = "numba-0.62.1.tar.gz", hash = "sha256:7b774242aa890e34c21200a1fc62e5b5757d5286267e71103257f4e2af0d5161"}, ] [package.dependencies] -llvmlite = "==0.43.*" -numpy = ">=1.22,<2.1" +llvmlite = "==0.45.*" +numpy = ">=1.22,<2.4" [[package]] name = "numpy" -version = "2.0.2" +version = "2.2.6" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, - {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, - {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, - {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, - {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, - {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, - {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, - {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, - {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, - {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, - {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, - 
{file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, - {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, - {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, - {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, - {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, - {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, - {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, - {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, - {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, - {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, - {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, - {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, - {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, - {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, - {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, + {file = 
"numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf"}, + {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83"}, + {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915"}, + {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680"}, + {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289"}, + {file = "numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d"}, + {file = "numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = 
"sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491"}, + {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a"}, + {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf"}, + {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1"}, + {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab"}, + {file = "numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47"}, + {file = "numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282"}, + {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87"}, + {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249"}, + {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49"}, + {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de"}, + {file = "numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4"}, + {file = "numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566"}, + {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f"}, + {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f"}, + {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868"}, + {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d"}, + {file = "numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd"}, + {file = "numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c"}, + {file = 
"numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8"}, + {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f"}, + {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa"}, + {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571"}, + {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1"}, + {file = "numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff"}, + {file = "numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash 
= "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00"}, + {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"}, +] + +[[package]] +name = "numpy" +version = "2.3.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +files = [ + {file = "numpy-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e78aecd2800b32e8347ce49316d3eaf04aed849cd5b38e0af39f829a4e59f5eb"}, + {file = "numpy-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fd09cc5d65bda1e79432859c40978010622112e9194e581e3415a3eccc7f43f"}, + {file = "numpy-2.3.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1b219560ae2c1de48ead517d085bc2d05b9433f8e49d0955c82e8cd37bd7bf36"}, + {file = "numpy-2.3.4-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:bafa7d87d4c99752d07815ed7a2c0964f8ab311eb8168f41b910bd01d15b6032"}, + {file = "numpy-2.3.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36dc13af226aeab72b7abad501d370d606326a0029b9f435eacb3b8c94b8a8b7"}, + {file = "numpy-2.3.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7b2f9a18b5ff9824a6af80de4f37f4ec3c2aab05ef08f51c77a093f5b89adda"}, + {file = "numpy-2.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9984bd645a8db6ca15d850ff996856d8762c51a2239225288f08f9050ca240a0"}, + {file = "numpy-2.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:64c5825affc76942973a70acf438a8ab618dbd692b84cd5ec40a0a0509edc09a"}, + {file = "numpy-2.3.4-cp311-cp311-win32.whl", hash = "sha256:ed759bf7a70342f7817d88376eb7142fab9fef8320d6019ef87fae05a99874e1"}, + {file = "numpy-2.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:faba246fb30ea2a526c2e9645f61612341de1a83fb1e0c5edf4ddda5a9c10996"}, + {file = "numpy-2.3.4-cp311-cp311-win_arm64.whl", hash = "sha256:4c01835e718bcebe80394fd0ac66c07cbb90147ebbdad3dcecd3f25de2ae7e2c"}, + {file = 
"numpy-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ef1b5a3e808bc40827b5fa2c8196151a4c5abe110e1726949d7abddfe5c7ae11"}, + {file = "numpy-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2f91f496a87235c6aaf6d3f3d89b17dba64996abadccb289f48456cff931ca9"}, + {file = "numpy-2.3.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f77e5b3d3da652b474cc80a14084927a5e86a5eccf54ca8ca5cbd697bf7f2667"}, + {file = "numpy-2.3.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ab1c5f5ee40d6e01cbe96de5863e39b215a4d24e7d007cad56c7184fdf4aeef"}, + {file = "numpy-2.3.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77b84453f3adcb994ddbd0d1c5d11db2d6bda1a2b7fd5ac5bd4649d6f5dc682e"}, + {file = "numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4121c5beb58a7f9e6dfdee612cb24f4df5cd4db6e8261d7f4d7450a997a65d6a"}, + {file = "numpy-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65611ecbb00ac9846efe04db15cbe6186f562f6bb7e5e05f077e53a599225d16"}, + {file = "numpy-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dabc42f9c6577bcc13001b8810d300fe814b4cfbe8a92c873f269484594f9786"}, + {file = "numpy-2.3.4-cp312-cp312-win32.whl", hash = "sha256:a49d797192a8d950ca59ee2d0337a4d804f713bb5c3c50e8db26d49666e351dc"}, + {file = "numpy-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:985f1e46358f06c2a09921e8921e2c98168ed4ae12ccd6e5e87a4f1857923f32"}, + {file = "numpy-2.3.4-cp312-cp312-win_arm64.whl", hash = "sha256:4635239814149e06e2cb9db3dd584b2fa64316c96f10656983b8026a82e6e4db"}, + {file = "numpy-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c090d4860032b857d94144d1a9976b8e36709e40386db289aaf6672de2a81966"}, + {file = "numpy-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a13fc473b6db0be619e45f11f9e81260f7302f8d180c49a22b6e6120022596b3"}, + {file = "numpy-2.3.4-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:3634093d0b428e6c32c3a69b78e554f0cd20ee420dcad5a9f3b2a63762ce4197"}, + {file = "numpy-2.3.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:043885b4f7e6e232d7df4f51ffdef8c36320ee9d5f227b380ea636722c7ed12e"}, + {file = "numpy-2.3.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ee6a571d1e4f0ea6d5f22d6e5fbd6ed1dc2b18542848e1e7301bd190500c9d7"}, + {file = "numpy-2.3.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fc8a63918b04b8571789688b2780ab2b4a33ab44bfe8ccea36d3eba51228c953"}, + {file = "numpy-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:40cc556d5abbc54aabe2b1ae287042d7bdb80c08edede19f0c0afb36ae586f37"}, + {file = "numpy-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ecb63014bb7f4ce653f8be7f1df8cbc6093a5a2811211770f6606cc92b5a78fd"}, + {file = "numpy-2.3.4-cp313-cp313-win32.whl", hash = "sha256:e8370eb6925bb8c1c4264fec52b0384b44f675f191df91cbe0140ec9f0955646"}, + {file = "numpy-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:56209416e81a7893036eea03abcb91c130643eb14233b2515c90dcac963fe99d"}, + {file = "numpy-2.3.4-cp313-cp313-win_arm64.whl", hash = "sha256:a700a4031bc0fd6936e78a752eefb79092cecad2599ea9c8039c548bc097f9bc"}, + {file = "numpy-2.3.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:86966db35c4040fdca64f0816a1c1dd8dbd027d90fca5a57e00e1ca4cd41b879"}, + {file = "numpy-2.3.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:838f045478638b26c375ee96ea89464d38428c69170360b23a1a50fa4baa3562"}, + {file = "numpy-2.3.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d7315ed1dab0286adca467377c8381cd748f3dc92235f22a7dfc42745644a96a"}, + {file = "numpy-2.3.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:84f01a4d18b2cc4ade1814a08e5f3c907b079c847051d720fad15ce37aa930b6"}, + {file = "numpy-2.3.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:817e719a868f0dacde4abdfc5c1910b301877970195db9ab6a5e2c4bd5b121f7"}, + {file 
= "numpy-2.3.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85e071da78d92a214212cacea81c6da557cab307f2c34b5f85b628e94803f9c0"}, + {file = "numpy-2.3.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2ec646892819370cf3558f518797f16597b4e4669894a2ba712caccc9da53f1f"}, + {file = "numpy-2.3.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:035796aaaddfe2f9664b9a9372f089cfc88bd795a67bd1bfe15e6e770934cf64"}, + {file = "numpy-2.3.4-cp313-cp313t-win32.whl", hash = "sha256:fea80f4f4cf83b54c3a051f2f727870ee51e22f0248d3114b8e755d160b38cfb"}, + {file = "numpy-2.3.4-cp313-cp313t-win_amd64.whl", hash = "sha256:15eea9f306b98e0be91eb344a94c0e630689ef302e10c2ce5f7e11905c704f9c"}, + {file = "numpy-2.3.4-cp313-cp313t-win_arm64.whl", hash = "sha256:b6c231c9c2fadbae4011ca5e7e83e12dc4a5072f1a1d85a0a7b3ed754d145a40"}, + {file = "numpy-2.3.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:81c3e6d8c97295a7360d367f9f8553973651b76907988bb6066376bc2252f24e"}, + {file = "numpy-2.3.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7c26b0b2bf58009ed1f38a641f3db4be8d960a417ca96d14e5b06df1506d41ff"}, + {file = "numpy-2.3.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:62b2198c438058a20b6704351b35a1d7db881812d8512d67a69c9de1f18ca05f"}, + {file = "numpy-2.3.4-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:9d729d60f8d53a7361707f4b68a9663c968882dd4f09e0d58c044c8bf5faee7b"}, + {file = "numpy-2.3.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd0c630cf256b0a7fd9d0a11c9413b42fef5101219ce6ed5a09624f5a65392c7"}, + {file = "numpy-2.3.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5e081bc082825f8b139f9e9fe42942cb4054524598aaeb177ff476cc76d09d2"}, + {file = "numpy-2.3.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:15fb27364ed84114438fff8aaf998c9e19adbeba08c0b75409f8c452a8692c52"}, + {file = "numpy-2.3.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:85d9fb2d8cd998c84d13a79a09cc0c1091648e848e4e6249b0ccd7f6b487fa26"}, + {file = "numpy-2.3.4-cp314-cp314-win32.whl", hash = "sha256:e73d63fd04e3a9d6bc187f5455d81abfad05660b212c8804bf3b407e984cd2bc"}, + {file = "numpy-2.3.4-cp314-cp314-win_amd64.whl", hash = "sha256:3da3491cee49cf16157e70f607c03a217ea6647b1cea4819c4f48e53d49139b9"}, + {file = "numpy-2.3.4-cp314-cp314-win_arm64.whl", hash = "sha256:6d9cd732068e8288dbe2717177320723ccec4fb064123f0caf9bbd90ab5be868"}, + {file = "numpy-2.3.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:22758999b256b595cf0b1d102b133bb61866ba5ceecf15f759623b64c020c9ec"}, + {file = "numpy-2.3.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9cb177bc55b010b19798dc5497d540dea67fd13a8d9e882b2dae71de0cf09eb3"}, + {file = "numpy-2.3.4-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0f2bcc76f1e05e5ab58893407c63d90b2029908fa41f9f1cc51eecce936c3365"}, + {file = "numpy-2.3.4-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dc20bde86802df2ed8397a08d793da0ad7a5fd4ea3ac85d757bf5dd4ad7c252"}, + {file = "numpy-2.3.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e199c087e2aa71c8f9ce1cb7a8e10677dc12457e7cc1be4798632da37c3e86e"}, + {file = "numpy-2.3.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85597b2d25ddf655495e2363fe044b0ae999b75bc4d630dc0d886484b03a5eb0"}, + {file = "numpy-2.3.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04a69abe45b49c5955923cf2c407843d1c85013b424ae8a560bba16c92fe44a0"}, + {file = "numpy-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e1708fac43ef8b419c975926ce1eaf793b0c13b7356cfab6ab0dc34c0a02ac0f"}, + {file = "numpy-2.3.4-cp314-cp314t-win32.whl", hash = "sha256:863e3b5f4d9915aaf1b8ec79ae560ad21f0b8d5e3adc31e73126491bb86dee1d"}, + {file = "numpy-2.3.4-cp314-cp314t-win_amd64.whl", hash = "sha256:962064de37b9aef801d33bc579690f8bfe6c5e70e29b61783f60bcba838a14d6"}, + {file = "numpy-2.3.4-cp314-cp314t-win_arm64.whl", 
hash = "sha256:8b5a9a39c45d852b62693d9b3f3e0fe052541f804296ff401a72a1b60edafb29"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6e274603039f924c0fe5cb73438fa9246699c78a6df1bd3decef9ae592ae1c05"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d149aee5c72176d9ddbc6803aef9c0f6d2ceeea7626574fc68518da5476fa346"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:6d34ed9db9e6395bb6cd33286035f73a59b058169733a9db9f85e650b88df37e"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:fdebe771ca06bb8d6abce84e51dca9f7921fe6ad34a0c914541b063e9a68928b"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e92defe6c08211eb77902253b14fe5b480ebc5112bc741fd5e9cd0608f847"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13b9062e4f5c7ee5c7e5be96f29ba71bc5a37fed3d1d77c37390ae00724d296d"}, + {file = "numpy-2.3.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:81b3a59793523e552c4a96109dde028aa4448ae06ccac5a76ff6532a85558a7f"}, + {file = "numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a"}, ] [[package]] name = "oauthlib" -version = "3.2.2" +version = "3.3.1" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, + {file = "oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1"}, + {file = "oauthlib-3.3.1.tar.gz", hash = 
"sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9"}, ] [package.extras] @@ -1749,137 +2280,175 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "packaging" -version = "24.2" +version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, - {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] [[package]] name = "pillow" -version = "11.0.0" -description = "Python Imaging Library (Fork)" +version = "12.0.0" +description = "Python Imaging Library (fork)" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, - {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, 
- {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, - {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, - {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, - {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, - {file = 
"pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, - {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, - {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, - {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, - {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, - {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, - {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, - {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, - {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, - {file = 
"pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, - {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, - {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, - {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, - {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = 
"sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, - {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, - {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, - {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, - {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, + {file = "pillow-12.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b"}, + {file = "pillow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1"}, + {file = "pillow-12.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363"}, + {file = "pillow-12.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca"}, + {file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e"}, + {file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782"}, + {file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10"}, + {file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash 
= "sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa"}, + {file = "pillow-12.0.0-cp310-cp310-win32.whl", hash = "sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275"}, + {file = "pillow-12.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d"}, + {file = "pillow-12.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7"}, + {file = "pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc"}, + {file = "pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257"}, + {file = "pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642"}, + {file = "pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3"}, + {file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c"}, + {file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227"}, + {file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b"}, + {file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e"}, + {file = "pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739"}, + {file = "pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e"}, + {file = "pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d"}, + {file = "pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371"}, + {file = "pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082"}, + {file = "pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f"}, + {file = "pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d"}, + {file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953"}, + {file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8"}, + {file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79"}, + {file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba"}, + {file = "pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0"}, + {file = "pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a"}, + {file = "pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad"}, + {file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = 
"sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643"}, + {file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4"}, + {file = "pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399"}, + {file = "pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5"}, + {file = "pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b"}, + {file = "pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3"}, + {file = "pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07"}, + {file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e"}, + {file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344"}, + {file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27"}, + {file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79"}, + {file = "pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098"}, + {file = "pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905"}, + {file = "pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a"}, + {file = "pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3"}, + {file = "pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced"}, + {file = "pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b"}, + {file = "pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d"}, + {file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a"}, + {file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe"}, + {file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee"}, + {file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef"}, + {file = "pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9"}, + {file = "pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b"}, + {file = "pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47"}, + {file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9"}, + {file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = 
"sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2"}, + {file = "pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a"}, + {file = "pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b"}, + {file = "pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad"}, + {file = "pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01"}, + {file = "pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c"}, + {file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e"}, + {file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e"}, + {file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9"}, + {file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab"}, + {file = "pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b"}, + {file = "pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b"}, + {file = "pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0"}, + {file = "pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6"}, + {file = "pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6"}, + {file = "pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1"}, + {file = "pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e"}, + {file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca"}, + {file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925"}, + {file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8"}, + {file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4"}, + {file = "pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52"}, + {file = "pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a"}, + {file = "pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a"}, + {file = 
"pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76"}, + {file = "pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5"}, + {file = "pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] +test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.6" +version = "4.5.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, + {file = "platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3"}, + {file = "platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] +docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx-autodoc-typehints (>=3.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] +type = ["mypy (>=1.18.2)"] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "polib" +version = 
"1.2.0" +description = "A library to manipulate gettext files (po and mo files)." +optional = false +python-versions = "*" +files = [ + {file = "polib-1.2.0-py2.py3-none-any.whl", hash = "sha256:1c77ee1b81feb31df9bca258cbc58db1bbb32d10214b173882452c73af06d62d"}, + {file = "polib-1.2.0.tar.gz", hash = "sha256:f3ef94aefed6e183e342a8a269ae1fc4742ba193186ad76f175938621dbfc26b"}, +] [[package]] name = "pooch" @@ -1904,43 +2473,45 @@ xxhash = ["xxhash (>=1.4.3)"] [[package]] name = "psutil" -version = "6.1.0" -description = "Cross-platform lib for process and system monitoring in Python." +version = "7.1.3" +description = "Cross-platform lib for process and system monitoring." optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.6" files = [ - {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"}, - {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"}, - {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"}, - {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"}, - {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"}, - {file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"}, - {file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"}, - {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"}, - {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"}, - {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"}, - {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"}, - {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"}, - {file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"}, - {file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"}, - {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"}, - {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"}, - {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"}, + {file = "psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc"}, + {file = "psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251"}, + {file = 
"psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa"}, + {file = "psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f"}, + {file = "psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7"}, + {file = "psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b"}, + {file = "psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = 
"sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd"}, + {file = "psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1"}, + {file = "psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74"}, ] [package.extras] -dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"] -test = ["pytest", "pytest-xdist", "setuptools"] +dev = ["abi3audit", "black", "check-manifest", "colorama", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pyreadline", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", "vulture", "wheel", "wheel", "wmi"] +test = ["pytest", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32", "setuptools", "wheel", "wmi"] [[package]] name = "py7zr" -version = "0.22.0" +version = "1.0.0" description = "Pure python 7-zip library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "py7zr-0.22.0-py3-none-any.whl", hash = "sha256:993b951b313500697d71113da2681386589b7b74f12e48ba13cc12beca79d078"}, - {file = "py7zr-0.22.0.tar.gz", hash = "sha256:c6c7aea5913535184003b73938490f9a4d8418598e533f9ca991d3b8e45a139e"}, + {file = "py7zr-1.0.0-py3-none-any.whl", hash = "sha256:6f42d2ff34c808e9026ad11b721c13b41b0673cf2b4e8f8fb34f9d65ae143dd1"}, + {file = "py7zr-1.0.0.tar.gz", hash = "sha256:f6bfee81637c9032f6a9f0eb045a4bfc7a7ff4138becfc42d7cb89b54ffbfef1"}, ] [package.dependencies] @@ -1950,16 +2521,16 @@ inflate64 = ">=1.0.0,<1.1.0" multivolumefile = ">=0.2.3" psutil = {version = "*", markers = "sys_platform != \"cygwin\""} pybcj = ">=1.0.0,<1.1.0" -pycryptodomex = 
">=3.16.0" -pyppmd = ">=1.1.0,<1.2.0" -pyzstd = ">=0.15.9" +pycryptodomex = ">=3.20.0" +pyppmd = ">=1.1.0,<1.3.0" +pyzstd = ">=0.16.1" texttable = "*" [package.extras] -check = ["black (>=23.1.0)", "check-manifest", "flake8 (<8)", "flake8-black (>=0.3.6)", "flake8-deprecated", "flake8-isort", "isort (>=5.0.3)", "lxml", "mypy (>=0.940)", "mypy-extensions (>=0.4.1)", "pygments", "readme-renderer", "twine", "types-psutil"] +check = ["black (>=24.8.0)", "check-manifest", "flake8 (<8)", "flake8-black (>=0.3.6)", "flake8-deprecated", "flake8-isort", "isort (>=5.13.2)", "lxml", "mypy (>=1.10.0)", "mypy_extensions (>=1.0.0)", "pygments", "pylint", "readme-renderer", "twine", "types-psutil"] debug = ["pytest", "pytest-leaks", "pytest-profiling"] -docs = ["docutils", "sphinx (>=5.0)", "sphinx-a4doc", "sphinx-py3doc-enhanced-theme"] -test = ["coverage[toml] (>=5.2)", "coveralls (>=2.1.1)", "py-cpuinfo", "pytest", "pytest-benchmark", "pytest-cov", "pytest-remotedata", "pytest-timeout"] +docs = ["docutils", "sphinx (>=7.0.0)", "sphinx-a4doc", "sphinx-py3doc-enhanced-theme"] +test = ["coverage[toml] (>=5.2)", "coveralls (>=2.1.1)", "py-cpuinfo", "pytest", "pytest-benchmark", "pytest-cov", "pytest-httpserver", "pytest-remotedata", "pytest-timeout", "requests"] test-compat = ["libarchive-c"] [[package]] @@ -1978,139 +2549,160 @@ requests = "*" [[package]] name = "pybcj" -version = "1.0.2" +version = "1.0.6" description = "bcj filter library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pybcj-1.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7bff28d97e47047d69a4ac6bf59adda738cf1d00adde8819117fdb65d966bdbc"}, - {file = "pybcj-1.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:198e0b4768b4025eb3309273d7e81dc53834b9a50092be6e0d9b3983cfd35c35"}, - {file = "pybcj-1.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa26415b4a118ea790de9d38f244312f2510a9bb5c65e560184d241a6f391a2d"}, - {file = 
"pybcj-1.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fabb2be57e4ca28ea36c13146cdf97d73abd27c51741923fc6ba1e8cd33e255c"}, - {file = "pybcj-1.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d6d613bae6f27678d5e44e89d61018779726aa6aa950c516d33a04b8af8c59"}, - {file = "pybcj-1.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ffae79ef8a1ea81ea2748ad7b7ad9b882aa88ddf65ce90f9e944df639eccc61"}, - {file = "pybcj-1.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bdb4d8ff5cba3e0bd1adee7d20dbb2b4d80cb31ac04d6ea1cd06cfc02d2ecd0d"}, - {file = "pybcj-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a29be917fbc99eca204b08407e0971e0205bfdad4b74ec915930675f352b669d"}, - {file = "pybcj-1.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a2562ebe5a0abec4da0229f8abb5e90ee97b178f19762eb925c1159be36828b3"}, - {file = "pybcj-1.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:af19bc61ded933001cd68f004ae2042bf1a78eb498a3c685ebd655fa1be90dbe"}, - {file = "pybcj-1.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f3f4a447800850aba7724a2274ea0a4800724520c1caf38f7d0dabf2f89a5e15"}, - {file = "pybcj-1.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce1c8af7a4761d2b1b531864d84113948daa0c4245775c63bd9874cb955f4662"}, - {file = "pybcj-1.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8007371f6f2b462f5aa05d5c2135d0a1bcf5b7bdd9bd15d86c730f588d10b7d3"}, - {file = "pybcj-1.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1079ca63ff8da5c936b76863690e0bd2489e8d4e0a3a340e032095dae805dd91"}, - {file = "pybcj-1.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e9a785eb26884429d9b9f6326e68c3638828c83bf6d42d2463c97ad5385caff2"}, - {file = "pybcj-1.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:9ea46e2d45469d13b7f25b08efcdb140220bab1ac5a850db0954591715b8caaa"}, - {file = 
"pybcj-1.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:21b5f2460629167340403d359289a173e0729ce8e84e3ce99462009d5d5e01a4"}, - {file = "pybcj-1.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2940fb85730b9869254559c491cd83cf777e56c76a8a60df60e4be4f2a4248d7"}, - {file = "pybcj-1.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f40f3243139d675f43793a4e35c410c370f7b91ccae74e70c8b2f4877869f90e"}, - {file = "pybcj-1.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c2b3e60b65c7ac73e44335934e1e122da8d56db87840984601b3c5dc0ae4c19"}, - {file = "pybcj-1.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:746550dc7b5af4d04bb5fa4d065f18d39c925bcb5dee30db75747cd9a58bb6e8"}, - {file = "pybcj-1.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8ce9b62b6aaa5b08773be8a919ecc4e865396c969f982b685eeca6e80c82abb7"}, - {file = "pybcj-1.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:493eab2b1f6f546730a6de0c5ceb75ce16f3767154e8ae30e2b70d41b928b7d2"}, - {file = "pybcj-1.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:ef55b96b7f2ed823e0b924de902065ec42ade856366c287dbb073fabd6b90ec1"}, - {file = "pybcj-1.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ed5b3dd9c209fe7b90990dee4ef21870dca39db1cd326553c314ee1b321c1cc"}, - {file = "pybcj-1.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:22a94885723f8362d4cb468e68910eef92d3e2b1293de82b8eacb4198ef6655f"}, - {file = "pybcj-1.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b8f9368036c9e658d8e3b3534086d298a5349c864542b34657cbe57c260daa49"}, - {file = "pybcj-1.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87108181c7a6ac4d3fc1e4551cab5db5eea7f9fdca611175243234cd94bcc59b"}, - {file = "pybcj-1.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db57f26b8c0162cfddb52b869efb1741b8c5e67fc536994f743074985f714c55"}, - {file = 
"pybcj-1.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bdf5bcac4f1da36ad43567ea6f6ef404347658dbbe417c87cdb1699f327d6337"}, - {file = "pybcj-1.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c3171bb95c9b45cbcad25589e1ae4f4ca4ea99dc1724c4e0671eb6b9055514e"}, - {file = "pybcj-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:f9a2585e0da9cf343ea27421995b881736a1eb604a7c1d4ca74126af94c3d4a8"}, - {file = "pybcj-1.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fdb7cd8271471a5979d84915c1ee57eea7e0a69c893225fc418db66883b0e2a7"}, - {file = "pybcj-1.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e96ae14062bdcddc3197300e6ee4efa6fbc6749be917db934eac66d0daaecb68"}, - {file = "pybcj-1.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a54ebdc8423ba99d75372708a882fcfc3b14d9d52cf195295ad53e5a47dab37f"}, - {file = "pybcj-1.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3602be737c6e9553c45ae89e6b0e556f64f34dabf27d5260317d1824d31b79d3"}, - {file = "pybcj-1.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63dd2ca52a48841f561bfec0fa3f208d375b0a8dcd3d7b236459e683ae29221d"}, - {file = "pybcj-1.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8204a714029784b1a08a3d790430d80b423b68615c5b1e67aabca5bd5419b77d"}, - {file = "pybcj-1.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fde2376b180ae2620c102fbc3ef06638d306feae83964aaa5051ecbdda54845a"}, - {file = "pybcj-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:3b8d7810fb587adbffba025330cf212d9bbed8f29559656d05cb6609673f306a"}, - {file = "pybcj-1.0.2.tar.gz", hash = "sha256:c7f5bef7f47723c53420e377bc64d2553843bee8bcac5f0ad076ab1524780018"}, + {file = "pybcj-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0fc8eda59e9e52d807f411de6db30aadd7603aa0cb0a830f6f45226b74be1926"}, + {file = "pybcj-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0495443e8691510129f0c589ed956af4962c22b7963c5730b0c80c9c5b818c06"}, + {file 
= "pybcj-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c7998b546c3856dbe9ae879cb90393df80507f65097e7019785852769f4a990"}, + {file = "pybcj-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:335c859f85e718924f48b3ac967cda5528ccbef1e448a4462652cca688eee477"}, + {file = "pybcj-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:186fbb849883ac80764d96dbd253503dd9cecbcf6133504a0c9d6a2df81d5746"}, + {file = "pybcj-1.0.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:437bd5f5e6579bde404404ad2de915d1306c389595c68d0eb8933fee1408e951"}, + {file = "pybcj-1.0.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:933d6be8f07c653ff3eba16900376b3212249be1c71caf9db17f4cd52da5076c"}, + {file = "pybcj-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:90e169b669bbed30e22d36ba97d23dcfc71e044d3be41c8010fd6a53950725e5"}, + {file = "pybcj-1.0.6-cp310-cp310-win_arm64.whl", hash = "sha256:06441026c773f8abeb7816566acfffe7cd65a9b69094197a9de64d0496cd4c3c"}, + {file = "pybcj-1.0.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0275564a1afc4b2d1a6ff465384fb73a64622a88b6e4856cb7964ba2335a06e"}, + {file = "pybcj-1.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa794b134b4ee183a4ceb739e9c3a445a24ee12e7e3231c37820f66848db4c52"}, + {file = "pybcj-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d8945e8157c7fa469db110fc78579d154a31d121d14705b26d7d3ec3a471c8e"}, + {file = "pybcj-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7109177b4f77526a6ce4b565ee37483f5a5dd29bc92eaea6739b3c58618aeb7"}, + {file = "pybcj-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c48cbc9ebed137ac8759d0f2c3d12b999581dae7b4f84d974888c402f00fdb78"}, + {file = "pybcj-1.0.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6dccff82008e3cb5e5e639737320c02341b8718e189b9ece13f0230e0d57e7af"}, + {file = 
"pybcj-1.0.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e4e68cfc4fb099e8200386ac2255a9f514b8bb056189273bcce874bda3597459"}, + {file = "pybcj-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:13747c01b60bf955878267718f28c36e2bbb81fb8495b0173b21083c7d08a4a4"}, + {file = "pybcj-1.0.6-cp311-cp311-win_arm64.whl", hash = "sha256:6f81d6106c50c5e91c16ad58584fd7ab9eb941360188547e0184b1ede9e47f1d"}, + {file = "pybcj-1.0.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f5d1dbc76f615595d7d8f3846c07f607fb1e2305d085c34556b32dacf8e88d12"}, + {file = "pybcj-1.0.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1398f556ed2afe16ae363a2b6e8cf6aeda3aa21861757286bc6c498278886c60"}, + {file = "pybcj-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e269cfc7b6286af87c5447c9f8c685f19cff011cac64947ffb4cd98919696a7f"}, + {file = "pybcj-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7393d0b0dcaa0b1a7850245def78fa14438809e9a3f73b1057a975229d623fd3"}, + {file = "pybcj-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e252891698d3e01d0f60eb5adfe849038cd2d429cb9510f915a0759301f1884d"}, + {file = "pybcj-1.0.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ae5c891fcda9d5a6826a1b8e843b1e52811358594121553e6683e65b13eccce7"}, + {file = "pybcj-1.0.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:eac3cb317df1cefed2783ce9cafdae61899dd02f2f4749dc0f4494a7c425745f"}, + {file = "pybcj-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:72ebec5cda5a48de169c2d7548ea2ce7f48732de0175d7e0e665ca7360eaa4c4"}, + {file = "pybcj-1.0.6-cp312-cp312-win_arm64.whl", hash = "sha256:8f1f75a01e45d01ecf88d31910ca1ace5d345e3bfb7c18db0af3d0c393209b63"}, + {file = "pybcj-1.0.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3e6800eb599ce766e588095eedb2a2c45a93928d1880420e8ecfad7eff0c73dc"}, + {file = "pybcj-1.0.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:69a841ca0d3df978a2145488cec58460fa4604395321178ba421384cff26062f"}, + {file = "pybcj-1.0.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:887521da03302c96048803490073bd0423ff408a3adca2543c6ee86bc0af7578"}, + {file = "pybcj-1.0.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39a5a9a2d0e1fa4ddd2617a549c11e5022888af86dc8e29537cfee7f5761127d"}, + {file = "pybcj-1.0.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57757bc382f326bd93eb277a9edfc8dff6c22f480da467f0c5a5d63b9d092a41"}, + {file = "pybcj-1.0.6-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:cb1872b24b30d8473df433f3364e828b021964229d47a07f7bfc08496dbfd23e"}, + {file = "pybcj-1.0.6-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:5fedfeed96ab0e34207097f663b94e8c7076025c2c7af6a482e670e808ea5bb0"}, + {file = "pybcj-1.0.6-cp313-cp313-win_amd64.whl", hash = "sha256:caefc3109bf172ad37b52e21dc16c84cf495b2ea2890cc7256cdf0188914508d"}, + {file = "pybcj-1.0.6-cp313-cp313-win_arm64.whl", hash = "sha256:b24367175528da452a19e4c55368d5c907f4584072dc6aeee8990e2a5e6910fc"}, + {file = "pybcj-1.0.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:558128fbc201c9f11c1b1df30377fab3821ebb736c28e5eaf9fff9cc9e56b806"}, + {file = "pybcj-1.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d05f4026154d77c97486d5ce04261b473e3ec8c2f7cf0f937b7baa439c616559"}, + {file = "pybcj-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96ce9c428800ecc0d52cec9947ee167f3a7f913cc2ba58b9a462e7f19c52ac4b"}, + {file = "pybcj-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05038a58d78ab15a847ed90c17d924be5b7848f27a43517dc88a5589fba1ca78"}, + {file = "pybcj-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:591f58891ff52585a38894b28c8b952e4c7be93f65d6d43751672cde8edeff36"}, + {file = "pybcj-1.0.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:e1f416250101631ac04705a19d78ec407d261da9dffa0e1fa1f1f2d9409ec70d"}, + {file = "pybcj-1.0.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:27c489dd9e0d9745ebf7cd4344f23b6cb655edb2dea879ca63a0558a993e0d4b"}, + {file = "pybcj-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:56fe3ff939653c6b0e35aa105170af3494ee9e2469494ef1d0fa2bac3fdd99d0"}, + {file = "pybcj-1.0.6-cp39-cp39-win_arm64.whl", hash = "sha256:6c88e1a04b90547f0470e4d2bd190bbe6b73c8666d4f7196c3ca43a379a15de5"}, + {file = "pybcj-1.0.6.tar.gz", hash = "sha256:70bbe2dc185993351955bfe8f61395038f96f5de92bb3a436acb01505781f8f2"}, ] [package.extras] -check = ["check-manifest", "flake8 (<5)", "flake8-black", "flake8-colors", "flake8-isort", "flake8-pyi", "flake8-typing-imports", "mypy (>=0.812)", "mypy-extensions (>=0.4.3)", "pygments", "readme-renderer"] +check = ["check-manifest", "flake8 (<5)", "flake8-black", "flake8-colors", "flake8-isort", "flake8-pyi", "flake8-typing-imports", "mypy (>=1.10.0)", "pygments", "readme-renderer"] test = ["coverage[toml] (>=5.2)", "hypothesis", "pytest (>=6.0)", "pytest-cov"] [[package]] name = "pycairo" -version = "1.27.0" +version = "1.28.0" description = "Python interface for cairo" optional = true python-versions = ">=3.9" files = [ - {file = "pycairo-1.27.0-cp310-cp310-win32.whl", hash = "sha256:e20f431244634cf244ab6b4c3a2e540e65746eed1324573cf291981c3e65fc05"}, - {file = "pycairo-1.27.0-cp310-cp310-win_amd64.whl", hash = "sha256:03bf570e3919901572987bc69237b648fe0de242439980be3e606b396e3318c9"}, - {file = "pycairo-1.27.0-cp311-cp311-win32.whl", hash = "sha256:9a9b79f92a434dae65c34c830bb9abdbd92654195e73d52663cbe45af1ad14b2"}, - {file = "pycairo-1.27.0-cp311-cp311-win_amd64.whl", hash = "sha256:d40a6d80b15dacb3672dc454df4bc4ab3988c6b3f36353b24a255dc59a1c8aea"}, - {file = "pycairo-1.27.0-cp312-cp312-win32.whl", hash = "sha256:e2239b9bb6c05edae5f3be97128e85147a155465e644f4d98ea0ceac7afc04ee"}, - {file = "pycairo-1.27.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:27cb4d3a80e3b9990af552818515a8e466e0317063a6e61585533f1a86f1b7d5"}, - {file = "pycairo-1.27.0-cp313-cp313-win32.whl", hash = "sha256:01505c138a313df2469f812405963532fc2511fb9bca9bdc8e0ab94c55d1ced8"}, - {file = "pycairo-1.27.0-cp313-cp313-win_amd64.whl", hash = "sha256:b0349d744c068b6644ae23da6ada111c8a8a7e323b56cbce3707cba5bdb474cc"}, - {file = "pycairo-1.27.0-cp39-cp39-win32.whl", hash = "sha256:f9ca8430751f1fdcd3f072377560c9e15608b9a42d61375469db853566993c9b"}, - {file = "pycairo-1.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b1321652a6e27c4de3069709b1cae22aed2707fd8c5e889c04a95669228af2a"}, - {file = "pycairo-1.27.0.tar.gz", hash = "sha256:5cb21e7a00a2afcafea7f14390235be33497a2cce53a98a19389492a60628430"}, + {file = "pycairo-1.28.0-cp310-cp310-win32.whl", hash = "sha256:53e6dbc98456f789965dad49ef89ce2c62f9a10fc96c8d084e14da0ffb73d8a6"}, + {file = "pycairo-1.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:c8ab91a75025f984bc327ada335c787efb61c929ea0512063793cb36cee503d4"}, + {file = "pycairo-1.28.0-cp310-cp310-win_arm64.whl", hash = "sha256:e955328c1a5147bf71ee94e206413ce15e12630296a79788fcd246c80e5337b8"}, + {file = "pycairo-1.28.0-cp311-cp311-win32.whl", hash = "sha256:0fee15f5d72b13ba5fd065860312493dc1bca6ff2dce200ee9d704e11c94e60a"}, + {file = "pycairo-1.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:6339979bfec8b58a06476094a9a5c104bd5a99932ddaff16ca0d9203d2f4482c"}, + {file = "pycairo-1.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6ae15392e28ebfc0b35d8dc05d395d3b6be4bad9ad4caecf0fa12c8e7150225"}, + {file = "pycairo-1.28.0-cp312-cp312-win32.whl", hash = "sha256:c00cfbb7f30eb7ca1d48886712932e2d91e8835a8496f4e423878296ceba573e"}, + {file = "pycairo-1.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:d50d190f5033992b55050b9f337ee42a45c3568445d5e5d7987bab96c278d8a6"}, + {file = "pycairo-1.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:957e0340ee1c279d197d4f7cfa96f6d8b48e453eec711fca999748d752468ff4"}, + {file = 
"pycairo-1.28.0-cp313-cp313-win32.whl", hash = "sha256:d13352429d8a08a1cb3607767d23d2fb32e4c4f9faa642155383980ec1478c24"}, + {file = "pycairo-1.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:082aef6b3a9dcc328fa648d38ed6b0a31c863e903ead57dd184b2e5f86790140"}, + {file = "pycairo-1.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:026afd53b75291917a7412d9fe46dcfbaa0c028febd46ff1132d44a53ac2c8b6"}, + {file = "pycairo-1.28.0-cp314-cp314-win32.whl", hash = "sha256:d0ab30585f536101ad6f09052fc3895e2a437ba57531ea07223d0e076248025d"}, + {file = "pycairo-1.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:94f2ed204999ab95a0671a0fa948ffbb9f3d6fb8731fe787917f6d022d9c1c0f"}, + {file = "pycairo-1.28.0-cp39-cp39-win32.whl", hash = "sha256:3ed16d48b8a79cc584cb1cb0ad62dfb265f2dda6d6a19ef5aab181693e19c83c"}, + {file = "pycairo-1.28.0-cp39-cp39-win_amd64.whl", hash = "sha256:da0d1e6d4842eed4d52779222c6e43d254244a486ca9fdab14e30042fd5bdf28"}, + {file = "pycairo-1.28.0-cp39-cp39-win_arm64.whl", hash = "sha256:458877513eb2125513122e8aa9c938630e94bb0574f94f4fb5ab55eb23d6e9ac"}, + {file = "pycairo-1.28.0.tar.gz", hash = "sha256:26ec5c6126781eb167089a123919f87baa2740da2cca9098be8b3a6b91cc5fbc"}, ] [[package]] name = "pycparser" -version = "2.22" +version = "2.23" description = "C parser in Python" optional = false python-versions = ">=3.8" files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, ] [[package]] name = "pycryptodomex" -version = "3.21.0" +version = "3.23.0" description = "Cryptographic library for Python" optional = false -python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "pycryptodomex-3.21.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:dbeb84a399373df84a69e0919c1d733b89e049752426041deeb30d68e9867822"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:a192fb46c95489beba9c3f002ed7d93979423d1b2a53eab8771dbb1339eb3ddd"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1233443f19d278c72c4daae749872a4af3787a813e05c3561c73ab0c153c7b0f"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbb07f88e277162b8bfca7134b34f18b400d84eac7375ce73117f865e3c80d4c"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:e859e53d983b7fe18cb8f1b0e29d991a5c93be2c8dd25db7db1fe3bd3617f6f9"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-win32.whl", hash = "sha256:ef046b2e6c425647971b51424f0f88d8a2e0a2a63d3531817968c42078895c00"}, - {file = "pycryptodomex-3.21.0-cp27-cp27m-win_amd64.whl", hash = "sha256:da76ebf6650323eae7236b54b1b1f0e57c16483be6e3c1ebf901d4ada47563b6"}, - {file = "pycryptodomex-3.21.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:c07e64867a54f7e93186a55bec08a18b7302e7bee1b02fd84c6089ec215e723a"}, - {file = "pycryptodomex-3.21.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:56435c7124dd0ce0c8bdd99c52e5d183a0ca7fdcd06c5d5509423843f487dd0b"}, - {file = "pycryptodomex-3.21.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65d275e3f866cf6fe891411be9c1454fb58809ccc5de6d3770654c47197acd65"}, - {file = "pycryptodomex-3.21.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:5241bdb53bcf32a9568770a6584774b1b8109342bd033398e4ff2da052123832"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-macosx_10_9_universal2.whl", hash = 
"sha256:34325b84c8b380675fd2320d0649cdcbc9cf1e0d1526edbe8fce43ed858cdc7e"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:103c133d6cd832ae7266feb0a65b69e3a5e4dbbd6f3a3ae3211a557fd653f516"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77ac2ea80bcb4b4e1c6a596734c775a1615d23e31794967416afc14852a639d3"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9aa0cf13a1a1128b3e964dc667e5fe5c6235f7d7cfb0277213f0e2a783837cc2"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46eb1f0c8d309da63a2064c28de54e5e614ad17b7e2f88df0faef58ce192fc7b"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:cc7e111e66c274b0df5f4efa679eb31e23c7545d702333dfd2df10ab02c2a2ce"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-musllinux_1_2_i686.whl", hash = "sha256:770d630a5c46605ec83393feaa73a9635a60e55b112e1fb0c3cea84c2897aa0a"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:52e23a0a6e61691134aa8c8beba89de420602541afaae70f66e16060fdcd677e"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-win32.whl", hash = "sha256:a3d77919e6ff56d89aada1bd009b727b874d464cb0e2e3f00a49f7d2e709d76e"}, - {file = "pycryptodomex-3.21.0-cp36-abi3-win_amd64.whl", hash = "sha256:b0e9765f93fe4890f39875e6c90c96cb341767833cfa767f41b490b506fa9ec0"}, - {file = "pycryptodomex-3.21.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:feaecdce4e5c0045e7a287de0c4351284391fe170729aa9182f6bd967631b3a8"}, - {file = "pycryptodomex-3.21.0-pp27-pypy_73-win32.whl", hash = "sha256:365aa5a66d52fd1f9e0530ea97f392c48c409c2f01ff8b9a39c73ed6f527d36c"}, - {file = "pycryptodomex-3.21.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3efddfc50ac0ca143364042324046800c126a1d63816d532f2e19e6f2d8c0c31"}, - {file = 
"pycryptodomex-3.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df2608682db8279a9ebbaf05a72f62a321433522ed0e499bc486a6889b96bf3"}, - {file = "pycryptodomex-3.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5823d03e904ea3e53aebd6799d6b8ec63b7675b5d2f4a4bd5e3adcb512d03b37"}, - {file = "pycryptodomex-3.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:27e84eeff24250ffec32722334749ac2a57a5fd60332cd6a0680090e7c42877e"}, - {file = "pycryptodomex-3.21.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8ef436cdeea794015263853311f84c1ff0341b98fc7908e8a70595a68cefd971"}, - {file = "pycryptodomex-3.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1058e6dfe827f4209c5cae466e67610bcd0d66f2f037465daa2a29d92d952b"}, - {file = "pycryptodomex-3.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ba09a5b407cbb3bcb325221e346a140605714b5e880741dc9a1e9ecf1688d42"}, - {file = "pycryptodomex-3.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8a9d8342cf22b74a746e3c6c9453cb0cfbb55943410e3a2619bd9164b48dc9d9"}, - {file = "pycryptodomex-3.21.0.tar.gz", hash = "sha256:222d0bd05381dd25c32dd6065c071ebf084212ab79bab4599ba9e6a3e0009e6c"}, + {file = "pycryptodomex-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:add243d204e125f189819db65eed55e6b4713f70a7e9576c043178656529cec7"}, + {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1c6d919fc8429e5cb228ba8c0d4d03d202a560b421c14867a65f6042990adc8e"}, + {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1c3a65ad441746b250d781910d26b7ed0a396733c6f2dbc3327bd7051ec8a541"}, + {file = "pycryptodomex-3.23.0-cp27-cp27m-win32.whl", hash = "sha256:47f6d318fe864d02d5e59a20a18834819596c4ed1d3c917801b22b92b3ffa648"}, + {file = 
"pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:d9825410197a97685d6a1fa2a86196430b01877d64458a20e95d4fd00d739a08"}, + {file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:267a3038f87a8565bd834317dbf053a02055915acf353bf42ededb9edaf72010"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7b37e08e3871efe2187bc1fd9320cc81d87caf19816c648f24443483005ff886"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:91979028227543010d7b2ba2471cf1d1e398b3f183cb105ac584df0c36dac28d"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8962204c47464d5c1c4038abeadd4514a133b28748bcd9fa5b6d62e3cec6fa"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a33986a0066860f7fcf7c7bd2bc804fa90e434183645595ae7b33d01f3c91ed8"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7947ab8d589e3178da3d7cdeabe14f841b391e17046954f2fbcd941705762b5"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c25e30a20e1b426e1f0fa00131c516f16e474204eee1139d1603e132acffc314"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:da4fa650cef02db88c2b98acc5434461e027dce0ae8c22dd5a69013eaf510006"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58b851b9effd0d072d4ca2e4542bf2a4abcf13c82a29fd2c93ce27ee2a2e9462"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:a9d446e844f08299236780f2efa9898c818fe7e02f17263866b8550c7d5fb328"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bc65bdd9fc8de7a35a74cab1c898cab391a4add33a8fe740bda00f5976ca4708"}, + {file = "pycryptodomex-3.23.0-cp313-cp313t-win_arm64.whl", hash = 
"sha256:c885da45e70139464f082018ac527fdaad26f1657a99ee13eecdce0f0ca24ab4"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9"}, + {file = "pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51"}, + {file = "pycryptodomex-3.23.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:febec69c0291efd056c65691b6d9a339f8b4bc43c6635b8699471248fe897fea"}, + {file = 
"pycryptodomex-3.23.0-pp27-pypy_73-win32.whl", hash = "sha256:c84b239a1f4ec62e9c789aafe0543f0594f0acd90c8d9e15bcece3efe55eca66"}, + {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ebfff755c360d674306e5891c564a274a47953562b42fb74a5c25b8fc1fb1cb5"}, + {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca54f4bb349d45afc17e3011ed4264ef1cc9e266699874cdd1349c504e64798"}, + {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2596e643d4365e14d0879dc5aafe6355616c61c2176009270f3048f6d9a61f"}, + {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdfac7cda115bca3a5abb2f9e43bc2fb66c2b65ab074913643803ca7083a79ea"}, + {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:14c37aaece158d0ace436f76a7bb19093db3b4deade9797abfc39ec6cd6cc2fe"}, + {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7de1e40a41a5d7f1ac42b6569b10bcdded34339950945948529067d8426d2785"}, + {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bffc92138d75664b6d543984db7893a628559b9e78658563b0395e2a5fb47ed9"}, + {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df027262368334552db2c0ce39706b3fb32022d1dce34673d0f9422df004b96a"}, + {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e79f1aaff5a3a374e92eb462fa9e598585452135012e2945f96874ca6eeb1ff"}, + {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:27e13c80ac9a0a1d050ef0a7e0a18cc04c8850101ec891815b6c5a0375e8a245"}, + {file = "pycryptodomex-3.23.0.tar.gz", hash = 
"sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da"}, ] [[package]] name = "pydata-sphinx-theme" -version = "0.16.0" +version = "0.16.1" description = "Bootstrap-based Sphinx theme from the PyData community" optional = true python-versions = ">=3.9" files = [ - {file = "pydata_sphinx_theme-0.16.0-py3-none-any.whl", hash = "sha256:18c810ee4e67e05281e371e156c1fb5bb0fa1f2747240461b225272f7d8d57d8"}, - {file = "pydata_sphinx_theme-0.16.0.tar.gz", hash = "sha256:721dd26e05fa8b992d66ef545536e6cbe0110afb9865820a08894af1ad6f7707"}, + {file = "pydata_sphinx_theme-0.16.1-py3-none-any.whl", hash = "sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde"}, + {file = "pydata_sphinx_theme-0.16.1.tar.gz", hash = "sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7"}, ] [package.dependencies] @@ -2131,13 +2723,13 @@ test = ["pytest", "pytest-cov", "pytest-regressions", "sphinx[test]"] [[package]] name = "pygments" -version = "2.18.0" +version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
-optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, ] [package.extras] @@ -2145,12 +2737,12 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pygobject" -version = "3.50.0" +version = "3.54.5" description = "Python bindings for GObject Introspection" optional = true -python-versions = "<4.0,>=3.9" +python-versions = ">=3.9" files = [ - {file = "pygobject-3.50.0.tar.gz", hash = "sha256:4500ad3dbf331773d8dedf7212544c999a76fc96b63a91b3dcac1e5925a1d103"}, + {file = "pygobject-3.54.5.tar.gz", hash = "sha256:b6656f6348f5245606cf15ea48c384c7f05156c75ead206c1b246c80a22fb585"}, ] [package.dependencies] @@ -2158,146 +2750,149 @@ pycairo = ">=1.16" [[package]] name = "pylast" -version = "5.3.0" +version = "6.0.0" description = "A Python interface to Last.fm and Libre.fm" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" files = [ - {file = "pylast-5.3.0-py3-none-any.whl", hash = "sha256:4cc47cdcb05baf24a5cea10a012c17df0fe13e22911296a69835b127458a7308"}, - {file = "pylast-5.3.0.tar.gz", hash = "sha256:637943b1b0e6045dd85ed7389db6071a1fea45cc7ff90dc6126fd509ca6fae2f"}, + {file = "pylast-6.0.0-py3-none-any.whl", hash = "sha256:8570017a955a04c5694e7ad38b13081b6119531b4a10bfc771ccd0b9d4f900ee"}, + {file = "pylast-6.0.0.tar.gz", hash = "sha256:09748dcdb97ddc812c65460bea73f7cce578b2b8ed4d9f6a0d1da122f8b05c5c"}, ] [package.dependencies] -httpx = "*" +httpx = ">=0.26" [package.extras] tests = ["flaky", "pytest", "pytest-cov", 
"pytest-random-order", "pyyaml"] [[package]] name = "pyppmd" -version = "1.1.0" +version = "1.2.0" description = "PPMd compression/decompression library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pyppmd-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5cd428715413fe55abf79dc9fc54924ba7e518053e1fc0cbdf80d0d99cf1442"}, - {file = "pyppmd-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e96cc43f44b7658be2ea764e7fa99c94cb89164dbb7cdf209178effc2168319"}, - {file = "pyppmd-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dd20142869094bceef5ab0b160f4fff790ad1f612313a1e3393a51fc3ba5d57e"}, - {file = "pyppmd-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4f9b51e45c11e805e74ea6f6355e98a6423b5bbd92f45aceee24761bdc3d3b8"}, - {file = "pyppmd-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:459f85e928fb968d0e34fb6191fd8c4e710012d7d884fa2b317b2e11faac7c59"}, - {file = "pyppmd-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f73cf2aaf60477eef17f5497d14b6099d8be9748390ad2b83d1c88214d050c05"}, - {file = "pyppmd-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2ea3ae0e92c0b5345cd3a4e145e01bbd79c2d95355481ea5d833b5c0cb202a2d"}, - {file = "pyppmd-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:775172c740133c0162a01c1a5443d0e312246881cdd6834421b644d89a634b91"}, - {file = "pyppmd-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14421030f1d46f69829698bdd960698a3b3df0925e3c470e82cfcdd4446b7bc1"}, - {file = "pyppmd-1.1.0-cp310-cp310-win32.whl", hash = "sha256:b691264f9962532aca3bba5be848b6370e596d0a2ca722c86df388be08d0568a"}, - {file = "pyppmd-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:216b0d969a3f06e35fbfef979706d987d105fcb1e37b0b1324f01ee143719c4a"}, - {file = "pyppmd-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:1f8c51044ee4df1b004b10bf6b3c92f95ea86cfe1111210d303dca44a56e4282"}, - {file = "pyppmd-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac25b3a13d1ac9b8f0bde46952e10848adc79d932f2b548a6491ef8825ae0045"}, - {file = "pyppmd-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c8d3003eebe6aabe22ba744a38a146ed58a25633420d5da882b049342b7c8036"}, - {file = "pyppmd-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c520656bc12100aa6388df27dd7ac738577f38bf43f4a4bea78e1861e579ea5"}, - {file = "pyppmd-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c2a3e807028159a705951f5cb5d005f94caed11d0984e59cc50506de543e22d"}, - {file = "pyppmd-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8a2447e69444703e2b273247bfcd4b540ec601780eff07da16344c62d2993d"}, - {file = "pyppmd-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b9e0c8053e69cad6a92a0889b3324f567afc75475b4f54727de553ac4fc85780"}, - {file = "pyppmd-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5938d256e8d2a2853dc3af8bb58ae6b4a775c46fc891dbe1826a0b3ceb624031"}, - {file = "pyppmd-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1ce5822d8bea920856232ccfb3c26b56b28b6846ea1b0eb3d5cb9592a026649e"}, - {file = "pyppmd-1.1.0-cp311-cp311-win32.whl", hash = "sha256:2a9e894750f2a52b03e3bc0d7cf004d96c3475a59b1af7e797d808d7d29c9ffe"}, - {file = "pyppmd-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:969555c72e72fe2b4dd944127521a8f2211caddb5df452bbc2506b5adfac539e"}, - {file = "pyppmd-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d6ef8fd818884e914bc209f7961c9400a4da50d178bba25efcef89f09ec9169"}, - {file = "pyppmd-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95f28e2ecf3a9656bd7e766aaa1162b6872b575627f18715f8b046e8617c124a"}, - {file = "pyppmd-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:37f3557ea65ee417abcdf5f49d35df00bb9f6f252639cae57aeefcd0dd596133"}, - {file = "pyppmd-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e84b25d088d7727d50218f57f92127cdb839acd6ec3de670b6680a4cf0b2d2a"}, - {file = "pyppmd-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99ed42891986dac8c2ecf52bddfb777900233d867aa18849dbba6f3335600466"}, - {file = "pyppmd-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6fe69b82634488ada75ba07efb90cd5866fa3d64a2c12932b6e8ae207a14e5f"}, - {file = "pyppmd-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:60981ffde1fe6ade750b690b35318c41a1160a8505597fda2c39a74409671217"}, - {file = "pyppmd-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46e8240315476f57aac23d71e6de003e122b65feba7c68f4cc46a089a82a7cd4"}, - {file = "pyppmd-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0308e2e76ecb4c878a18c2d7a7c61dbca89b4ef138f65d5f5ead139154dcdea"}, - {file = "pyppmd-1.1.0-cp312-cp312-win32.whl", hash = "sha256:b4fa4c27dc1314d019d921f2aa19e17f99250557e7569eeb70e180558f46af74"}, - {file = "pyppmd-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:c269d21e15f4175df27cf00296476097af76941f948734c642d7fb6e85b9b3b9"}, - {file = "pyppmd-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a04ef5fd59818b035855723af85ce008c8191d31216706ffcbeedc505efca269"}, - {file = "pyppmd-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1e3ebcf5f95142268afa5cc46457d9dab2d29a3ccfd020a1129dd9d6bd021be1"}, - {file = "pyppmd-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4ad046a9525d1f52e93bc642a4cec0bf344a3ba1a15923e424e7a50f8ca003d8"}, - {file = "pyppmd-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:169e5023c86ed1f7587961900f58aa78ad8a3d59de1e488a2228b5ba3de52402"}, - {file = "pyppmd-1.1.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:baf798e76edd9da975cc536f943756a1b1755eb8ed87371f86f76d7c16e8d034"}, - {file = "pyppmd-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63be8c068879194c1e7548d0c57f54a4d305ba204cd0c7499b678f0aee893ef"}, - {file = "pyppmd-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5fc178a3c21af78858acbac9782fca6a927267694c452e0882c55fec6e78319"}, - {file = "pyppmd-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:28a1ab1ef0a31adce9b4c837b7b9acb01ce8f1f702ff3ff884f03d21c2f6b9bb"}, - {file = "pyppmd-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5fef43bfe98ada0a608adf03b2d205e071259027ab50523954c42eef7adcef67"}, - {file = "pyppmd-1.1.0-cp38-cp38-win32.whl", hash = "sha256:6b980902797eab821299a1c9f42fa78eff2826a6b0b0f6bde8a621f9765ffd55"}, - {file = "pyppmd-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:80cde69013f357483abe0c3ff30c55dc5e6b4f72b068f91792ce282c51dc0bff"}, - {file = "pyppmd-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2aeea1bf585c6b8771fa43a6abd704da92f8a46a6d0020953af15d7f3c82e48c"}, - {file = "pyppmd-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7759bdb137694d4ab0cfa5ff2c75c212d90714c7da93544694f68001a0c38e12"}, - {file = "pyppmd-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:db64a4fe956a2e700a737a1d019f526e6ccece217c163b28b354a43464cc495b"}, - {file = "pyppmd-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f788ae8f5a9e79cd777b7969d3401b2a2b87f47abe306c2a03baca30595e9bd"}, - {file = "pyppmd-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:324a178935c140210fca2043c688b77e79281da8172d2379a06e094f41735851"}, - {file = "pyppmd-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:363030bbcb7902fb9eeb59ffc262581ca5dd7790ba950328242fd2491c54d99b"}, - {file = "pyppmd-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:31b882584f86440b0ff7906385c9f9d9853e5799197abaafdae2245f87d03f01"}, - {file = "pyppmd-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b991b4501492ec3380b605fe30bee0b61480d305e98519d81c2a658b2de01593"}, - {file = "pyppmd-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b6108044d943b826f97a9e79201242f61392d6c1fadba463b2069c4e6bc961e1"}, - {file = "pyppmd-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c45ce2968b7762d2cacf622b0a8f260295c6444e0883fd21a21017e3eaef16ed"}, - {file = "pyppmd-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:f5289f32ab4ec5f96a95da51309abd1769f928b0bff62047b3bc25c878c16ccb"}, - {file = "pyppmd-1.1.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ad5da9f7592158e6b6b51d7cd15e536d8b23afbb4d22cba4e5744c7e0a3548b1"}, - {file = "pyppmd-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc6543e7d12ef0a1466d291d655e3d6bca59c7336dbb53b62ccdd407822fb52b"}, - {file = "pyppmd-1.1.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5e4008a45910e3c8c227f6f240de67eb14454c015dc3d8060fc41e230f395d3"}, - {file = "pyppmd-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9301fa39d1fb0ed09a10b4c5d7f0074113e96a1ead16ba7310bedf95f7ef660c"}, - {file = "pyppmd-1.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:59521a3c6028da0cb5780ba16880047b00163432a6b975da2f6123adfc1b0be8"}, - {file = "pyppmd-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d7ec02f1778dd68547e497625d66d7858ce10ea199146eb1d80ee23ba42954be"}, - {file = "pyppmd-1.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f062ca743f9b99fe88d417b4d351af9b4ff1a7cbd3d765c058bb97de976d57f1"}, - {file = "pyppmd-1.1.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088e326b180a0469ac936849f5e1e5320118c22c9d9e673e9c8551153b839c84"}, - {file = 
"pyppmd-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:897fa9ab5ff588a1000b8682835c5acf219329aa2bbfec478100e57d1204eeab"}, - {file = "pyppmd-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3af4338cc48cd59ee213af61d936419774a0f8600b9aa2013cd1917b108424f0"}, - {file = "pyppmd-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cce8cd2d4ceebe2dbf41db6dfebe4c2e621314b3af8a2df2cba5eb5fa277f122"}, - {file = "pyppmd-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62e57927dbcb91fb6290a41cd83743b91b9d85858efb16a0dd34fac208ee1c6b"}, - {file = "pyppmd-1.1.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:435317949a6f35e54cdf08e0af6916ace427351e7664ac1593980114668f0aaa"}, - {file = "pyppmd-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f66b0d0e32b8fb8707f1d2552f13edfc2917e8ed0bdf4d62e2ce190d2c70834"}, - {file = "pyppmd-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:650a663a591e06fb8096c213f4070b158981c8c3bf9c166ce7e4c360873f2750"}, - {file = "pyppmd-1.1.0.tar.gz", hash = "sha256:1d38ce2e4b7eb84b53bc8a52380b94f66ba6c39328b8800b30c2b5bf31693973"}, + {file = "pyppmd-1.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4a25d8b2a71e0cc6f34475c36450e905586b13d0c88fb28471655c215f370908"}, + {file = "pyppmd-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9dd8a6261152591a352d91e5e52c16b81fa760f64c361a7afb24a1f3b5e048"}, + {file = "pyppmd-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2cd2694f43720fa1304c1fa31b8a1e7d80162f045e91569fb93473277c2747b8"}, + {file = "pyppmd-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0354919ab0f4065d76c64ad0dc21f14116651a2124cf4915b96c4e76d9caf470"}, + {file = "pyppmd-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:416c15576924ff9d2852fbe53d162c946e0466ce79d8a03a058e6f09a54934f0"}, + 
{file = "pyppmd-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dcdd5bf53f562af2a9be76739be69c9de080dfa59a4c4a8bcc4a163f9c5cb53e"}, + {file = "pyppmd-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c67196af6cfcc68e72a8fffbc332d743327bb9323cb7f3709e27185e743c7272"}, + {file = "pyppmd-1.2.0-cp310-cp310-win32.whl", hash = "sha256:d529c78382a2315db22c93e1c831231ee3fd2ad5a352f61496d72474558c6b00"}, + {file = "pyppmd-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f19285ae4dd20bb917c4fd177f0911847feb3abada91cec5fd5d9d5f1b9f3e0"}, + {file = "pyppmd-1.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:30068ed6da301f6ba25219f96d828f3c3a80ca227647571d21c7704301e095e6"}, + {file = "pyppmd-1.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1a5f0b78d68620ffb51c46c34c9e0ec02c40bb68e903e6c3ce02870c528164af"}, + {file = "pyppmd-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f1ee49b88fd2e58a144b1ae0da9c2fe0dabc1962531da9475badbed6fba61fc"}, + {file = "pyppmd-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c98697fea3f3baf5ffc759fd41c766d708ff3fba7379776031077527873ce4ac"}, + {file = "pyppmd-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a3087d7ee6fc35db0bfecabd1df4615f2a9d58a56af61f5fc18b9ce2b379cbf"}, + {file = "pyppmd-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69fe10feb24a92e673b68aca5d945564232d09e25a4e185899e0c657096ae695"}, + {file = "pyppmd-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:aa40c982d1df515cd4cb366d3e1ae95ce22f3c20e6b8b2d31aa492679f7ad78c"}, + {file = "pyppmd-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a5c03dd85da64a237c601dd690d8eb95951b7c2eef91b89e110eb208010c6035"}, + {file = "pyppmd-1.2.0-cp311-cp311-win32.whl", hash = "sha256:c577f3dadd514979255e9df6eb89a63409d0e91855bb8c0969ffcd67d5d4f124"}, + {file = "pyppmd-1.2.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:f29dfb7aaf4b49ebc09d726fcdeabbce1cb21e9cf3a055244bb1384b8b51dd3b"}, + {file = "pyppmd-1.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:bf26c2def22322135fbaaa3de3c0963062c1835bd43d595478e3a2a674466a1a"}, + {file = "pyppmd-1.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d28cc9febcf37f2ff08b9e25d472de529e8973119c0a3279603b1915c809dd45"}, + {file = "pyppmd-1.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0f07d5376e1f699d09fbb9139562e5b72a347100aecaa73b688fa08461b3c118"}, + {file = "pyppmd-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:874f52eae03647b653aa34476f4e23c4c30458245c0eb7aa7fb290940abbd5b9"}, + {file = "pyppmd-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abafffb3d5b292924eafd8214ad80487400cf358c4e9dc2ac6c21d2c651c5ee2"}, + {file = "pyppmd-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e955de43991346d4ccb28a74fb4c80cadecf72a6724705301fe1adb569689fe"}, + {file = "pyppmd-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14ed0846c3bcee506555cd943db950d5787a6ffa1089e05deced010759ef1fe5"}, + {file = "pyppmd-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3caef2fb93a63d696b21e5ff72cb2955687b5dfcbed1938936334f9f7737fcd3"}, + {file = "pyppmd-1.2.0-cp312-cp312-win32.whl", hash = "sha256:011c813389476e84387599ad4aa834100e888c6608a6e7d6f07ea7cbac8a8e65"}, + {file = "pyppmd-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:42c7c9050b44b713676d255f0c212b8ff5c0463821053960c89292cf6b6221cc"}, + {file = "pyppmd-1.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:5768bff11936047613bcb91ee466f21779efc24360bd7953bd338b32da88577a"}, + {file = "pyppmd-1.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:4aa8ffca1727872923d2673045975bca571eb810cf14a21d048648da5237369b"}, + {file = "pyppmd-1.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6dc00f0ce9f79e1c1c87d9998220a714ab8216873b6c996776b88ab23efe05ac"}, + {file = 
"pyppmd-1.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d437881763ffd0d19079402f50e7f4aad5895e3cd5312d095edef0b32dac3aef"}, + {file = "pyppmd-1.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c763f2e3a011d5e96dfa0195f38accce9a14d489725a3d3641a74addbb5b72"}, + {file = "pyppmd-1.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38e3835a1951d18dd273000c870a4eb2804c20c400aa3c72231449f300cedf19"}, + {file = "pyppmd-1.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c76b8881fc087e70338b1cccd452bd12566206587a0d0d8266ba2833be902194"}, + {file = "pyppmd-1.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:8b43e299310e27af5a4bc505bcc87d10cfc38ae28e5ed1f6a779d811705e5ad6"}, + {file = "pyppmd-1.2.0-cp313-cp313-win32.whl", hash = "sha256:4b3249256f8a7ecdd36477f277b232a46ee2c8ca280b23faaeacb7f50cab949a"}, + {file = "pyppmd-1.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:625896f5da7038fe7145907b68b0b58f7c13b88ad6bbfdc1c20c05654c17fa6c"}, + {file = "pyppmd-1.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:bec8abbf1edb0300c0a2e4f1bbad6a96154de3e507a2b054a0b1187f1c2e4982"}, + {file = "pyppmd-1.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b5c3284be4dccebb87d81c14b148c81e035356cd01a29889736c75672f6187d"}, + {file = "pyppmd-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:40bfa26fdb3332a6a8d90fe1f6e0d9f489505a014911b470d66f2f79caea6d61"}, + {file = "pyppmd-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75b173bbc9164cdc6fb257d3480269cc26b1eb102ad72281a98cf90e0f7dc860"}, + {file = "pyppmd-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91534eb8c9c0bff9d6c6ec5eb5119a583d31bb9f8cf208d5a925b4e2293c9a7b"}, + {file = "pyppmd-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:edc4fcd928bf6219bcddb8230a5830e33a35b684b16ca3e8d1357b17029a9ef7"}, + {file = "pyppmd-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:5ff515c2c3544096fe524f341c244787d6449b36692d27131bf74d5075e5c83b"}, + {file = "pyppmd-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:af9be87228cba6b543531260f44675a23b4a1527158a44162dce186157cb13d9"}, + {file = "pyppmd-1.2.0-cp39-cp39-win32.whl", hash = "sha256:3674b5eba0e312b9af987ec7e6af59248f54db9a7f5ca63add5365d6c6639e9e"}, + {file = "pyppmd-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:cff27496fd164b587f150abba9524cae81629adbd2e9472f09e7b2b24b2d4939"}, + {file = "pyppmd-1.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:c9d0f5a903045ee6b399f5fb308e192e39f8f1f551b61441a595676d95dc76ad"}, + {file = "pyppmd-1.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86e252979fc5ae2492ebb46ed0eed0625a46a2cce519c4616b870eab58d77fb7"}, + {file = "pyppmd-1.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9095d8b098ce8cb5c1e404843a16e5167fb5bdebb4d6aed259d43dd2d73cfca3"}, + {file = "pyppmd-1.2.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:064307c7fec7bdf3da63f5e28c0f1c5cb5c9bf888c1b268c6df3c131391ab345"}, + {file = "pyppmd-1.2.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c012c17a53b6d9744e0514b17b0c4169c5f21fb54b4db7a0119bc2d7b3fcc609"}, + {file = "pyppmd-1.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0877758ffa73b2e9d2f93b698e17336a4d8acab8d9a3d17cd7960aec08347387"}, + {file = "pyppmd-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ac0960d2d0a1738af3ca3f27c6ed6eead38518d77875a47b2b4aae90ae933f4"}, + {file = "pyppmd-1.2.0.tar.gz", hash = "sha256:cc04af92f1d26831ec96963439dfb27c96467b5452b94436a6af696649a121fd"}, ] [package.extras] -check = ["check-manifest", "flake8 (<5)", "flake8-black", "flake8-isort", "isort (>=5.0.3)", "mypy (>=0.812)", "mypy-extensions (>=0.4.3)", "pygments", "readme-renderer"] -docs = ["sphinx 
(>=2.3)", "sphinx-rtd-theme"] +check = ["check-manifest", "flake8", "flake8-black", "flake8-isort", "mypy (>=1.10.0)", "pygments", "readme-renderer"] +docs = ["sphinx", "sphinx_rtd_theme"] fuzzer = ["atheris", "hypothesis"] test = ["coverage[toml] (>=5.2)", "hypothesis", "pytest (>=6.0)", "pytest-benchmark", "pytest-cov", "pytest-timeout"] +[[package]] +name = "pyrate-limiter" +version = "2.10.0" +description = "Python Rate-Limiter using Leaky-Bucket Algorithm" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "pyrate_limiter-2.10.0-py3-none-any.whl", hash = "sha256:a99e52159f5ed5eb58118bed8c645e30818e7c0e0d127a0585c8277c776b0f7f"}, + {file = "pyrate_limiter-2.10.0.tar.gz", hash = "sha256:98cc52cdbe058458e945ae87d4fd5a73186497ffa545ee6e98372f8599a5bd34"}, +] + +[package.extras] +all = ["filelock (>=3.0)", "redis (>=3.3,<4.0)", "redis-py-cluster (>=2.1.3,<3.0.0)"] +docs = ["furo (>=2022.3.4,<2023.0.0)", "myst-parser (>=0.17)", "sphinx (>=4.3.0,<5.0.0)", "sphinx-autodoc-typehints (>=1.17,<2.0)", "sphinx-copybutton (>=0.5)", "sphinxcontrib-apidoc (>=0.3,<0.4)"] + [[package]] name = "pytest" -version = "8.3.4" +version = "8.4.2" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, - {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, + {file = "pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79"}, + {file = "pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01"}, ] [package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" 
+colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" pluggy = ">=1.5,<2" +pygments = ">=2.7.2" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov" -version = "6.0.0" +version = "7.0.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.9" files = [ - {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, - {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, + {file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"}, + {file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"}, ] [package.dependencies] -coverage = {version = ">=7.5", extras = ["toml"]} -pytest = ">=4.6" +coverage = {version = ">=7.10.6", extras = ["toml"]} +pluggy = ">=1.2" +pytest = ">=7" [package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +testing = ["process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-flask" @@ -2348,13 +2943,13 @@ twisted = ["Twisted"] [[package]] name = "python3-discogs-client" -version = "2.7.1" +version = "2.8" description = "Python API client for Discogs" optional = false python-versions = "*" files = [ - {file = "python3_discogs_client-2.7.1-py3-none-any.whl", hash = "sha256:5fb5f3d2f288a8ce2c8c152444258bacedb35b7d61bc466bddae332b6c737444"}, - {file = 
"python3_discogs_client-2.7.1.tar.gz", hash = "sha256:f2453582f5d044ea5847d27cfe56473179e51c9a836913b46db803c20ae598f9"}, + {file = "python3_discogs_client-2.8-py3-none-any.whl", hash = "sha256:60d63a613da73afeb818015e680fa5f007ffaa94d97578070e7ee4f11dc1b1b3"}, + {file = "python3_discogs_client-2.8.tar.gz", hash = "sha256:0f2c77f4ff491a6ef60fe892032028df899808e65efcd48249b4ecf21146b33b"}, ] [package.dependencies] @@ -2365,6 +2960,20 @@ requests = "*" [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +[[package]] +name = "pytokens" +version = "0.2.0" +description = "A Fast, spec compliant Python 3.13+ tokenizer that runs on older Pythons." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytokens-0.2.0-py3-none-any.whl", hash = "sha256:74d4b318c67f4295c13782ddd9abcb7e297ec5630ad060eb90abf7ebbefe59f8"}, + {file = "pytokens-0.2.0.tar.gz", hash = "sha256:532d6421364e5869ea57a9523bf385f02586d4662acbcc0342afd69511b4dd43"}, +] + +[package.extras] +dev = ["black", "build", "mypy", "pytest", "pytest-cov", "setuptools", "tox", "twine", "wheel"] + [[package]] name = "pyxdg" version = "0.28" @@ -2378,158 +2987,218 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.2" +version = "6.0.3" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = 
"PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file 
= "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, + {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, + {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, + {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, + {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"}, + {file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"}, + {file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = 
"sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"}, + {file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"}, + {file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"}, + {file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"}, + {file = 
"pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"}, + {file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"}, + {file = 
"pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", 
hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"}, + {file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"}, + {file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"}, + {file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"}, +] + +[[package]] +name = "pyyaml-ft" +version = "8.0.0" +description = "YAML parser and emitter for Python with support for free-threading" +optional = false +python-versions = ">=3.13" +files = [ + {file = "pyyaml_ft-8.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8c1306282bc958bfda31237f900eb52c9bedf9b93a11f82e1aab004c9a5657a6"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:30c5f1751625786c19de751e3130fc345ebcba6a86f6bddd6e1285342f4bbb69"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fa992481155ddda2e303fcc74c79c05eddcdbc907b888d3d9ce3ff3e2adcfb0"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cec6c92b4207004b62dfad1f0be321c9f04725e0f271c16247d8b39c3bf3ea42"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06237267dbcab70d4c0e9436d8f719f04a51123f0ca2694c00dd4b68c338e40b"}, + {file = 
"pyyaml_ft-8.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8a7f332bc565817644cdb38ffe4739e44c3e18c55793f75dddb87630f03fc254"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7d10175a746be65f6feb86224df5d6bc5c049ebf52b89a88cf1cd78af5a367a8"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:58e1015098cf8d8aec82f360789c16283b88ca670fe4275ef6c48c5e30b22a96"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5f3e2ceb790d50602b2fd4ec37abbd760a8c778e46354df647e7c5a4ebb"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8d445bf6ea16bb93c37b42fdacfb2f94c8e92a79ba9e12768c96ecde867046d1"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c56bb46b4fda34cbb92a9446a841da3982cdde6ea13de3fbd80db7eeeab8b49"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dab0abb46eb1780da486f022dce034b952c8ae40753627b27a626d803926483b"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd48d639cab5ca50ad957b6dd632c7dd3ac02a1abe0e8196a3c24a52f5db3f7a"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:052561b89d5b2a8e1289f326d060e794c21fa068aa11255fe71d65baf18a632e"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3bb4b927929b0cb162fb1605392a321e3333e48ce616cdcfa04a839271373255"}, + {file = "pyyaml_ft-8.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:de04cfe9439565e32f178106c51dd6ca61afaa2907d143835d501d84703d3793"}, + {file = "pyyaml_ft-8.0.0.tar.gz", hash = "sha256:0c947dce03954c7b5d38869ed4878b2e6ff1d44b08a0d84dc83fdad205ae39ab"}, ] [[package]] name = "pyzstd" -version = "0.16.2" +version = "0.18.0" description = "Python bindings to Zstandard (zstd) compression library." 
optional = false python-versions = ">=3.5" files = [ - {file = "pyzstd-0.16.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:637376c8f8cbd0afe1cab613f8c75fd502bd1016bf79d10760a2d5a00905fe62"}, - {file = "pyzstd-0.16.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3e7a7118cbcfa90ca2ddbf9890c7cb582052a9a8cf2b7e2c1bbaf544bee0f16a"}, - {file = "pyzstd-0.16.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a74cb1ba05876179525144511eed3bd5a509b0ab2b10632c1215a85db0834dfd"}, - {file = "pyzstd-0.16.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c084dde218ffbf112e507e72cbf626b8f58ce9eb23eec129809e31037984662"}, - {file = "pyzstd-0.16.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4646459ebd3d7a59ddbe9312f020bcf7cdd1f059a2ea07051258f7af87a0b31"}, - {file = "pyzstd-0.16.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14bfc2833cc16d7657fc93259edeeaa793286e5031b86ca5dc861ba49b435fce"}, - {file = "pyzstd-0.16.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f27d488f19e5bf27d1e8aa1ae72c6c0a910f1e1ffbdf3c763d02ab781295dd27"}, - {file = "pyzstd-0.16.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91e134ca968ff7dcfa8b7d433318f01d309b74ee87e0d2bcadc117c08e1c80db"}, - {file = "pyzstd-0.16.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6b5f64cd3963c58b8f886eb6139bb8d164b42a74f8a1bb95d49b4804f4592d61"}, - {file = "pyzstd-0.16.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0b4a8266871b9e0407f9fd8e8d077c3558cf124d174e6357b523d14f76971009"}, - {file = "pyzstd-0.16.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1bb19f7acac30727354c25125922aa59f44d82e0e6a751df17d0d93ff6a73853"}, - {file = "pyzstd-0.16.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3008325b7368e794d66d4d98f2ee1d867ef5afd09fd388646ae02b25343c420d"}, - {file = 
"pyzstd-0.16.2-cp310-cp310-win32.whl", hash = "sha256:66f2d5c0bbf5bf32c577aa006197b3525b80b59804450e2c32fbcc2d16e850fd"}, - {file = "pyzstd-0.16.2-cp310-cp310-win_amd64.whl", hash = "sha256:5fe5f5459ebe1161095baa7a86d04ab625b35148f6c425df0347ed6c90a2fd58"}, - {file = "pyzstd-0.16.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c1bdbe7f01c7f37d5cd07be70e32a84010d7dfd6677920c0de04cf7d245b60d"}, - {file = "pyzstd-0.16.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1882a3ceaaf9adc12212d587d150ec5e58cfa9a765463d803d739abbd3ac0f7a"}, - {file = "pyzstd-0.16.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea46a8b9d60f6a6eba29facba54c0f0d70328586f7ef0da6f57edf7e43db0303"}, - {file = "pyzstd-0.16.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d7865bc06589cdcecdede0deefe3da07809d5b7ad9044c224d7b2a0867256957"}, - {file = "pyzstd-0.16.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52f938a65b409c02eb825e8c77fc5ea54508b8fc44b5ce226db03011691ae8cc"}, - {file = "pyzstd-0.16.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97620d3f53a0282947304189deef7ca7f7d0d6dfe15033469dc1c33e779d5e5"}, - {file = "pyzstd-0.16.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c40e9983d017108670dc8df68ceef14c7c1cf2d19239213274783041d0e64c"}, - {file = "pyzstd-0.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7cd4b3b2c6161066e4bde6af1cf78ed3acf5d731884dd13fdf31f1db10830080"}, - {file = "pyzstd-0.16.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:454f31fd84175bb203c8c424f2255a343fa9bd103461a38d1bf50487c3b89508"}, - {file = "pyzstd-0.16.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5ef754a93743f08fb0386ce3596780bfba829311b49c8f4107af1a4bcc16935d"}, - {file = "pyzstd-0.16.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:be81081db9166e10846934f0e3576a263cbe18d81eca06e6a5c23533f8ce0dc6"}, - {file = "pyzstd-0.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:738bcb2fa1e5f1868986f5030955e64de53157fa1141d01f3a4daf07a1aaf644"}, - {file = "pyzstd-0.16.2-cp311-cp311-win32.whl", hash = "sha256:0ea214c9b97046867d1657d55979021028d583704b30c481a9c165191b08d707"}, - {file = "pyzstd-0.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:c17c0fc02f0e75b0c7cd21f8eaf4c6ce4112333b447d93da1773a5f705b2c178"}, - {file = "pyzstd-0.16.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d4081fd841a9efe9ded7290ee7502dbf042c4158b90edfadea3b8a072c8ec4e1"}, - {file = "pyzstd-0.16.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fd3fa45d2aeb65367dd702806b2e779d13f1a3fa2d13d5ec777cfd09de6822de"}, - {file = "pyzstd-0.16.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8b5f0d2c07994a5180d8259d51df6227a57098774bb0618423d7eb4a7303467"}, - {file = "pyzstd-0.16.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60c9d25b15c7ae06ed5d516d096a0d8254f9bed4368b370a09cccf191eaab5cb"}, - {file = "pyzstd-0.16.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29acf31ce37254f6cad08deb24b9d9ba954f426fa08f8fae4ab4fdc51a03f4ae"}, - {file = "pyzstd-0.16.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec77612a17697a9f7cf6634ffcee616eba9b997712fdd896e77fd19ab3a0618"}, - {file = "pyzstd-0.16.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:313ea4974be93be12c9a640ab40f0fc50a023178aae004a8901507b74f190173"}, - {file = "pyzstd-0.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e91acdefc8c2c6c3b8d5b1b5fe837dce4e591ecb7c0a2a50186f552e57d11203"}, - {file = "pyzstd-0.16.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:929bd91a403539e72b5b5cb97f725ac4acafe692ccf52f075e20cd9bf6e5493d"}, - {file = 
"pyzstd-0.16.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:740837a379aa32d110911ebcbbc524f9a9b145355737527543a884bd8777ca4f"}, - {file = "pyzstd-0.16.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:adfc0e80dd157e6d1e0b0112c8ecc4b58a7a23760bd9623d74122ef637cfbdb6"}, - {file = "pyzstd-0.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:79b183beae1c080ad3dca39019e49b7785391947f9aab68893ad85d27828c6e7"}, - {file = "pyzstd-0.16.2-cp312-cp312-win32.whl", hash = "sha256:b8d00631a3c466bc313847fab2a01f6b73b3165de0886fb03210e08567ae3a89"}, - {file = "pyzstd-0.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:c0d43764e9a60607f35d8cb3e60df772a678935ab0e02e2804d4147377f4942c"}, - {file = "pyzstd-0.16.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3ae9ae7ad730562810912d7ecaf1fff5eaf4c726f4b4dfe04784ed5f06d7b91f"}, - {file = "pyzstd-0.16.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2ce8d3c213f76a564420f3d0137066ac007ce9fb4e156b989835caef12b367a7"}, - {file = "pyzstd-0.16.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2c14dac23c865e2d78cebd9087e148674b7154f633afd4709b4cd1520b99a61"}, - {file = "pyzstd-0.16.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4527969d66a943e36ef374eda847e918077de032d58b5df84d98ffd717b6fa77"}, - {file = "pyzstd-0.16.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd8256149b88e657e99f31e6d4b114c8ff2935951f1d8bb8e1fe501b224999c0"}, - {file = "pyzstd-0.16.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5bd1f1822d65c9054bf36d35307bf8ed4aa2d2d6827431761a813628ff671b1d"}, - {file = "pyzstd-0.16.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6733f4d373ec9ad2c1976cf06f973a3324c1f9abe236d114d6bb91165a397d"}, - {file = "pyzstd-0.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:7bec165ab6524663f00b69bfefd13a46a69fed3015754abaf81b103ec73d92c6"}, - {file = "pyzstd-0.16.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e4460fa6949aac6528a1ad0de8871079600b12b3ef4db49316306786a3598321"}, - {file = "pyzstd-0.16.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75df79ea0315c97d88337953a17daa44023dbf6389f8151903d371513f503e3c"}, - {file = "pyzstd-0.16.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:93e1d45f4a196afb6f18682c79bdd5399277ead105b67f30b35c04c207966071"}, - {file = "pyzstd-0.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:075e18b871f38a503b5d23e40a661adfc750bd4bd0bb8b208c1e290f3ceb8fa2"}, - {file = "pyzstd-0.16.2-cp313-cp313-win32.whl", hash = "sha256:9e4295eb299f8d87e3487852bca033d30332033272a801ca8130e934475e07a9"}, - {file = "pyzstd-0.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:18deedc70f858f4cf574e59f305d2a0678e54db2751a33dba9f481f91bc71c28"}, - {file = "pyzstd-0.16.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9892b707ef52f599098b1e9528df0e7849c5ec01d3e8035fb0e67de4b464839"}, - {file = "pyzstd-0.16.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4fbd647864341f3c174c4a6d7f20e6ea6b4be9d840fb900dc0faf0849561badc"}, - {file = "pyzstd-0.16.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ac2c15656cc6194c4fed1cb0e8159f9394d4ea1d58be755448743d2ec6c9c4"}, - {file = "pyzstd-0.16.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b239fb9a20c1be3374b9a2bd183ba624fd22ad7a3f67738c0d80cda68b4ae1d3"}, - {file = "pyzstd-0.16.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc52400412cdae2635e0978b8d6bcc0028cc638fdab2fd301f6d157675d26896"}, - {file = "pyzstd-0.16.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b766a6aeb8dbb6c46e622e7a1aebfa9ab03838528273796941005a5ce7257b1"}, - {file = "pyzstd-0.16.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:abd4b8676052f9d59579242bf3cfe5fd02532b6a9a93ab7737c118ae3b8509dc"}, - {file = "pyzstd-0.16.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1c6c0a677aac7c0e3d2d2605d4d68ffa9893fdeeb2e071040eb7c8750969d463"}, - {file = "pyzstd-0.16.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:15f9c2d612e7e2023d68d321d1b479846751f792af89141931d44e82ae391394"}, - {file = "pyzstd-0.16.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:11740bff847aad23beef4085a1bb767d101895881fe891f0a911aa27d43c372c"}, - {file = "pyzstd-0.16.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b9067483ebe860e4130a03ee665b3d7be4ec1608b208e645d5e7eb3492379464"}, - {file = "pyzstd-0.16.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:988f0ba19b14c2fe0afefc444ac1edfb2f497b7d7c3212b2f587504cc2ec804e"}, - {file = "pyzstd-0.16.2-cp39-cp39-win32.whl", hash = "sha256:8855acb1c3e3829030b9e9e9973b19e2d70f33efb14ad5c474b4d086864c959c"}, - {file = "pyzstd-0.16.2-cp39-cp39-win_amd64.whl", hash = "sha256:018e88378df5e76f5e1d8cf4416576603b6bc4a103cbc66bb593eaac54c758de"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4b631117b97a42ff6dfd0ffc885a92fff462d7c34766b28383c57b996f863338"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:56493a3fbe1b651a02102dd0902b0aa2377a732ff3544fb6fb3f114ca18db52f"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1eae9bdba4a1e5d3181331f403114ff5b8ce0f4b569f48eba2b9beb2deef1e4"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1be6972391c8aeecc7e61feb96ffc8e77a401bcba6ed994e7171330c45a1948"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:761439d687e3a5687c2ff5c6a1190e1601362a4a3e8c6c82ff89719d51d73e19"}, - {file = "pyzstd-0.16.2-pp310-pypy310_pp73-win_amd64.whl", 
hash = "sha256:f5fbdb8cf31b60b2dc586fecb9b73e2f172c21a0b320ed275f7b8d8a866d9003"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:183f26e34f9becf0f2db38be9c0bfb136753d228bcb47c06c69175901bea7776"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:88318b64b5205a67748148d6d244097fa6cf61fcea02ad3435511b9e7155ae16"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73142aa2571b6480136a1865ebda8257e09eabbc8bcd54b222202f6fa4febe1e"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d3f8877c29a97f1b1bba16f3d3ab01ad10ad3da7bad317aecf36aaf8848b37c"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f25754562473ac7de856b8331ebd5964f5d85601045627a5f0bb0e4e899990"}, - {file = "pyzstd-0.16.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6ce17e84310080c55c02827ad9bb17893c00a845c8386a328b346f814aabd2c1"}, - {file = "pyzstd-0.16.2.tar.gz", hash = "sha256:179c1a2ea1565abf09c5f2fd72f9ce7c54b2764cf7369e05c0bfd8f1f67f63d2"}, + {file = "pyzstd-0.18.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:79bb84d866bf57ad2c4bc6b8247628b38e965c4f66288f887bf90f546a42ae04"}, + {file = "pyzstd-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0576c48e2f7a2c457538414a6197397c343b1bf5bfe9332b049afd0366c0c92"}, + {file = "pyzstd-0.18.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7702484795ee3c16c48a03d990123e833f1e1d6baabbe9a53256238eb04cbc"}, + {file = "pyzstd-0.18.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9c412ac29a9ebb76c8c40f2df146327b460ce184bbbdaa5bc9257317dce4caa8"}, + {file = "pyzstd-0.18.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:36baae4201196c2ec6567faf4a3f19c68211efc2fca30836c885b848ed057f66"}, + {file = "pyzstd-0.18.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f6d9c8a535af243c5a19f2d66c3733595ab633e00b97237d877e70e8389edc5"}, + {file = "pyzstd-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a533550740ce8c721aae27b377fb1160df68a9f457f16015ec8e47547a033dfc"}, + {file = "pyzstd-0.18.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdd76049c8ccbb98276cfa78d807b4a497ec6bad2603361eceae993c6130e5bf"}, + {file = "pyzstd-0.18.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09b73fe07a8d81898ef1575cb3063816168abb3305c1a9f30110383b61a4ee92"}, + {file = "pyzstd-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6baf9fd75d0af4f5d677b6e2d8dd3deb359c4ec2250c8536fe5ea48fd9305199"}, + {file = "pyzstd-0.18.0-cp310-cp310-win32.whl", hash = "sha256:c0634ab42226d2ad96c94d57fd242df2ca9417350c2969eb97c8c61d9574ba69"}, + {file = "pyzstd-0.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:ec99569321a99b9868666c85a5846151f9a16b6a222b59b2570e2ddeefd4d80c"}, + {file = "pyzstd-0.18.0-cp310-cp310-win_arm64.whl", hash = "sha256:85371149cc1d8168461981084438b9f2f139c1699e989fef44562f7504ba0632"}, + {file = "pyzstd-0.18.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:848914835a8a984d4c5fad2355dc66f0aca979b35ec22753c9e694be8e98403c"}, + {file = "pyzstd-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3938fea87fe83113b5d8ec2925bb265b4c540e374bb0ec73e5528de58d68c393"}, + {file = "pyzstd-0.18.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9af4bcde7dde46ca7e82a4c6f5fda1760bcbfd15525dbea36fe625263ef06b5e"}, + {file = "pyzstd-0.18.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:15d9419d173d26de25342235256aba363190e48e3fd8a8988420a26221b45320"}, + {file = 
"pyzstd-0.18.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0b84f75f0494087afad31363e80a3463d1f32a0a6265f1a24660e6422b2b6fa6"}, + {file = "pyzstd-0.18.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cfcdf0e46020bda2e98814464ca3ae830da83937c4c61776bf8835c7094214e"}, + {file = "pyzstd-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8551b6bc3690fb76e730967a628b6aab0d9331c38a41f5cddb546be994771191"}, + {file = "pyzstd-0.18.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6883b47a4d5d5489890e24e74ef14c1f16dcd68bb326b86911ae0e254e33e4b7"}, + {file = "pyzstd-0.18.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929dec930296362ce03fee81877fa93a68ca4de3af75fdfa96ecbe0e366b2ee3"}, + {file = "pyzstd-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:278c80fdeaf857b620295cc815a31f6478fcb217d476ac889985a43b2b67e9bd"}, + {file = "pyzstd-0.18.0-cp311-cp311-win32.whl", hash = "sha256:0d1b678644894e49b5a448f02eebe0ac31bde6f51813168f5ff223d7212e1974"}, + {file = "pyzstd-0.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:8285a464aed201b166bb0d2f4667485b61b607cf89f12943b1f21f7e84cb4550"}, + {file = "pyzstd-0.18.0-cp311-cp311-win_arm64.whl", hash = "sha256:942badf996589e5ab6cbdd0f7dd33f5dc2cd7ed0b65441c96b9a12ffa7700d51"}, + {file = "pyzstd-0.18.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5eef13ee3e230e50c01b288d581664e8758f7b831271f6f32cfc29823a6ab365"}, + {file = "pyzstd-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f78d6ef80d2f355b5bc1a897e9aa58659e85170b3fa268f3211c4979c768264c"}, + {file = "pyzstd-0.18.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:394175aeeb4e2255ff5340b32f6db79375b3ffb25514fe4c1439015a7f335ec2"}, + {file = "pyzstd-0.18.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:3250c551f526d3b966cf4a2199a8d9538dc5c7083b7a26a45f305f8f2ab20a06"}, + {file = "pyzstd-0.18.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a99ca80053ca37be21f05f6c4152c70777e0eface72b08277cb4b10b6d286e79"}, + {file = "pyzstd-0.18.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5dc4488536e87ff0aac698b9cd65f2913ac87417b3952d80be32463c8e95cc35"}, + {file = "pyzstd-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c12da158f6ec1180be0a3d6f531050dfc1357a25e5d0fd8dd99d4506d2a3f448"}, + {file = "pyzstd-0.18.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f9a7d6bff36dfbe87dce1730e4b70d6ab49058a6f8ea22e85b33642491a2d053"}, + {file = "pyzstd-0.18.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0f56086bf8019f7c809a406dcc182ce0fb0d3623a9edf351ed80dbb484514613"}, + {file = "pyzstd-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1eb69217ad9b760537e93f2d578c7927b788a9cac0e2104e536855a2797b5b09"}, + {file = "pyzstd-0.18.0-cp312-cp312-win32.whl", hash = "sha256:05ce49412c7aef970e0a6be8e9add4748bc474a7f13533a14555642022f871e9"}, + {file = "pyzstd-0.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:e951c3013b9df479cff758d578b83837b2531d02fb6c3e59166a756795697e19"}, + {file = "pyzstd-0.18.0-cp312-cp312-win_arm64.whl", hash = "sha256:33b54781c66a86e33c93c89ae426811d0aa35a216a23116fc5d5162449284305"}, + {file = "pyzstd-0.18.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:65117997d1e10e9b41336c90c2c4877c8d27533f753272805ff39df15fd5298a"}, + {file = "pyzstd-0.18.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8550efbfb5944343666d0e79d6a3687adcbeb4dbf17aa743146a25e72d12d47f"}, + {file = "pyzstd-0.18.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac61854c4a77df66695540549a89f4c67039e4181a9158b8646425f1d56d947a"}, + {file = 
"pyzstd-0.18.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4c453369483f67480f86d67a7b63ef22827db65e7f0d4bec7992bb81751a94b9"}, + {file = "pyzstd-0.18.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ef4b757b2df808ac15058fc2aa41e07d93843ee5a95629ff51eb6e8f1950951"}, + {file = "pyzstd-0.18.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b42529770febd331e23c5e8a68e9899acb0cc0806ee4c970354806c0ceeec6c7"}, + {file = "pyzstd-0.18.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7f54d13c269cdc37d2f73c9b3e70c6d2bb168dec768a472d54c2ed830bb19fb9"}, + {file = "pyzstd-0.18.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e6686460ca4be536dca1b6f2f80055f383a78e92e68e03a14806428572c4fdba"}, + {file = "pyzstd-0.18.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:8da3978d7de9095cacc5089bd0c435ab84ebd127e0979cd31fa1b216111644af"}, + {file = "pyzstd-0.18.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1ebc87e6e50547cff97e07c3fed9999d79b6327c9c4143c3049a7cfeacb2cdba"}, + {file = "pyzstd-0.18.0-cp313-cp313-win32.whl", hash = "sha256:2dd203f2534b16dea2761394fda4e0f3c465a5109ae6450bdaada67e6ac14a45"}, + {file = "pyzstd-0.18.0-cp313-cp313-win_amd64.whl", hash = "sha256:98f43488f88b859291d6bdc51cc7793d1eab17aa9382b17d762944bbb8567c98"}, + {file = "pyzstd-0.18.0-cp313-cp313-win_arm64.whl", hash = "sha256:cff8922e25e19d8fbd95b53f451e637bc80e826ab53c8777a885d4e99d1c0c2d"}, + {file = "pyzstd-0.18.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:67f795ec745cfd6930cdaf5118fcdd8d87ce02b07b254d37efe75afd33ce9917"}, + {file = "pyzstd-0.18.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a8a589673b9b417a084e393f18d09a16b67b87a80f80da6d3b4f84dd983c9b3d"}, + {file = "pyzstd-0.18.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:fdaee8c33f96a6568225e821e6cc33045917628ae0bc7d8d3855332085c1aa7c"}, + {file = "pyzstd-0.18.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42bf45d8e835d7c9c0bef98ff703143a5129edf09ef6c3b757037cbf79eabcaa"}, + {file = "pyzstd-0.18.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2f4dff2a15e2047baea9359d3a547dee80f61887f17e0f23190b4b932fd617e4"}, + {file = "pyzstd-0.18.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ed87932d6c534fc8921f7d44a4dadb32881e10ebc68935175a2cba254f5cc83"}, + {file = "pyzstd-0.18.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7d08a372b2b7fa1fd24217424e13d3d794e01299c43c8bd55f50934ef0785779"}, + {file = "pyzstd-0.18.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:e8403108172e24622f51732a336a89fe32bf3842965e0dc677c65df3a562f3ad"}, + {file = "pyzstd-0.18.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5604eeb7f00ec308b7e878dae92abfc4eee2e5d238765a62d4fadc0d57bbbff3"}, + {file = "pyzstd-0.18.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d6b300c5240409f1e7ab9972ab2a880a1949447d8414dbc11d89c10bfcb31aa5"}, + {file = "pyzstd-0.18.0-cp314-cp314-win32.whl", hash = "sha256:83f4fe1409a59c45a5e6fccb4d451e1e3dd03a5fabebd2dd6ba651468f54025e"}, + {file = "pyzstd-0.18.0-cp314-cp314-win_amd64.whl", hash = "sha256:73c3dcd9a16f1669ed6eef0dad1d840b7dd6070ab7d48719171ca691101e7975"}, + {file = "pyzstd-0.18.0-cp314-cp314-win_arm64.whl", hash = "sha256:61333bbb337b9746284624ed14f6238838dfae1e395691ba49f227015374f760"}, + {file = "pyzstd-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9bccd16621016b83c2d5d40408806a841bbca2860370dca5ef0e3db005417aca"}, + {file = "pyzstd-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c7ee6747541594a5851bae720d5ab070ba9ef644df779507f35819ea61fd83fd"}, + {file = 
"pyzstd-0.18.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ea0d70b4ec72b9d5feae4ec665ef8a4cd48f442921f2100117229c900a5a713"}, + {file = "pyzstd-0.18.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d581aeeba9a3ed13e304b0efc27efdf310b58c1e69ebb99a08e0eeea3a392310"}, + {file = "pyzstd-0.18.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d582d2fab7cc3e7606c2b09093f914e6e8b942ec52aa992a3a25d9d3ed7ba295"}, + {file = "pyzstd-0.18.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a25a72afa7d66d47a881e475ffe88d9961b36052bf6a512af3b84de22b20d41f"}, + {file = "pyzstd-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5b4feed895f32b314f2b3aa3ba6a4e0ce903c6764f31ad78e68b6c3fa31415ac"}, + {file = "pyzstd-0.18.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:20d9524adbc4efc8a1680e59cc325bc73ff56bf70bb54d233c3540efcb7bf476"}, + {file = "pyzstd-0.18.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:72c25d14217854883b571f101253d39443ea2f226f85cf3223b4d4a4d644618d"}, + {file = "pyzstd-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c335605ac7d018ca2d4d68cc0bac10e3c4ccf8e9686972dfc569a4df53f7a8d3"}, + {file = "pyzstd-0.18.0-cp39-cp39-win32.whl", hash = "sha256:64ebf9bd8065388d778c4ab6d9c4e913c00633abcfbf55236202dd0398520cc0"}, + {file = "pyzstd-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:4a32751ac634eb685bec42935b0f6e494f018843da09596da3f2a0072ae8273b"}, + {file = "pyzstd-0.18.0-cp39-cp39-win_arm64.whl", hash = "sha256:6b64efb254fdc3c90ed4c74185beee62c24e517288aacfb3abd95c127e6f8f52"}, + {file = "pyzstd-0.18.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:35934369fcdfde6fb932f88fa441337c8ddaf4b08e7b0b12952010f0ba2082f7"}, + {file = "pyzstd-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:55b8e12c9657359a697440e88a8535d1a771025e5d8f1c3087ad69ba11bee6d2"}, + {file = "pyzstd-0.18.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:134d33d3e56b5083c8f827b63254c2abf85d6ace2b323e69d28e3954b5b71883"}, + {file = "pyzstd-0.18.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6c4bffa0157ef9e5cfa32413a5a79448e5affadece4982df274f1b5aae3a680"}, + {file = "pyzstd-0.18.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8c36824d94cf77997a899b60886cc2be3ac969083f1d74eb4dd4127234ba50a4"}, + {file = "pyzstd-0.18.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:788e0889db436cd6d16a3b490006ab80a913d8ce6f46db127f1888066ff4560b"}, + {file = "pyzstd-0.18.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:5e70b7c36a40d7f946bf6391a206374b057299735d366fad6524d3b9f392441f"}, + {file = "pyzstd-0.18.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:571c5f71622943387370f76de8cc0de3d5c6217ab0f38386cb127665e4e09275"}, + {file = "pyzstd-0.18.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de0b730f374b583894d58b79cff76569540baf1e84bc493be191d3128b58e559"}, + {file = "pyzstd-0.18.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b32184013f33dba2fabcdda89f2a83289f5b717a0c2477cda764e53fdafec7ee"}, + {file = "pyzstd-0.18.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:27c281abfc2f13f19df92793f66e12cd0a19038ccbc02684af2a14bce664fdc4"}, + {file = "pyzstd-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7313f3a9bd2cb11158e5eaab3d5d2cd6b4582702e383a08ebb8273d0d45c3e49"}, + {file = "pyzstd-0.18.0-pp39-pypy39_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ec4ae014abf835bd9995ee1b318fdf4e955ffb8439838373bdc19c80d51a541"}, + {file = 
"pyzstd-0.18.0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94c2f15f0e67acf89bec97ea276f7a5ad4e6d0267f62f12424bf044a0de280a0"}, + {file = "pyzstd-0.18.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:898e41170fde5aa73105a0262572c286bafc5f24c7b4cf131168d9b198e4c586"}, + {file = "pyzstd-0.18.0.tar.gz", hash = "sha256:81b6851ab1ca2e5f2c709e896a1362e3065a64f271f43db77fb7d5e4a78e9861"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.13.2", markers = "python_version < \"3.13\""} + [[package]] name = "rarfile" version = "4.2" @@ -2556,20 +3225,144 @@ files = [ [package.dependencies] cffi = "*" +[[package]] +name = "regex" +version = "2025.11.3" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.9" +files = [ + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5"}, + {file = "regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec"}, + {file = "regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd"}, + {file = "regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f"}, + {file = 
"regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e"}, + {file = "regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf"}, + {file = "regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a"}, + {file = "regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0"}, + {file = "regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204"}, + {file = "regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9"}, + {file = "regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7"}, + {file = "regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c"}, + {file = "regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = 
"sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5"}, + {file = "regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2"}, + {file = "regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a"}, + {file = "regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c"}, + {file = "regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed"}, + {file = "regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4"}, + {file = "regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad"}, + {file = "regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379"}, + {file = "regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38"}, + {file = "regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de"}, + {file = "regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81519e25707fc076978c6143b81ea3dc853f176895af05bf7ec51effe818aeec"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3bf28b1873a8af8bbb58c26cc56ea6e534d80053b41fb511a35795b6de507e6a"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:856a25c73b697f2ce2a24e7968285579e62577a048526161a2c0f53090bea9f9"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a3d571bd95fade53c86c0517f859477ff3a93c3fde10c9e669086f038e0f207"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:732aea6de26051af97b94bc98ed86448821f839d058e5d259c72bf6d73ad0fc0"}, + {file = 
"regex-2025.11.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:51c1c1847128238f54930edb8805b660305dca164645a9fd29243f5610beea34"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22dd622a402aad4558277305350699b2be14bc59f64d64ae1d928ce7d072dced"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f3b5a391c7597ffa96b41bd5cbd2ed0305f515fcbb367dfa72735679d5502364"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cc4076a5b4f36d849fd709284b4a3b112326652f3b0466f04002a6c15a0c96c1"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a295ca2bba5c1c885826ce3125fa0b9f702a1be547d821c01d65f199e10c01e2"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b4774ff32f18e0504bfc4e59a3e71e18d83bc1e171a3c8ed75013958a03b2f14"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e7d1cdfa88ef33a2ae6aa0d707f9255eb286ffbd90045f1088246833223aee"}, + {file = "regex-2025.11.3-cp39-cp39-win32.whl", hash = "sha256:74d04244852ff73b32eeede4f76f51c5bcf44bc3c207bc3e6cf1c5c45b890708"}, + {file = "regex-2025.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:7a50cd39f73faa34ec18d6720ee25ef10c4c1839514186fcda658a06c06057a2"}, + {file = "regex-2025.11.3-cp39-cp39-win_arm64.whl", hash = "sha256:43b4fb020e779ca81c1b5255015fe2b82816c76ec982354534ad9ec09ad7c9e3"}, + {file = "regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01"}, +] + [[package]] name = "requests" -version = "2.32.3" +version = "2.32.5" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -2612,6 +3405,24 @@ requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] +[[package]] +name = "requests-ratelimiter" +version = "0.7.0" +description = "Rate-limiting for the requests library" +optional = false +python-versions = "<4.0,>=3.7" +files = [ + {file = "requests_ratelimiter-0.7.0-py3-none-any.whl", hash = "sha256:1a7ef2faaa790272722db8539728690046237766fcc479f85b9591e5356a8185"}, + {file = "requests_ratelimiter-0.7.0.tar.gz", hash = "sha256:a070c8a359a6f3a001b0ccb08f17228b7ae0a6e21d8df5b6f6bd58389cddde45"}, +] + +[package.dependencies] +pyrate-limiter = "<3.0" +requests = ">=2.20" + +[package.extras] +docs = ["furo (>=2023.3,<2024.0)", "myst-parser (>=1.0)", "sphinx (>=5.2,<6.0)", "sphinx-autodoc-typehints (>=1.22,<2.0)", "sphinx-copybutton (>=0.5)"] + [[package]] name = "resampy" version = "0.4.3" @@ -2634,13 +3445,13 @@ tests = ["pytest (<8)", "pytest-cov", "scipy (>=1.1)"] [[package]] name = "responses" -version = "0.25.3" +version = "0.25.8" description = "A utility library for mocking out the `requests` Python library." 
optional = false python-versions = ">=3.8" files = [ - {file = "responses-0.25.3-py3-none-any.whl", hash = "sha256:521efcbc82081ab8daa588e08f7e8a64ce79b91c39f6e62199b19159bea7dbcb"}, - {file = "responses-0.25.3.tar.gz", hash = "sha256:617b9247abd9ae28313d57a75880422d55ec63c29d33d629697590a034358dba"}, + {file = "responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c"}, + {file = "responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4"}, ] [package.dependencies] @@ -2651,134 +3462,338 @@ urllib3 = ">=1.25.10,<3.0" [package.extras] tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-PyYAML", "types-requests"] +[[package]] +name = "roman" +version = "5.1" +description = "Integer to Roman numerals converter" +optional = false +python-versions = ">=3.9" +files = [ + {file = "roman-5.1-py3-none-any.whl", hash = "sha256:bf595d8a9bc4a8e8b1dfa23e1d4def0251b03b494786df6b8c3d3f1635ce285a"}, + {file = "roman-5.1.tar.gz", hash = "sha256:3a86572e9bc9183e771769601189e5fa32f1620ffeceebb9eca836affb409986"}, +] + +[[package]] +name = "ruamel-yaml" +version = "0.18.16" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +optional = true +python-versions = ">=3.8" +files = [ + {file = "ruamel.yaml-0.18.16-py3-none-any.whl", hash = "sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba"}, + {file = "ruamel.yaml-0.18.16.tar.gz", hash = "sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a"}, +] + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.14\""} + +[package.extras] +docs = ["mercurial (>5.7)", "ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + 
+[[package]] +name = "ruamel-yaml-clib" +version = "0.2.15" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +optional = true +python-versions = ">=3.9" +files = [ + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-win32.whl", hash = "sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f"}, + {file = "ruamel_yaml_clib-0.2.15-cp310-cp310-win_amd64.whl", hash = "sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-win32.whl", hash = "sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c"}, + {file = "ruamel_yaml_clib-0.2.15-cp311-cp311-win_amd64.whl", hash = "sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2"}, + {file = 
"ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-win32.whl", hash = "sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144"}, + {file = "ruamel_yaml_clib-0.2.15-cp312-cp312-win_amd64.whl", hash = "sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-win32.whl", hash = "sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec"}, + {file = "ruamel_yaml_clib-0.2.15-cp313-cp313-win_amd64.whl", hash = "sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c"}, + {file = 
"ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-win32.whl", hash = "sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa"}, + {file = "ruamel_yaml_clib-0.2.15-cp314-cp314-win_amd64.whl", hash = "sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a"}, + {file = 
"ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-win32.whl", hash = "sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14"}, + {file = "ruamel_yaml_clib-0.2.15-cp39-cp39-win_amd64.whl", hash = "sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe"}, + {file = "ruamel_yaml_clib-0.2.15.tar.gz", hash = "sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600"}, +] + [[package]] name = "ruff" -version = "0.8.1" +version = "0.14.3" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.8.1-py3-none-linux_armv6l.whl", hash = "sha256:fae0805bd514066f20309f6742f6ee7904a773eb9e6c17c45d6b1600ca65c9b5"}, - {file = "ruff-0.8.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8a4f7385c2285c30f34b200ca5511fcc865f17578383db154e098150ce0a087"}, - {file = "ruff-0.8.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd054486da0c53e41e0086e1730eb77d1f698154f910e0cd9e0d64274979a209"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2029b8c22da147c50ae577e621a5bfbc5d1fed75d86af53643d7a7aee1d23871"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2666520828dee7dfc7e47ee4ea0d928f40de72056d929a7c5292d95071d881d1"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:333c57013ef8c97a53892aa56042831c372e0bb1785ab7026187b7abd0135ad5"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:288326162804f34088ac007139488dcb43de590a5ccfec3166396530b58fb89d"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b12c39b9448632284561cbf4191aa1b005882acbc81900ffa9f9f471c8ff7e26"}, - {file = 
"ruff-0.8.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:364e6674450cbac8e998f7b30639040c99d81dfb5bbc6dfad69bc7a8f916b3d1"}, - {file = "ruff-0.8.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22346f845fec132aa39cd29acb94451d030c10874408dbf776af3aaeb53284c"}, - {file = "ruff-0.8.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b2f2f7a7e7648a2bfe6ead4e0a16745db956da0e3a231ad443d2a66a105c04fa"}, - {file = "ruff-0.8.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:adf314fc458374c25c5c4a4a9270c3e8a6a807b1bec018cfa2813d6546215540"}, - {file = "ruff-0.8.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a885d68342a231b5ba4d30b8c6e1b1ee3a65cf37e3d29b3c74069cdf1ee1e3c9"}, - {file = "ruff-0.8.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d2c16e3508c8cc73e96aa5127d0df8913d2290098f776416a4b157657bee44c5"}, - {file = "ruff-0.8.1-py3-none-win32.whl", hash = "sha256:93335cd7c0eaedb44882d75a7acb7df4b77cd7cd0d2255c93b28791716e81790"}, - {file = "ruff-0.8.1-py3-none-win_amd64.whl", hash = "sha256:2954cdbe8dfd8ab359d4a30cd971b589d335a44d444b6ca2cb3d1da21b75e4b6"}, - {file = "ruff-0.8.1-py3-none-win_arm64.whl", hash = "sha256:55873cc1a473e5ac129d15eccb3c008c096b94809d693fc7053f588b67822737"}, - {file = "ruff-0.8.1.tar.gz", hash = "sha256:3583db9a6450364ed5ca3f3b4225958b24f78178908d5c4bc0f46251ccca898f"}, + {file = "ruff-0.14.3-py3-none-linux_armv6l.whl", hash = "sha256:876b21e6c824f519446715c1342b8e60f97f93264012de9d8d10314f8a79c371"}, + {file = "ruff-0.14.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b6fd8c79b457bedd2abf2702b9b472147cd860ed7855c73a5247fa55c9117654"}, + {file = "ruff-0.14.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:71ff6edca490c308f083156938c0c1a66907151263c4abdcb588602c6e696a14"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:786ee3ce6139772ff9272aaf43296d975c0217ee1b97538a98171bf0d21f87ed"}, + {file = 
"ruff-0.14.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cd6291d0061811c52b8e392f946889916757610d45d004e41140d81fb6cd5ddc"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a497ec0c3d2c88561b6d90f9c29f5ae68221ac00d471f306fa21fa4264ce5fcd"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e231e1be58fc568950a04fbe6887c8e4b85310e7889727e2b81db205c45059eb"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:469e35872a09c0e45fecf48dd960bfbce056b5db2d5e6b50eca329b4f853ae20"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d6bc90307c469cb9d28b7cfad90aaa600b10d67c6e22026869f585e1e8a2db0"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2f8a0bbcffcfd895df39c9a4ecd59bb80dca03dc43f7fb63e647ed176b741e"}, + {file = "ruff-0.14.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:678fdd7c7d2d94851597c23ee6336d25f9930b460b55f8598e011b57c74fd8c5"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1ec1ac071e7e37e0221d2f2dbaf90897a988c531a8592a6a5959f0603a1ecf5e"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afcdc4b5335ef440d19e7df9e8ae2ad9f749352190e96d481dc501b753f0733e"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:7bfc42f81862749a7136267a343990f865e71fe2f99cf8d2958f684d23ce3dfa"}, + {file = "ruff-0.14.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a65e448cfd7e9c59fae8cf37f9221585d3354febaad9a07f29158af1528e165f"}, + {file = "ruff-0.14.3-py3-none-win32.whl", hash = "sha256:f3d91857d023ba93e14ed2d462ab62c3428f9bbf2b4fbac50a03ca66d31991f7"}, + {file = "ruff-0.14.3-py3-none-win_amd64.whl", hash = "sha256:d7b7006ac0756306db212fd37116cce2bd307e1e109375e1c6c106002df0ae5f"}, + {file = "ruff-0.14.3-py3-none-win_arm64.whl", hash = 
"sha256:26eb477ede6d399d898791d01961e16b86f02bc2486d0d1a7a9bb2379d055dc1"}, + {file = "ruff-0.14.3.tar.gz", hash = "sha256:4ff876d2ab2b161b6de0aa1f5bd714e8e9b4033dc122ee006925fbacc4f62153"}, ] [[package]] name = "scikit-learn" -version = "1.5.2" +version = "1.7.2" description = "A set of python modules for machine learning and data mining" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, - {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, - {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, - {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, - {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, - {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, - {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, - {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, - {file = 
"scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, - {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, - {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, - {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b33579c10a3081d076ab403df4a4190da4f4432d443521674637677dc91e61f"}, + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:36749fb62b3d961b1ce4fedf08fa57a1986cd409eff2d783bca5d4b9b5fce51c"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7a58814265dfc52b3295b1900cfb5701589d30a8bb026c7540f1e9d3499d5ec8"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a847fea807e278f821a0406ca01e387f97653e284ecbd9750e3ee7c90347f18"}, + {file = "scikit_learn-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:ca250e6836d10e6f402436d6463d6c0e4d8e0234cfb6a9a47835bd392b852ce5"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7509693451651cd7361d30ce4e86a1347493554f172b1c72a39300fa2aea79e"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:0486c8f827c2e7b64837c731c8feff72c0bd2b998067a8a9cbc10643c31f0fe1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89877e19a80c7b11a2891a27c21c4894fb18e2c2e077815bcade10d34287b20d"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:8da8bf89d4d79aaec192d2bda62f9b56ae4e5b4ef93b6a56b5de4977e375c1f1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:9b7ed8d58725030568523e937c43e56bc01cadb478fc43c042a9aca1dacb3ba1"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8d91a97fa2b706943822398ab943cde71858a50245e31bc71dba62aab1d60a96"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:acbc0f5fd2edd3432a22c69bed78e837c70cf896cd7993d71d51ba6708507476"}, + {file = "scikit_learn-1.7.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e5bf3d930aee75a65478df91ac1225ff89cd28e9ac7bd1196853a9229b6adb0b"}, + {file = "scikit_learn-1.7.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4d6e9deed1a47aca9fe2f267ab8e8fe82ee20b4526b2c0cd9e135cea10feb44"}, + {file = "scikit_learn-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:6088aa475f0785e01bcf8529f55280a3d7d298679f50c0bb70a2364a82d0b290"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b7dacaa05e5d76759fb071558a8b5130f4845166d88654a0f9bdf3eb57851b7"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:abebbd61ad9e1deed54cca45caea8ad5f79e1b93173dece40bb8e0c658dbe6fe"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:502c18e39849c0ea1a5d681af1dbcf15f6cce601aebb657aabbfe84133c1907f"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a4c328a71785382fe3fe676a9ecf2c86189249beff90bf85e22bdb7efaf9ae0"}, + {file = "scikit_learn-1.7.2-cp313-cp313-win_amd64.whl", hash = "sha256:63a9afd6f7b229aad94618c01c252ce9e6fa97918c5ca19c9a17a087d819440c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9acb6c5e867447b4e1390930e3944a005e2cb115922e693c08a323421a6966e8"}, + {file = 
"scikit_learn-1.7.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:2a41e2a0ef45063e654152ec9d8bcfc39f7afce35b08902bfe290c2498a67a6a"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98335fb98509b73385b3ab2bd0639b1f610541d3988ee675c670371d6a87aa7c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:191e5550980d45449126e23ed1d5e9e24b2c68329ee1f691a3987476e115e09c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-win_amd64.whl", hash = "sha256:57dc4deb1d3762c75d685507fbd0bc17160144b2f2ba4ccea5dc285ab0d0e973"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fa8f63940e29c82d1e67a45d5297bdebbcb585f5a5a50c4914cc2e852ab77f33"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f95dc55b7902b91331fa4e5845dd5bde0580c9cd9612b1b2791b7e80c3d32615"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9656e4a53e54578ad10a434dc1f993330568cfee176dff07112b8785fb413106"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96dc05a854add0e50d3f47a1ef21a10a595016da5b007c7d9cd9d0bffd1fcc61"}, + {file = "scikit_learn-1.7.2-cp314-cp314-win_amd64.whl", hash = "sha256:bb24510ed3f9f61476181e4db51ce801e2ba37541def12dc9333b946fc7a9cf8"}, + {file = "scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda"}, ] [package.dependencies] joblib = ">=1.2.0" -numpy = ">=1.19.5" -scipy = ">=1.6.0" +numpy = ">=1.22.0" +scipy = ">=1.8.0" threadpoolctl = ">=3.1.0" [package.extras] -benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] -build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc 
(>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] -examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] -install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] -maintenance = ["conda-lock (==2.5.6)"] -tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] +benchmark = ["matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "pandas (>=1.4.0)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.17.1)", "numpy (>=1.22.0)", "scipy (>=1.8.0)"] +docs = ["Pillow (>=8.4.0)", "matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] +examples = ["matplotlib (>=3.5.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.22.0)", "scipy (>=1.8.0)", "threadpoolctl (>=3.1.0)"] 
+maintenance = ["conda-lock (==3.0.1)"] +tests = ["matplotlib (>=3.5.0)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.2.1)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)", "scikit-image (>=0.19.0)"] [[package]] name = "scipy" -version = "1.13.1" +version = "1.15.3" description = "Fundamental algorithms for scientific computing in Python" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca"}, - {file = "scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f"}, - {file = "scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94"}, - {file = "scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa"}, - {file = "scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59"}, - {file = "scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884"}, - {file = "scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16"}, - {file = "scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949"}, - {file = "scipy-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:436bbb42a94a8aeef855d755ce5a465479c721e9d684de76bf61a62e7c2b81d5"}, - {file = "scipy-1.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8335549ebbca860c52bf3d02f80784e91a004b71b059e3eea9678ba994796a24"}, - {file = "scipy-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d533654b7d221a6a97304ab63c41c96473ff04459e404b83275b60aa8f4b7004"}, - {file = "scipy-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637e98dcf185ba7f8e663e122ebf908c4702420477ae52a04f9908707456ba4d"}, - {file = "scipy-1.13.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:a014c2b3697bde71724244f63de2476925596c24285c7a637364761f8710891c"}, - {file = "scipy-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:392e4ec766654852c25ebad4f64e4e584cf19820b980bc04960bca0b0cd6eaa2"}, - {file = "scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:aef683a9ae6eb00728a542b796f52a5477b78252edede72b8327a886ab63293f"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:1c832e1bd78dea67d5c16f786681b28dd695a8cb1fb90af2e27580d3d0967e92"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:263961f658ce2165bbd7b99fa5135195c3a12d9bef045345016b8b50c315cb82"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2abc762b0811e09a0d3258abee2d98e0c703eee49464ce0069590846f31d40"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed7284b21a7a0c8f1b6e5977ac05396c0d008b89e05498c8b7e8f4a1423bba0e"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5380741e53df2c566f4d234b100a484b420af85deb39ea35a1cc1be84ff53a5c"}, + {file = "scipy-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:9d61e97b186a57350f6d6fd72640f9e99d5a4a2b8fbf4b9ee9a841eab327dc13"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba"}, + {file = 
"scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594"}, + {file = "scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49"}, + {file = 
"scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539"}, + {file = "scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126"}, + {file = "scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = 
"sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5"}, + {file = "scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca"}, + {file = "scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf"}, ] [package.dependencies] -numpy = ">=1.22.4,<2.3" +numpy = ">=1.23.5,<2.5" [package.extras] -dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] -test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", 
"rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "scipy" +version = "1.16.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = true +python-versions = ">=3.11" +files = [ + {file = "scipy-1.16.3-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:40be6cf99e68b6c4321e9f8782e7d5ff8265af28ef2cd56e9c9b2638fa08ad97"}, + {file = "scipy-1.16.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8be1ca9170fcb6223cc7c27f4305d680ded114a1567c0bd2bfcbf947d1b17511"}, + {file = "scipy-1.16.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:bea0a62734d20d67608660f69dcda23e7f90fb4ca20974ab80b6ed40df87a005"}, + {file = "scipy-1.16.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:2a207a6ce9c24f1951241f4693ede2d393f59c07abc159b2cb2be980820e01fb"}, + {file = "scipy-1.16.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:532fb5ad6a87e9e9cd9c959b106b73145a03f04c7d57ea3e6f6bb60b86ab0876"}, + {file = "scipy-1.16.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0151a0749efeaaab78711c78422d413c583b8cdd2011a3c1d6c794938ee9fdb2"}, + {file = "scipy-1.16.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b7180967113560cca57418a7bc719e30366b47959dd845a93206fbed693c867e"}, + {file = "scipy-1.16.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:deb3841c925eeddb6afc1e4e4a45e418d19ec7b87c5df177695224078e8ec733"}, + {file = 
"scipy-1.16.3-cp311-cp311-win_amd64.whl", hash = "sha256:53c3844d527213631e886621df5695d35e4f6a75f620dca412bcd292f6b87d78"}, + {file = "scipy-1.16.3-cp311-cp311-win_arm64.whl", hash = "sha256:9452781bd879b14b6f055b26643703551320aa8d79ae064a71df55c00286a184"}, + {file = "scipy-1.16.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81fc5827606858cf71446a5e98715ba0e11f0dbc83d71c7409d05486592a45d6"}, + {file = "scipy-1.16.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c97176013d404c7346bf57874eaac5187d969293bf40497140b0a2b2b7482e07"}, + {file = "scipy-1.16.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2b71d93c8a9936046866acebc915e2af2e292b883ed6e2cbe5c34beb094b82d9"}, + {file = "scipy-1.16.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3d4a07a8e785d80289dfe66b7c27d8634a773020742ec7187b85ccc4b0e7b686"}, + {file = "scipy-1.16.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0553371015692a898e1aa858fed67a3576c34edefa6b7ebdb4e9dde49ce5c203"}, + {file = "scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1"}, + {file = "scipy-1.16.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fb2472e72e24d1530debe6ae078db70fb1605350c88a3d14bc401d6306dbffe"}, + {file = "scipy-1.16.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c5192722cffe15f9329a3948c4b1db789fbb1f05c97899187dcf009b283aea70"}, + {file = "scipy-1.16.3-cp312-cp312-win_amd64.whl", hash = "sha256:56edc65510d1331dae01ef9b658d428e33ed48b4f77b1d51caf479a0253f96dc"}, + {file = "scipy-1.16.3-cp312-cp312-win_arm64.whl", hash = "sha256:a8a26c78ef223d3e30920ef759e25625a0ecdd0d60e5a8818b7513c3e5384cf2"}, + {file = "scipy-1.16.3-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d2ec56337675e61b312179a1ad124f5f570c00f920cc75e1000025451b88241c"}, + {file = "scipy-1.16.3-cp313-cp313-macosx_12_0_arm64.whl", hash = 
"sha256:16b8bc35a4cc24db80a0ec836a9286d0e31b2503cb2fd7ff7fb0e0374a97081d"}, + {file = "scipy-1.16.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:5803c5fadd29de0cf27fa08ccbfe7a9e5d741bf63e4ab1085437266f12460ff9"}, + {file = "scipy-1.16.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:b81c27fc41954319a943d43b20e07c40bdcd3ff7cf013f4fb86286faefe546c4"}, + {file = "scipy-1.16.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0c3b4dd3d9b08dbce0f3440032c52e9e2ab9f96ade2d3943313dfe51a7056959"}, + {file = "scipy-1.16.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7dc1360c06535ea6116a2220f760ae572db9f661aba2d88074fe30ec2aa1ff88"}, + {file = "scipy-1.16.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:663b8d66a8748051c3ee9c96465fb417509315b99c71550fda2591d7dd634234"}, + {file = "scipy-1.16.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eab43fae33a0c39006a88096cd7b4f4ef545ea0447d250d5ac18202d40b6611d"}, + {file = "scipy-1.16.3-cp313-cp313-win_amd64.whl", hash = "sha256:062246acacbe9f8210de8e751b16fc37458213f124bef161a5a02c7a39284304"}, + {file = "scipy-1.16.3-cp313-cp313-win_arm64.whl", hash = "sha256:50a3dbf286dbc7d84f176f9a1574c705f277cb6565069f88f60db9eafdbe3ee2"}, + {file = "scipy-1.16.3-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:fb4b29f4cf8cc5a8d628bc8d8e26d12d7278cd1f219f22698a378c3d67db5e4b"}, + {file = "scipy-1.16.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:8d09d72dc92742988b0e7750bddb8060b0c7079606c0d24a8cc8e9c9c11f9079"}, + {file = "scipy-1.16.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:03192a35e661470197556de24e7cb1330d84b35b94ead65c46ad6f16f6b28f2a"}, + {file = "scipy-1.16.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:57d01cb6f85e34f0946b33caa66e892aae072b64b034183f3d87c4025802a119"}, + {file = "scipy-1.16.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:96491a6a54e995f00a28a3c3badfff58fd093bf26cd5fb34a2188c8c756a3a2c"}, + {file = "scipy-1.16.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd13e354df9938598af2be05822c323e97132d5e6306b83a3b4ee6724c6e522e"}, + {file = "scipy-1.16.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63d3cdacb8a824a295191a723ee5e4ea7768ca5ca5f2838532d9f2e2b3ce2135"}, + {file = "scipy-1.16.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e7efa2681ea410b10dde31a52b18b0154d66f2485328830e45fdf183af5aefc6"}, + {file = "scipy-1.16.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2d1ae2cf0c350e7705168ff2429962a89ad90c2d49d1dd300686d8b2a5af22fc"}, + {file = "scipy-1.16.3-cp313-cp313t-win_arm64.whl", hash = "sha256:0c623a54f7b79dd88ef56da19bc2873afec9673a48f3b85b18e4d402bdd29a5a"}, + {file = "scipy-1.16.3-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:875555ce62743e1d54f06cdf22c1e0bc47b91130ac40fe5d783b6dfa114beeb6"}, + {file = "scipy-1.16.3-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:bb61878c18a470021fb515a843dc7a76961a8daceaaaa8bad1332f1bf4b54657"}, + {file = "scipy-1.16.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f2622206f5559784fa5c4b53a950c3c7c1cf3e84ca1b9c4b6c03f062f289ca26"}, + {file = "scipy-1.16.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7f68154688c515cdb541a31ef8eb66d8cd1050605be9dcd74199cbd22ac739bc"}, + {file = "scipy-1.16.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8b3c820ddb80029fe9f43d61b81d8b488d3ef8ca010d15122b152db77dc94c22"}, + {file = "scipy-1.16.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d3837938ae715fc0fe3c39c0202de3a8853aff22ca66781ddc2ade7554b7e2cc"}, + {file = "scipy-1.16.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aadd23f98f9cb069b3bd64ddc900c4d277778242e961751f77a8cb5c4b946fb0"}, + {file = "scipy-1.16.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:b7c5f1bda1354d6a19bc6af73a649f8285ca63ac6b52e64e658a5a11d4d69800"}, + {file = "scipy-1.16.3-cp314-cp314-win_amd64.whl", hash = "sha256:e5d42a9472e7579e473879a1990327830493a7047506d58d73fc429b84c1d49d"}, + {file = "scipy-1.16.3-cp314-cp314-win_arm64.whl", hash = "sha256:6020470b9d00245926f2d5bb93b119ca0340f0d564eb6fbaad843eaebf9d690f"}, + {file = "scipy-1.16.3-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:e1d27cbcb4602680a49d787d90664fa4974063ac9d4134813332a8c53dbe667c"}, + {file = "scipy-1.16.3-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:9b9c9c07b6d56a35777a1b4cc8966118fb16cfd8daf6743867d17d36cfad2d40"}, + {file = "scipy-1.16.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:3a4c460301fb2cffb7f88528f30b3127742cff583603aa7dc964a52c463b385d"}, + {file = "scipy-1.16.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:f667a4542cc8917af1db06366d3f78a5c8e83badd56409f94d1eac8d8d9133fa"}, + {file = "scipy-1.16.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f379b54b77a597aa7ee5e697df0d66903e41b9c85a6dd7946159e356319158e8"}, + {file = "scipy-1.16.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4aff59800a3b7f786b70bfd6ab551001cb553244988d7d6b8299cb1ea653b353"}, + {file = "scipy-1.16.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:da7763f55885045036fabcebd80144b757d3db06ab0861415d1c3b7c69042146"}, + {file = "scipy-1.16.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ffa6eea95283b2b8079b821dc11f50a17d0571c92b43e2b5b12764dc5f9b285d"}, + {file = "scipy-1.16.3-cp314-cp314t-win_amd64.whl", hash = "sha256:d9f48cafc7ce94cf9b15c6bffdc443a81a27bf7075cf2dcd5c8b40f85d10c4e7"}, + {file = "scipy-1.16.3-cp314-cp314t-win_arm64.whl", hash = "sha256:21d9d6b197227a12dcbf9633320a4e34c6b0e51c57268df255a0942983bac562"}, + {file = "scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb"}, +] + +[package.dependencies] +numpy = 
">=1.25.2,<2.6" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] @@ -2794,24 +3809,24 @@ files = [ [[package]] name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -optional = true -python-versions = "*" +version = "3.0.1" +description = "This package provides 32 stemmers for 30 languages generated from Snowball algorithms." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*" files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, + {file = "snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064"}, + {file = "snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895"}, ] [[package]] name = "soco" -version = "0.30.6" +version = "0.30.12" description = "SoCo (Sonos Controller) is a simple library to control Sonos speakers." optional = true python-versions = ">=3.6" files = [ - {file = "soco-0.30.6-py2.py3-none-any.whl", hash = "sha256:06c486218d0558a89276ed573ae2264d8e9bfd95a46a7dc253e03d19a3e6f423"}, - {file = "soco-0.30.6.tar.gz", hash = "sha256:7ae48e865dbf1d9fae8023e1b69465c2c4c17048992a05e9c017b35c43d4f4f2"}, + {file = "soco-0.30.12-py2.py3-none-any.whl", hash = "sha256:bb6c3bc7d5dd25cce77f76ff2da4df6dc17e387ff7c713f60205092f43da8766"}, + {file = "soco-0.30.12.tar.gz", hash = "sha256:9c5ee8191e0fbb2c79b8992931a2d38fb4360097a190d0aca20fef330138af51"}, ] [package.dependencies] @@ -2823,70 +3838,73 @@ xmltodict = "*" [package.extras] events-asyncio = ["aiohttp"] -testing = ["black (>=22.12.0)", "coveralls", "flake8", "graphviz", "importlib-metadata (<5)", "pylint", "pytest (>=2.5)", "pytest-cov (<2.6.0)", "requests-mock", "sphinx (==4.5.0)", "sphinx-rtd-theme", "twine", "wheel"] +testing = ["black (>=22.12.0)", "coveralls", "flake8", "graphviz", "importlib-metadata (<5)", "pylint", "pytest (>=2.5)", "pytest-cov (<2.6.0)", "requests-mock", "sphinx (==4.5.0)", "sphinx_rtd_theme", "twine", "wheel"] [[package]] name = "soundfile" -version = "0.12.1" +version = "0.13.1" description = "An audio library based on libsndfile, CFFI and NumPy" optional = 
true python-versions = "*" files = [ - {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"}, - {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"}, - {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"}, - {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"}, - {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"}, - {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"}, - {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"}, - {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"}, + {file = "soundfile-0.13.1-py2.py3-none-any.whl", hash = "sha256:a23c717560da2cf4c7b5ae1142514e0fd82d6bbd9dfc93a50423447142f2c445"}, + {file = "soundfile-0.13.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:82dc664d19831933fe59adad199bf3945ad06d84bc111a5b4c0d3089a5b9ec33"}, + {file = "soundfile-0.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:743f12c12c4054921e15736c6be09ac26b3b3d603aef6fd69f9dde68748f2593"}, + {file = "soundfile-0.13.1-py2.py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9c9e855f5a4d06ce4213f31918653ab7de0c5a8d8107cd2427e44b42df547deb"}, + {file = "soundfile-0.13.1-py2.py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:03267c4e493315294834a0870f31dbb3b28a95561b80b134f0bd3cf2d5f0e618"}, + {file = "soundfile-0.13.1-py2.py3-none-win32.whl", hash = 
"sha256:c734564fab7c5ddf8e9be5bf70bab68042cd17e9c214c06e365e20d64f9a69d5"}, + {file = "soundfile-0.13.1-py2.py3-none-win_amd64.whl", hash = "sha256:1e70a05a0626524a69e9f0f4dd2ec174b4e9567f4d8b6c11d38b5c289be36ee9"}, + {file = "soundfile-0.13.1.tar.gz", hash = "sha256:b2c68dab1e30297317080a5b43df57e302584c49e2942defdde0acccc53f0e5b"}, ] [package.dependencies] cffi = ">=1.0" - -[package.extras] -numpy = ["numpy"] +numpy = "*" [[package]] name = "soupsieve" -version = "2.6" +version = "2.8" description = "A modern CSS selector implementation for Beautiful Soup." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, - {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, + {file = "soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c"}, + {file = "soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f"}, ] [[package]] name = "soxr" -version = "0.5.0.post1" +version = "1.0.0" description = "High quality, one-dimensional sample-rate conversion library" optional = true python-versions = ">=3.9" files = [ - {file = "soxr-0.5.0.post1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:7406d782d85f8cf64e66b65e6b7721973de8a1dc50b9e88bc2288c343a987484"}, - {file = "soxr-0.5.0.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa0a382fb8d8e2afed2c1642723b2d2d1b9a6728ff89f77f3524034c8885b8c9"}, - {file = "soxr-0.5.0.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b01d3efb95a2851f78414bcd00738b0253eec3f5a1e5482838e965ffef84969"}, - {file = "soxr-0.5.0.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcc049b0a151a65aa75b92f0ac64bb2dba785d16b78c31c2b94e68c141751d6d"}, - {file = 
"soxr-0.5.0.post1-cp310-cp310-win_amd64.whl", hash = "sha256:97f269bc26937c267a2ace43a77167d0c5c8bba5a2b45863bb6042b5b50c474e"}, - {file = "soxr-0.5.0.post1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6fb77b626773a966e3d8f6cb24f6f74b5327fa5dc90f1ff492450e9cdc03a378"}, - {file = "soxr-0.5.0.post1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:39e0f791ba178d69cd676485dbee37e75a34f20daa478d90341ecb7f6d9d690f"}, - {file = "soxr-0.5.0.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f0b558f445ba4b64dbcb37b5f803052eee7d93b1dbbbb97b3ec1787cb5a28eb"}, - {file = "soxr-0.5.0.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca6903671808e0a6078b0d146bb7a2952b118dfba44008b2aa60f221938ba829"}, - {file = "soxr-0.5.0.post1-cp311-cp311-win_amd64.whl", hash = "sha256:c4d8d5283ed6f5efead0df2c05ae82c169cfdfcf5a82999c2d629c78b33775e8"}, - {file = "soxr-0.5.0.post1-cp312-abi3-macosx_10_14_x86_64.whl", hash = "sha256:fef509466c9c25f65eae0ce1e4b9ac9705d22c6038c914160ddaf459589c6e31"}, - {file = "soxr-0.5.0.post1-cp312-abi3-macosx_11_0_arm64.whl", hash = "sha256:4704ba6b13a3f1e41d12acf192878384c1c31f71ce606829c64abdf64a8d7d32"}, - {file = "soxr-0.5.0.post1-cp312-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd052a66471a7335b22a6208601a9d0df7b46b8d087dce4ff6e13eed6a33a2a1"}, - {file = "soxr-0.5.0.post1-cp312-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3f16810dd649ab1f433991d2a9661e9e6a116c2b4101039b53b3c3e90a094fc"}, - {file = "soxr-0.5.0.post1-cp312-abi3-win_amd64.whl", hash = "sha256:b1be9fee90afb38546bdbd7bde714d1d9a8c5a45137f97478a83b65e7f3146f6"}, - {file = "soxr-0.5.0.post1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c5af7b355959061beb90a1d73c4834ece4549f07b708f8c73c088153cec29935"}, - {file = "soxr-0.5.0.post1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e1dda616fc797b1507b65486f3116ed2c929f13c722922963dd419d64ada6c07"}, - {file = 
"soxr-0.5.0.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94de2812368e98cb42b4eaeddf8ee1657ecc19bd053f8e67b9b5aa12a3592012"}, - {file = "soxr-0.5.0.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8e9c980637e03d3f345a4fd81d56477a58c294fb26205fa121bc4eb23d9d01"}, - {file = "soxr-0.5.0.post1-cp39-cp39-win_amd64.whl", hash = "sha256:7e71b0b0db450f36de70f1047505231db77a713f8c47df9342582ae8a4b828f2"}, - {file = "soxr-0.5.0.post1.tar.gz", hash = "sha256:7092b9f3e8a416044e1fa138c8172520757179763b85dc53aa9504f4813cff73"}, + {file = "soxr-1.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:b876a3156f67c76aef0cff1084eaf4088d9ca584bb569cb993f89a52ec5f399f"}, + {file = "soxr-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4d3b957a7b0cc19ae6aa45d40b2181474e53a8dd00efd7bce6bcf4e60e020892"}, + {file = "soxr-1.0.0-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89685faedebc45af71f08f9957b61cc6143bc94ba43fe38e97067f81e272969"}, + {file = "soxr-1.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d255741b2f0084fd02d4a2ddd77cd495be9e7e7b6f9dba1c9494f86afefac65b"}, + {file = "soxr-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:158a4a9055958c4b95ef91dbbe280cabb00946b5423b25a9b0ce31bd9e0a271e"}, + {file = "soxr-1.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:28e19d74a5ef45c0d7000f3c70ec1719e89077379df2a1215058914d9603d2d8"}, + {file = "soxr-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8dc69fc18884e53b72f6141fdf9d80997edbb4fec9dc2942edcb63abbe0d023"}, + {file = "soxr-1.0.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f15450e6f65f22f02fcd4c5a9219c873b1e583a73e232805ff160c759a6b586"}, + {file = "soxr-1.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f73f57452f9df37b4de7a4052789fcbd474a5b28f38bba43278ae4b489d4384"}, + {file = 
"soxr-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f417c3d69236051cf5a1a7bad7c4bff04eb3d8fcaa24ac1cb06e26c8d48d8dc"}, + {file = "soxr-1.0.0-cp312-abi3-macosx_10_14_x86_64.whl", hash = "sha256:abecf4e39017f3fadb5e051637c272ae5778d838e5c3926a35db36a53e3a607f"}, + {file = "soxr-1.0.0-cp312-abi3-macosx_11_0_arm64.whl", hash = "sha256:e973d487ee46aa8023ca00a139db6e09af053a37a032fe22f9ff0cc2e19c94b4"}, + {file = "soxr-1.0.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e8ce273cca101aff3d8c387db5a5a41001ba76ef1837883438d3c652507a9ccc"}, + {file = "soxr-1.0.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8f2a69686f2856d37823bbb7b78c3d44904f311fe70ba49b893af11d6b6047b"}, + {file = "soxr-1.0.0-cp312-abi3-win_amd64.whl", hash = "sha256:2a3b77b115ae7c478eecdbd060ed4f61beda542dfb70639177ac263aceda42a2"}, + {file = "soxr-1.0.0-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:392a5c70c04eb939c9c176bd6f654dec9a0eaa9ba33d8f1024ed63cf68cdba0a"}, + {file = "soxr-1.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fdc41a1027ba46777186f26a8fba7893be913383414135577522da2fcc684490"}, + {file = "soxr-1.0.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:449acd1dfaf10f0ce6dfd75c7e2ef984890df94008765a6742dafb42061c1a24"}, + {file = "soxr-1.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38b35c99e408b8f440c9376a5e1dd48014857cd977c117bdaa4304865ae0edd0"}, + {file = "soxr-1.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:a39b519acca2364aa726b24a6fd55acf29e4c8909102e0b858c23013c38328e5"}, + {file = "soxr-1.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c120775b7d0ef9e974a5797a4695861e88653f7ecd0a2a532f089bc4452ba130"}, + {file = "soxr-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e59e5f648bd6144e79a6e0596aa486218876293f5ddce3ca84b9d8f8aa34d6d"}, + {file = "soxr-1.0.0-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:bb86c342862697dbd4a44043f275e5196f2d2c49dca374c78f19b7893988675d"}, + {file = "soxr-1.0.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3d2a4fadd88207c2991fb08c29fc189e7b2e298b598a94ea1747e42c8acb7a01"}, + {file = "soxr-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c7f5ace8f04f924b21caedeeb69f2a7b3d83d2d436639498c08b2cebe181af14"}, + {file = "soxr-1.0.0.tar.gz", hash = "sha256:e07ee6c1d659bc6957034f4800c60cb8b98de798823e34d2a2bba1caa85a4509"}, ] [package.dependencies] @@ -2898,45 +3916,213 @@ test = ["pytest"] [[package]] name = "sphinx" -version = "7.4.7" +version = "8.1.3" description = "Python documentation generator" -optional = true -python-versions = ">=3.9" +optional = false +python-versions = ">=3.10" files = [ - {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, - {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, + {file = "sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2"}, + {file = "sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927"}, ] [package.dependencies] -alabaster = ">=0.7.14,<0.8.0" +alabaster = ">=0.7.14" babel = ">=2.13" colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} docutils = ">=0.20,<0.22" imagesize = ">=1.3" -importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""} Jinja2 = ">=3.1" packaging = ">=23.0" Pygments = ">=2.17" requests = ">=2.30.0" snowballstemmer = ">=2.2" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = ">=2.0.0" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" +sphinxcontrib-applehelp = ">=1.0.7" +sphinxcontrib-devhelp = ">=1.0.6" +sphinxcontrib-htmlhelp = ">=2.0.6" +sphinxcontrib-jsmath = ">=1.0.1" +sphinxcontrib-qthelp = ">=1.0.6" 
sphinxcontrib-serializinghtml = ">=1.1.9" tomli = {version = ">=2", markers = "python_version < \"3.11\""} [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] +lint = ["flake8 (>=6.0)", "mypy (==1.11.1)", "pyright (==1.1.384)", "pytest (>=6.0)", "ruff (==0.6.9)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-Pillow (==10.2.0.20240822)", "types-Pygments (==2.18.0.20240506)", "types-colorama (==0.4.15.20240311)", "types-defusedxml (==0.7.0.20240218)", "types-docutils (==0.21.0.20241005)", "types-requests (==2.32.0.20240914)", "types-urllib3 (==1.26.25.14)"] test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.0.1" +description = "Type hints (PEP 484) support for the Sphinx autodoc extension" +optional = true +python-versions = ">=3.10" +files = [ + {file = "sphinx_autodoc_typehints-3.0.1-py3-none-any.whl", hash = "sha256:4b64b676a14b5b79cefb6628a6dc8070e320d4963e8ff640a2f3e9390ae9045a"}, + {file = "sphinx_autodoc_typehints-3.0.1.tar.gz", hash = "sha256:b9b40dd15dee54f6f810c924f863f9cf1c54f9f3265c495140ea01be7f44fa55"}, +] + +[package.dependencies] +sphinx = ">=8.1.3" + +[package.extras] +docs = ["furo (>=2024.8.6)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "defusedxml (>=0.7.1)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "sphobjinv (>=2.3.1.2)", "typing-extensions (>=4.12.2)"] + +[[package]] +name = "sphinx-copybutton" +version = "0.5.2" +description = "Add a copy button to each of your code cells." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, + {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, +] + +[package.dependencies] +sphinx = ">=1.8" + +[package.extras] +code-style = ["pre-commit (==2.12.1)"] +rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] + +[[package]] +name = "sphinx-design" +version = "0.6.1" +description = "A sphinx extension for designing beautiful, view size responsive web components." +optional = true +python-versions = ">=3.9" +files = [ + {file = "sphinx_design-0.6.1-py3-none-any.whl", hash = "sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c"}, + {file = "sphinx_design-0.6.1.tar.gz", hash = "sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632"}, +] + +[package.dependencies] +sphinx = ">=6,<9" + +[package.extras] +code-style = ["pre-commit (>=3,<4)"] +rtd = ["myst-parser (>=2,<4)"] +testing = ["defusedxml", "myst-parser (>=2,<4)", "pytest (>=8.3,<9.0)", "pytest-cov", "pytest-regressions"] +testing-no-myst = ["defusedxml", "pytest (>=8.3,<9.0)", "pytest-cov", "pytest-regressions"] +theme-furo = ["furo (>=2024.7.18,<2024.8.0)"] +theme-im = ["sphinx-immaterial (>=0.12.2,<0.13.0)"] +theme-pydata = ["pydata-sphinx-theme (>=0.15.2,<0.16.0)"] +theme-rtd = ["sphinx-rtd-theme (>=2.0,<3.0)"] +theme-sbt = ["sphinx-book-theme (>=1.1,<2.0)"] + +[[package]] +name = "sphinx-jinja2-compat" +version = "0.4.1" +description = "Patches Jinja2 v3 to restore compatibility with earlier Sphinx versions." 
+optional = true +python-versions = ">=3.6" +files = [ + {file = "sphinx_jinja2_compat-0.4.1-py3-none-any.whl", hash = "sha256:64ca0d46f0d8029fbe69ea612793a55e6ef0113e1bba4a85d402158c09f17a14"}, + {file = "sphinx_jinja2_compat-0.4.1.tar.gz", hash = "sha256:0188f0802d42c3da72997533b55a00815659a78d3f81d4b4747b1fb15a5728e6"}, +] + +[package.dependencies] +jinja2 = ">=2.10" +markupsafe = ">=1" +standard-imghdr = {version = "3.10.14", markers = "python_version >= \"3.13\""} + +[[package]] +name = "sphinx-lint" +version = "1.0.1" +description = "Check for stylistic and formal issues in .rst and .py files included in the documentation." +optional = false +python-versions = ">=3.10" +files = [ + {file = "sphinx_lint-1.0.1-py3-none-any.whl", hash = "sha256:914648e4cc6e677df3a09a8d72a33c8dfa0c2618c857e415ec3ebdc219ff0af1"}, + {file = "sphinx_lint-1.0.1.tar.gz", hash = "sha256:2b054ff3270fce56a8a18737665dd8ab04f45d8bfdf3a57a2313970b527ad612"}, +] + +[package.dependencies] +polib = "*" +regex = "*" + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "sphinx-prompt" +version = "1.9.0" +description = "Sphinx directive to add unselectable prompt" +optional = true +python-versions = ">=3.10" +files = [ + {file = "sphinx_prompt-1.9.0-py3-none-any.whl", hash = "sha256:fd731446c03f043d1ff6df9f22414495b23067c67011cc21658ea8d36b3575fc"}, + {file = "sphinx_prompt-1.9.0.tar.gz", hash = "sha256:471b3c6d466dce780a9b167d9541865fd4e9a80ed46e31b06a52a0529ae995a1"}, +] + +[package.dependencies] +certifi = "*" +docutils = "*" +idna = "*" +pygments = "*" +Sphinx = ">=8.0.0,<9.0.0" +urllib3 = "*" + +[[package]] +name = "sphinx-tabs" +version = "3.4.5" +description = "Tabbed views for Sphinx" +optional = true +python-versions = "~=3.7" +files = [ + {file = "sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"}, + {file = "sphinx_tabs-3.4.5-py3-none-any.whl", hash = 
"sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09"}, +] + +[package.dependencies] +docutils = "*" +pygments = "*" +sphinx = "*" + +[package.extras] +code-style = ["pre-commit (==2.13.0)"] +testing = ["bs4", "coverage", "pygments", "pytest (>=7.1,<8)", "pytest-cov", "pytest-regressions", "rinohtype"] + +[[package]] +name = "sphinx-toolbox" +version = "4.1.1" +description = "Box of handy tools for Sphinx 🧰 📔" +optional = true +python-versions = ">=3.7" +files = [ + {file = "sphinx_toolbox-4.1.1-py3-none-any.whl", hash = "sha256:1ee2616091453430ffe41e8371e0ddd22a5c1f504ba2dfb306f50870f3f7672a"}, + {file = "sphinx_toolbox-4.1.1.tar.gz", hash = "sha256:1bb1750bf9e1f72a54161b0867caf3b6bf2ee216ecb9f8c519f0a9348824954a"}, +] + +[package.dependencies] +apeye = ">=0.4.0" +autodocsumm = ">=0.2.0" +beautifulsoup4 = ">=4.9.1" +cachecontrol = {version = ">=0.13.0", extras = ["filecache"]} +dict2css = ">=0.2.3" +docutils = ">=0.16" +domdf-python-tools = ">=2.9.0" +filelock = ">=3.8.0" +html5lib = ">=1.1" +roman = ">4.0" +"ruamel.yaml" = ">=0.16.12,<=0.18.16" +sphinx = ">=3.2.0" +sphinx-autodoc-typehints = ">=1.11.1" +sphinx-jinja2-compat = ">=0.1.0" +sphinx-prompt = ">=1.1.0" +sphinx-tabs = ">=1.2.1,<3.4.7" +tabulate = ">=0.8.7" +typing-extensions = ">=3.7.4.3,<3.10.0.1 || >3.10.0.1" + +[package.extras] +all = ["coincidence (>=0.4.3)", "pygments (>=2.7.4,<=2.13.0)"] +testing = ["coincidence (>=0.4.3)", "pygments (>=2.7.4,<=2.13.0)"] + [[package]] name = "sphinxcontrib-applehelp" version = "2.0.0" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, @@ -2952,7 +4138,7 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "2.0.0" description = "sphinxcontrib-devhelp is a sphinx extension which outputs 
Devhelp documents" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, @@ -2968,7 +4154,7 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.1.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, @@ -2984,7 +4170,7 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -optional = true +optional = false python-versions = ">=3.5" files = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, @@ -2998,7 +4184,7 @@ test = ["flake8", "mypy", "pytest"] name = "sphinxcontrib-qthelp" version = "2.0.0" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, @@ -3014,7 +4200,7 @@ test = ["defusedxml (>=0.7.1)", "pytest"] name = "sphinxcontrib-serializinghtml" version = "2.0.0" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" -optional = true +optional = false python-versions = ">=3.9" files = [ {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, @@ -3026,6 +4212,71 @@ lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] standalone = ["Sphinx (>=5)"] test = ["pytest"] 
+[[package]] +name = "standard-aifc" +version = "3.13.0" +description = "Standard library aifc redistribution. \"dead battery\"." +optional = true +python-versions = "*" +files = [ + {file = "standard_aifc-3.13.0-py3-none-any.whl", hash = "sha256:f7ae09cc57de1224a0dd8e3eb8f73830be7c3d0bc485de4c1f82b4a7f645ac66"}, + {file = "standard_aifc-3.13.0.tar.gz", hash = "sha256:64e249c7cb4b3daf2fdba4e95721f811bde8bdfc43ad9f936589b7bb2fae2e43"}, +] + +[package.dependencies] +audioop-lts = {version = "*", markers = "python_version >= \"3.13\""} +standard-chunk = {version = "*", markers = "python_version >= \"3.13\""} + +[[package]] +name = "standard-chunk" +version = "3.13.0" +description = "Standard library chunk redistribution. \"dead battery\"." +optional = true +python-versions = "*" +files = [ + {file = "standard_chunk-3.13.0-py3-none-any.whl", hash = "sha256:17880a26c285189c644bd5bd8f8ed2bdb795d216e3293e6dbe55bbd848e2982c"}, + {file = "standard_chunk-3.13.0.tar.gz", hash = "sha256:4ac345d37d7e686d2755e01836b8d98eda0d1a3ee90375e597ae43aaf064d654"}, +] + +[[package]] +name = "standard-imghdr" +version = "3.10.14" +description = "Standard library imghdr redistribution. \"dead battery\"." +optional = true +python-versions = "*" +files = [ + {file = "standard_imghdr-3.10.14-py3-none-any.whl", hash = "sha256:cdf6883163349624dee9a81d2853a20260337c4cd41c04e99c082e01833a08e2"}, + {file = "standard_imghdr-3.10.14.tar.gz", hash = "sha256:2598fe2e7c540dbda34b233295e10957ab8dc8ac6f3bd9eaa8d38be167232e52"}, +] + +[[package]] +name = "standard-sunau" +version = "3.13.0" +description = "Standard library sunau redistribution. \"dead battery\"." 
+optional = true +python-versions = "*" +files = [ + {file = "standard_sunau-3.13.0-py3-none-any.whl", hash = "sha256:53af624a9529c41062f4c2fd33837f297f3baa196b0cfceffea6555654602622"}, + {file = "standard_sunau-3.13.0.tar.gz", hash = "sha256:b319a1ac95a09a2378a8442f403c66f4fd4b36616d6df6ae82b8e536ee790908"}, +] + +[package.dependencies] +audioop-lts = {version = "*", markers = "python_version >= \"3.13\""} + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + [[package]] name = "texttable" version = "1.7.0" @@ -3039,79 +4290,124 @@ files = [ [[package]] name = "threadpoolctl" -version = "3.5.0" +version = "3.6.0" description = "threadpoolctl" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, - {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, + {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, + {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, +] + +[[package]] +name = "titlecase" +version = "2.4.1" +description = "Python Port of John Gruber's titlecase.pl" +optional = false +python-versions = ">=3.7" +files = [ + {file = "titlecase-2.4.1.tar.gz", hash = "sha256:7d83a277ccbbda11a2944e78a63e5ccaf3d32f828c594312e4862f9a07f635f5"}, +] + +[package.extras] +regex = ["regex (>=2020.4.4)"] + +[[package]] 
+name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] [[package]] name = "tomli" -version = "2.2.1" +version = "2.3.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = 
"tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = 
"tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, + {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"}, + {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"}, + {file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"}, + {file = 
"tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"}, + {file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"}, + {file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"}, + {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"}, + {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"}, + {file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"}, + {file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"}, + {file = 
"tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"}, + {file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"}, + {file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"}, + {file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"}, + {file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"}, + {file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"}, + {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"}, + {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"}, + {file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"}, + {file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"}, + {file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"}, + {file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"}, ] [[package]] name = "types-beautifulsoup4" 
-version = "4.12.0.20241020" +version = "4.12.0.20250516" description = "Typing stubs for beautifulsoup4" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types-beautifulsoup4-4.12.0.20241020.tar.gz", hash = "sha256:158370d08d0cd448bd11b132a50ff5279237a5d4b5837beba074de152a513059"}, - {file = "types_beautifulsoup4-4.12.0.20241020-py3-none-any.whl", hash = "sha256:c95e66ce15a4f5f0835f7fbc5cd886321ae8294f977c495424eaf4225307fd30"}, + {file = "types_beautifulsoup4-4.12.0.20250516-py3-none-any.whl", hash = "sha256:5923399d4a1ba9cc8f0096fe334cc732e130269541d66261bb42ab039c0376ee"}, + {file = "types_beautifulsoup4-4.12.0.20250516.tar.gz", hash = "sha256:aa19dd73b33b70d6296adf92da8ab8a0c945c507e6fb7d5db553415cc77b417e"}, ] [package.dependencies] types-html5lib = "*" +[[package]] +name = "types-docutils" +version = "0.22.2.20251006" +description = "Typing stubs for docutils" +optional = false +python-versions = ">=3.9" +files = [ + {file = "types_docutils-0.22.2.20251006-py3-none-any.whl", hash = "sha256:1e61afdeb4fab4ae802034deea3e853ced5c9b5e1d156179000cb68c85daf384"}, + {file = "types_docutils-0.22.2.20251006.tar.gz", hash = "sha256:c36c0459106eda39e908e9147bcff9dbd88535975cde399433c428a517b9e3b2"}, +] + [[package]] name = "types-flask-cors" -version = "5.0.0.20240902" +version = "6.0.0.20250809" description = "Typing stubs for Flask-Cors" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types-Flask-Cors-5.0.0.20240902.tar.gz", hash = "sha256:8921b273bf7cd9636df136b66408efcfa6338a935e5c8f53f5eff1cee03f3394"}, - {file = "types_Flask_Cors-5.0.0.20240902-py3-none-any.whl", hash = "sha256:595e5f36056cd128ab905832e055f2e5d116fbdc685356eea4490bc77df82137"}, + {file = "types_flask_cors-6.0.0.20250809-py3-none-any.whl", hash = "sha256:f6d660dddab946779f4263cb561bffe275d86cb8747ce02e9fec8d340780131b"}, + {file = "types_flask_cors-6.0.0.20250809.tar.gz", hash = 
"sha256:24380a2b82548634c0931d50b9aafab214eea9f85dcc04f15ab1518752a7e6aa"}, ] [package.dependencies] @@ -3119,24 +4415,13 @@ Flask = ">=2.0.0" [[package]] name = "types-html5lib" -version = "1.1.11.20241018" +version = "1.1.11.20251014" description = "Typing stubs for html5lib" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types-html5lib-1.1.11.20241018.tar.gz", hash = "sha256:98042555ff78d9e3a51c77c918b1041acbb7eb6c405408d8a9e150ff5beccafa"}, - {file = "types_html5lib-1.1.11.20241018-py3-none-any.whl", hash = "sha256:3f1e064d9ed2c289001ae6392c84c93833abb0816165c6ff0abfc304a779f403"}, -] - -[[package]] -name = "types-mock" -version = "5.1.0.20240425" -description = "Typing stubs for mock" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-mock-5.1.0.20240425.tar.gz", hash = "sha256:5281a645d72e827d70043e3cc144fe33b1c003db084f789dc203aa90e812a5a4"}, - {file = "types_mock-5.1.0.20240425-py3-none-any.whl", hash = "sha256:d586a01d39ad919d3ddcd73de6cde73ca7f3c69707219f722d1b8d7733641ad7"}, + {file = "types_html5lib-1.1.11.20251014-py3-none-any.whl", hash = "sha256:4ff2cf18dfc547009ab6fa4190fc3de464ba815c9090c3dd4a5b65f664bfa76c"}, + {file = "types_html5lib-1.1.11.20251014.tar.gz", hash = "sha256:cc628d626e0111a2426a64f5f061ecfd113958b69ff6b3dc0eaaed2347ba9455"}, ] [[package]] @@ -3152,24 +4437,24 @@ files = [ [[package]] name = "types-pyyaml" -version = "6.0.12.20240917" +version = "6.0.12.20250915" description = "Typing stubs for PyYAML" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, - {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, + {file = "types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = 
"sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6"}, + {file = "types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3"}, ] [[package]] name = "types-requests" -version = "2.32.0.20241016" +version = "2.32.4.20250913" description = "Typing stubs for requests" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, - {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, + {file = "types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1"}, + {file = "types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d"}, ] [package.dependencies] @@ -3188,35 +4473,35 @@ files = [ [[package]] name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] [[package]] name = "unidecode" -version = "1.3.8" +version = "1.4.0" description = "ASCII transliterations of Unicode 
text" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" files = [ - {file = "Unidecode-1.3.8-py3-none-any.whl", hash = "sha256:d130a61ce6696f8148a3bd8fe779c99adeb4b870584eeb9526584e9aa091fd39"}, - {file = "Unidecode-1.3.8.tar.gz", hash = "sha256:cfdb349d46ed3873ece4586b96aa75258726e2fa8ec21d6f00a591d98806c2f4"}, + {file = "Unidecode-1.4.0-py3-none-any.whl", hash = "sha256:c3c7606c27503ad8d501270406e345ddb480a7b5f38827eafe4fa82a137f0021"}, + {file = "Unidecode-1.4.0.tar.gz", hash = "sha256:ce35985008338b676573023acc382d62c264f307c8f7963733405add37ea2b23"}, ] [[package]] name = "urllib3" -version = "2.2.3" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] @@ -3225,6 +4510,17 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = true +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + [[package]] name = "werkzeug" version = "3.1.3" @@ -3244,33 +4540,17 @@ watchdog = ["watchdog (>=2.3)"] [[package]] 
name = "xmltodict" -version = "0.14.2" +version = "1.0.2" description = "Makes working with XML feel like you are working with JSON" optional = true -python-versions = ">=3.6" -files = [ - {file = "xmltodict-0.14.2-py2.py3-none-any.whl", hash = "sha256:20cc7d723ed729276e808f26fb6b3599f786cbc37e06c65e192ba77c40f20aac"}, - {file = "xmltodict-0.14.2.tar.gz", hash = "sha256:201e7c28bb210e374999d1dde6382923ab0ed1a8a5faeece48ab525b7810a553"}, -] - -[[package]] -name = "zipp" -version = "3.21.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false python-versions = ">=3.9" files = [ - {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, - {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, + {file = "xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d"}, + {file = "xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] -type = ["pytest-mypy"] +test = ["pytest", "pytest-cov"] [extras] absubmit = ["requests"] @@ -3280,7 +4560,7 @@ beatport = ["requests-oauthlib"] bpd = ["PyGObject"] chroma = ["pyacoustid"] discogs = ["python3-discogs-client"] -docs = ["pydata-sphinx-theme", "sphinx"] +docs = ["docutils", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx-toolbox"] embedart = ["Pillow"] embyupdate = ["requests"] fetchart = ["Pillow", 
"beautifulsoup4", "langdetect", "requests"] @@ -3297,9 +4577,10 @@ replaygain = ["PyGObject"] scrub = ["mutagen"] sonosupdate = ["soco"] thumbnails = ["Pillow", "pyxdg"] +titlecase = ["titlecase"] web = ["flask", "flask-cors"] [metadata] lock-version = "2.0" -python-versions = ">=3.9,<4" -content-hash = "d609e83f7ffeefc12e28d627e5646aa5c1a6f5a56d7013bb649a468069550dba" +python-versions = ">=3.10,<4" +content-hash = "8a1714daca55eab559558f2d4bd63d4857686eb607bf4b24f1ea6dbd412e6641" diff --git a/pyproject.toml b/pyproject.toml index d985c54ea..dbfc2715b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "beets" -version = "2.2.0" +version = "2.5.1" description = "music tagger and library organizer" authors = ["Adrian Sampson <adrian@radbox.org>"] maintainers = ["Serene-Arc"] @@ -17,8 +17,10 @@ classifiers = [ "Environment :: Web Environment", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", ] packages = [ @@ -39,18 +41,22 @@ Changelog = "https://github.com/beetbox/beets/blob/master/docs/changelog.rst" "Bug Tracker" = "https://github.com/beetbox/beets/issues" [tool.poetry.dependencies] -python = ">=3.9,<4" +python = ">=3.10,<4" colorama = { version = "*", markers = "sys_platform == 'win32'" } -confuse = ">=1.5.0" +confuse = ">=2.1.0" jellyfish = "*" lap = ">=0.5.12" mediafile = ">=0.12.0" -musicbrainzngs = ">=0.4" -numpy = ">=1.24.4" +numpy = [ + { python = "<3.13", version = ">=2.0.2" }, + { python = ">=3.13", version = ">=2.3.4" }, +] platformdirs = ">=3.5.0" pyyaml = "*" -typing_extensions = { version = "*", python = "<=3.10" } +requests = ">=2.32.5" +requests-ratelimiter = ">=0.7.0" +typing_extensions = "*" unidecode = ">=1.3.6" 
beautifulsoup4 = { version = "*", optional = true } @@ -58,7 +64,15 @@ dbus-python = { version = "*", optional = true } flask = { version = "*", optional = true } flask-cors = { version = "*", optional = true } langdetect = { version = "*", optional = true } -librosa = { version = "^0.10.2.post1", optional = true } +librosa = { version = ">=0.11", optional = true } +scipy = [ # for librosa + { python = "<3.13", version = ">=1.13.1", optional = true }, + { python = ">=3.13", version = ">=1.16.1", optional = true }, +] +numba = [ # for librosa + { python = "<3.13", version = ">=0.60", optional = true }, + { python = ">=3.13", version = ">=0.62.1", optional = true }, +] mutagen = { version = ">=1.33", optional = true } Pillow = { version = "*", optional = true } py7zr = { version = "*", optional = true } @@ -70,19 +84,23 @@ python3-discogs-client = { version = ">=2.3.15", optional = true } pyxdg = { version = "*", optional = true } rarfile = { version = "*", optional = true } reflink = { version = "*", optional = true } -requests = { version = "*", optional = true } resampy = { version = ">=0.4.3", optional = true } requests-oauthlib = { version = ">=0.6.1", optional = true } soco = { version = "*", optional = true } +docutils = { version = ">=0.20.1", optional = true } pydata-sphinx-theme = { version = "*", optional = true } sphinx = { version = "*", optional = true } +sphinx-design = { version = ">=0.6.1", optional = true } +sphinx-copybutton = { version = ">=0.5.2", optional = true } +sphinx-toolbox = { version = ">=4.1.0", optional = true } +titlecase = { version = "^2.4.1", optional = true } [tool.poetry.group.test.dependencies] beautifulsoup4 = "*" codecov = ">=2.1.13" flask = "*" -mock = "*" +langdetect = "*" pylast = "*" pytest = "*" pytest-cov = "*" @@ -95,14 +113,17 @@ rarfile = "*" requests-mock = ">=1.12.1" requests_oauthlib = "*" responses = ">=0.3.0" +titlecase = "^2.4.1" [tool.poetry.group.lint.dependencies] +docstrfmt = ">=1.11.1" ruff = ">=0.6.4" 
+sphinx-lint = ">=1.0.0" [tool.poetry.group.typing.dependencies] mypy = "*" types-beautifulsoup4 = "*" -types-mock = "*" +types-docutils = ">=0.22.2.20251006" types-Flask-Cors = "*" types-Pillow = "*" types-PyYAML = "*" @@ -121,10 +142,18 @@ aura = ["flask", "flask-cors", "Pillow"] autobpm = ["librosa", "resampy"] # badfiles # mp3val and flac beatport = ["requests-oauthlib"] -bpd = ["PyGObject"] # python-gi and GStreamer 1.0+ +bpd = ["PyGObject"] # gobject-introspection, gstreamer1.0-plugins-base, python3-gst-1.0 chroma = ["pyacoustid"] # chromaprint or fpcalc # convert # ffmpeg -docs = ["pydata-sphinx-theme", "sphinx"] +docs = [ + "docutils", + "pydata-sphinx-theme", + "sphinx", + "sphinx-lint", + "sphinx-design", + "sphinx-copybutton", + "sphinx-toolbox", +] discogs = ["python3-discogs-client"] embedart = ["Pillow"] # ImageMagick embyupdate = ["requests"] @@ -145,6 +174,7 @@ replaygain = [ ] # python-gi and GStreamer 1.0+ or mp3gain/aacgain or Python Audio Tools or ffmpeg scrub = ["mutagen"] sonosupdate = ["soco"] +titlecase = ["titlecase"] thumbnails = ["Pillow", "pyxdg"] web = ["flask", "flask-cors"] @@ -157,7 +187,7 @@ build-backend = "poetry.core.masonry.api" [tool.pipx-install] poethepoet = ">=0.26" -poetry = ">=1.8" +poetry = ">=1.8,<2" [tool.poe.tasks.build] help = "Build the package" @@ -191,16 +221,25 @@ cmd = "mypy" [tool.poe.tasks.docs] help = "Build documentation" -cmd = "make -C docs html" +args = [{ name = "COMMANDS", positional = true, multiple = true, default = "html" }] +cmd = "make -C docs $COMMANDS" [tool.poe.tasks.format] help = "Format the codebase" cmd = "ruff format" +[tool.poe.tasks.format-docs] +help = "Format the documentation" +cmd = "docstrfmt docs *.rst" + [tool.poe.tasks.lint] help = "Check the code for linting issues. Accepts ruff options." 
cmd = "ruff check" +[tool.poe.tasks.lint-docs] +help = "Lint the documentation" +shell = "sphinx-lint --enable all --disable default-role $(git ls-files '*.rst')" + [tool.poe.tasks.update-dependencies] help = "Update dependencies to their latest versions." cmd = "poetry update -vv" @@ -242,6 +281,14 @@ done """ interpreter = "zsh" +[tool.docstrfmt] +line-length = 80 +extend-exclude = [ + "docs/_templates/**/*", + "docs/api/**/*", + "README_kr.rst", +] + [tool.ruff] target-version = "py39" line-length = 80 @@ -253,16 +300,25 @@ select = [ "E", # pycodestyle "F", # pyflakes # "B", # flake8-bugbear + "G", # flake8-logging-format "I", # isort + "ISC", # flake8-implicit-str-concat "N", # pep8-naming "PT", # flake8-pytest-style # "RUF", # ruff - # "UP", # pyupgrade + "UP", # pyupgrade "TCH", # flake8-type-checking "W", # pycodestyle ] +ignore = [ + "TC006", # no need to quote 'cast's since we use 'from __future__ import annotations' +] + [tool.ruff.lint.per-file-ignores] "beets/**" = ["PT"] +"test/plugins/test_ftintitle.py" = ["E501"] +"test/test_util.py" = ["E501"] +"test/ui/test_field_diff.py" = ["E501"] [tool.ruff.lint.isort] split-on-trailing-comma = false diff --git a/setup.cfg b/setup.cfg index e3472b04c..000c4a77e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,10 +3,13 @@ cache_dir = /tmp/pytest_cache # slightly more verbose output console_output_style = count +# pretty-print test names in the Codecov U +junit_family = legacy addopts = # show all skipped/failed/xfailed tests in the summary except passed -ra --strict-config + --junitxml=.reports/pytest.xml markers = on_lyrics_update: mark a test to run only after lyrics source code is updated integration_test: mark a test as an integration test @@ -15,7 +18,7 @@ markers = data_file = .reports/coverage/data branch = true relative_files = true -omit = +omit = beets/test/* beetsplug/_typing.py @@ -34,7 +37,6 @@ exclude_also = show_contexts = true [mypy] -files = beets,beetsplug,test,extra,docs allow_any_generics = false 
# FIXME: Would be better to actually type the libraries (if under our control), # or write our own stubs. For now, silence errors @@ -46,6 +48,8 @@ explicit_package_bases = true # config for all files. [[mypy-beets.plugins]] disallow_untyped_decorators = true -disallow_any_generics = true check_untyped_defs = true -allow_redefinition = true + +[[mypy-beets.metadata_plugins]] +disallow_untyped_decorators = true +check_untyped_defs = true diff --git a/test/autotag/test_distance.py b/test/autotag/test_distance.py new file mode 100644 index 000000000..3686f82c9 --- /dev/null +++ b/test/autotag/test_distance.py @@ -0,0 +1,357 @@ +import re + +import pytest + +from beets.autotag import AlbumInfo, TrackInfo +from beets.autotag.distance import ( + Distance, + distance, + string_dist, + track_distance, +) +from beets.library import Item +from beets.metadata_plugins import MetadataSourcePlugin, get_penalty +from beets.plugins import BeetsPlugin + +_p = pytest.param + + +class TestDistance: + @pytest.fixture(autouse=True, scope="class") + def setup_config(self, config): + config["match"]["distance_weights"]["data_source"] = 2.0 + config["match"]["distance_weights"]["album"] = 4.0 + config["match"]["distance_weights"]["medium"] = 2.0 + + @pytest.fixture + def dist(self): + return Distance() + + def test_add(self, dist): + dist.add("add", 1.0) + + assert dist._penalties == {"add": [1.0]} + + @pytest.mark.parametrize( + "key, args_with_expected", + [ + ( + "equality", + [ + (("ghi", ["abc", "def", "ghi"]), [0.0]), + (("xyz", ["abc", "def", "ghi"]), [0.0, 1.0]), + (("abc", re.compile(r"ABC", re.I)), [0.0, 1.0, 0.0]), + ], + ), + ("expr", [((True,), [1.0]), ((False,), [1.0, 0.0])]), + ( + "number", + [ + ((1, 1), [0.0]), + ((1, 2), [0.0, 1.0]), + ((2, 1), [0.0, 1.0, 1.0]), + ((-1, 2), [0.0, 1.0, 1.0, 1.0, 1.0, 1.0]), + ], + ), + ( + "priority", + [ + (("abc", "abc"), [0.0]), + (("def", ["abc", "def"]), [0.0, 0.5]), + (("gh", ["ab", "cd", "ef", re.compile("GH", re.I)]), [0.0, 0.5, 
0.75]), # noqa: E501 + (("xyz", ["abc", "def"]), [0.0, 0.5, 0.75, 1.0]), + ], + ), + ( + "ratio", + [ + ((25, 100), [0.25]), + ((10, 5), [0.25, 1.0]), + ((-5, 5), [0.25, 1.0, 0.0]), + ((5, 0), [0.25, 1.0, 0.0, 0.0]), + ], + ), + ( + "string", + [ + (("abc", "bcd"), [2 / 3]), + (("abc", None), [2 / 3, 1]), + ((None, None), [2 / 3, 1, 0]), + ], + ), + ], + ) # fmt: skip + def test_add_methods(self, dist, key, args_with_expected): + method = getattr(dist, f"add_{key}") + for arg_set, expected in args_with_expected: + method(key, *arg_set) + assert dist._penalties[key] == expected + + def test_distance(self, dist): + dist.add("album", 0.5) + dist.add("media", 0.25) + dist.add("media", 0.75) + + assert dist.distance == 0.5 + assert dist.max_distance == 6.0 + assert dist.raw_distance == 3.0 + + assert dist["album"] == 1 / 3 + assert dist["media"] == 1 / 6 + + def test_operators(self, dist): + dist.add("data_source", 0.0) + dist.add("album", 0.5) + dist.add("medium", 0.25) + dist.add("medium", 0.75) + assert len(dist) == 2 + assert list(dist) == [("album", 0.2), ("medium", 0.2)] + assert dist == 0.4 + assert dist < 1.0 + assert dist > 0.0 + assert dist - 0.4 == 0.0 + assert 0.4 - dist == 0.0 + assert float(dist) == 0.4 + + def test_penalties_sort(self, dist): + dist.add("album", 0.1875) + dist.add("medium", 0.75) + assert dist.items() == [("medium", 0.25), ("album", 0.125)] + + # Sort by key if distance is equal. 
+ dist = Distance() + dist.add("album", 0.375) + dist.add("medium", 0.75) + assert dist.items() == [("album", 0.25), ("medium", 0.25)] + + def test_update(self, dist): + dist1 = dist + dist1.add("album", 0.5) + dist1.add("media", 1.0) + + dist2 = Distance() + dist2.add("album", 0.75) + dist2.add("album", 0.25) + dist2.add("media", 0.05) + + dist1.update(dist2) + + assert dist1._penalties == { + "album": [0.5, 0.75, 0.25], + "media": [1.0, 0.05], + } + + +class TestTrackDistance: + @pytest.fixture(scope="class") + def info(self): + return TrackInfo(title="title", artist="artist") + + @pytest.mark.parametrize( + "title, artist, expected_penalty", + [ + _p("title", "artist", False, id="identical"), + _p("title", "Various Artists", False, id="tolerate-va"), + _p("title", "different artist", True, id="different-artist"), + _p("different title", "artist", True, id="different-title"), + ], + ) + def test_track_distance(self, info, title, artist, expected_penalty): + item = Item(artist=artist, title=title) + + dist = track_distance(item, info, incl_artist=True) + assert bool(dist) == expected_penalty, dist._penalties + + +class TestAlbumDistance: + @pytest.fixture(scope="class") + def items(self): + return [ + Item( + title=title, + track=track, + artist="artist", + album="album", + length=1, + ) + for title, track in [("one", 1), ("two", 2), ("three", 3)] + ] + + @pytest.fixture + def get_dist(self, items): + def inner(info: AlbumInfo): + return distance(items, info, list(zip(items, info.tracks))) + + return inner + + @pytest.fixture + def info(self, items): + return AlbumInfo( + artist="artist", + album="album", + tracks=[ + TrackInfo( + title=i.title, + artist=i.artist, + index=i.track, + length=i.length, + ) + for i in items + ], + va=False, + ) + + def test_identical_albums(self, get_dist, info): + assert get_dist(info) == 0 + + def test_incomplete_album(self, get_dist, info): + info.tracks.pop(2) + + assert 0 < float(get_dist(info)) < 0.2 + + def 
test_overly_complete_album(self, get_dist, info): + info.tracks.append( + Item(index=4, title="four", artist="artist", length=1) + ) + + assert 0 < float(get_dist(info)) < 0.2 + + @pytest.mark.parametrize("va", [True, False]) + def test_albumartist(self, get_dist, info, va): + info.artist = "another artist" + info.va = va + + assert bool(get_dist(info)) is not va + + def test_comp_no_track_artists(self, get_dist, info): + # Some VA releases don't have track artists (incomplete metadata). + info.artist = "another artist" + info.va = True + for track in info.tracks: + track.artist = None + + assert get_dist(info) == 0 + + def test_comp_track_artists_do_not_match(self, get_dist, info): + info.va = True + info.tracks[0].artist = "another artist" + + assert get_dist(info) != 0 + + def test_tracks_out_of_order(self, get_dist, info): + tracks = info.tracks + tracks[1].title, tracks[2].title = tracks[2].title, tracks[1].title + + assert 0 < float(get_dist(info)) < 0.2 + + def test_two_medium_release(self, get_dist, info): + info.tracks[0].medium_index = 1 + info.tracks[1].medium_index = 2 + info.tracks[2].medium_index = 1 + + assert get_dist(info) == 0 + + +class TestStringDistance: + @pytest.mark.parametrize( + "string1, string2", + [ + ("Some String", "Some String"), + ("Some String", "Some.String!"), + ("Some String", "sOME sTring"), + ("My Song (EP)", "My Song"), + ("The Song Title", "Song Title, The"), + ("A Song Title", "Song Title, A"), + ("An Album Title", "Album Title, An"), + ("", ""), + ("Untitled", "[Untitled]"), + ("And", "&"), + ("\xe9\xe1\xf1", "ean"), + ], + ) + def test_matching_distance(self, string1, string2): + assert string_dist(string1, string2) == 0.0 + + def test_different_distance(self): + assert string_dist("Some String", "Totally Different") != 0.0 + + @pytest.mark.parametrize( + "string1, string2, reference", + [ + ("XXX Band Name", "The Band Name", "Band Name"), + ("One .Two.", "One (Two)", "One"), + ("One .Two.", "One [Two]", "One"), + ("My 
Song blah Someone", "My Song feat Someone", "My Song"), + ], + ) + def test_relative_weights(self, string1, string2, reference): + assert string_dist(string2, reference) < string_dist(string1, reference) + + def test_solo_pattern(self): + # Just make sure these don't crash. + string_dist("The ", "") + string_dist("(EP)", "(EP)") + string_dist(", An", "") + + +class TestDataSourceDistance: + MATCH = 0.0 + MISMATCH = 0.125 + + @pytest.fixture(autouse=True) + def setup(self, monkeypatch, penalty, weight, multiple_data_sources): + monkeypatch.setitem(Distance._weights, "data_source", weight) + get_penalty.cache_clear() + + class TestMetadataSourcePlugin(MetadataSourcePlugin): + def album_for_id(self, *args, **kwargs): ... + def track_for_id(self, *args, **kwargs): ... + def candidates(self, *args, **kwargs): ... + def item_candidates(self, *args, **kwargs): ... + + # We use BeetsPlugin here to check if our compatibility layer + # for pre 2.4.0 MetadataPlugins is working as expected + # TODO: Replace BeetsPlugin with TestMetadataSourcePlugin in v3.0.0 + with pytest.deprecated_call(): + + class OriginalPlugin(BeetsPlugin): + data_source = "Original" + + class OtherPlugin(TestMetadataSourcePlugin): + @property + def data_source_mismatch_penalty(self): + return penalty + + monkeypatch.setattr( + "beets.metadata_plugins.find_metadata_source_plugins", + lambda: ( + [OriginalPlugin(), OtherPlugin()] + if multiple_data_sources + else [OtherPlugin()] + ), + ) + + @pytest.mark.parametrize( + "item,info,penalty,weight,multiple_data_sources,expected_distance", + [ + _p("Original", "Original", 0.5, 1.0, True, MATCH, id="match"), + _p("Original", "Other", 0.5, 1.0, True, MISMATCH, id="mismatch"), + _p("Other", "Original", 0.5, 1.0, True, MISMATCH, id="mismatch"), + _p("Original", "unknown", 0.5, 1.0, True, MISMATCH, id="mismatch-unknown"), # noqa: E501 + _p("Original", None, 0.5, 1.0, True, MISMATCH, id="mismatch-no-info"), # noqa: E501 + _p(None, "Other", 0.5, 1.0, True, MISMATCH, 
id="mismatch-no-original-multiple-sources"), # noqa: E501 + _p(None, "Other", 0.5, 1.0, False, MATCH, id="match-no-original-but-single-source"), # noqa: E501 + _p("unknown", "unknown", 0.5, 1.0, True, MATCH, id="match-unknown"), + _p("Original", "Other", 1.0, 1.0, True, 0.25, id="mismatch-max-penalty"), # noqa: E501 + _p("Original", "Other", 0.5, 5.0, True, 0.3125, id="mismatch-high-weight"), # noqa: E501 + _p("Original", "Other", 0.0, 1.0, True, MATCH, id="match-no-penalty"), # noqa: E501 + _p("Original", "Other", 0.5, 0.0, True, MATCH, id="match-no-weight"), # noqa: E501 + ], + ) # fmt: skip + def test_distance(self, item, info, expected_distance): + item = Item(data_source=item) + info = TrackInfo(data_source=info, title="") + + dist = track_distance(item, info) + + assert dist.distance == expected_distance diff --git a/test/conftest.py b/test/conftest.py index 95509bdb6..059526d2f 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,7 +1,13 @@ +import inspect import os import pytest +from beets.autotag.distance import Distance +from beets.dbcore.query import Query +from beets.test.helper import ConfigMixin +from beets.util import cached_classproperty + def skip_marked_items(items: list[pytest.Item], marker_name: str, reason: str): for item in (i for i in items if i.get_closest_marker(marker_name)): @@ -21,3 +27,36 @@ def pytest_collection_modifyitems( skip_marked_items( items, "on_lyrics_update", "No change in lyrics source code" ) + + +def pytest_make_parametrize_id(config, val, argname): + """Generate readable test identifiers for pytest parametrized tests. 
+ + Provides custom string representations for: + - Query classes/instances: use class name + - Lambda functions: show abbreviated source + - Other values: use standard repr() + """ + if inspect.isclass(val) and issubclass(val, Query): + return val.__name__ + + if inspect.isfunction(val) and val.__name__ == "<lambda>": + return inspect.getsource(val).split("lambda")[-1][:30] + + return repr(val) + + +def pytest_assertrepr_compare(op, left, right): + if isinstance(left, Distance) or isinstance(right, Distance): + return [f"Comparing Distance: {float(left)} {op} {float(right)}"] + + +@pytest.fixture(autouse=True) +def clear_cached_classproperty(): + cached_classproperty.cache.clear() + + +@pytest.fixture(scope="module") +def config(): + """Provide a fresh beets configuration for a module, when requested.""" + return ConfigMixin().config diff --git a/test/plugins/conftest.py b/test/plugins/conftest.py new file mode 100644 index 000000000..7e443004c --- /dev/null +++ b/test/plugins/conftest.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import requests + +if TYPE_CHECKING: + from requests_mock import Mocker + + +@pytest.fixture +def requests_mock(requests_mock, monkeypatch) -> Mocker: + """Use plain session wherever MB requests are mocked. + + This avoids rate limiting requests to speed up tests. 
+ """ + monkeypatch.setattr( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.create_session", + lambda _: requests.Session(), + ) + return requests_mock diff --git a/test/plugins/lyrics_pages.py b/test/plugins/lyrics_pages.py index 2d681e111..15cb812a1 100644 --- a/test/plugins/lyrics_pages.py +++ b/test/plugins/lyrics_pages.py @@ -108,45 +108,6 @@ lyrics_pages = [ url_title="The Beatles - Lady Madonna Lyrics | AZLyrics.com", marks=[xfail_on_ci("AZLyrics is blocked by Cloudflare")], ), - LyricsPage.make( - "http://www.chartlyrics.com/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx", - """ - Lady Madonna, - Children at your feet - Wonder how you manage to make ends meet. - - Who finds the money - When you pay the rent? - Did you think that money was heaven-sent? - - Friday night arrives without a suitcase. - Sunday morning creeping like a nun. - Monday's child has learned to tie his bootlace. - - See how they run. - - Lady Madonna, - Baby at your breast - Wonders how you manage to feed the rest. - - See how they run. - - Lady Madonna, - Lying on the bed. - Listen to the music playing in your head. - - Tuesday afternoon is never ending. - Wednesday morning papers didn't come. - Thursday night your stockings needed mending. - - See how they run. - - Lady Madonna, - Children at your feet - Wonder how you manage to make ends meet. 
- """, - url_title="The Beatles Lady Madonna lyrics", - ), LyricsPage.make( "https://www.dainuzodziai.lt/m/mergaites-nori-mylet-atlanta/", """ @@ -167,6 +128,7 @@ lyrics_pages = [ artist="Atlanta", track_title="Mergaitės Nori Mylėt", url_title="Mergaitės nori mylėt – Atlanta | Dainų Žodžiai", + marks=[xfail_on_ci("Expired SSL certificate")], ), LyricsPage.make( "https://genius.com/The-beatles-lady-madonna-lyrics", @@ -367,34 +329,40 @@ lyrics_pages = [ url_title="The Beatles - Lady Madonna Lyrics", ), LyricsPage.make( - "https://www.lyricsmode.com/lyrics/b/beatles/lady_madonna.html", + "https://www.lyricsmode.com/lyrics/b/beatles/mother_natures_son.html", """ - Lady Madonna, children at your feet. - Wonder how you manage to make ends meet. - Who finds the money? When you pay the rent? - Did you think that money was heaven sent? + Born a poor young country boy, Mother Nature's son + All day long I'm sitting singing songs for everyone - Friday night arrives without a suitcase. - Sunday morning creep in like a nun. - Mondays child has learned to tie his bootlace. - See how they run. + Sit beside a mountain stream, see her waters rise + Listen to the pretty sound of music as she flies - Lady Madonna, baby at your breast. - Wonder how you manage to feed the rest. + Doo doo doo doo doo doo doo doo doo doo doo + Doo doo doo doo doo doo doo doo doo + Doo doo doo - See how they run. - Lady Madonna, lying on the bed, - Listen to the music playing in your head. + Find me in my field of grass, Mother Nature's son + Swaying daises sing a lazy song beneath the sun - Tuesday afternoon is never ending. - Wednesday morning papers didn't come. - Thursday night you stockings needed mending. - See how they run. + Doo doo doo doo doo doo doo doo doo doo doo + Doo doo doo doo doo doo doo doo doo + Doo doo doo doo doo doo + Yeah yeah yeah - Lady Madonna, children at your feet. - Wonder how you manage to make ends meet. 
+ Mm mm mm mm mm mm mm + Mm mm mm, ooh ooh ooh + Mm mm mm mm mm mm mm + Mm mm mm mm, wah wah wah + + Wah, Mother Nature's son """, - url_title="Lady Madonna lyrics by The Beatles - original song full text. Official Lady Madonna lyrics, 2024 version | LyricsMode.com", # noqa: E501 + artist="The Beatles", + track_title="Mother Nature's Son", + url_title=( + "Mother Nature's Son lyrics by The Beatles - original song full" + " text. Official Mother Nature's Son lyrics, 2025 version" + " | LyricsMode.com" + ), ), LyricsPage.make( "https://www.lyricsontop.com/amy-winehouse-songs/jazz-n-blues-lyrics.html", @@ -567,6 +535,7 @@ lyrics_pages = [ Wonder how you manage to make ends meet. """, url_title="The Beatles - Lady Madonna", + marks=[xfail_on_ci("Sweetslyrics also fails with 403 FORBIDDEN in CI")], ), LyricsPage.make( "https://www.tekstowo.pl/piosenka,the_beatles,lady_madonna.html", @@ -605,5 +574,6 @@ lyrics_pages = [ Children at your feet Wonder how you manage to make ends meet """, + marks=[pytest.mark.xfail(reason="Tekstowo seems to be broken again")], ), ] diff --git a/test/plugins/test_albumtypes.py b/test/plugins/test_albumtypes.py index 8be1ff011..0a9d53349 100644 --- a/test/plugins/test_albumtypes.py +++ b/test/plugins/test_albumtypes.py @@ -16,9 +16,9 @@ from collections.abc import Sequence -from beets.autotag.mb import VARIOUS_ARTISTS_ID from beets.test.helper import PluginTestCase from beetsplug.albumtypes import AlbumTypesPlugin +from beetsplug.musicbrainz import VARIOUS_ARTISTS_ID class AlbumTypesPluginTest(PluginTestCase): diff --git a/test/plugins/test_art.py b/test/plugins/test_art.py index acb712354..02d23d59b 100644 --- a/test/plugins/test_art.py +++ b/test/plugins/test_art.py @@ -14,8 +14,13 @@ """Tests for the album art fetchers.""" +from __future__ import annotations + import os import shutil +import unittest +from pathlib import Path +from typing import TYPE_CHECKING from unittest.mock import patch import confuse @@ -37,6 +42,11 @@ from beetsplug 
import fetchart logger = logging.getLogger("beets.test_art") +if TYPE_CHECKING: + from collections.abc import Iterator, Sequence + + from beets.library import Album + class Settings: """Used to pass settings to the ArtSources when the plugin isn't fully @@ -48,6 +58,19 @@ class Settings: setattr(self, k, v) +class DummyRemoteArtSource(fetchart.RemoteArtSource): + NAME = "Dummy Art Source" + ID = "dummy" + + def get( + self, + album: Album, + plugin: fetchart.FetchArtPlugin, + paths: None | Sequence[bytes], + ) -> Iterator[fetchart.Candidate]: + return iter(()) + + class UseThePlugin(CleanupModulesMixin, BeetsTestCase): modules = (fetchart.__name__, ArtResizer.__module__) @@ -66,11 +89,11 @@ class CAAHelper: MBID_RELASE = "rid" MBID_GROUP = "rgid" - RELEASE_URL = "coverartarchive.org/release/{}".format(MBID_RELASE) - GROUP_URL = "coverartarchive.org/release-group/{}".format(MBID_GROUP) + RELEASE_URL = f"coverartarchive.org/release/{MBID_RELASE}" + GROUP_URL = f"coverartarchive.org/release-group/{MBID_GROUP}" - RELEASE_URL = "https://" + RELEASE_URL - GROUP_URL = "https://" + GROUP_URL + RELEASE_URL = f"https://{RELEASE_URL}" + GROUP_URL = f"https://{GROUP_URL}" RESPONSE_RELEASE = """{ "images": [ @@ -202,9 +225,11 @@ class FetchImageTest(FetchImageTestCase): def setUp(self): super().setUp() self.dpath = os.path.join(self.temp_dir, b"arttest") - self.source = fetchart.RemoteArtSource(logger, self.plugin.config) + self.source = DummyRemoteArtSource(logger, self.plugin.config) self.settings = Settings(maxwidth=0) - self.candidate = fetchart.Candidate(logger, url=self.URL) + self.candidate = fetchart.Candidate( + logger, self.source.ID, url=self.URL + ) def test_invalid_type_returns_none(self): self.mock_response(self.URL, "image/watercolour") @@ -220,13 +245,13 @@ class FetchImageTest(FetchImageTestCase): self.mock_response(self.URL, "image/png") self.source.fetch_image(self.candidate, self.settings) assert os.path.splitext(self.candidate.path)[1] == b".png" - 
self.assertExists(self.candidate.path) + assert Path(os.fsdecode(self.candidate.path)).exists() def test_does_not_rely_on_server_content_type(self): self.mock_response(self.URL, "image/jpeg", "image/png") self.source.fetch_image(self.candidate, self.settings) assert os.path.splitext(self.candidate.path)[1] == b".png" - self.assertExists(self.candidate.path) + assert Path(os.fsdecode(self.candidate.path)).exists() class FSArtTest(UseThePlugin): @@ -236,7 +261,9 @@ class FSArtTest(UseThePlugin): os.mkdir(syspath(self.dpath)) self.source = fetchart.FileSystem(logger, self.plugin.config) - self.settings = Settings(cautious=False, cover_names=("art",)) + self.settings = Settings( + cautious=False, cover_names=("art",), fallback=None + ) def test_finds_jpg_in_directory(self): _common.touch(os.path.join(self.dpath, b"a.jpg")) @@ -260,6 +287,13 @@ class FSArtTest(UseThePlugin): with pytest.raises(StopIteration): next(self.source.get(None, self.settings, [self.dpath])) + def test_configured_fallback_is_used(self): + fallback = os.path.join(self.temp_dir, b"a.jpg") + _common.touch(fallback) + self.settings.fallback = fallback + candidate = next(self.source.get(None, self.settings, [self.dpath])) + assert candidate.path == fallback + def test_empty_dir(self): with pytest.raises(StopIteration): next(self.source.get(None, self.settings, [self.dpath])) @@ -280,10 +314,8 @@ class FSArtTest(UseThePlugin): class CombinedTest(FetchImageTestCase, CAAHelper): ASIN = "xxxx" MBID = "releaseid" - AMAZON_URL = "https://images.amazon.com/images/P/{}.01.LZZZZZZZ.jpg".format( - ASIN - ) - AAO_URL = "https://www.albumart.org/index_detail.php?asin={}".format(ASIN) + AMAZON_URL = f"https://images.amazon.com/images/P/{ASIN}.01.LZZZZZZZ.jpg" + AAO_URL = f"https://www.albumart.org/index_detail.php?asin={ASIN}" def setUp(self): super().setUp() @@ -432,7 +464,7 @@ class ITunesStoreTest(UseThePlugin): self.mock_response(fetchart.ITunesStore.API_URL, json) candidate = next(self.source.get(self.album, 
self.settings, [])) assert candidate.url == "url_to_the_image" - assert candidate.match == fetchart.Candidate.MATCH_EXACT + assert candidate.match == fetchart.MetadataMatch.EXACT def test_itunesstore_no_result(self): json = '{"results": []}' @@ -471,7 +503,7 @@ class ITunesStoreTest(UseThePlugin): self.mock_response(fetchart.ITunesStore.API_URL, json) candidate = next(self.source.get(self.album, self.settings, [])) assert candidate.url == "url_to_the_image" - assert candidate.match == fetchart.Candidate.MATCH_FALLBACK + assert candidate.match == fetchart.MetadataMatch.FALLBACK def test_itunesstore_returns_result_without_artwork(self): json = """{ @@ -683,7 +715,7 @@ class FanartTVTest(UseThePlugin): def test_fanarttv_finds_image(self): album = _common.Bag(mb_releasegroupid="thereleasegroupid") self.mock_response( - fetchart.FanartTV.API_ALBUMS + "thereleasegroupid", + f"{fetchart.FanartTV.API_ALBUMS}thereleasegroupid", self.RESPONSE_MULTIPLE, ) candidate = next(self.source.get(album, self.settings, [])) @@ -692,7 +724,7 @@ class FanartTVTest(UseThePlugin): def test_fanarttv_returns_no_result_when_error_received(self): album = _common.Bag(mb_releasegroupid="thereleasegroupid") self.mock_response( - fetchart.FanartTV.API_ALBUMS + "thereleasegroupid", + f"{fetchart.FanartTV.API_ALBUMS}thereleasegroupid", self.RESPONSE_ERROR, ) with pytest.raises(StopIteration): @@ -701,7 +733,7 @@ class FanartTVTest(UseThePlugin): def test_fanarttv_returns_no_result_with_malformed_response(self): album = _common.Bag(mb_releasegroupid="thereleasegroupid") self.mock_response( - fetchart.FanartTV.API_ALBUMS + "thereleasegroupid", + f"{fetchart.FanartTV.API_ALBUMS}thereleasegroupid", self.RESPONSE_MALFORMED, ) with pytest.raises(StopIteration): @@ -711,7 +743,7 @@ class FanartTVTest(UseThePlugin): # The source used to fail when there were images present, but no cover album = _common.Bag(mb_releasegroupid="thereleasegroupid") self.mock_response( - fetchart.FanartTV.API_ALBUMS + 
"thereleasegroupid", + f"{fetchart.FanartTV.API_ALBUMS}thereleasegroupid", self.RESPONSE_NO_ART, ) with pytest.raises(StopIteration): @@ -724,10 +756,14 @@ class ArtImporterTest(UseThePlugin): super().setUp() # Mock the album art fetcher to always return our test file. - self.art_file = os.path.join(self.temp_dir, b"tmpcover.jpg") - _common.touch(self.art_file) + self.art_file = self.temp_dir_path / "tmpcover.jpg" + self.art_file.touch() self.old_afa = self.plugin.art_for_album - self.afa_response = fetchart.Candidate(logger, path=self.art_file) + self.afa_response = fetchart.Candidate( + logger, + source_name="test", + path=self.art_file, + ) def art_for_album(i, p, local_only=False): return self.afa_response @@ -776,12 +812,10 @@ class ArtImporterTest(UseThePlugin): self.plugin.fetch_art(self.session, self.task) self.plugin.assign_art(self.session, self.task) - artpath = self.lib.albums()[0].artpath + artpath = self.lib.albums()[0].art_filepath if should_exist: - assert artpath == os.path.join( - os.path.dirname(self.i.path), b"cover.jpg" - ) - self.assertExists(artpath) + assert artpath == self.i.filepath.parent / "cover.jpg" + assert artpath.exists() else: assert artpath is None return artpath @@ -800,21 +834,25 @@ class ArtImporterTest(UseThePlugin): def test_leave_original_file_in_place(self): self._fetch_art(True) - self.assertExists(self.art_file) + assert self.art_file.exists() def test_delete_original_file(self): prev_move = config["import"]["move"].get() try: config["import"]["move"] = True self._fetch_art(True) - self.assertNotExists(self.art_file) + assert not self.art_file.exists() finally: config["import"]["move"] = prev_move def test_do_not_delete_original_if_already_in_place(self): artdest = os.path.join(os.path.dirname(self.i.path), b"cover.jpg") - shutil.copyfile(syspath(self.art_file), syspath(artdest)) - self.afa_response = fetchart.Candidate(logger, path=artdest) + shutil.copyfile(self.art_file, syspath(artdest)) + self.afa_response = 
fetchart.Candidate( + logger, + source_name="test", + path=artdest, + ) self._fetch_art(True) def test_fetch_art_if_imported_file_deleted(self): @@ -829,157 +867,138 @@ class ArtImporterTest(UseThePlugin): self.plugin.batch_fetch_art( self.lib, self.lib.albums(), force=False, quiet=False ) - self.assertExists(self.album.artpath) + assert self.album.art_filepath.exists() -class ArtForAlbumTest(UseThePlugin): - """Tests that fetchart.art_for_album respects the scale & filesize - configurations (e.g., minwidth, enforce_ratio, max_filesize) +class AlbumArtOperationTestCase(UseThePlugin): + """Base test case for album art operations. + + Provides common setup for testing album art processing operations by setting + up a mock filesystem source that returns a predefined test image. """ - IMG_225x225 = os.path.join(_common.RSRC, b"abbey.jpg") - IMG_348x348 = os.path.join(_common.RSRC, b"abbey-different.jpg") - IMG_500x490 = os.path.join(_common.RSRC, b"abbey-similar.jpg") + IMAGE_PATH = os.path.join(_common.RSRC, b"abbey-similar.jpg") + IMAGE_FILESIZE = os.stat(util.syspath(IMAGE_PATH)).st_size + IMAGE_WIDTH = 500 + IMAGE_HEIGHT = 490 + IMAGE_WIDTH_HEIGHT_DIFF = IMAGE_WIDTH - IMAGE_HEIGHT - IMG_225x225_SIZE = os.stat(util.syspath(IMG_225x225)).st_size - IMG_348x348_SIZE = os.stat(util.syspath(IMG_348x348)).st_size - - RESIZE_OP = "resize" - DEINTERLACE_OP = "deinterlace" - REFORMAT_OP = "reformat" - - def setUp(self): - super().setUp() - - self.old_fs_source_get = fetchart.FileSystem.get + @classmethod + def setUpClass(cls): + super().setUpClass() def fs_source_get(_self, album, settings, paths): if paths: - yield fetchart.Candidate(logger, path=self.image_file) + yield fetchart.Candidate( + logger, source_name=_self.ID, path=cls.IMAGE_PATH + ) - fetchart.FileSystem.get = fs_source_get + patch("beetsplug.fetchart.FileSystem.get", fs_source_get).start() + cls.addClassCleanup(patch.stopall) - self.album = _common.Bag() + def get_album_art(self): + return 
self.plugin.art_for_album(_common.Bag(), [""], True) - def tearDown(self): - fetchart.FileSystem.get = self.old_fs_source_get - super().tearDown() - def assertImageIsValidArt(self, image_file, should_exist): - self.assertExists(image_file) - self.image_file = image_file +class AlbumArtOperationConfigurationTest(AlbumArtOperationTestCase): + """Check that scale & filesize configuration is respected. - candidate = self.plugin.art_for_album(self.album, [""], True) + Depending on `minwidth`, `enforce_ratio`, `margin_px`, and `margin_percent` + configuration the plugin should or should not return an art candidate. + """ - if should_exist: - assert candidate is not None - assert candidate.path == self.image_file - self.assertExists(candidate.path) - else: - assert candidate is None + def test_minwidth(self): + self.plugin.minwidth = self.IMAGE_WIDTH / 2 + assert self.get_album_art() - def _assert_image_operated(self, image_file, operation, should_operate): - self.image_file = image_file - with patch.object( - ArtResizer.shared, operation, return_value=self.image_file - ) as mock_operation: - self.plugin.art_for_album(self.album, [""], True) - assert mock_operation.called == should_operate + self.plugin.minwidth = self.IMAGE_WIDTH * 2 + assert not self.get_album_art() - def _require_backend(self): - """Skip the test if the art resizer doesn't have ImageMagick or - PIL (so comparisons and measurements are unavailable). 
- """ - if not ArtResizer.shared.local: - self.skipTest("ArtResizer has no local imaging backend available") - - def test_respect_minwidth(self): - self._require_backend() - self.plugin.minwidth = 300 - self.assertImageIsValidArt(self.IMG_225x225, False) - self.assertImageIsValidArt(self.IMG_348x348, True) - - def test_respect_enforce_ratio_yes(self): - self._require_backend() + def test_enforce_ratio(self): self.plugin.enforce_ratio = True - self.assertImageIsValidArt(self.IMG_500x490, False) - self.assertImageIsValidArt(self.IMG_225x225, True) + assert not self.get_album_art() - def test_respect_enforce_ratio_no(self): self.plugin.enforce_ratio = False - self.assertImageIsValidArt(self.IMG_500x490, True) + assert self.get_album_art() - def test_respect_enforce_ratio_px_above(self): - self._require_backend() + def test_enforce_ratio_with_px_margin(self): self.plugin.enforce_ratio = True - self.plugin.margin_px = 5 - self.assertImageIsValidArt(self.IMG_500x490, False) - def test_respect_enforce_ratio_px_below(self): - self._require_backend() + self.plugin.margin_px = self.IMAGE_WIDTH_HEIGHT_DIFF * 0.5 + assert not self.get_album_art() + + self.plugin.margin_px = self.IMAGE_WIDTH_HEIGHT_DIFF * 1.5 + assert self.get_album_art() + + def test_enforce_ratio_with_percent_margin(self): self.plugin.enforce_ratio = True - self.plugin.margin_px = 15 - self.assertImageIsValidArt(self.IMG_500x490, True) + diff_by_width = self.IMAGE_WIDTH_HEIGHT_DIFF / self.IMAGE_WIDTH - def test_respect_enforce_ratio_percent_above(self): - self._require_backend() - self.plugin.enforce_ratio = True - self.plugin.margin_percent = (500 - 490) / 500 * 0.5 - self.assertImageIsValidArt(self.IMG_500x490, False) + self.plugin.margin_percent = diff_by_width * 0.5 + assert not self.get_album_art() - def test_respect_enforce_ratio_percent_below(self): - self._require_backend() - self.plugin.enforce_ratio = True - self.plugin.margin_percent = (500 - 490) / 500 * 1.5 - 
self.assertImageIsValidArt(self.IMG_500x490, True) + self.plugin.margin_percent = diff_by_width * 1.5 + assert self.get_album_art() - def test_resize_if_necessary(self): - self._require_backend() - self.plugin.maxwidth = 300 - self._assert_image_operated(self.IMG_225x225, self.RESIZE_OP, False) - self._assert_image_operated(self.IMG_348x348, self.RESIZE_OP, True) - def test_fileresize(self): - self._require_backend() - self.plugin.max_filesize = self.IMG_225x225_SIZE // 2 - self._assert_image_operated(self.IMG_225x225, self.RESIZE_OP, True) +class AlbumArtPerformOperationTest(AlbumArtOperationTestCase): + """Test that the art is resized and deinterlaced if necessary.""" - def test_fileresize_if_necessary(self): - self._require_backend() - self.plugin.max_filesize = self.IMG_225x225_SIZE - self._assert_image_operated(self.IMG_225x225, self.RESIZE_OP, False) - self.assertImageIsValidArt(self.IMG_225x225, True) + def setUp(self): + super().setUp() + self.resizer_mock = patch.object( + ArtResizer.shared, "resize", return_value=self.IMAGE_PATH + ).start() + self.deinterlacer_mock = patch.object( + ArtResizer.shared, "deinterlace", return_value=self.IMAGE_PATH + ).start() - def test_fileresize_no_scale(self): - self._require_backend() - self.plugin.maxwidth = 300 - self.plugin.max_filesize = self.IMG_225x225_SIZE // 2 - self._assert_image_operated(self.IMG_225x225, self.RESIZE_OP, True) + def test_resize(self): + self.plugin.maxwidth = self.IMAGE_WIDTH / 2 + assert self.get_album_art() + assert self.resizer_mock.called - def test_fileresize_and_scale(self): - self._require_backend() - self.plugin.maxwidth = 200 - self.plugin.max_filesize = self.IMG_225x225_SIZE // 2 - self._assert_image_operated(self.IMG_225x225, self.RESIZE_OP, True) + def test_file_resized(self): + self.plugin.max_filesize = self.IMAGE_FILESIZE // 2 + assert self.get_album_art() + assert self.resizer_mock.called - def test_deinterlace(self): - self._require_backend() + def test_file_not_resized(self): 
+ self.plugin.max_filesize = self.IMAGE_FILESIZE + assert self.get_album_art() + assert not self.resizer_mock.called + + def test_file_resized_but_not_scaled(self): + self.plugin.maxwidth = self.IMAGE_WIDTH * 2 + self.plugin.max_filesize = self.IMAGE_FILESIZE // 2 + assert self.get_album_art() + assert self.resizer_mock.called + + def test_file_resized_and_scaled(self): + self.plugin.maxwidth = self.IMAGE_WIDTH / 2 + self.plugin.max_filesize = self.IMAGE_FILESIZE // 2 + assert self.get_album_art() + assert self.resizer_mock.called + + def test_deinterlaced(self): self.plugin.deinterlace = True - self._assert_image_operated(self.IMG_225x225, self.DEINTERLACE_OP, True) + assert self.get_album_art() + assert self.deinterlacer_mock.called + + def test_not_deinterlaced(self): self.plugin.deinterlace = False - self._assert_image_operated( - self.IMG_225x225, self.DEINTERLACE_OP, False - ) + assert self.get_album_art() + assert not self.deinterlacer_mock.called - def test_deinterlace_and_resize(self): - self._require_backend() - self.plugin.maxwidth = 300 + def test_deinterlaced_and_resized(self): + self.plugin.maxwidth = self.IMAGE_WIDTH / 2 self.plugin.deinterlace = True - self._assert_image_operated(self.IMG_348x348, self.DEINTERLACE_OP, True) - self._assert_image_operated(self.IMG_348x348, self.RESIZE_OP, True) + assert self.get_album_art() + assert self.deinterlacer_mock.called + assert self.resizer_mock.called -class DeprecatedConfigTest(BeetsTestCase): +class DeprecatedConfigTest(unittest.TestCase): """While refactoring the plugin, the remote_priority option was deprecated, and a new codepath should translate its effect. Check that it actually does so. 
@@ -997,7 +1016,7 @@ class DeprecatedConfigTest(BeetsTestCase): assert isinstance(self.plugin.sources[-1], fetchart.FileSystem) -class EnforceRatioConfigTest(BeetsTestCase): +class EnforceRatioConfigTest(unittest.TestCase): """Throw some data at the regexes.""" def _load_with_config(self, values, should_raise): diff --git a/test/plugins/test_aura.py b/test/plugins/test_aura.py index f4535c738..7e840008e 100644 --- a/test/plugins/test_aura.py +++ b/test/plugins/test_aura.py @@ -1,7 +1,7 @@ import os from http import HTTPStatus from pathlib import Path -from typing import Any, Optional +from typing import Any import pytest from flask.testing import Client @@ -58,9 +58,7 @@ class TestAuraResponse: def get_response_data(self, client: Client, item): """Return a callback accepting `endpoint` and `params` parameters.""" - def get( - endpoint: str, params: dict[str, str] - ) -> Optional[dict[str, Any]]: + def get(endpoint: str, params: dict[str, str]) -> dict[str, Any] | None: """Add additional `params` and GET the given endpoint. 
`include` parameter is added to every call to check that the diff --git a/test/plugins/test_beatport.py b/test/plugins/test_beatport.py index d072340b5..b92a3bf15 100644 --- a/test/plugins/test_beatport.py +++ b/test/plugins/test_beatport.py @@ -14,6 +14,7 @@ """Tests for the 'beatport' plugin.""" +import unittest from datetime import timedelta from beets.test import _common @@ -585,7 +586,7 @@ class BeatportTest(BeetsTestCase): assert track.genre == test_track.genre -class BeatportResponseEmptyTest(BeetsTestCase): +class BeatportResponseEmptyTest(unittest.TestCase): def _make_tracks_response(self): results = [ { diff --git a/test/plugins/test_player.py b/test/plugins/test_bpd.py similarity index 96% rename from test/plugins/test_player.py rename to test/plugins/test_bpd.py index b17a78c17..16e424d7e 100644 --- a/test/plugins/test_player.py +++ b/test/plugins/test_bpd.py @@ -14,19 +14,15 @@ """Tests for BPD's implementation of the MPD protocol.""" -import importlib.util import multiprocessing as mp import os import socket -import sys import tempfile import threading import time import unittest from contextlib import contextmanager - -# Mock GstPlayer so that the forked process doesn't attempt to import gi: -from unittest import mock +from unittest.mock import MagicMock, patch import confuse import pytest @@ -34,43 +30,8 @@ import yaml from beets.test.helper import PluginTestCase from beets.util import bluelet -from beetsplug import bpd -gstplayer = importlib.util.module_from_spec( - importlib.util.find_spec("beetsplug.bpd.gstplayer") -) - - -def _gstplayer_play(*_): - bpd.gstplayer._GstPlayer.playing = True - return mock.DEFAULT - - -gstplayer._GstPlayer = mock.MagicMock( - spec_set=[ - "time", - "volume", - "playing", - "run", - "play_file", - "pause", - "stop", - "seek", - "play", - "get_decoders", - ], - **{ - "playing": False, - "volume": 0, - "time.return_value": (0, 0), - "play_file.side_effect": _gstplayer_play, - "play.side_effect": _gstplayer_play, - 
"get_decoders.return_value": {"default": ({"audio/mpeg"}, {"mp3"})}, - }, -) -gstplayer.GstPlayer = lambda _: gstplayer._GstPlayer -sys.modules["beetsplug.bpd.gstplayer"] = gstplayer -bpd.gstplayer = gstplayer +bpd = pytest.importorskip("beetsplug.bpd") class CommandParseTest(unittest.TestCase): @@ -256,7 +217,7 @@ def implements(commands, fail=False): bluelet_listener = bluelet.Listener -@mock.patch("beets.util.bluelet.Listener") +@patch("beets.util.bluelet.Listener") def start_server(args, assigned_port, listener_patch): """Start the bpd server, writing the port to `assigned_port`.""" @@ -311,7 +272,7 @@ class BPDTestHelper(PluginTestCase): """ # Create a config file: config = { - "pluginpath": [os.fsdecode(self.temp_dir)], + "pluginpath": [str(self.temp_dir_path)], "plugins": "bpd", # use port 0 to let the OS choose a free port "bpd": {"host": host, "port": 0, "control_port": 0}, @@ -320,7 +281,7 @@ class BPDTestHelper(PluginTestCase): config["bpd"]["password"] = password config_file = tempfile.NamedTemporaryFile( mode="wb", - dir=os.fsdecode(self.temp_dir), + dir=str(self.temp_dir_path), suffix=".yaml", delete=False, ) @@ -938,7 +899,7 @@ class BPDPlaylistsTest(BPDTestHelper): response = client.send_command("load", "anything") self._assert_failed(response, bpd.ERROR_NO_EXIST) - @unittest.skip + @unittest.expectedFailure def test_cmd_playlistadd(self): with self.run_bpd() as client: self._bpd_add(client, self.item1, playlist="anything") @@ -1128,7 +1089,7 @@ class BPDConnectionTest(BPDTestHelper): self._assert_ok(response) assert self.TAGTYPES == set(response.data["tagtype"]) - @unittest.skip + @unittest.expectedFailure def test_tagtypes_mask(self): with self.run_bpd() as client: response = client.send_command("tagtypes", "clear") @@ -1169,6 +1130,10 @@ class BPDReflectionTest(BPDTestHelper): fail=True, ) + @patch( + "beetsplug.bpd.gstplayer.GstPlayer.get_decoders", + MagicMock(return_value={"default": ({"audio/mpeg"}, {"mp3"})}), + ) def 
test_cmd_decoders(self): with self.run_bpd() as client: response = client.send_command("decoders") diff --git a/test/plugins/test_convert.py b/test/plugins/test_convert.py index a2b4eaf67..9ae0ebf6d 100644 --- a/test/plugins/test_convert.py +++ b/test/plugins/test_convert.py @@ -18,6 +18,7 @@ import os.path import re import sys import unittest +from pathlib import Path import pytest from mediafile import MediaFile @@ -32,7 +33,6 @@ from beets.test.helper import ( capture_log, control_stdin, ) -from beets.util import bytestring_path, displayable_path from beetsplug import convert @@ -49,40 +49,18 @@ class ConvertMixin: """ if re.search("[^a-zA-Z0-9]", tag): raise ValueError( - "tag '{}' must only contain letters and digits".format(tag) + f"tag '{tag}' must only contain letters and digits" ) # A Python script that copies the file and appends a tag. stub = os.path.join(_common.RSRC, b"convert_stub.py").decode("utf-8") - return "{} {} $source $dest {}".format( - shell_quote(sys.executable), shell_quote(stub), tag - ) + return f"{shell_quote(sys.executable)} {shell_quote(stub)} $source $dest {tag}" - def assertFileTag(self, path, tag): - """Assert that the path is a file and the files content ends - with `tag`. - """ - display_tag = tag - tag = tag.encode("utf-8") - self.assertIsFile(path) - with open(path, "rb") as f: - f.seek(-len(display_tag), os.SEEK_END) - assert ( - f.read() == tag - ), f"{displayable_path(path)} is not tagged with {display_tag}" - - def assertNoFileTag(self, path, tag): - """Assert that the path is a file and the files content does not - end with `tag`. 
- """ - display_tag = tag - tag = tag.encode("utf-8") - self.assertIsFile(path) - with open(path, "rb") as f: - f.seek(-len(tag), os.SEEK_END) - assert ( - f.read() != tag - ), f"{displayable_path(path)} is unexpectedly tagged with {display_tag}" + def file_endswith(self, path: Path, tag: str): + """Check the path is a file and if its content ends with `tag`.""" + assert path.exists() + assert path.is_file() + return path.read_bytes().endswith(tag.encode("utf-8")) class ConvertTestCase(ConvertMixin, PluginTestCase): @@ -106,7 +84,7 @@ class ImportConvertTest(AsIsImporterMixin, ImportHelper, ConvertTestCase): def test_import_converted(self): self.run_asis_importer() item = self.lib.items().get() - self.assertFileTag(item.path, "convert") + assert self.file_endswith(item.filepath, "convert") # FIXME: fails on windows @unittest.skipIf(sys.platform == "win32", "win32") @@ -117,16 +95,16 @@ class ImportConvertTest(AsIsImporterMixin, ImportHelper, ConvertTestCase): item = self.lib.items().get() assert item is not None - self.assertIsFile(item.path) + assert item.filepath.is_file() def test_delete_originals(self): self.config["convert"]["delete_originals"] = True self.run_asis_importer() for path in self.importer.paths: for root, dirnames, filenames in os.walk(path): - assert ( - len(fnmatch.filter(filenames, "*.mp3")) == 0 - ), f"Non-empty import directory {util.displayable_path(path)}" + assert len(fnmatch.filter(filenames, "*.mp3")) == 0, ( + f"Non-empty import directory {util.displayable_path(path)}" + ) def get_count_of_import_files(self): import_file_count = 0 @@ -159,11 +137,10 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): self.album = self.add_album_fixture(ext="ogg") self.item = self.album.items()[0] - self.convert_dest = bytestring_path( - os.path.join(self.temp_dir, b"convert_dest") - ) + self.convert_dest = self.temp_dir_path / "convert_dest" + self.converted_mp3 = self.convert_dest / "converted.mp3" self.config["convert"] = { - "dest": 
self.convert_dest, + "dest": str(self.convert_dest), "paths": {"default": "converted"}, "format": "mp3", "formats": { @@ -179,19 +156,16 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): def test_convert(self): with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + assert self.file_endswith(self.converted_mp3, "mp3") def test_convert_with_auto_confirmation(self): self.run_convert("--yes") - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + assert self.file_endswith(self.converted_mp3, "mp3") def test_reject_confirmation(self): with control_stdin("n"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertNotExists(converted) + assert not self.converted_mp3.exists() def test_convert_keep_new(self): assert os.path.splitext(self.item.path)[1] == b".ogg" @@ -205,8 +179,7 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): def test_format_option(self): with control_stdin("y"): self.run_convert("--format", "opus") - converted = os.path.join(self.convert_dest, b"converted.ops") - self.assertFileTag(converted, "opus") + assert self.file_endswith(self.convert_dest / "converted.ops", "opus") def test_embed_album_art(self): self.config["convert"]["embed"] = True @@ -218,12 +191,11 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - mediafile = MediaFile(converted) + mediafile = MediaFile(self.converted_mp3) assert mediafile.images[0].data == image_data def test_skip_existing(self): - converted = os.path.join(self.convert_dest, b"converted.mp3") + converted = self.converted_mp3 self.touch(converted, content="XXX") self.run_convert("--yes") with open(converted) as f: @@ -231,8 +203,7 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): def test_pretend(self): 
self.run_convert("--pretend") - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertNotExists(converted) + assert not self.converted_mp3.exists() def test_empty_query(self): with capture_log("beets.convert") as logs: @@ -243,55 +214,76 @@ class ConvertCliTest(ConvertTestCase, ConvertCommand): self.config["convert"]["max_bitrate"] = 5000 with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + assert self.file_endswith(self.converted_mp3, "mp3") def test_transcode_when_maxbr_set_low_and_different_formats(self): self.config["convert"]["max_bitrate"] = 5 with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + assert self.file_endswith(self.converted_mp3, "mp3") def test_transcode_when_maxbr_set_to_none_and_different_formats(self): with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + assert self.file_endswith(self.converted_mp3, "mp3") def test_no_transcode_when_maxbr_set_high_and_same_formats(self): self.config["convert"]["max_bitrate"] = 5000 self.config["convert"]["format"] = "ogg" with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.ogg") - self.assertNoFileTag(converted, "ogg") + assert not self.file_endswith( + self.convert_dest / "converted.ogg", "ogg" + ) + + def test_force_overrides_max_bitrate_and_same_formats(self): + self.config["convert"]["max_bitrate"] = 5000 + self.config["convert"]["format"] = "ogg" + + with control_stdin("y"): + self.run_convert("--force") + + converted = self.convert_dest / "converted.ogg" + assert self.file_endswith(converted, "ogg") def test_transcode_when_maxbr_set_low_and_same_formats(self): self.config["convert"]["max_bitrate"] = 5 self.config["convert"]["format"] = "ogg" with 
control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.ogg") - self.assertFileTag(converted, "ogg") + assert self.file_endswith(self.convert_dest / "converted.ogg", "ogg") def test_transcode_when_maxbr_set_to_none_and_same_formats(self): self.config["convert"]["format"] = "ogg" with control_stdin("y"): self.run_convert() - converted = os.path.join(self.convert_dest, b"converted.ogg") - self.assertNoFileTag(converted, "ogg") + assert not self.file_endswith( + self.convert_dest / "converted.ogg", "ogg" + ) def test_playlist(self): with control_stdin("y"): self.run_convert("--playlist", "playlist.m3u8") - m3u_created = os.path.join(self.convert_dest, b"playlist.m3u8") - assert os.path.exists(m3u_created) + assert (self.convert_dest / "playlist.m3u8").exists() def test_playlist_pretend(self): self.run_convert("--playlist", "playlist.m3u8", "--pretend") - m3u_created = os.path.join(self.convert_dest, b"playlist.m3u8") - assert not os.path.exists(m3u_created) + assert not (self.convert_dest / "playlist.m3u8").exists() + + def test_force_overrides_no_convert(self): + self.config["convert"]["formats"]["opus"] = { + "command": self.tagged_copy_cmd("opus"), + "extension": "ops", + } + self.config["convert"]["no_convert"] = "format:ogg" + + [item] = self.add_item_fixtures(ext="ogg") + + with control_stdin("y"): + self.run_convert_path(item, "--format", "opus", "--force") + + converted = self.convert_dest / "converted.ops" + assert self.file_endswith(converted, "opus") @_common.slow_test() @@ -301,9 +293,9 @@ class NeverConvertLossyFilesTest(ConvertTestCase, ConvertCommand): def setUp(self): super().setUp() - self.convert_dest = os.path.join(self.temp_dir, b"convert_dest") + self.convert_dest = self.temp_dir_path / "convert_dest" self.config["convert"] = { - "dest": self.convert_dest, + "dest": str(self.convert_dest), "paths": {"default": "converted"}, "never_convert_lossy_files": True, "format": "mp3", @@ -316,23 +308,36 @@ class 
NeverConvertLossyFilesTest(ConvertTestCase, ConvertCommand): [item] = self.add_item_fixtures(ext="flac") with control_stdin("y"): self.run_convert_path(item) - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + converted = self.convert_dest / "converted.mp3" + assert self.file_endswith(converted, "mp3") def test_transcode_from_lossy(self): self.config["convert"]["never_convert_lossy_files"] = False [item] = self.add_item_fixtures(ext="ogg") with control_stdin("y"): self.run_convert_path(item) - converted = os.path.join(self.convert_dest, b"converted.mp3") - self.assertFileTag(converted, "mp3") + converted = self.convert_dest / "converted.mp3" + assert self.file_endswith(converted, "mp3") def test_transcode_from_lossy_prevented(self): [item] = self.add_item_fixtures(ext="ogg") with control_stdin("y"): self.run_convert_path(item) - converted = os.path.join(self.convert_dest, b"converted.ogg") - self.assertNoFileTag(converted, "mp3") + converted = self.convert_dest / "converted.ogg" + assert not self.file_endswith(converted, "mp3") + + def test_force_overrides_never_convert_lossy_files(self): + self.config["convert"]["formats"]["opus"] = { + "command": self.tagged_copy_cmd("opus"), + "extension": "ops", + } + [item] = self.add_item_fixtures(ext="ogg") + + with control_stdin("y"): + self.run_convert_path(item, "--format", "opus", "--force") + + converted = self.convert_dest / "converted.ops" + assert self.file_endswith(converted, "opus") class TestNoConvert: diff --git a/test/plugins/test_discogs.py b/test/plugins/test_discogs.py index 5e327ab27..eb65bc588 100644 --- a/test/plugins/test_discogs.py +++ b/test/plugins/test_discogs.py @@ -21,7 +21,6 @@ import pytest from beets import config from beets.test._common import Bag from beets.test.helper import BeetsTestCase, capture_log -from beets.util.id_extractors import extract_discogs_id_regex from beetsplug.discogs import DiscogsPlugin @@ -83,7 +82,7 @@ class 
DGAlbumInfoTest(BeetsTestCase): """Return a Bag that mimics a discogs_client.Release with a tracklist where tracks have the specified `positions`.""" tracks = [ - self._make_track("TITLE%s" % i, position) + self._make_track(f"TITLE{i}", position) for (i, position) in enumerate(positions, start=1) ] return self._make_release(tracks) @@ -172,27 +171,6 @@ class DGAlbumInfoTest(BeetsTestCase): assert t[3].index == 4 assert t[3].medium_total == 1 - def test_parse_position(self): - """Test the conversion of discogs `position` to medium, medium_index - and subtrack_index.""" - # List of tuples (discogs_position, (medium, medium_index, subindex) - positions = [ - ("1", (None, "1", None)), - ("A12", ("A", "12", None)), - ("12-34", ("12-", "34", None)), - ("CD1-1", ("CD1-", "1", None)), - ("1.12", (None, "1", "12")), - ("12.a", (None, "12", "A")), - ("12.34", (None, "12", "34")), - ("1ab", (None, "1", "AB")), - # Non-standard - ("IV", ("IV", None, None)), - ] - - d = DiscogsPlugin() - for position, expected in positions: - assert d.get_track_index(position) == expected - def test_parse_tracklist_without_sides(self): """Test standard Discogs position 12.2.9#1: "without sides".""" release = self._make_release_from_positions(["1", "2", "3"]) @@ -369,37 +347,6 @@ class DGAlbumInfoTest(BeetsTestCase): assert d is None assert "Release does not contain the required fields" in logs[0] - def test_album_for_id(self): - """Test parsing for a valid Discogs release_id""" - test_patterns = [ - ( - "http://www.discogs.com/G%C3%BCnther-Lause-Meru-Ep/release/4354798", - 4354798, - ), - ( - "http://www.discogs.com/release/4354798-G%C3%BCnther-Lause-Meru-Ep", - 4354798, - ), - ( - "http://www.discogs.com/G%C3%BCnther-4354798Lause-Meru-Ep/release/4354798", # NOQA E501 - 4354798, - ), - ( - "http://www.discogs.com/release/4354798-G%C3%BCnther-4354798Lause-Meru-Ep/", # NOQA E501 - 4354798, - ), - ("[r4354798]", 4354798), - ("r4354798", 4354798), - ("4354798", 4354798), - 
("yet-another-metadata-provider.org/foo/12345", ""), - ("005b84a0-ecd6-39f1-b2f6-6eb48756b268", ""), - ] - for test_pattern, expected in test_patterns: - match = extract_discogs_id_regex(test_pattern) - if not match: - match = "" - assert match == expected - def test_default_genre_style_settings(self): """Test genre default settings, genres to genre, styles to style""" release = self._make_release_from_positions(["1", "2"]) @@ -427,6 +374,245 @@ class DGAlbumInfoTest(BeetsTestCase): assert d.genre == "GENRE1, GENRE2" assert d.style is None + def test_strip_disambiguation(self): + """Test removing disambiguation from all disambiguated fields.""" + data = { + "id": 123, + "uri": "https://www.discogs.com/release/123456-something", + "tracklist": [ + { + "title": "track", + "position": "A", + "type_": "track", + "duration": "5:44", + "artists": [ + {"name": "TEST ARTIST (5)", "tracks": "", "id": 11146} + ], + } + ], + "artists": [ + {"name": "ARTIST NAME (2)", "id": 321, "join": "&"}, + {"name": "OTHER ARTIST (5)", "id": 321, "join": ""}, + ], + "title": "title", + "labels": [ + { + "name": "LABEL NAME (5)", + "catno": "catalog number", + } + ], + } + release = Bag( + data=data, + title=data["title"], + artists=[Bag(data=d) for d in data["artists"]], + ) + d = DiscogsPlugin().get_album_info(release) + assert d.artist == "ARTIST NAME & OTHER ARTIST" + assert d.tracks[0].artist == "TEST ARTIST" + assert d.label == "LABEL NAME" + + def test_strip_disambiguation_false(self): + """Test disabling disambiguation removal from all disambiguated fields.""" + config["discogs"]["strip_disambiguation"] = False + data = { + "id": 123, + "uri": "https://www.discogs.com/release/123456-something", + "tracklist": [ + { + "title": "track", + "position": "A", + "type_": "track", + "duration": "5:44", + "artists": [ + {"name": "TEST ARTIST (5)", "tracks": "", "id": 11146} + ], + } + ], + "artists": [ + {"name": "ARTIST NAME (2)", "id": 321, "join": "&"}, + {"name": "OTHER ARTIST (5)", 
"id": 321, "join": ""}, + ], + "title": "title", + "labels": [ + { + "name": "LABEL NAME (5)", + "catno": "catalog number", + } + ], + } + release = Bag( + data=data, + title=data["title"], + artists=[Bag(data=d) for d in data["artists"]], + ) + d = DiscogsPlugin().get_album_info(release) + assert d.artist == "ARTIST NAME (2) & OTHER ARTIST (5)" + assert d.tracks[0].artist == "TEST ARTIST (5)" + assert d.label == "LABEL NAME (5)" + config["discogs"]["strip_disambiguation"] = True + + +@pytest.mark.parametrize( + "track_artist_anv,track_artist", + [(False, "ARTIST Feat. PERFORMER"), (True, "VARIATION Feat. VARIATION")], +) +@pytest.mark.parametrize( + "album_artist_anv,album_artist", + [(False, "ARTIST & SOLOIST"), (True, "VARIATION & VARIATION")], +) +@pytest.mark.parametrize( + "artist_credit_anv,track_artist_credit,album_artist_credit", + [ + (False, "ARTIST Feat. PERFORMER", "ARTIST & SOLOIST"), + (True, "VARIATION Feat. VARIATION", "VARIATION & VARIATION"), + ], +) +@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock()) +def test_anv( + track_artist_anv, + track_artist, + album_artist_anv, + album_artist, + artist_credit_anv, + track_artist_credit, + album_artist_credit, +): + """Test using artist name variations.""" + data = { + "id": 123, + "uri": "https://www.discogs.com/release/123456-something", + "tracklist": [ + { + "title": "track", + "position": "A", + "type_": "track", + "duration": "5:44", + "artists": [ + { + "name": "ARTIST", + "tracks": "", + "anv": "VARIATION", + "id": 11146, + } + ], + "extraartists": [ + { + "name": "PERFORMER", + "role": "Featuring", + "anv": "VARIATION", + "id": 787, + } + ], + } + ], + "artists": [ + {"name": "ARTIST (4)", "anv": "VARIATION", "id": 321, "join": "&"}, + {"name": "SOLOIST", "anv": "VARIATION", "id": 445, "join": ""}, + ], + "title": "title", + } + release = Bag( + data=data, + title=data["title"], + artists=[Bag(data=d) for d in data["artists"]], + ) + config["discogs"]["anv"]["album_artist"] = 
album_artist_anv + config["discogs"]["anv"]["artist"] = track_artist_anv + config["discogs"]["anv"]["artist_credit"] = artist_credit_anv + r = DiscogsPlugin().get_album_info(release) + assert r.artist == album_artist + assert r.artist_credit == album_artist_credit + assert r.tracks[0].artist == track_artist + assert r.tracks[0].artist_credit == track_artist_credit + + +@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock()) +def test_anv_album_artist(): + """Test using artist name variations when the album artist + is the same as the track artist, but only the track artist + should use the artist name variation.""" + data = { + "id": 123, + "uri": "https://www.discogs.com/release/123456-something", + "tracklist": [ + { + "title": "track", + "position": "A", + "type_": "track", + "duration": "5:44", + } + ], + "artists": [ + {"name": "ARTIST (4)", "anv": "VARIATION", "id": 321}, + ], + "title": "title", + } + release = Bag( + data=data, + title=data["title"], + artists=[Bag(data=d) for d in data["artists"]], + ) + config["discogs"]["anv"]["album_artist"] = False + config["discogs"]["anv"]["artist"] = True + config["discogs"]["anv"]["artist_credit"] = False + r = DiscogsPlugin().get_album_info(release) + assert r.artist == "ARTIST" + assert r.artist_credit == "ARTIST" + assert r.tracks[0].artist == "VARIATION" + assert r.tracks[0].artist_credit == "ARTIST" + + +@pytest.mark.parametrize( + "track, expected_artist", + [ + ( + { + "type_": "track", + "title": "track", + "position": "1", + "duration": "5:00", + "artists": [ + {"name": "NEW ARTIST", "tracks": "", "id": 11146}, + {"name": "VOCALIST", "tracks": "", "id": 344, "join": "&"}, + ], + "extraartists": [ + { + "name": "SOLOIST", + "id": 3, + "role": "Featuring", + }, + { + "name": "PERFORMER (1)", + "id": 5, + "role": "Other Role, Featuring", + }, + { + "name": "RANDOM", + "id": 8, + "role": "Written-By", + }, + { + "name": "MUSICIAN", + "id": 10, + "role": "Featuring [Uncredited]", + }, + ], + }, + "NEW ARTIST, 
VOCALIST Feat. SOLOIST, PERFORMER, MUSICIAN", + ), + ], +) +@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock()) +def test_parse_featured_artists(track, expected_artist): + """Tests the plugins ability to parse a featured artist. + Initial check with one featured artist, two featured artists, + and three. Ignores artists that are not listed as featured.""" + t = DiscogsPlugin().get_track_info( + track, 1, 1, ("ARTIST", "ARTIST CREDIT", 2) + ) + assert t.artist == expected_artist + @pytest.mark.parametrize( "formats, expected_media, expected_albumtype", @@ -449,3 +635,22 @@ def test_get_media_and_albumtype(formats, expected_media, expected_albumtype): result = DiscogsPlugin.get_media_and_albumtype(formats) assert result == (expected_media, expected_albumtype) + + +@pytest.mark.parametrize( + "position, medium, index, subindex", + [ + ("1", None, "1", None), + ("A12", "A", "12", None), + ("12-34", "12-", "34", None), + ("CD1-1", "CD1-", "1", None), + ("1.12", None, "1", "12"), + ("12.a", None, "12", "A"), + ("12.34", None, "12", "34"), + ("1ab", None, "1", "AB"), + # Non-standard + ("IV", "IV", None, None), + ], +) +def test_get_track_index(position, medium, index, subindex): + assert DiscogsPlugin.get_track_index(position) == (medium, index, subindex) diff --git a/test/plugins/test_edit.py b/test/plugins/test_edit.py index 2d557d623..d0e03d0e5 100644 --- a/test/plugins/test_edit.py +++ b/test/plugins/test_edit.py @@ -19,9 +19,9 @@ from beets.dbcore.query import TrueQuery from beets.library import Item from beets.test import _common from beets.test.helper import ( + AutotagImportTestCase, AutotagStub, BeetsTestCase, - ImportTestCase, PluginMixin, TerminalImportMixin, control_stdin, @@ -134,22 +134,6 @@ class EditCommandTest(EditMixin, BeetsTestCase): {f: item[f] for f in item._fields} for item in self.album.items() ] - def assertCounts( - self, - mock_write, - album_count=ALBUM_COUNT, - track_count=TRACK_COUNT, - write_call_count=TRACK_COUNT, - 
title_starts_with="", - ): - """Several common assertions on Album, Track and call counts.""" - assert len(self.lib.albums()) == album_count - assert len(self.lib.items()) == track_count - assert mock_write.call_count == write_call_count - assert all( - i.title.startswith(title_starts_with) for i in self.lib.items() - ) - def test_title_edit_discard(self, mock_write): """Edit title for all items in the library, then discard changes.""" # Edit track titles. @@ -159,9 +143,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ["c"], ) - self.assertCounts( - mock_write, write_call_count=0, title_starts_with="t\u00eftle" - ) + assert mock_write.call_count == 0 self.assertItemFieldsModified(self.album.items(), self.items_orig, []) def test_title_edit_apply(self, mock_write): @@ -173,11 +155,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ["a"], ) - self.assertCounts( - mock_write, - write_call_count=self.TRACK_COUNT, - title_starts_with="modified t\u00eftle", - ) + assert mock_write.call_count == self.TRACK_COUNT self.assertItemFieldsModified( self.album.items(), self.items_orig, ["title", "mtime"] ) @@ -191,16 +169,43 @@ class EditCommandTest(EditMixin, BeetsTestCase): ["a"], ) - self.assertCounts( - mock_write, - write_call_count=1, - ) + assert mock_write.call_count == 1 # No changes except on last item. 
self.assertItemFieldsModified( list(self.album.items())[:-1], self.items_orig[:-1], [] ) assert list(self.album.items())[-1].title == "modified t\u00eftle 9" + def test_title_edit_keep_editing_then_apply(self, mock_write): + """Edit titles, keep editing once, then apply changes.""" + self.run_mocked_command( + {"replacements": {"t\u00eftle": "modified t\u00eftle"}}, + # keep Editing, then Apply + ["e", "a"], + ) + + assert mock_write.call_count == self.TRACK_COUNT + self.assertItemFieldsModified( + self.album.items(), + self.items_orig, + ["title", "mtime"], + ) + + def test_title_edit_keep_editing_then_cancel(self, mock_write): + """Edit titles, keep editing once, then cancel.""" + self.run_mocked_command( + {"replacements": {"t\u00eftle": "modified t\u00eftle"}}, + # keep Editing, then Cancel + ["e", "c"], + ) + + assert mock_write.call_count == 0 + self.assertItemFieldsModified( + self.album.items(), + self.items_orig, + [], + ) + def test_noedit(self, mock_write): """Do not edit anything.""" # Do not edit anything. @@ -210,9 +215,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): [], ) - self.assertCounts( - mock_write, write_call_count=0, title_starts_with="t\u00eftle" - ) + assert mock_write.call_count == 0 self.assertItemFieldsModified(self.album.items(), self.items_orig, []) def test_album_edit_apply(self, mock_write): @@ -226,7 +229,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ["a"], ) - self.assertCounts(mock_write, write_call_count=self.TRACK_COUNT) + assert mock_write.call_count == self.TRACK_COUNT self.assertItemFieldsModified( self.album.items(), self.items_orig, ["album", "mtime"] ) @@ -249,9 +252,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): # Even though a flexible attribute was written (which is not directly # written to the tags), write should still be called since templates # might use it. 
- self.assertCounts( - mock_write, write_call_count=1, title_starts_with="t\u00eftle" - ) + assert mock_write.call_count == 1 def test_a_album_edit_apply(self, mock_write): """Album query (-a), edit album field, apply changes.""" @@ -263,7 +264,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ) self.album.load() - self.assertCounts(mock_write, write_call_count=self.TRACK_COUNT) + assert mock_write.call_count == self.TRACK_COUNT assert self.album.album == "modified \u00e4lbum" self.assertItemFieldsModified( self.album.items(), self.items_orig, ["album", "mtime"] @@ -279,7 +280,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ) self.album.load() - self.assertCounts(mock_write, write_call_count=self.TRACK_COUNT) + assert mock_write.call_count == self.TRACK_COUNT assert self.album.albumartist == "the modified album artist" self.assertItemFieldsModified( self.album.items(), self.items_orig, ["albumartist", "mtime"] @@ -295,9 +296,7 @@ class EditCommandTest(EditMixin, BeetsTestCase): ["n"], ) - self.assertCounts( - mock_write, write_call_count=0, title_starts_with="t\u00eftle" - ) + assert mock_write.call_count == 0 def test_invalid_yaml(self, mock_write): """Edit the yaml file incorrectly (resulting in a well-formed but @@ -309,17 +308,17 @@ class EditCommandTest(EditMixin, BeetsTestCase): [], ) - self.assertCounts( - mock_write, write_call_count=0, title_starts_with="t\u00eftle" - ) + assert mock_write.call_count == 0 @_common.slow_test() class EditDuringImporterTestCase( - EditMixin, TerminalImportMixin, ImportTestCase + EditMixin, TerminalImportMixin, AutotagImportTestCase ): """TODO""" + matching = AutotagStub.GOOD + IGNORED = ["added", "album_id", "id", "mtime", "path"] def setUp(self): @@ -327,12 +326,6 @@ class EditDuringImporterTestCase( # Create some mediafiles, and store them for comparison. 
self.prepare_album_for_import(1) self.items_orig = [Item.from_path(f.path) for f in self.import_media] - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.GOOD - - def tearDown(self): - super().tearDown() - self.matcher.restore() @_common.slow_test() diff --git a/test/plugins/test_embedart.py b/test/plugins/test_embedart.py index 2d2d68153..d40025374 100644 --- a/test/plugins/test_embedart.py +++ b/test/plugins/test_embedart.py @@ -13,6 +13,7 @@ # included in all copies or substantial portions of the Software. +import os import os.path import shutil import tempfile @@ -22,11 +23,17 @@ from unittest.mock import MagicMock, patch import pytest from mediafile import MediaFile -from beets import art, config, logging, ui +from beets import config, logging, ui from beets.test import _common -from beets.test.helper import BeetsTestCase, FetchImageHelper, PluginMixin +from beets.test.helper import ( + BeetsTestCase, + FetchImageHelper, + IOMixin, + PluginMixin, +) from beets.util import bytestring_path, displayable_path, syspath from beets.util.artresizer import ArtResizer +from beetsplug._utils import art from test.test_art_resize import DummyIMBackend @@ -34,24 +41,47 @@ def require_artresizer_compare(test): def wrapper(*args, **kwargs): if not ArtResizer.shared.can_compare: raise unittest.SkipTest("compare not available") - else: - return test(*args, **kwargs) + + # PHASH computation in ImageMagick changed at some point in an + # undocumented way. Check at a low level that comparisons of our + # fixtures give the expected results. Only then, plugin logic tests + # below are meaningful. + # cf. https://github.com/ImageMagick/ImageMagick/discussions/5191 + # It would be better to investigate what exactly change in IM and + # handle that in ArtResizer.IMBackend.{can_compare,compare}. + # Skipping the tests as below is a quick fix to CI, but users may + # still see unexpected behaviour. 
+ abbey_artpath = os.path.join(_common.RSRC, b"abbey.jpg") + abbey_similarpath = os.path.join(_common.RSRC, b"abbey-similar.jpg") + abbey_differentpath = os.path.join(_common.RSRC, b"abbey-different.jpg") + compare_threshold = 20 + + similar_compares_ok = ArtResizer.shared.compare( + abbey_artpath, + abbey_similarpath, + compare_threshold, + ) + different_compares_ok = ArtResizer.shared.compare( + abbey_artpath, + abbey_differentpath, + compare_threshold, + ) + if not similar_compares_ok or different_compares_ok: + raise unittest.SkipTest("IM version with broken compare") + + return test(*args, **kwargs) wrapper.__name__ = test.__name__ return wrapper -class EmbedartCliTest(PluginMixin, FetchImageHelper, BeetsTestCase): +class EmbedartCliTest(IOMixin, PluginMixin, FetchImageHelper, BeetsTestCase): plugin = "embedart" small_artpath = os.path.join(_common.RSRC, b"image-2x3.jpg") abbey_artpath = os.path.join(_common.RSRC, b"abbey.jpg") abbey_similarpath = os.path.join(_common.RSRC, b"abbey-similar.jpg") abbey_differentpath = os.path.join(_common.RSRC, b"abbey-different.jpg") - def setUp(self): - super().setUp() # Converter is threaded - self.io.install() - def _setup_data(self, artpath=None): if not artpath: artpath = self.small_artpath @@ -115,9 +145,7 @@ class EmbedartCliTest(PluginMixin, FetchImageHelper, BeetsTestCase): if os.path.isfile(syspath(tmp_path)): os.remove(syspath(tmp_path)) self.fail( - "Artwork file {} was not deleted".format( - displayable_path(tmp_path) - ) + f"Artwork file {displayable_path(tmp_path)} was not deleted" ) def test_art_file_missing(self): @@ -153,9 +181,9 @@ class EmbedartCliTest(PluginMixin, FetchImageHelper, BeetsTestCase): self.run_command("embedart", "-y", "-f", self.abbey_differentpath) mediafile = MediaFile(syspath(item.path)) - assert ( - mediafile.images[0].data == self.image_data - ), f"Image written is not {displayable_path(self.abbey_artpath)}" + assert mediafile.images[0].data == self.image_data, ( + f"Image written is not 
{displayable_path(self.abbey_artpath)}" + ) @require_artresizer_compare def test_accept_similar_art(self): @@ -167,31 +195,29 @@ class EmbedartCliTest(PluginMixin, FetchImageHelper, BeetsTestCase): self.run_command("embedart", "-y", "-f", self.abbey_similarpath) mediafile = MediaFile(syspath(item.path)) - assert ( - mediafile.images[0].data == self.image_data - ), f"Image written is not {displayable_path(self.abbey_similarpath)}" + assert mediafile.images[0].data == self.image_data, ( + f"Image written is not {displayable_path(self.abbey_similarpath)}" + ) def test_non_ascii_album_path(self): resource_path = os.path.join(_common.RSRC, b"image.mp3") album = self.add_album_fixture() trackpath = album.items()[0].path - albumpath = album.path shutil.copy(syspath(resource_path), syspath(trackpath)) self.run_command("extractart", "-n", "extracted") - self.assertExists(os.path.join(albumpath, b"extracted.png")) + assert (album.filepath / "extracted.png").exists() def test_extracted_extension(self): resource_path = os.path.join(_common.RSRC, b"image-jpeg.mp3") album = self.add_album_fixture() trackpath = album.items()[0].path - albumpath = album.path shutil.copy(syspath(resource_path), syspath(trackpath)) self.run_command("extractart", "-n", "extracted") - self.assertExists(os.path.join(albumpath, b"extracted.jpg")) + assert (album.filepath / "extracted.jpg").exists() def test_clear_art_with_yes_input(self): self._setup_data() @@ -258,7 +284,7 @@ class DummyArtResizer(ArtResizer): @patch("beets.util.artresizer.subprocess") -@patch("beets.art.extract") +@patch("beetsplug._utils.art.extract") class ArtSimilarityTest(unittest.TestCase): def setUp(self): self.item = _common.item() diff --git a/test/plugins/test_embyupdate.py b/test/plugins/test_embyupdate.py index 8def5dca5..9c7104371 100644 --- a/test/plugins/test_embyupdate.py +++ b/test/plugins/test_embyupdate.py @@ -143,7 +143,7 @@ class EmbyUpdateTest(PluginTestCase): responses.add( responses.POST, - 
("http://localhost:8096" "/Users/AuthenticateByName"), + ("http://localhost:8096/Users/AuthenticateByName"), body=body, status=200, content_type="application/json", diff --git a/test/plugins/test_fetchart.py b/test/plugins/test_fetchart.py index 853820d92..96d882e9a 100644 --- a/test/plugins/test_fetchart.py +++ b/test/plugins/test_fetchart.py @@ -98,3 +98,8 @@ class FetchartCliTest(PluginTestCase): self.run_command("fetchart") self.album.load() self.check_cover_is_stored() + + def test_colorization(self): + self.config["ui"]["color"] = True + out = self.run_with_output("fetchart") + assert " - the älbum: \x1b[1;31mno art found\x1b[39;49;00m\n" == out diff --git a/test/plugins/test_fromfilename.py b/test/plugins/test_fromfilename.py new file mode 100644 index 000000000..f13e88aea --- /dev/null +++ b/test/plugins/test_fromfilename.py @@ -0,0 +1,99 @@ +# This file is part of beets. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Tests for the fromfilename plugin.""" + +import pytest + +from beetsplug import fromfilename + + +class Session: + pass + + +class Item: + def __init__(self, path): + self.path = path + self.track = 0 + self.artist = "" + self.title = "" + + +class Task: + def __init__(self, items): + self.items = items + self.is_album = True + + +@pytest.mark.parametrize( + "song1, song2", + [ + ( + ( + "/tmp/01 - The Artist - Song One.m4a", + 1, + "The Artist", + "Song One", + ), + ( + "/tmp/02. 
- The Artist - Song Two.m4a", + 2, + "The Artist", + "Song Two", + ), + ), + ( + ("/tmp/01-The_Artist-Song_One.m4a", 1, "The_Artist", "Song_One"), + ("/tmp/02.-The_Artist-Song_Two.m4a", 2, "The_Artist", "Song_Two"), + ), + ( + ("/tmp/01 - Song_One.m4a", 1, "", "Song_One"), + ("/tmp/02. - Song_Two.m4a", 2, "", "Song_Two"), + ), + ( + ("/tmp/Song One by The Artist.m4a", 0, "The Artist", "Song One"), + ("/tmp/Song Two by The Artist.m4a", 0, "The Artist", "Song Two"), + ), + (("/tmp/01.m4a", 1, "", "01"), ("/tmp/02.m4a", 2, "", "02")), + ( + ("/tmp/Song One.m4a", 0, "", "Song One"), + ("/tmp/Song Two.m4a", 0, "", "Song Two"), + ), + ], +) +def test_fromfilename(song1, song2): + """ + Each "song" is a tuple of path, expected track number, expected artist, + expected title. + + We use two songs for each test for two reasons: + - The plugin needs more than one item to look for uniform strings in paths + in order to guess if the string describes an artist or a title. + - Sometimes we allow for an optional "." after the track number in paths. 
+ """ + + session = Session() + item1 = Item(song1[0]) + item2 = Item(song2[0]) + task = Task([item1, item2]) + + f = fromfilename.FromFilenamePlugin() + f.filename_task(task, session) + + assert task.items[0].track == song1[1] + assert task.items[0].artist == song1[2] + assert task.items[0].title == song1[3] + assert task.items[1].track == song2[1] + assert task.items[1].artist == song2[2] + assert task.items[1].title == song2[3] diff --git a/test/plugins/test_ftintitle.py b/test/plugins/test_ftintitle.py index 1dbe4a727..51bd4f9c8 100644 --- a/test/plugins/test_ftintitle.py +++ b/test/plugins/test_ftintitle.py @@ -14,178 +14,444 @@ """Tests for the 'ftintitle' plugin.""" -import unittest +from collections.abc import Generator +from typing import TypeAlias +import pytest + +from beets.library.models import Album, Item from beets.test.helper import PluginTestCase from beetsplug import ftintitle +ConfigValue: TypeAlias = str | bool | list[str] + class FtInTitlePluginFunctional(PluginTestCase): plugin = "ftintitle" - def _ft_add_item(self, path, artist, title, aartist): - return self.add_item( - path=path, - artist=artist, - artist_sort=artist, - title=title, - albumartist=aartist, + +@pytest.fixture +def env() -> Generator[FtInTitlePluginFunctional, None, None]: + case = FtInTitlePluginFunctional(methodName="runTest") + case.setUp() + try: + yield case + finally: + case.tearDown() + + +def set_config( + env: FtInTitlePluginFunctional, + cfg: dict[str, ConfigValue] | None, +) -> None: + cfg = {} if cfg is None else cfg + defaults = { + "drop": False, + "auto": True, + "keep_in_artist": False, + "custom_words": [], + } + env.config["ftintitle"].set(defaults) + env.config["ftintitle"].set(cfg) + + +def add_item( + env: FtInTitlePluginFunctional, + path: str, + artist: str, + title: str, + albumartist: str | None, +) -> Item: + return env.add_item( + path=path, + artist=artist, + artist_sort=artist, + title=title, + albumartist=albumartist, + ) + + 
+@pytest.mark.parametrize( + "cfg, cmd_args, given, expected", + [ + pytest.param( + None, + ("ftintitle",), + ("Alice", "Song 1", "Alice"), + ("Alice", "Song 1"), + id="no-featured-artist", + ), + pytest.param( + {"format": "feat {0}"}, + ("ftintitle",), + ("Alice ft. Bob", "Song 1", None), + ("Alice", "Song 1 feat Bob"), + id="no-albumartist-custom-format", + ), + pytest.param( + None, + ("ftintitle",), + ("Alice", "Song 1", None), + ("Alice", "Song 1"), + id="no-albumartist-no-feature", + ), + pytest.param( + {"format": "featuring {0}"}, + ("ftintitle",), + ("Alice ft Bob", "Song 1", "George"), + ("Alice", "Song 1 featuring Bob"), + id="guest-artist-custom-format", + ), + pytest.param( + None, + ("ftintitle",), + ("Alice", "Song 1", "George"), + ("Alice", "Song 1"), + id="guest-artist-no-feature", + ), + # ---- drop (-d) variants ---- + pytest.param( + None, + ("ftintitle", "-d"), + ("Alice ft Bob", "Song 1", "Alice"), + ("Alice", "Song 1"), + id="drop-self-ft", + ), + pytest.param( + None, + ("ftintitle", "-d"), + ("Alice", "Song 1", "Alice"), + ("Alice", "Song 1"), + id="drop-self-no-ft", + ), + pytest.param( + None, + ("ftintitle", "-d"), + ("Alice ft Bob", "Song 1", "George"), + ("Alice", "Song 1"), + id="drop-guest-ft", + ), + pytest.param( + None, + ("ftintitle", "-d"), + ("Alice", "Song 1", "George"), + ("Alice", "Song 1"), + id="drop-guest-no-ft", + ), + # ---- custom format variants ---- + pytest.param( + {"format": "feat. {}"}, + ("ftintitle",), + ("Alice ft Bob", "Song 1", "Alice"), + ("Alice", "Song 1 feat. Bob"), + id="custom-format-feat-dot", + ), + pytest.param( + {"format": "featuring {}"}, + ("ftintitle",), + ("Alice feat. 
Bob", "Song 1", "Alice"), + ("Alice", "Song 1 featuring Bob"), + id="custom-format-featuring", + ), + pytest.param( + {"format": "with {}"}, + ("ftintitle",), + ("Alice feat Bob", "Song 1", "Alice"), + ("Alice", "Song 1 with Bob"), + id="custom-format-with", + ), + # ---- keep_in_artist variants ---- + pytest.param( + {"format": "feat. {}", "keep_in_artist": True}, + ("ftintitle",), + ("Alice ft Bob", "Song 1", "Alice"), + ("Alice ft Bob", "Song 1 feat. Bob"), + id="keep-in-artist-add-to-title", + ), + pytest.param( + {"format": "feat. {}", "keep_in_artist": True}, + ("ftintitle", "-d"), + ("Alice ft Bob", "Song 1", "Alice"), + ("Alice ft Bob", "Song 1"), + id="keep-in-artist-drop-from-title", + ), + # ---- custom_words variants ---- + pytest.param( + {"format": "featuring {}", "custom_words": ["med"]}, + ("ftintitle",), + ("Alice med Bob", "Song 1", "Alice"), + ("Alice", "Song 1 featuring Bob"), + id="custom-feat-words", + ), + pytest.param( + { + "format": "featuring {}", + "keep_in_artist": True, + "custom_words": ["med"], + }, + ("ftintitle",), + ("Alice med Bob", "Song 1", "Alice"), + ("Alice med Bob", "Song 1 featuring Bob"), + id="custom-feat-words-keep-in-artists", + ), + pytest.param( + { + "format": "featuring {}", + "keep_in_artist": True, + "custom_words": ["med"], + }, + ( + "ftintitle", + "-d", + ), + ("Alice med Bob", "Song 1", "Alice"), + ("Alice med Bob", "Song 1"), + id="custom-feat-words-keep-in-artists-drop-from-title", + ), + # ---- preserve_album_artist variants ---- + pytest.param( + { + "format": "feat. {}", + "preserve_album_artist": True, + }, + ("ftintitle",), + ("Alice feat. Bob", "Song 1", "Alice"), + ("Alice", "Song 1 feat. Bob"), + id="skip-if-artist-and-album-artists-is-the-same-different-match", + ), + pytest.param( + { + "format": "feat. {}", + "preserve_album_artist": False, + }, + ("ftintitle",), + ("Alice feat. Bob", "Song 1", "Alice"), + ("Alice", "Song 1 feat. 
Bob"), + id="skip-if-artist-and-album-artists-is-the-same-different-match-b", + ), + pytest.param( + { + "format": "feat. {}", + "preserve_album_artist": True, + }, + ("ftintitle",), + ("Alice feat. Bob", "Song 1", "Alice feat. Bob"), + ("Alice feat. Bob", "Song 1"), + id="skip-if-artist-and-album-artists-is-the-same-matching-match", + ), + pytest.param( + { + "format": "feat. {}", + "preserve_album_artist": False, + }, + ("ftintitle",), + ("Alice feat. Bob", "Song 1", "Alice feat. Bob"), + ("Alice", "Song 1 feat. Bob"), + id="skip-if-artist-and-album-artists-is-the-same-matching-match-b", + ), + # ---- titles with brackets/parentheses ---- + pytest.param( + {"format": "ft. {}", "bracket_keywords": ["mix"]}, + ("ftintitle",), + ("Alice ft. Bob", "Song 1 (Club Mix)", "Alice"), + ("Alice", "Song 1 ft. Bob (Club Mix)"), + id="ft-inserted-before-matching-bracket-keyword", + ), + pytest.param( + {"format": "ft. {}", "bracket_keywords": ["nomatch"]}, + ("ftintitle",), + ("Alice ft. Bob", "Song 1 (Club Remix)", "Alice"), + ("Alice", "Song 1 (Club Remix) ft. Bob"), + id="ft-inserted-at-end-no-bracket-keyword-match", + ), + ], +) +def test_ftintitle_functional( + env: FtInTitlePluginFunctional, + cfg: dict[str, str | bool | list[str]] | None, + cmd_args: tuple[str, ...], + given: tuple[str, str, str | None], + expected: tuple[str, str], +) -> None: + set_config(env, cfg) + ftintitle.FtInTitlePlugin() + + artist, title, albumartist = given + item = add_item(env, "/", artist, title, albumartist) + + env.run_command(*cmd_args) + item.load() + + expected_artist, expected_title = expected + assert item["artist"] == expected_artist + assert item["title"] == expected_title + + +@pytest.mark.parametrize( + "artist,albumartist,expected", + [ + ("Alice ft. 
Bob", "Alice", "Bob"), + ("Alice feat Bob", "Alice", "Bob"), + ("Alice featuring Bob", "Alice", "Bob"), + ("Alice & Bob", "Alice", "Bob"), + ("Alice and Bob", "Alice", "Bob"), + ("Alice With Bob", "Alice", "Bob"), + ("Alice defeat Bob", "Alice", None), + ("Alice & Bob", "Bob", "Alice"), + ("Alice ft. Bob", "Bob", "Alice"), + ("Alice ft. Carol", "Bob", "Carol"), + ], +) +def test_find_feat_part( + artist: str, + albumartist: str, + expected: str | None, +) -> None: + assert ftintitle.find_feat_part(artist, albumartist) == expected + + +@pytest.mark.parametrize( + "given,expected", + [ + ("Alice ft. Bob", ("Alice", "Bob")), + ("Alice feat Bob", ("Alice", "Bob")), + ("Alice feat. Bob", ("Alice", "Bob")), + ("Alice featuring Bob", ("Alice", "Bob")), + ("Alice & Bob", ("Alice", "Bob")), + ("Alice and Bob", ("Alice", "Bob")), + ("Alice With Bob", ("Alice", "Bob")), + ("Alice defeat Bob", ("Alice defeat Bob", None)), + ("Alice & Bob feat Charlie", ("Alice & Bob", "Charlie")), + ("Alice & Bob ft. Charlie", ("Alice & Bob", "Charlie")), + ("Alice & Bob featuring Charlie", ("Alice & Bob", "Charlie")), + ("Alice and Bob feat Charlie", ("Alice and Bob", "Charlie")), + ], +) +def test_split_on_feat( + given: str, + expected: tuple[str, str | None], +) -> None: + assert ftintitle.split_on_feat(given) == expected + + +@pytest.mark.parametrize( + "given,keywords,expected", + [ + ## default keywords + # different braces and keywords + ("Song (Remix)", None, "Song ft. Bob (Remix)"), + ("Song [Version]", None, "Song ft. Bob [Version]"), + ("Song {Extended Mix}", None, "Song ft. Bob {Extended Mix}"), + ("Song <Instrumental>", None, "Song ft. Bob <Instrumental>"), + # two keyword clauses + ("Song (Remix) (Live)", None, "Song ft. Bob (Remix) (Live)"), + # brace insensitivity + ("Song (Live) [Remix]", None, "Song ft. Bob (Live) [Remix]"), + ("Song [Edit] (Remastered)", None, "Song ft. Bob [Edit] (Remastered)"), + # negative cases + ("Song", None, "Song ft. 
Bob"), # no clause + ("Song (Arbitrary)", None, "Song (Arbitrary) ft. Bob"), # no keyword + ("Song (", None, "Song ( ft. Bob"), # no matching brace or keyword + ("Song (Live", None, "Song (Live ft. Bob"), # no matching brace with keyword + # one keyword clause, one non-keyword clause + ("Song (Live) (Arbitrary)", None, "Song ft. Bob (Live) (Arbitrary)"), + ("Song (Arbitrary) (Remix)", None, "Song (Arbitrary) ft. Bob (Remix)"), + # nested brackets - same type + ("Song (Remix (Extended))", None, "Song ft. Bob (Remix (Extended))"), + ("Song [Arbitrary [Description]]", None, "Song [Arbitrary [Description]] ft. Bob"), + # nested brackets - different types + ("Song (Remix [Extended])", None, "Song ft. Bob (Remix [Extended])"), + # nested - returns outer start position despite inner keyword + ("Song [Arbitrary {Extended}]", None, "Song ft. Bob [Arbitrary {Extended}]"), + ("Song {Live <Arbitrary>}", None, "Song ft. Bob {Live <Arbitrary>}"), + ("Song <Remaster (Arbitrary)>", None, "Song ft. Bob <Remaster (Arbitrary)>"), + ("Song <Extended> [Live]", None, "Song ft. Bob <Extended> [Live]"), + ("Song (Version) <Live>", None, "Song ft. Bob (Version) <Live>"), + ("Song (Arbitrary [Description])", None, "Song (Arbitrary [Description]) ft. Bob"), + ("Song [Description (Arbitrary)]", None, "Song [Description (Arbitrary)] ft. Bob"), + ## custom keywords + ("Song (Live)", ["live"], "Song ft. Bob (Live)"), + ("Song (Concert)", ["concert"], "Song ft. Bob (Concert)"), + ("Song (Remix)", ["custom"], "Song (Remix) ft. Bob"), + ("Song (Custom)", ["custom"], "Song ft. Bob (Custom)"), + ("Song", [], "Song ft. Bob"), + ("Song (", [], "Song ( ft. Bob"), + # Multi-word keyword tests + ("Song (Club Mix)", ["club mix"], "Song ft. Bob (Club Mix)"), # Positive: matches multi-word + ("Song (Club Remix)", ["club mix"], "Song (Club Remix) ft. 
Bob"), # Negative: no match + ], +) # fmt: skip +def test_insert_ft_into_title( + given: str, + keywords: list[str] | None, + expected: str, +) -> None: + assert ( + ftintitle.FtInTitlePlugin.insert_ft_into_title( + given, "ft. Bob", keywords ) - - def _ft_set_config( - self, ftformat, drop=False, auto=True, keep_in_artist=False - ): - self.config["ftintitle"]["format"] = ftformat - self.config["ftintitle"]["drop"] = drop - self.config["ftintitle"]["auto"] = auto - self.config["ftintitle"]["keep_in_artist"] = keep_in_artist - - def test_functional_drop(self): - item = self._ft_add_item("/", "Alice ft Bob", "Song 1", "Alice") - self.run_command("ftintitle", "-d") - item.load() - assert item["artist"] == "Alice" - assert item["title"] == "Song 1" - - def test_functional_not_found(self): - item = self._ft_add_item("/", "Alice ft Bob", "Song 1", "George") - self.run_command("ftintitle", "-d") - item.load() - # item should be unchanged - assert item["artist"] == "Alice ft Bob" - assert item["title"] == "Song 1" - - def test_functional_custom_format(self): - self._ft_set_config("feat. {0}") - item = self._ft_add_item("/", "Alice ft Bob", "Song 1", "Alice") - self.run_command("ftintitle") - item.load() - assert item["artist"] == "Alice" - assert item["title"] == "Song 1 feat. Bob" - - self._ft_set_config("featuring {0}") - item = self._ft_add_item("/", "Alice feat. Bob", "Song 1", "Alice") - self.run_command("ftintitle") - item.load() - assert item["artist"] == "Alice" - assert item["title"] == "Song 1 featuring Bob" - - self._ft_set_config("with {0}") - item = self._ft_add_item("/", "Alice feat Bob", "Song 1", "Alice") - self.run_command("ftintitle") - item.load() - assert item["artist"] == "Alice" - assert item["title"] == "Song 1 with Bob" - - def test_functional_keep_in_artist(self): - self._ft_set_config("feat. 
{0}", keep_in_artist=True) - item = self._ft_add_item("/", "Alice ft Bob", "Song 1", "Alice") - self.run_command("ftintitle") - item.load() - assert item["artist"] == "Alice ft Bob" - assert item["title"] == "Song 1 feat. Bob" - - item = self._ft_add_item("/", "Alice ft Bob", "Song 1", "Alice") - self.run_command("ftintitle", "-d") - item.load() - assert item["artist"] == "Alice ft Bob" - assert item["title"] == "Song 1" + == expected + ) -class FtInTitlePluginTest(unittest.TestCase): - def setUp(self): - """Set up configuration""" - ftintitle.FtInTitlePlugin() +@pytest.mark.parametrize( + "given,expected", + [ + ("Alice ft. Bob", True), + ("Alice feat. Bob", True), + ("Alice feat Bob", True), + ("Alice featuring Bob", True), + ("Alice (ft. Bob)", True), + ("Alice (feat. Bob)", True), + ("Alice [ft. Bob]", True), + ("Alice [feat. Bob]", True), + ("Alice defeat Bob", False), + ("Aliceft.Bob", False), + ("Alice (defeat Bob)", False), + ("Live and Let Go", False), + ("Come With Me", False), + ], +) +def test_contains_feat(given: str, expected: bool) -> None: + assert ftintitle.contains_feat(given) is expected - def test_find_feat_part(self): - test_cases = [ - { - "artist": "Alice ft. Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice feat Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice featuring Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice & Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice and Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice With Bob", - "album_artist": "Alice", - "feat_part": "Bob", - }, - { - "artist": "Alice defeat Bob", - "album_artist": "Alice", - "feat_part": None, - }, - { - "artist": "Alice & Bob", - "album_artist": "Bob", - "feat_part": "Alice", - }, - { - "artist": "Alice ft. Bob", - "album_artist": "Bob", - "feat_part": "Alice", - }, - { - "artist": "Alice ft. 
Carol", - "album_artist": "Bob", - "feat_part": None, - }, - ] - for test_case in test_cases: - feat_part = ftintitle.find_feat_part( - test_case["artist"], test_case["album_artist"] - ) - assert feat_part == test_case["feat_part"] +@pytest.mark.parametrize( + "given,custom_words,expected", + [ + ("Alice ft. Bob", [], True), + ("Alice feat. Bob", [], True), + ("Alice feat Bob", [], True), + ("Alice featuring Bob", [], True), + ("Alice (ft. Bob)", [], True), + ("Alice (feat. Bob)", [], True), + ("Alice [ft. Bob]", [], True), + ("Alice [feat. Bob]", [], True), + ("Alice defeat Bob", [], False), + ("Aliceft.Bob", [], False), + ("Alice (defeat Bob)", [], False), + ("Live and Let Go", [], False), + ("Come With Me", [], False), + ("Alice x Bob", ["x"], True), + ("Alice x Bob", ["X"], True), + ("Alice och Xavier", ["x"], False), + ("Alice ft. Xavier", ["x"], True), + ("Alice med Carol", ["med"], True), + ("Alice med Carol", [], False), + ], +) +def test_custom_words( + given: str, custom_words: list[str] | None, expected: bool +) -> None: + if custom_words is None: + custom_words = [] + assert ftintitle.contains_feat(given, custom_words) is expected - def test_split_on_feat(self): - parts = ftintitle.split_on_feat("Alice ft. Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice feat Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice feat. Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice featuring Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice & Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice and Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice With Bob") - assert parts == ("Alice", "Bob") - parts = ftintitle.split_on_feat("Alice defeat Bob") - assert parts == ("Alice defeat Bob", None) - def test_contains_feat(self): - assert ftintitle.contains_feat("Alice ft. 
Bob") - assert ftintitle.contains_feat("Alice feat. Bob") - assert ftintitle.contains_feat("Alice feat Bob") - assert ftintitle.contains_feat("Alice featuring Bob") - assert ftintitle.contains_feat("Alice (ft. Bob)") - assert ftintitle.contains_feat("Alice (feat. Bob)") - assert ftintitle.contains_feat("Alice [ft. Bob]") - assert ftintitle.contains_feat("Alice [feat. Bob]") - assert not ftintitle.contains_feat("Alice defeat Bob") - assert not ftintitle.contains_feat("Aliceft.Bob") - assert not ftintitle.contains_feat("Alice (defeat Bob)") - assert not ftintitle.contains_feat("Live and Let Go") - assert not ftintitle.contains_feat("Come With Me") +def test_album_template_value(): + album = Album() + album["albumartist"] = "Foo ft. Bar" + assert ftintitle._album_artist_no_feat(album) == "Foo" + + album["albumartist"] = "Foobar" + assert ftintitle._album_artist_no_feat(album) == "Foobar" diff --git a/test/plugins/test_hook.py b/test/plugins/test_hook.py index 993b95911..033e1ea64 100644 --- a/test/plugins/test_hook.py +++ b/test/plugins/test_hook.py @@ -15,17 +15,17 @@ from __future__ import annotations -import os.path +import os import sys import unittest from contextlib import contextmanager -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING from beets import plugins from beets.test.helper import PluginTestCase, capture_log if TYPE_CHECKING: - from collections.abc import Iterator + from collections.abc import Callable, Iterator class HookTestCase(PluginTestCase): @@ -37,12 +37,14 @@ class HookTestCase(PluginTestCase): class HookLogsTest(HookTestCase): + HOOK: plugins.EventType = "write" + @contextmanager def _configure_logs(self, command: str) -> Iterator[list[str]]: - config = {"hooks": [self._get_hook("test_event", command)]} + config = {"hooks": [self._get_hook(self.HOOK, command)]} with self.configure_plugin(config), capture_log("beets.hook") as logs: - plugins.send("test_event") + plugins.send(self.HOOK) yield logs def 
test_hook_empty_command(self): @@ -53,13 +55,13 @@ class HookLogsTest(HookTestCase): @unittest.skipIf(sys.platform == "win32", "win32") def test_hook_non_zero_exit(self): with self._configure_logs('sh -c "exit 1"') as logs: - assert "hook: hook for test_event exited with status 1" in logs + assert f"hook: hook for {self.HOOK} exited with status 1" in logs def test_hook_non_existent_command(self): with self._configure_logs("non-existent-command") as logs: logs = "\n".join(logs) - assert "hook: hook for test_event failed: " in logs + assert f"hook: hook for {self.HOOK} failed: " in logs # The error message is different for each OS. Unfortunately the text is # different in each case, where the only shared text is the string # 'file' and substring 'Err' @@ -68,14 +70,11 @@ class HookLogsTest(HookTestCase): class HookCommandTest(HookTestCase): - TEST_HOOK_COUNT = 2 - - events = [f"test_event_{i}" for i in range(TEST_HOOK_COUNT)] + EVENTS: list[plugins.EventType] = ["write", "after_write"] def setUp(self): super().setUp() - temp_dir = os.fsdecode(self.temp_dir) - self.paths = [os.path.join(temp_dir, e) for e in self.events] + self.paths = [str(self.temp_dir_path / e) for e in self.EVENTS] def _test_command( self, @@ -94,13 +93,14 @@ class HookCommandTest(HookTestCase): 2. Assert that a file has been created under the original path, which proves that the configured hook command has been executed. 
""" + events_with_paths = list(zip(self.EVENTS, self.paths)) hooks = [ self._get_hook(e, f"touch {make_test_path(e, p)}") - for e, p in zip(self.events, self.paths) + for e, p in events_with_paths ] with self.configure_plugin({"hooks": hooks}): - for event, path in zip(self.events, self.paths): + for event, path in events_with_paths: if send_path_kwarg: plugins.send(event, path=path) else: diff --git a/test/plugins/test_importadded.py b/test/plugins/test_importadded.py index 608afb399..352471f9b 100644 --- a/test/plugins/test_importadded.py +++ b/test/plugins/test_importadded.py @@ -20,7 +20,7 @@ import os import pytest from beets import importer -from beets.test.helper import AutotagStub, ImportTestCase, PluginMixin +from beets.test.helper import AutotagImportTestCase, PluginMixin from beets.util import displayable_path, syspath from beetsplug.importadded import ImportAddedPlugin @@ -41,7 +41,7 @@ def modify_mtimes(paths, offset=-60000): os.utime(syspath(path), (mstat.st_atime, mstat.st_mtime + offset * i)) -class ImportAddedTest(PluginMixin, ImportTestCase): +class ImportAddedTest(PluginMixin, AutotagImportTestCase): # The minimum mtime of the files to be imported plugin = "importadded" min_mtime = None @@ -56,14 +56,8 @@ class ImportAddedTest(PluginMixin, ImportTestCase): self.min_mtime = min( os.path.getmtime(mfile.path) for mfile in self.import_media ) - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.IDENT self.importer = self.setup_importer() - self.importer.add_choice(importer.action.APPLY) - - def tearDown(self): - super().tearDown() - self.matcher.restore() + self.importer.add_choice(importer.Action.APPLY) def find_media_file(self, item): """Find the pre-import MediaFile for an Item""" @@ -71,29 +65,26 @@ class ImportAddedTest(PluginMixin, ImportTestCase): if m.title.replace("Tag", "Applied") == item.title: return m raise AssertionError( - "No MediaFile found for Item " + displayable_path(item.path) + f"No MediaFile found for 
Item {displayable_path(item.path)}" ) - def assertEqualTimes(self, first, second, msg=None): - """For comparing file modification times at a sufficient precision""" - assert first == pytest.approx(second, rel=1e-4), msg - - def assertAlbumImport(self): + def test_import_album_with_added_dates(self): self.importer.run() + album = self.lib.albums().get() assert album.added == self.min_mtime for item in album.items(): assert item.added == self.min_mtime - def test_import_album_with_added_dates(self): - self.assertAlbumImport() - def test_import_album_inplace_with_added_dates(self): self.config["import"]["copy"] = False - self.config["import"]["move"] = False - self.config["import"]["link"] = False - self.config["import"]["hardlink"] = False - self.assertAlbumImport() + + self.importer.run() + + album = self.lib.albums().get() + assert album.added == self.min_mtime + for item in album.items(): + assert item.added == self.min_mtime def test_import_album_with_preserved_mtimes(self): self.config["importadded"]["preserve_mtimes"] = True @@ -101,10 +92,12 @@ class ImportAddedTest(PluginMixin, ImportTestCase): album = self.lib.albums().get() assert album.added == self.min_mtime for item in album.items(): - self.assertEqualTimes(item.added, self.min_mtime) + assert item.added == pytest.approx(self.min_mtime, rel=1e-4) mediafile_mtime = os.path.getmtime(self.find_media_file(item).path) - self.assertEqualTimes(item.mtime, mediafile_mtime) - self.assertEqualTimes(os.path.getmtime(item.path), mediafile_mtime) + assert item.mtime == pytest.approx(mediafile_mtime, rel=1e-4) + assert os.path.getmtime(item.path) == pytest.approx( + mediafile_mtime, rel=1e-4 + ) def test_reimported_album_skipped(self): # Import and record the original added dates @@ -119,22 +112,21 @@ class ImportAddedTest(PluginMixin, ImportTestCase): self.importer.run() # Verify the reimported items album = self.lib.albums().get() - self.assertEqualTimes(album.added, album_added_before) + assert album.added == 
pytest.approx(album_added_before, rel=1e-4) items_added_after = {item.path: item.added for item in album.items()} for item_path, added_after in items_added_after.items(): - self.assertEqualTimes( - items_added_before[item_path], - added_after, - "reimport modified Item.added for " - + displayable_path(item_path), - ) + assert items_added_before[item_path] == pytest.approx( + added_after, rel=1e-4 + ), f"reimport modified Item.added for {displayable_path(item_path)}" def test_import_singletons_with_added_dates(self): self.config["import"]["singletons"] = True self.importer.run() for item in self.lib.items(): mfile = self.find_media_file(item) - self.assertEqualTimes(item.added, os.path.getmtime(mfile.path)) + assert item.added == pytest.approx( + os.path.getmtime(mfile.path), rel=1e-4 + ) def test_import_singletons_with_preserved_mtimes(self): self.config["import"]["singletons"] = True @@ -142,9 +134,11 @@ class ImportAddedTest(PluginMixin, ImportTestCase): self.importer.run() for item in self.lib.items(): mediafile_mtime = os.path.getmtime(self.find_media_file(item).path) - self.assertEqualTimes(item.added, mediafile_mtime) - self.assertEqualTimes(item.mtime, mediafile_mtime) - self.assertEqualTimes(os.path.getmtime(item.path), mediafile_mtime) + assert item.added == pytest.approx(mediafile_mtime, rel=1e-4) + assert item.mtime == pytest.approx(mediafile_mtime, rel=1e-4) + assert os.path.getmtime(item.path) == pytest.approx( + mediafile_mtime, rel=1e-4 + ) def test_reimported_singletons_skipped(self): self.config["import"]["singletons"] = True @@ -161,9 +155,6 @@ class ImportAddedTest(PluginMixin, ImportTestCase): # Verify the reimported items items_added_after = {item.path: item.added for item in self.lib.items()} for item_path, added_after in items_added_after.items(): - self.assertEqualTimes( - items_added_before[item_path], - added_after, - "reimport modified Item.added for " - + displayable_path(item_path), - ) + assert items_added_before[item_path] == 
pytest.approx( + added_after, rel=1e-4 + ), f"reimport modified Item.added for {displayable_path(item_path)}" diff --git a/test/plugins/test_importfeeds.py b/test/plugins/test_importfeeds.py index 5f1f915ad..3f51eca76 100644 --- a/test/plugins/test_importfeeds.py +++ b/test/plugins/test_importfeeds.py @@ -1,22 +1,22 @@ import datetime import os -import os.path -from beets import config from beets.library import Album, Item -from beets.test.helper import BeetsTestCase +from beets.test.helper import PluginTestCase from beetsplug.importfeeds import ImportFeedsPlugin -class ImportfeedsTestTest(BeetsTestCase): +class ImportFeedsTest(PluginTestCase): + plugin = "importfeeds" + def setUp(self): super().setUp() self.importfeeds = ImportFeedsPlugin() - self.feeds_dir = os.path.join(os.fsdecode(self.temp_dir), "importfeeds") - config["importfeeds"]["dir"] = self.feeds_dir + self.feeds_dir = self.temp_dir_path / "importfeeds" + self.config["importfeeds"]["dir"] = str(self.feeds_dir) def test_multi_format_album_playlist(self): - config["importfeeds"]["formats"] = "m3u_multi" + self.config["importfeeds"]["formats"] = "m3u_multi" album = Album(album="album/name", id=1) item_path = os.path.join("path", "to", "item") item = Item(title="song", album_id=1, path=item_path) @@ -24,16 +24,14 @@ class ImportfeedsTestTest(BeetsTestCase): self.lib.add(item) self.importfeeds.album_imported(self.lib, album) - playlist_path = os.path.join( - self.feeds_dir, os.listdir(self.feeds_dir)[0] - ) - assert playlist_path.endswith("album_name.m3u") + playlist_path = self.feeds_dir / next(self.feeds_dir.iterdir()) + assert str(playlist_path).endswith("album_name.m3u") with open(playlist_path) as playlist: assert item_path in playlist.read() def test_playlist_in_subdir(self): - config["importfeeds"]["formats"] = "m3u" - config["importfeeds"]["m3u_name"] = os.path.join( + self.config["importfeeds"]["formats"] = "m3u" + self.config["importfeeds"]["m3u_name"] = os.path.join( "subdir", "imported.m3u" ) 
album = Album(album="album/name", id=1) @@ -43,16 +41,14 @@ class ImportfeedsTestTest(BeetsTestCase): self.lib.add(item) self.importfeeds.album_imported(self.lib, album) - playlist = os.path.join( - self.feeds_dir, config["importfeeds"]["m3u_name"].get() - ) + playlist = self.feeds_dir / self.config["importfeeds"]["m3u_name"].get() playlist_subdir = os.path.dirname(playlist) assert os.path.isdir(playlist_subdir) assert os.path.isfile(playlist) def test_playlist_per_session(self): - config["importfeeds"]["formats"] = "m3u_session" - config["importfeeds"]["m3u_name"] = "imports.m3u" + self.config["importfeeds"]["formats"] = "m3u_session" + self.config["importfeeds"]["m3u_name"] = "imports.m3u" album = Album(album="album/name", id=1) item_path = os.path.join("path", "to", "item") item = Item(title="song", album_id=1, path=item_path) @@ -62,7 +58,7 @@ class ImportfeedsTestTest(BeetsTestCase): self.importfeeds.import_begin(self) self.importfeeds.album_imported(self.lib, album) date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M") - playlist = os.path.join(self.feeds_dir, f"imports_{date}.m3u") + playlist = self.feeds_dir / f"imports_{date}.m3u" assert os.path.isfile(playlist) with open(playlist) as playlist_contents: assert item_path in playlist_contents.read() diff --git a/test/plugins/test_importsource.py b/test/plugins/test_importsource.py new file mode 100644 index 000000000..a4f498181 --- /dev/null +++ b/test/plugins/test_importsource.py @@ -0,0 +1,146 @@ +# This file is part of beets. +# Copyright 2025, Stig Inge Lea Bjornsen. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + + +"""Tests for the `importsource` plugin.""" + +import os +import time + +from beets import importer, plugins +from beets.test.helper import AutotagImportTestCase, PluginMixin, control_stdin +from beets.util import syspath +from beetsplug.importsource import ImportSourcePlugin + +_listeners = ImportSourcePlugin.listeners + + +def preserve_plugin_listeners(): + """Preserve the initial plugin listeners as they would otherwise be + deleted after the first setup / tear down cycle. 
+ """ + if not ImportSourcePlugin.listeners: + ImportSourcePlugin.listeners = _listeners + + +class ImportSourceTest(PluginMixin, AutotagImportTestCase): + plugin = "importsource" + preload_plugin = False + + def setUp(self): + preserve_plugin_listeners() + super().setUp() + self.config[self.plugin]["suggest_removal"] = True + self.load_plugins() + self.prepare_album_for_import(2) + self.importer = self.setup_importer() + self.importer.add_choice(importer.Action.APPLY) + self.importer.run() + self.all_items = self.lib.albums().get().items() + self.item_to_remove = self.all_items[0] + + def interact(self, stdin_input: str): + with control_stdin(stdin_input): + self.run_command( + "remove", + f"path:{syspath(self.item_to_remove.path)}", + ) + + def test_do_nothing(self): + self.interact("N") + + assert os.path.exists(self.item_to_remove.source_path) + + def test_remove_single(self): + self.interact("y\nD") + + assert not os.path.exists(self.item_to_remove.source_path) + + def test_remove_all_from_single(self): + self.interact("y\nR\ny") + + for item in self.all_items: + assert not os.path.exists(item.source_path) + + def test_stop_suggesting(self): + self.interact("y\nS") + + for item in self.all_items: + assert os.path.exists(item.source_path) + + def test_source_path_attribute_written(self): + """Test that source_path attribute is correctly written to imported items. 
+ + The items should already have source_path from the setUp import + """ + for item in self.all_items: + assert "source_path" in item + assert item.source_path # Should not be empty + + def test_source_files_not_modified_during_import(self): + """Test that source files timestamps are not changed during import.""" + # Prepare fresh files and record timestamps + test_album_path = self.import_path / "test_album" + import_paths = self.prepare_album_for_import( + 2, album_path=test_album_path + ) + original_mtimes = { + path: os.stat(path).st_mtime for path in import_paths + } + + # Small delay to detect timestamp changes + time.sleep(0.1) + + # Run a fresh import + importer_session = self.setup_importer() + importer_session.add_choice(importer.Action.APPLY) + importer_session.run() + + # Verify timestamps haven't changed + for path, original_mtime in original_mtimes.items(): + current_mtime = os.stat(path).st_mtime + assert current_mtime == original_mtime, ( + f"Source file timestamp changed: {path}" + ) + + def test_prevent_suggest_removal_on_reimport(self): + """Test that removal suggestions are prevented during reimport.""" + album = self.lib.albums().get() + mb_albumid = album.mb_albumid + + # Reimport from library + reimporter = self.setup_importer(import_dir=self.libdir) + reimporter.add_choice(importer.Action.APPLY) + reimporter.run() + + plugin = plugins._instances[0] + assert mb_albumid in plugin.stop_suggestions_for_albums + + # Calling suggest_removal should exit early without prompting + item = self.lib.items().get() + plugin.suggest_removal(item) + assert os.path.exists(item.source_path) + + def test_prevent_suggest_removal_handles_skipped_task(self): + """Test that skipped tasks don't crash prevent_suggest_removal.""" + + class MockTask: + skip = True + + def imported_items(self): + return "whatever" + + plugin = plugins._instances[0] + mock_task = MockTask() + plugin.prevent_suggest_removal(None, mock_task) diff --git a/test/plugins/test_inline.py 
b/test/plugins/test_inline.py new file mode 100644 index 000000000..79118bd06 --- /dev/null +++ b/test/plugins/test_inline.py @@ -0,0 +1,62 @@ +# This file is part of beets. +# Copyright 2025, Gabe Push. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +from beets import config, plugins +from beets.test.helper import PluginTestCase +from beetsplug.inline import InlinePlugin + + +class TestInlineRecursion(PluginTestCase): + def test_no_recursion_when_inline_shadows_fixed_field(self): + config["plugins"] = ["inline"] + + config["item_fields"] = { + "track_no": ( + "f'{disc:02d}-{track:02d}' if disctotal > 1 else f'{track:02d}'" + ) + } + + plugins._instances.clear() + plugins.load_plugins() + + item = self.add_item_fixture( + artist="Artist", + album="Album", + title="Title", + track=1, + disc=1, + disctotal=1, + ) + + out = item.evaluate_template("$track_no") + + assert out == "01" + + def test_inline_function_body_item_field(self): + plugin = InlinePlugin() + func = plugin.compile_inline( + "return track + 1", album=False, field_name="next_track" + ) + + item = self.add_item_fixture(track=3) + assert func(item) == 4 + + def test_inline_album_expression_uses_items(self): + plugin = InlinePlugin() + func = plugin.compile_inline( + "len(items)", album=True, field_name="item_count" + ) + + album = self.add_album_fixture() + assert func(album) == len(list(album.items())) diff --git a/test/plugins/test_ipfs.py 
b/test/plugins/test_ipfs.py index 096bc393b..b94bd551b 100644 --- a/test/plugins/test_ipfs.py +++ b/test/plugins/test_ipfs.py @@ -37,7 +37,7 @@ class IPFSPluginTest(PluginTestCase): try: if check_item.get("ipfs", with_album=False): ipfs_item = os.fsdecode(os.path.basename(want_item.path)) - want_path = "/ipfs/{}/{}".format(test_album.ipfs, ipfs_item) + want_path = f"/ipfs/{test_album.ipfs}/{ipfs_item}" want_path = bytestring_path(want_path) assert check_item.path == want_path assert ( diff --git a/test/plugins/test_lastgenre.py b/test/plugins/test_lastgenre.py index 49d219de9..026001e38 100644 --- a/test/plugins/test_lastgenre.py +++ b/test/plugins/test_lastgenre.py @@ -14,17 +14,18 @@ """Tests for the 'lastgenre' plugin.""" -from unittest.mock import Mock +from unittest.mock import Mock, patch import pytest -from beets import config from beets.test import _common -from beets.test.helper import BeetsTestCase +from beets.test.helper import PluginTestCase from beetsplug import lastgenre -class LastGenrePluginTest(BeetsTestCase): +class LastGenrePluginTest(PluginTestCase): + plugin = "lastgenre" + def setUp(self): super().setUp() self.plugin = lastgenre.LastGenrePlugin() @@ -32,12 +33,12 @@ class LastGenrePluginTest(BeetsTestCase): def _setup_config( self, whitelist=False, canonical=False, count=1, prefer_specific=False ): - config["lastgenre"]["canonical"] = canonical - config["lastgenre"]["count"] = count - config["lastgenre"]["prefer_specific"] = prefer_specific + self.config["lastgenre"]["canonical"] = canonical + self.config["lastgenre"]["count"] = count + self.config["lastgenre"]["prefer_specific"] = prefer_specific if isinstance(whitelist, (bool, (str,))): # Filename, default, or disabled. - config["lastgenre"]["whitelist"] = whitelist + self.config["lastgenre"]["whitelist"] = whitelist self.plugin.setup() if not isinstance(whitelist, (bool, (str,))): # Explicit list of genres. 
@@ -132,6 +133,33 @@ class LastGenrePluginTest(BeetsTestCase): "math rock", ] + @patch("beets.ui.should_write", Mock(return_value=True)) + @patch( + "beetsplug.lastgenre.LastGenrePlugin._get_genre", + Mock(return_value=("Mock Genre", "mock stage")), + ) + def test_pretend_option_skips_library_updates(self): + item = self.create_item( + album="Pretend Album", + albumartist="Pretend Artist", + artist="Pretend Artist", + title="Pretend Track", + genre="Original Genre", + ) + album = self.lib.add_album([item]) + + def unexpected_store(*_, **__): + raise AssertionError("Unexpected store call") + + # Verify that try_write was never called (file operations skipped) + with patch("beetsplug.lastgenre.Item.store", unexpected_store): + output = self.run_with_output("lastgenre", "--pretend") + + assert "Mock Genre" in output + album.load() + assert album.genre == "Original Genre" + assert album.items()[0].genre == "Original Genre" + def test_no_duplicate(self): """Remove duplicated genres.""" self._setup_config(count=99) @@ -442,18 +470,89 @@ class LastGenrePluginTest(BeetsTestCase): }, ("Jazz", "keep + artist, whitelist"), ), + # 13 - canonicalization transforms non-whitelisted genres to canonical forms + # + # "Acid Techno" is not in the default whitelist, thus gets resolved "up" in the + # tree to "Techno" and "Electronic". + ( + { + "force": True, + "keep_existing": False, + "source": "album", + "whitelist": True, + "canonical": True, + "prefer_specific": False, + "count": 10, + }, + "", + { + "album": ["acid techno"], + }, + ("Techno, Electronic", "album, whitelist"), + ), + # 14 - canonicalization transforms whitelisted genres to canonical forms and + # includes originals + # + # "Detroit Techno" is in the default whitelist, thus it stays and and also gets + # resolved "up" in the tree to "Techno" and "Electronic". The same happens for + # newly fetched genre "Acid House". 
+ ( + { + "force": True, + "keep_existing": True, + "source": "album", + "whitelist": True, + "canonical": True, + "prefer_specific": False, + "count": 10, + "extended_debug": True, + }, + "detroit techno", + { + "album": ["acid house"], + }, + ( + "Detroit Techno, Techno, Electronic, Acid House, House", + "keep + album, whitelist", + ), + ), + # 15 - canonicalization transforms non-whitelisted original genres to canonical + # forms and deduplication works. + # + # "Cosmic Disco" is not in the default whitelist, thus gets resolved "up" in the + # tree to "Disco" and "Electronic". New genre "Detroit Techno" resolves to + # "Techno". Both resolve to "Electronic" which gets deduplicated. + ( + { + "force": True, + "keep_existing": True, + "source": "album", + "whitelist": True, + "canonical": True, + "prefer_specific": False, + "count": 10, + }, + "Cosmic Disco", + { + "album": ["Detroit Techno"], + }, + ( + "Disco, Electronic, Detroit Techno, Techno", + "keep + album, whitelist", + ), + ), ], ) def test_get_genre(config_values, item_genre, mock_genres, expected_result): """Test _get_genre with various configurations.""" - def mock_fetch_track_genre(self, obj=None): + def mock_fetch_track_genre(self, trackartist, tracktitle): return mock_genres["track"] - def mock_fetch_album_genre(self, obj): + def mock_fetch_album_genre(self, albumartist, albumtitle): return mock_genres["album"] - def mock_fetch_artist_genre(self, obj): + def mock_fetch_artist_genre(self, artist): return mock_genres["artist"] # Mock the last.fm fetchers. 
When whitelist enabled, we can assume only @@ -463,11 +562,11 @@ def test_get_genre(config_values, item_genre, mock_genres, expected_result): lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre - # Configure - config["lastgenre"] = config_values - # Initialize plugin instance and item plugin = lastgenre.LastGenrePlugin() + # Configure + plugin.config.set(config_values) + plugin.setup() # Loads default whitelist and canonicalization tree item = _common.item() item.genre = item_genre diff --git a/test/plugins/test_limit.py b/test/plugins/test_limit.py index 12700295e..d77e47ca8 100644 --- a/test/plugins/test_limit.py +++ b/test/plugins/test_limit.py @@ -42,8 +42,8 @@ class LimitPluginTest(PluginTestCase): # a subset of tests has only `num_limit` results, identified by a # range filter on the track number - self.track_head_range = "track:.." + str(self.num_limit) - self.track_tail_range = "track:" + str(self.num_limit + 1) + ".." 
+ self.track_head_range = f"track:..{self.num_limit}" + self.track_tail_range = f"track:{self.num_limit + 1}{'..'}" def test_no_limit(self): """Returns all when there is no limit or filter.""" @@ -82,13 +82,13 @@ class LimitPluginTest(PluginTestCase): def test_prefix_when_correctly_ordered(self): """Returns the expected number with the query prefix and filter when the prefix portion (correctly) appears last.""" - correct_order = self.track_tail_range + " " + self.num_limit_prefix + correct_order = f"{self.track_tail_range} {self.num_limit_prefix}" result = self.lib.items(correct_order) assert len(result) == self.num_limit def test_prefix_when_incorrectly_ordred(self): """Returns no results with the query prefix and filter when the prefix portion (incorrectly) appears first.""" - incorrect_order = self.num_limit_prefix + " " + self.track_tail_range + incorrect_order = f"{self.num_limit_prefix} {self.track_tail_range}" result = self.lib.items(incorrect_order) assert len(result) == 0 diff --git a/test/plugins/test_listenbrainz.py b/test/plugins/test_listenbrainz.py new file mode 100644 index 000000000..b94cff219 --- /dev/null +++ b/test/plugins/test_listenbrainz.py @@ -0,0 +1,47 @@ +import pytest + +from beets.test.helper import ConfigMixin +from beetsplug.listenbrainz import ListenBrainzPlugin + + +class TestListenBrainzPlugin(ConfigMixin): + @pytest.fixture(scope="class") + def plugin(self) -> ListenBrainzPlugin: + self.config["listenbrainz"]["token"] = "test_token" + self.config["listenbrainz"]["username"] = "test_user" + return ListenBrainzPlugin() + + @pytest.mark.parametrize( + "search_response, expected_id", + [([{"id": "id1"}], "id1"), ([], None)], + ids=["found", "not_found"], + ) + def test_get_mb_recording_id( + self, plugin, requests_mock, search_response, expected_id + ): + requests_mock.get( + "/ws/2/recording", json={"recordings": search_response} + ) + track = {"track_metadata": {"track_name": "S", "release_name": "A"}} + + assert 
plugin.get_mb_recording_id(track) == expected_id + + def test_get_track_info(self, plugin, requests_mock): + requests_mock.get( + "/ws/2/recording/id1?inc=releases%2Bartist-credits", + json={ + "title": "T", + "artist-credit": [], + "releases": [{"title": "Al", "date": "2023-01"}], + }, + ) + + assert plugin.get_track_info([{"identifier": "id1"}]) == [ + { + "identifier": "id1", + "title": "T", + "artist": None, + "album": "Al", + "year": "2023", + } + ] diff --git a/test/plugins/test_lyrics.py b/test/plugins/test_lyrics.py index 74e727099..945a7158c 100644 --- a/test/plugins/test_lyrics.py +++ b/test/plugins/test_lyrics.py @@ -14,8 +14,6 @@ """Tests for the 'lyrics' plugin.""" -import importlib.util -import os import re import textwrap from functools import partial @@ -30,11 +28,6 @@ from beetsplug import lyrics from .lyrics_pages import LyricsPage, lyrics_pages -github_ci = os.environ.get("GITHUB_ACTIONS") == "true" -if not github_ci and not importlib.util.find_spec("langdetect"): - pytest.skip("langdetect isn't available", allow_module_level=True) - - PHRASE_BY_TITLE = { "Lady Madonna": "friday night arrives without a suitcase", "Jazz'n'blues": "as i check my balance i kiss the screen", diff --git a/test/plugins/test_mbcollection.py b/test/plugins/test_mbcollection.py new file mode 100644 index 000000000..adfadc103 --- /dev/null +++ b/test/plugins/test_mbcollection.py @@ -0,0 +1,142 @@ +import re +import uuid +from contextlib import nullcontext as does_not_raise + +import pytest + +from beets.library import Album +from beets.test.helper import PluginMixin, TestHelper +from beets.ui import UserError +from beetsplug import mbcollection + + +class TestMbCollectionPlugin(PluginMixin, TestHelper): + """Tests for the MusicBrainzCollectionPlugin class methods.""" + + plugin = "mbcollection" + + COLLECTION_ID = str(uuid.uuid4()) + + @pytest.fixture(autouse=True) + def setup_config(self): + self.config["musicbrainz"]["user"] = "testuser" + 
self.config["musicbrainz"]["pass"] = "testpass" + self.config["mbcollection"]["collection"] = self.COLLECTION_ID + + @pytest.fixture(autouse=True) + def helper(self): + self.setup_beets() + + yield self + + self.teardown_beets() + + @pytest.mark.parametrize( + "user_collections,expectation", + [ + ( + [], + pytest.raises( + UserError, match=r"no collections exist for user" + ), + ), + ( + [{"id": "c1", "entity-type": "event"}], + pytest.raises(UserError, match=r"No release collection found."), + ), + ( + [{"id": "c1", "entity-type": "release"}], + pytest.raises(UserError, match=r"invalid collection ID"), + ), + ( + [{"id": COLLECTION_ID, "entity-type": "release"}], + does_not_raise(), + ), + ], + ids=["no collections", "no release collections", "invalid ID", "valid"], + ) + def test_get_collection_validation( + self, requests_mock, user_collections, expectation + ): + requests_mock.get( + "/ws/2/collection", json={"collections": user_collections} + ) + + with expectation: + mbcollection.MusicBrainzCollectionPlugin().collection + + def test_mbupdate(self, helper, requests_mock, monkeypatch): + """Verify mbupdate sync of a MusicBrainz collection with the library. + + This test ensures that the command: + - fetches collection releases using paginated requests, + - submits releases that exist locally but are missing from the remote + collection + - and removes releases from the remote collection that are not in the + local library. Small chunk sizes are forced to exercise pagination and + batching logic. 
+ """ + for mb_albumid in [ + # already present in remote collection + "in_collection1", + "in_collection2", + # two new albums not in remote collection + "00000000-0000-0000-0000-000000000001", + "00000000-0000-0000-0000-000000000002", + ]: + helper.lib.add(Album(mb_albumid=mb_albumid)) + + # The relevant collection + requests_mock.get( + "/ws/2/collection", + json={ + "collections": [ + { + "id": self.COLLECTION_ID, + "entity-type": "release", + "release-count": 3, + } + ] + }, + ) + + collection_releases = f"/ws/2/collection/{self.COLLECTION_ID}/releases" + # Force small fetch chunk to require multiple paged requests. + monkeypatch.setattr( + "beetsplug.mbcollection.MBCollection.FETCH_CHUNK_SIZE", 2 + ) + # 3 releases are fetched in two pages. + requests_mock.get( + re.compile(rf".*{collection_releases}\b.*&offset=0.*"), + json={ + "releases": [{"id": "in_collection1"}, {"id": "not_in_library"}] + }, + ) + requests_mock.get( + re.compile(rf".*{collection_releases}\b.*&offset=2.*"), + json={"releases": [{"id": "in_collection2"}]}, + ) + + # Force small submission chunk + monkeypatch.setattr( + "beetsplug.mbcollection.MBCollection.SUBMISSION_CHUNK_SIZE", 1 + ) + # so that releases are added using two requests + requests_mock.put( + re.compile( + rf".*{collection_releases}/00000000-0000-0000-0000-000000000001" + ) + ) + requests_mock.put( + re.compile( + rf".*{collection_releases}/00000000-0000-0000-0000-000000000002" + ) + ) + # and finally, one release is removed + requests_mock.delete( + re.compile(rf".*{collection_releases}/not_in_library") + ) + + helper.run_command("mbupdate", "--remove") + + assert requests_mock.call_count == 6 diff --git a/test/plugins/test_mbpseudo.py b/test/plugins/test_mbpseudo.py new file mode 100644 index 000000000..6b382ab16 --- /dev/null +++ b/test/plugins/test_mbpseudo.py @@ -0,0 +1,267 @@ +import json +import pathlib +from copy import deepcopy + +import pytest + +from beets.autotag import AlbumMatch +from beets.autotag.distance 
import Distance +from beets.autotag.hooks import AlbumInfo, TrackInfo +from beets.library import Item +from beets.test.helper import PluginMixin +from beetsplug._typing import JSONDict +from beetsplug.mbpseudo import ( + _STATUS_PSEUDO, + MusicBrainzPseudoReleasePlugin, + PseudoAlbumInfo, +) + + +@pytest.fixture(scope="module") +def rsrc_dir(pytestconfig: pytest.Config): + return pytestconfig.rootpath / "test" / "rsrc" / "mbpseudo" + + +@pytest.fixture +def official_release(rsrc_dir: pathlib.Path) -> JSONDict: + info_json = (rsrc_dir / "official_release.json").read_text(encoding="utf-8") + return json.loads(info_json) + + +@pytest.fixture +def pseudo_release(rsrc_dir: pathlib.Path) -> JSONDict: + info_json = (rsrc_dir / "pseudo_release.json").read_text(encoding="utf-8") + return json.loads(info_json) + + +@pytest.fixture +def official_release_info() -> AlbumInfo: + return AlbumInfo( + tracks=[TrackInfo(title="百花繚乱")], + album_id="official", + album="百花繚乱", + ) + + +@pytest.fixture +def pseudo_release_info() -> AlbumInfo: + return AlbumInfo( + tracks=[TrackInfo(title="In Bloom")], + album_id="pseudo", + album="In Bloom", + ) + + +@pytest.mark.usefixtures("config") +class TestPseudoAlbumInfo: + def test_album_id_always_from_pseudo( + self, official_release_info: AlbumInfo, pseudo_release_info: AlbumInfo + ): + info = PseudoAlbumInfo(pseudo_release_info, official_release_info) + info.use_official_as_ref() + assert info.album_id == "pseudo" + + def test_get_attr_from_pseudo( + self, official_release_info: AlbumInfo, pseudo_release_info: AlbumInfo + ): + info = PseudoAlbumInfo(pseudo_release_info, official_release_info) + assert info.album == "In Bloom" + + def test_get_attr_from_official( + self, official_release_info: AlbumInfo, pseudo_release_info: AlbumInfo + ): + info = PseudoAlbumInfo(pseudo_release_info, official_release_info) + info.use_official_as_ref() + assert info.album == info.get_official_release().album + + def test_determine_best_ref( + self, 
official_release_info: AlbumInfo, pseudo_release_info: AlbumInfo + ): + info = PseudoAlbumInfo( + pseudo_release_info, official_release_info, data_source="test" + ) + item = Item(title="百花繚乱") + + assert info.determine_best_ref([item]) == "official" + + info.use_pseudo_as_ref() + assert info.data_source == "test" + + +class TestMBPseudoMixin(PluginMixin): + plugin = "mbpseudo" + + @pytest.fixture(autouse=True) + def patch_get_release(self, monkeypatch, pseudo_release: JSONDict): + monkeypatch.setattr( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release", + lambda _, album_id: deepcopy( + {pseudo_release["id"]: pseudo_release}[album_id] + ), + ) + + @pytest.fixture(scope="class") + def plugin_config(self): + return {"scripts": ["Latn", "Dummy"]} + + @pytest.fixture + def mbpseudo_plugin(self, plugin_config) -> MusicBrainzPseudoReleasePlugin: + self.config[self.plugin].set(plugin_config) + return MusicBrainzPseudoReleasePlugin() + + +class TestMBPseudoPlugin(TestMBPseudoMixin): + def test_scripts_init( + self, mbpseudo_plugin: MusicBrainzPseudoReleasePlugin + ): + assert mbpseudo_plugin._scripts == ["Latn", "Dummy"] + + @pytest.mark.parametrize( + "album_id", + [ + "a5ce1d11-2e32-45a4-b37f-c1589d46b103", + "-5ce1d11-2e32-45a4-b37f-c1589d46b103", + ], + ) + def test_extract_id_uses_music_brainz_pattern( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + album_id: str, + ): + if album_id.startswith("-"): + assert mbpseudo_plugin._extract_id(album_id) is None + else: + assert mbpseudo_plugin._extract_id(album_id) == album_id + + def test_album_info_for_pseudo_release( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + pseudo_release: JSONDict, + ): + album_info = mbpseudo_plugin.album_info(pseudo_release) + assert not isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + assert album_info.albumstatus == _STATUS_PSEUDO + + @pytest.mark.parametrize( + "json_key", + [ + "type", + "direction", + 
"release", + ], + ) + def test_interception_skip_when_rel_values_dont_match( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release: JSONDict, + json_key: str, + ): + del official_release["release-relations"][0][json_key] + + album_info = mbpseudo_plugin.album_info(official_release) + assert not isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + + def test_interception_skip_when_script_doesnt_match( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release: JSONDict, + ): + official_release["release-relations"][0]["release"][ + "text-representation" + ]["script"] = "Null" + + album_info = mbpseudo_plugin.album_info(official_release) + assert not isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + + def test_interception( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release: JSONDict, + ): + album_info = mbpseudo_plugin.album_info(official_release) + assert isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + + def test_final_adjustment_skip( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + ): + match = AlbumMatch( + distance=Distance(), + info=AlbumInfo(tracks=[], data_source="mb"), + mapping={}, + extra_items=[], + extra_tracks=[], + ) + + mbpseudo_plugin._adjust_final_album_match(match) + assert match.info.data_source == "mb" + + def test_final_adjustment( + self, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release_info: AlbumInfo, + pseudo_release_info: AlbumInfo, + ): + pseudo_album_info = PseudoAlbumInfo( + pseudo_release=pseudo_release_info, + official_release=official_release_info, + data_source=mbpseudo_plugin.data_source, + ) + pseudo_album_info.use_official_as_ref() + + item = Item() + item["title"] = "百花繚乱" + + match = AlbumMatch( + distance=Distance(), + info=pseudo_album_info, + mapping={item: 
pseudo_album_info.tracks[0]}, + extra_items=[], + extra_tracks=[], + ) + + mbpseudo_plugin._adjust_final_album_match(match) + + assert match.info.data_source == "MusicBrainz" + assert match.info.album_id == "pseudo" + assert match.info.album == "In Bloom" + + +class TestMBPseudoPluginCustomTagsOnly(TestMBPseudoMixin): + @pytest.fixture(scope="class") + def plugin_config(self): + return {"scripts": ["Latn", "Dummy"], "custom_tags_only": True} + + def test_custom_tags( + self, + config, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release: JSONDict, + ): + config["import"]["languages"] = ["en", "jp"] + album_info = mbpseudo_plugin.album_info(official_release) + assert not isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + assert album_info["album_transl"] == "In Bloom" + assert album_info["album_artist_transl"] == "Lilas Ikuta" + assert album_info.tracks[0]["title_transl"] == "In Bloom" + assert album_info.tracks[0]["artist_transl"] == "Lilas Ikuta" + + def test_custom_tags_with_import_languages( + self, + config, + mbpseudo_plugin: MusicBrainzPseudoReleasePlugin, + official_release: JSONDict, + ): + config["import"]["languages"] = [] + album_info = mbpseudo_plugin.album_info(official_release) + assert not isinstance(album_info, PseudoAlbumInfo) + assert album_info.data_source == "MusicBrainzPseudoRelease" + assert album_info["album_transl"] == "In Bloom" + assert album_info["album_artist_transl"] == "Lilas Ikuta" + assert album_info.tracks[0]["title_transl"] == "In Bloom" + assert album_info.tracks[0]["artist_transl"] == "Lilas Ikuta" diff --git a/test/plugins/test_mbsubmit.py b/test/plugins/test_mbsubmit.py index f92d85973..712c90866 100644 --- a/test/plugins/test_mbsubmit.py +++ b/test/plugins/test_mbsubmit.py @@ -14,8 +14,7 @@ from beets.test.helper import ( - AutotagStub, - ImportTestCase, + AutotagImportTestCase, PluginMixin, TerminalImportMixin, capture_stdout, @@ -23,23 +22,18 @@ from 
beets.test.helper import ( ) -class MBSubmitPluginTest(PluginMixin, TerminalImportMixin, ImportTestCase): +class MBSubmitPluginTest( + PluginMixin, TerminalImportMixin, AutotagImportTestCase +): plugin = "mbsubmit" def setUp(self): super().setUp() self.prepare_album_for_import(2) self.setup_importer() - self.matcher = AutotagStub().install() - - def tearDown(self): - super().tearDown() - self.matcher.restore() def test_print_tracks_output(self): """Test the output of the "print tracks" choice.""" - self.matcher.matching = AutotagStub.BAD - with capture_stdout() as output: with control_stdin("\n".join(["p", "s"])): # Print tracks; Skip @@ -55,8 +49,6 @@ class MBSubmitPluginTest(PluginMixin, TerminalImportMixin, ImportTestCase): def test_print_tracks_output_as_tracks(self): """Test the output of the "print tracks" choice, as singletons.""" - self.matcher.matching = AutotagStub.BAD - with capture_stdout() as output: with control_stdin("\n".join(["t", "s", "p", "s"])): # as Tracks; Skip; Print tracks; Skip @@ -64,6 +56,6 @@ class MBSubmitPluginTest(PluginMixin, TerminalImportMixin, ImportTestCase): # Manually build the string for comparing the output. tracklist = ( - "Open files with Picard? " "02. Tag Track 2 - Tag Artist (0:01)" + "Open files with Picard? 02. 
Tag Track 2 - Tag Artist (0:01)" ) assert tracklist in output.getvalue() diff --git a/test/plugins/test_mbsync.py b/test/plugins/test_mbsync.py index 088165ef5..bb88e5e63 100644 --- a/test/plugins/test_mbsync.py +++ b/test/plugins/test_mbsync.py @@ -23,7 +23,7 @@ class MbsyncCliTest(PluginTestCase): plugin = "mbsync" @patch( - "beets.plugins.album_for_id", + "beets.metadata_plugins.album_for_id", Mock( side_effect=lambda *_: AlbumInfo( album_id="album id", @@ -33,7 +33,7 @@ class MbsyncCliTest(PluginTestCase): ), ) @patch( - "beets.plugins.track_for_id", + "beets.metadata_plugins.track_for_id", Mock( side_effect=lambda *_: TrackInfo( track_id="singleton id", title="new title" diff --git a/test/plugins/test_missing.py b/test/plugins/test_missing.py new file mode 100644 index 000000000..d12f2b4cf --- /dev/null +++ b/test/plugins/test_missing.py @@ -0,0 +1,61 @@ +import uuid + +import pytest + +from beets.library import Album +from beets.test.helper import PluginMixin, TestHelper + + +@pytest.fixture +def helper(): + helper = TestHelper() + helper.setup_beets() + + yield helper + + helper.teardown_beets() + + +class TestMissingAlbums(PluginMixin): + plugin = "missing" + album_in_lib = Album( + album="Album", + albumartist="Artist", + mb_albumartistid=str(uuid.uuid4()), + mb_albumid="album", + ) + + @pytest.mark.parametrize( + "release_from_mb,expected_output", + [ + pytest.param( + {"id": "other", "title": "Other Album"}, + "Artist - Other Album\n", + id="missing", + ), + pytest.param( + {"id": album_in_lib.mb_albumid, "title": album_in_lib.album}, + "", + marks=pytest.mark.xfail( + reason=( + "Album in lib must not be reported as missing." + " Needs fixing." 
+ ) + ), + id="not missing", + ), + ], + ) + def test_missing_artist_albums( + self, requests_mock, helper, release_from_mb, expected_output + ): + helper.lib.add(self.album_in_lib) + requests_mock.get( + f"/ws/2/release-group?artist={self.album_in_lib.mb_albumartistid}", + json={"release-groups": [release_from_mb]}, + ) + + with self.configure_plugin({}): + assert ( + helper.run_with_output("missing", "--album") == expected_output + ) diff --git a/test/plugins/test_mpdstats.py b/test/plugins/test_mpdstats.py index dcaf196ef..6f5d3f3ce 100644 --- a/test/plugins/test_mpdstats.py +++ b/test/plugins/test_mpdstats.py @@ -77,7 +77,7 @@ class MPDStatsTest(PluginTestCase): except KeyboardInterrupt: pass - log.debug.assert_has_calls([call('unhandled status "{0}"', ANY)]) + log.debug.assert_has_calls([call('unhandled status "{}"', ANY)]) log.info.assert_has_calls( - [call("pause"), call("playing {0}", ANY), call("stop")] + [call("pause"), call("playing {}", ANY), call("stop")] ) diff --git a/test/test_mb.py b/test/plugins/test_musicbrainz.py similarity index 62% rename from test/test_mb.py rename to test/plugins/test_musicbrainz.py index 37b5c0fff..733287204 100644 --- a/test/test_mb.py +++ b/test/plugins/test_musicbrainz.py @@ -14,14 +14,25 @@ """Tests for MusicBrainz API wrapper.""" +import unittest from unittest import mock +import pytest + from beets import config -from beets.autotag import mb -from beets.test.helper import BeetsTestCase +from beets.library import Item +from beets.test.helper import BeetsTestCase, PluginMixin +from beetsplug import musicbrainz -class MBAlbumInfoTest(BeetsTestCase): +class MusicBrainzTestCase(BeetsTestCase): + def setUp(self): + super().setUp() + self.mb = musicbrainz.MusicBrainzPlugin() + self.config["match"]["preferred"]["countries"] = ["US"] + + +class MBAlbumInfoTest(MusicBrainzTestCase): def _make_release( self, date_str="2009", @@ -54,8 +65,10 @@ class MBAlbumInfoTest(BeetsTestCase): } ], "date": "3001", - "medium-list": [], - 
"label-info-list": [ + "media": [], + "genres": [{"count": 1, "name": "GENRE"}], + "tags": [{"count": 1, "name": "TAG"}], + "label-info": [ { "catalog-number": "CATALOG NUMBER", "label": {"name": "LABEL NAME"}, @@ -68,10 +81,11 @@ class MBAlbumInfoTest(BeetsTestCase): "country": "COUNTRY", "status": "STATUS", "barcode": "BARCODE", + "release-events": [{"area": None, "date": "2021-03-26"}], } if multi_artist_credit: - release["artist-credit"].append(" & ") # add join phase + release["artist-credit"][0]["joinphrase"] = " & " release["artist-credit"].append( { "artist": { @@ -89,7 +103,7 @@ class MBAlbumInfoTest(BeetsTestCase): for recording in tracks: i += 1 track = { - "id": "RELEASE TRACK ID %d" % i, + "id": f"RELEASE TRACK ID {i}", "recording": recording, "position": i, "number": "A1", @@ -112,7 +126,7 @@ class MBAlbumInfoTest(BeetsTestCase): ] if multi_artist_credit: - track["artist-credit"].append(" & ") # add join phase + track["artist-credit"][0]["joinphrase"] = " & " track["artist-credit"].append( { "artist": { @@ -130,17 +144,17 @@ class MBAlbumInfoTest(BeetsTestCase): for recording in data_tracks: i += 1 data_track = { - "id": "RELEASE TRACK ID %d" % i, + "id": f"RELEASE TRACK ID {i}", "recording": recording, "position": i, "number": "A1", } data_track_list.append(data_track) - release["medium-list"].append( + release["media"].append( { "position": "1", - "track-list": track_list, - "data-track-list": data_track_list, + "tracks": track_list, + "data-tracks": data_track_list, "format": medium_format, "title": "MEDIUM TITLE", } @@ -176,7 +190,7 @@ class MBAlbumInfoTest(BeetsTestCase): } ] if multi_artist_credit: - track["artist-credit"].append(" & ") # add join phase + track["artist-credit"][0]["joinphrase"] = " & " track["artist-credit"].append( { "artist": { @@ -188,11 +202,10 @@ class MBAlbumInfoTest(BeetsTestCase): } ) if remixer: - track["artist-relation-list"] = [ + track["artist-relations"] = [ { "type": "remixer", "type-id": "RELATION TYPE ID", - 
"target": "RECORDING REMIXER ARTIST ID", "direction": "RECORDING RELATION DIRECTION", "artist": { "id": "RECORDING REMIXER ARTIST ID", @@ -203,14 +216,14 @@ class MBAlbumInfoTest(BeetsTestCase): } ] if video: - track["video"] = "true" + track["video"] = True if disambiguation: track["disambiguation"] = disambiguation return track def test_parse_release_with_year(self): release = self._make_release("1984") - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.album == "ALBUM TITLE" assert d.album_id == "ALBUM ID" assert d.artist == "ARTIST NAME" @@ -221,12 +234,12 @@ class MBAlbumInfoTest(BeetsTestCase): def test_parse_release_type(self): release = self._make_release("1984") - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.albumtype == "album" def test_parse_release_full_date(self): release = self._make_release("1987-03-31") - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.original_year == 1987 assert d.original_month == 3 assert d.original_day == 31 @@ -238,7 +251,7 @@ class MBAlbumInfoTest(BeetsTestCase): ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) t = d.tracks assert len(t) == 2 assert t[0].title == "TITLE ONE" @@ -255,7 +268,7 @@ class MBAlbumInfoTest(BeetsTestCase): ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) t = d.tracks assert t[0].medium_index == 1 assert t[0].index == 1 @@ -269,7 +282,7 @@ class MBAlbumInfoTest(BeetsTestCase): ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.mediums == 1 t = d.tracks assert t[0].medium == 1 @@ -289,14 +302,14 @@ class MBAlbumInfoTest(BeetsTestCase): "number": "A1", } ] - release["medium-list"].append( + release["media"].append( { "position": "2", - "track-list": second_track_list, + "tracks": second_track_list, } ) - d = mb.album_info(release) + d = 
self.mb.album_info(release) assert d.mediums == 2 t = d.tracks assert t[0].medium == 1 @@ -308,79 +321,81 @@ class MBAlbumInfoTest(BeetsTestCase): def test_parse_release_year_month_only(self): release = self._make_release("1987-03") - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.original_year == 1987 assert d.original_month == 3 def test_no_durations(self): tracks = [self._make_track("TITLE", "ID", None)] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.tracks[0].length is None def test_track_length_overrides_recording_length(self): tracks = [self._make_track("TITLE", "ID", 1.0 * 1000.0)] release = self._make_release(tracks=tracks, track_length=2.0 * 1000.0) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.tracks[0].length == 2.0 def test_no_release_date(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert not d.original_year assert not d.original_month assert not d.original_day def test_various_artists_defaults_false(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert not d.va def test_detect_various_artists(self): release = self._make_release(None) - release["artist-credit"][0]["artist"]["id"] = mb.VARIOUS_ARTISTS_ID - d = mb.album_info(release) + release["artist-credit"][0]["artist"]["id"] = ( + musicbrainz.VARIOUS_ARTISTS_ID + ) + d = self.mb.album_info(release) assert d.va def test_parse_artist_sort_name(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.artist_sort == "ARTIST SORT NAME" def test_parse_releasegroupid(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.releasegroup_id == "RELEASE GROUP ID" def test_parse_asin(self): release = self._make_release(None) - d = mb.album_info(release) + d = 
self.mb.album_info(release) assert d.asin == "ALBUM ASIN" def test_parse_catalognum(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.catalognum == "CATALOG NUMBER" def test_parse_textrepr(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.script == "SCRIPT" assert d.language == "LANGUAGE" def test_parse_country(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.country == "COUNTRY" def test_parse_status(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.albumstatus == "STATUS" def test_parse_barcode(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.barcode == "BARCODE" def test_parse_media(self): @@ -389,12 +404,12 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(None, tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.media == "FORMAT" def test_parse_disambig(self): release = self._make_release(None) - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.albumdisambig == "R_DISAMBIGUATION" assert d.releasegroupdisambig == "RG_DISAMBIGUATION" @@ -404,7 +419,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(None, tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) t = d.tracks assert t[0].disctitle == "MEDIUM TITLE" assert t[1].disctitle == "MEDIUM TITLE" @@ -412,13 +427,13 @@ class MBAlbumInfoTest(BeetsTestCase): def test_missing_language(self): release = self._make_release(None) del release["text-representation"]["language"] - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.language is None def 
test_parse_recording_artist(self): tracks = [self._make_track("a", "b", 1, True)] release = self._make_release(None, tracks=tracks) - track = mb.album_info(release).tracks[0] + track = self.mb.album_info(release).tracks[0] assert track.artist == "RECORDING ARTIST NAME" assert track.artist_id == "RECORDING ARTIST ID" assert track.artist_sort == "RECORDING ARTIST SORT NAME" @@ -427,7 +442,7 @@ class MBAlbumInfoTest(BeetsTestCase): def test_parse_recording_artist_multi(self): tracks = [self._make_track("a", "b", 1, True, multi_artist_credit=True)] release = self._make_release(None, tracks=tracks) - track = mb.album_info(release).tracks[0] + track = self.mb.album_info(release).tracks[0] assert track.artist == "RECORDING ARTIST NAME & RECORDING ARTIST 2 NAME" assert track.artist_id == "RECORDING ARTIST ID" assert ( @@ -459,7 +474,7 @@ class MBAlbumInfoTest(BeetsTestCase): def test_track_artist_overrides_recording_artist(self): tracks = [self._make_track("a", "b", 1, True)] release = self._make_release(None, tracks=tracks, track_artist=True) - track = mb.album_info(release).tracks[0] + track = self.mb.album_info(release).tracks[0] assert track.artist == "TRACK ARTIST NAME" assert track.artist_id == "TRACK ARTIST ID" assert track.artist_sort == "TRACK ARTIST SORT NAME" @@ -470,7 +485,7 @@ class MBAlbumInfoTest(BeetsTestCase): release = self._make_release( None, tracks=tracks, track_artist=True, multi_artist_credit=True ) - track = mb.album_info(release).tracks[0] + track = self.mb.album_info(release).tracks[0] assert track.artist == "TRACK ARTIST NAME & TRACK ARTIST 2 NAME" assert track.artist_id == "TRACK ARTIST ID" assert ( @@ -495,14 +510,34 @@ class MBAlbumInfoTest(BeetsTestCase): def test_parse_recording_remixer(self): tracks = [self._make_track("a", "b", 1, remixer=True)] release = self._make_release(None, tracks=tracks) - track = mb.album_info(release).tracks[0] + track = self.mb.album_info(release).tracks[0] assert track.remixer == "RECORDING REMIXER ARTIST NAME" 
def test_data_source(self): release = self._make_release() - d = mb.album_info(release) + d = self.mb.album_info(release) assert d.data_source == "MusicBrainz" + def test_genres(self): + config["musicbrainz"]["genres"] = True + config["musicbrainz"]["genres_tag"] = "genre" + release = self._make_release() + d = self.mb.album_info(release) + assert d.genre == "GENRE" + + def test_tags(self): + config["musicbrainz"]["genres"] = True + config["musicbrainz"]["genres_tag"] = "tag" + release = self._make_release() + d = self.mb.album_info(release) + assert d.genre == "TAG" + + def test_no_genres(self): + config["musicbrainz"]["genres"] = False + release = self._make_release() + d = self.mb.album_info(release) + assert d.genre is None + def test_ignored_media(self): config["match"]["ignored_media"] = ["IGNORED1", "IGNORED2"] tracks = [ @@ -510,7 +545,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(tracks=tracks, medium_format="IGNORED1") - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 0 def test_no_ignored_media(self): @@ -520,7 +555,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(tracks=tracks, medium_format="NON-IGNORED") - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 2 def test_skip_data_track(self): @@ -530,7 +565,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 2 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -546,7 +581,7 @@ class MBAlbumInfoTest(BeetsTestCase): ) ] release = self._make_release(tracks=tracks, data_tracks=data_tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert 
len(d.tracks) == 2 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -563,7 +598,7 @@ class MBAlbumInfoTest(BeetsTestCase): ) ] release = self._make_release(tracks=tracks, data_tracks=data_tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 3 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -578,7 +613,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 2 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -594,7 +629,7 @@ class MBAlbumInfoTest(BeetsTestCase): ) ] release = self._make_release(tracks=tracks, data_tracks=data_tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 2 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -610,7 +645,7 @@ class MBAlbumInfoTest(BeetsTestCase): self._make_track("TITLE TWO", "ID TWO", 200.0 * 1000.0), ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 3 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE VIDEO" @@ -629,7 +664,7 @@ class MBAlbumInfoTest(BeetsTestCase): ) ] release = self._make_release(tracks=tracks, data_tracks=data_tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) assert len(d.tracks) == 3 assert d.tracks[0].title == "TITLE ONE" assert d.tracks[1].title == "TITLE TWO" @@ -647,73 +682,62 @@ class MBAlbumInfoTest(BeetsTestCase): ] release = self._make_release(tracks=tracks) - d = mb.album_info(release) + d = self.mb.album_info(release) t = d.tracks assert len(t) == 2 assert t[0].trackdisambig is None assert t[1].trackdisambig == "SECOND TRACK" -class ParseIDTest(BeetsTestCase): - def 
test_parse_id_correct(self): - id_string = "28e32c71-1450-463e-92bf-e0a46446fc11" - out = mb._parse_id(id_string) - assert out == id_string - - def test_parse_id_non_id_returns_none(self): - id_string = "blah blah" - out = mb._parse_id(id_string) - assert out is None - - def test_parse_id_url_finds_id(self): - id_string = "28e32c71-1450-463e-92bf-e0a46446fc11" - id_url = "https://musicbrainz.org/entity/%s" % id_string - out = mb._parse_id(id_url) - assert out == id_string - - -class ArtistFlatteningTest(BeetsTestCase): +class ArtistFlatteningTest(unittest.TestCase): def _credit_dict(self, suffix=""): return { "artist": { - "name": "NAME" + suffix, - "sort-name": "SORT" + suffix, + "name": f"NAME{suffix}", + "sort-name": f"SORT{suffix}", }, - "name": "CREDIT" + suffix, + "name": f"CREDIT{suffix}", } def _add_alias(self, credit_dict, suffix="", locale="", primary=False): alias = { - "alias": "ALIAS" + suffix, + "name": f"ALIAS{suffix}", "locale": locale, - "sort-name": "ALIASSORT" + suffix, + "sort-name": f"ALIASSORT{suffix}", } if primary: alias["primary"] = "primary" - if "alias-list" not in credit_dict["artist"]: - credit_dict["artist"]["alias-list"] = [] - credit_dict["artist"]["alias-list"].append(alias) + if "aliases" not in credit_dict["artist"]: + credit_dict["artist"]["aliases"] = [] + credit_dict["artist"]["aliases"].append(alias) def test_single_artist(self): credit = [self._credit_dict()] - a, s, c = mb._flatten_artist_credit(credit) + a, s, c = musicbrainz._flatten_artist_credit(credit) assert a == "NAME" assert s == "SORT" assert c == "CREDIT" - a, s, c = mb._multi_artist_credit(credit, include_join_phrase=False) + a, s, c = musicbrainz._multi_artist_credit( + credit, include_join_phrase=False + ) assert a == ["NAME"] assert s == ["SORT"] assert c == ["CREDIT"] def test_two_artists(self): - credit = [self._credit_dict("a"), " AND ", self._credit_dict("b")] - a, s, c = mb._flatten_artist_credit(credit) + credit = [ + {**self._credit_dict("a"), 
"joinphrase": " AND "}, + self._credit_dict("b"), + ] + a, s, c = musicbrainz._flatten_artist_credit(credit) assert a == "NAMEa AND NAMEb" assert s == "SORTa AND SORTb" assert c == "CREDITa AND CREDITb" - a, s, c = mb._multi_artist_credit(credit, include_join_phrase=False) + a, s, c = musicbrainz._multi_artist_credit( + credit, include_join_phrase=False + ) assert a == ["NAMEa", "NAMEb"] assert s == ["SORTa", "SORTb"] assert c == ["CREDITa", "CREDITb"] @@ -730,342 +754,350 @@ class ArtistFlatteningTest(BeetsTestCase): # test no alias config["import"]["languages"] = [""] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("NAME", "SORT", "CREDIT") # test en primary config["import"]["languages"] = ["en"] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("ALIASen", "ALIASSORTen", "CREDIT") # test en_GB en primary config["import"]["languages"] = ["en_GB", "en"] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("ALIASen_GB", "ALIASSORTen_GB", "CREDIT") # test en en_GB primary config["import"]["languages"] = ["en", "en_GB"] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("ALIASen", "ALIASSORTen", "CREDIT") # test fr primary config["import"]["languages"] = ["fr"] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("ALIASfr_P", "ALIASSORTfr_P", "CREDIT") # test for not matching non-primary config["import"]["languages"] = ["pt_BR", "fr"] - flat = mb._flatten_artist_credit([credit_dict]) + flat = musicbrainz._flatten_artist_credit([credit_dict]) assert flat == ("ALIASfr_P", "ALIASSORTfr_P", "CREDIT") -class MBLibraryTest(BeetsTestCase): - def test_match_track(self): - with 
mock.patch("musicbrainzngs.search_recordings") as p: - p.return_value = { - "recording-list": [ - { - "title": "foo", - "id": "bar", - "length": 42, - } - ], - } - ti = list(mb.match_track("hello", "there"))[0] - - p.assert_called_with(artist="hello", recording="there", limit=5) - assert ti.title == "foo" - assert ti.track_id == "bar" - - def test_match_album(self): - mbid = "d2a6f856-b553-40a0-ac54-a321e8e2da99" - with mock.patch("musicbrainzngs.search_releases") as sp: - sp.return_value = { - "release-list": [ - { - "id": mbid, - } - ], - } - with mock.patch("musicbrainzngs.get_release_by_id") as gp: - gp.return_value = { - "release": { - "title": "hi", - "id": mbid, - "status": "status", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "foo", - "id": "bar", - "length": 42, - }, - "position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - } - } - - ai = list(mb.match_album("hello", "there"))[0] - - sp.assert_called_with(artist="hello", release="there", limit=5) - gp.assert_called_with(mbid, mock.ANY) - assert ai.tracks[0].title == "foo" - assert ai.album == "hi" - - def test_match_track_empty(self): - with mock.patch("musicbrainzngs.search_recordings") as p: - til = list(mb.match_track(" ", " ")) - assert not p.called - assert til == [] - - def test_match_album_empty(self): - with mock.patch("musicbrainzngs.search_releases") as p: - ail = list(mb.match_album(" ", " ")) - assert not p.called - assert ail == [] - +class MBLibraryTest(MusicBrainzTestCase): def test_follow_pseudo_releases(self): side_effect = [ { - "release": { - "title": "pseudo", - "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", - "status": "Pseudo-Release", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "translated title", - "id": "bar", - "length": 42, - }, - 
"position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - "release-relation-list": [ - { - "type": "transl-tracklisting", - "target": "d2a6f856-b553-40a0-ac54-a321e8e2da01", - "direction": "backward", - } - ], - } + "title": "pseudo", + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", + "status": "Pseudo-Release", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": { + "title": "translated title", + "id": "bar", + "length": 42, + }, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + { + "artist": { + "name": "some-artist", + "id": "some-id", + }, + } + ], + "release-group": { + "id": "another-id", + }, + "release-relations": [ + { + "type": "transl-tracklisting", + "direction": "backward", + "release": { + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da01" + }, + } + ], }, { - "release": { - "title": "actual", - "id": "d2a6f856-b553-40a0-ac54-a321e8e2da01", - "status": "Official", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "original title", - "id": "bar", - "length": 42, - }, - "position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - "country": "COUNTRY", - } + "title": "actual", + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da01", + "status": "Official", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": { + "title": "original title", + "id": "bar", + "length": 42, + }, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + { + "artist": { + "name": "some-artist", + "id": "some-id", + }, + } + ], + "release-group": { + "id": "another-id", + }, + "country": "COUNTRY", }, ] - with 
mock.patch("musicbrainzngs.get_release_by_id") as gp: + with mock.patch( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release" + ) as gp: gp.side_effect = side_effect - album = mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") + album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") assert album.country == "COUNTRY" def test_pseudo_releases_with_empty_links(self): side_effect = [ { - "release": { - "title": "pseudo", - "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", - "status": "Pseudo-Release", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "translated title", - "id": "bar", - "length": 42, - }, - "position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - "release-relation-list": [], - } - }, + "title": "pseudo", + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", + "status": "Pseudo-Release", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": { + "title": "translated title", + "id": "bar", + "length": 42, + }, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + { + "artist": { + "name": "some-artist", + "id": "some-id", + }, + } + ], + "release-group": { + "id": "another-id", + }, + } ] - with mock.patch("musicbrainzngs.get_release_by_id") as gp: + with mock.patch( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release" + ) as gp: gp.side_effect = side_effect - album = mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") + album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") assert album.country is None def test_pseudo_releases_without_links(self): side_effect = [ { - "release": { - "title": "pseudo", - "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", - "status": "Pseudo-Release", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": 
"translated title", - "id": "bar", - "length": 42, - }, - "position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - } - }, + "title": "pseudo", + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", + "status": "Pseudo-Release", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": { + "title": "translated title", + "id": "bar", + "length": 42, + }, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + { + "artist": { + "name": "some-artist", + "id": "some-id", + }, + } + ], + "release-group": { + "id": "another-id", + }, + } ] - with mock.patch("musicbrainzngs.get_release_by_id") as gp: + with mock.patch( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release" + ) as gp: gp.side_effect = side_effect - album = mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") + album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") assert album.country is None def test_pseudo_releases_with_unsupported_links(self): side_effect = [ { - "release": { - "title": "pseudo", - "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", - "status": "Pseudo-Release", - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "translated title", - "id": "bar", - "length": 42, - }, - "position": 9, - "number": "A1", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": "some-artist", - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - "release-relation-list": [ - { - "type": "remaster", - "target": "d2a6f856-b553-40a0-ac54-a321e8e2da01", - "direction": "backward", - } - ], - } - }, + "title": "pseudo", + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da02", + "status": "Pseudo-Release", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": { + "title": "translated title", + "id": "bar", + 
"length": 42, + }, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + { + "artist": { + "name": "some-artist", + "id": "some-id", + }, + } + ], + "release-group": { + "id": "another-id", + }, + "release-relations": [ + { + "type": "remaster", + "direction": "backward", + "release": { + "id": "d2a6f856-b553-40a0-ac54-a321e8e2da01" + }, + } + ], + } ] - with mock.patch("musicbrainzngs.get_release_by_id") as gp: + with mock.patch( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release" + ) as gp: gp.side_effect = side_effect - album = mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") + album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02") assert album.country is None + + +class TestMusicBrainzPlugin(PluginMixin): + plugin = "musicbrainz" + + mbid = "d2a6f856-b553-40a0-ac54-a321e8e2da99" + RECORDING = {"title": "foo", "id": "bar", "length": 42} + + @pytest.fixture + def plugin_config(self): + return {} + + @pytest.fixture + def mb(self, plugin_config): + self.config[self.plugin].set(plugin_config) + + return musicbrainz.MusicBrainzPlugin() + + @pytest.mark.parametrize( + "plugin_config,va_likely,expected_additional_criteria", + [ + ({}, False, {"artist": "Artist "}), + ({}, True, {"arid": "89ad4ac3-39f7-470e-963a-56509c546377"}), + ( + {"extra_tags": ["label", "catalognum"]}, + False, + {"artist": "Artist ", "label": "abc", "catno": "ABC123"}, + ), + ], + ) + def test_get_album_criteria( + self, mb, va_likely, expected_additional_criteria + ): + items = [ + Item(catalognum="ABC 123", label="abc"), + Item(catalognum="ABC 123", label="abc"), + Item(catalognum="ABC 123", label="def"), + ] + + assert mb.get_album_criteria(items, "Artist ", " Album", va_likely) == { + "release": " Album", + **expected_additional_criteria, + } + + def test_item_candidates(self, monkeypatch, mb): + monkeypatch.setattr( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_json", + lambda *_, **__: {"recordings": 
[self.RECORDING]}, + ) + + candidates = list(mb.item_candidates(Item(), "hello", "there")) + + assert len(candidates) == 1 + assert candidates[0].track_id == self.RECORDING["id"] + + def test_candidates(self, monkeypatch, mb): + monkeypatch.setattr( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_json", + lambda *_, **__: {"releases": [{"id": self.mbid}]}, + ) + monkeypatch.setattr( + "beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release", + lambda *_, **__: { + "title": "hi", + "id": self.mbid, + "status": "status", + "media": [ + { + "tracks": [ + { + "id": "baz", + "recording": self.RECORDING, + "position": 9, + "number": "A1", + } + ], + "position": 5, + } + ], + "artist-credit": [ + {"artist": {"name": "some-artist", "id": "some-id"}} + ], + "release-group": {"id": "another-id"}, + }, + ) + candidates = list(mb.candidates([], "hello", "there", False)) + + assert len(candidates) == 1 + assert candidates[0].tracks[0].track_id == self.RECORDING["id"] + assert candidates[0].album == "hi" diff --git a/test/plugins/test_parentwork.py b/test/plugins/test_parentwork.py index 99267f6ff..2218e9fd6 100644 --- a/test/plugins/test_parentwork.py +++ b/test/plugins/test_parentwork.py @@ -14,74 +14,10 @@ """Tests for the 'parentwork' plugin.""" -from unittest.mock import patch - import pytest from beets.library import Item from beets.test.helper import PluginTestCase -from beetsplug import parentwork - -work = { - "work": { - "id": "1", - "title": "work", - "work-relation-list": [ - {"type": "parts", "direction": "backward", "work": {"id": "2"}} - ], - "artist-relation-list": [ - { - "type": "composer", - "artist": { - "name": "random composer", - "sort-name": "composer, random", - }, - } - ], - } -} -dp_work = { - "work": { - "id": "2", - "title": "directparentwork", - "work-relation-list": [ - {"type": "parts", "direction": "backward", "work": {"id": "3"}} - ], - "artist-relation-list": [ - { - "type": "composer", - "artist": { - "name": "random composer", - 
"sort-name": "composer, random", - }, - } - ], - } -} -p_work = { - "work": { - "id": "3", - "title": "parentwork", - "artist-relation-list": [ - { - "type": "composer", - "artist": { - "name": "random composer", - "sort-name": "composer, random", - }, - } - ], - } -} - - -def mock_workid_response(mbid, includes): - if mbid == "1": - return work - elif mbid == "2": - return dp_work - elif mbid == "3": - return p_work @pytest.mark.integration_test @@ -93,8 +29,7 @@ class ParentWorkIntegrationTest(PluginTestCase): item = Item( path="/file", mb_workid="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", - parentwork_workid_current="e27bda6e-531e-36d3-9cd7-\ - b8ebc18e8c53", + parentwork_workid_current="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", ) item.add(self.lib) @@ -109,8 +44,7 @@ class ParentWorkIntegrationTest(PluginTestCase): path="/file", mb_workid="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", mb_parentworkid="XXX", - parentwork_workid_current="e27bda6e-531e-36d3-9cd7-\ - b8ebc18e8c53", + parentwork_workid_current="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", parentwork="whatever", ) item.add(self.lib) @@ -124,11 +58,9 @@ class ParentWorkIntegrationTest(PluginTestCase): self.config["parentwork"]["force"] = False item = Item( path="/file", - mb_workid="e27bda6e-531e-36d3-9cd7-\ - b8ebc18e8c53", + mb_workid="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", mb_parentworkid="XXX", - parentwork_workid_current="e27bda6e-531e-36d3-9cd7-\ - b8ebc18e8c53", + parentwork_workid_current="e27bda6e-531e-36d3-9cd7-b8ebc18e8c53", parentwork="whatever", ) item.add(self.lib) @@ -138,35 +70,56 @@ class ParentWorkIntegrationTest(PluginTestCase): item.load() assert item["mb_parentworkid"] == "XXX" - # test different cases, still with Matthew Passion Ouverture or Mozart - # requiem - - def test_direct_parent_work_real(self): - mb_workid = "2e4a3668-458d-3b2a-8be2-0b08e0d8243a" - assert ( - "f04b42df-7251-4d86-a5ee-67cfa49580d1" - == parentwork.direct_parent_id(mb_workid)[0] - ) - assert ( - 
"45afb3b2-18ac-4187-bc72-beb1b1c194ba" - == parentwork.work_parent_id(mb_workid)[0] - ) - class ParentWorkTest(PluginTestCase): plugin = "parentwork" - def setUp(self): - """Set up configuration""" - super().setUp() - self.patcher = patch( - "musicbrainzngs.get_work_by_id", side_effect=mock_workid_response + @pytest.fixture(autouse=True) + def patch_works(self, requests_mock): + requests_mock.get( + "/ws/2/work/1?inc=work-rels%2Bartist-rels", + json={ + "id": "1", + "title": "work", + "work-relations": [ + { + "type": "parts", + "direction": "backward", + "work": {"id": "2"}, + } + ], + }, + ) + requests_mock.get( + "/ws/2/work/2?inc=work-rels%2Bartist-rels", + json={ + "id": "2", + "title": "directparentwork", + "work-relations": [ + { + "type": "parts", + "direction": "backward", + "work": {"id": "3"}, + } + ], + }, + ) + requests_mock.get( + "/ws/2/work/3?inc=work-rels%2Bartist-rels", + json={ + "id": "3", + "title": "parentwork", + "artist-relations": [ + { + "type": "composer", + "artist": { + "name": "random composer", + "sort-name": "composer, random", + }, + } + ], + }, ) - self.patcher.start() - - def tearDown(self): - super().tearDown() - self.patcher.stop() def test_normal_case(self): item = Item(path="/file", mb_workid="1", parentwork_workid_current="1") @@ -208,7 +161,3 @@ class ParentWorkTest(PluginTestCase): item.load() assert item["mb_parentworkid"] == "XXX" - - def test_direct_parent_work(self): - assert "2" == parentwork.direct_parent_id("1")[0] - assert "3" == parentwork.work_parent_id("1")[0] diff --git a/test/plugins/test_permissions.py b/test/plugins/test_permissions.py index 274cd92ac..475e98194 100644 --- a/test/plugins/test_permissions.py +++ b/test/plugins/test_permissions.py @@ -6,7 +6,6 @@ from unittest.mock import Mock, patch from beets.test._common import touch from beets.test.helper import AsIsImporterMixin, ImportTestCase, PluginMixin -from beets.util import displayable_path from beetsplug.permissions import ( check_permissions, 
convert_perm, @@ -23,57 +22,25 @@ class PermissionsPluginTest(AsIsImporterMixin, PluginMixin, ImportTestCase): self.config["permissions"] = {"file": "777", "dir": "777"} def test_permissions_on_album_imported(self): - self.do_thing(True) + self.import_and_check_permissions() def test_permissions_on_item_imported(self): self.config["import"]["singletons"] = True - self.do_thing(True) + self.import_and_check_permissions() - @patch("os.chmod", Mock()) - def test_failing_to_set_permissions(self): - self.do_thing(False) - - def do_thing(self, expect_success): + def import_and_check_permissions(self): if platform.system() == "Windows": self.skipTest("permissions not available on Windows") - def get_stat(v): - return ( - os.stat(os.path.join(self.temp_dir, b"import", *v)).st_mode - & 0o777 - ) - - typs = ["file", "dir"] - - track_file = (b"album", b"track_1.mp3") - self.exp_perms = { - True: { - k: convert_perm(self.config["permissions"][k].get()) - for k in typs - }, - False: {k: get_stat(v) for (k, v) in zip(typs, (track_file, ()))}, - } + track_file = os.path.join(self.import_dir, b"album", b"track_1.mp3") + assert os.stat(track_file).st_mode & 0o777 != 511 self.run_asis_importer() item = self.lib.items().get() - self.assertPerms(item.path, "file", expect_success) - - for path in dirs_in_library(self.lib.directory, item.path): - self.assertPerms(path, "dir", expect_success) - - def assertPerms(self, path, typ, expect_success): - for x in [ - (True, self.exp_perms[expect_success][typ], "!="), - (False, self.exp_perms[not expect_success][typ], "=="), - ]: - msg = "{} : {} {} {}".format( - displayable_path(path), - oct(os.stat(path).st_mode), - x[2], - oct(x[1]), - ) - assert x[0] == check_permissions(path, x[1]), msg + paths = (item.path, *dirs_in_library(self.lib.directory, item.path)) + for path in paths: + assert os.stat(path).st_mode & 0o777 == 511 def test_convert_perm_from_string(self): assert convert_perm("10") == 8 diff --git a/test/plugins/test_play.py 
b/test/plugins/test_play.py index 712739633..b184db63f 100644 --- a/test/plugins/test_play.py +++ b/test/plugins/test_play.py @@ -49,7 +49,7 @@ class PlayPluginTest(CleanupModulesMixin, PluginTestCase): open_mock.assert_called_once_with(ANY, expected_cmd) expected_playlist = expected_playlist or self.item.path.decode("utf-8") - exp_playlist = expected_playlist + "\n" + exp_playlist = f"{expected_playlist}\n" with open(open_mock.call_args[0][0][0], "rb") as playlist: assert exp_playlist == playlist.read().decode("utf-8") @@ -96,9 +96,7 @@ class PlayPluginTest(CleanupModulesMixin, PluginTestCase): open_mock.assert_called_once_with(ANY, open_anything()) with open(open_mock.call_args[0][0][0], "rb") as f: playlist = f.read().decode("utf-8") - assert ( - f'{os.path.dirname(self.item.path.decode("utf-8"))}\n' == playlist - ) + assert f"{self.item.filepath.parent}\n" == playlist def test_raw(self, open_mock): self.config["play"]["raw"] = True @@ -107,6 +105,19 @@ class PlayPluginTest(CleanupModulesMixin, PluginTestCase): open_mock.assert_called_once_with([self.item.path], "echo") + def test_pls_marker(self, open_mock): + self.config["play"]["command"] = ( + "echo --some params --playlist=$playlist --some-more params" + ) + + self.run_command("play", "nice") + + open_mock.assert_called_once + + commandstr = open_mock.call_args_list[0][0][1] + assert commandstr.startswith("echo --some params --playlist=") + assert commandstr.endswith(" --some-more params") + def test_not_found(self, open_mock): self.run_command("play", "not found") @@ -125,9 +136,7 @@ class PlayPluginTest(CleanupModulesMixin, PluginTestCase): self.config["play"]["warning_threshold"] = 1 self.other_item = self.add_item(title="another NiceTitle") - expected_playlist = "{}\n{}".format( - self.item.path.decode("utf-8"), self.other_item.path.decode("utf-8") - ) + expected_playlist = f"{self.item.filepath}\n{self.other_item.filepath}" with control_stdin("a"): self.run_and_assert( diff --git 
a/test/plugins/test_playlist.py b/test/plugins/test_playlist.py index ee4059b70..a8c145696 100644 --- a/test/plugins/test_playlist.py +++ b/test/plugins/test_playlist.py @@ -72,12 +72,10 @@ class PlaylistTestCase(PluginTestCase): self.lib.add(i3) self.lib.add_album([i3]) - self.playlist_dir = os.path.join( - os.fsdecode(self.temp_dir), "playlists" - ) - os.makedirs(self.playlist_dir) + self.playlist_dir = self.temp_dir_path / "playlists" + self.playlist_dir.mkdir(parents=True, exist_ok=True) self.config["directory"] = self.music_dir - self.config["playlist"]["playlist_dir"] = self.playlist_dir + self.config["playlist"]["playlist_dir"] = str(self.playlist_dir) self.setup_test() self.load_plugins() @@ -93,14 +91,7 @@ class PlaylistQueryTest: assert {i.title for i in results} == {"some item", "another item"} def test_path_query_with_absolute_paths_in_playlist(self): - q = "playlist:{}".format( - quote( - os.path.join( - self.playlist_dir, - "absolute.m3u", - ) - ) - ) + q = f"playlist:{quote(os.path.join(self.playlist_dir, 'absolute.m3u'))}" results = self.lib.items(q) assert {i.title for i in results} == {"some item", "another item"} @@ -110,14 +101,7 @@ class PlaylistQueryTest: assert {i.title for i in results} == {"some item", "another item"} def test_path_query_with_relative_paths_in_playlist(self): - q = "playlist:{}".format( - quote( - os.path.join( - self.playlist_dir, - "relative.m3u", - ) - ) - ) + q = f"playlist:{quote(os.path.join(self.playlist_dir, 'relative.m3u'))}" results = self.lib.items(q) assert {i.title for i in results} == {"some item", "another item"} @@ -127,15 +111,7 @@ class PlaylistQueryTest: assert set(results) == set() def test_path_query_with_nonexisting_playlist(self): - q = "playlist:{}".format( - quote( - os.path.join( - self.playlist_dir, - self.playlist_dir, - "nonexisting.m3u", - ) - ) - ) + q = f"playlist:{os.path.join(self.playlist_dir, 'nonexisting.m3u')!r}" results = self.lib.items(q) assert set(results) == set() @@ -143,20 
+119,22 @@ class PlaylistQueryTest: class PlaylistTestRelativeToLib(PlaylistQueryTest, PlaylistTestCase): def setup_test(self): with open(os.path.join(self.playlist_dir, "absolute.m3u"), "w") as f: - f.write( - "{}\n".format(os.path.join(self.music_dir, "a", "b", "c.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "nonexisting.mp3")) + f.writelines( + [ + os.path.join(self.music_dir, "a", "b", "c.mp3") + "\n", + os.path.join(self.music_dir, "d", "e", "f.mp3") + "\n", + os.path.join(self.music_dir, "nonexisting.mp3") + "\n", + ] ) with open(os.path.join(self.playlist_dir, "relative.m3u"), "w") as f: - f.write("{}\n".format(os.path.join("a", "b", "c.mp3"))) - f.write("{}\n".format(os.path.join("d", "e", "f.mp3"))) - f.write("{}\n".format("nonexisting.mp3")) + f.writelines( + [ + os.path.join("a", "b", "c.mp3") + "\n", + os.path.join("d", "e", "f.mp3") + "\n", + "nonexisting.mp3\n", + ] + ) self.config["playlist"]["relative_to"] = "library" @@ -164,20 +142,22 @@ class PlaylistTestRelativeToLib(PlaylistQueryTest, PlaylistTestCase): class PlaylistTestRelativeToDir(PlaylistQueryTest, PlaylistTestCase): def setup_test(self): with open(os.path.join(self.playlist_dir, "absolute.m3u"), "w") as f: - f.write( - "{}\n".format(os.path.join(self.music_dir, "a", "b", "c.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "nonexisting.mp3")) + f.writelines( + [ + os.path.join(self.music_dir, "a", "b", "c.mp3") + "\n", + os.path.join(self.music_dir, "d", "e", "f.mp3") + "\n", + os.path.join(self.music_dir, "nonexisting.mp3") + "\n", + ] ) with open(os.path.join(self.playlist_dir, "relative.m3u"), "w") as f: - f.write("{}\n".format(os.path.join("a", "b", "c.mp3"))) - f.write("{}\n".format(os.path.join("d", "e", "f.mp3"))) - f.write("{}\n".format("nonexisting.mp3")) + f.writelines( + [ + 
os.path.join("a", "b", "c.mp3") + "\n", + os.path.join("d", "e", "f.mp3") + "\n", + "nonexisting.mp3\n", + ] + ) self.config["playlist"]["relative_to"] = self.music_dir @@ -185,63 +165,58 @@ class PlaylistTestRelativeToDir(PlaylistQueryTest, PlaylistTestCase): class PlaylistTestRelativeToPls(PlaylistQueryTest, PlaylistTestCase): def setup_test(self): with open(os.path.join(self.playlist_dir, "absolute.m3u"), "w") as f: - f.write( - "{}\n".format(os.path.join(self.music_dir, "a", "b", "c.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "nonexisting.mp3")) + f.writelines( + [ + os.path.join(self.music_dir, "a", "b", "c.mp3") + "\n", + os.path.join(self.music_dir, "d", "e", "f.mp3") + "\n", + os.path.join(self.music_dir, "nonexisting.mp3") + "\n", + ] ) with open(os.path.join(self.playlist_dir, "relative.m3u"), "w") as f: - f.write( - "{}\n".format( + f.writelines( + [ os.path.relpath( os.path.join(self.music_dir, "a", "b", "c.mp3"), start=self.playlist_dir, ) - ) - ) - f.write( - "{}\n".format( + + "\n", os.path.relpath( os.path.join(self.music_dir, "d", "e", "f.mp3"), start=self.playlist_dir, ) - ) - ) - f.write( - "{}\n".format( + + "\n", os.path.relpath( os.path.join(self.music_dir, "nonexisting.mp3"), start=self.playlist_dir, ) - ) + + "\n", + ] ) self.config["playlist"]["relative_to"] = "playlist" - self.config["playlist"]["playlist_dir"] = self.playlist_dir + self.config["playlist"]["playlist_dir"] = str(self.playlist_dir) class PlaylistUpdateTest: def setup_test(self): with open(os.path.join(self.playlist_dir, "absolute.m3u"), "w") as f: - f.write( - "{}\n".format(os.path.join(self.music_dir, "a", "b", "c.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) - f.write( - "{}\n".format(os.path.join(self.music_dir, "nonexisting.mp3")) + f.writelines( + [ + os.path.join(self.music_dir, "a", "b", "c.mp3") + "\n", + 
os.path.join(self.music_dir, "d", "e", "f.mp3") + "\n", + os.path.join(self.music_dir, "nonexisting.mp3") + "\n", + ] ) with open(os.path.join(self.playlist_dir, "relative.m3u"), "w") as f: - f.write("{}\n".format(os.path.join("a", "b", "c.mp3"))) - f.write("{}\n".format(os.path.join("d", "e", "f.mp3"))) - f.write("{}\n".format("nonexisting.mp3")) + f.writelines( + [ + os.path.join("a", "b", "c.mp3") + "\n", + os.path.join("d", "e", "f.mp3") + "\n", + "nonexisting.mp3\n", + ] + ) self.config["playlist"]["auto"] = True self.config["playlist"]["relative_to"] = "library" @@ -251,9 +226,7 @@ class PlaylistTestItemMoved(PlaylistUpdateTest, PlaylistTestCase): def test_item_moved(self): # Emit item_moved event for an item that is in a playlist results = self.lib.items( - "path:{}".format( - quote(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) + f"path:{quote(os.path.join(self.music_dir, 'd', 'e', 'f.mp3'))}" ) item = results[0] beets.plugins.send( @@ -267,9 +240,7 @@ class PlaylistTestItemMoved(PlaylistUpdateTest, PlaylistTestCase): # Emit item_moved event for an item that is not in a playlist results = self.lib.items( - "path:{}".format( - quote(os.path.join(self.music_dir, "x", "y", "z.mp3")) - ) + f"path:{quote(os.path.join(self.music_dir, 'x', 'y', 'z.mp3'))}" ) item = results[0] beets.plugins.send( @@ -311,18 +282,14 @@ class PlaylistTestItemRemoved(PlaylistUpdateTest, PlaylistTestCase): def test_item_removed(self): # Emit item_removed event for an item that is in a playlist results = self.lib.items( - "path:{}".format( - quote(os.path.join(self.music_dir, "d", "e", "f.mp3")) - ) + f"path:{quote(os.path.join(self.music_dir, 'd', 'e', 'f.mp3'))}" ) item = results[0] beets.plugins.send("item_removed", item=item) # Emit item_removed event for an item that is not in a playlist results = self.lib.items( - "path:{}".format( - quote(os.path.join(self.music_dir, "x", "y", "z.mp3")) - ) + f"path:{quote(os.path.join(self.music_dir, 'x', 'y', 'z.mp3'))}" ) item = 
results[0] beets.plugins.send("item_removed", item=item) diff --git a/test/plugins/test_plexupdate.py b/test/plugins/test_plexupdate.py index f319db6ce..ab53d8c2e 100644 --- a/test/plugins/test_plexupdate.py +++ b/test/plugins/test_plexupdate.py @@ -29,7 +29,7 @@ class PlexUpdateTest(PluginTestCase): "</Directory>" '<Directory allowSync="0" art="/:/resources/artist-fanart.jpg" ' 'filters="1" refreshing="0" thumb="/:/resources/artist.png" ' - 'key="2" type="artist" title="' + escaped_section_name + '" ' + f'key="2" type="artist" title="{escaped_section_name}" ' 'composite="/library/sections/2/composite/1416929243" ' 'agent="com.plexapp.agents.lastfm" scanner="Plex Music Scanner" ' 'language="en" uuid="90897c95-b3bd-4778-a9c8-1f43cb78f047" ' diff --git a/test/plugins/test_plugin_mediafield.py b/test/plugins/test_plugin_mediafield.py index 898e891ce..84565b47b 100644 --- a/test/plugins/test_plugin_mediafield.py +++ b/test/plugins/test_plugin_mediafield.py @@ -43,7 +43,7 @@ list_field_extension = mediafile.ListMediaField( class ExtendedFieldTestMixin(BeetsTestCase): def _mediafile_fixture(self, name, extension="mp3"): - name = bytestring_path(name + "." + extension) + name = bytestring_path(f"{name}.{extension}") src = os.path.join(_common.RSRC, name) target = os.path.join(self.temp_dir, name) shutil.copy(syspath(src), syspath(target)) diff --git a/test/plugins/test_random.py b/test/plugins/test_random.py index 5bff1ee5e..9bcf8e59b 100644 --- a/test/plugins/test_random.py +++ b/test/plugins/test_random.py @@ -69,7 +69,7 @@ class RandomTest(TestHelper, unittest.TestCase): # Print a histogram (useful for debugging). 
if histogram: for i in range(len(self.items)): - print("{:2d} {}".format(i, "*" * positions.count(i))) + print(f"{i:2d} {'*' * positions.count(i)}") return self._stats(positions) mean1, stdev1, median1 = experiment("artist") diff --git a/test/plugins/test_replace.py b/test/plugins/test_replace.py new file mode 100644 index 000000000..a247e317a --- /dev/null +++ b/test/plugins/test_replace.py @@ -0,0 +1,115 @@ +import shutil +from pathlib import Path + +import pytest +from mediafile import MediaFile + +from beets import ui +from beets.test import _common +from beetsplug.replace import ReplacePlugin + +replace = ReplacePlugin() + + +class TestReplace: + @pytest.fixture(autouse=True) + def _fake_dir(self, tmp_path): + self.fake_dir = tmp_path + + @pytest.fixture(autouse=True) + def _fake_file(self, tmp_path): + self.fake_file = tmp_path + + def test_path_is_dir(self): + fake_directory = self.fake_dir / "fakeDir" + fake_directory.mkdir() + with pytest.raises(ui.UserError): + replace.file_check(fake_directory) + + def test_path_is_unsupported_file(self): + fake_file = self.fake_file / "fakefile.txt" + fake_file.write_text("test", encoding="utf-8") + with pytest.raises(ui.UserError): + replace.file_check(fake_file) + + def test_path_is_supported_file(self): + dest = self.fake_file / "full.mp3" + src = Path(_common.RSRC.decode()) / "full.mp3" + shutil.copyfile(src, dest) + + mediafile = MediaFile(dest) + mediafile.albumartist = "AAA" + mediafile.disctitle = "DDD" + mediafile.genres = ["a", "b", "c"] + mediafile.composer = None + mediafile.save() + + replace.file_check(Path(str(dest))) + + def test_select_song_valid_choice(self, monkeypatch, capfd): + songs = ["Song A", "Song B", "Song C"] + monkeypatch.setattr("builtins.input", lambda _: "2") + + selected_song = replace.select_song(songs) + + captured = capfd.readouterr() + + assert "1. Song A" in captured.out + assert "2. Song B" in captured.out + assert "3. 
Song C" in captured.out + assert selected_song == "Song B" + + def test_select_song_cancel(self, monkeypatch): + songs = ["Song A", "Song B", "Song C"] + monkeypatch.setattr("builtins.input", lambda _: "0") + + selected_song = replace.select_song(songs) + + assert selected_song is None + + def test_select_song_invalid_then_valid(self, monkeypatch, capfd): + songs = ["Song A", "Song B", "Song C"] + inputs = iter(["invalid", "4", "3"]) + monkeypatch.setattr("builtins.input", lambda _: next(inputs)) + + selected_song = replace.select_song(songs) + + captured = capfd.readouterr() + + assert "Invalid input. Please type in a number." in captured.out + assert ( + "Invalid choice. Please enter a number between 1 and 3." + in captured.out + ) + assert selected_song == "Song C" + + def test_confirm_replacement_file_not_exist(self): + class Song: + path = b"test123321.txt" + + song = Song() + + with pytest.raises(ui.UserError): + replace.confirm_replacement("test", song) + + def test_confirm_replacement_yes(self, monkeypatch): + src = Path(_common.RSRC.decode()) / "full.mp3" + monkeypatch.setattr("builtins.input", lambda _: "YES ") + + class Song: + path = str(src).encode() + + song = Song() + + assert replace.confirm_replacement("test", song) is True + + def test_confirm_replacement_no(self, monkeypatch): + src = Path(_common.RSRC.decode()) / "full.mp3" + monkeypatch.setattr("builtins.input", lambda _: "test123") + + class Song: + path = str(src).encode() + + song = Song() + + assert replace.confirm_replacement("test", song) is False diff --git a/test/plugins/test_replaygain.py b/test/plugins/test_replaygain.py index 091298766..094349b25 100644 --- a/test/plugins/test_replaygain.py +++ b/test/plugins/test_replaygain.py @@ -204,9 +204,7 @@ class ReplayGainCliTest: # This test is a lot less interesting if the backend cannot write # both tag types. 
self.skipTest( - "r128 tags for opus not supported on backend {}".format( - self.backend - ) + f"r128 tags for opus not supported on backend {self.backend}" ) album_rg = self._add_album(1) @@ -263,9 +261,7 @@ class ReplayGainCliTest: def test_cli_writes_only_r128_tags(self): if not self.has_r128_support: self.skipTest( - "r128 tags for opus not supported on backend {}".format( - self.backend - ) + f"r128 tags for opus not supported on backend {self.backend}" ) album = self._add_album(2, ext="opus") @@ -299,9 +295,7 @@ class ReplayGainCliTest: def test_r128_targetlevel_has_effect(self): if not self.has_r128_support: self.skipTest( - "r128 tags for opus not supported on backend {}".format( - self.backend - ) + f"r128 tags for opus not supported on backend {self.backend}" ) album = self._add_album(1, ext="opus") diff --git a/test/plugins/test_scrub.py b/test/plugins/test_scrub.py new file mode 100644 index 000000000..129d91a22 --- /dev/null +++ b/test/plugins/test_scrub.py @@ -0,0 +1,37 @@ +import os + +from mediafile import MediaFile + +from beets.test.helper import AsIsImporterMixin, ImportTestCase, PluginMixin + + +class ScrubbedImportTest(AsIsImporterMixin, PluginMixin, ImportTestCase): + db_on_disk = True + plugin = "scrub" + + def test_tags_not_scrubbed(self): + with self.configure_plugin({"auto": False}): + self.run_asis_importer(write=True) + + for item in self.lib.items(): + imported_file = MediaFile(os.path.join(item.path)) + assert imported_file.artist == "Tag Artist" + assert imported_file.album == "Tag Album" + + def test_tags_restored(self): + with self.configure_plugin({"auto": True}): + self.run_asis_importer(write=True) + + for item in self.lib.items(): + imported_file = MediaFile(os.path.join(item.path)) + assert imported_file.artist == "Tag Artist" + assert imported_file.album == "Tag Album" + + def test_tags_not_restored(self): + with self.configure_plugin({"auto": True}): + self.run_asis_importer(write=False) + + for item in self.lib.items(): + 
imported_file = MediaFile(os.path.join(item.path)) + assert imported_file.artist is None + assert imported_file.album is None diff --git a/test/plugins/test_smartplaylist.py b/test/plugins/test_smartplaylist.py index ade745c17..8ec2c74ce 100644 --- a/test/plugins/test_smartplaylist.py +++ b/test/plugins/test_smartplaylist.py @@ -13,7 +13,8 @@ # included in all copies or substantial portions of the Software. -from os import fsdecode, path, remove +from os import path, remove +from pathlib import Path from shutil import rmtree from tempfile import mkdtemp from unittest.mock import MagicMock, Mock, PropertyMock @@ -21,20 +22,19 @@ from unittest.mock import MagicMock, Mock, PropertyMock import pytest from beets import config -from beets.dbcore import OrQuery from beets.dbcore.query import FixedFieldSort, MultipleSort, NullSort from beets.library import Album, Item, parse_query_string from beets.test.helper import BeetsTestCase, PluginTestCase from beets.ui import UserError -from beets.util import CHAR_REPLACE, bytestring_path, syspath +from beets.util import CHAR_REPLACE, syspath from beetsplug.smartplaylist import SmartPlaylistPlugin class SmartPlaylistTest(BeetsTestCase): def test_build_queries(self): spl = SmartPlaylistPlugin() - assert spl._matched_playlists is None - assert spl._unmatched_playlists is None + assert spl._matched_playlists == set() + assert spl._unmatched_playlists == set() config["smartplaylist"]["playlists"].set([]) spl.build_queries() @@ -53,16 +53,17 @@ class SmartPlaylistTest(BeetsTestCase): foo_foo = parse_query_string("FOO foo", Item) baz_baz = parse_query_string("BAZ baz", Item) baz_baz2 = parse_query_string("BAZ baz", Album) - bar_bar = OrQuery( - ( - parse_query_string("BAR bar1", Album)[0], - parse_query_string("BAR bar2", Album)[0], - ) + # Multiple queries are now stored as a tuple of (query, sort) tuples + bar_queries = tuple( + [ + parse_query_string("BAR bar1", Album), + parse_query_string("BAR bar2", Album), + ] ) assert 
spl._unmatched_playlists == { ("foo", foo_foo, (None, None)), ("baz", baz_baz, baz_baz2), - ("bar", (None, None), (bar_bar, None)), + ("bar", (None, None), (bar_queries, None)), } def test_build_queries_with_sorts(self): @@ -85,19 +86,28 @@ class SmartPlaylistTest(BeetsTestCase): ) spl.build_queries() - sorts = {name: sort for name, (_, sort), _ in spl._unmatched_playlists} + + # Multiple queries now return a tuple of (query, sort) tuples, not combined + sorts = {} + for name, (query_data, sort), _ in spl._unmatched_playlists: + if isinstance(query_data, tuple): + # Tuple of queries - each has its own sort + sorts[name] = [s for _, s in query_data] + else: + sorts[name] = sort sort = FixedFieldSort # short cut since we're only dealing with this assert sorts["no_sort"] == NullSort() assert sorts["one_sort"] == sort("year") - assert sorts["only_empty_sorts"] is None - assert sorts["one_non_empty_sort"] == sort("year") - assert sorts["multiple_sorts"] == MultipleSort( - [sort("year"), sort("genre", False)] - ) - assert sorts["mixed"] == MultipleSort( - [sort("year"), sort("genre"), sort("id", False)] - ) + # Multiple queries store individual sorts in the tuple + assert all(isinstance(x, NullSort) for x in sorts["only_empty_sorts"]) + assert sorts["one_non_empty_sort"] == [sort("year"), NullSort()] + assert sorts["multiple_sorts"] == [sort("year"), sort("genre", False)] + assert sorts["mixed"] == [ + sort("year"), + NullSort(), + MultipleSort([sort("genre"), sort("id", False)]), + ] def test_matches(self): spl = SmartPlaylistPlugin() @@ -121,6 +131,15 @@ class SmartPlaylistTest(BeetsTestCase): assert spl.matches(i, query, a_query) assert spl.matches(a, query, a_query) + # Test with list of queries + q1 = Mock() + q1.match.return_value = False + q2 = Mock() + q2.match.side_effect = {i: True}.__getitem__ + queries_list = [(q1, None), (q2, None)] + assert spl.matches(i, queries_list, None) + assert not spl.matches(a, queries_list, None) + def test_db_changes(self): spl = 
SmartPlaylistPlugin() @@ -163,11 +182,11 @@ class SmartPlaylistTest(BeetsTestCase): q = Mock() a_q = Mock() pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) - spl._matched_playlists = [pl] + spl._matched_playlists = {pl} - dir = bytestring_path(mkdtemp()) + dir = mkdtemp() config["smartplaylist"]["relative_to"] = False - config["smartplaylist"]["playlist_dir"] = fsdecode(dir) + config["smartplaylist"]["playlist_dir"] = str(dir) try: spl.update_playlists(lib) except Exception: @@ -177,10 +196,9 @@ class SmartPlaylistTest(BeetsTestCase): lib.items.assert_called_once_with(q, None) lib.albums.assert_called_once_with(a_q, None) - m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") - self.assertExists(m3u_filepath) - with open(syspath(m3u_filepath), "rb") as f: - content = f.read() + m3u_filepath = Path(dir, "ta_ga_da-my_playlist_.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() rmtree(syspath(dir)) assert content == b"/tagada.mp3\n" @@ -206,13 +224,13 @@ class SmartPlaylistTest(BeetsTestCase): q = Mock() a_q = Mock() pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) - spl._matched_playlists = [pl] + spl._matched_playlists = {pl} - dir = bytestring_path(mkdtemp()) + dir = mkdtemp() config["smartplaylist"]["output"] = "extm3u" config["smartplaylist"]["prefix"] = "http://beets:8337/files" config["smartplaylist"]["relative_to"] = False - config["smartplaylist"]["playlist_dir"] = fsdecode(dir) + config["smartplaylist"]["playlist_dir"] = str(dir) try: spl.update_playlists(lib) except Exception: @@ -222,17 +240,15 @@ class SmartPlaylistTest(BeetsTestCase): lib.items.assert_called_once_with(q, None) lib.albums.assert_called_once_with(a_q, None) - m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") - self.assertExists(m3u_filepath) - with open(syspath(m3u_filepath), "rb") as f: - content = f.read() + m3u_filepath = Path(dir, "ta_ga_da-my_playlist_.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() 
rmtree(syspath(dir)) - assert ( - content - == b"#EXTM3U\n" - + b"#EXTINF:300,fake artist - fake title\n" - + b"http://beets:8337/files/tagada.mp3\n" + assert content == ( + b"#EXTM3U\n" + b"#EXTINF:300,fake artist - fake title\n" + b"http://beets:8337/files/tagada.mp3\n" ) def test_playlist_update_output_extm3u_fields(self): @@ -258,12 +274,12 @@ class SmartPlaylistTest(BeetsTestCase): q = Mock() a_q = Mock() pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) - spl._matched_playlists = [pl] + spl._matched_playlists = {pl} - dir = bytestring_path(mkdtemp()) + dir = mkdtemp() config["smartplaylist"]["output"] = "extm3u" config["smartplaylist"]["relative_to"] = False - config["smartplaylist"]["playlist_dir"] = fsdecode(dir) + config["smartplaylist"]["playlist_dir"] = str(dir) config["smartplaylist"]["fields"] = ["id", "genre"] try: spl.update_playlists(lib) @@ -274,17 +290,15 @@ class SmartPlaylistTest(BeetsTestCase): lib.items.assert_called_once_with(q, None) lib.albums.assert_called_once_with(a_q, None) - m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") - self.assertExists(m3u_filepath) - with open(syspath(m3u_filepath), "rb") as f: - content = f.read() + m3u_filepath = Path(dir, "ta_ga_da-my_playlist_.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() rmtree(syspath(dir)) - assert ( - content - == b"#EXTM3U\n" - + b'#EXTINF:300 id="456" genre="Fake%20Genre",Fake Artist - fake Title\n' - + b"/tagada.mp3\n" + assert content == ( + b"#EXTM3U\n" + b'#EXTINF:300 id="456" genre="Fake%20Genre",Fake Artist - fake Title\n' + b"/tagada.mp3\n" ) def test_playlist_update_uri_format(self): @@ -305,12 +319,12 @@ class SmartPlaylistTest(BeetsTestCase): q = Mock() a_q = Mock() pl = b"$title-my<playlist>.m3u", (q, None), (a_q, None) - spl._matched_playlists = [pl] + spl._matched_playlists = {pl} - dir = bytestring_path(mkdtemp()) + dir = mkdtemp() tpl = "http://beets:8337/item/$id/file" config["smartplaylist"]["uri_format"] = tpl - 
config["smartplaylist"]["playlist_dir"] = fsdecode(dir) + config["smartplaylist"]["playlist_dir"] = dir # The following options should be ignored when uri_format is set config["smartplaylist"]["relative_to"] = "/data" config["smartplaylist"]["prefix"] = "/prefix" @@ -324,14 +338,125 @@ class SmartPlaylistTest(BeetsTestCase): lib.items.assert_called_once_with(q, None) lib.albums.assert_called_once_with(a_q, None) - m3u_filepath = path.join(dir, b"ta_ga_da-my_playlist_.m3u") - self.assertExists(m3u_filepath) - with open(syspath(m3u_filepath), "rb") as f: - content = f.read() + m3u_filepath = Path(dir, "ta_ga_da-my_playlist_.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() rmtree(syspath(dir)) assert content == b"http://beets:8337/item/3/file\n" + def test_playlist_update_multiple_queries_preserve_order(self): + """Test that multiple queries preserve their order in the playlist.""" + spl = SmartPlaylistPlugin() + + # Create three mock items + i1 = Mock(path=b"/item1.mp3", id=1) + i1.evaluate_template.return_value = "ordered.m3u" + i2 = Mock(path=b"/item2.mp3", id=2) + i2.evaluate_template.return_value = "ordered.m3u" + i3 = Mock(path=b"/item3.mp3", id=3) + i3.evaluate_template.return_value = "ordered.m3u" + + lib = Mock() + lib.replacements = CHAR_REPLACE + lib.albums.return_value = [] + + # Set up lib.items to return different items for different queries + q1 = Mock() + q2 = Mock() + q3 = Mock() + + def items_side_effect(query, sort): + if query == q1: + return [i1] + elif query == q2: + return [i2] + elif query == q3: + return [i3] + return [] + + lib.items.side_effect = items_side_effect + + # Create playlist with multiple queries (stored as tuple) + queries_and_sorts = ((q1, None), (q2, None), (q3, None)) + pl = "ordered.m3u", (queries_and_sorts, None), (None, None) + spl._matched_playlists = {pl} + + dir = mkdtemp() + config["smartplaylist"]["relative_to"] = False + config["smartplaylist"]["playlist_dir"] = str(dir) + try: + 
spl.update_playlists(lib) + except Exception: + rmtree(syspath(dir)) + raise + + # Verify that lib.items was called with queries in the correct order + assert lib.items.call_count == 3 + lib.items.assert_any_call(q1, None) + lib.items.assert_any_call(q2, None) + lib.items.assert_any_call(q3, None) + + m3u_filepath = Path(dir, "ordered.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() + rmtree(syspath(dir)) + + # Items should be in order: i1, i2, i3 + assert content == b"/item1.mp3\n/item2.mp3\n/item3.mp3\n" + + def test_playlist_update_multiple_queries_no_duplicates(self): + """Test that items matching multiple queries only appear once.""" + spl = SmartPlaylistPlugin() + + # Create two mock items + i1 = Mock(path=b"/item1.mp3", id=1) + i1.evaluate_template.return_value = "dedup.m3u" + i2 = Mock(path=b"/item2.mp3", id=2) + i2.evaluate_template.return_value = "dedup.m3u" + + lib = Mock() + lib.replacements = CHAR_REPLACE + lib.albums.return_value = [] + + # Set up lib.items so both queries return overlapping items + q1 = Mock() + q2 = Mock() + + def items_side_effect(query, sort): + if query == q1: + return [i1, i2] # Both items match q1 + elif query == q2: + return [i2] # Only i2 matches q2 + return [] + + lib.items.side_effect = items_side_effect + + # Create playlist with multiple queries (stored as tuple) + queries_and_sorts = ((q1, None), (q2, None)) + pl = "dedup.m3u", (queries_and_sorts, None), (None, None) + spl._matched_playlists = {pl} + + dir = mkdtemp() + config["smartplaylist"]["relative_to"] = False + config["smartplaylist"]["playlist_dir"] = str(dir) + try: + spl.update_playlists(lib) + except Exception: + rmtree(syspath(dir)) + raise + + m3u_filepath = Path(dir, "dedup.m3u") + assert m3u_filepath.exists() + content = m3u_filepath.read_bytes() + rmtree(syspath(dir)) + + # i2 should only appear once even though it matches both queries + # Order should be: i1 (from q1), i2 (from q1, skipped in q2) + assert content == 
b"/item1.mp3\n/item2.mp3\n" + # Verify i2 is not duplicated + assert content.count(b"/item2.mp3") == 1 + class SmartPlaylistCLITest(PluginTestCase): plugin = "smartplaylist" @@ -346,22 +471,20 @@ class SmartPlaylistCLITest(PluginTestCase): {"name": "all.m3u", "query": ""}, ] ) - config["smartplaylist"]["playlist_dir"].set(fsdecode(self.temp_dir)) + config["smartplaylist"]["playlist_dir"].set(str(self.temp_dir_path)) def test_splupdate(self): with pytest.raises(UserError): self.run_with_output("splupdate", "tagada") self.run_with_output("splupdate", "my_playlist") - m3u_path = path.join(self.temp_dir, b"my_playlist.m3u") - self.assertExists(m3u_path) - with open(syspath(m3u_path), "rb") as f: - assert f.read() == self.item.path + b"\n" + m3u_path = self.temp_dir_path / "my_playlist.m3u" + assert m3u_path.exists() + assert m3u_path.read_bytes() == self.item.path + b"\n" remove(syspath(m3u_path)) self.run_with_output("splupdate", "my_playlist.m3u") - with open(syspath(m3u_path), "rb") as f: - assert f.read() == self.item.path + b"\n" + assert m3u_path.read_bytes() == self.item.path + b"\n" remove(syspath(m3u_path)) self.run_with_output("splupdate") diff --git a/test/plugins/test_spotify.py b/test/plugins/test_spotify.py index a2336df10..6e322ca0b 100644 --- a/test/plugins/test_spotify.py +++ b/test/plugins/test_spotify.py @@ -7,7 +7,7 @@ import responses from beets.library import Item from beets.test import _common -from beets.test.helper import BeetsTestCase +from beets.test.helper import PluginTestCase from beetsplug import spotify @@ -23,10 +23,11 @@ def _params(url): return parse_qs(urlparse(url).query) -class SpotifyPluginTest(BeetsTestCase): +class SpotifyPluginTest(PluginTestCase): + plugin = "spotify" + @responses.activate def setUp(self): - super().setUp() responses.add( responses.POST, spotify.SpotifyPlugin.oauth_token_url, @@ -39,6 +40,7 @@ class SpotifyPluginTest(BeetsTestCase): "scope": "", }, ) + super().setUp() self.spotify = spotify.SpotifyPlugin() 
opts = ArgumentsMock("list", False) self.spotify._parse_opts(opts) @@ -80,8 +82,8 @@ class SpotifyPluginTest(BeetsTestCase): params = _params(responses.calls[0].request.url) query = params["q"][0] assert "duifhjslkef" in query - assert "artist:ujydfsuihse" in query - assert "album:lkajsdflakjsd" in query + assert "artist:'ujydfsuihse'" in query + assert "album:'lkajsdflakjsd'" in query assert params["type"] == ["track"] @responses.activate @@ -115,8 +117,8 @@ class SpotifyPluginTest(BeetsTestCase): params = _params(responses.calls[0].request.url) query = params["q"][0] assert "Happy" in query - assert "artist:Pharrell Williams" in query - assert "album:Despicable Me 2" in query + assert "artist:'Pharrell Williams'" in query + assert "album:'Despicable Me 2'" in query assert params["type"] == ["track"] @responses.activate @@ -130,7 +132,7 @@ class SpotifyPluginTest(BeetsTestCase): responses.add( responses.GET, - spotify.SpotifyPlugin.track_url + "6NPVjNh8Jhru9xOmyQigds", + f"{spotify.SpotifyPlugin.track_url}6NPVjNh8Jhru9xOmyQigds", body=response_body, status=200, content_type="application/json", @@ -143,7 +145,7 @@ class SpotifyPluginTest(BeetsTestCase): responses.add( responses.GET, - spotify.SpotifyPlugin.album_url + "5l3zEmMrOhOzG8d8s83GOL", + f"{spotify.SpotifyPlugin.album_url}5l3zEmMrOhOzG8d8s83GOL", body=response_body, status=200, content_type="application/json", @@ -176,3 +178,132 @@ class SpotifyPluginTest(BeetsTestCase): results = self.spotify._match_library_tracks(self.lib, "Happy") assert 1 == len(results) assert "6NPVjNh8Jhru9xOmyQigds" == results[0]["id"] + + @responses.activate + def test_japanese_track(self): + """Ensure non-ASCII characters remain unchanged in search queries""" + + # Path to the mock JSON file for the Japanese track + json_file = os.path.join( + _common.RSRC, b"spotify", b"japanese_track_request.json" + ) + + # Load the mock JSON response + with open(json_file, "rb") as f: + response_body = f.read() + + # Mock Spotify Search API 
response + responses.add( + responses.GET, + spotify.SpotifyPlugin.search_url, + body=response_body, + status=200, + content_type="application/json", + ) + + # Create a mock item with Japanese metadata + item = Item( + mb_trackid="56789", + album="盗作", + albumartist="ヨルシカ", + title="思想犯", + length=10, + ) + item.add(self.lib) + + # Search without ascii encoding + + with self.configure_plugin( + { + "search_query_ascii": False, + } + ): + assert self.spotify.config["search_query_ascii"].get() is False + # Call the method to match library tracks + results = self.spotify._match_library_tracks(self.lib, item.title) + + # Assertions to verify results + assert results is not None + assert 1 == len(results) + assert results[0]["name"] == item.title + assert results[0]["artists"][0]["name"] == item.albumartist + assert results[0]["album"]["name"] == item.album + + # Verify search query parameters + params = _params(responses.calls[0].request.url) + query = params["q"][0] + assert item.title in query + assert f"artist:'{item.albumartist}'" in query + assert f"album:'{item.album}'" in query + assert not query.isascii() + + # Is not found in the library if ascii encoding is enabled + with self.configure_plugin( + { + "search_query_ascii": True, + } + ): + assert self.spotify.config["search_query_ascii"].get() is True + results = self.spotify._match_library_tracks(self.lib, item.title) + params = _params(responses.calls[1].request.url) + query = params["q"][0] + + assert query.isascii() + + @responses.activate + def test_multiartist_album_and_track(self): + """Tests if plugin is able to map multiple artists in an album and + track info correctly""" + + # Mock the Spotify 'Get Album' call + json_file = os.path.join( + _common.RSRC, b"spotify", b"multiartist_album.json" + ) + with open(json_file, "rb") as f: + album_response_body = f.read() + + responses.add( + responses.GET, + f"{spotify.SpotifyPlugin.album_url}0yhKyyjyKXWUieJ4w1IAEa", + body=album_response_body, + status=200, 
+ content_type="application/json", + ) + + # Mock the Spotify 'Get Track' call + json_file = os.path.join( + _common.RSRC, b"spotify", b"multiartist_track.json" + ) + with open(json_file, "rb") as f: + track_response_body = f.read() + + responses.add( + responses.GET, + f"{spotify.SpotifyPlugin.track_url}6sjZfVJworBX6TqyjkxIJ1", + body=track_response_body, + status=200, + content_type="application/json", + ) + + album_info = self.spotify.album_for_id("0yhKyyjyKXWUieJ4w1IAEa") + assert album_info is not None + assert album_info.artist == "Project Skylate, Sugar Shrill" + assert album_info.artists == ["Project Skylate", "Sugar Shrill"] + assert album_info.artist_id == "6m8MRXIVKb6wQaPlBIDMr1" + assert album_info.artists_ids == [ + "6m8MRXIVKb6wQaPlBIDMr1", + "4kkAIoQmNT5xEoNH5BuQLe", + ] + + assert len(album_info.tracks) == 1 + assert album_info.tracks[0].artist == "Foo, Bar" + assert album_info.tracks[0].artists == ["Foo", "Bar"] + assert album_info.tracks[0].artist_id == "12345" + assert album_info.tracks[0].artists_ids == ["12345", "67890"] + + track_info = self.spotify.track_for_id("6sjZfVJworBX6TqyjkxIJ1") + assert track_info is not None + assert track_info.artist == "Foo, Bar" + assert track_info.artists == ["Foo", "Bar"] + assert track_info.artist_id == "12345" + assert track_info.artists_ids == ["12345", "67890"] diff --git a/test/plugins/test_subsonicupdate.py b/test/plugins/test_subsonicupdate.py index 891f75cb7..183c2bd67 100644 --- a/test/plugins/test_subsonicupdate.py +++ b/test/plugins/test_subsonicupdate.py @@ -1,11 +1,11 @@ """Tests for the 'subsonic' plugin.""" +import unittest from urllib.parse import parse_qs, urlparse import responses from beets import config -from beets.test.helper import BeetsTestCase from beetsplug import subsonicupdate @@ -24,7 +24,7 @@ def _params(url): return parse_qs(urlparse(url).query) -class SubsonicPluginTest(BeetsTestCase): +class SubsonicPluginTest(unittest.TestCase): """Test class for subsonicupdate.""" 
@responses.activate diff --git a/test/plugins/test_substitute.py b/test/plugins/test_substitute.py index 48014e231..fc3789c0b 100644 --- a/test/plugins/test_substitute.py +++ b/test/plugins/test_substitute.py @@ -55,8 +55,10 @@ class SubstitutePluginTest(PluginTestCase): [ ("King Creosote & Jon Hopkins", "King Creosote"), ( - "Michael Hurley, The Holy Modal Rounders, Jeffrey Frederick & " - + "The Clamtones", + ( + "Michael Hurley, The Holy Modal Rounders, Jeffrey" + " Frederick & The Clamtones" + ), "Michael Hurley", ), ("James Yorkston and the Athletes", "James Yorkston"), diff --git a/test/plugins/test_the.py b/test/plugins/test_the.py index bf073301b..c8f919de2 100644 --- a/test/plugins/test_the.py +++ b/test/plugins/test_the.py @@ -1,11 +1,12 @@ """Tests for the 'the' plugin""" +import unittest + from beets import config -from beets.test.helper import BeetsTestCase from beetsplug.the import FORMAT, PATTERN_A, PATTERN_THE, ThePlugin -class ThePluginTest(BeetsTestCase): +class ThePluginTest(unittest.TestCase): def test_unthe_with_default_patterns(self): assert ThePlugin().unthe("", PATTERN_THE) == "" assert ( diff --git a/test/plugins/test_thumbnails.py b/test/plugins/test_thumbnails.py index bd3e22714..fadac34c2 100644 --- a/test/plugins/test_thumbnails.py +++ b/test/plugins/test_thumbnails.py @@ -232,8 +232,7 @@ class ThumbnailsTest(BeetsTestCase): ) @patch("beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok", Mock()) - @patch("beetsplug.thumbnails.decargs") - def test_invokations(self, mock_decargs): + def test_invokations(self): plugin = ThumbnailsPlugin() plugin.process_album = Mock() album = Mock() @@ -243,7 +242,6 @@ class ThumbnailsTest(BeetsTestCase): album2 = Mock() lib.albums.return_value = [album, album2] plugin.process_query(lib, Mock(), None) - lib.albums.assert_called_once_with(mock_decargs.return_value) plugin.process_album.assert_has_calls( [call(album), call(album2)], any_order=True ) diff --git a/test/plugins/test_titlecase.py 
b/test/plugins/test_titlecase.py new file mode 100644 index 000000000..c25661bbf --- /dev/null +++ b/test/plugins/test_titlecase.py @@ -0,0 +1,426 @@ +# This file is part of beets. +# Copyright 2025, Henry Oberholtzer +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. + +"""Tests for the 'titlecase' plugin""" + +from unittest.mock import patch + +from beets.autotag.hooks import AlbumInfo, TrackInfo +from beets.importer import ImportSession, ImportTask +from beets.library import Item +from beets.test.helper import PluginTestCase +from beetsplug.titlecase import TitlecasePlugin + +titlecase_fields_testcases = [ + ( + { + "fields": [ + "artist", + "albumartist", + "title", + "album", + "mb_albumd", + "year", + ], + "force_lowercase": True, + }, + Item( + artist="OPHIDIAN", + albumartist="ophiDIAN", + format="CD", + year=2003, + album="BLACKBOX", + title="KhAmElEoN", + ), + Item( + artist="Ophidian", + albumartist="Ophidian", + format="CD", + year=2003, + album="Blackbox", + title="Khameleon", + ), + ), +] + + +class TestTitlecasePlugin(PluginTestCase): + plugin = "titlecase" + preload_plugin = False + + def test_auto(self): + """Ensure automatic processing gets assigned""" + with self.configure_plugin({"auto": True, "after_choice": True}): + assert callable(TitlecasePlugin().import_stages[0]) + with self.configure_plugin({"auto": False, "after_choice": False}): + assert len(TitlecasePlugin().import_stages) == 0 + with 
self.configure_plugin({"auto": False, "after_choice": True}): + assert len(TitlecasePlugin().import_stages) == 0 + + def test_basic_titlecase(self): + """Check that default behavior is as expected.""" + testcases = [ + ("a", "A"), + ("PENDULUM", "Pendulum"), + ("Aaron-carl", "Aaron-Carl"), + ("LTJ bukem", "LTJ Bukem"), + ("(original mix)", "(Original Mix)"), + ("ALL CAPS TITLE", "All Caps Title"), + ] + for testcase in testcases: + given, expected = testcase + assert TitlecasePlugin().titlecase(given) == expected + + def test_small_first_last(self): + """Check the behavior for supporting small first last""" + testcases = [ + (True, "In a Silent Way", "In a Silent Way"), + (False, "In a Silent Way", "in a Silent Way"), + ] + for testcase in testcases: + sfl, given, expected = testcase + cfg = {"small_first_last": sfl} + with self.configure_plugin(cfg): + assert TitlecasePlugin().titlecase(given) == expected + + def test_preserve(self): + """Test using given strings to preserve case""" + preserve_list = [ + "easyFun", + "A.D.O.R", + "D'Angelo", + "ABBA", + "LaTeX", + "O.R.B", + "PinkPantheress", + "THE PSYCHIC ED RUSH", + "LTJ Bukem", + ] + for word in preserve_list: + with self.configure_plugin({"preserve": preserve_list}): + assert TitlecasePlugin().titlecase(word.upper()) == word + assert TitlecasePlugin().titlecase(word.lower()) == word + + def test_separators(self): + testcases = [ + ([], "it / a / in / of / to / the", "It / a / in / of / to / The"), + (["/"], "it / the test", "It / The Test"), + ( + ["/"], + "it / a / in / of / to / the", + "It / A / In / Of / To / The", + ), + (["/"], "//it/a/in/of/to/the", "//It/A/In/Of/To/The"), + ( + ["/", ";", "|"], + "it ; a / in | of / to | the", + "It ; A / In | Of / To | The", + ), + ] + for testcase in testcases: + separators, given, expected = testcase + with self.configure_plugin({"separators": separators}): + assert TitlecasePlugin().titlecase(given) == expected + + def test_all_caps(self): + testcases = [ + (True, 
"Unaffected", "Unaffected"), + (True, "RBMK1000", "RBMK1000"), + (False, "RBMK1000", "Rbmk1000"), + (True, "P A R I S!", "P A R I S!"), + (True, "pillow dub...", "Pillow Dub..."), + (False, "P A R I S!", "P a R I S!"), + ] + for testcase in testcases: + all_caps, given, expected = testcase + with self.configure_plugin({"all_caps": all_caps}): + assert TitlecasePlugin().titlecase(given) == expected + + def test_all_lowercase(self): + testcases = [ + (True, "Unaffected", "Unaffected"), + (True, "RBMK1000", "Rbmk1000"), + (True, "pillow dub...", "pillow dub..."), + (False, "pillow dub...", "Pillow Dub..."), + ] + for testcase in testcases: + all_lowercase, given, expected = testcase + with self.configure_plugin({"all_lowercase": all_lowercase}): + assert TitlecasePlugin().titlecase(given) == expected + + def test_received_info_handler(self): + testcases = [ + ( + TrackInfo( + album="test album", + artist_credit="test artist credit", + artists=["artist one", "artist two"], + ), + TrackInfo( + album="Test Album", + artist_credit="Test Artist Credit", + artists=["Artist One", "Artist Two"], + ), + ), + ( + AlbumInfo( + tracks=[ + TrackInfo( + album="test album", + artist_credit="test artist credit", + artists=["artist one", "artist two"], + ) + ], + album="test album", + artist_credit="test artist credit", + artists=["artist one", "artist two"], + ), + AlbumInfo( + tracks=[ + TrackInfo( + album="Test Album", + artist_credit="Test Artist Credit", + artists=["Artist One", "Artist Two"], + ) + ], + album="Test Album", + artist_credit="Test Artist Credit", + artists=["Artist One", "Artist Two"], + ), + ), + ] + cfg = {"fields": ["album", "artist_credit", "artists"]} + for testcase in testcases: + given, expected = testcase + with self.configure_plugin(cfg): + TitlecasePlugin().received_info_handler(given) + assert given == expected + + def test_titlecase_fields(self): + testcases = [ + # Test with preserve, replace, and mb_albumid + # Test with the_artist + ( + { + 
"preserve": ["D'Angelo"], + "replace": [("’", "'")], + "fields": ["artist", "albumartist", "mb_albumid"], + }, + Item( + artist="d’angelo and the vanguard", + mb_albumid="ab140e13-7b36-402a-a528-b69e3dee38a8", + albumartist="d’angelo", + format="CD", + album="the black messiah", + title="Till It's Done (Tutu)", + ), + Item( + artist="D'Angelo and The Vanguard", + mb_albumid="Ab140e13-7b36-402a-A528-B69e3dee38a8", + albumartist="D'Angelo", + format="CD", + album="the black messiah", + title="Till It's Done (Tutu)", + ), + ), + # Test with force_lowercase, preserve, and an incorrect field + ( + { + "force_lowercase": True, + "fields": [ + "artist", + "albumartist", + "format", + "title", + "year", + "label", + "format", + "INCORRECT_FIELD", + ], + "preserve": ["CD"], + }, + Item( + artist="OPHIDIAN", + albumartist="OphiDIAN", + format="cd", + year=2003, + album="BLACKBOX", + title="KhAmElEoN", + label="enzyme records", + ), + Item( + artist="Ophidian", + albumartist="Ophidian", + format="CD", + year=2003, + album="Blackbox", + title="Khameleon", + label="Enzyme Records", + ), + ), + # Test with no changes + ( + { + "fields": [ + "artist", + "artists", + "albumartist", + "format", + "title", + "year", + "label", + "format", + "INCORRECT_FIELD", + ], + "preserve": ["CD"], + }, + Item( + artist="Ophidian", + artists=["Ophidian"], + albumartist="Ophidian", + format="CD", + year=2003, + album="Blackbox", + title="Khameleon", + label="Enzyme Records", + ), + Item( + artist="Ophidian", + artists=["Ophidian"], + albumartist="Ophidian", + format="CD", + year=2003, + album="Blackbox", + title="Khameleon", + label="Enzyme Records", + ), + ), + # Test with the_artist disabled + ( + { + "the_artist": False, + "fields": [ + "artist", + "artists_sort", + ], + }, + Item( + artists_sort=["b-52s, the"], + artist="a day in the park", + ), + Item( + artists_sort=["B-52s, The"], + artist="A Day in the Park", + ), + ), + # Test to make sure preserve and the_artist + # dont target the 
middle of sentences + # show that The artist applies to any field + # with artist mentioned + ( + { + "preserve": ["PANTHER"], + "fields": ["artist", "artists", "artists_ids"], + }, + Item( + artist="pinkpantheress", + artists=["pinkpantheress", "artist_two"], + artists_ids=["the the", "the the"], + ), + Item( + artist="Pinkpantheress", + artists=["Pinkpantheress", "Artist_two"], + artists_ids=["The The", "The The"], + ), + ), + ] + for testcase in testcases: + cfg, given, expected = testcase + with self.configure_plugin(cfg): + TitlecasePlugin().titlecase_fields(given) + assert given.artist == expected.artist + assert given.artists == expected.artists + assert given.artists_sort == expected.artists_sort + assert given.albumartist == expected.albumartist + assert given.artists_ids == expected.artists_ids + assert given.format == expected.format + assert given.year == expected.year + assert given.title == expected.title + assert given.label == expected.label + + def test_cli_write(self): + given = Item( + album="retrodelica 2: back 2 the future", + artist="blue planet corporation", + title="generator", + ) + expected = Item( + album="Retrodelica 2: Back 2 the Future", + artist="Blue Planet Corporation", + title="Generator", + ) + cfg = {"fields": ["album", "artist", "title"]} + with self.configure_plugin(cfg): + given.add(self.lib) + self.run_command("titlecase") + assert self.lib.items().get().artist == expected.artist + assert self.lib.items().get().album == expected.album + assert self.lib.items().get().title == expected.title + self.lib.items().get().remove() + + def test_cli_no_write(self): + given = Item( + album="retrodelica 2: back 2 the future", + artist="blue planet corporation", + title="generator", + ) + expected = Item( + album="retrodelica 2: back 2 the future", + artist="blue planet corporation", + title="generator", + ) + cfg = {"fields": ["album", "artist", "title"]} + with self.configure_plugin(cfg): + given.add(self.lib) + self.run_command("-p", 
"titlecase") + assert self.lib.items().get().artist == expected.artist + assert self.lib.items().get().album == expected.album + assert self.lib.items().get().title == expected.title + self.lib.items().get().remove() + + def test_imported(self): + given = Item( + album="retrodelica 2: back 2 the future", + artist="blue planet corporation", + title="generator", + ) + expected = Item( + album="Retrodelica 2: Back 2 the Future", + artist="Blue Planet Corporation", + title="Generator", + ) + p = patch("beets.importer.ImportTask.imported_items", lambda x: [given]) + p.start() + with self.configure_plugin({"fields": ["album", "artist", "title"]}): + import_session = ImportSession( + self.lib, loghandler=None, paths=None, query=None + ) + import_task = ImportTask(toppath=None, paths=None, items=[given]) + TitlecasePlugin().imported(import_session, import_task) + import_task.add(self.lib) + item = self.lib.items().get() + assert item.artist == expected.artist + assert item.album == expected.album + assert item.title == expected.title + p.stop() diff --git a/test/plugins/test_types_plugin.py b/test/plugins/test_types_plugin.py index b41e9bb18..41807b80d 100644 --- a/test/plugins/test_types_plugin.py +++ b/test/plugins/test_types_plugin.py @@ -134,7 +134,7 @@ class TypesPluginTest(PluginTestCase): def test_unknown_type_error(self): self.config["types"] = {"flex": "unkown type"} with pytest.raises(ConfigValueError): - self.run_command("ls") + self.add_item(flex="test") def test_template_if_def(self): # Tests for a subtle bug when using %ifdef in templates along with diff --git a/test/plugins/test_web.py b/test/plugins/test_web.py index 2ad07bbe5..4a532e02c 100644 --- a/test/plugins/test_web.py +++ b/test/plugins/test_web.py @@ -118,6 +118,13 @@ class WebPluginTest(ItemInDBTestCase): assert response.status_code == 200 assert len(res_json["items"]) == 3 + def test_get_unique_item_artist(self): + response = self.client.get("/item/values/artist") + res_json = 
json.loads(response.data.decode("utf-8")) + + assert response.status_code == 200 + assert res_json["values"] == ["", "AAA Singers"] + def test_get_single_item_by_id(self): response = self.client.get("/item/1") res_json = json.loads(response.data.decode("utf-8")) @@ -142,7 +149,7 @@ class WebPluginTest(ItemInDBTestCase): def test_get_single_item_by_path(self): data_path = os.path.join(_common.RSRC, b"full.mp3") self.lib.add(Item.from_path(data_path)) - response = self.client.get("/item/path/" + data_path.decode("utf-8")) + response = self.client.get(f"/item/path/{data_path.decode('utf-8')}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -152,12 +159,11 @@ class WebPluginTest(ItemInDBTestCase): data_path = os.path.join(_common.RSRC, b"full.mp3") # data_path points to a valid file, but we have not added the file # to the library. - response = self.client.get("/item/path/" + data_path.decode("utf-8")) + response = self.client.get(f"/item/path/{data_path.decode('utf-8')}") assert response.status_code == 404 def test_get_item_empty_query(self): - """testing item query: <empty>""" response = self.client.get("/item/query/") res_json = json.loads(response.data.decode("utf-8")) @@ -165,7 +171,6 @@ class WebPluginTest(ItemInDBTestCase): assert len(res_json["items"]) == 3 def test_get_simple_item_query(self): - """testing item query: another""" response = self.client.get("/item/query/another") res_json = json.loads(response.data.decode("utf-8")) @@ -174,8 +179,7 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["results"][0]["title"] == "another title" def test_query_item_string(self): - """testing item query: testattr:ABC""" - response = self.client.get("/item/query/testattr%3aABC") + response = self.client.get("/item/query/testattr%3aABC") # testattr:ABC res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -183,8 +187,9 @@ class WebPluginTest(ItemInDBTestCase): assert 
res_json["results"][0]["title"] == "and a third" def test_query_item_regex(self): - """testing item query: testattr::[A-C]+""" - response = self.client.get("/item/query/testattr%3a%3a[A-C]%2b") + response = self.client.get( + "/item/query/testattr%3a%3a[A-C]%2b" + ) # testattr::[A-C]+ res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -192,8 +197,9 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["results"][0]["title"] == "and a third" def test_query_item_regex_backslash(self): - # """ testing item query: testattr::\w+ """ - response = self.client.get("/item/query/testattr%3a%3a%5cw%2b") + response = self.client.get( + "/item/query/testattr%3a%3a%5cw%2b" + ) # testattr::\w+ res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -201,7 +207,6 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["results"][0]["title"] == "and a third" def test_query_item_path(self): - # """ testing item query: path:\somewhere\a """ """Note: path queries are special: the query item must match the path from the root all the way to a directory, so this matches 1 item""" """ Note: filesystem separators in the query must be '\' """ @@ -267,8 +272,9 @@ class WebPluginTest(ItemInDBTestCase): assert response_track_titles == {"title", "and a third"} def test_query_album_string(self): - """testing query: albumtest:xy""" - response = self.client.get("/album/query/albumtest%3axy") + response = self.client.get( + "/album/query/albumtest%3axy" + ) # albumtest:xy res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -276,8 +282,9 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["results"][0]["album"] == "album" def test_query_album_artpath_regex(self): - """testing query: artpath::art_""" - response = self.client.get("/album/query/artpath%3a%3aart_") + response = self.client.get( + "/album/query/artpath%3a%3aart_" + ) # artpath::art_ res_json = 
json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -285,8 +292,9 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["results"][0]["album"] == "other album" def test_query_album_regex_backslash(self): - # """ testing query: albumtest::\w+ """ - response = self.client.get("/album/query/albumtest%3a%3a%5cw%2b") + response = self.client.get( + "/album/query/albumtest%3a%3a%5cw%2b" + ) # albumtest::\w+ res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -310,18 +318,18 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id # Delete item by id - response = self.client.delete("/item/" + str(item_id)) + response = self.client.delete(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 # Check the item has gone - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") assert response.status_code == 404 # Note: if this fails, the item may still be around # and may cause other tests to fail @@ -336,18 +344,18 @@ class WebPluginTest(ItemInDBTestCase): item_id = self.lib.add(Item.from_path(ipath)) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id # Delete item by id, without deleting file - response = self.client.delete("/item/" + str(item_id)) + response = self.client.delete(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 # Check the item has gone - response 
= self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") assert response.status_code == 404 # Check the file has not gone @@ -364,18 +372,18 @@ class WebPluginTest(ItemInDBTestCase): item_id = self.lib.add(Item.from_path(ipath)) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id # Delete item by id, with file - response = self.client.delete("/item/" + str(item_id) + "?delete") + response = self.client.delete(f"/item/{item_id}?delete") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 # Check the item has gone - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") assert response.status_code == 404 # Check the file has gone @@ -427,17 +435,17 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id # Try to delete item by id - response = self.client.delete("/item/" + str(item_id)) + response = self.client.delete(f"/item/{item_id}") assert response.status_code == 405 # Check the item has not gone - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id @@ -481,18 +489,18 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary album we just created - response = self.client.get("/album/" + str(album_id)) + response = self.client.get(f"/album/{album_id}") res_json = 
json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == album_id # Delete album by id - response = self.client.delete("/album/" + str(album_id)) + response = self.client.delete(f"/album/{album_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 # Check the album has gone - response = self.client.get("/album/" + str(album_id)) + response = self.client.get(f"/album/{album_id}") assert response.status_code == 404 # Note: if this fails, the album may still be around # and may cause other tests to fail @@ -543,17 +551,17 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary album we just created - response = self.client.get("/album/" + str(album_id)) + response = self.client.get(f"/album/{album_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == album_id # Try to delete album by id - response = self.client.delete("/album/" + str(album_id)) + response = self.client.delete(f"/album/{album_id}") assert response.status_code == 405 # Check the item has not gone - response = self.client.get("/album/" + str(album_id)) + response = self.client.get(f"/album/{album_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == album_id @@ -603,7 +611,7 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id @@ -613,7 +621,7 @@ class WebPluginTest(ItemInDBTestCase): # Patch item by id # patch_json = json.JSONEncoder().encode({"test_patch_f2": "New"}]}) response = self.client.patch( - "/item/" + str(item_id), json={"test_patch_f2": "New"} + f"/item/{item_id}", json={"test_patch_f2": "New"} 
) res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 @@ -622,7 +630,7 @@ class WebPluginTest(ItemInDBTestCase): assert res_json["test_patch_f2"] == "New" # Check the update has really worked - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id @@ -647,7 +655,7 @@ class WebPluginTest(ItemInDBTestCase): ) # Check we can find the temporary item we just created - response = self.client.get("/item/" + str(item_id)) + response = self.client.get(f"/item/{item_id}") res_json = json.loads(response.data.decode("utf-8")) assert response.status_code == 200 assert res_json["id"] == item_id @@ -657,7 +665,7 @@ class WebPluginTest(ItemInDBTestCase): # Patch item by id # patch_json = json.JSONEncoder().encode({"test_patch_f2": "New"}) response = self.client.patch( - "/item/" + str(item_id), json={"test_patch_f2": "New"} + f"/item/{item_id}", json={"test_patch_f2": "New"} ) assert response.status_code == 405 @@ -670,6 +678,6 @@ class WebPluginTest(ItemInDBTestCase): assert os.path.exists(ipath) item_id = self.lib.add(Item.from_path(ipath)) - response = self.client.get("/item/" + str(item_id) + "/file") + response = self.client.get(f"/item/{item_id}/file") assert response.status_code == 200 diff --git a/test/plugins/test_zero.py b/test/plugins/test_zero.py index 51913c8e0..b08bf0dca 100644 --- a/test/plugins/test_zero.py +++ b/test/plugins/test_zero.py @@ -249,6 +249,54 @@ class ZeroPluginTest(PluginTestCase): assert "id" not in z.fields_to_progs + def test_omit_single_disc_with_tags_single(self): + item = self.add_item_fixture( + disctotal=1, disc=1, comments="test comment" + ) + item.write() + with self.configure_plugin( + {"omit_single_disc": True, "fields": ["comments"]} + ): + item.write() + + mf = MediaFile(syspath(item.path)) + assert mf.comments is None + assert mf.disc 
== 0 + + def test_omit_single_disc_with_tags_multi(self): + item = self.add_item_fixture( + disctotal=4, disc=1, comments="test comment" + ) + item.write() + with self.configure_plugin( + {"omit_single_disc": True, "fields": ["comments"]} + ): + item.write() + + mf = MediaFile(syspath(item.path)) + assert mf.comments is None + assert mf.disc == 1 + + def test_omit_single_disc_only_change_single(self): + item = self.add_item_fixture(disctotal=1, disc=1) + item.write() + + with self.configure_plugin({"omit_single_disc": True}): + item.write() + + mf = MediaFile(syspath(item.path)) + assert mf.disc == 0 + + def test_omit_single_disc_only_change_multi(self): + item = self.add_item_fixture(disctotal=4, disc=1) + item.write() + + with self.configure_plugin({"omit_single_disc": True}): + item.write() + + mf = MediaFile(syspath(item.path)) + assert mf.disc == 1 + def test_empty_query_n_response_no_changes(self): item = self.add_item_fixture( year=2016, day=13, month=3, comments="test comment" diff --git a/test/plugins/utils/__init__.py b/test/plugins/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/plugins/utils/test_musicbrainz.py b/test/plugins/utils/test_musicbrainz.py new file mode 100644 index 000000000..291f50eb5 --- /dev/null +++ b/test/plugins/utils/test_musicbrainz.py @@ -0,0 +1,82 @@ +from beetsplug._utils.musicbrainz import MusicBrainzAPI + + +def test_group_relations(): + raw_release = { + "id": "r1", + "relations": [ + {"target-type": "artist", "type": "vocal", "name": "A"}, + {"target-type": "url", "type": "streaming", "url": "http://s"}, + {"target-type": "url", "type": "purchase", "url": "http://p"}, + { + "target-type": "work", + "type": "performance", + "work": { + "relations": [ + { + "artist": {"name": "幾田りら"}, + "target-type": "artist", + "type": "composer", + }, + { + "target-type": "url", + "type": "lyrics", + "url": { + "resource": "https://utaten.com/lyric/tt24121002/" + }, + }, + { + "artist": {"name": "幾田りら"}, + 
"target-type": "artist", + "type": "lyricist", + }, + { + "target-type": "url", + "type": "lyrics", + "url": { + "resource": "https://www.uta-net.com/song/366579/" + }, + }, + ], + "title": "百花繚乱", + "type": "Song", + }, + }, + ], + } + + assert MusicBrainzAPI._group_relations(raw_release) == { + "id": "r1", + "artist-relations": [{"type": "vocal", "name": "A"}], + "url-relations": [ + {"type": "streaming", "url": "http://s"}, + {"type": "purchase", "url": "http://p"}, + ], + "work-relations": [ + { + "type": "performance", + "work": { + "artist-relations": [ + {"type": "composer", "artist": {"name": "幾田りら"}}, + {"type": "lyricist", "artist": {"name": "幾田りら"}}, + ], + "url-relations": [ + { + "type": "lyrics", + "url": { + "resource": "https://utaten.com/lyric/tt24121002/" + }, + }, + { + "type": "lyrics", + "url": { + "resource": "https://www.uta-net.com/song/366579/" + }, + }, + ], + "title": "百花繚乱", + "type": "Song", + }, + }, + ], + } diff --git a/test/plugins/utils/test_request_handler.py b/test/plugins/utils/test_request_handler.py new file mode 100644 index 000000000..6887283dc --- /dev/null +++ b/test/plugins/utils/test_request_handler.py @@ -0,0 +1,67 @@ +import io +from http import HTTPStatus +from unittest.mock import Mock +from urllib.error import URLError + +import pytest +import requests +from urllib3 import HTTPResponse +from urllib3.exceptions import NewConnectionError + +from beetsplug._utils.requests import RequestHandler + + +class TestRequestHandlerRetry: + @pytest.fixture(autouse=True) + def patch_connection(self, monkeypatch, last_response): + monkeypatch.setattr( + "urllib3.connectionpool.HTTPConnectionPool._make_request", + Mock( + side_effect=[ + NewConnectionError(None, "Connection failed"), + URLError("bad"), + last_response, + ] + ), + ) + + @pytest.fixture + def request_handler(self): + return RequestHandler() + + @pytest.mark.parametrize( + "last_response", + [ + HTTPResponse( + body=io.BytesIO(b"success"), + status=HTTPStatus.OK, + 
preload_content=False, + ), + ], + ids=["success"], + ) + def test_retry_on_connection_error(self, request_handler): + """Verify that the handler retries on connection errors.""" + response = request_handler.get("http://example.com/api") + + assert response.text == "success" + assert response.status_code == HTTPStatus.OK + + @pytest.mark.parametrize( + "last_response", + [ + ConnectionResetError, + HTTPResponse( + body=io.BytesIO(b"Server Error"), + status=HTTPStatus.INTERNAL_SERVER_ERROR, + preload_content=False, + ), + ], + ids=["conn_error", "server_error"], + ) + def test_retry_exhaustion(self, request_handler): + """Verify that the handler raises an error after exhausting retries.""" + with pytest.raises( + requests.exceptions.RequestException, match="Max retries exceeded" + ): + request_handler.get("http://example.com/api") diff --git a/test/test_vfs.py b/test/plugins/utils/test_vfs.py similarity index 97% rename from test/test_vfs.py rename to test/plugins/utils/test_vfs.py index 7f75fbd83..9505075f9 100644 --- a/test/test_vfs.py +++ b/test/plugins/utils/test_vfs.py @@ -14,9 +14,9 @@ """Tests for the virtual filesystem builder..""" -from beets import vfs from beets.test import _common from beets.test.helper import BeetsTestCase +from beetsplug._utils import vfs class VFSTest(BeetsTestCase): diff --git a/test/rsrc/lyrics/examplecom/beetssong.txt b/test/rsrc/lyrics/examplecom/beetssong.txt index c546dd602..436612ce0 100644 --- a/test/rsrc/lyrics/examplecom/beetssong.txt +++ b/test/rsrc/lyrics/examplecom/beetssong.txt @@ -221,7 +221,7 @@ e9.size = "120x600, 160x600"; <h2>John Doe <br> beets song lyrics</h2> <img src="images/phone-left.gif" alt="Ringtones left icon" width="16" height="17"> <a href="http://www.ringtonematcher.com/go/?sid=LBSMros&artist=The+John Doe&song=Beets+Song" target="_blank"><b><font size="+1" color="red" face="arial">Send "beets song" Ringtone to your Cell</font></b></a> <img src="images/phone-right.gif" alt="Ringtones right icon" 
width="16" height="17"><br><br><center>Beets is the media library management system for obsessive music geeks.<br> -The purpose of beets is to get your music collection right once and for all. It catalogs your collection, automatically improving its metadata as it goes. It then provides a bouquet of tools for manipulating and accessing your music.<br> +The purpose of beets is to get your music collection right once and for all. It catalogs your collection, automatically improving its metadata as it goes. It then provides a suite of tools for manipulating and accessing your music.<br> <div class='flow breaker'> </div> Here's an example of beets' brainy tag corrector doing its thing: Because beets is designed as a library, it can do almost anything you can imagine for your music collection. Via plugins, beets becomes a panacea</center> diff --git a/test/rsrc/mbpseudo/official_release.json b/test/rsrc/mbpseudo/official_release.json new file mode 100644 index 000000000..cd6bb3ba9 --- /dev/null +++ b/test/rsrc/mbpseudo/official_release.json @@ -0,0 +1,1878 @@ +{ + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "In Bloom", + "primary": true, + "sort-name": "In Bloom", + "type": "Release name", + "type-id": "df187855-059b-3514-9d5e-d240de0b4228" + } + ], + "artist-credit": [ + { + "artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "genres": [ + { + "count": 1, + "disambiguation": "", + "id": "eba7715e-ee26-4989-8d49-9db382955419", + "name": "j-pop" + }, + { + "count": 1, + "disambiguation": "", + "id": "455f264b-db00-4716-991d-fbd32dc24523", + "name": "singer-songwriter" + } + ], + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", 
+ "tags": [ + { + "count": 1, + "name": "j-pop" + }, + { + "count": 1, + "name": "singer-songwriter" + } + ], + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "artist-relations": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": "2025", + "direction": "backward", + "end": "2025", + "ended": true, + "source-credit": "", + "target-credit": "Lilas Ikuta", + "type": "copyright", + "type-id": "730b5251-7432-4896-8fc6-e1cba943bfe1" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": "2025", + "direction": "backward", + "end": "2025", + "ended": true, + "source-credit": "", + "target-credit": "Lilas Ikuta", + "type": "phonographic copyright", + "type-id": "01d3488d-8d2a-4cff-9226-5250404db4dc" + } + ], + "asin": "B0DR8Y2YDC", + "barcode": "199066336168", + "country": "XW", + "cover-art-archive": { + "artwork": true, + "back": false, + "count": 1, + "darkened": false, + "front": true + }, + "date": "2025-01-10", + "disambiguation": "", + "genres": [], + "id": "a5ce1d11-2e32-45a4-b37f-c1589d46b103", + "label-info": [ + { + "catalog-number": "Lilas-020", + "label": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "2636621 Records DK", + "primary": null, + "sort-name": "2636621 Records DK", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Antipole", + 
"primary": null, + "sort-name": "Antipole", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Auto production", + "primary": null, + "sort-name": "Auto production", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Auto-Edición", + "primary": null, + "sort-name": "Auto-Edición", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Auto-Product", + "primary": null, + "sort-name": "Auto-Product", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Autoedición", + "primary": null, + "sort-name": "Autoedición", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Autoeditado", + "primary": null, + "sort-name": "Autoeditado", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Autoproduit", + "primary": null, + "sort-name": "Autoproduit", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Banana Skin Records", + "primary": null, + "sort-name": "Banana Skin Records", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Cannelle", + "primary": null, + "sort-name": "Cannelle", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Cece Natalie", + "primary": null, + "sort-name": "Cece Natalie", + 
"type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Cherry X", + "primary": null, + "sort-name": "Cherry X", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Chung", + "primary": null, + "sort-name": "Chung", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Cody Johnson", + "primary": null, + "sort-name": "Cody Johnson", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Cowgirl Clue", + "primary": null, + "sort-name": "Cowgirl Clue", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "D.I.Y.", + "primary": null, + "sort-name": "D.I.Y.", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Damjan Mravunac Self-released)", + "primary": null, + "sort-name": "Damjan Mravunac Self-released)", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Demo", + "primary": null, + "sort-name": "Demo", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "DistroKid", + "primary": null, + "sort-name": "DistroKid", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Egzod", + "primary": null, + "sort-name": "Egzod", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Eigenverlag", + "primary": null, + "sort-name": "Eigenverlag", + "type": "Search hint", + "type-id": 
"829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Eigenvertrieb", + "primary": null, + "sort-name": "Eigenvertrieb", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "GRIND MODE", + "primary": null, + "sort-name": "GRIND MODE", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "INDIPENDANT", + "primary": null, + "sort-name": "INDIPENDANT", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Indepandant", + "primary": null, + "sort-name": "Indepandant", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Independant release", + "primary": null, + "sort-name": "Independant release", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Independent", + "primary": null, + "sort-name": "Independent", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Independente", + "primary": null, + "sort-name": "Independente", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Independiente", + "primary": null, + "sort-name": "Independiente", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Indie", + "primary": null, + "sort-name": "Indie", + "type": 
"Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Joost Klein", + "primary": null, + "sort-name": "Joost Klein", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Millington Records", + "primary": null, + "sort-name": "Millington Records", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "MoroseSound", + "primary": null, + "sort-name": "MoroseSound", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "N/A", + "primary": null, + "sort-name": "N/A", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "No Label", + "primary": null, + "sort-name": "No Label", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "None", + "primary": null, + "sort-name": "None", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "None Like Joshua", + "primary": null, + "sort-name": "None Like Joshua", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Not On A Lebel", + "primary": null, + "sort-name": "Not On A Lebel", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Not On Label", + "primary": null, + "sort-name": "Not On Label", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": 
false, + "locale": null, + "name": "Offensively Average Productions", + "primary": null, + "sort-name": "Offensively Average Productions", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Ours", + "primary": null, + "sort-name": "Ours", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2019", + "primary": null, + "sort-name": "P2019", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2020", + "primary": null, + "sort-name": "P2020", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2021", + "primary": null, + "sort-name": "P2021", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2022", + "primary": null, + "sort-name": "P2022", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2023", + "primary": null, + "sort-name": "P2023", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2024", + "primary": null, + "sort-name": "P2024", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "P2025", + "primary": null, + "sort-name": "P2025", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Patriarchy", + "primary": null, + "sort-name": "Patriarchy", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Plini", + "primary": null, + "sort-name": "Plini", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": 
"Records DK", + "primary": null, + "sort-name": "Records DK", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self Digital", + "primary": null, + "sort-name": "Self Digital", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self Release", + "primary": null, + "sort-name": "Self Release", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self Released", + "primary": null, + "sort-name": "Self Released", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self-release", + "primary": null, + "sort-name": "Self-release", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self-released", + "primary": null, + "sort-name": "Self-released", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Self-released/independent", + "primary": null, + "sort-name": "Self-released/independent", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Sevdaliza", + "primary": null, + "sort-name": "Sevdaliza", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "TOMMY CASH", + "primary": null, + "sort-name": "TOMMY CASH", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": 
"Take Van", + "primary": null, + "sort-name": "Take Van", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Talwiinder", + "primary": null, + "sort-name": "Talwiinder", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Unsigned", + "primary": null, + "sort-name": "Unsigned", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "VGR", + "primary": null, + "sort-name": "VGR", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "Woo Da Savage", + "primary": null, + "sort-name": "Woo Da Savage", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "YANAA", + "primary": null, + "sort-name": "YANAA", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "fi", + "name": "[ei levymerkkiä]", + "primary": true, + "sort-name": "ei levymerkkiä", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "nl", + "name": "[geen platenmaatschappij]", + "primary": true, + "sort-name": "[geen platenmaatschappij]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "et", + "name": "[ilma plaadifirmata]", + "primary": false, + "sort-name": "[ilma plaadifirmata]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "es", + "name": "[nada]", + "primary": true, + "sort-name": "[nada]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + 
"begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "[no label]", + "primary": true, + "sort-name": "[no label]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "[nolabel]", + "primary": null, + "sort-name": "[nolabel]", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "[none]", + "primary": null, + "sort-name": "[none]", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "lt", + "name": "[nėra leidybinės kompanijos]", + "primary": false, + "sort-name": "[nėra leidybinės kompanijos]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "lt", + "name": "[nėra leidyklos]", + "primary": false, + "sort-name": "[nėra leidyklos]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "lt", + "name": "[nėra įrašų kompanijos]", + "primary": true, + "sort-name": "[nėra įrašų kompanijos]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "et", + "name": "[puudub]", + "primary": false, + "sort-name": "[puudub]", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "ru", + "name": "[самиздат]", + "primary": false, + "sort-name": "samizdat", + "type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": "ja", + "name": "[レーベルなし]", + "primary": true, + "sort-name": "[レーベルなし]", + 
"type": "Label name", + "type-id": "3a1a0c48-d885-3b89-87b2-9e8a483c5675" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "annapantsu music", + "primary": null, + "sort-name": "annapantsu music", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "auto-release", + "primary": null, + "sort-name": "auto-release", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "autoprod.", + "primary": null, + "sort-name": "autoprod.", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "ayesha erotica", + "primary": null, + "sort-name": "ayesha erotica", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "blank", + "primary": null, + "sort-name": "blank", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "cupcakKe", + "primary": null, + "sort-name": "cupcakKe", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "d.silvestre", + "primary": null, + "sort-name": "d.silvestre", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "dj-Jo", + "primary": null, + "sort-name": "dj-Jo", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "independent release", + "primary": null, + "sort-name": "independent release", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": 
"lor2mg", + "primary": null, + "sort-name": "lor2mg", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "nyamura", + "primary": null, + "sort-name": "nyamura", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "pls dnt stp", + "primary": null, + "sort-name": "pls dnt stp", + "type": null, + "type-id": null + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "self", + "primary": null, + "sort-name": "self", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "self issued", + "primary": null, + "sort-name": "self issued", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "self-issued", + "primary": null, + "sort-name": "self-issued", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "white label", + "primary": null, + "sort-name": "white label", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "но лабел", + "primary": null, + "sort-name": "но лабел", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + }, + { + "begin": null, + "end": null, + "ended": false, + "locale": null, + "name": "独立发行", + "primary": null, + "sort-name": "独立发行", + "type": "Search hint", + "type-id": "829662f2-a781-3ec8-8b46-fbcea6196f81" + } + ], + "disambiguation": "Special purpose label – white labels, self-published releases and other “no label” releases", + "genres": [], + "id": "157afde4-4bf5-4039-8ad2-5a15acc85176", + "label-code": null, + 
"name": "[no label]", + "sort-name": "[no label]", + "tags": [ + { + "count": 12, + "name": "special purpose" + }, + { + "count": 18, + "name": "special purpose label" + } + ], + "type": "Production", + "type-id": "a2426aab-2dd4-339c-b47d-b4923a241678" + } + } + ], + "media": [ + { + "format": "Digital Media", + "format-id": "907a28d9-b3b2-3ef6-89a8-7b18d91d4794", + "id": "43f08d54-a896-3561-be75-b881cbc832d5", + "position": 1, + "title": "", + "track-count": 1, + "track-offset": 0, + "tracks": [ + { + "artist-credit": [ + { + "artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "id": "0bd01e8b-18e1-4708-b0a3-c9603b89ab97", + "length": 179239, + "number": "1", + "position": 1, + "recording": { + "aliases": [], + "artist-credit": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "artist-relations": [ + { + "artist": { + "country": "JP", + "disambiguation": "Japanese composer/arranger/guitarist, agehasprings", + "id": "f24241fb-4d89-4bf2-8336-3f2a7d2c0025", + "name": "KOHD", + "sort-name": "KOHD", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": 
"arranger", + "type-id": "22661fb8-cdb7-4f67-8385-b2a8be6c9f0d" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": "2025", + "direction": "backward", + "end": "2025", + "ended": true, + "source-credit": "", + "target-credit": "Lilas Ikuta", + "type": "phonographic copyright", + "type-id": "7fd5fbc0-fbf4-4d04-be23-417d50a4dc30" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "1d27ab8a-a0df-47cf-b4cc-d2d7a0712a05", + "name": "山本秀哉", + "sort-name": "Yamamoto, Shuya", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "producer", + "type-id": "5c0ceac3-feb4-41f0-868d-dc06f6e27fc0" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "vocal", + "type-id": "0fdbe3c6-7700-4a31-ae54-b53f06ae1cfa" + } + ], + "disambiguation": "", + "first-release-date": "2025-01-10", + "genres": [], + "id": "781724c1-a039-41e6-bd9b-770c3b9d5b8e", + "isrcs": [ + "JPP302400868" + ], + "length": 179546, + "tags": [], + "title": "百花繚乱", + "url-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": 
false, + "source-credit": "", + "target-credit": "", + "type": "free streaming", + "type-id": "7e41ef12-a124-4324-afdb-fdbae687a89c", + "url": { + "id": "d076eaf9-5fde-4f6e-a946-cde16b67aa3b", + "resource": "https://open.spotify.com/track/782PTXsbAWB70ySDZ5NHmP" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "92777657-504c-4acb-bd33-51a201bd57e1", + "url": { + "id": "64879627-6eca-4755-98b5-b2234a8dbc61", + "resource": "https://music.apple.com/jp/song/1857886416" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "streaming", + "type-id": "b5f3058a-666c-406f-aafb-f9249fc7b122", + "url": { + "id": "64879627-6eca-4755-98b5-b2234a8dbc61", + "resource": "https://music.apple.com/jp/song/1857886416" + } + } + ], + "video": false, + "work-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "performance", + "type-id": "a3005666-a872-32c3-ad06-98af558e99b0", + "work": { + "artist-relations": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "composer", + "type-id": "d59d99ea-23d4-4a80-b066-edca32ee158f" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", 
+ "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyricist", + "type-id": "3e48faba-ec01-47fd-8e89-30e81161661c" + } + ], + "attributes": [], + "disambiguation": "", + "id": "9e14d6b2-ac7d-43e9-82a9-561bc76ce2ed", + "iswcs": [], + "language": "jpn", + "languages": [ + "jpn" + ], + "title": "百花繚乱", + "type": "Song", + "type-id": "f061270a-2fd6-32f1-a641-f0f8676d14e6", + "url-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyrics", + "type-id": "e38e65aa-75e0-42ba-ace0-072aeb91a538", + "url": { + "id": "dfac3640-6b23-4991-a59c-7cb80e8eb950", + "resource": "https://utaten.com/lyric/tt24121002/" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyrics", + "type-id": "e38e65aa-75e0-42ba-ace0-072aeb91a538", + "url": { + "id": "b1b5d5df-e79d-4cda-bb2a-8014e5505415", + "resource": "https://www.uta-net.com/song/366579/" + } + } + ] + } + } + ] + }, + "title": "百花繚乱" + } + ] + } + ], + "packaging": "None", + "packaging-id": "119eba76-b343-3e02-a292-f0f00644bb9b", + "quality": "normal", + "release-events": [ + { + "area": { + "disambiguation": "", + "id": "525d4e18-3d00-31b9-a58b-a146a916de8f", + "iso-3166-1-codes": [ + "XW" + ], + "name": "[Worldwide]", + "sort-name": "[Worldwide]", + "type": null, + "type-id": null + }, + "date": "2025-01-10" + } + ], + "release-group": { + "aliases": [], + "artist-credit": [ + { + 
"artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "disambiguation": "", + "first-release-date": "2025-01-10", + "genres": [], + "id": "da0d6bbb-f44b-4fff-8739-9d72db0402a1", + "primary-type": "Single", + "primary-type-id": "d6038452-8ee0-3f68-affc-2de9a1ede0b9", + "secondary-type-ids": [], + "secondary-types": [], + "tags": [], + "title": "百花繚乱" + }, + "release-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "release": { + "artist-credit": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": null, + "type-id": null + }, + "joinphrase": "", + "name": "Lilas Ikuta" + } + ], + "barcode": null, + "disambiguation": "", + "id": "dc3ee2df-0bc1-49eb-b8c4-34473d279a43", + "media": [], + "packaging": null, + "packaging-id": null, + "quality": "normal", + "release-group": null, + "status": null, + "status-id": null, + "text-representation": { + "language": "eng", + "script": "Latn" + }, + "title": "In Bloom" + }, + "source-credit": "", + "target-credit": "", + "type": "transl-tracklisting", + "type-id": "fc399d47-23a7-4c28-bfcf-0607a562b644" + } + ], + "status": "Official", + "status-id": "4e304316-386d-3409-af2e-78857eec5cfe", + "tags": [], + "text-representation": { + "language": "jpn", + "script": "Jpan" + }, + "title": "百花繚乱", + "url-relations": [ + { + "attribute-ids": {}, + 
"attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "amazon asin", + "type-id": "4f2e710d-166c-480c-a293-2e2c8d658d87", + "url": { + "id": "b50c7fb8-2327-4a05-b989-f2211a41afee", + "resource": "https://www.amazon.co.jp/gp/product/B0DR8Y2YDC" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "free streaming", + "type-id": "08445ccf-7b99-4438-9f9a-fb9ac18099ee", + "url": { + "id": "5106a7b0-1443-4803-91a2-28cac2cfb5e0", + "resource": "https://open.spotify.com/album/3LDV2xGL9HiqCsQujEPQLb" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "free streaming", + "type-id": "08445ccf-7b99-4438-9f9a-fb9ac18099ee", + "url": { + "id": "d481d94b-a7bf-4e82-8da0-1757fedcda62", + "resource": "https://www.deezer.com/album/687686261" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "6156d2e4-d107-43f9-8f44-52f04d39c78e", + "resource": "https://mora.jp/package/43000011/199066336168/" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "a4eabb88-1746-4aa2-ab09-c28cfbe65efb", + "resource": 
"https://mora.jp/package/43000011/199066336168_HD/" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "ab8440f0-3b13-4436-b3ad-f4695c9d8875", + "resource": "https://mora.jp/package/43000011/199066336168_LL/" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "9a8ee8d1-f946-44a1-be16-8f7a77c951e9", + "resource": "https://music.apple.com/jp/album/1786972161" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "c6faaa80-38fb-46a4-aa2b-78cddc5cbe70", + "resource": "https://ototoy.jp/_/default/p/2501951" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "0e7e8bc5-0779-492d-a9db-9ab58f96d23b", + "resource": "https://www.qobuz.com/jp-ja/album/lilas-ikuta-/fl9tx2j78reza" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": 
"98e08c20-8402-4163-8970-53504bb6a1e4", + "url": { + "id": "c0cf8fe0-3413-4544-a026-37d346a59a77", + "resource": "https://www.qobuz.com/jp-ja/album/lilas-ikuta-/l1dnc4xoi6l7a" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "streaming", + "type-id": "320adf26-96fa-4183-9045-1f5f32f833cb", + "url": { + "id": "e4ce55a9-a5e1-4842-b42d-11be6a31fdab", + "resource": "https://music.amazon.co.jp/albums/B0DR8Y2YDC" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "streaming", + "type-id": "320adf26-96fa-4183-9045-1f5f32f833cb", + "url": { + "id": "9a8ee8d1-f946-44a1-be16-8f7a77c951e9", + "resource": "https://music.apple.com/jp/album/1786972161" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "vgmdb", + "type-id": "6af0134a-df6a-425a-96e2-895f9cd342ba", + "url": { + "id": "1885772a-4004-4d45-9512-d0c8822506c9", + "resource": "https://vgmdb.net/album/145936" + } + } + ] +} diff --git a/test/rsrc/mbpseudo/pseudo_release.json b/test/rsrc/mbpseudo/pseudo_release.json new file mode 100644 index 000000000..ae4bf7b6b --- /dev/null +++ b/test/rsrc/mbpseudo/pseudo_release.json @@ -0,0 +1,515 @@ +{ + "aliases": [], + "artist-credit": [ + { + "artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "genres": [ + { + "count": 1, + "disambiguation": "", + 
"id": "eba7715e-ee26-4989-8d49-9db382955419", + "name": "j-pop" + }, + { + "count": 1, + "disambiguation": "", + "id": "455f264b-db00-4716-991d-fbd32dc24523", + "name": "singer-songwriter" + } + ], + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "tags": [ + { + "count": 1, + "name": "j-pop" + }, + { + "count": 1, + "name": "singer-songwriter" + } + ], + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "Lilas Ikuta" + } + ], + "asin": null, + "barcode": null, + "cover-art-archive": { + "artwork": false, + "back": false, + "count": 0, + "darkened": false, + "front": false + }, + "disambiguation": "", + "genres": [], + "id": "dc3ee2df-0bc1-49eb-b8c4-34473d279a43", + "label-info": [], + "media": [ + { + "format": "Digital Media", + "format-id": "907a28d9-b3b2-3ef6-89a8-7b18d91d4794", + "id": "606faab7-60fa-3a8b-a40f-2c66150cce81", + "position": 1, + "title": "", + "track-count": 1, + "track-offset": 0, + "tracks": [ + { + "artist-credit": [ + { + "artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "Lilas Ikuta" + } + ], + "id": "2018b012-a184-49a2-a464-fb4628a89588", + "length": 179239, + "number": "1", + "position": 1, + "recording": { + "aliases": [], + "artist-credit": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + 
"joinphrase": "", + "name": "幾田りら" + } + ], + "artist-relations": [ + { + "artist": { + "country": "JP", + "disambiguation": "Japanese composer/arranger/guitarist, agehasprings", + "id": "f24241fb-4d89-4bf2-8336-3f2a7d2c0025", + "name": "KOHD", + "sort-name": "KOHD", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "arranger", + "type-id": "22661fb8-cdb7-4f67-8385-b2a8be6c9f0d" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": "2025", + "direction": "backward", + "end": "2025", + "ended": true, + "source-credit": "", + "target-credit": "Lilas Ikuta", + "type": "phonographic copyright", + "type-id": "7fd5fbc0-fbf4-4d04-be23-417d50a4dc30" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "1d27ab8a-a0df-47cf-b4cc-d2d7a0712a05", + "name": "山本秀哉", + "sort-name": "Yamamoto, Shuya", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "producer", + "type-id": "5c0ceac3-feb4-41f0-868d-dc06f6e27fc0" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": 
"backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "vocal", + "type-id": "0fdbe3c6-7700-4a31-ae54-b53f06ae1cfa" + } + ], + "disambiguation": "", + "first-release-date": "2025-01-10", + "genres": [], + "id": "781724c1-a039-41e6-bd9b-770c3b9d5b8e", + "isrcs": [ + "JPP302400868" + ], + "length": 179546, + "tags": [], + "title": "百花繚乱", + "url-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "free streaming", + "type-id": "7e41ef12-a124-4324-afdb-fdbae687a89c", + "url": { + "id": "d076eaf9-5fde-4f6e-a946-cde16b67aa3b", + "resource": "https://open.spotify.com/track/782PTXsbAWB70ySDZ5NHmP" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "purchase for download", + "type-id": "92777657-504c-4acb-bd33-51a201bd57e1", + "url": { + "id": "64879627-6eca-4755-98b5-b2234a8dbc61", + "resource": "https://music.apple.com/jp/song/1857886416" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "streaming", + "type-id": "b5f3058a-666c-406f-aafb-f9249fc7b122", + "url": { + "id": "64879627-6eca-4755-98b5-b2234a8dbc61", + "resource": "https://music.apple.com/jp/song/1857886416" + } + } + ], + "video": false, + "work-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "forward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "performance", + "type-id": "a3005666-a872-32c3-ad06-98af558e99b0", + "work": { + "artist-relations": [ + { + "artist": { 
+ "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "composer", + "type-id": "d59d99ea-23d4-4a80-b066-edca32ee158f" + }, + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyricist", + "type-id": "3e48faba-ec01-47fd-8e89-30e81161661c" + } + ], + "attributes": [], + "disambiguation": "", + "id": "9e14d6b2-ac7d-43e9-82a9-561bc76ce2ed", + "iswcs": [], + "language": "jpn", + "languages": [ + "jpn" + ], + "title": "百花繚乱", + "type": "Song", + "type-id": "f061270a-2fd6-32f1-a641-f0f8676d14e6", + "url-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyrics", + "type-id": "e38e65aa-75e0-42ba-ace0-072aeb91a538", + "url": { + "id": "dfac3640-6b23-4991-a59c-7cb80e8eb950", + "resource": "https://utaten.com/lyric/tt24121002/" + } + }, + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "source-credit": "", + "target-credit": "", + "type": "lyrics", + "type-id": "e38e65aa-75e0-42ba-ace0-072aeb91a538", + "url": { + "id": "b1b5d5df-e79d-4cda-bb2a-8014e5505415", + "resource": 
"https://www.uta-net.com/song/366579/" + } + } + ] + } + } + ] + }, + "title": "In Bloom" + } + ] + } + ], + "packaging": null, + "packaging-id": null, + "quality": "normal", + "release-group": { + "aliases": [], + "artist-credit": [ + { + "artist": { + "aliases": [ + { + "begin": null, + "end": null, + "ended": false, + "locale": "en", + "name": "Lilas Ikuta", + "primary": true, + "sort-name": "Ikuta, Lilas", + "type": "Artist name", + "type-id": "894afba6-2816-3c24-8072-eadb66bd04bc" + } + ], + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": "Person", + "type-id": "b6e035f4-3ce9-331c-97df-83397230b0df" + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "disambiguation": "", + "first-release-date": "2025-01-10", + "genres": [], + "id": "da0d6bbb-f44b-4fff-8739-9d72db0402a1", + "primary-type": "Single", + "primary-type-id": "d6038452-8ee0-3f68-affc-2de9a1ede0b9", + "secondary-type-ids": [], + "secondary-types": [], + "tags": [], + "title": "百花繚乱" + }, + "release-relations": [ + { + "attribute-ids": {}, + "attribute-values": {}, + "attributes": [], + "begin": null, + "direction": "backward", + "end": null, + "ended": false, + "release": { + "artist-credit": [ + { + "artist": { + "country": "JP", + "disambiguation": "", + "id": "55e42264-ef27-49d8-93fd-29f930dc96e4", + "name": "幾田りら", + "sort-name": "Ikuta, Lilas", + "type": null, + "type-id": null + }, + "joinphrase": "", + "name": "幾田りら" + } + ], + "barcode": "199066336168", + "country": "XW", + "date": "2025-01-10", + "disambiguation": "", + "id": "a5ce1d11-2e32-45a4-b37f-c1589d46b103", + "media": [], + "packaging": null, + "packaging-id": null, + "quality": "normal", + "release-events": [ + { + "area": { + "disambiguation": "", + "id": "525d4e18-3d00-31b9-a58b-a146a916de8f", + "iso-3166-1-codes": [ + "XW" + ], + "name": "[Worldwide]", + "sort-name": "[Worldwide]", + "type": null, + "type-id": null + }, + "date": 
"2025-01-10" + } + ], + "release-group": null, + "status": null, + "status-id": null, + "text-representation": { + "language": "jpn", + "script": "Jpan" + }, + "title": "百花繚乱" + }, + "source-credit": "", + "target-credit": "", + "type": "transl-tracklisting", + "type-id": "fc399d47-23a7-4c28-bfcf-0607a562b644" + } + ], + "status": "Pseudo-Release", + "status-id": "41121bb9-3413-3818-8a9a-9742318349aa", + "tags": [], + "text-representation": { + "language": "eng", + "script": "Latn" + }, + "title": "In Bloom" +} diff --git a/test/rsrc/spotify/japanese_track_request.json b/test/rsrc/spotify/japanese_track_request.json new file mode 100644 index 000000000..04559588e --- /dev/null +++ b/test/rsrc/spotify/japanese_track_request.json @@ -0,0 +1,89 @@ +{ + "tracks":{ + "href":"https://api.spotify.com/v1/search?query=Happy+album%3ADespicable+Me+2+artist%3APharrell+Williams&offset=0&limit=20&type=track", + "items":[ + { + "album":{ + "album_type":"compilation", + "available_markets":[ + "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", + "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", + "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", + "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", + "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", + "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", + "SK", "SV", "TR", "TW", "US", "UY" + ], + "external_urls":{ + "spotify":"https://open.spotify.com/album/5l3zEmMrOhOzG8d8s83GOL" + }, + "href":"https://api.spotify.com/v1/albums/5l3zEmMrOhOzG8d8s83GOL", + "id":"5l3zEmMrOhOzG8d8s83GOL", + "images":[ + { + "height":640, + "width":640, + "url":"https://i.scdn.co/image/cb7905340c132365bbaee3f17498f062858382e8" + }, + { + "height":300, + "width":300, + "url":"https://i.scdn.co/image/af369120f0b20099d6784ab31c88256113f10ffb" + }, + { + "height":64, + "width":64, + "url":"https://i.scdn.co/image/9dad385ddf2e7db0bef20cec1fcbdb08689d9ae8" + } + ], + "name":"盗作", + "type":"album", + 
"uri":"spotify:album:5l3zEmMrOhOzG8d8s83GOL" + }, + "artists":[ + { + "external_urls":{ + "spotify":"https://open.spotify.com/artist/2RdwBSPQiwcmiDo9kixcl8" + }, + "href":"https://api.spotify.com/v1/artists/2RdwBSPQiwcmiDo9kixcl8", + "id":"2RdwBSPQiwcmiDo9kixcl8", + "name":"ヨルシカ", + "type":"artist", + "uri":"spotify:artist:2RdwBSPQiwcmiDo9kixcl8" + } + ], + "available_markets":[ + "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", + "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", + "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", + "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", + "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", + "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", + "SK", "SV", "TR", "TW", "US", "UY" + ], + "disc_number":1, + "duration_ms":233305, + "explicit":false, + "external_ids":{ + "isrc":"USQ4E1300686" + }, + "external_urls":{ + "spotify":"https://open.spotify.com/track/6NPVjNh8Jhru9xOmyQigds" + }, + "href":"https://api.spotify.com/v1/tracks/6NPVjNh8Jhru9xOmyQigds", + "id":"6NPVjNh8Jhru9xOmyQigds", + "name":"思想犯", + "popularity":89, + "preview_url":"https://p.scdn.co/mp3-preview/6b00000be293e6b25f61c33e206a0c522b5cbc87", + "track_number":4, + "type":"track", + "uri":"spotify:track:6NPVjNh8Jhru9xOmyQigds" + } + ], + "limit":20, + "next":null, + "offset":0, + "previous":null, + "total":1 + } +} diff --git a/test/rsrc/spotify/multiartist_album.json b/test/rsrc/spotify/multiartist_album.json new file mode 100644 index 000000000..9aef25f10 --- /dev/null +++ b/test/rsrc/spotify/multiartist_album.json @@ -0,0 +1,154 @@ +{ + "album_type": "single", + "total_tracks": 1, + "available_markets": [ + "AR", "AU", "AT", "BE", "BO", "BR", "BG", "CA", "CL", "CO", "CR", "CY", + "CZ", "DK", "DO", "DE", "EC", "EE", "SV", "FI", "FR", "GR", "GT", "HN", + "HK", "HU", "IS", "IE", "IT", "LV", "LT", "LU", "MY", "MT", "MX", "NL", + "NZ", "NI", "NO", "PA", "PY", "PE", "PH", "PL", "PT", "SG", "SK", "ES", + "SE", "CH", "TW", "TR", "UY", "US", 
"GB", "AD", "LI", "MC", "ID", "JP", + "TH", "VN", "RO", "IL", "ZA", "SA", "AE", "BH", "QA", "OM", "KW", "EG", + "MA", "DZ", "TN", "LB", "JO", "PS", "IN", "BY", "KZ", "MD", "UA", "AL", + "BA", "HR", "ME", "MK", "RS", "SI", "KR", "BD", "PK", "LK", "GH", "KE", + "NG", "TZ", "UG", "AG", "AM", "BS", "BB", "BZ", "BT", "BW", "BF", "CV", + "CW", "DM", "FJ", "GM", "GE", "GD", "GW", "GY", "HT", "JM", "KI", "LS", + "LR", "MW", "MV", "ML", "MH", "FM", "NA", "NR", "NE", "PW", "PG", "PR", + "WS", "SM", "ST", "SN", "SC", "SL", "SB", "KN", "LC", "VC", "SR", "TL", + "TO", "TT", "TV", "VU", "AZ", "BN", "BI", "KH", "CM", "TD", "KM", "GQ", + "SZ", "GA", "GN", "KG", "LA", "MO", "MR", "MN", "NP", "RW", "TG", "UZ", + "ZW", "BJ", "MG", "MU", "MZ", "AO", "CI", "DJ", "ZM", "CD", "CG", "IQ", + "LY", "TJ", "VE", "ET", "XK" + ], + "external_urls": { + "spotify": "https://open.spotify.com/album/0yhKyyjyKXWUieJ4w1IAEa" + }, + "href": "https://api.spotify.com/v1/albums/0yhKyyjyKXWUieJ4w1IAEa", + "id": "0yhKyyjyKXWUieJ4w1IAEa", + "images": [ + { + "url": "https://i.scdn.co/image/ab67616d0000b2739a26f5e04909c87cead97c77", + "height": 640, + "width": 640 + }, + { + "url": "https://i.scdn.co/image/ab67616d00001e029a26f5e04909c87cead97c77", + "height": 300, + "width": 300 + }, + { + "url": "https://i.scdn.co/image/ab67616d000048519a26f5e04909c87cead97c77", + "height": 64, + "width": 64 + } + ], + "name": "Akiba Night", + "release_date": "2017-12-22", + "release_date_precision": "day", + "type": "album", + "uri": "spotify:album:0yhKyyjyKXWUieJ4w1IAEa", + "artists": [ + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/6m8MRXIVKb6wQaPlBIDMr1" + }, + "href": "https://api.spotify.com/v1/artists/6m8MRXIVKb6wQaPlBIDMr1", + "id": "6m8MRXIVKb6wQaPlBIDMr1", + "name": "Project Skylate", + "type": "artist", + "uri": "spotify:artist:6m8MRXIVKb6wQaPlBIDMr1" + }, + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/4kkAIoQmNT5xEoNH5BuQLe" + }, + "href": 
"https://api.spotify.com/v1/artists/4kkAIoQmNT5xEoNH5BuQLe", + "id": "4kkAIoQmNT5xEoNH5BuQLe", + "name": "Sugar Shrill", + "type": "artist", + "uri": "spotify:artist:4kkAIoQmNT5xEoNH5BuQLe" + } + ], + "tracks": { + "href": "https://api.spotify.com/v1/albums/0yhKyyjyKXWUieJ4w1IAEa/tracks?offset=0&limit=50", + "limit": 50, + "next": null, + "offset": 0, + "previous": null, + "total": 1, + "items": [ + { + "artists": [ + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/6m8MRXIVKb6wQaPlBIDMr1" + }, + "href": "https://api.spotify.com/v1/artists/6m8MRXIVKb6wQaPlBIDMr1", + "id": "12345", + "name": "Foo", + "type": "artist", + "uri": "spotify:artist:6m8MRXIVKb6wQaPlBIDMr1" + }, + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/4kkAIoQmNT5xEoNH5BuQLe" + }, + "href": "https://api.spotify.com/v1/artists/4kkAIoQmNT5xEoNH5BuQLe", + "id": "67890", + "name": "Bar", + "type": "artist", + "uri": "spotify:artist:4kkAIoQmNT5xEoNH5BuQLe" + } + ], + "available_markets": [ + "AR", "AU", "AT", "BE", "BO", "BR", "BG", "CA", "CL", "CO", "CR", + "CY", "CZ", "DK", "DO", "DE", "EC", "EE", "SV", "FI", "FR", "GR", + "GT", "HN", "HK", "HU", "IS", "IE", "IT", "LV", "LT", "LU", "MY", + "MT", "MX", "NL", "NZ", "NI", "NO", "PA", "PY", "PE", "PH", "PL", + "PT", "SG", "SK", "ES", "SE", "CH", "TW", "TR", "UY", "US", "GB", + "AD", "LI", "MC", "ID", "JP", "TH", "VN", "RO", "IL", "ZA", "SA", + "AE", "BH", "QA", "OM", "KW", "EG", "MA", "DZ", "TN", "LB", "JO", + "PS", "IN", "BY", "KZ", "MD", "UA", "AL", "BA", "HR", "ME", "MK", + "RS", "SI", "KR", "BD", "PK", "LK", "GH", "KE", "NG", "TZ", "UG", + "AG", "AM", "BS", "BB", "BZ", "BT", "BW", "BF", "CV", "CW", "DM", + "FJ", "GM", "GE", "GD", "GW", "GY", "HT", "JM", "KI", "LS", "LR", + "MW", "MV", "ML", "MH", "FM", "NA", "NR", "NE", "PW", "PG", "PR", + "WS", "SM", "ST", "SN", "SC", "SL", "SB", "KN", "LC", "VC", "SR", + "TL", "TO", "TT", "TV", "VU", "AZ", "BN", "BI", "KH", "CM", "TD", + "KM", "GQ", "SZ", "GA", "GN", "KG", 
"LA", "MO", "MR", "MN", "NP", + "RW", "TG", "UZ", "ZW", "BJ", "MG", "MU", "MZ", "AO", "CI", "DJ", + "ZM", "CD", "CG", "IQ", "LY", "TJ", "VE", "ET", "XK" + ], + "disc_number": 1, + "duration_ms": 225268, + "explicit": false, + "external_urls": { + "spotify": "https://open.spotify.com/track/6sjZfVJworBX6TqyjkxIJ1" + }, + "href": "https://api.spotify.com/v1/tracks/6sjZfVJworBX6TqyjkxIJ1", + "id": "6sjZfVJworBX6TqyjkxIJ1", + "name": "Akiba Nights", + "preview_url": "https://p.scdn.co/mp3-preview/a1c6c0c71f42caff0b19d988849602fefbf7754a?cid=4e414367a1d14c75a5c5129a627fcab8", + "track_number": 1, + "type": "track", + "uri": "spotify:track:6sjZfVJworBX6TqyjkxIJ1", + "is_local": false + } + ] + }, + "copyrights": [ + { + "text": "2017 Sugar Shrill", + "type": "C" + }, + { + "text": "2017 Project Skylate", + "type": "P" + } + ], + "external_ids": { + "upc": "5057728789361" + }, + "genres": [], + "label": "Project Skylate", + "popularity": 21 +} diff --git a/test/rsrc/spotify/multiartist_track.json b/test/rsrc/spotify/multiartist_track.json new file mode 100644 index 000000000..e77acee9e --- /dev/null +++ b/test/rsrc/spotify/multiartist_track.json @@ -0,0 +1,131 @@ +{ + "album": { + "album_type": "single", + "artists": [ + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/6m8MRXIVKb6wQaPlBIDMr1" + }, + "href": "https://api.spotify.com/v1/artists/6m8MRXIVKb6wQaPlBIDMr1", + "id": "6m8MRXIVKb6wQaPlBIDMr1", + "name": "Project Skylate", + "type": "artist", + "uri": "spotify:artist:6m8MRXIVKb6wQaPlBIDMr1" + }, + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/4kkAIoQmNT5xEoNH5BuQLe" + }, + "href": "https://api.spotify.com/v1/artists/4kkAIoQmNT5xEoNH5BuQLe", + "id": "4kkAIoQmNT5xEoNH5BuQLe", + "name": "Sugar Shrill", + "type": "artist", + "uri": "spotify:artist:4kkAIoQmNT5xEoNH5BuQLe" + } + ], + "available_markets": [ + "AR", "AU", "AT", "BE", "BO", "BR", "BG", "CA", "CL", "CO", "CR", "CY", + "CZ", "DK", "DO", "DE", "EC", "EE", "SV", "FI", 
"FR", "GR", "GT", "HN", + "HK", "HU", "IS", "IE", "IT", "LV", "LT", "LU", "MY", "MT", "MX", "NL", + "NZ", "NI", "NO", "PA", "PY", "PE", "PH", "PL", "PT", "SG", "SK", "ES", + "SE", "CH", "TW", "TR", "UY", "US", "GB", "AD", "LI", "MC", "ID", "JP", + "TH", "VN", "RO", "IL", "ZA", "SA", "AE", "BH", "QA", "OM", "KW", "EG", + "MA", "DZ", "TN", "LB", "JO", "PS", "IN", "BY", "KZ", "MD", "UA", "AL", + "BA", "HR", "ME", "MK", "RS", "SI", "KR", "BD", "PK", "LK", "GH", "KE", + "NG", "TZ", "UG", "AG", "AM", "BS", "BB", "BZ", "BT", "BW", "BF", "CV", + "CW", "DM", "FJ", "GM", "GE", "GD", "GW", "GY", "HT", "JM", "KI", "LS", + "LR", "MW", "MV", "ML", "MH", "FM", "NA", "NR", "NE", "PW", "PG", "PR", + "WS", "SM", "ST", "SN", "SC", "SL", "SB", "KN", "LC", "VC", "SR", "TL", + "TO", "TT", "TV", "VU", "AZ", "BN", "BI", "KH", "CM", "TD", "KM", "GQ", + "SZ", "GA", "GN", "KG", "LA", "MO", "MR", "MN", "NP", "RW", "TG", "UZ", + "ZW", "BJ", "MG", "MU", "MZ", "AO", "CI", "DJ", "ZM", "CD", "CG", "IQ", + "LY", "TJ", "VE", "ET", "XK" + ], + "external_urls": { + "spotify": "https://open.spotify.com/album/0yhKyyjyKXWUieJ4w1IAEa" + }, + "href": "https://api.spotify.com/v1/albums/0yhKyyjyKXWUieJ4w1IAEa", + "id": "0yhKyyjyKXWUieJ4w1IAEa", + "images": [ + { + "url": "https://i.scdn.co/image/ab67616d0000b2739a26f5e04909c87cead97c77", + "width": 640, + "height": 640 + }, + { + "url": "https://i.scdn.co/image/ab67616d00001e029a26f5e04909c87cead97c77", + "width": 300, + "height": 300 + }, + { + "url": "https://i.scdn.co/image/ab67616d000048519a26f5e04909c87cead97c77", + "width": 64, + "height": 64 + } + ], + "name": "Akiba Night", + "release_date": "2017-12-22", + "release_date_precision": "day", + "total_tracks": 1, + "type": "album", + "uri": "spotify:album:0yhKyyjyKXWUieJ4w1IAEa" + }, + "artists": [ + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/6m8MRXIVKb6wQaPlBIDMr1" + }, + "href": "https://api.spotify.com/v1/artists/6m8MRXIVKb6wQaPlBIDMr1", + "id": "12345", + "name": "Foo", + 
"type": "artist", + "uri": "spotify:artist:6m8MRXIVKb6wQaPlBIDMr1" + }, + { + "external_urls": { + "spotify": "https://open.spotify.com/artist/4kkAIoQmNT5xEoNH5BuQLe" + }, + "href": "https://api.spotify.com/v1/artists/4kkAIoQmNT5xEoNH5BuQLe", + "id": "67890", + "name": "Bar", + "type": "artist", + "uri": "spotify:artist:4kkAIoQmNT5xEoNH5BuQLe" + } + ], + "available_markets": [ + "AR", "AU", "AT", "BE", "BO", "BR", "BG", "CA", "CL", "CO", "CR", "CY", + "CZ", "DK", "DO", "DE", "EC", "EE", "SV", "FI", "FR", "GR", "GT", "HN", + "HK", "HU", "IS", "IE", "IT", "LV", "LT", "LU", "MY", "MT", "MX", "NL", + "NZ", "NI", "NO", "PA", "PY", "PE", "PH", "PL", "PT", "SG", "SK", "ES", + "SE", "CH", "TW", "TR", "UY", "US", "GB", "AD", "LI", "MC", "ID", "JP", + "TH", "VN", "RO", "IL", "ZA", "SA", "AE", "BH", "QA", "OM", "KW", "EG", + "MA", "DZ", "TN", "LB", "JO", "PS", "IN", "BY", "KZ", "MD", "UA", "AL", + "BA", "HR", "ME", "MK", "RS", "SI", "KR", "BD", "PK", "LK", "GH", "KE", + "NG", "TZ", "UG", "AG", "AM", "BS", "BB", "BZ", "BT", "BW", "BF", "CV", + "CW", "DM", "FJ", "GM", "GE", "GD", "GW", "GY", "HT", "JM", "KI", "LS", + "LR", "MW", "MV", "ML", "MH", "FM", "NA", "NR", "NE", "PW", "PG", "PR", + "WS", "SM", "ST", "SN", "SC", "SL", "SB", "KN", "LC", "VC", "SR", "TL", + "TO", "TT", "TV", "VU", "AZ", "BN", "BI", "KH", "CM", "TD", "KM", "GQ", + "SZ", "GA", "GN", "KG", "LA", "MO", "MR", "MN", "NP", "RW", "TG", "UZ", + "ZW", "BJ", "MG", "MU", "MZ", "AO", "CI", "DJ", "ZM", "CD", "CG", "IQ", + "LY", "TJ", "VE", "ET", "XK" + ], + "disc_number": 1, + "duration_ms": 225268, + "explicit": false, + "external_ids": { + "isrc": "GB-SMU-45-66095" + }, + "external_urls": { + "spotify": "https://open.spotify.com/track/6sjZfVJworBX6TqyjkxIJ1" + }, + "href": "https://api.spotify.com/v1/tracks/6sjZfVJworBX6TqyjkxIJ1", + "id": "6sjZfVJworBX6TqyjkxIJ1", + "is_local": false, + "name": "Akiba Nights", + "popularity": 29, + "preview_url": 
"https://p.scdn.co/mp3-preview/a1c6c0c71f42caff0b19d988849602fefbf7754a?cid=4e414367a1d14c75a5c5129a627fcab8", + "track_number": 1, + "type": "track", + "uri": "spotify:track:6sjZfVJworBX6TqyjkxIJ1" +} diff --git a/test/test_art_resize.py b/test/test_art_resize.py index 8dd4d0e89..0ccbb0eae 100644 --- a/test/test_art_resize.py +++ b/test/test_art_resize.py @@ -16,6 +16,7 @@ import os import unittest +from pathlib import Path from unittest.mock import patch from beets.test import _common @@ -65,7 +66,7 @@ class ArtResizerFileSizeTest(CleanupModulesMixin, BeetsTestCase): max_filesize=0, ) # check valid path returned - max_filesize hasn't broken resize command - self.assertExists(im_95_qual) + assert Path(os.fsdecode(im_95_qual)).exists() # Attempt a lower filesize with same quality im_a = backend.resize( @@ -74,7 +75,7 @@ class ArtResizerFileSizeTest(CleanupModulesMixin, BeetsTestCase): quality=95, max_filesize=0.9 * os.stat(syspath(im_95_qual)).st_size, ) - self.assertExists(im_a) + assert Path(os.fsdecode(im_a)).exists() # target size was achieved assert ( os.stat(syspath(im_a)).st_size @@ -88,7 +89,7 @@ class ArtResizerFileSizeTest(CleanupModulesMixin, BeetsTestCase): quality=75, max_filesize=0, ) - self.assertExists(im_75_qual) + assert Path(os.fsdecode(im_75_qual)).exists() im_b = backend.resize( 225, @@ -96,7 +97,7 @@ class ArtResizerFileSizeTest(CleanupModulesMixin, BeetsTestCase): quality=95, max_filesize=0.9 * os.stat(syspath(im_75_qual)).st_size, ) - self.assertExists(im_b) + assert Path(os.fsdecode(im_b)).exists() # Check high (initial) quality still gives a smaller filesize assert ( os.stat(syspath(im_b)).st_size @@ -149,9 +150,5 @@ class ArtResizerFileSizeTest(CleanupModulesMixin, BeetsTestCase): metadata = {"a": "A", "b": "B"} im = DummyIMBackend() im.write_metadata("foo", metadata) - try: - command = im.convert_cmd + "foo -set a A -set b B foo".split() - mock_util.command_output.assert_called_once_with(command) - except AssertionError: - command = 
im.convert_cmd + "foo -set b B -set a A foo".split() - mock_util.command_output.assert_called_once_with(command) + command = [*im.convert_cmd, *"foo -set a A -set b B foo".split()] + mock_util.command_output.assert_called_once_with(command) diff --git a/test/test_autotag.py b/test/test_autotag.py index 7f8ed3d2e..119ca15e8 100644 --- a/test/test_autotag.py +++ b/test/test_autotag.py @@ -14,499 +14,23 @@ """Tests for autotagging functionality.""" -import re -import unittest - import pytest from beets import autotag, config from beets.autotag import AlbumInfo, TrackInfo, correct_list_fields, match -from beets.autotag.hooks import Distance, string_dist from beets.library import Item -from beets.test.helper import BeetsTestCase, ConfigMixin -from beets.util import plurality +from beets.test.helper import BeetsTestCase -class PluralityTest(BeetsTestCase): - def test_plurality_consensus(self): - objs = [1, 1, 1, 1] - obj, freq = plurality(objs) - assert obj == 1 - assert freq == 4 - - def test_plurality_near_consensus(self): - objs = [1, 1, 2, 1] - obj, freq = plurality(objs) - assert obj == 1 - assert freq == 3 - - def test_plurality_conflict(self): - objs = [1, 1, 2, 2, 3] - obj, freq = plurality(objs) - assert obj in (1, 2) - assert freq == 2 - - def test_plurality_empty_sequence_raises_error(self): - with pytest.raises(ValueError, match="must be non-empty"): - plurality([]) - - def test_current_metadata_finds_pluralities(self): - items = [ - Item(artist="The Beetles", album="The White Album"), - Item(artist="The Beatles", album="The White Album"), - Item(artist="The Beatles", album="Teh White Album"), - ] - likelies, consensus = match.current_metadata(items) - assert likelies["artist"] == "The Beatles" - assert likelies["album"] == "The White Album" - assert not consensus["artist"] - - def test_current_metadata_artist_consensus(self): - items = [ - Item(artist="The Beatles", album="The White Album"), - Item(artist="The Beatles", album="The White Album"), - 
Item(artist="The Beatles", album="Teh White Album"), - ] - likelies, consensus = match.current_metadata(items) - assert likelies["artist"] == "The Beatles" - assert likelies["album"] == "The White Album" - assert consensus["artist"] - - def test_albumartist_consensus(self): - items = [ - Item(artist="tartist1", album="album", albumartist="aartist"), - Item(artist="tartist2", album="album", albumartist="aartist"), - Item(artist="tartist3", album="album", albumartist="aartist"), - ] - likelies, consensus = match.current_metadata(items) - assert likelies["artist"] == "aartist" - assert not consensus["artist"] - - def test_current_metadata_likelies(self): - fields = [ - "artist", - "album", - "albumartist", - "year", - "disctotal", - "mb_albumid", - "label", - "barcode", - "catalognum", - "country", - "media", - "albumdisambig", - ] - items = [Item(**{f: f"{f}_{i or 1}" for f in fields}) for i in range(5)] - likelies, _ = match.current_metadata(items) - for f in fields: - if isinstance(likelies[f], int): - assert likelies[f] == 0 - else: - assert likelies[f] == f"{f}_1" - - -def _make_item(title, track, artist="some artist"): - return Item( - title=title, - track=track, - artist=artist, - album="some album", - length=1, - mb_trackid="", - mb_albumid="", - mb_artistid="", - ) - - -def _make_trackinfo(): - return [ - TrackInfo( - title="one", track_id=None, artist="some artist", length=1, index=1 - ), - TrackInfo( - title="two", track_id=None, artist="some artist", length=1, index=2 - ), - TrackInfo( - title="three", - track_id=None, - artist="some artist", - length=1, - index=3, - ), - ] - - -def _clear_weights(): - """Hack around the lazy descriptor used to cache weights for - Distance calculations. 
- """ - Distance.__dict__["_weights"].cache = {} - - -class DistanceTest(BeetsTestCase): - def tearDown(self): - super().tearDown() - _clear_weights() - - def test_add(self): - dist = Distance() - dist.add("add", 1.0) - assert dist._penalties == {"add": [1.0]} - - def test_add_equality(self): - dist = Distance() - dist.add_equality("equality", "ghi", ["abc", "def", "ghi"]) - assert dist._penalties["equality"] == [0.0] - - dist.add_equality("equality", "xyz", ["abc", "def", "ghi"]) - assert dist._penalties["equality"] == [0.0, 1.0] - - dist.add_equality("equality", "abc", re.compile(r"ABC", re.I)) - assert dist._penalties["equality"] == [0.0, 1.0, 0.0] - - def test_add_expr(self): - dist = Distance() - dist.add_expr("expr", True) - assert dist._penalties["expr"] == [1.0] - - dist.add_expr("expr", False) - assert dist._penalties["expr"] == [1.0, 0.0] - - def test_add_number(self): - dist = Distance() - # Add a full penalty for each number of difference between two numbers. - - dist.add_number("number", 1, 1) - assert dist._penalties["number"] == [0.0] - - dist.add_number("number", 1, 2) - assert dist._penalties["number"] == [0.0, 1.0] - - dist.add_number("number", 2, 1) - assert dist._penalties["number"] == [0.0, 1.0, 1.0] - - dist.add_number("number", -1, 2) - assert dist._penalties["number"] == [0.0, 1.0, 1.0, 1.0, 1.0, 1.0] - - def test_add_priority(self): - dist = Distance() - dist.add_priority("priority", "abc", "abc") - assert dist._penalties["priority"] == [0.0] - - dist.add_priority("priority", "def", ["abc", "def"]) - assert dist._penalties["priority"] == [0.0, 0.5] - - dist.add_priority( - "priority", "gh", ["ab", "cd", "ef", re.compile("GH", re.I)] - ) - assert dist._penalties["priority"] == [0.0, 0.5, 0.75] - - dist.add_priority("priority", "xyz", ["abc", "def"]) - assert dist._penalties["priority"] == [0.0, 0.5, 0.75, 1.0] - - def test_add_ratio(self): - dist = Distance() - dist.add_ratio("ratio", 25, 100) - assert dist._penalties["ratio"] == [0.25] - - 
dist.add_ratio("ratio", 10, 5) - assert dist._penalties["ratio"] == [0.25, 1.0] - - dist.add_ratio("ratio", -5, 5) - assert dist._penalties["ratio"] == [0.25, 1.0, 0.0] - - dist.add_ratio("ratio", 5, 0) - assert dist._penalties["ratio"] == [0.25, 1.0, 0.0, 0.0] - - def test_add_string(self): - dist = Distance() - sdist = string_dist("abc", "bcd") - dist.add_string("string", "abc", "bcd") - assert dist._penalties["string"] == [sdist] - assert dist._penalties["string"] != [0] - - def test_add_string_none(self): - dist = Distance() - dist.add_string("string", None, "string") - assert dist._penalties["string"] == [1] - - def test_add_string_both_none(self): - dist = Distance() - dist.add_string("string", None, None) - assert dist._penalties["string"] == [0] - - def test_distance(self): - config["match"]["distance_weights"]["album"] = 2.0 - config["match"]["distance_weights"]["medium"] = 1.0 - _clear_weights() - - dist = Distance() - dist.add("album", 0.5) - dist.add("media", 0.25) - dist.add("media", 0.75) - assert dist.distance == 0.5 - - # __getitem__() - assert dist["album"] == 0.25 - assert dist["media"] == 0.25 - - def test_max_distance(self): - config["match"]["distance_weights"]["album"] = 3.0 - config["match"]["distance_weights"]["medium"] = 1.0 - _clear_weights() - - dist = Distance() - dist.add("album", 0.5) - dist.add("medium", 0.0) - dist.add("medium", 0.0) - assert dist.max_distance == 5.0 - - def test_operators(self): - config["match"]["distance_weights"]["source"] = 1.0 - config["match"]["distance_weights"]["album"] = 2.0 - config["match"]["distance_weights"]["medium"] = 1.0 - _clear_weights() - - dist = Distance() - dist.add("source", 0.0) - dist.add("album", 0.5) - dist.add("medium", 0.25) - dist.add("medium", 0.75) - assert len(dist) == 2 - assert list(dist) == [("album", 0.2), ("medium", 0.2)] - assert dist == 0.4 - assert dist < 1.0 - assert dist > 0.0 - assert dist - 0.4 == 0.0 - assert 0.4 - dist == 0.0 - assert float(dist) == 0.4 - - def 
test_raw_distance(self): - config["match"]["distance_weights"]["album"] = 3.0 - config["match"]["distance_weights"]["medium"] = 1.0 - _clear_weights() - - dist = Distance() - dist.add("album", 0.5) - dist.add("medium", 0.25) - dist.add("medium", 0.5) - assert dist.raw_distance == 2.25 - - def test_items(self): - config["match"]["distance_weights"]["album"] = 4.0 - config["match"]["distance_weights"]["medium"] = 2.0 - _clear_weights() - - dist = Distance() - dist.add("album", 0.1875) - dist.add("medium", 0.75) - assert dist.items() == [("medium", 0.25), ("album", 0.125)] - - # Sort by key if distance is equal. - dist = Distance() - dist.add("album", 0.375) - dist.add("medium", 0.75) - assert dist.items() == [("album", 0.25), ("medium", 0.25)] - - def test_update(self): - dist1 = Distance() - dist1.add("album", 0.5) - dist1.add("media", 1.0) - - dist2 = Distance() - dist2.add("album", 0.75) - dist2.add("album", 0.25) - dist2.add("media", 0.05) - - dist1.update(dist2) - - assert dist1._penalties == { - "album": [0.5, 0.75, 0.25], - "media": [1.0, 0.05], - } - - -class TrackDistanceTest(BeetsTestCase): - def test_identical_tracks(self): - item = _make_item("one", 1) - info = _make_trackinfo()[0] - dist = match.track_distance(item, info, incl_artist=True) - assert dist == 0.0 - - def test_different_title(self): - item = _make_item("foo", 1) - info = _make_trackinfo()[0] - dist = match.track_distance(item, info, incl_artist=True) - assert dist != 0.0 - - def test_different_artist(self): - item = _make_item("one", 1) - item.artist = "foo" - info = _make_trackinfo()[0] - dist = match.track_distance(item, info, incl_artist=True) - assert dist != 0.0 - - def test_various_artists_tolerated(self): - item = _make_item("one", 1) - item.artist = "Various Artists" - info = _make_trackinfo()[0] - dist = match.track_distance(item, info, incl_artist=True) - assert dist == 0.0 - - -class AlbumDistanceTest(BeetsTestCase): - def _mapping(self, items, info): - out = {} - for i, t in 
zip(items, info.tracks): - out[i] = t - return out - - def _dist(self, items, info): - return match.distance(items, info, self._mapping(items, info)) - - def test_identical_albums(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - assert self._dist(items, info) == 0 - - def test_incomplete_album(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - dist = self._dist(items, info) - assert dist != 0 - # Make sure the distance is not too great - assert dist < 0.2 - - def test_global_artists_differ(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="someone else", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - assert self._dist(items, info) != 0 - - def test_comp_track_artists_match(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="should be ignored", - album="some album", - tracks=_make_trackinfo(), - va=True, - ) - assert self._dist(items, info) == 0 - - def test_comp_no_track_artists(self): - # Some VA releases don't have track artists (incomplete metadata). 
- items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="should be ignored", - album="some album", - tracks=_make_trackinfo(), - va=True, - ) - info.tracks[0].artist = None - info.tracks[1].artist = None - info.tracks[2].artist = None - assert self._dist(items, info) == 0 - - def test_comp_track_artists_do_not_match(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2, "someone else")) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=True, - ) - assert self._dist(items, info) != 0 - - def test_tracks_out_of_order(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("three", 2)) - items.append(_make_item("two", 3)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - dist = self._dist(items, info) - assert 0 < dist < 0.2 - - def test_two_medium_release(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 3)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - info.tracks[0].medium_index = 1 - info.tracks[1].medium_index = 2 - info.tracks[2].medium_index = 1 - dist = self._dist(items, info) - assert dist == 0 - - def test_per_medium_track_numbers(self): - items = [] - items.append(_make_item("one", 1)) - items.append(_make_item("two", 2)) - items.append(_make_item("three", 1)) - info = AlbumInfo( - artist="some artist", - album="some album", - tracks=_make_trackinfo(), - va=False, - ) - info.tracks[0].medium_index = 1 - info.tracks[1].medium_index = 2 - info.tracks[2].medium_index = 1 - dist = self._dist(items, info) - assert dist == 0 - - -class TestAssignment(ConfigMixin): +class TestAssignment: A = "one" B = "two" C = 
"three" @pytest.fixture(autouse=True) - def _setup_config(self): - self.config["match"]["track_length_grace"] = 10 - self.config["match"]["track_length_max"] = 30 + def config(self, config): + config["match"]["track_length_grace"] = 10 + config["match"]["track_length_max"] = 30 @pytest.mark.parametrize( # 'expected' is a tuple of expected (mapping, extra_items, extra_tracks) @@ -531,10 +55,12 @@ class TestAssignment(ConfigMixin): items = [Item(title=title) for title in item_titles] tracks = [TrackInfo(title=title) for title in track_titles] - mapping, extra_items, extra_tracks = match.assign_items(items, tracks) + item_info_pairs, extra_items, extra_tracks = match.assign_items( + items, tracks + ) assert ( - {i.title: t.title for i, t in mapping.items()}, + {i.title: t.title for i, t in item_info_pairs}, [i.title for i in extra_items], [t.title for t in extra_tracks], ) == (expected_mapping, expected_extra_items, expected_extra_tracks) @@ -581,7 +107,7 @@ class TestAssignment(ConfigMixin): trackinfo.append(info(11, "Beloved One", 243.733)) trackinfo.append(info(12, "In the Lord's Arms", 186.13300000000001)) - expected = dict(zip(items, trackinfo)), [], [] + expected = list(zip(items, trackinfo)), [], [] assert match.assign_items(items, trackinfo) == expected @@ -589,12 +115,10 @@ class TestAssignment(ConfigMixin): class ApplyTestUtil: def _apply(self, info=None, per_disc_numbering=False, artist_credit=False): info = info or self.info - mapping = {} - for i, t in zip(self.items, info.tracks): - mapping[i] = t + item_info_pairs = list(zip(self.items, info.tracks)) config["per_disc_numbering"] = per_disc_numbering config["artist_credit"] = artist_credit - autotag.apply_metadata(info, mapping) + autotag.apply_metadata(info, item_info_pairs) class ApplyTest(BeetsTestCase, ApplyTestUtil): @@ -920,82 +444,6 @@ class ApplyCompilationTest(BeetsTestCase, ApplyTestUtil): assert self.items[1].comp -class StringDistanceTest(unittest.TestCase): - def test_equal_strings(self): - 
dist = string_dist("Some String", "Some String") - assert dist == 0.0 - - def test_different_strings(self): - dist = string_dist("Some String", "Totally Different") - assert dist != 0.0 - - def test_punctuation_ignored(self): - dist = string_dist("Some String", "Some.String!") - assert dist == 0.0 - - def test_case_ignored(self): - dist = string_dist("Some String", "sOME sTring") - assert dist == 0.0 - - def test_leading_the_has_lower_weight(self): - dist1 = string_dist("XXX Band Name", "Band Name") - dist2 = string_dist("The Band Name", "Band Name") - assert dist2 < dist1 - - def test_parens_have_lower_weight(self): - dist1 = string_dist("One .Two.", "One") - dist2 = string_dist("One (Two)", "One") - assert dist2 < dist1 - - def test_brackets_have_lower_weight(self): - dist1 = string_dist("One .Two.", "One") - dist2 = string_dist("One [Two]", "One") - assert dist2 < dist1 - - def test_ep_label_has_zero_weight(self): - dist = string_dist("My Song (EP)", "My Song") - assert dist == 0.0 - - def test_featured_has_lower_weight(self): - dist1 = string_dist("My Song blah Someone", "My Song") - dist2 = string_dist("My Song feat Someone", "My Song") - assert dist2 < dist1 - - def test_postfix_the(self): - dist = string_dist("The Song Title", "Song Title, The") - assert dist == 0.0 - - def test_postfix_a(self): - dist = string_dist("A Song Title", "Song Title, A") - assert dist == 0.0 - - def test_postfix_an(self): - dist = string_dist("An Album Title", "Album Title, An") - assert dist == 0.0 - - def test_empty_strings(self): - dist = string_dist("", "") - assert dist == 0.0 - - def test_solo_pattern(self): - # Just make sure these don't crash. 
- string_dist("The ", "") - string_dist("(EP)", "(EP)") - string_dist(", An", "") - - def test_heuristic_does_not_harm_distance(self): - dist = string_dist("Untitled", "[Untitled]") - assert dist == 0.0 - - def test_ampersand_expansion(self): - dist = string_dist("And", "&") - assert dist == 0.0 - - def test_accented_characters(self): - dist = string_dist("\xe9\xe1\xf1", "ean") - assert dist == 0.0 - - @pytest.mark.parametrize( "single_field,list_field", [ diff --git a/test/test_datequery.py b/test/test_datequery.py index 9c968e998..d73fca45f 100644 --- a/test/test_datequery.py +++ b/test/test_datequery.py @@ -29,122 +29,68 @@ from beets.dbcore.query import ( from beets.test.helper import ItemInDBTestCase -def _date(string): - return datetime.strptime(string, "%Y-%m-%dT%H:%M:%S") +class TestDateInterval: + now = datetime.now().replace(microsecond=0, second=0).isoformat() - -def _datepattern(datetimedate): - return datetimedate.strftime("%Y-%m-%dT%H:%M:%S") - - -class DateIntervalTest(unittest.TestCase): - def test_year_precision_intervals(self): - self.assertContains("2000..2001", "2000-01-01T00:00:00") - self.assertContains("2000..2001", "2001-06-20T14:15:16") - self.assertContains("2000..2001", "2001-12-31T23:59:59") - self.assertExcludes("2000..2001", "1999-12-31T23:59:59") - self.assertExcludes("2000..2001", "2002-01-01T00:00:00") - - self.assertContains("2000..", "2000-01-01T00:00:00") - self.assertContains("2000..", "2099-10-11T00:00:00") - self.assertExcludes("2000..", "1999-12-31T23:59:59") - - self.assertContains("..2001", "2001-12-31T23:59:59") - self.assertExcludes("..2001", "2002-01-01T00:00:00") - - self.assertContains("-1d..1d", _datepattern(datetime.now())) - self.assertExcludes("-2d..-1d", _datepattern(datetime.now())) - - def test_day_precision_intervals(self): - self.assertContains("2000-06-20..2000-06-20", "2000-06-20T00:00:00") - self.assertContains("2000-06-20..2000-06-20", "2000-06-20T10:20:30") - self.assertContains("2000-06-20..2000-06-20", 
"2000-06-20T23:59:59") - self.assertExcludes("2000-06-20..2000-06-20", "2000-06-19T23:59:59") - self.assertExcludes("2000-06-20..2000-06-20", "2000-06-21T00:00:00") - - def test_month_precision_intervals(self): - self.assertContains("1999-12..2000-02", "1999-12-01T00:00:00") - self.assertContains("1999-12..2000-02", "2000-02-15T05:06:07") - self.assertContains("1999-12..2000-02", "2000-02-29T23:59:59") - self.assertExcludes("1999-12..2000-02", "1999-11-30T23:59:59") - self.assertExcludes("1999-12..2000-02", "2000-03-01T00:00:00") - - def test_hour_precision_intervals(self): - # test with 'T' separator - self.assertExcludes( - "2000-01-01T12..2000-01-01T13", "2000-01-01T11:59:59" - ) - self.assertContains( - "2000-01-01T12..2000-01-01T13", "2000-01-01T12:00:00" - ) - self.assertContains( - "2000-01-01T12..2000-01-01T13", "2000-01-01T12:30:00" - ) - self.assertContains( - "2000-01-01T12..2000-01-01T13", "2000-01-01T13:30:00" - ) - self.assertContains( - "2000-01-01T12..2000-01-01T13", "2000-01-01T13:59:59" - ) - self.assertExcludes( - "2000-01-01T12..2000-01-01T13", "2000-01-01T14:00:00" - ) - self.assertExcludes( - "2000-01-01T12..2000-01-01T13", "2000-01-01T14:30:00" - ) - - # test non-range query - self.assertContains("2008-12-01T22", "2008-12-01T22:30:00") - self.assertExcludes("2008-12-01T22", "2008-12-01T23:30:00") - - def test_minute_precision_intervals(self): - self.assertExcludes( - "2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:29:59" - ) - self.assertContains( - "2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:30:00" - ) - self.assertContains( - "2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:30:30" - ) - self.assertContains( - "2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:31:59" - ) - self.assertExcludes( - "2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:32:00" - ) - - def test_second_precision_intervals(self): - self.assertExcludes( - "2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:49" - ) - self.assertContains( - 
"2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:50" - ) - self.assertContains( - "2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:55" - ) - self.assertExcludes( - "2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:56" - ) - - def test_unbounded_endpoints(self): - self.assertContains("..", date=datetime.max) - self.assertContains("..", date=datetime.min) - self.assertContains("..", "1000-01-01T00:00:00") - - def assertContains(self, interval_pattern, date_pattern=None, date=None): - if date is None: - date = _date(date_pattern) - (start, end) = _parse_periods(interval_pattern) + @pytest.mark.parametrize( + "pattern, datestr, include", + [ + # year precision + ("2000..2001", "2000-01-01T00:00:00", True), + ("2000..2001", "2001-06-20T14:15:16", True), + ("2000..2001", "2001-12-31T23:59:59", True), + ("2000..2001", "1999-12-31T23:59:59", False), + ("2000..2001", "2002-01-01T00:00:00", False), + ("2000..", "2000-01-01T00:00:00", True), + ("2000..", "2099-10-11T00:00:00", True), + ("2000..", "1999-12-31T23:59:59", False), + ("..2001", "2001-12-31T23:59:59", True), + ("..2001", "2002-01-01T00:00:00", False), + ("-1d..1d", now, True), + ("-2d..-1d", now, False), + # month precision + ("2000-06-20..2000-06-20", "2000-06-20T00:00:00", True), + ("2000-06-20..2000-06-20", "2000-06-20T10:20:30", True), + ("2000-06-20..2000-06-20", "2000-06-20T23:59:59", True), + ("2000-06-20..2000-06-20", "2000-06-19T23:59:59", False), + ("2000-06-20..2000-06-20", "2000-06-21T00:00:00", False), + # day precision + ("1999-12..2000-02", "1999-12-01T00:00:00", True), + ("1999-12..2000-02", "2000-02-15T05:06:07", True), + ("1999-12..2000-02", "2000-02-29T23:59:59", True), + ("1999-12..2000-02", "1999-11-30T23:59:59", False), + ("1999-12..2000-02", "2000-03-01T00:00:00", False), + # hour precision with 'T' separator + ("2000-01-01T12..2000-01-01T13", "2000-01-01T11:59:59", False), + ("2000-01-01T12..2000-01-01T13", "2000-01-01T12:00:00", True), + 
("2000-01-01T12..2000-01-01T13", "2000-01-01T12:30:00", True), + ("2000-01-01T12..2000-01-01T13", "2000-01-01T13:30:00", True), + ("2000-01-01T12..2000-01-01T13", "2000-01-01T13:59:59", True), + ("2000-01-01T12..2000-01-01T13", "2000-01-01T14:00:00", False), + ("2000-01-01T12..2000-01-01T13", "2000-01-01T14:30:00", False), + # hour precision non-range query + ("2008-12-01T22", "2008-12-01T22:30:00", True), + ("2008-12-01T22", "2008-12-01T23:30:00", False), + # minute precision + ("2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:29:59", False), + ("2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:30:00", True), + ("2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:30:30", True), + ("2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:31:59", True), + ("2000-01-01T12:30..2000-01-01T12:31", "2000-01-01T12:32:00", False), + # second precision + ("2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:49", False), + ("2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:50", True), + ("2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:55", True), + ("2000-01-01T12:30:50..2000-01-01T12:30:55", "2000-01-01T12:30:56", False), # unbounded # noqa: E501 + ("..", datetime.max.isoformat(), True), + ("..", datetime.min.isoformat(), True), + ("..", "1000-01-01T00:00:00", True), + ], + ) # fmt: skip + def test_intervals(self, pattern, datestr, include): + (start, end) = _parse_periods(pattern) interval = DateInterval.from_periods(start, end) - assert interval.contains(date) - - def assertExcludes(self, interval_pattern, date_pattern): - date = _date(date_pattern) - (start, end) = _parse_periods(interval_pattern) - interval = DateInterval.from_periods(start, end) - assert not interval.contains(date) + assert interval.contains(datetime.fromisoformat(datestr)) == include def _parsetime(s): @@ -240,37 +186,37 @@ class DateQueryTestRelativeMore(ItemInDBTestCase): def test_relative(self): for timespan in ["d", "w", "m", "y"]: - query = 
DateQuery("added", "-4" + timespan + "..+4" + timespan) + query = DateQuery("added", f"-4{timespan}..+4{timespan}") matched = self.lib.items(query) assert len(matched) == 1 def test_relative_fail(self): for timespan in ["d", "w", "m", "y"]: - query = DateQuery("added", "-2" + timespan + "..-1" + timespan) + query = DateQuery("added", f"-2{timespan}..-1{timespan}") matched = self.lib.items(query) assert len(matched) == 0 def test_start_relative(self): for timespan in ["d", "w", "m", "y"]: - query = DateQuery("added", "-4" + timespan + "..") + query = DateQuery("added", f"-4{timespan}..") matched = self.lib.items(query) assert len(matched) == 1 def test_start_relative_fail(self): for timespan in ["d", "w", "m", "y"]: - query = DateQuery("added", "4" + timespan + "..") + query = DateQuery("added", f"4{timespan}..") matched = self.lib.items(query) assert len(matched) == 0 def test_end_relative(self): for timespan in ["d", "w", "m", "y"]: - query = DateQuery("added", "..+4" + timespan) + query = DateQuery("added", f"..+4{timespan}") matched = self.lib.items(query) assert len(matched) == 1 def test_end_relative_fail(self): for timespan in ["d", "w", "m", "y"]: - query = DateQuery("added", "..-4" + timespan) + query = DateQuery("added", f"..-4{timespan}") matched = self.lib.items(query) assert len(matched) == 0 diff --git a/test/test_dbcore.py b/test/test_dbcore.py index a4bae97c9..653adf298 100644 --- a/test/test_dbcore.py +++ b/test/test_dbcore.py @@ -23,13 +23,22 @@ from tempfile import mkstemp import pytest from beets import dbcore +from beets.dbcore.db import DBCustomFunctionError from beets.library import LibModel from beets.test import _common +from beets.util import cached_classproperty # Fixture: concrete database and model classes. For migration tests, we # have multiple models with different numbers of fields. 
+@pytest.fixture +def db(model): + db = model(":memory:") + yield db + db._connection().close() + + class SortFixture(dbcore.query.FieldSort): pass @@ -53,15 +62,22 @@ class ModelFixture1(LibModel): "field_one": dbcore.types.INTEGER, "field_two": dbcore.types.STRING, } - _types = { - "some_float_field": dbcore.types.FLOAT, - } + _sorts = { "some_sort": SortFixture, } - _queries = { - "some_query": QueryFixture, - } + + @cached_classproperty + def _types(cls): + return { + "some_float_field": dbcore.types.FLOAT, + } + + @cached_classproperty + def _queries(cls): + return { + "some_query": QueryFixture, + } @classmethod def _getters(cls): @@ -73,7 +89,6 @@ class ModelFixture1(LibModel): class DatabaseFixture1(dbcore.Database): _models = (ModelFixture1,) - pass class ModelFixture2(ModelFixture1): @@ -86,7 +101,6 @@ class ModelFixture2(ModelFixture1): class DatabaseFixture2(dbcore.Database): _models = (ModelFixture2,) - pass class ModelFixture3(ModelFixture1): @@ -100,7 +114,6 @@ class ModelFixture3(ModelFixture1): class DatabaseFixture3(dbcore.Database): _models = (ModelFixture3,) - pass class ModelFixture4(ModelFixture1): @@ -115,7 +128,6 @@ class ModelFixture4(ModelFixture1): class DatabaseFixture4(dbcore.Database): _models = (ModelFixture4,) - pass class AnotherModelFixture(ModelFixture1): @@ -137,12 +149,10 @@ class ModelFixture5(ModelFixture1): class DatabaseFixture5(dbcore.Database): _models = (ModelFixture5,) - pass class DatabaseFixtureTwoModels(dbcore.Database): _models = (ModelFixture2, AnotherModelFixture) - pass class ModelFixtureWithGetters(dbcore.Model): @@ -248,7 +258,7 @@ class TransactionTest(unittest.TestCase): def test_query_no_increase_revision(self): old_rev = self.db.revision with self.db.transaction() as tx: - tx.query("PRAGMA table_info(%s)" % ModelFixture1._table) + tx.query(f"PRAGMA table_info({ModelFixture1._table})") assert self.db.revision == old_rev @@ -776,3 +786,25 @@ class ResultsIteratorTest(unittest.TestCase): 
self.db._fetch(ModelFixture1, dbcore.query.FalseQuery()).get() is None ) + + +class TestException: + @pytest.mark.parametrize("model", [DatabaseFixture1]) + @pytest.mark.filterwarnings( + "ignore: .*plz_raise.*: pytest.PytestUnraisableExceptionWarning" + ) + @pytest.mark.filterwarnings( + "error: .*: pytest.PytestUnraisableExceptionWarning" + ) + def test_custom_function_error(self, db: DatabaseFixture1): + def plz_raise(): + raise Exception("i haz raized") + + db._connection().create_function("plz_raise", 0, plz_raise) + + with db.transaction() as tx: + tx.mutate("insert into test (field_one) values (1)") + + with pytest.raises(DBCustomFunctionError): + with db.transaction() as tx: + tx.query("select * from test where plz_raise()") diff --git a/test/test_files.py b/test/test_files.py index 72b1610c0..d0d93987c 100644 --- a/test/test_files.py +++ b/test/test_files.py @@ -19,6 +19,7 @@ import shutil import stat import unittest from os.path import join +from pathlib import Path import pytest @@ -27,7 +28,7 @@ from beets import util from beets.test import _common from beets.test._common import item, touch from beets.test.helper import NEEDS_REFLINK, BeetsTestCase -from beets.util import MoveOperation, bytestring_path, syspath +from beets.util import MoveOperation, syspath class MoveTest(BeetsTestCase): @@ -35,11 +36,9 @@ class MoveTest(BeetsTestCase): super().setUp() # make a temporary file - self.path = join(self.temp_dir, b"temp.mp3") - shutil.copy( - syspath(join(_common.RSRC, b"full.mp3")), - syspath(self.path), - ) + self.temp_music_file_name = "temp.mp3" + self.path = self.temp_dir_path / self.temp_music_file_name + shutil.copy(self.resource_path, self.path) # add it to a temporary library self.i = beets.library.Item.from_path(self.path) @@ -52,57 +51,57 @@ class MoveTest(BeetsTestCase): self.i.artist = "one" self.i.album = "two" self.i.title = "three" - self.dest = join(self.libdir, b"one", b"two", b"three.mp3") + self.dest = self.lib_path / "one" / "two" / 
"three.mp3" - self.otherdir = join(self.temp_dir, b"testotherdir") + self.otherdir = self.temp_dir_path / "testotherdir" def test_move_arrives(self): self.i.move() - self.assertExists(self.dest) + assert self.dest.exists() def test_move_to_custom_dir(self): - self.i.move(basedir=self.otherdir) - self.assertExists(join(self.otherdir, b"one", b"two", b"three.mp3")) + self.i.move(basedir=os.fsencode(self.otherdir)) + assert (self.otherdir / "one" / "two" / "three.mp3").exists() def test_move_departs(self): self.i.move() - self.assertNotExists(self.path) + assert not self.path.exists() def test_move_in_lib_prunes_empty_dir(self): self.i.move() - old_path = self.i.path - self.assertExists(old_path) + old_path = self.i.filepath + assert old_path.exists() self.i.artist = "newArtist" self.i.move() - self.assertNotExists(old_path) - self.assertNotExists(os.path.dirname(old_path)) + assert not old_path.exists() + assert not old_path.parent.exists() def test_copy_arrives(self): self.i.move(operation=MoveOperation.COPY) - self.assertExists(self.dest) + assert self.dest.exists() def test_copy_does_not_depart(self): self.i.move(operation=MoveOperation.COPY) - self.assertExists(self.path) + assert self.path.exists() def test_reflink_arrives(self): self.i.move(operation=MoveOperation.REFLINK_AUTO) - self.assertExists(self.dest) + assert self.dest.exists() def test_reflink_does_not_depart(self): self.i.move(operation=MoveOperation.REFLINK_AUTO) - self.assertExists(self.path) + assert self.path.exists() @NEEDS_REFLINK def test_force_reflink_arrives(self): self.i.move(operation=MoveOperation.REFLINK) - self.assertExists(self.dest) + assert self.dest.exists() @NEEDS_REFLINK def test_force_reflink_does_not_depart(self): self.i.move(operation=MoveOperation.REFLINK) - self.assertExists(self.path) + assert self.path.exists() def test_move_changes_path(self): self.i.move() @@ -164,14 +163,14 @@ class MoveTest(BeetsTestCase): @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") def 
test_link_arrives(self): self.i.move(operation=MoveOperation.LINK) - self.assertExists(self.dest) + assert self.dest.exists() assert os.path.islink(syspath(self.dest)) - assert bytestring_path(os.readlink(syspath(self.dest))) == self.path + assert self.dest.resolve() == self.path @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") def test_link_does_not_depart(self): self.i.move(operation=MoveOperation.LINK) - self.assertExists(self.path) + assert self.path.exists() @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") def test_link_changes_path(self): @@ -181,7 +180,7 @@ class MoveTest(BeetsTestCase): @unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks") def test_hardlink_arrives(self): self.i.move(operation=MoveOperation.HARDLINK) - self.assertExists(self.dest) + assert self.dest.exists() s1 = os.stat(syspath(self.path)) s2 = os.stat(syspath(self.dest)) assert (s1[stat.ST_INO], s1[stat.ST_DEV]) == ( @@ -192,15 +191,30 @@ class MoveTest(BeetsTestCase): @unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks") def test_hardlink_does_not_depart(self): self.i.move(operation=MoveOperation.HARDLINK) - self.assertExists(self.path) + assert self.path.exists() @unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks") def test_hardlink_changes_path(self): self.i.move(operation=MoveOperation.HARDLINK) assert self.i.path == util.normpath(self.dest) + @unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks") + def test_hardlink_from_symlink(self): + link_path = join(self.temp_dir, b"temp_link.mp3") + link_source = join("./", self.temp_music_file_name) + os.symlink(syspath(link_source), syspath(link_path)) + self.i.path = link_path + self.i.move(operation=MoveOperation.HARDLINK) -class HelperTest(BeetsTestCase): + s1 = os.stat(syspath(self.path)) + s2 = os.stat(syspath(self.dest)) + assert (s1[stat.ST_INO], s1[stat.ST_DEV]) == ( + s2[stat.ST_INO], + s2[stat.ST_DEV], + ) + + +class HelperTest(unittest.TestCase): def 
test_ancestry_works_on_file(self): p = "/a/b/c" a = ["/", "/a", "/a/b"] @@ -264,24 +278,24 @@ class AlbumFileTest(BeetsTestCase): assert b"newAlbumName" in self.i.path def test_albuminfo_move_moves_file(self): - oldpath = self.i.path + oldpath = self.i.filepath self.ai.album = "newAlbumName" self.ai.move() self.ai.store() self.i.load() - self.assertNotExists(oldpath) - self.assertExists(self.i.path) + assert not oldpath.exists() + assert self.i.filepath.exists() def test_albuminfo_move_copies_file(self): - oldpath = self.i.path + oldpath = self.i.filepath self.ai.album = "newAlbumName" self.ai.move(operation=MoveOperation.COPY) self.ai.store() self.i.load() - self.assertExists(oldpath) - self.assertExists(self.i.path) + assert oldpath.exists() + assert self.i.filepath.exists() @NEEDS_REFLINK def test_albuminfo_move_reflinks_file(self): @@ -314,29 +328,30 @@ class ArtFileTest(BeetsTestCase): # Make an album. self.ai = self.lib.add_album((self.i,)) # Make an art file too. - self.art = self.lib.get_album(self.i).art_destination("something.jpg") - touch(self.art) - self.ai.artpath = self.art + art_bytes = self.lib.get_album(self.i).art_destination("something.jpg") + self.art = Path(os.fsdecode(art_bytes)) + self.art.touch() + self.ai.artpath = art_bytes self.ai.store() # Alternate destination dir. 
self.otherdir = os.path.join(self.temp_dir, b"testotherdir") def test_art_deleted_when_items_deleted(self): - self.assertExists(self.art) + assert self.art.exists() self.ai.remove(True) - self.assertNotExists(self.art) + assert not self.art.exists() def test_art_moves_with_album(self): - self.assertExists(self.art) + assert self.art.exists() oldpath = self.i.path self.ai.album = "newAlbum" self.ai.move() self.i.load() assert self.i.path != oldpath - self.assertNotExists(self.art) + assert not self.art.exists() newart = self.lib.get_album(self.i).art_destination(self.art) - self.assertExists(newart) + assert Path(os.fsdecode(newart)).exists() def test_art_moves_with_album_to_custom_dir(self): # Move the album to another directory. @@ -345,10 +360,10 @@ class ArtFileTest(BeetsTestCase): self.i.load() # Art should be in new directory. - self.assertNotExists(self.art) - newart = self.lib.get_album(self.i).artpath - self.assertExists(newart) - assert b"testotherdir" in newart + assert not self.art.exists() + newart = self.lib.get_album(self.i).art_filepath + assert newart.exists() + assert "testotherdir" in str(newart) def test_setart_copies_image(self): util.remove(self.art) @@ -363,7 +378,7 @@ class ArtFileTest(BeetsTestCase): assert ai.artpath is None ai.set_art(newart) - self.assertExists(ai.artpath) + assert ai.art_filepath.exists() def test_setart_to_existing_art_works(self): util.remove(self.art) @@ -380,7 +395,7 @@ class ArtFileTest(BeetsTestCase): # Set the art again. ai.set_art(ai.artpath) - self.assertExists(ai.artpath) + assert ai.art_filepath.exists() def test_setart_to_existing_but_unset_art_works(self): newart = os.path.join(self.libdir, b"newart.jpg") @@ -397,7 +412,7 @@ class ArtFileTest(BeetsTestCase): # Set the art again. 
ai.set_art(artdest) - self.assertExists(ai.artpath) + assert ai.art_filepath.exists() def test_setart_to_conflicting_file_gets_new_path(self): newart = os.path.join(self.libdir, b"newart.jpg") @@ -442,34 +457,34 @@ class ArtFileTest(BeetsTestCase): os.chmod(syspath(ai.artpath), 0o777) def test_move_last_file_moves_albumart(self): - oldartpath = self.lib.albums()[0].artpath - self.assertExists(oldartpath) + oldartpath = self.lib.albums()[0].art_filepath + assert oldartpath.exists() self.ai.album = "different_album" self.ai.store() self.ai.items()[0].move() - artpath = self.lib.albums()[0].artpath - assert b"different_album" in artpath - self.assertExists(artpath) - self.assertNotExists(oldartpath) + artpath = self.lib.albums()[0].art_filepath + assert "different_album" in str(artpath) + assert artpath.exists() + assert not oldartpath.exists() def test_move_not_last_file_does_not_move_albumart(self): i2 = item() i2.albumid = self.ai.id self.lib.add(i2) - oldartpath = self.lib.albums()[0].artpath - self.assertExists(oldartpath) + oldartpath = self.lib.albums()[0].art_filepath + assert oldartpath.exists() self.i.album = "different_album" self.i.album_id = None # detach from album self.i.move() - artpath = self.lib.albums()[0].artpath - assert b"different_album" not in artpath + artpath = self.lib.albums()[0].art_filepath + assert "different_album" not in str(artpath) assert artpath == oldartpath - self.assertExists(oldartpath) + assert oldartpath.exists() class RemoveTest(BeetsTestCase): @@ -486,37 +501,32 @@ class RemoveTest(BeetsTestCase): self.ai = self.lib.add_album((self.i,)) def test_removing_last_item_prunes_empty_dir(self): - parent = os.path.dirname(self.i.path) - self.assertExists(parent) + assert self.i.filepath.parent.exists() self.i.remove(True) - self.assertNotExists(parent) + assert not self.i.filepath.parent.exists() def test_removing_last_item_preserves_nonempty_dir(self): - parent = os.path.dirname(self.i.path) - touch(os.path.join(parent, 
b"dummy.txt")) + (self.i.filepath.parent / "dummy.txt").touch() self.i.remove(True) - self.assertExists(parent) + assert self.i.filepath.parent.exists() def test_removing_last_item_prunes_dir_with_blacklisted_file(self): - parent = os.path.dirname(self.i.path) - touch(os.path.join(parent, b".DS_Store")) + (self.i.filepath.parent / ".DS_Store").touch() self.i.remove(True) - self.assertNotExists(parent) + assert not self.i.filepath.parent.exists() def test_removing_without_delete_leaves_file(self): - path = self.i.path self.i.remove(False) - self.assertExists(path) + assert self.i.filepath.parent.exists() def test_removing_last_item_preserves_library_dir(self): self.i.remove(True) - self.assertExists(self.libdir) + assert self.lib_path.exists() def test_removing_item_outside_of_library_deletes_nothing(self): self.lib.directory = os.path.join(self.temp_dir, b"xxx") - parent = os.path.dirname(self.i.path) self.i.remove(True) - self.assertExists(parent) + assert self.i.filepath.parent.exists() def test_removing_last_item_in_album_with_albumart_prunes_dir(self): artfile = os.path.join(self.temp_dir, b"testart.jpg") @@ -524,55 +534,54 @@ class RemoveTest(BeetsTestCase): self.ai.set_art(artfile) self.ai.store() - parent = os.path.dirname(self.i.path) self.i.remove(True) - self.assertNotExists(parent) + assert not self.i.filepath.parent.exists() -# Tests that we can "delete" nonexistent files. -class SoftRemoveTest(BeetsTestCase): +class FilePathTestCase(BeetsTestCase): def setUp(self): super().setUp() - self.path = os.path.join(self.temp_dir, b"testfile") - touch(self.path) + self.path = self.temp_dir_path / "testfile" + self.path.touch() + +# Tests that we can "delete" nonexistent files. 
+class SoftRemoveTest(FilePathTestCase): def test_soft_remove_deletes_file(self): util.remove(self.path, True) - self.assertNotExists(self.path) + assert not self.path.exists() def test_soft_remove_silent_on_no_file(self): try: - util.remove(self.path + b"XXX", True) + util.remove(self.path / "XXX", True) except OSError: self.fail("OSError when removing path") -class SafeMoveCopyTest(BeetsTestCase): +class SafeMoveCopyTest(FilePathTestCase): def setUp(self): super().setUp() - self.path = os.path.join(self.temp_dir, b"testfile") - touch(self.path) - self.otherpath = os.path.join(self.temp_dir, b"testfile2") - touch(self.otherpath) - self.dest = self.path + b".dest" + self.otherpath = self.temp_dir_path / "testfile2" + self.otherpath.touch() + self.dest = Path(f"{self.path}.dest") def test_successful_move(self): util.move(self.path, self.dest) - self.assertExists(self.dest) - self.assertNotExists(self.path) + assert self.dest.exists() + assert not self.path.exists() def test_successful_copy(self): util.copy(self.path, self.dest) - self.assertExists(self.dest) - self.assertExists(self.path) + assert self.dest.exists() + assert self.path.exists() @NEEDS_REFLINK def test_successful_reflink(self): - util.reflink(self.path, self.dest) - self.assertExists(self.dest) - self.assertExists(self.path) + util.reflink(str(self.path), str(self.dest)) + assert self.dest.exists() + assert self.path.exists() def test_unsuccessful_move(self): with pytest.raises(util.FilesystemError): @@ -588,31 +597,31 @@ class SafeMoveCopyTest(BeetsTestCase): def test_self_move(self): util.move(self.path, self.path) - self.assertExists(self.path) + assert self.path.exists() def test_self_copy(self): util.copy(self.path, self.path) - self.assertExists(self.path) + assert self.path.exists() class PruneTest(BeetsTestCase): def setUp(self): super().setUp() - self.base = os.path.join(self.temp_dir, b"testdir") - os.mkdir(syspath(self.base)) - self.sub = os.path.join(self.base, b"subdir") - 
os.mkdir(syspath(self.sub)) + self.base = self.temp_dir_path / "testdir" + self.base.mkdir() + self.sub = self.base / "subdir" + self.sub.mkdir() def test_prune_existent_directory(self): util.prune_dirs(self.sub, self.base) - self.assertExists(self.base) - self.assertNotExists(self.sub) + assert self.base.exists() + assert not self.sub.exists() def test_prune_nonexistent_directory(self): - util.prune_dirs(os.path.join(self.sub, b"another"), self.base) - self.assertExists(self.base) - self.assertNotExists(self.sub) + util.prune_dirs(self.sub / "another", self.base) + assert self.base.exists() + assert not self.sub.exists() class WalkTest(BeetsTestCase): @@ -678,12 +687,9 @@ class UniquePathTest(BeetsTestCase): class MkDirAllTest(BeetsTestCase): - def test_parent_exists(self): - path = os.path.join(self.temp_dir, b"foo", b"bar", b"baz", b"qux.mp3") - util.mkdirall(path) - self.assertIsDir(os.path.join(self.temp_dir, b"foo", b"bar", b"baz")) - - def test_child_does_not_exist(self): - path = os.path.join(self.temp_dir, b"foo", b"bar", b"baz", b"qux.mp3") - util.mkdirall(path) - self.assertNotExists(path) + def test_mkdirall(self): + child = self.temp_dir_path / "foo" / "bar" / "baz" / "quz.mp3" + util.mkdirall(child) + assert not child.exists() + assert child.parent.exists() + assert child.parent.is_dir() diff --git a/test/test_hidden.py b/test/test_hidden.py index a7e6a1a10..bd974b1cb 100644 --- a/test/test_hidden.py +++ b/test/test_hidden.py @@ -22,7 +22,7 @@ import tempfile import unittest from beets import util -from beets.util import hidden +from beets.util import bytestring_path, hidden class HiddenFileTest(unittest.TestCase): @@ -44,7 +44,7 @@ class HiddenFileTest(unittest.TestCase): else: raise e - assert hidden.is_hidden(f.name) + assert hidden.is_hidden(bytestring_path(f.name)) def test_windows_hidden(self): if not sys.platform == "win32": diff --git a/test/test_importer.py b/test/test_importer.py index a28b646cf..c1768df3e 100644 --- a/test/test_importer.py 
+++ b/test/test_importer.py @@ -15,6 +15,8 @@ """Tests for the general importer functionality.""" +from __future__ import annotations + import os import re import shutil @@ -22,6 +24,7 @@ import stat import sys import unicodedata import unittest +from functools import cached_property from io import StringIO from pathlib import Path from tarfile import TarFile @@ -34,14 +37,16 @@ from mediafile import MediaFile from beets import config, importer, logging, util from beets.autotag import AlbumInfo, AlbumMatch, TrackInfo -from beets.importer import albums_in_dir +from beets.importer.tasks import albums_in_dir from beets.test import _common from beets.test.helper import ( NEEDS_REFLINK, AsIsImporterMixin, + AutotagImportTestCase, AutotagStub, BeetsTestCase, ImportTestCase, + IOMixin, PluginMixin, capture_log, has_program, @@ -49,192 +54,112 @@ from beets.test.helper import ( from beets.util import bytestring_path, displayable_path, syspath -class ScrubbedImportTest(AsIsImporterMixin, PluginMixin, ImportTestCase): - db_on_disk = True - plugin = "scrub" +class PathsMixin: + import_media: list[MediaFile] - def test_tags_not_scrubbed(self): - config["plugins"] = ["scrub"] - config["scrub"]["auto"] = False - config["import"]["write"] = True - for mediafile in self.import_media: - assert mediafile.artist == "Tag Artist" - assert mediafile.album == "Tag Album" - self.run_asis_importer() - for item in self.lib.items(): - imported_file = os.path.join(item.path) - imported_file = MediaFile(imported_file) - assert imported_file.artist == "Tag Artist" - assert imported_file.album == "Tag Album" + @cached_property + def track_import_path(self) -> Path: + return Path(self.import_media[0].path) - def test_tags_restored(self): - config["plugins"] = ["scrub"] - config["scrub"]["auto"] = True - config["import"]["write"] = True - for mediafile in self.import_media: - assert mediafile.artist == "Tag Artist" - assert mediafile.album == "Tag Album" - self.run_asis_importer() - for item in 
self.lib.items(): - imported_file = os.path.join(item.path) - imported_file = MediaFile(imported_file) - assert imported_file.artist == "Tag Artist" - assert imported_file.album == "Tag Album" + @cached_property + def album_path(self) -> Path: + return self.track_import_path.parent - def test_tags_not_restored(self): - config["plugins"] = ["scrub"] - config["scrub"]["auto"] = True - config["import"]["write"] = False - for mediafile in self.import_media: - assert mediafile.artist == "Tag Artist" - assert mediafile.album == "Tag Album" - self.run_asis_importer() - for item in self.lib.items(): - imported_file = os.path.join(item.path) - imported_file = MediaFile(imported_file) - assert imported_file.artist is None - assert imported_file.album is None + @cached_property + def track_lib_path(self): + return self.lib_path / "Tag Artist" / "Tag Album" / "Tag Track 1.mp3" @_common.slow_test() -class NonAutotaggedImportTest(AsIsImporterMixin, ImportTestCase): +class NonAutotaggedImportTest(PathsMixin, AsIsImporterMixin, ImportTestCase): db_on_disk = True def test_album_created_with_track_artist(self): self.run_asis_importer() + albums = self.lib.albums() assert len(albums) == 1 assert albums[0].albumartist == "Tag Artist" def test_import_copy_arrives(self): self.run_asis_importer() - for mediafile in self.import_media: - self.assert_file_in_lib( - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) + + assert self.track_lib_path.exists() def test_threaded_import_copy_arrives(self): config["threaded"] = True self.run_asis_importer() - for mediafile in self.import_media: - self.assert_file_in_lib( - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) + assert self.track_lib_path.exists() def test_import_with_move_deletes_import_files(self): - for mediafile in self.import_media: - self.assertExists(mediafile.path) - self.run_asis_importer(move=True) - for mediafile in self.import_media: - 
self.assertNotExists(mediafile.path) - - def test_import_with_move_prunes_directory_empty(self): - self.assertExists(os.path.join(self.import_dir, b"album")) - self.run_asis_importer(move=True) - self.assertNotExists(os.path.join(self.import_dir, b"album")) - - def test_import_with_move_prunes_with_extra_clutter(self): - self.touch(os.path.join(self.import_dir, b"album", b"alog.log")) + assert self.album_path.exists() + assert self.track_import_path.exists() + (self.album_path / "alog.log").touch() config["clutter"] = ["*.log"] - self.assertExists(os.path.join(self.import_dir, b"album")) self.run_asis_importer(move=True) - self.assertNotExists(os.path.join(self.import_dir, b"album")) + + assert not self.track_import_path.exists() + assert not self.album_path.exists() def test_threaded_import_move_arrives(self): self.run_asis_importer(move=True, threaded=True) - for mediafile in self.import_media: - self.assert_file_in_lib( - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) - def test_threaded_import_move_deletes_import(self): - self.run_asis_importer(move=True, threaded=True) - for mediafile in self.import_media: - self.assertNotExists(mediafile.path) + assert self.track_lib_path.exists() + assert not self.track_import_path.exists() def test_import_without_delete_retains_files(self): self.run_asis_importer(delete=False) - for mediafile in self.import_media: - self.assertExists(mediafile.path) + + assert self.track_import_path.exists() def test_import_with_delete_removes_files(self): self.run_asis_importer(delete=True) - for mediafile in self.import_media: - self.assertNotExists(mediafile.path) - def test_import_with_delete_prunes_directory_empty(self): - self.assertExists(os.path.join(self.import_dir, b"album")) - self.run_asis_importer(delete=True) - self.assertNotExists(os.path.join(self.import_dir, b"album")) + assert not self.album_path.exists() + assert not self.track_import_path.exists() + + def 
test_album_mb_albumartistids(self): + self.run_asis_importer() + album = self.lib.albums()[0] + assert album.mb_albumartistids == album.items()[0].mb_albumartistids @unittest.skipUnless(_common.HAVE_SYMLINK, "need symlinks") def test_import_link_arrives(self): self.run_asis_importer(link=True) - for mediafile in self.import_media: - filename = os.path.join( - self.libdir, - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) - self.assertExists(filename) - assert os.path.islink(syspath(filename)) - self.assert_equal_path( - util.bytestring_path(os.readlink(syspath(filename))), - mediafile.path, - ) + + assert self.track_lib_path.exists() + assert self.track_lib_path.is_symlink() + assert self.track_lib_path.resolve() == self.track_import_path @unittest.skipUnless(_common.HAVE_HARDLINK, "need hardlinks") def test_import_hardlink_arrives(self): self.run_asis_importer(hardlink=True) - for mediafile in self.import_media: - filename = os.path.join( - self.libdir, - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) - self.assertExists(filename) - s1 = os.stat(syspath(mediafile.path)) - s2 = os.stat(syspath(filename)) - assert (s1[stat.ST_INO], s1[stat.ST_DEV]) == ( - s2[stat.ST_INO], - s2[stat.ST_DEV], - ) + + assert self.track_lib_path.exists() + media_stat = self.track_import_path.stat() + lib_media_stat = self.track_lib_path.stat() + assert media_stat[stat.ST_INO] == lib_media_stat[stat.ST_INO] + assert media_stat[stat.ST_DEV] == lib_media_stat[stat.ST_DEV] @NEEDS_REFLINK def test_import_reflink_arrives(self): # Detecting reflinks is currently tricky due to various fs # implementations, we'll just check the file exists. 
self.run_asis_importer(reflink=True) - for mediafile in self.import_media: - self.assert_file_in_lib( - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) + + assert self.track_lib_path.exists() def test_import_reflink_auto_arrives(self): # Should pass regardless of reflink support due to fallback. self.run_asis_importer(reflink="auto") - for mediafile in self.import_media: - self.assert_file_in_lib( - b"Tag Artist", - b"Tag Album", - util.bytestring_path(f"{mediafile.title}.mp3"), - ) + + assert self.track_lib_path.exists() def create_archive(session): - (handle, path) = mkstemp(dir=os.fsdecode(session.temp_dir)) + handle, path = mkstemp(dir=session.temp_dir_path) path = bytestring_path(path) os.close(handle) archive = ZipFile(os.fsdecode(path), mode="w") @@ -259,10 +184,10 @@ class RmTempTest(BeetsTestCase): zip_path = create_archive(self) archive_task = importer.ArchiveImportTask(zip_path) archive_task.extract() - tmp_path = archive_task.toppath - self.assertExists(tmp_path) + tmp_path = Path(os.fsdecode(archive_task.toppath)) + assert tmp_path.exists() archive_task.finalize(self) - self.assertNotExists(tmp_path) + assert not tmp_path.exists() class ImportZipTest(AsIsImporterMixin, ImportTestCase): @@ -306,7 +231,7 @@ class ImportPasswordRarTest(ImportZipTest): return os.path.join(_common.RSRC, b"password.rar") -class ImportSingletonTest(ImportTestCase): +class ImportSingletonTest(AutotagImportTestCase): """Test ``APPLY`` and ``ASIS`` choices for an import session with singletons config set to True. 
""" @@ -315,62 +240,37 @@ class ImportSingletonTest(ImportTestCase): super().setUp() self.prepare_album_for_import(1) self.importer = self.setup_singleton_importer() - self.matcher = AutotagStub().install() - def tearDown(self): - super().tearDown() - self.matcher.restore() - - def test_apply_asis_adds_track(self): - assert self.lib.items().get() is None - - self.importer.add_choice(importer.action.ASIS) + def test_apply_asis_adds_only_singleton_track(self): + self.importer.add_choice(importer.Action.ASIS) self.importer.run() + + # album not added + assert not self.lib.albums() assert self.lib.items().get().title == "Tag Track 1" - - def test_apply_asis_does_not_add_album(self): - assert self.lib.albums().get() is None - - self.importer.add_choice(importer.action.ASIS) - self.importer.run() - assert self.lib.albums().get() is None - - def test_apply_asis_adds_singleton_path(self): - self.assert_lib_dir_empty() - - self.importer.add_choice(importer.action.ASIS) - self.importer.run() - self.assert_file_in_lib(b"singletons", b"Tag Track 1.mp3") + assert (self.lib_path / "singletons" / "Tag Track 1.mp3").exists() def test_apply_candidate_adds_track(self): - assert self.lib.items().get() is None - - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() + + assert not self.lib.albums() assert self.lib.items().get().title == "Applied Track 1" + assert (self.lib_path / "singletons" / "Applied Track 1.mp3").exists() - def test_apply_candidate_does_not_add_album(self): - self.importer.add_choice(importer.action.APPLY) + def test_skip_does_not_add_track(self): + self.importer.add_choice(importer.Action.SKIP) self.importer.run() - assert self.lib.albums().get() is None - def test_apply_candidate_adds_singleton_path(self): - self.assert_lib_dir_empty() + assert not self.lib.items() - self.importer.add_choice(importer.action.APPLY) - self.importer.run() - self.assert_file_in_lib(b"singletons", b"Applied Track 
1.mp3") - - def test_skip_does_not_add_first_track(self): - self.importer.add_choice(importer.action.SKIP) - self.importer.run() - assert self.lib.items().get() is None - - def test_skip_adds_other_tracks(self): + def test_skip_first_add_second_asis(self): self.prepare_album_for_import(2) - self.importer.add_choice(importer.action.SKIP) - self.importer.add_choice(importer.action.ASIS) + + self.importer.add_choice(importer.Action.SKIP) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() + assert len(self.lib.items()) == 1 def test_import_single_files(self): @@ -385,8 +285,8 @@ class ImportSingletonTest(ImportTestCase): self.setup_importer() self.importer.paths = import_files - self.importer.add_choice(importer.action.ASIS) - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() assert len(self.lib.items()) == 2 @@ -406,7 +306,7 @@ class ImportSingletonTest(ImportTestCase): # As-is item import. assert self.lib.albums().get() is None - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() for item in self.lib.items(): @@ -419,9 +319,9 @@ class ImportSingletonTest(ImportTestCase): item.remove() # Autotagged. 
- assert self.lib.albums().get() is None + assert not self.lib.albums() self.importer.clear_choices() - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() for item in self.lib.items(): @@ -432,62 +332,31 @@ class ImportSingletonTest(ImportTestCase): assert item.disc == disc -class ImportTest(ImportTestCase): +class ImportTest(PathsMixin, AutotagImportTestCase): """Test APPLY, ASIS and SKIP choices.""" def setUp(self): super().setUp() self.prepare_album_for_import(1) self.setup_importer() - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.IDENT - def tearDown(self): - super().tearDown() - self.matcher.restore() - - def test_apply_asis_adds_album(self): - assert self.lib.albums().get() is None - - self.importer.add_choice(importer.action.ASIS) + def test_asis_moves_album_and_track(self): + self.importer.add_choice(importer.Action.ASIS) self.importer.run() + assert self.lib.albums().get().album == "Tag Album" + item = self.lib.items().get() + assert item.title == "Tag Track 1" + assert item.filepath.exists() - def test_apply_asis_adds_tracks(self): - assert self.lib.items().get() is None - self.importer.add_choice(importer.action.ASIS) + def test_apply_moves_album_and_track(self): + self.importer.add_choice(importer.Action.APPLY) self.importer.run() - assert self.lib.items().get().title == "Tag Track 1" - def test_apply_asis_adds_album_path(self): - self.assert_lib_dir_empty() - - self.importer.add_choice(importer.action.ASIS) - self.importer.run() - self.assert_file_in_lib(b"Tag Artist", b"Tag Album", b"Tag Track 1.mp3") - - def test_apply_candidate_adds_album(self): - assert self.lib.albums().get() is None - - self.importer.add_choice(importer.action.APPLY) - self.importer.run() assert self.lib.albums().get().album == "Applied Album" - - def test_apply_candidate_adds_tracks(self): - assert self.lib.items().get() is None - - self.importer.add_choice(importer.action.APPLY) - 
self.importer.run() - assert self.lib.items().get().title == "Applied Track 1" - - def test_apply_candidate_adds_album_path(self): - self.assert_lib_dir_empty() - - self.importer.add_choice(importer.action.APPLY) - self.importer.run() - self.assert_file_in_lib( - b"Applied Artist", b"Applied Album", b"Applied Track 1.mp3" - ) + item = self.lib.items().get() + assert item.title == "Applied Track 1" + assert item.filepath.exists() def test_apply_from_scratch_removes_other_metadata(self): config["import"]["from_scratch"] = True @@ -496,14 +365,14 @@ class ImportTest(ImportTestCase): mediafile.genre = "Tag Genre" mediafile.save() - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.items().get().genre == "" def test_apply_from_scratch_keeps_format(self): config["import"]["from_scratch"] = True - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.items().get().format == "MP3" @@ -511,46 +380,46 @@ class ImportTest(ImportTestCase): config["import"]["from_scratch"] = True bitrate = 80000 - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.items().get().bitrate == bitrate def test_apply_with_move_deletes_import(self): + assert self.track_import_path.exists() + config["import"]["move"] = True - - import_file = os.path.join(self.import_dir, b"album", b"track_1.mp3") - self.assertExists(import_file) - - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() - self.assertNotExists(import_file) + + assert not self.track_import_path.exists() def test_apply_with_delete_deletes_import(self): + assert self.track_import_path.exists() + config["import"]["delete"] = True - - import_file = os.path.join(self.import_dir, b"album", b"track_1.mp3") - self.assertExists(import_file) - 
- self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() - self.assertNotExists(import_file) + + assert not self.track_import_path.exists() def test_skip_does_not_add_track(self): - self.importer.add_choice(importer.action.SKIP) + self.importer.add_choice(importer.Action.SKIP) self.importer.run() - assert self.lib.items().get() is None + + assert not self.lib.items() def test_skip_non_album_dirs(self): - self.assertIsDir(os.path.join(self.import_dir, b"album")) + assert (self.import_path / "album").exists() self.touch(b"cruft", dir=self.import_dir) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() + assert len(self.lib.albums()) == 1 def test_unmatched_tracks_not_added(self): self.prepare_album_for_import(2) self.matcher.matching = self.matcher.MISSING - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert len(self.lib.items()) == 1 @@ -577,7 +446,7 @@ class ImportTest(ImportTestCase): def test_asis_no_data_source(self): assert self.lib.items().get() is None - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() with pytest.raises(AttributeError): @@ -599,7 +468,7 @@ class ImportTest(ImportTestCase): # As-is album import. assert self.lib.albums().get() is None - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() for album in self.lib.albums(): @@ -621,7 +490,7 @@ class ImportTest(ImportTestCase): # Autotagged. 
assert self.lib.albums().get() is None self.importer.clear_choices() - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() for album in self.lib.albums(): @@ -639,55 +508,42 @@ class ImportTest(ImportTestCase): assert item.disc == disc -class ImportTracksTest(ImportTestCase): +class ImportTracksTest(AutotagImportTestCase): """Test TRACKS and APPLY choice.""" def setUp(self): super().setUp() self.prepare_album_for_import(1) self.setup_importer() - self.matcher = AutotagStub().install() - - def tearDown(self): - super().tearDown() - self.matcher.restore() def test_apply_tracks_adds_singleton_track(self): - assert self.lib.items().get() is None - assert self.lib.albums().get() is None - - self.importer.add_choice(importer.action.TRACKS) - self.importer.add_choice(importer.action.APPLY) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.TRACKS) + self.importer.add_choice(importer.Action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() + assert self.lib.items().get().title == "Applied Track 1" - assert self.lib.albums().get() is None + assert not self.lib.albums() def test_apply_tracks_adds_singleton_path(self): - self.assert_lib_dir_empty() - - self.importer.add_choice(importer.action.TRACKS) - self.importer.add_choice(importer.action.APPLY) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.TRACKS) + self.importer.add_choice(importer.Action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() - self.assert_file_in_lib(b"singletons", b"Applied Track 1.mp3") + + assert (self.lib_path / "singletons" / "Applied Track 1.mp3").exists() -class ImportCompilationTest(ImportTestCase): +class ImportCompilationTest(AutotagImportTestCase): """Test ASIS import of a folder containing tracks with different artists.""" def setUp(self): super().setUp() self.prepare_album_for_import(3) 
self.setup_importer() - self.matcher = AutotagStub().install() - - def tearDown(self): - super().tearDown() - self.matcher.restore() def test_asis_homogenous_sets_albumartist(self): - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() assert self.lib.albums().get().albumartist == "Tag Artist" for item in self.lib.items(): @@ -699,7 +555,7 @@ class ImportCompilationTest(ImportTestCase): self.import_media[1].artist = "Another Artist" self.import_media[1].save() - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() assert self.lib.albums().get().albumartist == "Various Artists" for item in self.lib.items(): @@ -711,7 +567,7 @@ class ImportCompilationTest(ImportTestCase): self.import_media[1].artist = "Another Artist" self.import_media[1].save() - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() for item in self.lib.items(): assert item.comp @@ -722,7 +578,7 @@ class ImportCompilationTest(ImportTestCase): self.import_media[1].artist = "Other Artist" self.import_media[1].save() - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() assert self.lib.albums().get().albumartist == "Other Artist" for item in self.lib.items(): @@ -736,7 +592,7 @@ class ImportCompilationTest(ImportTestCase): mediafile.mb_albumartistid = "Album Artist ID" mediafile.save() - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) self.importer.run() assert self.lib.albums().get().albumartist == "Album Artist" assert self.lib.albums().get().mb_albumartistid == "Album Artist ID" @@ -755,7 +611,7 @@ class ImportCompilationTest(ImportTestCase): mediafile.mb_albumartistid = "Album Artist ID" mediafile.save() - self.importer.add_choice(importer.action.ASIS) + self.importer.add_choice(importer.Action.ASIS) 
self.importer.run() assert self.lib.albums().get().albumartist == "Album Artist" assert self.lib.albums().get().albumartists == [ @@ -783,13 +639,12 @@ class ImportCompilationTest(ImportTestCase): assert asserted_multi_artists_1 -class ImportExistingTest(ImportTestCase): +class ImportExistingTest(PathsMixin, AutotagImportTestCase): """Test importing files that are already in the library directory.""" def setUp(self): super().setUp() self.prepare_album_for_import(1) - self.matcher = AutotagStub().install() self.reimporter = self.setup_importer(import_dir=self.libdir) self.importer = self.setup_importer() @@ -798,60 +653,45 @@ class ImportExistingTest(ImportTestCase): super().tearDown() self.matcher.restore() - def test_does_not_duplicate_item(self): + @cached_property + def applied_track_path(self) -> Path: + return Path(str(self.track_lib_path).replace("Tag", "Applied")) + + def test_does_not_duplicate_item_nor_album(self): self.importer.run() assert len(self.lib.items()) == 1 - - self.reimporter.add_choice(importer.action.APPLY) - self.reimporter.run() - assert len(self.lib.items()) == 1 - - def test_does_not_duplicate_album(self): - self.importer.run() assert len(self.lib.albums()) == 1 - self.reimporter.add_choice(importer.action.APPLY) + self.reimporter.add_choice(importer.Action.APPLY) self.reimporter.run() + + assert len(self.lib.items()) == 1 assert len(self.lib.albums()) == 1 def test_does_not_duplicate_singleton_track(self): - self.importer.add_choice(importer.action.TRACKS) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.TRACKS) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert len(self.lib.items()) == 1 - self.reimporter.add_choice(importer.action.TRACKS) - self.reimporter.add_choice(importer.action.APPLY) + self.reimporter.add_choice(importer.Action.TRACKS) + self.reimporter.add_choice(importer.Action.APPLY) self.reimporter.run() assert len(self.lib.items()) == 1 - def 
test_asis_updates_metadata(self): + def test_asis_updates_metadata_and_moves_file(self): self.importer.run() + medium = MediaFile(self.lib.items().get().path) medium.title = "New Title" medium.save() - self.reimporter.add_choice(importer.action.ASIS) + self.reimporter.add_choice(importer.Action.ASIS) self.reimporter.run() + assert self.lib.items().get().title == "New Title" - - def test_asis_updated_moves_file(self): - self.importer.run() - medium = MediaFile(self.lib.items().get().path) - medium.title = "New Title" - medium.save() - - old_path = os.path.join( - b"Applied Artist", b"Applied Album", b"Applied Track 1.mp3" - ) - self.assert_file_in_lib(old_path) - - self.reimporter.add_choice(importer.action.ASIS) - self.reimporter.run() - self.assert_file_in_lib( - b"Applied Artist", b"Applied Album", b"New Title.mp3" - ) - self.assert_file_not_in_lib(old_path) + assert not self.applied_track_path.exists() + assert self.applied_track_path.with_name("New Title.mp3").exists() def test_asis_updated_without_copy_does_not_move_file(self): self.importer.run() @@ -859,67 +699,38 @@ class ImportExistingTest(ImportTestCase): medium.title = "New Title" medium.save() - old_path = os.path.join( - b"Applied Artist", b"Applied Album", b"Applied Track 1.mp3" - ) - self.assert_file_in_lib(old_path) - config["import"]["copy"] = False - self.reimporter.add_choice(importer.action.ASIS) + self.reimporter.add_choice(importer.Action.ASIS) self.reimporter.run() - self.assert_file_not_in_lib( - b"Applied Artist", b"Applied Album", b"New Title.mp3" - ) - self.assert_file_in_lib(old_path) + + assert self.applied_track_path.exists() + assert not self.applied_track_path.with_name("New Title.mp3").exists() def test_outside_file_is_copied(self): config["import"]["copy"] = False self.importer.run() - self.assert_equal_path( - self.lib.items().get().path, self.import_media[0].path - ) + assert self.lib.items().get().filepath == self.track_import_path self.reimporter = self.setup_importer() - 
self.reimporter.add_choice(importer.action.APPLY) + self.reimporter.add_choice(importer.Action.APPLY) self.reimporter.run() - new_path = os.path.join( - b"Applied Artist", b"Applied Album", b"Applied Track 1.mp3" - ) - self.assert_file_in_lib(new_path) - self.assert_equal_path( - self.lib.items().get().path, os.path.join(self.libdir, new_path) - ) - - def test_outside_file_is_moved(self): - config["import"]["copy"] = False - self.importer.run() - self.assert_equal_path( - self.lib.items().get().path, self.import_media[0].path - ) - - self.reimporter = self.setup_importer(move=True) - self.reimporter.add_choice(importer.action.APPLY) - self.reimporter.run() - self.assertNotExists(self.import_media[0].path) + assert self.applied_track_path.exists() + assert self.lib.items().get().filepath == self.applied_track_path -class GroupAlbumsImportTest(ImportTestCase): +class GroupAlbumsImportTest(AutotagImportTestCase): + matching = AutotagStub.NONE + def setUp(self): super().setUp() self.prepare_album_for_import(3) - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.NONE self.setup_importer() # Split tracks into two albums and use both as-is - self.importer.add_choice(importer.action.ALBUMS) - self.importer.add_choice(importer.action.ASIS) - self.importer.add_choice(importer.action.ASIS) - - def tearDown(self): - super().tearDown() - self.matcher.restore() + self.importer.add_choice(importer.Action.ALBUMS) + self.importer.add_choice(importer.Action.ASIS) + self.importer.add_choice(importer.Action.ASIS) def test_add_album_for_different_artist_and_different_album(self): self.import_media[0].artist = "Artist B" @@ -972,21 +783,17 @@ class GlobalGroupAlbumsImportTest(GroupAlbumsImportTest): def setUp(self): super().setUp() self.importer.clear_choices() - self.importer.default_choice = importer.action.ASIS + self.importer.default_choice = importer.Action.ASIS config["import"]["group_albums"] = True -class ChooseCandidateTest(ImportTestCase): +class 
ChooseCandidateTest(AutotagImportTestCase): + matching = AutotagStub.BAD + def setUp(self): super().setUp() self.prepare_album_for_import(1) self.setup_importer() - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.BAD - - def tearDown(self): - super().tearDown() - self.matcher.restore() def test_choose_first_candidate(self): self.importer.add_choice(1) @@ -999,7 +806,7 @@ class ChooseCandidateTest(ImportTestCase): assert self.lib.albums().get().album == "Applied Album MM" -class InferAlbumDataTest(BeetsTestCase): +class InferAlbumDataTest(unittest.TestCase): def setUp(self): super().setUp() @@ -1019,7 +826,7 @@ class InferAlbumDataTest(BeetsTestCase): ) def test_asis_homogenous_single_artist(self): - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() assert not self.items[0].comp assert self.items[0].albumartist == self.items[2].artist @@ -1027,7 +834,7 @@ class InferAlbumDataTest(BeetsTestCase): def test_asis_heterogenous_va(self): self.items[0].artist = "another artist" self.items[1].artist = "some other artist" - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() @@ -1037,7 +844,7 @@ class InferAlbumDataTest(BeetsTestCase): def test_asis_comp_applied_to_all_items(self): self.items[0].artist = "another artist" self.items[1].artist = "some other artist" - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() @@ -1047,7 +854,7 @@ class InferAlbumDataTest(BeetsTestCase): def test_asis_majority_artist_single_artist(self): self.items[0].artist = "another artist" - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() @@ -1060,7 +867,7 @@ class InferAlbumDataTest(BeetsTestCase): for item in self.items: item.albumartist = "some album artist" 
item.mb_albumartistid = "some album artist id" - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() @@ -1089,31 +896,29 @@ class InferAlbumDataTest(BeetsTestCase): def test_small_single_artist_album(self): self.items = [self.items[0]] self.task.items = self.items - self.task.set_choice(importer.action.ASIS) + self.task.set_choice(importer.Action.ASIS) self.task.align_album_level_fields() assert not self.items[0].comp -def match_album_mock(*args, **kwargs): +def album_candidates_mock(*args, **kwargs): """Create an AlbumInfo object for testing.""" - track_info = TrackInfo( - title="new title", - track_id="trackid", - index=0, - ) - album_info = AlbumInfo( + yield AlbumInfo( artist="artist", album="album", - tracks=[track_info], + tracks=[TrackInfo(title="new title", track_id="trackid", index=0)], album_id="albumid", artist_id="artistid", flex="flex", ) - return iter([album_info]) -@patch("beets.autotag.mb.match_album", Mock(side_effect=match_album_mock)) -class ImportDuplicateAlbumTest(ImportTestCase): +@patch( + "beets.metadata_plugins.candidates", Mock(side_effect=album_candidates_mock) +) +class ImportDuplicateAlbumTest(PluginMixin, ImportTestCase): + plugin = "musicbrainz" + def setUp(self): super().setUp() @@ -1129,12 +934,12 @@ class ImportDuplicateAlbumTest(ImportTestCase): def test_remove_duplicate_album(self): item = self.lib.items().get() assert item.title == "t\xeftle 0" - self.assertExists(item.path) + assert item.filepath.exists() self.importer.default_resolution = self.importer.Resolution.REMOVE self.importer.run() - self.assertNotExists(item.path) + assert not item.filepath.exists() assert len(self.lib.albums()) == 1 assert len(self.lib.items()) == 1 item = self.lib.items().get() @@ -1144,7 +949,7 @@ class ImportDuplicateAlbumTest(ImportTestCase): config["import"]["autotag"] = False item = self.lib.items().get() assert item.title == "t\xeftle 0" - self.assertExists(item.path) + 
assert item.filepath.exists() # Imported item has the same artist and album as the one in the # library. @@ -1160,7 +965,7 @@ class ImportDuplicateAlbumTest(ImportTestCase): self.importer.default_resolution = self.importer.Resolution.REMOVE self.importer.run() - self.assertExists(item.path) + assert item.filepath.exists() assert len(self.lib.albums()) == 2 assert len(self.lib.items()) == 2 @@ -1219,20 +1024,19 @@ class ImportDuplicateAlbumTest(ImportTestCase): return album -def match_track_mock(*args, **kwargs): - return iter( - [ - TrackInfo( - artist="artist", - title="title", - track_id="new trackid", - index=0, - ) - ] +def item_candidates_mock(*args, **kwargs): + yield TrackInfo( + artist="artist", + title="title", + track_id="new trackid", + index=0, ) -@patch("beets.autotag.mb.match_track", Mock(side_effect=match_track_mock)) +@patch( + "beets.metadata_plugins.item_candidates", + Mock(side_effect=item_candidates_mock), +) class ImportDuplicateSingletonTest(ImportTestCase): def setUp(self): super().setUp() @@ -1251,12 +1055,12 @@ class ImportDuplicateSingletonTest(ImportTestCase): def test_remove_duplicate(self): item = self.lib.items().get() assert item.mb_trackid == "old trackid" - self.assertExists(item.path) + assert item.filepath.exists() self.importer.default_resolution = self.importer.Resolution.REMOVE self.importer.run() - self.assertNotExists(item.path) + assert not item.filepath.exists() assert len(self.lib.items()) == 1 item = self.lib.items().get() assert item.mb_trackid == "new trackid" @@ -1303,7 +1107,7 @@ class ImportDuplicateSingletonTest(ImportTestCase): return item -class TagLogTest(BeetsTestCase): +class TagLogTest(unittest.TestCase): def test_tag_log_line(self): sio = StringIO() handler = logging.StreamHandler(sio) @@ -1566,7 +1370,7 @@ class MultiDiscAlbumsInDirTest(BeetsTestCase): assert len(items) == 3 -class ReimportTest(ImportTestCase): +class ReimportTest(AutotagImportTestCase): """Test "re-imports", in which the autotagging 
machinery is used for music that's already in the library. @@ -1575,6 +1379,8 @@ class ReimportTest(ImportTestCase): attributes and the added date. """ + matching = AutotagStub.GOOD + def setUp(self): super().setUp() @@ -1589,17 +1395,9 @@ class ReimportTest(ImportTestCase): item.added = 4747.0 item.store() - # Set up an import pipeline with a "good" match. - self.matcher = AutotagStub().install() - self.matcher.matching = AutotagStub.GOOD - - def tearDown(self): - super().tearDown() - self.matcher.restore() - def _setup_session(self, singletons=False): self.setup_importer(import_dir=self.libdir, singletons=singletons) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) def _album(self): return self.lib.albums().get() @@ -1655,14 +1453,14 @@ class ReimportTest(ImportTestCase): replaced_album = self._album() replaced_album.set_art(art_source) replaced_album.store() - old_artpath = replaced_album.artpath + old_artpath = replaced_album.art_filepath self.importer.run() new_album = self._album() new_artpath = new_album.art_destination(art_source) assert new_album.artpath == new_artpath - self.assertExists(new_artpath) + assert new_album.art_filepath.exists() if new_artpath != old_artpath: - self.assertNotExists(old_artpath) + assert not old_artpath.exists() def test_reimported_album_has_new_flexattr(self): self._setup_session() @@ -1672,27 +1470,20 @@ class ReimportTest(ImportTestCase): def test_reimported_album_not_preserves_flexattr(self): self._setup_session() - assert self._album().data_source == "original_source" + self.importer.run() assert self._album().data_source == "match_source" -class ImportPretendTest(ImportTestCase): +class ImportPretendTest(IOMixin, AutotagImportTestCase): """Test the pretend commandline option""" def setUp(self): super().setUp() - self.matcher = AutotagStub().install() - self.io.install() - self.album_track_path = self.prepare_album_for_import(1)[0] self.single_path = 
self.prepare_track_for_import(2, self.import_path) self.album_path = self.album_track_path.parent - def tearDown(self): - super().tearDown() - self.matcher.restore() - def __run(self, importer): with capture_log() as logs: importer.run() @@ -1701,6 +1492,7 @@ class ImportPretendTest(ImportTestCase): assert len(self.lib.albums()) == 0 return [line for line in logs if not line.startswith("Sending event:")] + assert self._album().data_source == "original_source" def test_import_singletons_pretend(self): assert self.__run(self.setup_singleton_importer(pretend=True)) == [ @@ -1717,7 +1509,7 @@ class ImportPretendTest(ImportTestCase): ] def test_import_pretend_empty(self): - empty_path = Path(os.fsdecode(self.temp_dir)) / "empty" + empty_path = self.temp_dir_path / "empty" empty_path.mkdir() importer = self.setup_importer(pretend=True, import_dir=empty_path) @@ -1725,112 +1517,70 @@ class ImportPretendTest(ImportTestCase): assert self.__run(importer) == [f"No files imported from {empty_path}"] -# Helpers for ImportMusicBrainzIdTest. +def mocked_get_album_by_id(id_): + """Return album candidate for the given id. - -def mocked_get_release_by_id( - id_, includes=[], release_status=[], release_type=[] -): - """Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list - of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in - the release title and artist name, so that ID_RELEASE_0 is a closer match - to the items created by ImportHelper.prepare_album_for_import().""" + The two albums differ only in the release title and artist name, so that + ID_RELEASE_0 is a closer match to the items created by + ImportHelper.prepare_album_for_import(). + """ # Map IDs to (release title, artist), so the distances are different. 
- releases = { - ImportMusicBrainzIdTest.ID_RELEASE_0: ("VALID_RELEASE_0", "TAG ARTIST"), - ImportMusicBrainzIdTest.ID_RELEASE_1: ( - "VALID_RELEASE_1", - "DISTANT_MATCH", - ), - } + album, artist = { + ImportIdTest.ID_RELEASE_0: ("VALID_RELEASE_0", "TAG ARTIST"), + ImportIdTest.ID_RELEASE_1: ("VALID_RELEASE_1", "DISTANT_MATCH"), + }[id_] - return { - "release": { - "title": releases[id_][0], - "id": id_, - "medium-list": [ - { - "track-list": [ - { - "id": "baz", - "recording": { - "title": "foo", - "id": "bar", - "length": 59, - }, - "position": 9, - "number": "A2", - } - ], - "position": 5, - } - ], - "artist-credit": [ - { - "artist": { - "name": releases[id_][1], - "id": "some-id", - }, - } - ], - "release-group": { - "id": "another-id", - }, - "status": "Official", - } - } + return AlbumInfo( + album_id=id_, + album=album, + artist_id="some-id", + artist=artist, + albumstatus="Official", + tracks=[ + TrackInfo( + track_id="bar", + title="foo", + artist_id="some-id", + artist=artist, + length=59, + index=9, + track_allt="A2", + ) + ], + ) -def mocked_get_recording_by_id( - id_, includes=[], release_status=[], release_type=[] -): - """Mimic musicbrainzngs.get_recording_by_id, accepting only a restricted - list of MB ids (ID_RECORDING_0, ID_RECORDING_1). The returned dict differs - only in the recording title and artist name, so that ID_RECORDING_0 is a - closer match to the items created by ImportHelper.prepare_album_for_import(). +def mocked_get_track_by_id(id_): + """Return track candidate for the given id. + + The two tracks differ only in the release title and artist name, so that + ID_RELEASE_0 is a closer match to the items created by + ImportHelper.prepare_album_for_import(). """ # Map IDs to (recording title, artist), so the distances are different. 
- releases = { - ImportMusicBrainzIdTest.ID_RECORDING_0: ( - "VALID_RECORDING_0", - "TAG ARTIST", - ), - ImportMusicBrainzIdTest.ID_RECORDING_1: ( - "VALID_RECORDING_1", - "DISTANT_MATCH", - ), - } + title, artist = { + ImportIdTest.ID_RECORDING_0: ("VALID_RECORDING_0", "TAG ARTIST"), + ImportIdTest.ID_RECORDING_1: ("VALID_RECORDING_1", "DISTANT_MATCH"), + }[id_] - return { - "recording": { - "title": releases[id_][0], - "id": id_, - "length": 59, - "artist-credit": [ - { - "artist": { - "name": releases[id_][1], - "id": "some-id", - }, - } - ], - } - } + return TrackInfo( + track_id=id_, + title=title, + artist_id="some-id", + artist=artist, + length=59, + ) @patch( - "musicbrainzngs.get_recording_by_id", - Mock(side_effect=mocked_get_recording_by_id), + "beets.metadata_plugins.track_for_id", + Mock(side_effect=mocked_get_track_by_id), ) @patch( - "musicbrainzngs.get_release_by_id", - Mock(side_effect=mocked_get_release_by_id), + "beets.metadata_plugins.album_for_id", + Mock(side_effect=mocked_get_album_by_id), ) -class ImportMusicBrainzIdTest(ImportTestCase): - """Test the --musicbrainzid argument.""" - - MB_RELEASE_PREFIX = "https://musicbrainz.org/release/" - MB_RECORDING_PREFIX = "https://musicbrainz.org/recording/" +class ImportIdTest(ImportTestCase): ID_RELEASE_0 = "00000000-0000-0000-0000-000000000000" ID_RELEASE_1 = "11111111-1111-1111-1111-111111111111" ID_RECORDING_0 = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" @@ -1841,46 +1591,34 @@ class ImportMusicBrainzIdTest(ImportTestCase): self.prepare_album_for_import(1) def test_one_mbid_one_album(self): - self.setup_importer( - search_ids=[self.MB_RELEASE_PREFIX + self.ID_RELEASE_0] - ) + self.setup_importer(search_ids=[self.ID_RELEASE_0]) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.albums().get().album == "VALID_RELEASE_0" def test_several_mbid_one_album(self): - self.setup_importer( - search_ids=[ - self.MB_RELEASE_PREFIX 
+ self.ID_RELEASE_0, - self.MB_RELEASE_PREFIX + self.ID_RELEASE_1, - ] - ) + self.setup_importer(search_ids=[self.ID_RELEASE_0, self.ID_RELEASE_1]) self.importer.add_choice(2) # Pick the 2nd best match (release 1). - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.albums().get().album == "VALID_RELEASE_1" def test_one_mbid_one_singleton(self): - self.setup_singleton_importer( - search_ids=[self.MB_RECORDING_PREFIX + self.ID_RECORDING_0] - ) + self.setup_singleton_importer(search_ids=[self.ID_RECORDING_0]) - self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.items().get().title == "VALID_RECORDING_0" def test_several_mbid_one_singleton(self): self.setup_singleton_importer( - search_ids=[ - self.MB_RECORDING_PREFIX + self.ID_RECORDING_0, - self.MB_RECORDING_PREFIX + self.ID_RECORDING_1, - ] + search_ids=[self.ID_RECORDING_0, self.ID_RECORDING_1] ) self.importer.add_choice(2) # Pick the 2nd best match (recording 1). 
- self.importer.add_choice(importer.action.APPLY) + self.importer.add_choice(importer.Action.APPLY) self.importer.run() assert self.lib.items().get().title == "VALID_RECORDING_1" @@ -1889,13 +1627,9 @@ class ImportMusicBrainzIdTest(ImportTestCase): task = importer.ImportTask( paths=self.import_dir, toppath="top path", items=[_common.item()] ) - task.search_ids = [ - self.MB_RELEASE_PREFIX + self.ID_RELEASE_0, - self.MB_RELEASE_PREFIX + self.ID_RELEASE_1, - "an invalid and discarded id", - ] - task.lookup_candidates() + task.lookup_candidates([self.ID_RELEASE_0, self.ID_RELEASE_1]) + assert {"VALID_RELEASE_0", "VALID_RELEASE_1"} == { c.info.album for c in task.candidates } @@ -1905,13 +1639,9 @@ class ImportMusicBrainzIdTest(ImportTestCase): task = importer.SingletonImportTask( toppath="top path", item=_common.item() ) - task.search_ids = [ - self.MB_RECORDING_PREFIX + self.ID_RECORDING_0, - self.MB_RECORDING_PREFIX + self.ID_RECORDING_1, - "an invalid and discarded id", - ] - task.lookup_candidates() + task.lookup_candidates([self.ID_RECORDING_0, self.ID_RECORDING_1]) + assert {"VALID_RECORDING_0", "VALID_RECORDING_1"} == { c.info.title for c in task.candidates } diff --git a/test/test_library.py b/test/test_library.py index b5e6d4eeb..7c0529001 100644 --- a/test/test_library.py +++ b/test/test_library.py @@ -19,22 +19,22 @@ import os.path import re import shutil import stat -import sys -import time import unicodedata import unittest +from unittest.mock import patch import pytest from mediafile import MediaFile, UnreadableFileError import beets.dbcore.query import beets.library +import beets.logging as blog from beets import config, plugins, util from beets.library import Album from beets.test import _common from beets.test._common import item -from beets.test.helper import BeetsTestCase, ItemInDBTestCase -from beets.util import bytestring_path, syspath +from beets.test.helper import BeetsTestCase, ItemInDBTestCase, capture_log +from beets.util import as_string, 
bytestring_path, normpath, syspath # Shortcut to path normalization. np = util.normpath @@ -126,6 +126,25 @@ class AddTest(BeetsTestCase): ) assert new_grouping == self.i.grouping + def test_library_add_one_database_change_event(self): + """Test library.add emits only one database_change event.""" + self.item = _common.item() + self.item.path = beets.util.normpath( + os.path.join( + self.temp_dir, + b"a", + b"b.mp3", + ) + ) + self.item.album = "a" + self.item.title = "b" + + blog.getLogger("beets").set_global_level(blog.DEBUG) + with capture_log() as logs: + self.lib.add(self.item) + + assert logs.count("Sending event: database_change") == 1 + class RemoveTest(ItemInDBTestCase): def test_remove_deletes_from_db(self): @@ -175,7 +194,7 @@ class DestinationTest(BeetsTestCase): def create_temp_dir(self, **kwargs): kwargs["prefix"] = "." - super().create_temp_dir(**kwargs) + return super().create_temp_dir(**kwargs) def setUp(self): super().setUp() @@ -411,33 +430,23 @@ class DestinationTest(BeetsTestCase): def test_unicode_normalized_nfd_on_mac(self): instr = unicodedata.normalize("NFC", "caf\xe9") self.lib.path_formats = [("default", instr)] - dest = self.i.destination(platform="darwin", fragment=True) - assert dest == unicodedata.normalize("NFD", instr) + with patch("sys.platform", "darwin"): + dest = self.i.destination(relative_to_libdir=True) + assert as_string(dest) == unicodedata.normalize("NFD", instr) def test_unicode_normalized_nfc_on_linux(self): instr = unicodedata.normalize("NFD", "caf\xe9") self.lib.path_formats = [("default", instr)] - dest = self.i.destination(platform="linux", fragment=True) - assert dest == unicodedata.normalize("NFC", instr) - - def test_non_mbcs_characters_on_windows(self): - oldfunc = sys.getfilesystemencoding - sys.getfilesystemencoding = lambda: "mbcs" - try: - self.i.title = "h\u0259d" - self.lib.path_formats = [("default", "$title")] - p = self.i.destination() - assert b"?" not in p - # We use UTF-8 to encode Windows paths now. 
- assert "h\u0259d".encode() in p - finally: - sys.getfilesystemencoding = oldfunc + with patch("sys.platform", "linux"): + dest = self.i.destination(relative_to_libdir=True) + assert as_string(dest) == unicodedata.normalize("NFC", instr) def test_unicode_extension_in_fragment(self): self.lib.path_formats = [("default", "foo")] self.i.path = util.bytestring_path("bar.caf\xe9") - dest = self.i.destination(platform="linux", fragment=True) - assert dest == "foo.caf\xe9" + with patch("sys.platform", "linux"): + dest = self.i.destination(relative_to_libdir=True) + assert as_string(dest) == "foo.caf\xe9" def test_asciify_and_replace(self): config["asciify_paths"] = True @@ -462,17 +471,6 @@ class DestinationTest(BeetsTestCase): self.i.album = "bar" assert self.i.destination() == np("base/ber/foo") - def test_destination_with_replacements_argument(self): - self.lib.directory = b"base" - self.lib.replacements = [(re.compile(r"a"), "f")] - self.lib.path_formats = [("default", "$album/$title")] - self.i.title = "foo" - self.i.album = "bar" - replacements = [(re.compile(r"a"), "e")] - assert self.i.destination(replacements=replacements) == np( - "base/ber/foo" - ) - @unittest.skip("unimplemented: #359") def test_destination_with_empty_component(self): self.lib.directory = b"base" @@ -494,37 +492,6 @@ class DestinationTest(BeetsTestCase): self.i.path = "foo.mp3" assert self.i.destination() == np("base/one/_.mp3") - def test_legalize_path_one_for_one_replacement(self): - # Use a replacement that should always replace the last X in any - # path component with a Z. - self.lib.replacements = [ - (re.compile(r"X$"), "Z"), - ] - - # Construct an item whose untruncated path ends with a Y but whose - # truncated version ends with an X. - self.i.title = "X" * 300 + "Y" - - # The final path should reflect the replacement. 
- dest = self.i.destination() - assert dest[-2:] == b"XZ" - - def test_legalize_path_one_for_many_replacement(self): - # Use a replacement that should always replace the last X in any - # path component with four Zs. - self.lib.replacements = [ - (re.compile(r"X$"), "ZZZZ"), - ] - - # Construct an item whose untruncated path ends with a Y but whose - # truncated version ends with an X. - self.i.title = "X" * 300 + "Y" - - # The final path should ignore the user replacement and create a path - # of the correct length, containing Xs. - dest = self.i.destination() - assert dest[-2:] == b"XX" - def test_album_field_query(self): self.lib.directory = b"one" self.lib.path_formats = [("default", "two"), ("flex:foo", "three")] @@ -605,14 +572,25 @@ class ItemFormattedMappingTest(ItemInDBTestCase): class PathFormattingMixin: """Utilities for testing path formatting.""" + i: beets.library.Item + lib: beets.library.Library + def _setf(self, fmt): self.lib.path_formats.insert(0, ("default", fmt)) def _assert_dest(self, dest, i=None): if i is None: i = self.i - with _common.platform_posix(): - actual = i.destination() + + # Handle paths on Windows. + if os.path.sep != "/": + dest = dest.replace(b"/", os.path.sep.encode()) + + # Paths are normalized based on the CWD. 
+ dest = normpath(dest) + + actual = i.destination() + assert actual == dest @@ -1055,7 +1033,7 @@ class ArtDestinationTest(BeetsTestCase): def test_art_filename_respects_setting(self): art = self.ai.art_destination("something.jpg") - new_art = bytestring_path("%sartimage.jpg" % os.path.sep) + new_art = bytestring_path(f"{os.path.sep}artimage.jpg") assert new_art in art def test_art_path_in_item_dir(self): @@ -1341,56 +1319,3 @@ class ParseQueryTest(unittest.TestCase): def test_parse_bytes(self): with pytest.raises(AssertionError): beets.library.parse_query_string(b"query", None) - - -class LibraryFieldTypesTest(unittest.TestCase): - """Test format() and parse() for library-specific field types""" - - def test_datetype(self): - t = beets.library.DateType() - - # format - time_format = beets.config["time_format"].as_str() - time_local = time.strftime(time_format, time.localtime(123456789)) - assert time_local == t.format(123456789) - # parse - assert 123456789.0 == t.parse(time_local) - assert 123456789.0 == t.parse("123456789.0") - assert t.null == t.parse("not123456789.0") - assert t.null == t.parse("1973-11-29") - - def test_pathtype(self): - t = beets.library.PathType() - - # format - assert "/tmp" == t.format("/tmp") - assert "/tmp/\xe4lbum" == t.format("/tmp/\u00e4lbum") - # parse - assert np(b"/tmp") == t.parse("/tmp") - assert np(b"/tmp/\xc3\xa4lbum") == t.parse("/tmp/\u00e4lbum/") - - def test_musicalkey(self): - t = beets.library.MusicalKey() - - # parse - assert "C#m" == t.parse("c#m") - assert "Gm" == t.parse("g minor") - assert "Not c#m" == t.parse("not C#m") - - def test_durationtype(self): - t = beets.library.DurationType() - - # format - assert "1:01" == t.format(61.23) - assert "60:01" == t.format(3601.23) - assert "0:00" == t.format(None) - # parse - assert 61.0 == t.parse("1:01") - assert 61.23 == t.parse("61.23") - assert 3601.0 == t.parse("60:01") - assert t.null == t.parse("1:00:01") - assert t.null == t.parse("not61.23") - # config 
format_raw_length - beets.config["format_raw_length"] = True - assert 61.23 == t.format(61.23) - assert 3601.23 == t.format(3601.23) diff --git a/test/test_logging.py b/test/test_logging.py index d95a54387..5990fd4e1 100644 --- a/test/test_logging.py +++ b/test/test_logging.py @@ -3,22 +3,21 @@ import logging as log import sys import threading -from io import StringIO +from types import ModuleType +from unittest.mock import patch + +import pytest import beets.logging as blog -import beetsplug from beets import plugins, ui from beets.test import _common, helper -from beets.test.helper import ( - AsIsImporterMixin, - BeetsTestCase, - ImportTestCase, - PluginMixin, -) +from beets.test.helper import AsIsImporterMixin, ImportTestCase, PluginMixin -class LoggingTest(BeetsTestCase): - def test_logging_management(self): +class TestStrFormatLogger: + """Tests for the custom str-formatting logger.""" + + def test_logger_creation(self): l1 = log.getLogger("foo123") l2 = blog.getLogger("foo123") assert l1 == l2 @@ -38,49 +37,128 @@ class LoggingTest(BeetsTestCase): l6 = blog.getLogger() assert l1 != l6 - def test_str_format_logging(self): - logger = blog.getLogger("baz123") - stream = StringIO() - handler = log.StreamHandler(stream) + @pytest.mark.parametrize( + "level", [log.DEBUG, log.INFO, log.WARNING, log.ERROR] + ) + @pytest.mark.parametrize( + "msg, args, kwargs, expected", + [ + ("foo {} bar {}", ("oof", "baz"), {}, "foo oof bar baz"), + ( + "foo {bar} baz {foo}", + (), + {"foo": "oof", "bar": "baz"}, + "foo baz baz oof", + ), + ("no args", (), {}, "no args"), + ("foo {} bar {baz}", ("oof",), {"baz": "baz"}, "foo oof bar baz"), + ], + ) + def test_str_format_logging( + self, level, msg, args, kwargs, expected, caplog + ): + logger = blog.getLogger("test_logger") + logger.setLevel(level) - logger.addHandler(handler) - logger.propagate = False + with caplog.at_level(level, logger="test_logger"): + logger.log(level, msg, *args, **kwargs) - logger.warning("foo {0} {bar}", 
"oof", bar="baz") - handler.flush() - assert stream.getvalue(), "foo oof baz" + assert caplog.records, "No log records were captured" + assert str(caplog.records[0].msg) == expected + + +class TestLogSanitization: + """Log messages should have control characters removed from: + - String arguments + - Keyword argument values + - Bytes arguments (which get decoded first) + """ + + @pytest.mark.parametrize( + "msg, args, kwargs, expected", + [ + # Valid UTF-8 bytes are decoded and preserved + ( + "foo {} bar {bar}", + (b"oof \xc3\xa9",), + {"bar": b"baz \xc3\xa9"}, + "foo oof é bar baz é", + ), + # Invalid UTF-8 bytes are decoded with replacement characters + ( + "foo {} bar {bar}", + (b"oof \xff",), + {"bar": b"baz \xff"}, + "foo oof � bar baz �", + ), + # Control characters should be removed + ( + "foo {} bar {bar}", + ("oof \x9e",), + {"bar": "baz \x9e"}, + "foo oof � bar baz �", + ), + # Whitespace control characters should be preserved + ( + "foo {} bar {bar}", + ("foo\t\n",), + {"bar": "bar\r"}, + "foo foo\t\n bar bar\r", + ), + ], + ) + def test_sanitization(self, msg, args, kwargs, expected, caplog): + level = log.INFO + logger = blog.getLogger("test_logger") + logger.setLevel(level) + + with caplog.at_level(level, logger="test_logger"): + logger.log(level, msg, *args, **kwargs) + + assert caplog.records, "No log records were captured" + assert str(caplog.records[0].msg) == expected + + +class DummyModule(ModuleType): + class DummyPlugin(plugins.BeetsPlugin): + def __init__(self): + plugins.BeetsPlugin.__init__(self, "dummy") + self.import_stages = [self.import_stage] + self.register_listener("dummy_event", self.listener) + + def log_all(self, name): + self._log.debug("debug {}", name) + self._log.info("info {}", name) + self._log.warning("warning {}", name) + + def commands(self): + cmd = ui.Subcommand("dummy") + cmd.func = lambda _, __, ___: self.log_all("cmd") + return (cmd,) + + def import_stage(self, session, task): + self.log_all("import_stage") + + def 
listener(self): + self.log_all("listener") + + def __init__(self, *_, **__): + module_name = "beetsplug.dummy" + super().__init__(module_name) + self.DummyPlugin.__module__ = module_name + self.DummyPlugin = self.DummyPlugin class LoggingLevelTest(AsIsImporterMixin, PluginMixin, ImportTestCase): plugin = "dummy" - class DummyModule: - class DummyPlugin(plugins.BeetsPlugin): - def __init__(self): - plugins.BeetsPlugin.__init__(self, "dummy") - self.import_stages = [self.import_stage] - self.register_listener("dummy_event", self.listener) + @classmethod + def setUpClass(cls): + patcher = patch.dict(sys.modules, {"beetsplug.dummy": DummyModule()}) + patcher.start() + cls.addClassCleanup(patcher.stop) - def log_all(self, name): - self._log.debug("debug " + name) - self._log.info("info " + name) - self._log.warning("warning " + name) - - def commands(self): - cmd = ui.Subcommand("dummy") - cmd.func = lambda _, __, ___: self.log_all("cmd") - return (cmd,) - - def import_stage(self, session, task): - self.log_all("import_stage") - - def listener(self): - self.log_all("listener") - - def setUp(self): - sys.modules["beetsplug.dummy"] = self.DummyModule - beetsplug.dummy = self.DummyModule - super().setUp() + super().setUpClass() def test_command_level0(self): self.config["verbose"] = 0 @@ -176,9 +254,9 @@ class ConcurrentEventsTest(AsIsImporterMixin, ImportTestCase): self.t1_step = self.t2_step = 0 def log_all(self, name): - self._log.debug("debug " + name) - self._log.info("info " + name) - self._log.warning("warning " + name) + self._log.debug("debug {}", name) + self._log.info("info {}", name) + self._log.warning("warning {}", name) def listener1(self): try: diff --git a/test/test_pipeline.py b/test/test_pipeline.py index 83b8d744c..5007ad826 100644 --- a/test/test_pipeline.py +++ b/test/test_pipeline.py @@ -39,11 +39,16 @@ def _consume(result): result.append(i) -# A worker that raises an exception. +# Pipeline stages that raise an exception. 
class PipelineError(Exception): pass +def _exc_produce(num=5): + yield from range(num) + raise PipelineError() + + def _exc_work(num=3): i = None while True: @@ -53,6 +58,14 @@ def _exc_work(num=3): i *= 2 +def _exc_consume(result, num=4): + while True: + i = yield + if i == num: + raise PipelineError() + result.append(i) + + # A worker that yields a bubble. def _bub_work(num=3): i = None @@ -121,17 +134,32 @@ class ParallelStageTest(unittest.TestCase): class ExceptionTest(unittest.TestCase): def setUp(self): self.result = [] - self.pl = pipeline.Pipeline( - (_produce(), _exc_work(), _consume(self.result)) - ) + + def run_sequential(self, *stages): + pl = pipeline.Pipeline(stages) + with pytest.raises(PipelineError): + pl.run_sequential() + + def run_parallel(self, *stages): + pl = pipeline.Pipeline(stages) + with pytest.raises(PipelineError): + pl.run_parallel() def test_run_sequential(self): - with pytest.raises(PipelineError): - self.pl.run_sequential() + """Test that exceptions from various stages of the pipeline are + properly propagated when running sequentially. + """ + self.run_sequential(_exc_produce(), _work(), _consume(self.result)) + self.run_sequential(_produce(), _exc_work(), _consume(self.result)) + self.run_sequential(_produce(), _work(), _exc_consume(self.result)) def test_run_parallel(self): - with pytest.raises(PipelineError): - self.pl.run_parallel() + """Test that exceptions from various stages of the pipeline are + properly propagated when running in parallel. 
+ """ + self.run_parallel(_exc_produce(), _work(), _consume(self.result)) + self.run_parallel(_produce(), _exc_work(), _consume(self.result)) + self.run_parallel(_produce(), _work(), _exc_consume(self.result)) def test_pull(self): pl = pipeline.Pipeline((_produce(), _exc_work())) diff --git a/test/test_plugins.py b/test/test_plugins.py index efa26d084..6f7026718 100644 --- a/test/test_plugins.py +++ b/test/test_plugins.py @@ -13,9 +13,12 @@ # included in all copies or substantial portions of the Software. +import importlib import itertools +import logging import os -import unittest +import pkgutil +import sys from unittest.mock import ANY, Mock, patch import pytest @@ -24,144 +27,82 @@ from mediafile import MediaFile from beets import config, plugins, ui from beets.dbcore import types from beets.importer import ( + Action, ArchiveImportTask, SentinelImportTask, SingletonImportTask, - action, ) from beets.library import Item -from beets.plugins import MetadataSourcePlugin from beets.test import helper -from beets.test.helper import AutotagStub, ImportHelper, TerminalImportMixin -from beets.test.helper import PluginTestCase as BasePluginTestCase -from beets.util import displayable_path, syspath -from beets.util.id_extractors import ( - beatport_id_regex, - deezer_id_regex, - spotify_id_regex, +from beets.test.helper import ( + AutotagStub, + ImportHelper, + PluginMixin, + PluginTestCase, + TerminalImportMixin, ) +from beets.util import PromptChoice, displayable_path, syspath -class PluginLoaderTestCase(BasePluginTestCase): - def setup_plugin_loader(self): - # FIXME the mocking code is horrific, but this is the lowest and - # earliest level of the plugin mechanism we can hook into. 
- self._plugin_loader_patch = patch("beets.plugins.load_plugins") - self._plugin_classes = set() - load_plugins = self._plugin_loader_patch.start() +class TestPluginRegistration(PluginTestCase): + class RatingPlugin(plugins.BeetsPlugin): + item_types = { + "rating": types.Float(), + "multi_value": types.MULTI_VALUE_DSV, + } - def myload(names=()): - plugins._classes.update(self._plugin_classes) + def __init__(self): + super().__init__() + self.register_listener("write", self.on_write) - load_plugins.side_effect = myload - - def teardown_plugin_loader(self): - self._plugin_loader_patch.stop() - - def register_plugin(self, plugin_class): - self._plugin_classes.add(plugin_class) + @staticmethod + def on_write(item=None, path=None, tags=None): + if tags["artist"] == "XXX": + tags["artist"] = "YYY" def setUp(self): - self.setup_plugin_loader() super().setUp() - def tearDown(self): - self.teardown_plugin_loader() - super().tearDown() + self.register_plugin(self.RatingPlugin) + + def test_field_type_registered(self): + assert isinstance(Item._types.get("rating"), types.Float) + + def test_duplicate_type(self): + class DuplicateTypePlugin(plugins.BeetsPlugin): + item_types = {"rating": types.INTEGER} + + self.register_plugin(DuplicateTypePlugin) + with pytest.raises( + plugins.PluginConflictError, match="already been defined" + ): + Item._types + + def test_listener_registered(self): + self.RatingPlugin() + item = self.add_item_fixture(artist="XXX") + + item.write() + + assert MediaFile(syspath(item.path)).artist == "YYY" + + def test_multi_value_flex_field_type(self): + item = Item(path="apath", artist="aaa") + item.multi_value = ["one", "two", "three"] + item.add(self.lib) + + out = self.run_with_output("ls", "-f", "$multi_value") + delimiter = types.MULTI_VALUE_DSV.delimiter + assert out == f"one{delimiter}two{delimiter}three\n" -class PluginImportTestCase(ImportHelper, PluginLoaderTestCase): +class PluginImportTestCase(ImportHelper, PluginTestCase): def setUp(self): 
super().setUp() self.prepare_album_for_import(2) -class ItemTypesTest(PluginLoaderTestCase): - def test_flex_field_type(self): - class RatingPlugin(plugins.BeetsPlugin): - item_types = {"rating": types.Float()} - - self.register_plugin(RatingPlugin) - self.config["plugins"] = "rating" - - item = Item(path="apath", artist="aaa") - item.add(self.lib) - - # Do not match unset values - out = self.run_with_output("ls", "rating:1..3") - assert "aaa" not in out - - self.run_command("modify", "rating=2", "--yes") - - # Match in range - out = self.run_with_output("ls", "rating:1..3") - assert "aaa" in out - - # Don't match out of range - out = self.run_with_output("ls", "rating:3..5") - assert "aaa" not in out - - -class ItemWriteTest(PluginLoaderTestCase): - def setUp(self): - super().setUp() - - class EventListenerPlugin(plugins.BeetsPlugin): - pass - - self.event_listener_plugin = EventListenerPlugin() - self.register_plugin(EventListenerPlugin) - - def test_change_tags(self): - def on_write(item=None, path=None, tags=None): - if tags["artist"] == "XXX": - tags["artist"] = "YYY" - - self.register_listener("write", on_write) - - item = self.add_item_fixture(artist="XXX") - item.write() - - mediafile = MediaFile(syspath(item.path)) - assert mediafile.artist == "YYY" - - def register_listener(self, event, func): - self.event_listener_plugin.register_listener(event, func) - - -class ItemTypeConflictTest(PluginLoaderTestCase): - def test_mismatch(self): - class EventListenerPlugin(plugins.BeetsPlugin): - item_types = {"duplicate": types.INTEGER} - - class AdventListenerPlugin(plugins.BeetsPlugin): - item_types = {"duplicate": types.FLOAT} - - self.event_listener_plugin = EventListenerPlugin - self.advent_listener_plugin = AdventListenerPlugin - self.register_plugin(EventListenerPlugin) - self.register_plugin(AdventListenerPlugin) - with pytest.raises(plugins.PluginConflictError): - plugins.types(Item) - - def test_match(self): - class EventListenerPlugin(plugins.BeetsPlugin): 
- item_types = {"duplicate": types.INTEGER} - - class AdventListenerPlugin(plugins.BeetsPlugin): - item_types = {"duplicate": types.INTEGER} - - self.event_listener_plugin = EventListenerPlugin - self.advent_listener_plugin = AdventListenerPlugin - self.register_plugin(EventListenerPlugin) - self.register_plugin(AdventListenerPlugin) - assert plugins.types(Item) is not None - - class EventsTest(PluginImportTestCase): - def setUp(self): - super().setUp() - def test_import_task_created(self): self.importer = self.setup_importer(pretend=True) @@ -174,7 +115,7 @@ class EventsTest(PluginImportTestCase): logs = [line for line in logs if not line.startswith("Sending event:")] assert logs == [ - f'Album: {displayable_path(os.path.join(self.import_dir, b"album"))}', + f"Album: {displayable_path(os.path.join(self.import_dir, b'album'))}", f" {displayable_path(self.import_media[0].path)}", f" {displayable_path(self.import_media[1].path)}", ] @@ -221,16 +162,7 @@ class EventsTest(PluginImportTestCase): ] -class HelpersTest(unittest.TestCase): - def test_sanitize_choices(self): - assert plugins.sanitize_choices(["A", "Z"], ("A", "B")) == ["A"] - assert plugins.sanitize_choices(["A", "A"], ("A")) == ["A"] - assert plugins.sanitize_choices( - ["D", "*", "A"], ("A", "B", "C", "D") - ) == ["D", "B", "C", "A"] - - -class ListenersTest(PluginLoaderTestCase): +class ListenersTest(PluginTestCase): def test_register(self): class DummyPlugin(plugins.BeetsPlugin): def __init__(self): @@ -250,15 +182,7 @@ class ListenersTest(PluginLoaderTestCase): d.register_listener("cli_exit", d2.dummy) assert DummyPlugin._raw_listeners["cli_exit"] == [d.dummy, d2.dummy] - @patch("beets.plugins.find_plugins") - @patch("inspect.getfullargspec") - def test_events_called(self, mock_gfa, mock_find_plugins): - mock_gfa.return_value = Mock( - args=(), - varargs="args", - varkw="kwargs", - ) - + def test_events_called(self): class DummyPlugin(plugins.BeetsPlugin): def __init__(self): super().__init__() @@ 
-268,7 +192,6 @@ class ListenersTest(PluginLoaderTestCase): self.register_listener("event_bar", self.bar) d = DummyPlugin() - mock_find_plugins.return_value = (d,) plugins.send("event") d.foo.assert_has_calls([]) @@ -278,8 +201,7 @@ class ListenersTest(PluginLoaderTestCase): d.foo.assert_called_once_with(var="tagada") d.bar.assert_has_calls([]) - @patch("beets.plugins.find_plugins") - def test_listener_params(self, mock_find_plugins): + def test_listener_params(self): class DummyPlugin(plugins.BeetsPlugin): def __init__(self): super().__init__() @@ -323,8 +245,7 @@ class ListenersTest(PluginLoaderTestCase): def dummy9(self, **kwargs): assert kwargs == {"foo": 5} - d = DummyPlugin() - mock_find_plugins.return_value = (d,) + DummyPlugin() plugins.send("event1", foo=5) plugins.send("event2", foo=5) @@ -347,7 +268,8 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): def setUp(self): super().setUp() self.setup_importer() - self.matcher = AutotagStub().install() + self.matcher = AutotagStub(AutotagStub.IDENT).install() + self.addCleanup(self.matcher.restore) # keep track of ui.input_option() calls self.input_options_patcher = patch( "beets.ui.input_options", side_effect=ui.input_options @@ -357,7 +279,6 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): def tearDown(self): super().tearDown() self.input_options_patcher.stop() - self.matcher.restore() def test_plugin_choices_in_ui_input_options_album(self): """Test the presence of plugin choices on the prompt (album).""" @@ -371,8 +292,8 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): def return_choices(self, session, task): return [ - ui.commands.PromptChoice("f", "Foo", None), - ui.commands.PromptChoice("r", "baR", None), + PromptChoice("f", "Foo", None), + PromptChoice("r", "baR", None), ] self.register_plugin(DummyPlugin) @@ -389,7 +310,7 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): "aBort", ) + ("Foo", "baR") - 
self.importer.add_choice(action.SKIP) + self.importer.add_choice(Action.SKIP) self.importer.run() self.mock_input_options.assert_called_once_with( opts, default="a", require=ANY @@ -407,8 +328,8 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): def return_choices(self, session, task): return [ - ui.commands.PromptChoice("f", "Foo", None), - ui.commands.PromptChoice("r", "baR", None), + PromptChoice("f", "Foo", None), + PromptChoice("r", "baR", None), ] self.register_plugin(DummyPlugin) @@ -424,7 +345,7 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): ) + ("Foo", "baR") config["import"]["singletons"] = True - self.importer.add_choice(action.SKIP) + self.importer.add_choice(Action.SKIP) self.importer.run() self.mock_input_options.assert_called_with( opts, default="a", require=ANY @@ -442,10 +363,10 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): def return_choices(self, session, task): return [ - ui.commands.PromptChoice("a", "A foo", None), # dupe - ui.commands.PromptChoice("z", "baZ", None), # ok - ui.commands.PromptChoice("z", "Zupe", None), # dupe - ui.commands.PromptChoice("z", "Zoo", None), + PromptChoice("a", "A foo", None), # dupe + PromptChoice("z", "baZ", None), # ok + PromptChoice("z", "Zupe", None), # dupe + PromptChoice("z", "Zoo", None), ] # dupe self.register_plugin(DummyPlugin) @@ -461,7 +382,7 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): "enter Id", "aBort", ) + ("baZ",) - self.importer.add_choice(action.SKIP) + self.importer.add_choice(Action.SKIP) self.importer.run() self.mock_input_options.assert_called_once_with( opts, default="a", require=ANY @@ -478,7 +399,7 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): ) def return_choices(self, session, task): - return [ui.commands.PromptChoice("f", "Foo", self.foo)] + return [PromptChoice("f", "Foo", self.foo)] def foo(self, session, task): pass @@ -520,10 +441,10 @@ class 
PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): ) def return_choices(self, session, task): - return [ui.commands.PromptChoice("f", "Foo", self.foo)] + return [PromptChoice("f", "Foo", self.foo)] def foo(self, session, task): - return action.SKIP + return Action.SKIP self.register_plugin(DummyPlugin) # Default options + extra choices by the plugin ('Foo', 'Bar') @@ -549,59 +470,112 @@ class PromptChoicesTest(TerminalImportMixin, PluginImportTestCase): ) -class ParseSpotifyIDTest(unittest.TestCase): - def test_parse_id_correct(self): - id_string = "39WqpoPgZxygo6YQjehLJJ" - out = MetadataSourcePlugin._get_id("album", id_string, spotify_id_regex) - assert out == id_string +def get_available_plugins(): + """Get all available plugins in the beetsplug namespace.""" + namespace_pkg = importlib.import_module("beetsplug") - def test_parse_id_non_id_returns_none(self): - id_string = "blah blah" - out = MetadataSourcePlugin._get_id("album", id_string, spotify_id_regex) - assert out is None - - def test_parse_id_url_finds_id(self): - id_string = "39WqpoPgZxygo6YQjehLJJ" - id_url = "https://open.spotify.com/album/%s" % id_string - out = MetadataSourcePlugin._get_id("album", id_url, spotify_id_regex) - assert out == id_string + return [ + m.name + for m in pkgutil.iter_modules(namespace_pkg.__path__) + if not m.name.startswith("_") + ] -class ParseDeezerIDTest(unittest.TestCase): - def test_parse_id_correct(self): - id_string = "176356382" - out = MetadataSourcePlugin._get_id("album", id_string, deezer_id_regex) - assert out == id_string +class TestImportPlugin(PluginMixin): + @pytest.fixture(params=get_available_plugins()) + def plugin_name(self, request): + """Fixture to provide the name of each available plugin.""" + name = request.param - def test_parse_id_non_id_returns_none(self): - id_string = "blah blah" - out = MetadataSourcePlugin._get_id("album", id_string, deezer_id_regex) - assert out is None + # skip gstreamer plugins on windows + gstreamer_plugins = 
{"bpd", "replaygain"} + if sys.platform == "win32" and name in gstreamer_plugins: + pytest.skip(f"GStreamer is not available on Windows: {name}") - def test_parse_id_url_finds_id(self): - id_string = "176356382" - id_url = "https://www.deezer.com/album/%s" % id_string - out = MetadataSourcePlugin._get_id("album", id_url, deezer_id_regex) - assert out == id_string + return name + def unload_plugins(self): + """Unimport plugins before each test to avoid conflicts.""" + super().unload_plugins() + for mod in list(sys.modules): + if mod.startswith("beetsplug."): + del sys.modules[mod] -class ParseBeatportIDTest(unittest.TestCase): - def test_parse_id_correct(self): - id_string = "3089651" - out = MetadataSourcePlugin._get_id( - "album", id_string, beatport_id_regex + @pytest.fixture(autouse=True) + def cleanup(self): + """Ensure plugins are unimported before and after each test.""" + self.unload_plugins() + yield + self.unload_plugins() + + @pytest.mark.skipif( + os.environ.get("GITHUB_ACTIONS") != "true", + reason=( + "Requires all dependencies to be installed, which we can't" + " guarantee in the local environment." + ), + ) + def test_import_plugin(self, caplog, plugin_name): + """Test that a plugin is importable without an error.""" + caplog.set_level(logging.WARNING) + self.load_plugins(plugin_name) + + assert "PluginImportError" not in caplog.text, ( + f"Plugin '{plugin_name}' has issues during import." ) - assert out == id_string - def test_parse_id_non_id_returns_none(self): - id_string = "blah blah" - out = MetadataSourcePlugin._get_id( - "album", id_string, beatport_id_regex + +class TestDeprecationCopy: + # TODO: remove this test in Beets 3.0.0 + def test_legacy_metadata_plugin_deprecation(self): + """Test that a MetadataSourcePlugin with 'legacy' data_source + raises a deprecation warning and all function and properties are + copied from the base class. 
+ """ + with pytest.warns(DeprecationWarning, match="LegacyMetadataPlugin"): + + class LegacyMetadataPlugin(plugins.BeetsPlugin): + data_source = "legacy" + + # Assert all methods are present + assert hasattr(LegacyMetadataPlugin, "albums_for_ids") + assert hasattr(LegacyMetadataPlugin, "tracks_for_ids") + assert hasattr(LegacyMetadataPlugin, "data_source_mismatch_penalty") + assert hasattr(LegacyMetadataPlugin, "_extract_id") + assert hasattr(LegacyMetadataPlugin, "get_artist") + + +class TestMusicBrainzPluginLoading: + @pytest.fixture(autouse=True) + def config(self): + _config = config + _config.sources = [] + _config.read(user=False, defaults=True) + return _config + + def test_default(self): + assert "musicbrainz" in plugins.get_plugin_names() + + def test_other_plugin_enabled(self, config): + config["plugins"] = ["anything"] + + assert "musicbrainz" not in plugins.get_plugin_names() + + def test_deprecated_enabled(self, config, caplog): + config["plugins"] = ["anything"] + config["musicbrainz"]["enabled"] = True + + assert "musicbrainz" in plugins.get_plugin_names() + assert ( + "musicbrainz.enabled' configuration option is deprecated" + in caplog.text ) - assert out is None - def test_parse_id_url_finds_id(self): - id_string = "3089651" - id_url = "https://www.beatport.com/release/album-name/%s" % id_string - out = MetadataSourcePlugin._get_id("album", id_url, beatport_id_regex) - assert out == id_string + def test_deprecated_disabled(self, config, caplog): + config["musicbrainz"]["enabled"] = False + + assert "musicbrainz" not in plugins.get_plugin_names() + assert ( + "musicbrainz.enabled' configuration option is deprecated" + in caplog.text + ) diff --git a/test/test_query.py b/test/test_query.py index f85e5c637..0ddf83e3a 100644 --- a/test/test_query.py +++ b/test/test_query.py @@ -14,1093 +14,513 @@ """Various tests for querying the library database.""" -import os import sys -import unittest -from contextlib import contextmanager from functools import 
partial +from pathlib import Path import pytest -from mock import patch -import beets.library -from beets import dbcore, util from beets.dbcore import types from beets.dbcore.query import ( - InvalidQueryArgumentValueError, + AndQuery, + BooleanQuery, + DateQuery, + FalseQuery, + MatchQuery, NoneQuery, + NotQuery, + NumericQuery, + OrQuery, ParsingError, + PathQuery, + RegexpQuery, + StringFieldQuery, + StringQuery, + SubstringQuery, + TrueQuery, ) +from beets.library import Item from beets.test import _common -from beets.test.helper import BeetsTestCase, ItemInDBTestCase -from beets.util import syspath +from beets.test.helper import TestHelper # Because the absolute path begins with something like C:, we # can't disambiguate it from an ordinary query. WIN32_NO_IMPLICIT_PATHS = "Implicit paths are not supported on Windows" - -class AssertsMixin: - def assert_items_matched(self, results, titles): - assert {i.title for i in results} == set(titles) - - def assert_albums_matched(self, results, albums): - assert {a.album for a in results} == set(albums) - - def assertInResult(self, item, results): - result_ids = [i.id for i in results] - assert item.id in result_ids - - def assertNotInResult(self, item, results): - result_ids = [i.id for i in results] - assert item.id not in result_ids +_p = pytest.param -# A test case class providing a library with some dummy data and some -# assertions involving that data. 
-class DummyDataTestCase(BeetsTestCase, AssertsMixin): - def setUp(self): - super().setUp() - items = [_common.item() for _ in range(3)] - items[0].title = "foo bar" - items[0].artist = "one" - items[0].artists = ["one", "eleven"] - items[0].album = "baz" - items[0].year = 2001 - items[0].comp = True - items[0].genre = "rock" - items[1].title = "baz qux" - items[1].artist = "two" - items[1].artists = ["two", "twelve"] - items[1].album = "baz" - items[1].year = 2002 - items[1].comp = True - items[1].genre = "Rock" - items[2].title = "beets 4 eva" - items[2].artist = "three" - items[2].artists = ["three", "one"] - items[2].album = "foo" - items[2].year = 2003 - items[2].comp = False - items[2].genre = "Hard Rock" - for item in items: - self.lib.add(item) - self.album = self.lib.add_album(items[:2]) +@pytest.fixture(scope="class") +def helper(): + helper = TestHelper() + helper.setup_beets() - def assert_items_matched_all(self, results): - self.assert_items_matched( - results, - [ - "foo bar", - "baz qux", - "beets 4 eva", - ], + yield helper + + helper.teardown_beets() + + +class TestGet: + @pytest.fixture(scope="class") + def lib(self, helper): + album_items = [ + helper.create_item( + title="first", + artist="one", + artists=["one", "eleven"], + album="baz", + year=2001, + comp=True, + genre="rock", + ), + helper.create_item( + title="second", + artist="two", + artists=["two", "twelve"], + album="baz", + year=2002, + comp=True, + genre="Rock", + ), + ] + album = helper.lib.add_album(album_items) + album.albumflex = "foo" + album.store() + + helper.add_item( + title="third", + artist="three", + artists=["three", "one"], + album="foo", + year=2003, + comp=False, + genre="Hard Rock", + comments="caf\xe9", ) + return helper.lib + + @pytest.mark.parametrize( + "q, expected_titles", + [ + ("", ["first", "second", "third"]), + (None, ["first", "second", "third"]), + (":oNE", []), + (":one", ["first"]), + (":sec :ond", ["second"]), + (":second", ["second"]), + ("=rock", 
["first"]), + ('=~"hard rock"', ["third"]), + (":t$", ["first"]), + ("oNE", ["first"]), + ("baz", ["first", "second"]), + ("sec ond", ["second"]), + ("three", ["third"]), + ("albumflex:foo", ["first", "second"]), + ("artist::t.+r", ["third"]), + ("artist:thrEE", ["third"]), + ("artists::eleven", ["first"]), + ("artists::one", ["first", "third"]), + ("ArTiST:three", ["third"]), + ("comments:caf\xe9", ["third"]), + ("comp:true", ["first", "second"]), + ("comp:false", ["third"]), + ("genre:=rock", ["first"]), + ("genre:=Rock", ["second"]), + ('genre:="Hard Rock"', ["third"]), + ('genre:=~"hard rock"', ["third"]), + ("genre:=~rock", ["first", "second"]), + ('genre:="hard rock"', []), + ("popebear", []), + ("pope:bear", []), + ("singleton:true", ["third"]), + ("singleton:1", ["third"]), + ("singleton:false", ["first", "second"]), + ("singleton:0", ["first", "second"]), + ("title:ond", ["second"]), + ("title::sec", ["second"]), + ("year:2001", ["first"]), + ("year:2000..2002", ["first", "second"]), + ("xyzzy:nonsense", []), + ], + ) + def test_get_query(self, lib, q, expected_titles): + assert {i.title for i in lib.items(q)} == set(expected_titles) + + @pytest.mark.parametrize( + "q, expected_titles", + [ + (BooleanQuery("comp", True), ("third",)), + (DateQuery("added", "2000-01-01"), ("first", "second", "third")), + (FalseQuery(), ("first", "second", "third")), + (MatchQuery("year", "2003"), ("first", "second")), + (NoneQuery("rg_track_gain"), ()), + (NumericQuery("year", "2001..2002"), ("third",)), + ( + AndQuery( + [BooleanQuery("comp", True), NumericQuery("year", "2002")] + ), + ("first", "third"), + ), + ( + OrQuery( + [BooleanQuery("comp", True), NumericQuery("year", "2002")] + ), + ("third",), + ), + (RegexpQuery("artist", "^t"), ("first",)), + (SubstringQuery("album", "ba"), ("third",)), + (TrueQuery(), ()), + ], + ) + def test_query_logic(self, lib, q, expected_titles): + def get_results(*args): + return {i.title for i in lib.items(*args)} + + # not(a and b) <-> 
not(a) or not(b) + not_q = NotQuery(q) + not_q_results = get_results(not_q) + assert not_q_results == set(expected_titles) -class GetTest(DummyDataTestCase): - def test_get_empty(self): - q = "" - results = self.lib.items(q) - self.assert_items_matched_all(results) - - def test_get_none(self): - q = None - results = self.lib.items(q) - self.assert_items_matched_all(results) - - def test_get_one_keyed_term(self): - q = "title:qux" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_get_one_keyed_exact(self): - q = "genre:=rock" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - q = "genre:=Rock" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - q = 'genre:="Hard Rock"' - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_get_one_keyed_exact_nocase(self): - q = 'genre:=~"hard rock"' - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_get_one_keyed_regexp(self): - q = "artist::t.+r" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_get_one_unkeyed_term(self): - q = "three" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_get_one_unkeyed_exact(self): - q = "=rock" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - - def test_get_one_unkeyed_exact_nocase(self): - q = '=~"hard rock"' - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_get_one_unkeyed_regexp(self): - q = ":x$" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_get_no_matches(self): - q = "popebear" - results = self.lib.items(q) - self.assert_items_matched(results, []) - - def test_invalid_key(self): - q = "pope:bear" - results = self.lib.items(q) - # Matches nothing since the flexattr is not present on the 
- # objects. - self.assert_items_matched(results, []) - - def test_get_no_matches_exact(self): - q = 'genre:="hard rock"' - results = self.lib.items(q) - self.assert_items_matched(results, []) - - def test_term_case_insensitive(self): - q = "oNE" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - - def test_regexp_case_sensitive(self): - q = ":oNE" - results = self.lib.items(q) - self.assert_items_matched(results, []) - q = ":one" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - - def test_term_case_insensitive_with_key(self): - q = "artist:thrEE" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_term_case_regex_with_multi_key_matches(self): - q = "artists::eleven" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - - def test_term_case_regex_with_multi_key_matches_multiple_columns(self): - q = "artists::one" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "beets 4 eva"]) - - def test_key_case_insensitive(self): - q = "ArTiST:three" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_keyed_matches_exact_nocase(self): - q = "genre:=~rock" - results = self.lib.items(q) - self.assert_items_matched( - results, - [ - "foo bar", - "baz qux", - ], - ) - - def test_unkeyed_term_matches_multiple_columns(self): - q = "baz" - results = self.lib.items(q) - self.assert_items_matched( - results, - [ - "foo bar", - "baz qux", - ], - ) - - def test_unkeyed_regexp_matches_multiple_columns(self): - q = ":z$" - results = self.lib.items(q) - self.assert_items_matched( - results, - [ - "foo bar", - "baz qux", - ], - ) - - def test_keyed_term_matches_only_one_column(self): - q = "title:baz" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_keyed_regexp_matches_only_one_column(self): - q = "title::baz" - results = self.lib.items(q) 
- self.assert_items_matched( - results, - [ - "baz qux", - ], - ) - - def test_multiple_terms_narrow_search(self): - q = "qux baz" - results = self.lib.items(q) - self.assert_items_matched( - results, - [ - "baz qux", - ], - ) - - def test_multiple_regexps_narrow_search(self): - q = ":baz :qux" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_mixed_terms_regexps_narrow_search(self): - q = ":baz qux" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_single_year(self): - q = "year:2001" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar"]) - - def test_year_range(self): - q = "year:2000..2002" - results = self.lib.items(q) - self.assert_items_matched( - results, - [ - "foo bar", - "baz qux", - ], - ) - - def test_singleton_true(self): - q = "singleton:true" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_singleton_1(self): - q = "singleton:1" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_singleton_false(self): - q = "singleton:false" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "baz qux"]) - - def test_singleton_0(self): - q = "singleton:0" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "baz qux"]) - - def test_compilation_true(self): - q = "comp:true" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "baz qux"]) - - def test_compilation_false(self): - q = "comp:false" - results = self.lib.items(q) - self.assert_items_matched(results, ["beets 4 eva"]) - - def test_unknown_field_name_no_results(self): - q = "xyzzy:nonsense" - results = self.lib.items(q) - titles = [i.title for i in results] - assert titles == [] - - def test_unknown_field_name_no_results_in_album_query(self): - q = "xyzzy:nonsense" - results = self.lib.albums(q) - names = [a.album for a in results] 
- assert names == [] - - def test_item_field_name_matches_nothing_in_album_query(self): - q = "format:nonsense" - results = self.lib.albums(q) - names = [a.album for a in results] - assert names == [] - - def test_unicode_query(self): - item = self.lib.items().get() - item.title = "caf\xe9" - item.store() - - q = "title:caf\xe9" - results = self.lib.items(q) - self.assert_items_matched(results, ["caf\xe9"]) - - def test_numeric_search_positive(self): - q = dbcore.query.NumericQuery("year", "2001") - results = self.lib.items(q) - assert results - - def test_numeric_search_negative(self): - q = dbcore.query.NumericQuery("year", "1999") - results = self.lib.items(q) - assert not results - - def test_album_field_fallback(self): - self.album["albumflex"] = "foo" - self.album.store() - - q = "albumflex:foo" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "baz qux"]) - - def test_invalid_query(self): - with pytest.raises(InvalidQueryArgumentValueError, match="not an int"): - dbcore.query.NumericQuery("year", "199a") - - msg_match = r"not a regular expression.*unterminated subpattern" - with pytest.raises(ParsingError, match=msg_match): - dbcore.query.RegexpQuery("year", "199(") - - -class MatchTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.item = _common.item() - - def test_regex_match_positive(self): - q = dbcore.query.RegexpQuery("album", "^the album$") - assert q.match(self.item) - - def test_regex_match_negative(self): - q = dbcore.query.RegexpQuery("album", "^album$") - assert not q.match(self.item) - - def test_regex_match_non_string_value(self): - q = dbcore.query.RegexpQuery("disc", "^6$") - assert q.match(self.item) - - def test_substring_match_positive(self): - q = dbcore.query.SubstringQuery("album", "album") - assert q.match(self.item) - - def test_substring_match_negative(self): - q = dbcore.query.SubstringQuery("album", "ablum") - assert not q.match(self.item) - - def 
test_substring_match_non_string_value(self): - q = dbcore.query.SubstringQuery("disc", "6") - assert q.match(self.item) - - def test_exact_match_nocase_positive(self): - q = dbcore.query.StringQuery("genre", "the genre") - assert q.match(self.item) - q = dbcore.query.StringQuery("genre", "THE GENRE") - assert q.match(self.item) - - def test_exact_match_nocase_negative(self): - q = dbcore.query.StringQuery("genre", "genre") - assert not q.match(self.item) - - def test_year_match_positive(self): - q = dbcore.query.NumericQuery("year", "1") - assert q.match(self.item) - - def test_year_match_negative(self): - q = dbcore.query.NumericQuery("year", "10") - assert not q.match(self.item) - - def test_bitrate_range_positive(self): - q = dbcore.query.NumericQuery("bitrate", "100000..200000") - assert q.match(self.item) - - def test_bitrate_range_negative(self): - q = dbcore.query.NumericQuery("bitrate", "200000..300000") - assert not q.match(self.item) - - def test_open_range(self): - dbcore.query.NumericQuery("bitrate", "100000..") - - def test_eq(self): - q1 = dbcore.query.MatchQuery("foo", "bar") - q2 = dbcore.query.MatchQuery("foo", "bar") - q3 = dbcore.query.MatchQuery("foo", "baz") - q4 = dbcore.query.StringFieldQuery("foo", "bar") - assert q1 == q2 - assert q1 != q3 - assert q1 != q4 - assert q3 != q4 - - -class PathQueryTest(ItemInDBTestCase, AssertsMixin): - def setUp(self): - super().setUp() - - # This is the item we'll try to match. - self.i.path = util.normpath("/a/b/c.mp3") - self.i.title = "path item" - self.i.album = "path album" - self.i.store() - self.lib.add_album([self.i]) - - # A second item for testing exclusion. 
- i2 = _common.item() - i2.path = util.normpath("/x/y/z.mp3") - i2.title = "another item" - i2.album = "another album" - self.lib.add(i2) - self.lib.add_album([i2]) - - @contextmanager - def force_implicit_query_detection(self): - # Unadorned path queries with path separators in them are considered - # path queries only when the path in question actually exists. So we - # mock the existence check to return true. - beets.library.PathQuery.force_implicit_query_detection = True - yield - beets.library.PathQuery.force_implicit_query_detection = False - - def test_path_exact_match(self): - q = "path:/a/b/c.mp3" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - # FIXME: fails on windows - @unittest.skipIf(sys.platform == "win32", "win32") - def test_parent_directory_no_slash(self): - q = "path:/a" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - # FIXME: fails on windows - @unittest.skipIf(sys.platform == "win32", "win32") - def test_parent_directory_with_slash(self): - q = "path:/a/" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - def test_no_match(self): - q = "path:/xyzzy/" - results = self.lib.items(q) - self.assert_items_matched(results, []) - - results = self.lib.albums(q) - self.assert_albums_matched(results, []) - - def test_fragment_no_match(self): - q = "path:/b/" - results = self.lib.items(q) - self.assert_items_matched(results, []) - - results = self.lib.albums(q) - self.assert_albums_matched(results, []) - - def test_nonnorm_path(self): - q = "path:/x/../a/b" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - 
self.assert_albums_matched(results, ["path album"]) - - @unittest.skipIf(sys.platform == "win32", WIN32_NO_IMPLICIT_PATHS) - def test_slashed_query_matches_path(self): - with self.force_implicit_query_detection(): - q = "/a/b" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - @unittest.skipIf(sys.platform == "win32", WIN32_NO_IMPLICIT_PATHS) - def test_path_query_in_or_query(self): - with self.force_implicit_query_detection(): - q = "/a/b , /a/b" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - def test_non_slashed_does_not_match_path(self): - with self.force_implicit_query_detection(): - q = "c.mp3" - results = self.lib.items(q) - self.assert_items_matched(results, []) - - results = self.lib.albums(q) - self.assert_albums_matched(results, []) - - def test_slashes_in_explicit_field_does_not_match_path(self): - with self.force_implicit_query_detection(): - q = "title:/a/b" - results = self.lib.items(q) - self.assert_items_matched(results, []) - - def test_path_item_regex(self): - q = "path::c\\.mp3$" - results = self.lib.items(q) - self.assert_items_matched(results, ["path item"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - def test_path_album_regex(self): - q = "path::b" - results = self.lib.albums(q) - self.assert_albums_matched(results, ["path album"]) - - def test_escape_underscore(self): - self.add_album( - path=b"/a/_/title.mp3", - title="with underscore", - album="album with underscore", - ) - q = "path:/a/_" - results = self.lib.items(q) - self.assert_items_matched(results, ["with underscore"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["album with underscore"]) - - def test_escape_percent(self): - self.add_album( - path=b"/a/%/title.mp3", - title="with percent", - album="album with percent", - ) - q = "path:/a/%" - results = 
self.lib.items(q) - self.assert_items_matched(results, ["with percent"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["album with percent"]) - - def test_escape_backslash(self): - self.add_album( - path=rb"/a/\x/title.mp3", - title="with backslash", - album="album with backslash", - ) - q = "path:/a/\\\\x" - results = self.lib.items(q) - self.assert_items_matched(results, ["with backslash"]) - - results = self.lib.albums(q) - self.assert_albums_matched(results, ["album with backslash"]) - - def test_case_sensitivity(self): - self.add_album(path=b"/A/B/C2.mp3", title="caps path") - - makeq = partial(beets.library.PathQuery, "path", "/A/B") - - results = self.lib.items(makeq(case_sensitive=True)) - self.assert_items_matched(results, ["caps path"]) - - results = self.lib.items(makeq(case_sensitive=False)) - self.assert_items_matched(results, ["path item", "caps path"]) - - # FIXME: Also create a variant of this test for windows, which tests - # both os.sep and os.altsep - @unittest.skipIf(sys.platform == "win32", "win32") - def test_path_sep_detection(self): - is_path_query = beets.library.PathQuery.is_path_query - - with self.force_implicit_query_detection(): - assert is_path_query("/foo/bar") - assert is_path_query("foo/bar") - assert is_path_query("foo/") - assert not is_path_query("foo") - assert is_path_query("foo/:bar") - assert not is_path_query("foo:bar/") - assert not is_path_query("foo:/bar") - - # FIXME: shouldn't this also work on windows? - @unittest.skipIf(sys.platform == "win32", WIN32_NO_IMPLICIT_PATHS) - def test_detect_absolute_path(self): - """Test detection of implicit path queries based on whether or - not the path actually exists, when using an absolute path query. - - Thus, don't use the `force_implicit_query_detection()` - contextmanager which would disable the existence check. 
- """ - is_path_query = beets.library.PathQuery.is_path_query - - path = self.touch(os.path.join(b"foo", b"bar")) - assert os.path.isabs(util.syspath(path)) - path_str = path.decode("utf-8") - - # The file itself. - assert is_path_query(path_str) - - # The parent directory. - parent = os.path.dirname(path_str) - assert is_path_query(parent) - - # Some non-existent path. - assert not is_path_query(f"{path_str}baz") - - def test_detect_relative_path(self): - """Test detection of implicit path queries based on whether or - not the path actually exists, when using a relative path query. - - Thus, don't use the `force_implicit_query_detection()` - contextmanager which would disable the existence check. - """ - is_path_query = beets.library.PathQuery.is_path_query - - self.touch(os.path.join(b"foo", b"bar")) - - # Temporarily change directory so relative paths work. - cur_dir = os.getcwd() - try: - os.chdir(syspath(self.temp_dir)) - assert is_path_query("foo/") - assert is_path_query("foo/bar") - assert is_path_query("foo/bar:tagada") - assert not is_path_query("bar") - finally: - os.chdir(cur_dir) - - -class IntQueryTest(BeetsTestCase): - def test_exact_value_match(self): - item = self.add_item(bpm=120) - matched = self.lib.items("bpm:120").get() - assert item.id == matched.id - - def test_range_match(self): - item = self.add_item(bpm=120) - self.add_item(bpm=130) - - matched = self.lib.items("bpm:110..125") - assert 1 == len(matched) - assert item.id == matched.get().id - - @patch("beets.library.Item._types", {"myint": types.Integer()}) - def test_flex_range_match(self): - item = self.add_item(myint=2) - matched = self.lib.items("myint:2").get() - assert item.id == matched.id - - @patch("beets.library.Item._types", {"myint": types.Integer()}) - def test_flex_dont_match_missing(self): - self.add_item() - matched = self.lib.items("myint:2").get() - assert matched is None - - def test_no_substring_match(self): - self.add_item(bpm=120) - matched = 
self.lib.items("bpm:12").get() - assert matched is None - - -@patch("beets.library.Item._types", {"flexbool": types.Boolean()}) -class BoolQueryTest(BeetsTestCase, AssertsMixin): - def test_parse_true(self): - item_true = self.add_item(comp=True) - item_false = self.add_item(comp=False) - matched = self.lib.items("comp:true") - self.assertInResult(item_true, matched) - self.assertNotInResult(item_false, matched) - - def test_flex_parse_true(self): - item_true = self.add_item(flexbool=True) - item_false = self.add_item(flexbool=False) - matched = self.lib.items("flexbool:true") - self.assertInResult(item_true, matched) - self.assertNotInResult(item_false, matched) - - def test_flex_parse_false(self): - item_true = self.add_item(flexbool=True) - item_false = self.add_item(flexbool=False) - matched = self.lib.items("flexbool:false") - self.assertInResult(item_false, matched) - self.assertNotInResult(item_true, matched) - - def test_flex_parse_1(self): - item_true = self.add_item(flexbool=True) - item_false = self.add_item(flexbool=False) - matched = self.lib.items("flexbool:1") - self.assertInResult(item_true, matched) - self.assertNotInResult(item_false, matched) - - def test_flex_parse_0(self): - item_true = self.add_item(flexbool=True) - item_false = self.add_item(flexbool=False) - matched = self.lib.items("flexbool:0") - self.assertInResult(item_false, matched) - self.assertNotInResult(item_true, matched) - - def test_flex_parse_any_string(self): - # TODO this should be the other way around - item_true = self.add_item(flexbool=True) - item_false = self.add_item(flexbool=False) - matched = self.lib.items("flexbool:something") - self.assertInResult(item_false, matched) - self.assertNotInResult(item_true, matched) - - -class DefaultSearchFieldsTest(DummyDataTestCase): - def test_albums_matches_album(self): - albums = list(self.lib.albums("baz")) - assert len(albums) == 1 - - def test_albums_matches_albumartist(self): - albums = list(self.lib.albums(["album artist"])) 
- assert len(albums) == 1 - - def test_items_matches_title(self): - items = self.lib.items("beets") - self.assert_items_matched(items, ["beets 4 eva"]) - - def test_items_does_not_match_year(self): - items = self.lib.items("2001") - self.assert_items_matched(items, []) - - -class NoneQueryTest(BeetsTestCase, AssertsMixin): - def test_match_singletons(self): - singleton = self.add_item() - album_item = self.add_album().items().get() - - matched = self.lib.items(NoneQuery("album_id")) - self.assertInResult(singleton, matched) - self.assertNotInResult(album_item, matched) - - def test_match_after_set_none(self): - item = self.add_item(rg_track_gain=0) - matched = self.lib.items(NoneQuery("rg_track_gain")) - self.assertNotInResult(item, matched) - - item["rg_track_gain"] = None - item.store() - matched = self.lib.items(NoneQuery("rg_track_gain")) - self.assertInResult(item, matched) - - def test_match_slow(self): - item = self.add_item() - matched = self.lib.items(NoneQuery("rg_track_peak", fast=False)) - self.assertInResult(item, matched) - - def test_match_slow_after_set_none(self): - item = self.add_item(rg_track_gain=0) - matched = self.lib.items(NoneQuery("rg_track_gain", fast=False)) - self.assertNotInResult(item, matched) - - item["rg_track_gain"] = None - item.store() - matched = self.lib.items(NoneQuery("rg_track_gain", fast=False)) - self.assertInResult(item, matched) - - -class NotQueryMatchTest(BeetsTestCase): - """Test `query.NotQuery` matching against a single item, using the same - cases and assertions as on `MatchTest`, plus assertion on the negated - queries (ie. assert q -> assert not NotQuery(q)). 
- """ - - def setUp(self): - super().setUp() - self.item = _common.item() - - def test_regex_match_positive(self): - q = dbcore.query.RegexpQuery("album", "^the album$") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_regex_match_negative(self): - q = dbcore.query.RegexpQuery("album", "^album$") - assert not q.match(self.item) - assert dbcore.query.NotQuery(q).match(self.item) - - def test_regex_match_non_string_value(self): - q = dbcore.query.RegexpQuery("disc", "^6$") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_substring_match_positive(self): - q = dbcore.query.SubstringQuery("album", "album") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_substring_match_negative(self): - q = dbcore.query.SubstringQuery("album", "ablum") - assert not q.match(self.item) - assert dbcore.query.NotQuery(q).match(self.item) - - def test_substring_match_non_string_value(self): - q = dbcore.query.SubstringQuery("disc", "6") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_year_match_positive(self): - q = dbcore.query.NumericQuery("year", "1") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_year_match_negative(self): - q = dbcore.query.NumericQuery("year", "10") - assert not q.match(self.item) - assert dbcore.query.NotQuery(q).match(self.item) - - def test_bitrate_range_positive(self): - q = dbcore.query.NumericQuery("bitrate", "100000..200000") - assert q.match(self.item) - assert not dbcore.query.NotQuery(q).match(self.item) - - def test_bitrate_range_negative(self): - q = dbcore.query.NumericQuery("bitrate", "200000..300000") - assert not q.match(self.item) - assert dbcore.query.NotQuery(q).match(self.item) - - def test_open_range(self): - q = dbcore.query.NumericQuery("bitrate", "100000..") - dbcore.query.NotQuery(q) - - -class 
NotQueryTest(DummyDataTestCase): - """Test `query.NotQuery` against the dummy data: - - `test_type_xxx`: tests for the negation of a particular XxxQuery class. - - `test_get_yyy`: tests on query strings (similar to `GetTest`) - """ - - def assertNegationProperties(self, q): - """Given a Query `q`, assert that: - - q OR not(q) == all items - - q AND not(q) == 0 - - not(not(q)) == q - """ - not_q = dbcore.query.NotQuery(q) # assert using OrQuery, AndQuery - q_or = dbcore.query.OrQuery([q, not_q]) - q_and = dbcore.query.AndQuery([q, not_q]) - self.assert_items_matched_all(self.lib.items(q_or)) - self.assert_items_matched(self.lib.items(q_and), []) + q_or = OrQuery([q, not_q]) + + q_and = AndQuery([q, not_q]) + assert get_results(q_or) == {"first", "second", "third"} + assert get_results(q_and) == set() # assert manually checking the item titles - all_titles = {i.title for i in self.lib.items()} - q_results = {i.title for i in self.lib.items(q)} - not_q_results = {i.title for i in self.lib.items(not_q)} + all_titles = get_results() + q_results = get_results(q) assert q_results.union(not_q_results) == all_titles assert q_results.intersection(not_q_results) == set() # round trip - not_not_q = dbcore.query.NotQuery(not_q) - assert {i.title for i in self.lib.items(q)} == { - i.title for i in self.lib.items(not_not_q) - } + not_not_q = NotQuery(not_q) + assert get_results(q) == get_results(not_not_q) - def test_type_and(self): - # not(a and b) <-> not(a) or not(b) - q = dbcore.query.AndQuery( - [ - dbcore.query.BooleanQuery("comp", True), - dbcore.query.NumericQuery("year", "2002"), - ], - ) - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["foo bar", "beets 4 eva"]) - self.assertNegationProperties(q) + @pytest.mark.parametrize( + "q, expected_titles", + [ + ("-artist::t.+r", ["first", "second"]), + ("-:t$", ["second", "third"]), + ("sec -bar", ["second"]), + ("sec -title:bar", ["second"]), + ("-ond", ["first", "third"]), + 
("^ond", ["first", "third"]), + ("^title:sec", ["first", "third"]), + ("-title:sec", ["first", "third"]), + ], + ) + def test_negation_prefix(self, lib, q, expected_titles): + actual_titles = {i.title for i in lib.items(q)} + assert actual_titles == set(expected_titles) - def test_type_boolean(self): - q = dbcore.query.BooleanQuery("comp", True) - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["beets 4 eva"]) - self.assertNegationProperties(q) - - def test_type_date(self): - q = dbcore.query.DateQuery("added", "2000-01-01") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - # query date is in the past, thus the 'not' results should contain all - # items - self.assert_items_matched( - not_results, ["foo bar", "baz qux", "beets 4 eva"] - ) - self.assertNegationProperties(q) - - def test_type_false(self): - q = dbcore.query.FalseQuery() - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched_all(not_results) - self.assertNegationProperties(q) - - def test_type_match(self): - q = dbcore.query.MatchQuery("year", "2003") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["foo bar", "baz qux"]) - self.assertNegationProperties(q) - - def test_type_none(self): - q = dbcore.query.NoneQuery("rg_track_gain") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, []) - self.assertNegationProperties(q) - - def test_type_numeric(self): - q = dbcore.query.NumericQuery("year", "2001..2002") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["beets 4 eva"]) - self.assertNegationProperties(q) - - def test_type_or(self): - # not(a or b) <-> not(a) and not(b) - q = dbcore.query.OrQuery( - [ - dbcore.query.BooleanQuery("comp", True), - dbcore.query.NumericQuery("year", "2002"), - ] - ) - not_results = self.lib.items(dbcore.query.NotQuery(q)) - 
self.assert_items_matched(not_results, ["beets 4 eva"]) - self.assertNegationProperties(q) - - def test_type_regexp(self): - q = dbcore.query.RegexpQuery("artist", "^t") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["foo bar"]) - self.assertNegationProperties(q) - - def test_type_substring(self): - q = dbcore.query.SubstringQuery("album", "ba") - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, ["beets 4 eva"]) - self.assertNegationProperties(q) - - def test_type_true(self): - q = dbcore.query.TrueQuery() - not_results = self.lib.items(dbcore.query.NotQuery(q)) - self.assert_items_matched(not_results, []) - self.assertNegationProperties(q) - - def test_get_prefixes_keyed(self): - """Test both negation prefixes on a keyed query.""" - q0 = "-title:qux" - q1 = "^title:qux" - results0 = self.lib.items(q0) - results1 = self.lib.items(q1) - self.assert_items_matched(results0, ["foo bar", "beets 4 eva"]) - self.assert_items_matched(results1, ["foo bar", "beets 4 eva"]) - - def test_get_prefixes_unkeyed(self): - """Test both negation prefixes on an unkeyed query.""" - q0 = "-qux" - q1 = "^qux" - results0 = self.lib.items(q0) - results1 = self.lib.items(q1) - self.assert_items_matched(results0, ["foo bar", "beets 4 eva"]) - self.assert_items_matched(results1, ["foo bar", "beets 4 eva"]) - - def test_get_one_keyed_regexp(self): - q = "-artist::t.+r" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "baz qux"]) - - def test_get_one_unkeyed_regexp(self): - q = "-:x$" - results = self.lib.items(q) - self.assert_items_matched(results, ["foo bar", "beets 4 eva"]) - - def test_get_multiple_terms(self): - q = "baz -bar" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def test_get_mixed_terms(self): - q = "baz -title:bar" - results = self.lib.items(q) - self.assert_items_matched(results, ["baz qux"]) - - def 
test_fast_vs_slow(self): + @pytest.mark.parametrize( + "make_q", + [ + partial(DateQuery, "added", "2001-01-01"), + partial(MatchQuery, "artist", "one"), + partial(NoneQuery, "rg_track_gain"), + partial(NumericQuery, "year", "2002"), + partial(StringQuery, "year", "2001"), + partial(RegexpQuery, "album", "^.a"), + partial(SubstringQuery, "title", "x"), + ], + ) + def test_fast_vs_slow(self, lib, make_q): """Test that the results are the same regardless of the `fast` flag for negated `FieldQuery`s. - - TODO: investigate NoneQuery(fast=False), as it is raising - AttributeError: type object 'NoneQuery' has no attribute 'field' - at NoneQuery.match() (due to being @classmethod, and no self?) """ - classes = [ - (dbcore.query.DateQuery, ["added", "2001-01-01"]), - (dbcore.query.MatchQuery, ["artist", "one"]), - # (dbcore.query.NoneQuery, ['rg_track_gain']), - (dbcore.query.NumericQuery, ["year", "2002"]), - (dbcore.query.StringFieldQuery, ["year", "2001"]), - (dbcore.query.RegexpQuery, ["album", "^.a"]), - (dbcore.query.SubstringQuery, ["title", "x"]), + q_fast = make_q(True) + q_slow = make_q(False) + + assert list(map(dict, lib.items(q_fast))) == list( + map(dict, lib.items(q_slow)) + ) + + +class TestMatch: + @pytest.fixture(scope="class") + def item(self): + return _common.item( + album="the album", + disc=6, + genre="the genre", + year=1, + bitrate=128000, + ) + + @pytest.mark.parametrize( + "q, should_match", + [ + (RegexpQuery("album", "^the album$"), True), + (RegexpQuery("album", "^album$"), False), + (RegexpQuery("disc", "^6$"), True), + (SubstringQuery("album", "album"), True), + (SubstringQuery("album", "ablum"), False), + (SubstringQuery("disc", "6"), True), + (StringQuery("genre", "the genre"), True), + (StringQuery("genre", "THE GENRE"), True), + (StringQuery("genre", "genre"), False), + (NumericQuery("year", "1"), True), + (NumericQuery("year", "10"), False), + (NumericQuery("bitrate", "100000..200000"), True), + (NumericQuery("bitrate", 
"200000..300000"), False), + (NumericQuery("bitrate", "100000.."), True), + ], + ) + def test_match(self, item, q, should_match): + assert q.match(item) == should_match + assert not NotQuery(q).match(item) == should_match + + +class TestPathQuery: + """Tests for path-based querying functionality in the database system. + + Verifies that path queries correctly match items by their file paths, + handling special characters, case sensitivity, parent directories, + and path separator detection across different platforms. + """ + + @pytest.fixture(scope="class") + def lib(self, helper): + helper.add_item(path=b"/aaa/bb/c.mp3", title="path item") + helper.add_item(path=b"/x/y/z.mp3", title="another item") + helper.add_item(path=b"/c/_/title.mp3", title="with underscore") + helper.add_item(path=b"/c/%/title.mp3", title="with percent") + helper.add_item(path=rb"/c/\x/title.mp3", title="with backslash") + helper.add_item(path=b"/A/B/C2.mp3", title="caps path") + + return helper.lib + + @pytest.mark.parametrize( + "q, expected_titles", + [ + _p("path:/aaa/bb/c.mp3", ["path item"], id="exact-match"), + _p("path:/aaa", ["path item"], id="parent-dir-no-slash"), + _p("path:/aaa/", ["path item"], id="parent-dir-with-slash"), + _p("path:/aa", [], id="no-match-does-not-match-parent-dir"), + _p("path:/xyzzy/", [], id="no-match"), + _p("path:/b/", [], id="fragment-no-match"), + _p("path:/x/../aaa/bb", ["path item"], id="non-normalized"), + _p("path::c\\.mp3$", ["path item"], id="regex"), + _p("path:/c/_", ["with underscore"], id="underscore-escaped"), + _p("path:/c/%", ["with percent"], id="percent-escaped"), + _p("path:/c/\\\\x", ["with backslash"], id="backslash-escaped"), + ], + ) + def test_explicit(self, monkeypatch, lib, q, expected_titles): + """Test explicit path queries with different path specifications.""" + monkeypatch.setattr("beets.util.case_sensitive", lambda *_: True) + + assert {i.title for i in lib.items(q)} == set(expected_titles) + + 
@pytest.mark.skipif(sys.platform == "win32", reason=WIN32_NO_IMPLICIT_PATHS) + @pytest.mark.parametrize( + "q, expected_titles", + [ + _p("/aaa/bb", ["path item"], id="slashed-query"), + _p("/aaa/bb , /aaa", ["path item"], id="path-in-or-query"), + _p("c.mp3", [], id="no-slash-no-match"), + _p("title:/a/b", [], id="slash-with-explicit-field-no-match"), + ], + ) + def test_implicit(self, monkeypatch, lib, q, expected_titles): + """Test implicit path detection when queries contain path separators.""" + monkeypatch.setattr( + "beets.dbcore.query.PathQuery.is_path_query", lambda path: True + ) + + assert {i.title for i in lib.items(q)} == set(expected_titles) + + @pytest.mark.parametrize( + "case_sensitive, expected_titles", + [ + _p(True, [], id="non-caps-dont-match-caps"), + _p(False, ["caps path"], id="non-caps-match-caps"), + ], + ) + def test_case_sensitivity( + self, lib, monkeypatch, case_sensitive, expected_titles + ): + """Test path matching with different case sensitivity settings.""" + q = "path:/a/b/c2.mp3" + monkeypatch.setattr( + "beets.util.case_sensitive", lambda *_: case_sensitive + ) + + assert {i.title for i in lib.items(q)} == set(expected_titles) + + # FIXME: Also create a variant of this test for windows, which tests + # both os.sep and os.altsep + @pytest.mark.skipif(sys.platform == "win32", reason=WIN32_NO_IMPLICIT_PATHS) + @pytest.mark.parametrize( + "q, is_path_query", + [ + ("/foo/bar", True), + ("foo/bar", True), + ("foo/", True), + ("foo", False), + ("foo/:bar", True), + ("foo:bar/", False), + ("foo:/bar", False), + ], + ) + def test_path_sep_detection(self, monkeypatch, tmp_path, q, is_path_query): + """Test detection of path queries based on the presence of path separators.""" + monkeypatch.chdir(tmp_path) + (tmp_path / "foo").mkdir() + (tmp_path / "foo" / "bar").touch() + if Path(q).is_absolute(): + q = str(tmp_path / q[1:]) + + assert PathQuery.is_path_query(q) == is_path_query + + +class TestQuery: + ALBUM = "album title" + SINGLE = 
"singleton" + + @pytest.fixture(scope="class") + def lib(self, helper): + helper.add_album( + title=self.ALBUM, + comp=True, + flexbool=True, + bpm=120, + flexint=2, + rg_track_gain=0, + ) + helper.add_item( + title=self.SINGLE, comp=False, flexbool=False, rg_track_gain=None + ) + + with pytest.MonkeyPatch.context() as monkeypatch: + monkeypatch.setattr( + Item, + "_types", + {"flexbool": types.Boolean(), "flexint": types.Integer()}, + ) + yield helper.lib + + @pytest.mark.parametrize("query_class", [MatchQuery, StringFieldQuery]) + def test_equality(self, query_class): + assert query_class("foo", "bar") == query_class("foo", "bar") + + @pytest.mark.parametrize( + "make_q, expected_msg", + [ + (lambda: NumericQuery("year", "199a"), "not an int"), + (lambda: RegexpQuery("year", "199("), r"not a regular expression.*unterminated subpattern"), # noqa: E501 ] + ) # fmt: skip + def test_invalid_query(self, make_q, expected_msg): + with pytest.raises(ParsingError, match=expected_msg): + make_q() - for klass, args in classes: - q_fast = dbcore.query.NotQuery(klass(*(args + [True]))) - q_slow = dbcore.query.NotQuery(klass(*(args + [False]))) - - try: - assert [i.title for i in self.lib.items(q_fast)] == [ - i.title for i in self.lib.items(q_slow) - ] - except NotImplementedError: - # ignore classes that do not provide `fast` implementation - pass + @pytest.mark.parametrize( + "q, expected_titles", + [ + # Boolean value + _p("comp:true", {ALBUM}, id="parse-true"), + _p("flexbool:true", {ALBUM}, id="flex-parse-true"), + _p("flexbool:false", {SINGLE}, id="flex-parse-false"), + _p("flexbool:1", {ALBUM}, id="flex-parse-1"), + _p("flexbool:0", {SINGLE}, id="flex-parse-0"), + # TODO: shouldn't this match 1 / true instead? 
+ _p("flexbool:something", {SINGLE}, id="flex-parse-true"), + # Integer value + _p("bpm:120", {ALBUM}, id="int-exact-value"), + _p("bpm:110..125", {ALBUM}, id="int-range"), + _p("flexint:2", {ALBUM}, id="int-flex"), + _p("flexint:3", set(), id="int-no-match"), + _p("bpm:12", set(), id="int-dont-match-substring"), + # None value + _p(NoneQuery("album_id"), {SINGLE}, id="none-match-singleton"), + _p(NoneQuery("rg_track_gain"), {SINGLE}, id="none-value"), + ], + ) + def test_value_type(self, lib, q, expected_titles): + assert {i.title for i in lib.items(q)} == expected_titles -class RelatedQueriesTest(BeetsTestCase, AssertsMixin): +class TestDefaultSearchFields: + @pytest.fixture(scope="class") + def lib(self, helper): + helper.add_album( + title="title", + album="album", + albumartist="albumartist", + catalognum="catalognum", + year=2001, + ) + + return helper.lib + + @pytest.mark.parametrize( + "entity, q, should_match", + [ + _p("albums", "album", True, id="album-match-album"), + _p("albums", "albumartist", True, id="album-match-albumartist"), + _p("albums", "catalognum", False, id="album-dont-match-catalognum"), + _p("items", "title", True, id="item-match-title"), + _p("items", "2001", False, id="item-dont-match-year"), + ], + ) + def test_search(self, lib, entity, q, should_match): + assert bool(getattr(lib, entity)(q)) == should_match + + +class TestRelatedQueries: """Test album-level queries with track-level filters and vice-versa.""" - def setUp(self): - super().setUp() - - albums = [] + @pytest.fixture(scope="class") + def lib(self, helper): for album_idx in range(1, 3): album_name = f"Album{album_idx}" - album_items = [] - for item_idx in range(1, 3): - item = _common.item() - item.album = album_name - item.title = f"{album_name} Item{item_idx}" - self.lib.add(item) - album_items.append(item) - album = self.lib.add_album(album_items) + items = [ + helper.create_item( + album=album_name, title=f"{album_name} Item{idx}" + ) + for idx in range(1, 3) + ] + album 
= helper.lib.add_album(items) album.artpath = f"{album_name} Artpath" album.catalognum = "ABC" album.store() - albums.append(album) - self.album, self.another_album = albums + return helper.lib - def test_get_albums_filter_by_track_field(self): - q = "title:Album1" - results = self.lib.albums(q) - self.assert_albums_matched(results, ["Album1"]) - - def test_get_items_filter_by_album_field(self): - q = "artpath::Album1" - results = self.lib.items(q) - self.assert_items_matched(results, ["Album1 Item1", "Album1 Item2"]) - - def test_filter_albums_by_common_field(self): - # title:Album1 ensures that the items table is joined for the query - q = "title:Album1 Album1" - results = self.lib.albums(q) - self.assert_albums_matched(results, ["Album1"]) - - def test_filter_items_by_common_field(self): - # artpath::A ensures that the albums table is joined for the query - q = "artpath::A Album1" - results = self.lib.items(q) - self.assert_items_matched(results, ["Album1 Item1", "Album1 Item2"]) + @pytest.mark.parametrize( + "q, expected_titles, expected_albums", + [ + _p( + "title:Album1", + ["Album1 Item1", "Album1 Item2"], + ["Album1"], + id="match-album-with-item-field-query", + ), + _p( + "title:Item2", + ["Album1 Item2", "Album2 Item2"], + ["Album1", "Album2"], + id="match-albums-with-item-field-query", + ), + _p( + "artpath::Album1", + ["Album1 Item1", "Album1 Item2"], + ["Album1"], + id="match-items-with-album-field-query", + ), + _p( + "catalognum:ABC Album1", + ["Album1 Item1", "Album1 Item2"], + ["Album1"], + id="query-field-common-to-album-and-item", + ), + ], + ) + def test_related_query(self, lib, q, expected_titles, expected_albums): + assert {i.album for i in lib.albums(q)} == set(expected_albums) + assert {i.title for i in lib.items(q)} == set(expected_titles) diff --git a/test/test_release.py b/test/test_release.py index 62986fa95..62b32a714 100644 --- a/test/test_release.py +++ b/test/test_release.py @@ -22,29 +22,36 @@ pytestmark = pytest.mark.skipif( def 
rst_changelog(): return """New features: -* :doc:`/plugins/substitute`: Some substitute +- :doc:`/plugins/substitute`: Some substitute multi-line change. :bug:`5467` -* :ref:`list-cmd` Update. +- :ref:`list-cmd` Update. -You can do something with this command:: +You can do something with this command: + +:: $ do-something Bug fixes: -* Some fix that refers to an issue. +- Some fix that refers to an issue. :bug:`5467` -* Some fix that mentions user :user:`username`. -* Some fix thanks to +- Some fix that mentions user :user:`username`. +- Some fix thanks to :user:`username`. :bug:`5467` -* Some fix with its own bullet points using incorrect indentation: - * First nested bullet point - with some text that wraps to the next line - * Second nested bullet point -* Another fix with its own bullet points using correct indentation: - * First - * Second +- Some fix with its own bullet points using incorrect indentation: + + - First nested bullet point + with some text that wraps to the next line + - Second nested bullet point + +- Another fix with an enumerated list + + 1. First + and some details + 2. Second + and some details Section naaaaaaaaaaaaaaaaaaaaaaaammmmmmmmmmmmmmmmeeeeeeeeeeeeeee with over 80 characters: @@ -53,7 +60,7 @@ Empty section: Other changes: -* Changed `bitesize` label to `good first issue`. Our `contribute`_ page is now +- Changed ``bitesize`` label to ``good first issue``. Our `contribute`_ page is now automatically populated with these issues. :bug:`4855` .. _contribute: https://github.com/beetbox/beets/contribute @@ -63,7 +70,7 @@ Other changes: Bug fixes: -* Fixed something.""" +- Fixed something.""" @pytest.fixture @@ -79,9 +86,9 @@ You can do something with this command: ### Bug fixes -- Another fix with its own bullet points using correct indentation: - - First - - Second +- Another fix with an enumerated list + 1. First and some details + 2. Second and some details - Some fix thanks to @username. 
:bug: (#5467) - Some fix that mentions user @username. - Some fix that refers to an issue. :bug: (#5467) diff --git a/test/test_sort.py b/test/test_sort.py index d6aa5c518..460aa07b8 100644 --- a/test/test_sort.py +++ b/test/test_sort.py @@ -14,7 +14,7 @@ """Various tests for querying the library database.""" -from mock import patch +from unittest.mock import patch import beets.library from beets import config, dbcore @@ -378,7 +378,7 @@ class ConfigSortTest(DummyDataTestCase): assert results[0].albumartist > results[1].albumartist -class CaseSensitivityTest(DummyDataTestCase, BeetsTestCase): +class CaseSensitivityTest(DummyDataTestCase): """If case_insensitive is false, lower-case values should be placed after all upper-case values. E.g., `Foo Qux bar` """ diff --git a/test/test_template.py b/test/test_template.py index 236bee5aa..031aab289 100644 --- a/test/test_template.py +++ b/test/test_template.py @@ -61,9 +61,9 @@ class ParseTest(unittest.TestCase): """ assert isinstance(obj, functemplate.Call), f"not a Call: {obj}" assert obj.ident == ident, f"wrong identifier: {obj.ident} vs. {ident}" - assert ( - len(obj.args) == numargs - ), f"wrong argument count in {obj.ident}: {len(obj.args)} vs. {numargs}" + assert len(obj.args) == numargs, ( + f"wrong argument count in {obj.ident}: {len(obj.args)} vs. 
{numargs}" + ) def test_plain_text(self): assert list(_normparse("hello world")) == ["hello world"] diff --git a/test/test_types.py b/test/test_types.py new file mode 100644 index 000000000..6727917d8 --- /dev/null +++ b/test/test_types.py @@ -0,0 +1,58 @@ +import time + +import beets +from beets.dbcore import types +from beets.util import normpath + + +def test_datetype(): + t = types.DATE + + # format + time_format = beets.config["time_format"].as_str() + time_local = time.strftime(time_format, time.localtime(123456789)) + assert time_local == t.format(123456789) + # parse + assert 123456789.0 == t.parse(time_local) + assert 123456789.0 == t.parse("123456789.0") + assert t.null == t.parse("not123456789.0") + assert t.null == t.parse("1973-11-29") + + +def test_pathtype(): + t = types.PathType() + + # format + assert "/tmp" == t.format("/tmp") + assert "/tmp/\xe4lbum" == t.format("/tmp/\u00e4lbum") + # parse + assert normpath(b"/tmp") == t.parse("/tmp") + assert normpath(b"/tmp/\xc3\xa4lbum") == t.parse("/tmp/\u00e4lbum/") + + +def test_musicalkey(): + t = types.MusicalKey() + + # parse + assert "C#m" == t.parse("c#m") + assert "Gm" == t.parse("g minor") + assert "Not c#m" == t.parse("not C#m") + + +def test_durationtype(): + t = types.DurationType() + + # format + assert "1:01" == t.format(61.23) + assert "60:01" == t.format(3601.23) + assert "0:00" == t.format(None) + # parse + assert 61.0 == t.parse("1:01") + assert 61.23 == t.parse("61.23") + assert 3601.0 == t.parse("60:01") + assert t.null == t.parse("1:00:01") + assert t.null == t.parse("not61.23") + # config format_raw_length + beets.config["format_raw_length"] = True + assert 61.23 == t.format(61.23) + assert 3601.23 == t.format(3601.23) diff --git a/test/test_ui.py b/test/test_ui.py deleted file mode 100644 index e9588dbc6..000000000 --- a/test/test_ui.py +++ /dev/null @@ -1,1638 +0,0 @@ -# This file is part of beets. -# Copyright 2016, Adrian Sampson. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Tests for the command-line interface.""" - -import os -import platform -import re -import shutil -import subprocess -import sys -import unittest -from unittest.mock import Mock, patch - -import pytest -from confuse import ConfigError -from mediafile import MediaFile - -from beets import autotag, config, library, plugins, ui, util -from beets.autotag.match import distance -from beets.test import _common -from beets.test.helper import ( - BeetsTestCase, - PluginTestCase, - capture_stdout, - control_stdin, - has_program, -) -from beets.ui import commands -from beets.util import MoveOperation, syspath - - -class ListTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.item = _common.item() - self.item.path = "xxx/yyy" - self.lib.add(self.item) - self.lib.add_album([self.item]) - - def _run_list(self, query="", album=False, path=False, fmt=""): - with capture_stdout() as stdout: - commands.list_items(self.lib, query, album, fmt) - return stdout - - def test_list_outputs_item(self): - stdout = self._run_list() - assert "the title" in stdout.getvalue() - - def test_list_unicode_query(self): - self.item.title = "na\xefve" - self.item.store() - self.lib._connection().commit() - - stdout = self._run_list(["na\xefve"]) - out = stdout.getvalue() - assert "na\xefve" in out - - def test_list_item_path(self): - stdout = self._run_list(fmt="$path") - assert 
stdout.getvalue().strip() == "xxx/yyy" - - def test_list_album_outputs_something(self): - stdout = self._run_list(album=True) - assert len(stdout.getvalue()) > 0 - - def test_list_album_path(self): - stdout = self._run_list(album=True, fmt="$path") - assert stdout.getvalue().strip() == "xxx" - - def test_list_album_omits_title(self): - stdout = self._run_list(album=True) - assert "the title" not in stdout.getvalue() - - def test_list_uses_track_artist(self): - stdout = self._run_list() - assert "the artist" in stdout.getvalue() - assert "the album artist" not in stdout.getvalue() - - def test_list_album_uses_album_artist(self): - stdout = self._run_list(album=True) - assert "the artist" not in stdout.getvalue() - assert "the album artist" in stdout.getvalue() - - def test_list_item_format_artist(self): - stdout = self._run_list(fmt="$artist") - assert "the artist" in stdout.getvalue() - - def test_list_item_format_multiple(self): - stdout = self._run_list(fmt="$artist - $album - $year") - assert "the artist - the album - 0001" == stdout.getvalue().strip() - - def test_list_album_format(self): - stdout = self._run_list(album=True, fmt="$genre") - assert "the genre" in stdout.getvalue() - assert "the album" not in stdout.getvalue() - - -class RemoveTest(BeetsTestCase): - def setUp(self): - super().setUp() - - self.io.install() - - # Copy a file into the library. 
- self.item_path = os.path.join(_common.RSRC, b"full.mp3") - self.i = library.Item.from_path(self.item_path) - self.lib.add(self.i) - self.i.move(operation=MoveOperation.COPY) - - def test_remove_items_no_delete(self): - self.io.addinput("y") - commands.remove_items(self.lib, "", False, False, False) - items = self.lib.items() - assert len(list(items)) == 0 - self.assertExists(self.i.path) - - def test_remove_items_with_delete(self): - self.io.addinput("y") - commands.remove_items(self.lib, "", False, True, False) - items = self.lib.items() - assert len(list(items)) == 0 - self.assertNotExists(self.i.path) - - def test_remove_items_with_force_no_delete(self): - commands.remove_items(self.lib, "", False, False, True) - items = self.lib.items() - assert len(list(items)) == 0 - self.assertExists(self.i.path) - - def test_remove_items_with_force_delete(self): - commands.remove_items(self.lib, "", False, True, True) - items = self.lib.items() - assert len(list(items)) == 0 - self.assertNotExists(self.i.path) - - def test_remove_items_select_with_delete(self): - i2 = library.Item.from_path(self.item_path) - self.lib.add(i2) - i2.move(operation=MoveOperation.COPY) - - for s in ("s", "y", "n"): - self.io.addinput(s) - commands.remove_items(self.lib, "", False, True, False) - items = self.lib.items() - assert len(list(items)) == 1 - # There is probably no guarantee that the items are queried in any - # spcecific order, thus just ensure that exactly one was removed. - # To improve upon this, self.io would need to have the capability to - # generate input that depends on previous output. 
- num_existing = 0 - num_existing += 1 if os.path.exists(syspath(self.i.path)) else 0 - num_existing += 1 if os.path.exists(syspath(i2.path)) else 0 - assert num_existing == 1 - - def test_remove_albums_select_with_delete(self): - a1 = self.add_album_fixture() - a2 = self.add_album_fixture() - path1 = a1.items()[0].path - path2 = a2.items()[0].path - items = self.lib.items() - assert len(list(items)) == 3 - - for s in ("s", "y", "n"): - self.io.addinput(s) - commands.remove_items(self.lib, "", True, True, False) - items = self.lib.items() - assert len(list(items)) == 2 # incl. the item from setUp() - # See test_remove_items_select_with_delete() - num_existing = 0 - num_existing += 1 if os.path.exists(syspath(path1)) else 0 - num_existing += 1 if os.path.exists(syspath(path2)) else 0 - assert num_existing == 1 - - -class ModifyTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.album = self.add_album_fixture() - [self.item] = self.album.items() - - def modify_inp(self, inp, *args): - with control_stdin(inp): - self.run_command("modify", *args) - - def modify(self, *args): - self.modify_inp("y", *args) - - # Item tests - - def test_modify_item(self): - self.modify("title=newTitle") - item = self.lib.items().get() - assert item.title == "newTitle" - - def test_modify_item_abort(self): - item = self.lib.items().get() - title = item.title - self.modify_inp("n", "title=newTitle") - item = self.lib.items().get() - assert item.title == title - - def test_modify_item_no_change(self): - title = "Tracktitle" - item = self.add_item_fixture(title=title) - self.modify_inp("y", "title", f"title={title}") - item = self.lib.items(title).get() - assert item.title == title - - def test_modify_write_tags(self): - self.modify("title=newTitle") - item = self.lib.items().get() - item.read() - assert item.title == "newTitle" - - def test_modify_dont_write_tags(self): - self.modify("--nowrite", "title=newTitle") - item = self.lib.items().get() - item.read() - assert item.title 
!= "newTitle" - - def test_move(self): - self.modify("title=newTitle") - item = self.lib.items().get() - assert b"newTitle" in item.path - - def test_not_move(self): - self.modify("--nomove", "title=newTitle") - item = self.lib.items().get() - assert b"newTitle" not in item.path - - def test_no_write_no_move(self): - self.modify("--nomove", "--nowrite", "title=newTitle") - item = self.lib.items().get() - item.read() - assert b"newTitle" not in item.path - assert item.title != "newTitle" - - def test_update_mtime(self): - item = self.item - old_mtime = item.mtime - - self.modify("title=newTitle") - item.load() - assert old_mtime != item.mtime - assert item.current_mtime() == item.mtime - - def test_reset_mtime_with_no_write(self): - item = self.item - - self.modify("--nowrite", "title=newTitle") - item.load() - assert 0 == item.mtime - - def test_selective_modify(self): - title = "Tracktitle" - album = "album" - original_artist = "composer" - new_artist = "coverArtist" - for i in range(0, 10): - self.add_item_fixture( - title=f"{title}{i}", artist=original_artist, album=album - ) - self.modify_inp( - "s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn", title, f"artist={new_artist}" - ) - original_items = self.lib.items(f"artist:{original_artist}") - new_items = self.lib.items(f"artist:{new_artist}") - assert len(list(original_items)) == 3 - assert len(list(new_items)) == 7 - - def test_modify_formatted(self): - for i in range(0, 3): - self.add_item_fixture( - title=f"title{i}", artist="artist", album="album" - ) - items = list(self.lib.items()) - self.modify("title=${title} - append") - for item in items: - orig_title = item.title - item.load() - assert item.title == f"{orig_title} - append" - - # Album Tests - - def test_modify_album(self): - self.modify("--album", "album=newAlbum") - album = self.lib.albums().get() - assert album.album == "newAlbum" - - def test_modify_album_write_tags(self): - self.modify("--album", "album=newAlbum") - item = self.lib.items().get() - item.read() - 
assert item.album == "newAlbum" - - def test_modify_album_dont_write_tags(self): - self.modify("--album", "--nowrite", "album=newAlbum") - item = self.lib.items().get() - item.read() - assert item.album == "the album" - - def test_album_move(self): - self.modify("--album", "album=newAlbum") - item = self.lib.items().get() - item.read() - assert b"newAlbum" in item.path - - def test_album_not_move(self): - self.modify("--nomove", "--album", "album=newAlbum") - item = self.lib.items().get() - item.read() - assert b"newAlbum" not in item.path - - def test_modify_album_formatted(self): - item = self.lib.items().get() - orig_album = item.album - self.modify("--album", "album=${album} - append") - item.load() - assert item.album == f"{orig_album} - append" - - # Misc - - def test_write_initial_key_tag(self): - self.modify("initial_key=C#m") - item = self.lib.items().get() - mediafile = MediaFile(syspath(item.path)) - assert mediafile.initial_key == "C#m" - - def test_set_flexattr(self): - self.modify("flexattr=testAttr") - item = self.lib.items().get() - assert item.flexattr == "testAttr" - - def test_remove_flexattr(self): - item = self.lib.items().get() - item.flexattr = "testAttr" - item.store() - - self.modify("flexattr!") - item = self.lib.items().get() - assert "flexattr" not in item - - @unittest.skip("not yet implemented") - def test_delete_initial_key_tag(self): - item = self.lib.items().get() - item.initial_key = "C#m" - item.write() - item.store() - - mediafile = MediaFile(syspath(item.path)) - assert mediafile.initial_key == "C#m" - - self.modify("initial_key!") - mediafile = MediaFile(syspath(item.path)) - assert mediafile.initial_key is None - - def test_arg_parsing_colon_query(self): - (query, mods, dels) = commands.modify_parse_args( - ["title:oldTitle", "title=newTitle"] - ) - assert query == ["title:oldTitle"] - assert mods == {"title": "newTitle"} - - def test_arg_parsing_delete(self): - (query, mods, dels) = commands.modify_parse_args( - 
["title:oldTitle", "title!"] - ) - assert query == ["title:oldTitle"] - assert dels == ["title"] - - def test_arg_parsing_query_with_exclaimation(self): - (query, mods, dels) = commands.modify_parse_args( - ["title:oldTitle!", "title=newTitle!"] - ) - assert query == ["title:oldTitle!"] - assert mods == {"title": "newTitle!"} - - def test_arg_parsing_equals_in_value(self): - (query, mods, dels) = commands.modify_parse_args( - ["title:foo=bar", "title=newTitle"] - ) - assert query == ["title:foo=bar"] - assert mods == {"title": "newTitle"} - - -class WriteTest(BeetsTestCase): - def write_cmd(self, *args): - return self.run_with_output("write", *args) - - def test_update_mtime(self): - item = self.add_item_fixture() - item["title"] = "a new title" - item.store() - - item = self.lib.items().get() - assert item.mtime == 0 - - self.write_cmd() - item = self.lib.items().get() - assert item.mtime == item.current_mtime() - - def test_non_metadata_field_unchanged(self): - """Changing a non-"tag" field like `bitrate` and writing should - have no effect. - """ - # An item that starts out "clean". - item = self.add_item_fixture() - item.read() - - # ... but with a mismatched bitrate. - item.bitrate = 123 - item.store() - - output = self.write_cmd() - - assert output == "" - - def test_write_metadata_field(self): - item = self.add_item_fixture() - item.read() - old_title = item.title - - item.title = "new title" - item.store() - - output = self.write_cmd() - - assert f"{old_title} -> new title" in output - - -class MoveTest(BeetsTestCase): - def setUp(self): - super().setUp() - - self.io.install() - - self.itempath = os.path.join(self.libdir, b"srcfile") - shutil.copy( - syspath(os.path.join(_common.RSRC, b"full.mp3")), - syspath(self.itempath), - ) - - # Add a file to the library but don't copy it in yet. - self.i = library.Item.from_path(self.itempath) - self.lib.add(self.i) - self.album = self.lib.add_album([self.i]) - - # Alternate destination directory. 
- self.otherdir = os.path.join(self.temp_dir, b"testotherdir") - - def _move( - self, - query=(), - dest=None, - copy=False, - album=False, - pretend=False, - export=False, - ): - commands.move_items( - self.lib, dest, query, copy, album, pretend, export=export - ) - - def test_move_item(self): - self._move() - self.i.load() - assert b"libdir" in self.i.path - self.assertExists(self.i.path) - self.assertNotExists(self.itempath) - - def test_copy_item(self): - self._move(copy=True) - self.i.load() - assert b"libdir" in self.i.path - self.assertExists(self.i.path) - self.assertExists(self.itempath) - - def test_move_album(self): - self._move(album=True) - self.i.load() - assert b"libdir" in self.i.path - self.assertExists(self.i.path) - self.assertNotExists(self.itempath) - - def test_copy_album(self): - self._move(copy=True, album=True) - self.i.load() - assert b"libdir" in self.i.path - self.assertExists(self.i.path) - self.assertExists(self.itempath) - - def test_move_item_custom_dir(self): - self._move(dest=self.otherdir) - self.i.load() - assert b"testotherdir" in self.i.path - self.assertExists(self.i.path) - self.assertNotExists(self.itempath) - - def test_move_album_custom_dir(self): - self._move(dest=self.otherdir, album=True) - self.i.load() - assert b"testotherdir" in self.i.path - self.assertExists(self.i.path) - self.assertNotExists(self.itempath) - - def test_pretend_move_item(self): - self._move(dest=self.otherdir, pretend=True) - self.i.load() - assert b"srcfile" in self.i.path - - def test_pretend_move_album(self): - self._move(album=True, pretend=True) - self.i.load() - assert b"srcfile" in self.i.path - - def test_export_item_custom_dir(self): - self._move(dest=self.otherdir, export=True) - self.i.load() - assert self.i.path == self.itempath - self.assertExists(self.otherdir) - - def test_export_album_custom_dir(self): - self._move(dest=self.otherdir, album=True, export=True) - self.i.load() - assert self.i.path == self.itempath - 
self.assertExists(self.otherdir) - - def test_pretend_export_item(self): - self._move(dest=self.otherdir, pretend=True, export=True) - self.i.load() - assert b"srcfile" in self.i.path - self.assertNotExists(self.otherdir) - - -class UpdateTest(BeetsTestCase): - def setUp(self): - super().setUp() - - self.io.install() - - # Copy a file into the library. - item_path = os.path.join(_common.RSRC, b"full.mp3") - item_path_two = os.path.join(_common.RSRC, b"full.flac") - self.i = library.Item.from_path(item_path) - self.i2 = library.Item.from_path(item_path_two) - self.lib.add(self.i) - self.lib.add(self.i2) - self.i.move(operation=MoveOperation.COPY) - self.i2.move(operation=MoveOperation.COPY) - self.album = self.lib.add_album([self.i, self.i2]) - - # Album art. - artfile = os.path.join(self.temp_dir, b"testart.jpg") - _common.touch(artfile) - self.album.set_art(artfile) - self.album.store() - util.remove(artfile) - - def _update( - self, - query=(), - album=False, - move=False, - reset_mtime=True, - fields=None, - exclude_fields=None, - ): - self.io.addinput("y") - if reset_mtime: - self.i.mtime = 0 - self.i.store() - commands.update_items( - self.lib, - query, - album, - move, - False, - fields=fields, - exclude_fields=exclude_fields, - ) - - def test_delete_removes_item(self): - assert list(self.lib.items()) - util.remove(self.i.path) - util.remove(self.i2.path) - self._update() - assert not list(self.lib.items()) - - def test_delete_removes_album(self): - assert self.lib.albums() - util.remove(self.i.path) - util.remove(self.i2.path) - self._update() - assert not self.lib.albums() - - def test_delete_removes_album_art(self): - artpath = self.album.artpath - self.assertExists(artpath) - util.remove(self.i.path) - util.remove(self.i2.path) - self._update() - self.assertNotExists(artpath) - - def test_modified_metadata_detected(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.save() - self._update() - item = self.lib.items().get() - 
assert item.title == "differentTitle" - - def test_modified_metadata_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.save() - self._update(move=True) - item = self.lib.items().get() - assert b"differentTitle" in item.path - - def test_modified_metadata_not_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.save() - self._update(move=False) - item = self.lib.items().get() - assert b"differentTitle" not in item.path - - def test_selective_modified_metadata_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.genre = "differentGenre" - mf.save() - self._update(move=True, fields=["title"]) - item = self.lib.items().get() - assert b"differentTitle" in item.path - assert item.genre != "differentGenre" - - def test_selective_modified_metadata_not_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.genre = "differentGenre" - mf.save() - self._update(move=False, fields=["title"]) - item = self.lib.items().get() - assert b"differentTitle" not in item.path - assert item.genre != "differentGenre" - - def test_modified_album_metadata_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.album = "differentAlbum" - mf.save() - self._update(move=True) - item = self.lib.items().get() - assert b"differentAlbum" in item.path - - def test_modified_album_metadata_art_moved(self): - artpath = self.album.artpath - mf = MediaFile(syspath(self.i.path)) - mf.album = "differentAlbum" - mf.save() - self._update(move=True) - album = self.lib.albums()[0] - assert artpath != album.artpath - assert album.artpath is not None - - def test_selective_modified_album_metadata_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.album = "differentAlbum" - mf.genre = "differentGenre" - mf.save() - self._update(move=True, fields=["album"]) - item = self.lib.items().get() - assert b"differentAlbum" in item.path - assert item.genre != "differentGenre" - - def 
test_selective_modified_album_metadata_not_moved(self): - mf = MediaFile(syspath(self.i.path)) - mf.album = "differentAlbum" - mf.genre = "differentGenre" - mf.save() - self._update(move=True, fields=["genre"]) - item = self.lib.items().get() - assert b"differentAlbum" not in item.path - assert item.genre == "differentGenre" - - def test_mtime_match_skips_update(self): - mf = MediaFile(syspath(self.i.path)) - mf.title = "differentTitle" - mf.save() - - # Make in-memory mtime match on-disk mtime. - self.i.mtime = os.path.getmtime(syspath(self.i.path)) - self.i.store() - - self._update(reset_mtime=False) - item = self.lib.items().get() - assert item.title == "full" - - def test_multivalued_albumtype_roundtrip(self): - # https://github.com/beetbox/beets/issues/4528 - - # albumtypes is empty for our test fixtures, so populate it first - album = self.album - correct_albumtypes = ["album", "live"] - - # Setting albumtypes does not set albumtype, currently. - # Using x[0] mirrors https://github.com/beetbox/mediafile/blob/057432ad53b3b84385e5582f69f44dc00d0a725d/mediafile.py#L1928 # noqa: E501 - correct_albumtype = correct_albumtypes[0] - - album.albumtype = correct_albumtype - album.albumtypes = correct_albumtypes - album.try_sync(write=True, move=False) - - album.load() - assert album.albumtype == correct_albumtype - assert album.albumtypes == correct_albumtypes - - self._update() - - album.load() - assert album.albumtype == correct_albumtype - assert album.albumtypes == correct_albumtypes - - def test_modified_metadata_excluded(self): - mf = MediaFile(syspath(self.i.path)) - mf.lyrics = "new lyrics" - mf.save() - self._update(exclude_fields=["lyrics"]) - item = self.lib.items().get() - assert item.lyrics != "new lyrics" - - -class PrintTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.io.install() - - def test_print_without_locale(self): - lang = os.environ.get("LANG") - if lang: - del os.environ["LANG"] - - try: - ui.print_("something") - except 
TypeError: - self.fail("TypeError during print") - finally: - if lang: - os.environ["LANG"] = lang - - def test_print_with_invalid_locale(self): - old_lang = os.environ.get("LANG") - os.environ["LANG"] = "" - old_ctype = os.environ.get("LC_CTYPE") - os.environ["LC_CTYPE"] = "UTF-8" - - try: - ui.print_("something") - except ValueError: - self.fail("ValueError during print") - finally: - if old_lang: - os.environ["LANG"] = old_lang - else: - del os.environ["LANG"] - if old_ctype: - os.environ["LC_CTYPE"] = old_ctype - else: - del os.environ["LC_CTYPE"] - - -class ImportTest(BeetsTestCase): - def test_quiet_timid_disallowed(self): - config["import"]["quiet"] = True - config["import"]["timid"] = True - with pytest.raises(ui.UserError): - commands.import_files(None, [], None) - - def test_parse_paths_from_logfile(self): - if os.path.__name__ == "ntpath": - logfile_content = ( - "import started Wed Jun 15 23:08:26 2022\n" - "asis C:\\music\\Beatles, The\\The Beatles; C:\\music\\Beatles, The\\The Beatles\\CD 01; C:\\music\\Beatles, The\\The Beatles\\CD 02\n" # noqa: E501 - "duplicate-replace C:\\music\\Bill Evans\\Trio '65\n" - "skip C:\\music\\Michael Jackson\\Bad\n" - "skip C:\\music\\Soulwax\\Any Minute Now\n" - ) - expected_paths = [ - "C:\\music\\Beatles, The\\The Beatles", - "C:\\music\\Michael Jackson\\Bad", - "C:\\music\\Soulwax\\Any Minute Now", - ] - else: - logfile_content = ( - "import started Wed Jun 15 23:08:26 2022\n" - "asis /music/Beatles, The/The Beatles; /music/Beatles, The/The Beatles/CD 01; /music/Beatles, The/The Beatles/CD 02\n" # noqa: E501 - "duplicate-replace /music/Bill Evans/Trio '65\n" - "skip /music/Michael Jackson/Bad\n" - "skip /music/Soulwax/Any Minute Now\n" - ) - expected_paths = [ - "/music/Beatles, The/The Beatles", - "/music/Michael Jackson/Bad", - "/music/Soulwax/Any Minute Now", - ] - - logfile = os.path.join(self.temp_dir, b"logfile.log") - with open(logfile, mode="w") as fp: - fp.write(logfile_content) - actual_paths = 
list(commands._paths_from_logfile(logfile)) - assert actual_paths == expected_paths - - -@_common.slow_test() -class TestPluginTestCase(PluginTestCase): - plugin = "test" - - def setUp(self): - super().setUp() - config["pluginpath"] = [_common.PLUGINPATH] - - -class ConfigTest(TestPluginTestCase): - def setUp(self): - super().setUp() - - # Don't use the BEETSDIR from `helper`. Instead, we point the home - # directory there. Some tests will set `BEETSDIR` themselves. - del os.environ["BEETSDIR"] - - # Also set APPDATA, the Windows equivalent of setting $HOME. - appdata_dir = os.fsdecode( - os.path.join(self.temp_dir, b"AppData", b"Roaming") - ) - - self._orig_cwd = os.getcwd() - self.test_cmd = self._make_test_cmd() - commands.default_commands.append(self.test_cmd) - - # Default user configuration - if platform.system() == "Windows": - self.user_config_dir = os.fsencode( - os.path.join(appdata_dir, "beets") - ) - else: - self.user_config_dir = os.path.join( - self.temp_dir, b".config", b"beets" - ) - os.makedirs(syspath(self.user_config_dir)) - self.user_config_path = os.path.join( - self.user_config_dir, b"config.yaml" - ) - - # Custom BEETSDIR - self.beetsdir = os.path.join(self.temp_dir, b"beetsdir") - self.cli_config_path = os.path.join( - os.fsdecode(self.temp_dir), "config.yaml" - ) - os.makedirs(syspath(self.beetsdir)) - self.env_patcher = patch( - "os.environ", - {"HOME": os.fsdecode(self.temp_dir), "APPDATA": appdata_dir}, - ) - self.env_patcher.start() - - self._reset_config() - - def tearDown(self): - self.env_patcher.stop() - commands.default_commands.pop() - os.chdir(syspath(self._orig_cwd)) - super().tearDown() - - def _make_test_cmd(self): - test_cmd = ui.Subcommand("test", help="test") - - def run(lib, options, args): - test_cmd.lib = lib - test_cmd.options = options - test_cmd.args = args - - test_cmd.func = run - return test_cmd - - def _reset_config(self): - # Config should read files again on demand - config.clear() - config._materialized = False 
- - def write_config_file(self): - return open(self.user_config_path, "w") - - def test_paths_section_respected(self): - with self.write_config_file() as config: - config.write("paths: {x: y}") - - self.run_command("test", lib=None) - key, template = self.test_cmd.lib.path_formats[0] - assert key == "x" - assert template.original == "y" - - def test_default_paths_preserved(self): - default_formats = ui.get_path_formats() - - self._reset_config() - with self.write_config_file() as config: - config.write("paths: {x: y}") - self.run_command("test", lib=None) - key, template = self.test_cmd.lib.path_formats[0] - assert key == "x" - assert template.original == "y" - assert self.test_cmd.lib.path_formats[1:] == default_formats - - def test_nonexistant_db(self): - with self.write_config_file() as config: - config.write("library: /xxx/yyy/not/a/real/path") - - with pytest.raises(ui.UserError): - self.run_command("test", lib=None) - - def test_user_config_file(self): - with self.write_config_file() as file: - file.write("anoption: value") - - self.run_command("test", lib=None) - assert config["anoption"].get() == "value" - - def test_replacements_parsed(self): - with self.write_config_file() as config: - config.write("replace: {'[xy]': z}") - - self.run_command("test", lib=None) - replacements = self.test_cmd.lib.replacements - repls = [(p.pattern, s) for p, s in replacements] # Compare patterns. 
- assert repls == [("[xy]", "z")] - - def test_multiple_replacements_parsed(self): - with self.write_config_file() as config: - config.write("replace: {'[xy]': z, foo: bar}") - self.run_command("test", lib=None) - replacements = self.test_cmd.lib.replacements - repls = [(p.pattern, s) for p, s in replacements] - assert repls == [("[xy]", "z"), ("foo", "bar")] - - def test_cli_config_option(self): - with open(self.cli_config_path, "w") as file: - file.write("anoption: value") - self.run_command("--config", self.cli_config_path, "test", lib=None) - assert config["anoption"].get() == "value" - - def test_cli_config_file_overwrites_user_defaults(self): - with open(self.user_config_path, "w") as file: - file.write("anoption: value") - - with open(self.cli_config_path, "w") as file: - file.write("anoption: cli overwrite") - self.run_command("--config", self.cli_config_path, "test", lib=None) - assert config["anoption"].get() == "cli overwrite" - - def test_cli_config_file_overwrites_beetsdir_defaults(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - env_config_path = os.path.join(self.beetsdir, b"config.yaml") - with open(env_config_path, "w") as file: - file.write("anoption: value") - - with open(self.cli_config_path, "w") as file: - file.write("anoption: cli overwrite") - self.run_command("--config", self.cli_config_path, "test", lib=None) - assert config["anoption"].get() == "cli overwrite" - - # @unittest.skip('Difficult to implement with optparse') - # def test_multiple_cli_config_files(self): - # cli_config_path_1 = os.path.join(self.temp_dir, b'config.yaml') - # cli_config_path_2 = os.path.join(self.temp_dir, b'config_2.yaml') - # - # with open(cli_config_path_1, 'w') as file: - # file.write('first: value') - # - # with open(cli_config_path_2, 'w') as file: - # file.write('second: value') - # - # self.run_command('--config', cli_config_path_1, - # '--config', cli_config_path_2, 'test', lib=None) - # assert config['first'].get() == 'value' - # assert 
config['second'].get() == 'value' - # - # @unittest.skip('Difficult to implement with optparse') - # def test_multiple_cli_config_overwrite(self): - # cli_overwrite_config_path = os.path.join(self.temp_dir, - # b'overwrite_config.yaml') - # - # with open(self.cli_config_path, 'w') as file: - # file.write('anoption: value') - # - # with open(cli_overwrite_config_path, 'w') as file: - # file.write('anoption: overwrite') - # - # self.run_command('--config', self.cli_config_path, - # '--config', cli_overwrite_config_path, 'test') - # assert config['anoption'].get() == 'cli overwrite' - - # FIXME: fails on windows - @unittest.skipIf(sys.platform == "win32", "win32") - def test_cli_config_paths_resolve_relative_to_user_dir(self): - with open(self.cli_config_path, "w") as file: - file.write("library: beets.db\n") - file.write("statefile: state") - - self.run_command("--config", self.cli_config_path, "test", lib=None) - self.assert_equal_path( - util.bytestring_path(config["library"].as_filename()), - os.path.join(self.user_config_dir, b"beets.db"), - ) - self.assert_equal_path( - util.bytestring_path(config["statefile"].as_filename()), - os.path.join(self.user_config_dir, b"state"), - ) - - def test_cli_config_paths_resolve_relative_to_beetsdir(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - - with open(self.cli_config_path, "w") as file: - file.write("library: beets.db\n") - file.write("statefile: state") - - self.run_command("--config", self.cli_config_path, "test", lib=None) - self.assert_equal_path( - util.bytestring_path(config["library"].as_filename()), - os.path.join(self.beetsdir, b"beets.db"), - ) - self.assert_equal_path( - util.bytestring_path(config["statefile"].as_filename()), - os.path.join(self.beetsdir, b"state"), - ) - - def test_command_line_option_relative_to_working_dir(self): - config.read() - os.chdir(syspath(self.temp_dir)) - self.run_command("--library", "foo.db", "test", lib=None) - self.assert_equal_path( - 
config["library"].as_filename(), os.path.join(os.getcwd(), "foo.db") - ) - - def test_cli_config_file_loads_plugin_commands(self): - with open(self.cli_config_path, "w") as file: - file.write("pluginpath: %s\n" % _common.PLUGINPATH) - file.write("plugins: test") - - self.run_command("--config", self.cli_config_path, "plugin", lib=None) - assert plugins.find_plugins()[0].is_test_plugin - self.unload_plugins() - - def test_beetsdir_config(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - - env_config_path = os.path.join(self.beetsdir, b"config.yaml") - with open(env_config_path, "w") as file: - file.write("anoption: overwrite") - - config.read() - assert config["anoption"].get() == "overwrite" - - def test_beetsdir_points_to_file_error(self): - beetsdir = os.path.join(self.temp_dir, b"beetsfile") - open(beetsdir, "a").close() - os.environ["BEETSDIR"] = os.fsdecode(beetsdir) - with pytest.raises(ConfigError): - self.run_command("test") - - def test_beetsdir_config_does_not_load_default_user_config(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - - with open(self.user_config_path, "w") as file: - file.write("anoption: value") - - config.read() - assert not config["anoption"].exists() - - def test_default_config_paths_resolve_relative_to_beetsdir(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - - config.read() - self.assert_equal_path( - util.bytestring_path(config["library"].as_filename()), - os.path.join(self.beetsdir, b"library.db"), - ) - self.assert_equal_path( - util.bytestring_path(config["statefile"].as_filename()), - os.path.join(self.beetsdir, b"state.pickle"), - ) - - def test_beetsdir_config_paths_resolve_relative_to_beetsdir(self): - os.environ["BEETSDIR"] = os.fsdecode(self.beetsdir) - - env_config_path = os.path.join(self.beetsdir, b"config.yaml") - with open(env_config_path, "w") as file: - file.write("library: beets.db\n") - file.write("statefile: state") - - config.read() - self.assert_equal_path( - 
util.bytestring_path(config["library"].as_filename()), - os.path.join(self.beetsdir, b"beets.db"), - ) - self.assert_equal_path( - util.bytestring_path(config["statefile"].as_filename()), - os.path.join(self.beetsdir, b"state"), - ) - - -class ShowModelChangeTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.io.install() - self.a = _common.item() - self.b = _common.item() - self.a.path = self.b.path - - def _show(self, **kwargs): - change = ui.show_model_changes(self.a, self.b, **kwargs) - out = self.io.getoutput() - return change, out - - def test_identical(self): - change, out = self._show() - assert not change - assert out == "" - - def test_string_fixed_field_change(self): - self.b.title = "x" - change, out = self._show() - assert change - assert "title" in out - - def test_int_fixed_field_change(self): - self.b.track = 9 - change, out = self._show() - assert change - assert "track" in out - - def test_floats_close_to_identical(self): - self.a.length = 1.00001 - self.b.length = 1.00005 - change, out = self._show() - assert not change - assert out == "" - - def test_floats_different(self): - self.a.length = 1.00001 - self.b.length = 2.00001 - change, out = self._show() - assert change - assert "length" in out - - def test_both_values_shown(self): - self.a.title = "foo" - self.b.title = "bar" - change, out = self._show() - assert "foo" in out - assert "bar" in out - - -class ShowChangeTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.io.install() - - self.items = [_common.item()] - self.items[0].track = 1 - self.items[0].path = b"/path/to/file.mp3" - self.info = autotag.AlbumInfo( - album="the album", - album_id="album id", - artist="the artist", - artist_id="artist id", - tracks=[ - autotag.TrackInfo( - title="the title", track_id="track id", index=1 - ) - ], - ) - - def _show_change( - self, - items=None, - info=None, - color=False, - cur_artist="the artist", - cur_album="the album", - dist=0.1, - ): - """Return an unicode string 
representing the changes""" - items = items or self.items - info = info or self.info - mapping = dict(zip(items, info.tracks)) - config["ui"]["color"] = color - config["import"]["detail"] = True - change_dist = distance(items, info, mapping) - change_dist._penalties = {"album": [dist], "artist": [dist]} - commands.show_change( - cur_artist, - cur_album, - autotag.AlbumMatch(change_dist, info, mapping, set(), set()), - ) - return self.io.getoutput().lower() - - def test_null_change(self): - msg = self._show_change() - assert "match (90.0%)" in msg - assert "album, artist" in msg - - def test_album_data_change(self): - msg = self._show_change( - cur_artist="another artist", cur_album="another album" - ) - assert "another artist -> the artist" in msg - assert "another album -> the album" in msg - - def test_item_data_change(self): - self.items[0].title = "different" - msg = self._show_change() - assert "different" in msg - assert "the title" in msg - - def test_item_data_change_with_unicode(self): - self.items[0].title = "caf\xe9" - msg = self._show_change() - assert "caf\xe9" in msg - assert "the title" in msg - - def test_album_data_change_with_unicode(self): - msg = self._show_change(cur_artist="caf\xe9", cur_album="another album") - assert "caf\xe9" in msg - assert "the artist" in msg - - def test_item_data_change_title_missing(self): - self.items[0].title = "" - msg = re.sub(r" +", " ", self._show_change()) - assert "file.mp3" in msg - assert "the title" in msg - - def test_item_data_change_title_missing_with_unicode_filename(self): - self.items[0].title = "" - self.items[0].path = "/path/to/caf\xe9.mp3".encode() - msg = re.sub(r" +", " ", self._show_change()) - assert "caf\xe9.mp3" in msg or "caf.mp3" in msg - - def test_colorize(self): - assert "test" == ui.uncolorize("test") - txt = ui.uncolorize("\x1b[31mtest\x1b[39;49;00m") - assert "test" == txt - txt = ui.uncolorize("\x1b[31mtest\x1b[39;49;00m test") - assert "test test" == txt - txt = 
ui.uncolorize("\x1b[31mtest\x1b[39;49;00mtest") - assert "testtest" == txt - txt = ui.uncolorize("test \x1b[31mtest\x1b[39;49;00m test") - assert "test test test" == txt - - def test_color_split(self): - exp = ("test", "") - res = ui.color_split("test", 5) - assert exp == res - exp = ("\x1b[31mtes\x1b[39;49;00m", "\x1b[31mt\x1b[39;49;00m") - res = ui.color_split("\x1b[31mtest\x1b[39;49;00m", 3) - assert exp == res - - def test_split_into_lines(self): - # Test uncolored text - txt = ui.split_into_lines("test test test", [5, 5, 5]) - assert txt == ["test", "test", "test"] - # Test multiple colored texts - colored_text = "\x1b[31mtest \x1b[39;49;00m" * 3 - split_txt = [ - "\x1b[31mtest\x1b[39;49;00m", - "\x1b[31mtest\x1b[39;49;00m", - "\x1b[31mtest\x1b[39;49;00m", - ] - txt = ui.split_into_lines(colored_text, [5, 5, 5]) - assert txt == split_txt - # Test single color, multi space text - colored_text = "\x1b[31m test test test \x1b[39;49;00m" - txt = ui.split_into_lines(colored_text, [5, 5, 5]) - assert txt == split_txt - # Test single color, different spacing - colored_text = "\x1b[31mtest\x1b[39;49;00mtest test test" - # ToDo: fix color_len to handle mid-text color escapes, and thus - # split colored texts over newlines (potentially with dashes?) 
- split_txt = ["\x1b[31mtest\x1b[39;49;00mt", "est", "test", "test"] - txt = ui.split_into_lines(colored_text, [5, 5, 5]) - assert txt == split_txt - - def test_album_data_change_wrap_newline(self): - # Patch ui.term_width to force wrapping - with patch("beets.ui.commands.ui.term_width", return_value=30): - # Test newline layout - config["ui"]["import"]["layout"] = "newline" - long_name = "another artist with a" + (" very" * 10) + " long name" - msg = self._show_change( - cur_artist=long_name, cur_album="another album" - ) - # _common.log.info("Message:{}".format(msg)) - assert "artist: another artist" in msg - assert " -> the artist" in msg - assert "another album -> the album" not in msg - - def test_item_data_change_wrap_column(self): - # Patch ui.term_width to force wrapping - with patch("beets.ui.commands.ui.term_width", return_value=54): - # Test Column layout - config["ui"]["import"]["layout"] = "column" - long_title = "a track with a" + (" very" * 10) + " long name" - self.items[0].title = long_title - msg = self._show_change() - assert "(#1) a track (1:00) -> (#1) the title (0:00)" in msg - - def test_item_data_change_wrap_newline(self): - # Patch ui.term_width to force wrapping - with patch("beets.ui.commands.ui.term_width", return_value=30): - config["ui"]["import"]["layout"] = "newline" - long_title = "a track with a" + (" very" * 10) + " long name" - self.items[0].title = long_title - msg = self._show_change() - assert "(#1) a track with" in msg - assert " -> (#1) the title (0:00)" in msg - - -@patch("beets.library.Item.try_filesize", Mock(return_value=987)) -class SummarizeItemsTest(BeetsTestCase): - def setUp(self): - super().setUp() - item = library.Item() - item.bitrate = 4321 - item.length = 10 * 60 + 54 - item.format = "F" - self.item = item - - def test_summarize_item(self): - summary = commands.summarize_items([], True) - assert summary == "" - - summary = commands.summarize_items([self.item], True) - assert summary == "F, 4kbps, 10:54, 987.0 
B" - - def test_summarize_items(self): - summary = commands.summarize_items([], False) - assert summary == "0 items" - - summary = commands.summarize_items([self.item], False) - assert summary == "1 items, F, 4kbps, 10:54, 987.0 B" - - # make a copy of self.item - i2 = self.item.copy() - - summary = commands.summarize_items([self.item, i2], False) - assert summary == "2 items, F, 4kbps, 21:48, 1.9 KiB" - - i2.format = "G" - summary = commands.summarize_items([self.item, i2], False) - assert summary == "2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB" - - summary = commands.summarize_items([self.item, i2, i2], False) - assert summary == "3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB" - - -class PathFormatTest(BeetsTestCase): - def test_custom_paths_prepend(self): - default_formats = ui.get_path_formats() - - config["paths"] = {"foo": "bar"} - pf = ui.get_path_formats() - key, tmpl = pf[0] - assert key == "foo" - assert tmpl.original == "bar" - assert pf[1:] == default_formats - - -@_common.slow_test() -class PluginTest(TestPluginTestCase): - def test_plugin_command_from_pluginpath(self): - self.run_command("test", lib=None) - - -@_common.slow_test() -@pytest.mark.xfail( - os.environ.get("GITHUB_ACTIONS") == "true" and sys.platform == "linux", - reason="Completion is for some reason unhappy on Ubuntu 24.04 in CI", -) -class CompletionTest(TestPluginTestCase): - def test_completion(self): - # Do not load any other bash completion scripts on the system. - env = dict(os.environ) - env["BASH_COMPLETION_DIR"] = os.devnull - env["BASH_COMPLETION_COMPAT_DIR"] = os.devnull - - # Open a `bash` process to run the tests in. We'll pipe in bash - # commands via stdin. - cmd = os.environ.get("BEETS_TEST_SHELL", "/bin/bash --norc").split() - if not has_program(cmd[0]): - self.skipTest("bash not available") - tester = subprocess.Popen( - cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=env - ) - - # Load bash_completion library. 
- for path in commands.BASH_COMPLETION_PATHS: - if os.path.exists(syspath(path)): - bash_completion = path - break - else: - self.skipTest("bash-completion script not found") - try: - with open(util.syspath(bash_completion), "rb") as f: - tester.stdin.writelines(f) - except OSError: - self.skipTest("could not read bash-completion script") - - # Load completion script. - self.io.install() - self.run_command("completion", lib=None) - completion_script = self.io.getoutput().encode("utf-8") - self.io.restore() - tester.stdin.writelines(completion_script.splitlines(True)) - - # Load test suite. - test_script_name = os.path.join(_common.RSRC, b"test_completion.sh") - with open(test_script_name, "rb") as test_script_file: - tester.stdin.writelines(test_script_file) - out, err = tester.communicate() - assert tester.returncode == 0 - assert out == b"completion tests passed\n", ( - "test/test_completion.sh did not execute properly. " - f'Output:{out.decode("utf-8")}' - ) - - -class CommonOptionsParserCliTest(BeetsTestCase): - """Test CommonOptionsParser and formatting LibModel formatting on 'list' - command. 
- """ - - def setUp(self): - super().setUp() - self.item = _common.item() - self.item.path = b"xxx/yyy" - self.lib.add(self.item) - self.lib.add_album([self.item]) - - def test_base(self): - output = self.run_with_output("ls") - assert output == "the artist - the album - the title\n" - - output = self.run_with_output("ls", "-a") - assert output == "the album artist - the album\n" - - def test_path_option(self): - output = self.run_with_output("ls", "-p") - assert output == "xxx/yyy\n" - - output = self.run_with_output("ls", "-a", "-p") - assert output == "xxx\n" - - def test_format_option(self): - output = self.run_with_output("ls", "-f", "$artist") - assert output == "the artist\n" - - output = self.run_with_output("ls", "-a", "-f", "$albumartist") - assert output == "the album artist\n" - - def test_format_option_unicode(self): - output = self.run_with_output("ls", "-f", "caf\xe9") - assert output == "caf\xe9\n" - - def test_root_format_option(self): - output = self.run_with_output( - "--format-item", "$artist", "--format-album", "foo", "ls" - ) - assert output == "the artist\n" - - output = self.run_with_output( - "--format-item", "foo", "--format-album", "$albumartist", "ls", "-a" - ) - assert output == "the album artist\n" - - def test_help(self): - output = self.run_with_output("help") - assert "Usage:" in output - - output = self.run_with_output("help", "list") - assert "Usage:" in output - - with pytest.raises(ui.UserError): - self.run_command("help", "this.is.not.a.real.command") - - def test_stats(self): - output = self.run_with_output("stats") - assert "Approximate total size:" in output - - # # Need to have more realistic library setup for this to work - # output = self.run_with_output('stats', '-e') - # assert 'Total size:' in output - - def test_version(self): - output = self.run_with_output("version") - assert "Python version" in output - assert "no plugins loaded" in output - - # # Need to have plugin loaded - # output = 
self.run_with_output('version') - # assert 'plugins: ' in output - - -class CommonOptionsParserTest(BeetsTestCase): - def test_album_option(self): - parser = ui.CommonOptionsParser() - assert not parser._album_flags - parser.add_album_option() - assert bool(parser._album_flags) - - assert parser.parse_args([]) == ({"album": None}, []) - assert parser.parse_args(["-a"]) == ({"album": True}, []) - assert parser.parse_args(["--album"]) == ({"album": True}, []) - - def test_path_option(self): - parser = ui.CommonOptionsParser() - parser.add_path_option() - assert not parser._album_flags - - config["format_item"].set("$foo") - assert parser.parse_args([]) == ({"path": None}, []) - assert config["format_item"].as_str() == "$foo" - - assert parser.parse_args(["-p"]) == ( - {"path": True, "format": "$path"}, - [], - ) - assert parser.parse_args(["--path"]) == ( - {"path": True, "format": "$path"}, - [], - ) - - assert config["format_item"].as_str() == "$path" - assert config["format_album"].as_str() == "$path" - - def test_format_option(self): - parser = ui.CommonOptionsParser() - parser.add_format_option() - assert not parser._album_flags - - config["format_item"].set("$foo") - assert parser.parse_args([]) == ({"format": None}, []) - assert config["format_item"].as_str() == "$foo" - - assert parser.parse_args(["-f", "$bar"]) == ({"format": "$bar"}, []) - assert parser.parse_args(["--format", "$baz"]) == ( - {"format": "$baz"}, - [], - ) - - assert config["format_item"].as_str() == "$baz" - assert config["format_album"].as_str() == "$baz" - - def test_format_option_with_target(self): - with pytest.raises(KeyError): - ui.CommonOptionsParser().add_format_option(target="thingy") - - parser = ui.CommonOptionsParser() - parser.add_format_option(target="item") - - config["format_item"].set("$item") - config["format_album"].set("$album") - - assert parser.parse_args(["-f", "$bar"]) == ({"format": "$bar"}, []) - - assert config["format_item"].as_str() == "$bar" - assert 
config["format_album"].as_str() == "$album" - - def test_format_option_with_album(self): - parser = ui.CommonOptionsParser() - parser.add_album_option() - parser.add_format_option() - - config["format_item"].set("$item") - config["format_album"].set("$album") - - parser.parse_args(["-f", "$bar"]) - assert config["format_item"].as_str() == "$bar" - assert config["format_album"].as_str() == "$album" - - parser.parse_args(["-a", "-f", "$foo"]) - assert config["format_item"].as_str() == "$bar" - assert config["format_album"].as_str() == "$foo" - - parser.parse_args(["-f", "$foo2", "-a"]) - assert config["format_album"].as_str() == "$foo2" - - def test_add_all_common_options(self): - parser = ui.CommonOptionsParser() - parser.add_all_common_options() - assert parser.parse_args([]) == ( - {"album": None, "path": None, "format": None}, - [], - ) - - -class EncodingTest(BeetsTestCase): - """Tests for the `terminal_encoding` config option and our - `_in_encoding` and `_out_encoding` utility functions. - """ - - def out_encoding_overridden(self): - config["terminal_encoding"] = "fake_encoding" - assert ui._out_encoding() == "fake_encoding" - - def in_encoding_overridden(self): - config["terminal_encoding"] = "fake_encoding" - assert ui._in_encoding() == "fake_encoding" - - def out_encoding_default_utf8(self): - with patch("sys.stdout") as stdout: - stdout.encoding = None - assert ui._out_encoding() == "utf-8" - - def in_encoding_default_utf8(self): - with patch("sys.stdin") as stdin: - stdin.encoding = None - assert ui._in_encoding() == "utf-8" diff --git a/test/test_ui_commands.py b/test/test_ui_commands.py deleted file mode 100644 index 897cba8a1..000000000 --- a/test/test_ui_commands.py +++ /dev/null @@ -1,105 +0,0 @@ -# This file is part of beets. -# Copyright 2016, Adrian Sampson. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Test module for file ui/commands.py""" - -import os -import shutil - -import pytest - -from beets import library, ui -from beets.test import _common -from beets.test.helper import BeetsTestCase, ItemInDBTestCase -from beets.ui import commands -from beets.util import syspath - - -class QueryTest(BeetsTestCase): - def add_item(self, filename=b"srcfile", templatefile=b"full.mp3"): - itempath = os.path.join(self.libdir, filename) - shutil.copy( - syspath(os.path.join(_common.RSRC, templatefile)), - syspath(itempath), - ) - item = library.Item.from_path(itempath) - self.lib.add(item) - return item, itempath - - def add_album(self, items): - album = self.lib.add_album(items) - return album - - def check_do_query( - self, num_items, num_albums, q=(), album=False, also_items=True - ): - items, albums = commands._do_query(self.lib, q, album, also_items) - assert len(items) == num_items - assert len(albums) == num_albums - - def test_query_empty(self): - with pytest.raises(ui.UserError): - commands._do_query(self.lib, (), False) - - def test_query_empty_album(self): - with pytest.raises(ui.UserError): - commands._do_query(self.lib, (), True) - - def test_query_item(self): - self.add_item() - self.check_do_query(1, 0, album=False) - self.add_item() - self.check_do_query(2, 0, album=False) - - def test_query_album(self): - item, itempath = self.add_item() - self.add_album([item]) - 
self.check_do_query(1, 1, album=True) - self.check_do_query(0, 1, album=True, also_items=False) - - item, itempath = self.add_item() - item2, itempath = self.add_item() - self.add_album([item, item2]) - self.check_do_query(3, 2, album=True) - self.check_do_query(0, 2, album=True, also_items=False) - - -class FieldsTest(ItemInDBTestCase): - def setUp(self): - super().setUp() - - self.io.install() - - def tearDown(self): - super().tearDown() - self.io.restore() - - def remove_keys(self, keys, text): - for i in text: - try: - keys.remove(i) - except ValueError: - pass - - def test_fields_func(self): - commands.fields_func(self.lib, [], []) - items = library.Item.all_keys() - albums = library.Album.all_keys() - - output = self.io.stdout.get().split() - self.remove_keys(items, output) - self.remove_keys(albums, output) - - assert len(items) == 0 - assert len(albums) == 0 diff --git a/test/test_util.py b/test/test_util.py index f5b4fd102..d8a4ca0db 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -24,8 +24,8 @@ from unittest.mock import Mock, patch import pytest from beets import util +from beets.library import Item from beets.test import _common -from beets.test.helper import BeetsTestCase class UtilTest(unittest.TestCase): @@ -132,7 +132,7 @@ class UtilTest(unittest.TestCase): pass -class PathConversionTest(BeetsTestCase): +class PathConversionTest(unittest.TestCase): def test_syspath_windows_format(self): with _common.platform_windows(): path = os.path.join("a", "b", "c") @@ -156,13 +156,8 @@ class PathConversionTest(BeetsTestCase): assert path == outpath def _windows_bytestring_path(self, path): - old_gfse = sys.getfilesystemencoding - sys.getfilesystemencoding = lambda: "mbcs" - try: - with _common.platform_windows(): - return util.bytestring_path(path) - finally: - sys.getfilesystemencoding = old_gfse + with _common.platform_windows(): + return util.bytestring_path(path) def test_bytestring_path_windows_encodes_utf8(self): path = "caf\xe9" @@ -175,18 
+170,89 @@ class PathConversionTest(BeetsTestCase): assert outpath == "C:\\caf\xe9".encode() -class PathTruncationTest(BeetsTestCase): - def test_truncate_bytestring(self): - with _common.platform_posix(): - p = util.truncate_path(b"abcde/fgh", 4) - assert p == b"abcd/fgh" +class TestPathLegalization: + _p = pytest.param - def test_truncate_unicode(self): - with _common.platform_posix(): - p = util.truncate_path("abcde/fgh", 4) - assert p == "abcd/fgh" + @pytest.fixture(autouse=True) + def _patch_max_filename_length(self, monkeypatch): + monkeypatch.setattr("beets.util.get_max_filename_length", lambda: 5) - def test_truncate_preserves_extension(self): - with _common.platform_posix(): - p = util.truncate_path("abcde/fgh.ext", 5) - assert p == "abcde/f.ext" + @pytest.mark.parametrize( + "path, expected", + [ + _p("abcdeX/fgh", "abcde/fgh", id="truncate-parent-dir"), + _p("abcde/fXX.ext", "abcde/f.ext", id="truncate-filename"), + # note that 🎹 is 4 bytes long: + # >>> "🎹".encode("utf-8") + # b'\xf0\x9f\x8e\xb9' + _p("a🎹/a.ext", "a🎹/a.ext", id="unicode-fit"), + _p("ab🎹/a.ext", "ab/a.ext", id="unicode-truncate-fully-one-byte-over-limit"), + _p("f.a.e", "f.a.e", id="persist-dot-in-filename"), # see #5771 + ], + ) # fmt: skip + def test_truncate(self, path, expected): + path = path.replace("/", os.path.sep) + expected = expected.replace("/", os.path.sep) + + assert util.truncate_path(path) == expected + + @pytest.mark.parametrize( + "replacements, expected_path, expected_truncated", + [ # [ repl before truncation, repl after truncation ] + _p([ ], "_abcd", False, id="default"), + _p([(r"abcdX$", "1ST"), ], ":1ST", False, id="1st_valid"), + _p([(r"abcdX$", "TOO_LONG"), ], ":TOO_", False, id="1st_truncated"), + _p([(r"abcdX$", "1ST"), (r"1ST$", "2ND") ], ":2ND", False, id="both_valid"), + _p([(r"abcdX$", "TOO_LONG"), (r"TOO_$", "2ND") ], ":2ND", False, id="1st_truncated_2nd_valid"), + _p([(r"abcdX$", "1ST"), (r"1ST$", "TOO_LONG") ], ":TOO_", False, 
id="1st_valid_2nd_truncated"), + # if the logic truncates the path twice, it ends up applying the default replacements + _p([(r"abcdX$", "TOO_LONG"), (r"TOO_$", "TOO_LONG") ], "_TOO_", True, id="both_truncated_default_repl_applied"), + ] + ) # fmt: skip + def test_replacements( + self, replacements, expected_path, expected_truncated + ): + replacements = [(re.compile(pat), repl) for pat, repl in replacements] + + assert util.legalize_path(":abcdX", replacements, "") == ( + expected_path, + expected_truncated, + ) + + +class TestPlurality: + @pytest.mark.parametrize( + "objs, expected_obj, expected_freq", + [ + pytest.param([1, 1, 1, 1], 1, 4, id="consensus"), + pytest.param([1, 1, 2, 1], 1, 3, id="near consensus"), + pytest.param([1, 1, 2, 2, 3], 1, 2, id="conflict-first-wins"), + ], + ) + def test_plurality(self, objs, expected_obj, expected_freq): + assert (expected_obj, expected_freq) == util.plurality(objs) + + def test_empty_sequence_raises_error(self): + with pytest.raises(ValueError, match="must be non-empty"): + util.plurality([]) + + def test_get_most_common_tags(self): + items = [ + Item(albumartist="aartist", label="label 1", album="album"), + Item(albumartist="aartist", label="label 2", album="album"), + Item(albumartist="aartist", label="label 3", album="another album"), + ] + + likelies, consensus = util.get_most_common_tags(items) + + assert likelies["albumartist"] == "aartist" + assert likelies["album"] == "album" + # albumartist consensus overrides artist + assert likelies["artist"] == "aartist" + assert likelies["label"] == "label 1" + assert likelies["year"] == 0 + + assert consensus["year"] + assert consensus["albumartist"] + assert not consensus["album"] + assert not consensus["label"] diff --git a/test/ui/__init__.py b/test/ui/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/ui/commands/__init__.py b/test/ui/commands/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/test/ui/commands/test_completion.py b/test/ui/commands/test_completion.py new file mode 100644 index 000000000..f1e53f238 --- /dev/null +++ b/test/ui/commands/test_completion.py @@ -0,0 +1,64 @@ +import os +import subprocess +import sys + +import pytest + +from beets.test import _common +from beets.test.helper import IOMixin, has_program +from beets.ui.commands.completion import BASH_COMPLETION_PATHS +from beets.util import syspath + +from ..test_ui import TestPluginTestCase + + +@_common.slow_test() +@pytest.mark.xfail( + os.environ.get("GITHUB_ACTIONS") == "true" and sys.platform == "linux", + reason="Completion is for some reason unhappy on Ubuntu 24.04 in CI", +) +class CompletionTest(IOMixin, TestPluginTestCase): + def test_completion(self): + # Do not load any other bash completion scripts on the system. + env = dict(os.environ) + env["BASH_COMPLETION_DIR"] = os.devnull + env["BASH_COMPLETION_COMPAT_DIR"] = os.devnull + + # Open a `bash` process to run the tests in. We'll pipe in bash + # commands via stdin. + cmd = os.environ.get("BEETS_TEST_SHELL", "/bin/bash --norc").split() + if not has_program(cmd[0]): + self.skipTest("bash not available") + tester = subprocess.Popen( + cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=env + ) + + # Load bash_completion library. + for path in BASH_COMPLETION_PATHS: + if os.path.exists(syspath(path)): + bash_completion = path + break + else: + self.skipTest("bash-completion script not found") + try: + with open(syspath(bash_completion), "rb") as f: + tester.stdin.writelines(f) + except OSError: + self.skipTest("could not read bash-completion script") + + # Load completion script. + self.run_command("completion", lib=None) + completion_script = self.io.getoutput().encode("utf-8") + self.io.restore() + tester.stdin.writelines(completion_script.splitlines(True)) + + # Load test suite. 
+ test_script_name = os.path.join(_common.RSRC, b"test_completion.sh") + with open(test_script_name, "rb") as test_script_file: + tester.stdin.writelines(test_script_file) + out, err = tester.communicate() + assert tester.returncode == 0 + assert out == b"completion tests passed\n", ( + "test/test_completion.sh did not execute properly. " + f"Output:{out.decode('utf-8')}" + ) diff --git a/test/test_config_command.py b/test/ui/commands/test_config.py similarity index 93% rename from test/test_config_command.py rename to test/ui/commands/test_config.py index b68c4f042..c1215ef43 100644 --- a/test/test_config_command.py +++ b/test/ui/commands/test_config.py @@ -128,3 +128,11 @@ class ConfigCommandTest(BeetsTestCase): with patch("os.execlp") as execlp: self.run_command("config", "-e") execlp.assert_called_once_with("myeditor", "myeditor", self.config_path) + + def test_edit_config_with_custom_config_path(self): + os.environ["EDITOR"] = "myeditor" + with patch("os.execlp") as execlp: + self.run_command("--config", self.cli_config_path, "config", "-e") + execlp.assert_called_once_with( + "myeditor", "myeditor", self.cli_config_path + ) diff --git a/test/ui/commands/test_fields.py b/test/ui/commands/test_fields.py new file mode 100644 index 000000000..0eaaa9ceb --- /dev/null +++ b/test/ui/commands/test_fields.py @@ -0,0 +1,24 @@ +from beets import library +from beets.test.helper import IOMixin, ItemInDBTestCase +from beets.ui.commands.fields import fields_func + + +class FieldsTest(IOMixin, ItemInDBTestCase): + def remove_keys(self, keys, text): + for i in text: + try: + keys.remove(i) + except ValueError: + pass + + def test_fields_func(self): + fields_func(self.lib, [], []) + items = library.Item.all_keys() + albums = library.Album.all_keys() + + output = self.io.stdout.get().split() + self.remove_keys(items, output) + self.remove_keys(albums, output) + + assert len(items) == 0 + assert len(albums) == 0 diff --git a/test/ui/commands/test_import.py 
b/test/ui/commands/test_import.py new file mode 100644 index 000000000..6e96c3bf3 --- /dev/null +++ b/test/ui/commands/test_import.py @@ -0,0 +1,258 @@ +import os +import re +import unittest +from unittest.mock import Mock, patch + +import pytest + +from beets import autotag, config, library, ui +from beets.autotag.match import distance +from beets.test import _common +from beets.test.helper import BeetsTestCase, IOMixin +from beets.ui.commands.import_ import import_files, paths_from_logfile +from beets.ui.commands.import_.display import show_change +from beets.ui.commands.import_.session import summarize_items + + +class ImportTest(BeetsTestCase): + def test_quiet_timid_disallowed(self): + config["import"]["quiet"] = True + config["import"]["timid"] = True + with pytest.raises(ui.UserError): + import_files(None, [], None) + + def test_parse_paths_from_logfile(self): + if os.path.__name__ == "ntpath": + logfile_content = ( + "import started Wed Jun 15 23:08:26 2022\n" + "asis C:\\music\\Beatles, The\\The Beatles; C:\\music\\Beatles, The\\The Beatles\\CD 01; C:\\music\\Beatles, The\\The Beatles\\CD 02\n" # noqa: E501 + "duplicate-replace C:\\music\\Bill Evans\\Trio '65\n" + "skip C:\\music\\Michael Jackson\\Bad\n" + "skip C:\\music\\Soulwax\\Any Minute Now\n" + ) + expected_paths = [ + "C:\\music\\Beatles, The\\The Beatles", + "C:\\music\\Michael Jackson\\Bad", + "C:\\music\\Soulwax\\Any Minute Now", + ] + else: + logfile_content = ( + "import started Wed Jun 15 23:08:26 2022\n" + "asis /music/Beatles, The/The Beatles; /music/Beatles, The/The Beatles/CD 01; /music/Beatles, The/The Beatles/CD 02\n" # noqa: E501 + "duplicate-replace /music/Bill Evans/Trio '65\n" + "skip /music/Michael Jackson/Bad\n" + "skip /music/Soulwax/Any Minute Now\n" + ) + expected_paths = [ + "/music/Beatles, The/The Beatles", + "/music/Michael Jackson/Bad", + "/music/Soulwax/Any Minute Now", + ] + + logfile = os.path.join(self.temp_dir, b"logfile.log") + with open(logfile, mode="w") as fp: + 
fp.write(logfile_content) + actual_paths = list(paths_from_logfile(logfile)) + assert actual_paths == expected_paths + + +class ShowChangeTest(IOMixin, unittest.TestCase): + def setUp(self): + super().setUp() + + self.items = [_common.item()] + self.items[0].track = 1 + self.items[0].path = b"/path/to/file.mp3" + self.info = autotag.AlbumInfo( + album="the album", + album_id="album id", + artist="the artist", + artist_id="artist id", + tracks=[ + autotag.TrackInfo( + title="the title", track_id="track id", index=1 + ) + ], + ) + + def _show_change( + self, + items=None, + info=None, + color=False, + cur_artist="the artist", + cur_album="the album", + dist=0.1, + ): + """Return an unicode string representing the changes""" + items = items or self.items + info = info or self.info + item_info_pairs = list(zip(items, info.tracks)) + config["ui"]["color"] = color + config["import"]["detail"] = True + change_dist = distance(items, info, item_info_pairs) + change_dist._penalties = {"album": [dist], "artist": [dist]} + show_change( + cur_artist, + cur_album, + autotag.AlbumMatch( + change_dist, info, dict(item_info_pairs), set(), set() + ), + ) + return self.io.getoutput().lower() + + def test_null_change(self): + msg = self._show_change() + assert "match (90.0%)" in msg + assert "album, artist" in msg + + def test_album_data_change(self): + msg = self._show_change( + cur_artist="another artist", cur_album="another album" + ) + assert "another artist -> the artist" in msg + assert "another album -> the album" in msg + + def test_item_data_change(self): + self.items[0].title = "different" + msg = self._show_change() + assert "different" in msg + assert "the title" in msg + + def test_item_data_change_with_unicode(self): + self.items[0].title = "caf\xe9" + msg = self._show_change() + assert "caf\xe9" in msg + assert "the title" in msg + + def test_album_data_change_with_unicode(self): + msg = self._show_change(cur_artist="caf\xe9", cur_album="another album") + assert 
"caf\xe9" in msg + assert "the artist" in msg + + def test_item_data_change_title_missing(self): + self.items[0].title = "" + msg = re.sub(r" +", " ", self._show_change()) + assert "file.mp3" in msg + assert "the title" in msg + + def test_item_data_change_title_missing_with_unicode_filename(self): + self.items[0].title = "" + self.items[0].path = "/path/to/caf\xe9.mp3".encode() + msg = re.sub(r" +", " ", self._show_change()) + assert "caf\xe9.mp3" in msg or "caf.mp3" in msg + + def test_colorize(self): + assert "test" == ui.uncolorize("test") + txt = ui.uncolorize("\x1b[31mtest\x1b[39;49;00m") + assert "test" == txt + txt = ui.uncolorize("\x1b[31mtest\x1b[39;49;00m test") + assert "test test" == txt + txt = ui.uncolorize("\x1b[31mtest\x1b[39;49;00mtest") + assert "testtest" == txt + txt = ui.uncolorize("test \x1b[31mtest\x1b[39;49;00m test") + assert "test test test" == txt + + def test_color_split(self): + exp = ("test", "") + res = ui.color_split("test", 5) + assert exp == res + exp = ("\x1b[31mtes\x1b[39;49;00m", "\x1b[31mt\x1b[39;49;00m") + res = ui.color_split("\x1b[31mtest\x1b[39;49;00m", 3) + assert exp == res + + def test_split_into_lines(self): + # Test uncolored text + txt = ui.split_into_lines("test test test", [5, 5, 5]) + assert txt == ["test", "test", "test"] + # Test multiple colored texts + colored_text = "\x1b[31mtest \x1b[39;49;00m" * 3 + split_txt = [ + "\x1b[31mtest\x1b[39;49;00m", + "\x1b[31mtest\x1b[39;49;00m", + "\x1b[31mtest\x1b[39;49;00m", + ] + txt = ui.split_into_lines(colored_text, [5, 5, 5]) + assert txt == split_txt + # Test single color, multi space text + colored_text = "\x1b[31m test test test \x1b[39;49;00m" + txt = ui.split_into_lines(colored_text, [5, 5, 5]) + assert txt == split_txt + # Test single color, different spacing + colored_text = "\x1b[31mtest\x1b[39;49;00mtest test test" + # ToDo: fix color_len to handle mid-text color escapes, and thus + # split colored texts over newlines (potentially with dashes?) 
+ split_txt = ["\x1b[31mtest\x1b[39;49;00mt", "est", "test", "test"] + txt = ui.split_into_lines(colored_text, [5, 5, 5]) + assert txt == split_txt + + def test_album_data_change_wrap_newline(self): + # Patch ui.term_width to force wrapping + with patch("beets.ui.term_width", return_value=30): + # Test newline layout + config["ui"]["import"]["layout"] = "newline" + long_name = f"another artist with a{' very' * 10} long name" + msg = self._show_change( + cur_artist=long_name, cur_album="another album" + ) + assert "artist: another artist" in msg + assert " -> the artist" in msg + assert "another album -> the album" not in msg + + def test_item_data_change_wrap_column(self): + # Patch ui.term_width to force wrapping + with patch("beets.ui.term_width", return_value=54): + # Test Column layout + config["ui"]["import"]["layout"] = "column" + long_title = f"a track with a{' very' * 10} long name" + self.items[0].title = long_title + msg = self._show_change() + assert "(#1) a track (1:00) -> (#1) the title (0:00)" in msg + + def test_item_data_change_wrap_newline(self): + # Patch ui.term_width to force wrapping + with patch("beets.ui.term_width", return_value=30): + config["ui"]["import"]["layout"] = "newline" + long_title = f"a track with a{' very' * 10} long name" + self.items[0].title = long_title + msg = self._show_change() + assert "(#1) a track with" in msg + assert " -> (#1) the title (0:00)" in msg + + +@patch("beets.library.Item.try_filesize", Mock(return_value=987)) +class SummarizeItemsTest(unittest.TestCase): + def setUp(self): + super().setUp() + item = library.Item() + item.bitrate = 4321 + item.length = 10 * 60 + 54 + item.format = "F" + self.item = item + + def test_summarize_item(self): + summary = summarize_items([], True) + assert summary == "" + + summary = summarize_items([self.item], True) + assert summary == "F, 4kbps, 10:54, 987.0 B" + + def test_summarize_items(self): + summary = summarize_items([], False) + assert summary == "0 items" + + summary 
= summarize_items([self.item], False) + assert summary == "1 items, F, 4kbps, 10:54, 987.0 B" + + # make a copy of self.item + i2 = self.item.copy() + + summary = summarize_items([self.item, i2], False) + assert summary == "2 items, F, 4kbps, 21:48, 1.9 KiB" + + i2.format = "G" + summary = summarize_items([self.item, i2], False) + assert summary == "2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB" + + summary = summarize_items([self.item, i2, i2], False) + assert summary == "3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB" diff --git a/test/ui/commands/test_list.py b/test/ui/commands/test_list.py new file mode 100644 index 000000000..a63a56ad1 --- /dev/null +++ b/test/ui/commands/test_list.py @@ -0,0 +1,69 @@ +from beets.test import _common +from beets.test.helper import BeetsTestCase, capture_stdout +from beets.ui.commands.list import list_items + + +class ListTest(BeetsTestCase): + def setUp(self): + super().setUp() + self.item = _common.item() + self.item.path = "xxx/yyy" + self.lib.add(self.item) + self.lib.add_album([self.item]) + + def _run_list(self, query="", album=False, path=False, fmt=""): + with capture_stdout() as stdout: + list_items(self.lib, query, album, fmt) + return stdout + + def test_list_outputs_item(self): + stdout = self._run_list() + assert "the title" in stdout.getvalue() + + def test_list_unicode_query(self): + self.item.title = "na\xefve" + self.item.store() + self.lib._connection().commit() + + stdout = self._run_list(["na\xefve"]) + out = stdout.getvalue() + assert "na\xefve" in out + + def test_list_item_path(self): + stdout = self._run_list(fmt="$path") + assert stdout.getvalue().strip() == "xxx/yyy" + + def test_list_album_outputs_something(self): + stdout = self._run_list(album=True) + assert len(stdout.getvalue()) > 0 + + def test_list_album_path(self): + stdout = self._run_list(album=True, fmt="$path") + assert stdout.getvalue().strip() == "xxx" + + def test_list_album_omits_title(self): + stdout = self._run_list(album=True) + assert "the 
title" not in stdout.getvalue() + + def test_list_uses_track_artist(self): + stdout = self._run_list() + assert "the artist" in stdout.getvalue() + assert "the album artist" not in stdout.getvalue() + + def test_list_album_uses_album_artist(self): + stdout = self._run_list(album=True) + assert "the artist" not in stdout.getvalue() + assert "the album artist" in stdout.getvalue() + + def test_list_item_format_artist(self): + stdout = self._run_list(fmt="$artist") + assert "the artist" in stdout.getvalue() + + def test_list_item_format_multiple(self): + stdout = self._run_list(fmt="$artist - $album - $year") + assert "the artist - the album - 0001" == stdout.getvalue().strip() + + def test_list_album_format(self): + stdout = self._run_list(album=True, fmt="$genre") + assert "the genre" in stdout.getvalue() + assert "the album" not in stdout.getvalue() diff --git a/test/ui/commands/test_modify.py b/test/ui/commands/test_modify.py new file mode 100644 index 000000000..b9cc1524d --- /dev/null +++ b/test/ui/commands/test_modify.py @@ -0,0 +1,216 @@ +import unittest + +from mediafile import MediaFile + +from beets.test.helper import BeetsTestCase, control_stdin +from beets.ui.commands.modify import modify_parse_args +from beets.util import syspath + + +class ModifyTest(BeetsTestCase): + def setUp(self): + super().setUp() + self.album = self.add_album_fixture() + [self.item] = self.album.items() + + def modify_inp(self, inp, *args): + with control_stdin(inp): + self.run_command("modify", *args) + + def modify(self, *args): + self.modify_inp("y", *args) + + # Item tests + + def test_modify_item(self): + self.modify("title=newTitle") + item = self.lib.items().get() + assert item.title == "newTitle" + + def test_modify_item_abort(self): + item = self.lib.items().get() + title = item.title + self.modify_inp("n", "title=newTitle") + item = self.lib.items().get() + assert item.title == title + + def test_modify_item_no_change(self): + title = "Tracktitle" + item = 
self.add_item_fixture(title=title) + self.modify_inp("y", "title", f"title={title}") + item = self.lib.items(title).get() + assert item.title == title + + def test_modify_write_tags(self): + self.modify("title=newTitle") + item = self.lib.items().get() + item.read() + assert item.title == "newTitle" + + def test_modify_dont_write_tags(self): + self.modify("--nowrite", "title=newTitle") + item = self.lib.items().get() + item.read() + assert item.title != "newTitle" + + def test_move(self): + self.modify("title=newTitle") + item = self.lib.items().get() + assert b"newTitle" in item.path + + def test_not_move(self): + self.modify("--nomove", "title=newTitle") + item = self.lib.items().get() + assert b"newTitle" not in item.path + + def test_no_write_no_move(self): + self.modify("--nomove", "--nowrite", "title=newTitle") + item = self.lib.items().get() + item.read() + assert b"newTitle" not in item.path + assert item.title != "newTitle" + + def test_update_mtime(self): + item = self.item + old_mtime = item.mtime + + self.modify("title=newTitle") + item.load() + assert old_mtime != item.mtime + assert item.current_mtime() == item.mtime + + def test_reset_mtime_with_no_write(self): + item = self.item + + self.modify("--nowrite", "title=newTitle") + item.load() + assert 0 == item.mtime + + def test_selective_modify(self): + title = "Tracktitle" + album = "album" + original_artist = "composer" + new_artist = "coverArtist" + for i in range(0, 10): + self.add_item_fixture( + title=f"{title}{i}", artist=original_artist, album=album + ) + self.modify_inp( + "s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn", title, f"artist={new_artist}" + ) + original_items = self.lib.items(f"artist:{original_artist}") + new_items = self.lib.items(f"artist:{new_artist}") + assert len(list(original_items)) == 3 + assert len(list(new_items)) == 7 + + def test_modify_formatted(self): + for i in range(0, 3): + self.add_item_fixture( + title=f"title{i}", artist="artist", album="album" + ) + items = 
list(self.lib.items()) + self.modify("title=${title} - append") + for item in items: + orig_title = item.title + item.load() + assert item.title == f"{orig_title} - append" + + # Album Tests + + def test_modify_album(self): + self.modify("--album", "album=newAlbum") + album = self.lib.albums().get() + assert album.album == "newAlbum" + + def test_modify_album_write_tags(self): + self.modify("--album", "album=newAlbum") + item = self.lib.items().get() + item.read() + assert item.album == "newAlbum" + + def test_modify_album_dont_write_tags(self): + self.modify("--album", "--nowrite", "album=newAlbum") + item = self.lib.items().get() + item.read() + assert item.album == "the album" + + def test_album_move(self): + self.modify("--album", "album=newAlbum") + item = self.lib.items().get() + item.read() + assert b"newAlbum" in item.path + + def test_album_not_move(self): + self.modify("--nomove", "--album", "album=newAlbum") + item = self.lib.items().get() + item.read() + assert b"newAlbum" not in item.path + + def test_modify_album_formatted(self): + item = self.lib.items().get() + orig_album = item.album + self.modify("--album", "album=${album} - append") + item.load() + assert item.album == f"{orig_album} - append" + + # Misc + + def test_write_initial_key_tag(self): + self.modify("initial_key=C#m") + item = self.lib.items().get() + mediafile = MediaFile(syspath(item.path)) + assert mediafile.initial_key == "C#m" + + def test_set_flexattr(self): + self.modify("flexattr=testAttr") + item = self.lib.items().get() + assert item.flexattr == "testAttr" + + def test_remove_flexattr(self): + item = self.lib.items().get() + item.flexattr = "testAttr" + item.store() + + self.modify("flexattr!") + item = self.lib.items().get() + assert "flexattr" not in item + + @unittest.skip("not yet implemented") + def test_delete_initial_key_tag(self): + item = self.lib.items().get() + item.initial_key = "C#m" + item.write() + item.store() + + mediafile = MediaFile(syspath(item.path)) + 
assert mediafile.initial_key == "C#m" + + self.modify("initial_key!") + mediafile = MediaFile(syspath(item.path)) + assert mediafile.initial_key is None + + def test_arg_parsing_colon_query(self): + (query, mods, dels) = modify_parse_args( + ["title:oldTitle", "title=newTitle"] + ) + assert query == ["title:oldTitle"] + assert mods == {"title": "newTitle"} + + def test_arg_parsing_delete(self): + (query, mods, dels) = modify_parse_args(["title:oldTitle", "title!"]) + assert query == ["title:oldTitle"] + assert dels == ["title"] + + def test_arg_parsing_query_with_exclaimation(self): + (query, mods, dels) = modify_parse_args( + ["title:oldTitle!", "title=newTitle!"] + ) + assert query == ["title:oldTitle!"] + assert mods == {"title": "newTitle!"} + + def test_arg_parsing_equals_in_value(self): + (query, mods, dels) = modify_parse_args( + ["title:foo=bar", "title=newTitle"] + ) + assert query == ["title:foo=bar"] + assert mods == {"title": "newTitle"} diff --git a/test/ui/commands/test_move.py b/test/ui/commands/test_move.py new file mode 100644 index 000000000..5c65f1475 --- /dev/null +++ b/test/ui/commands/test_move.py @@ -0,0 +1,102 @@ +import shutil + +from beets import library +from beets.test.helper import BeetsTestCase +from beets.ui.commands.move import move_items + + +class MoveTest(BeetsTestCase): + def setUp(self): + super().setUp() + + self.initial_item_path = self.lib_path / "srcfile" + shutil.copy(self.resource_path, self.initial_item_path) + + # Add a file to the library but don't copy it in yet. + self.i = library.Item.from_path(self.initial_item_path) + self.lib.add(self.i) + self.album = self.lib.add_album([self.i]) + + # Alternate destination directory. 
+ self.otherdir = self.temp_dir_path / "testotherdir" + + def _move( + self, + query=(), + dest=None, + copy=False, + album=False, + pretend=False, + export=False, + ): + move_items(self.lib, dest, query, copy, album, pretend, export=export) + + def test_move_item(self): + self._move() + self.i.load() + assert b"libdir" in self.i.path + assert self.i.filepath.exists() + assert not self.initial_item_path.exists() + + def test_copy_item(self): + self._move(copy=True) + self.i.load() + assert b"libdir" in self.i.path + assert self.i.filepath.exists() + assert self.initial_item_path.exists() + + def test_move_album(self): + self._move(album=True) + self.i.load() + assert b"libdir" in self.i.path + assert self.i.filepath.exists() + assert not self.initial_item_path.exists() + + def test_copy_album(self): + self._move(copy=True, album=True) + self.i.load() + assert b"libdir" in self.i.path + assert self.i.filepath.exists() + assert self.initial_item_path.exists() + + def test_move_item_custom_dir(self): + self._move(dest=self.otherdir) + self.i.load() + assert b"testotherdir" in self.i.path + assert self.i.filepath.exists() + assert not self.initial_item_path.exists() + + def test_move_album_custom_dir(self): + self._move(dest=self.otherdir, album=True) + self.i.load() + assert b"testotherdir" in self.i.path + assert self.i.filepath.exists() + assert not self.initial_item_path.exists() + + def test_pretend_move_item(self): + self._move(dest=self.otherdir, pretend=True) + self.i.load() + assert self.i.filepath == self.initial_item_path + + def test_pretend_move_album(self): + self._move(album=True, pretend=True) + self.i.load() + assert self.i.filepath == self.initial_item_path + + def test_export_item_custom_dir(self): + self._move(dest=self.otherdir, export=True) + self.i.load() + assert self.i.filepath == self.initial_item_path + assert self.otherdir.exists() + + def test_export_album_custom_dir(self): + self._move(dest=self.otherdir, album=True, export=True) + 
self.i.load() + assert self.i.filepath == self.initial_item_path + assert self.otherdir.exists() + + def test_pretend_export_item(self): + self._move(dest=self.otherdir, pretend=True, export=True) + self.i.load() + assert self.i.filepath == self.initial_item_path + assert not self.otherdir.exists() diff --git a/test/ui/commands/test_remove.py b/test/ui/commands/test_remove.py new file mode 100644 index 000000000..e42bb7630 --- /dev/null +++ b/test/ui/commands/test_remove.py @@ -0,0 +1,80 @@ +import os + +from beets import library +from beets.test.helper import BeetsTestCase, IOMixin +from beets.ui.commands.remove import remove_items +from beets.util import MoveOperation, syspath + + +class RemoveTest(IOMixin, BeetsTestCase): + def setUp(self): + super().setUp() + + # Copy a file into the library. + self.i = library.Item.from_path(self.resource_path) + self.lib.add(self.i) + self.i.move(operation=MoveOperation.COPY) + + def test_remove_items_no_delete(self): + self.io.addinput("y") + remove_items(self.lib, "", False, False, False) + items = self.lib.items() + assert len(list(items)) == 0 + assert self.i.filepath.exists() + + def test_remove_items_with_delete(self): + self.io.addinput("y") + remove_items(self.lib, "", False, True, False) + items = self.lib.items() + assert len(list(items)) == 0 + assert not self.i.filepath.exists() + + def test_remove_items_with_force_no_delete(self): + remove_items(self.lib, "", False, False, True) + items = self.lib.items() + assert len(list(items)) == 0 + assert self.i.filepath.exists() + + def test_remove_items_with_force_delete(self): + remove_items(self.lib, "", False, True, True) + items = self.lib.items() + assert len(list(items)) == 0 + assert not self.i.filepath.exists() + + def test_remove_items_select_with_delete(self): + i2 = library.Item.from_path(self.resource_path) + self.lib.add(i2) + i2.move(operation=MoveOperation.COPY) + + for s in ("s", "y", "n"): + self.io.addinput(s) + remove_items(self.lib, "", False, True, 
False) + items = self.lib.items() + assert len(list(items)) == 1 + # There is probably no guarantee that the items are queried in any + # spcecific order, thus just ensure that exactly one was removed. + # To improve upon this, self.io would need to have the capability to + # generate input that depends on previous output. + num_existing = 0 + num_existing += 1 if os.path.exists(syspath(self.i.path)) else 0 + num_existing += 1 if os.path.exists(syspath(i2.path)) else 0 + assert num_existing == 1 + + def test_remove_albums_select_with_delete(self): + a1 = self.add_album_fixture() + a2 = self.add_album_fixture() + path1 = a1.items()[0].path + path2 = a2.items()[0].path + items = self.lib.items() + assert len(list(items)) == 3 + + for s in ("s", "y", "n"): + self.io.addinput(s) + remove_items(self.lib, "", True, True, False) + items = self.lib.items() + assert len(list(items)) == 2 # incl. the item from setUp() + # See test_remove_items_select_with_delete() + num_existing = 0 + num_existing += 1 if os.path.exists(syspath(path1)) else 0 + num_existing += 1 if os.path.exists(syspath(path2)) else 0 + assert num_existing == 1 diff --git a/test/ui/commands/test_update.py b/test/ui/commands/test_update.py new file mode 100644 index 000000000..3fb687418 --- /dev/null +++ b/test/ui/commands/test_update.py @@ -0,0 +1,205 @@ +import os + +from mediafile import MediaFile + +from beets import library +from beets.test import _common +from beets.test.helper import BeetsTestCase, IOMixin +from beets.ui.commands.update import update_items +from beets.util import MoveOperation, remove, syspath + + +class UpdateTest(IOMixin, BeetsTestCase): + def setUp(self): + super().setUp() + + # Copy a file into the library. 
+ item_path = os.path.join(_common.RSRC, b"full.mp3") + item_path_two = os.path.join(_common.RSRC, b"full.flac") + self.i = library.Item.from_path(item_path) + self.i2 = library.Item.from_path(item_path_two) + self.lib.add(self.i) + self.lib.add(self.i2) + self.i.move(operation=MoveOperation.COPY) + self.i2.move(operation=MoveOperation.COPY) + self.album = self.lib.add_album([self.i, self.i2]) + + # Album art. + artfile = os.path.join(self.temp_dir, b"testart.jpg") + _common.touch(artfile) + self.album.set_art(artfile) + self.album.store() + remove(artfile) + + def _update( + self, + query=(), + album=False, + move=False, + reset_mtime=True, + fields=None, + exclude_fields=None, + ): + self.io.addinput("y") + if reset_mtime: + self.i.mtime = 0 + self.i.store() + update_items( + self.lib, + query, + album, + move, + False, + fields=fields, + exclude_fields=exclude_fields, + ) + + def test_delete_removes_item(self): + assert list(self.lib.items()) + remove(self.i.path) + remove(self.i2.path) + self._update() + assert not list(self.lib.items()) + + def test_delete_removes_album(self): + assert self.lib.albums() + remove(self.i.path) + remove(self.i2.path) + self._update() + assert not self.lib.albums() + + def test_delete_removes_album_art(self): + art_filepath = self.album.art_filepath + assert art_filepath.exists() + remove(self.i.path) + remove(self.i2.path) + self._update() + assert not art_filepath.exists() + + def test_modified_metadata_detected(self): + mf = MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + mf.save() + self._update() + item = self.lib.items().get() + assert item.title == "differentTitle" + + def test_modified_metadata_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + mf.save() + self._update(move=True) + item = self.lib.items().get() + assert b"differentTitle" in item.path + + def test_modified_metadata_not_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + 
mf.save() + self._update(move=False) + item = self.lib.items().get() + assert b"differentTitle" not in item.path + + def test_selective_modified_metadata_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + mf.genre = "differentGenre" + mf.save() + self._update(move=True, fields=["title"]) + item = self.lib.items().get() + assert b"differentTitle" in item.path + assert item.genre != "differentGenre" + + def test_selective_modified_metadata_not_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + mf.genre = "differentGenre" + mf.save() + self._update(move=False, fields=["title"]) + item = self.lib.items().get() + assert b"differentTitle" not in item.path + assert item.genre != "differentGenre" + + def test_modified_album_metadata_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.album = "differentAlbum" + mf.save() + self._update(move=True) + item = self.lib.items().get() + assert b"differentAlbum" in item.path + + def test_modified_album_metadata_art_moved(self): + artpath = self.album.artpath + mf = MediaFile(syspath(self.i.path)) + mf.album = "differentAlbum" + mf.save() + self._update(move=True) + album = self.lib.albums()[0] + assert artpath != album.artpath + assert album.artpath is not None + + def test_selective_modified_album_metadata_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.album = "differentAlbum" + mf.genre = "differentGenre" + mf.save() + self._update(move=True, fields=["album"]) + item = self.lib.items().get() + assert b"differentAlbum" in item.path + assert item.genre != "differentGenre" + + def test_selective_modified_album_metadata_not_moved(self): + mf = MediaFile(syspath(self.i.path)) + mf.album = "differentAlbum" + mf.genre = "differentGenre" + mf.save() + self._update(move=True, fields=["genre"]) + item = self.lib.items().get() + assert b"differentAlbum" not in item.path + assert item.genre == "differentGenre" + + def test_mtime_match_skips_update(self): + mf = 
MediaFile(syspath(self.i.path)) + mf.title = "differentTitle" + mf.save() + + # Make in-memory mtime match on-disk mtime. + self.i.mtime = os.path.getmtime(syspath(self.i.path)) + self.i.store() + + self._update(reset_mtime=False) + item = self.lib.items().get() + assert item.title == "full" + + def test_multivalued_albumtype_roundtrip(self): + # https://github.com/beetbox/beets/issues/4528 + + # albumtypes is empty for our test fixtures, so populate it first + album = self.album + correct_albumtypes = ["album", "live"] + + # Setting albumtypes does not set albumtype, currently. + # Using x[0] mirrors https://github.com/beetbox/mediafile/blob/057432ad53b3b84385e5582f69f44dc00d0a725d/mediafile.py#L1928 # noqa: E501 + correct_albumtype = correct_albumtypes[0] + + album.albumtype = correct_albumtype + album.albumtypes = correct_albumtypes + album.try_sync(write=True, move=False) + + album.load() + assert album.albumtype == correct_albumtype + assert album.albumtypes == correct_albumtypes + + self._update() + + album.load() + assert album.albumtype == correct_albumtype + assert album.albumtypes == correct_albumtypes + + def test_modified_metadata_excluded(self): + mf = MediaFile(syspath(self.i.path)) + mf.lyrics = "new lyrics" + mf.save() + self._update(exclude_fields=["lyrics"]) + item = self.lib.items().get() + assert item.lyrics != "new lyrics" diff --git a/test/ui/commands/test_utils.py b/test/ui/commands/test_utils.py new file mode 100644 index 000000000..bd07a27c7 --- /dev/null +++ b/test/ui/commands/test_utils.py @@ -0,0 +1,59 @@ +import os +import shutil + +import pytest + +from beets import library, ui +from beets.test import _common +from beets.test.helper import BeetsTestCase +from beets.ui.commands.utils import do_query +from beets.util import syspath + + +class QueryTest(BeetsTestCase): + def add_item(self, filename=b"srcfile", templatefile=b"full.mp3"): + itempath = os.path.join(self.libdir, filename) + shutil.copy( + syspath(os.path.join(_common.RSRC, 
templatefile)), + syspath(itempath), + ) + item = library.Item.from_path(itempath) + self.lib.add(item) + return item, itempath + + def add_album(self, items): + album = self.lib.add_album(items) + return album + + def check_do_query( + self, num_items, num_albums, q=(), album=False, also_items=True + ): + items, albums = do_query(self.lib, q, album, also_items) + assert len(items) == num_items + assert len(albums) == num_albums + + def test_query_empty(self): + with pytest.raises(ui.UserError): + do_query(self.lib, (), False) + + def test_query_empty_album(self): + with pytest.raises(ui.UserError): + do_query(self.lib, (), True) + + def test_query_item(self): + self.add_item() + self.check_do_query(1, 0, album=False) + self.add_item() + self.check_do_query(2, 0, album=False) + + def test_query_album(self): + item, itempath = self.add_item() + self.add_album([item]) + self.check_do_query(1, 1, album=True) + self.check_do_query(0, 1, album=True, also_items=False) + + item, itempath = self.add_item() + item2, itempath = self.add_item() + self.add_album([item, item2]) + self.check_do_query(3, 2, album=True) + self.check_do_query(0, 2, album=True, also_items=False) diff --git a/test/ui/commands/test_write.py b/test/ui/commands/test_write.py new file mode 100644 index 000000000..312b51dd2 --- /dev/null +++ b/test/ui/commands/test_write.py @@ -0,0 +1,46 @@ +from beets.test.helper import BeetsTestCase + + +class WriteTest(BeetsTestCase): + def write_cmd(self, *args): + return self.run_with_output("write", *args) + + def test_update_mtime(self): + item = self.add_item_fixture() + item["title"] = "a new title" + item.store() + + item = self.lib.items().get() + assert item.mtime == 0 + + self.write_cmd() + item = self.lib.items().get() + assert item.mtime == item.current_mtime() + + def test_non_metadata_field_unchanged(self): + """Changing a non-"tag" field like `bitrate` and writing should + have no effect. + """ + # An item that starts out "clean". 
+ item = self.add_item_fixture() + item.read() + + # ... but with a mismatched bitrate. + item.bitrate = 123 + item.store() + + output = self.write_cmd() + + assert output == "" + + def test_write_metadata_field(self): + item = self.add_item_fixture() + item.read() + old_title = item.title + + item.title = "new title" + item.store() + + output = self.write_cmd() + + assert f"{old_title} -> new title" in output diff --git a/test/ui/test_field_diff.py b/test/ui/test_field_diff.py new file mode 100644 index 000000000..35f3c6ca7 --- /dev/null +++ b/test/ui/test_field_diff.py @@ -0,0 +1,59 @@ +import pytest + +from beets.library import Item +from beets.ui import _field_diff + +p = pytest.param + + +class TestFieldDiff: + @pytest.fixture(autouse=True) + def configure_color(self, config, color): + config["ui"]["color"] = color + + @pytest.fixture(autouse=True) + def patch_colorize(self, monkeypatch): + """Patch to return a deterministic string format instead of ANSI codes.""" + monkeypatch.setattr( + "beets.ui.colorize", + lambda color_name, text: f"[{color_name}]{text}[/]", + ) + + @staticmethod + def diff_fmt(old, new): + return f"[text_diff_removed]{old}[/] -> [text_diff_added]{new}[/]" + + @pytest.mark.parametrize( + "old_data, new_data, field, expected_diff", + [ + p({"title": "foo"}, {"title": "foo"}, "title", None, id="no_change"), + p({"bpm": 120.0}, {"bpm": 120.005}, "bpm", None, id="float_close_enough"), + p({"bpm": 120.0}, {"bpm": 121.0}, "bpm", f"bpm: {diff_fmt('120', '121')}", id="float_changed"), + p({"title": "foo"}, {"title": "bar"}, "title", f"title: {diff_fmt('foo', 'bar')}", id="string_full_replace"), + p({"title": "prefix foo"}, {"title": "prefix bar"}, "title", "title: prefix [text_diff_removed]foo[/] -> prefix [text_diff_added]bar[/]", id="string_partial_change"), + p({"year": 2000}, {"year": 2001}, "year", f"year: {diff_fmt('2000', '2001')}", id="int_changed"), + p({}, {"genre": "Rock"}, "genre", "genre: -> [text_diff_added]Rock[/]", 
id="field_added"), + p({"genre": "Rock"}, {}, "genre", "genre: [text_diff_removed]Rock[/] -> ", id="field_removed"), + p({"track": 1}, {"track": 2}, "track", f"track: {diff_fmt('01', '02')}", id="formatted_value_changed"), + p({"mb_trackid": None}, {"mb_trackid": "1234"}, "mb_trackid", "mb_trackid: -> [text_diff_added]1234[/]", id="none_to_value"), + p({}, {"new_flex": "foo"}, "new_flex", "[text_diff_added]new_flex: foo[/]", id="flex_field_added"), + p({"old_flex": "foo"}, {}, "old_flex", "[text_diff_removed]old_flex: foo[/]", id="flex_field_removed"), + ], + ) # fmt: skip + @pytest.mark.parametrize("color", [True], ids=["color_enabled"]) + def test_field_diff_colors(self, old_data, new_data, field, expected_diff): + old_item = Item(**old_data) + new_item = Item(**new_data) + + diff = _field_diff(field, old_item.formatted(), new_item.formatted()) + + assert diff == expected_diff + + @pytest.mark.parametrize("color", [False], ids=["color_disabled"]) + def test_field_diff_no_color(self): + old_item = Item(title="foo") + new_item = Item(title="bar") + + diff = _field_diff("title", old_item.formatted(), new_item.formatted()) + + assert diff == "title: foo -> bar" diff --git a/test/ui/test_ui.py b/test/ui/test_ui.py new file mode 100644 index 000000000..a37d4bb29 --- /dev/null +++ b/test/ui/test_ui.py @@ -0,0 +1,590 @@ +# This file is part of beets. +# Copyright 2016, Adrian Sampson. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+ +"""Tests for the command-line interface.""" + +import os +import platform +import sys +import unittest +from pathlib import Path +from unittest.mock import patch + +import pytest +from confuse import ConfigError + +from beets import config, plugins, ui +from beets.test import _common +from beets.test.helper import BeetsTestCase, IOMixin, PluginTestCase +from beets.ui import commands +from beets.util import syspath + + +class PrintTest(IOMixin, unittest.TestCase): + def test_print_without_locale(self): + lang = os.environ.get("LANG") + if lang: + del os.environ["LANG"] + + try: + ui.print_("something") + except TypeError: + self.fail("TypeError during print") + finally: + if lang: + os.environ["LANG"] = lang + + def test_print_with_invalid_locale(self): + old_lang = os.environ.get("LANG") + os.environ["LANG"] = "" + old_ctype = os.environ.get("LC_CTYPE") + os.environ["LC_CTYPE"] = "UTF-8" + + try: + ui.print_("something") + except ValueError: + self.fail("ValueError during print") + finally: + if old_lang: + os.environ["LANG"] = old_lang + else: + del os.environ["LANG"] + if old_ctype: + os.environ["LC_CTYPE"] = old_ctype + else: + del os.environ["LC_CTYPE"] + + +@_common.slow_test() +class TestPluginTestCase(PluginTestCase): + plugin = "test" + + def setUp(self): + super().setUp() + config["pluginpath"] = [_common.PLUGINPATH] + + +class ConfigTest(TestPluginTestCase): + def setUp(self): + super().setUp() + + # Don't use the BEETSDIR from `helper`. Instead, we point the home + # directory there. Some tests will set `BEETSDIR` themselves. + del os.environ["BEETSDIR"] + + # Also set APPDATA, the Windows equivalent of setting $HOME. 
+ appdata_dir = self.temp_dir_path / "AppData" / "Roaming" + + self._orig_cwd = os.getcwd() + self.test_cmd = self._make_test_cmd() + commands.default_commands.append(self.test_cmd) + + # Default user configuration + if platform.system() == "Windows": + self.user_config_dir = appdata_dir / "beets" + else: + self.user_config_dir = self.temp_dir_path / ".config" / "beets" + self.user_config_dir.mkdir(parents=True, exist_ok=True) + self.user_config_path = self.user_config_dir / "config.yaml" + + # Custom BEETSDIR + self.beetsdir = self.temp_dir_path / "beetsdir" + self.beetsdir.mkdir(parents=True, exist_ok=True) + + self.env_config_path = str(self.beetsdir / "config.yaml") + self.cli_config_path = str(self.temp_dir_path / "config.yaml") + self.env_patcher = patch( + "os.environ", + {"HOME": str(self.temp_dir_path), "APPDATA": str(appdata_dir)}, + ) + self.env_patcher.start() + + self._reset_config() + + def tearDown(self): + self.env_patcher.stop() + commands.default_commands.pop() + os.chdir(syspath(self._orig_cwd)) + super().tearDown() + + def _make_test_cmd(self): + test_cmd = ui.Subcommand("test", help="test") + + def run(lib, options, args): + test_cmd.lib = lib + test_cmd.options = options + test_cmd.args = args + + test_cmd.func = run + return test_cmd + + def _reset_config(self): + # Config should read files again on demand + config.clear() + config._materialized = False + + def write_config_file(self): + return open(self.user_config_path, "w") + + def test_paths_section_respected(self): + with self.write_config_file() as config: + config.write("paths: {x: y}") + + self.run_command("test", lib=None) + key, template = self.test_cmd.lib.path_formats[0] + assert key == "x" + assert template.original == "y" + + def test_default_paths_preserved(self): + default_formats = ui.get_path_formats() + + self._reset_config() + with self.write_config_file() as config: + config.write("paths: {x: y}") + self.run_command("test", lib=None) + key, template = 
self.test_cmd.lib.path_formats[0] + assert key == "x" + assert template.original == "y" + assert self.test_cmd.lib.path_formats[1:] == default_formats + + def test_nonexistant_db(self): + with self.write_config_file() as config: + config.write("library: /xxx/yyy/not/a/real/path") + + with pytest.raises(ui.UserError): + self.run_command("test", lib=None) + + def test_user_config_file(self): + with self.write_config_file() as file: + file.write("anoption: value") + + self.run_command("test", lib=None) + assert config["anoption"].get() == "value" + + def test_replacements_parsed(self): + with self.write_config_file() as config: + config.write("replace: {'[xy]': z}") + + self.run_command("test", lib=None) + replacements = self.test_cmd.lib.replacements + repls = [(p.pattern, s) for p, s in replacements] # Compare patterns. + assert repls == [("[xy]", "z")] + + def test_multiple_replacements_parsed(self): + with self.write_config_file() as config: + config.write("replace: {'[xy]': z, foo: bar}") + self.run_command("test", lib=None) + replacements = self.test_cmd.lib.replacements + repls = [(p.pattern, s) for p, s in replacements] + assert repls == [("[xy]", "z"), ("foo", "bar")] + + def test_cli_config_option(self): + with open(self.cli_config_path, "w") as file: + file.write("anoption: value") + self.run_command("--config", self.cli_config_path, "test", lib=None) + assert config["anoption"].get() == "value" + + def test_cli_config_file_overwrites_user_defaults(self): + with open(self.user_config_path, "w") as file: + file.write("anoption: value") + + with open(self.cli_config_path, "w") as file: + file.write("anoption: cli overwrite") + self.run_command("--config", self.cli_config_path, "test", lib=None) + assert config["anoption"].get() == "cli overwrite" + + def test_cli_config_file_overwrites_beetsdir_defaults(self): + os.environ["BEETSDIR"] = str(self.beetsdir) + with open(self.env_config_path, "w") as file: + file.write("anoption: value") + + with 
open(self.cli_config_path, "w") as file: + file.write("anoption: cli overwrite") + self.run_command("--config", self.cli_config_path, "test", lib=None) + assert config["anoption"].get() == "cli overwrite" + + # @unittest.skip('Difficult to implement with optparse') + # def test_multiple_cli_config_files(self): + # cli_config_path_1 = os.path.join(self.temp_dir, b'config.yaml') + # cli_config_path_2 = os.path.join(self.temp_dir, b'config_2.yaml') + # + # with open(cli_config_path_1, 'w') as file: + # file.write('first: value') + # + # with open(cli_config_path_2, 'w') as file: + # file.write('second: value') + # + # self.run_command('--config', cli_config_path_1, + # '--config', cli_config_path_2, 'test', lib=None) + # assert config['first'].get() == 'value' + # assert config['second'].get() == 'value' + # + # @unittest.skip('Difficult to implement with optparse') + # def test_multiple_cli_config_overwrite(self): + # cli_overwrite_config_path = os.path.join(self.temp_dir, + # b'overwrite_config.yaml') + # + # with open(self.cli_config_path, 'w') as file: + # file.write('anoption: value') + # + # with open(cli_overwrite_config_path, 'w') as file: + # file.write('anoption: overwrite') + # + # self.run_command('--config', self.cli_config_path, + # '--config', cli_overwrite_config_path, 'test') + # assert config['anoption'].get() == 'cli overwrite' + + # FIXME: fails on windows + @unittest.skipIf(sys.platform == "win32", "win32") + def test_cli_config_paths_resolve_relative_to_user_dir(self): + with open(self.cli_config_path, "w") as file: + file.write("library: beets.db\n") + file.write("statefile: state") + + self.run_command("--config", self.cli_config_path, "test", lib=None) + assert config["library"].as_path() == self.user_config_dir / "beets.db" + assert config["statefile"].as_path() == self.user_config_dir / "state" + + def test_cli_config_paths_resolve_relative_to_beetsdir(self): + os.environ["BEETSDIR"] = str(self.beetsdir) + + with open(self.cli_config_path, 
"w") as file: + file.write("library: beets.db\n") + file.write("statefile: state") + + self.run_command("--config", self.cli_config_path, "test", lib=None) + assert config["library"].as_path() == self.beetsdir / "beets.db" + assert config["statefile"].as_path() == self.beetsdir / "state" + + def test_command_line_option_relative_to_working_dir(self): + config.read() + os.chdir(syspath(self.temp_dir)) + self.run_command("--library", "foo.db", "test", lib=None) + assert config["library"].as_path() == Path.cwd() / "foo.db" + + def test_cli_config_file_loads_plugin_commands(self): + with open(self.cli_config_path, "w") as file: + file.write(f"pluginpath: {_common.PLUGINPATH}\n") + file.write("plugins: test") + + self.run_command("--config", self.cli_config_path, "plugin", lib=None) + plugs = plugins.find_plugins() + assert len(plugs) == 1 + assert plugs[0].is_test_plugin + self.unload_plugins() + + def test_beetsdir_config(self): + os.environ["BEETSDIR"] = str(self.beetsdir) + + with open(self.env_config_path, "w") as file: + file.write("anoption: overwrite") + + config.read() + assert config["anoption"].get() == "overwrite" + + def test_beetsdir_points_to_file_error(self): + beetsdir = str(self.temp_dir_path / "beetsfile") + open(beetsdir, "a").close() + os.environ["BEETSDIR"] = beetsdir + with pytest.raises(ConfigError): + self.run_command("test") + + def test_beetsdir_config_does_not_load_default_user_config(self): + os.environ["BEETSDIR"] = str(self.beetsdir) + + with open(self.user_config_path, "w") as file: + file.write("anoption: value") + + config.read() + assert not config["anoption"].exists() + + def test_default_config_paths_resolve_relative_to_beetsdir(self): + os.environ["BEETSDIR"] = str(self.beetsdir) + + config.read() + assert config["library"].as_path() == self.beetsdir / "library.db" + assert config["statefile"].as_path() == self.beetsdir / "state.pickle" + + def test_beetsdir_config_paths_resolve_relative_to_beetsdir(self): + os.environ["BEETSDIR"] = 
str(self.beetsdir) + + with open(self.env_config_path, "w") as file: + file.write("library: beets.db\n") + file.write("statefile: state") + + config.read() + assert config["library"].as_path() == self.beetsdir / "beets.db" + assert config["statefile"].as_path() == self.beetsdir / "state" + + +class ShowModelChangeTest(IOMixin, unittest.TestCase): + def setUp(self): + super().setUp() + self.a = _common.item() + self.b = _common.item() + self.a.path = self.b.path + + def _show(self, **kwargs): + change = ui.show_model_changes(self.a, self.b, **kwargs) + out = self.io.getoutput() + return change, out + + def test_identical(self): + change, out = self._show() + assert not change + assert out == "" + + def test_string_fixed_field_change(self): + self.b.title = "x" + change, out = self._show() + assert change + assert "title" in out + + def test_int_fixed_field_change(self): + self.b.track = 9 + change, out = self._show() + assert change + assert "track" in out + + def test_floats_close_to_identical(self): + self.a.length = 1.00001 + self.b.length = 1.00005 + change, out = self._show() + assert not change + assert out == "" + + def test_floats_different(self): + self.a.length = 1.00001 + self.b.length = 2.00001 + change, out = self._show() + assert change + assert "length" in out + + def test_both_values_shown(self): + self.a.title = "foo" + self.b.title = "bar" + change, out = self._show() + assert "foo" in out + assert "bar" in out + + +class PathFormatTest(unittest.TestCase): + def test_custom_paths_prepend(self): + default_formats = ui.get_path_formats() + + config["paths"] = {"foo": "bar"} + pf = ui.get_path_formats() + key, tmpl = pf[0] + assert key == "foo" + assert tmpl.original == "bar" + assert pf[1:] == default_formats + + +@_common.slow_test() +class PluginTest(TestPluginTestCase): + def test_plugin_command_from_pluginpath(self): + self.run_command("test", lib=None) + + +class CommonOptionsParserCliTest(BeetsTestCase): + """Test CommonOptionsParser and 
formatting LibModel formatting on 'list' + command. + """ + + def setUp(self): + super().setUp() + self.item = _common.item() + self.item.path = b"xxx/yyy" + self.lib.add(self.item) + self.lib.add_album([self.item]) + + def test_base(self): + output = self.run_with_output("ls") + assert output == "the artist - the album - the title\n" + + output = self.run_with_output("ls", "-a") + assert output == "the album artist - the album\n" + + def test_path_option(self): + output = self.run_with_output("ls", "-p") + assert output == "xxx/yyy\n" + + output = self.run_with_output("ls", "-a", "-p") + assert output == "xxx\n" + + def test_format_option(self): + output = self.run_with_output("ls", "-f", "$artist") + assert output == "the artist\n" + + output = self.run_with_output("ls", "-a", "-f", "$albumartist") + assert output == "the album artist\n" + + def test_format_option_unicode(self): + output = self.run_with_output("ls", "-f", "caf\xe9") + assert output == "caf\xe9\n" + + def test_root_format_option(self): + output = self.run_with_output( + "--format-item", "$artist", "--format-album", "foo", "ls" + ) + assert output == "the artist\n" + + output = self.run_with_output( + "--format-item", "foo", "--format-album", "$albumartist", "ls", "-a" + ) + assert output == "the album artist\n" + + def test_help(self): + output = self.run_with_output("help") + assert "Usage:" in output + + output = self.run_with_output("help", "list") + assert "Usage:" in output + + with pytest.raises(ui.UserError): + self.run_command("help", "this.is.not.a.real.command") + + def test_stats(self): + output = self.run_with_output("stats") + assert "Approximate total size:" in output + + # # Need to have more realistic library setup for this to work + # output = self.run_with_output('stats', '-e') + # assert 'Total size:' in output + + def test_version(self): + output = self.run_with_output("version") + assert "Python version" in output + assert "no plugins loaded" in output + + # # Need to have 
plugin loaded + # output = self.run_with_output('version') + # assert 'plugins: ' in output + + +class CommonOptionsParserTest(unittest.TestCase): + def test_album_option(self): + parser = ui.CommonOptionsParser() + assert not parser._album_flags + parser.add_album_option() + assert bool(parser._album_flags) + + assert parser.parse_args([]) == ({"album": None}, []) + assert parser.parse_args(["-a"]) == ({"album": True}, []) + assert parser.parse_args(["--album"]) == ({"album": True}, []) + + def test_path_option(self): + parser = ui.CommonOptionsParser() + parser.add_path_option() + assert not parser._album_flags + + config["format_item"].set("$foo") + assert parser.parse_args([]) == ({"path": None}, []) + assert config["format_item"].as_str() == "$foo" + + assert parser.parse_args(["-p"]) == ( + {"path": True, "format": "$path"}, + [], + ) + assert parser.parse_args(["--path"]) == ( + {"path": True, "format": "$path"}, + [], + ) + + assert config["format_item"].as_str() == "$path" + assert config["format_album"].as_str() == "$path" + + def test_format_option(self): + parser = ui.CommonOptionsParser() + parser.add_format_option() + assert not parser._album_flags + + config["format_item"].set("$foo") + assert parser.parse_args([]) == ({"format": None}, []) + assert config["format_item"].as_str() == "$foo" + + assert parser.parse_args(["-f", "$bar"]) == ({"format": "$bar"}, []) + assert parser.parse_args(["--format", "$baz"]) == ( + {"format": "$baz"}, + [], + ) + + assert config["format_item"].as_str() == "$baz" + assert config["format_album"].as_str() == "$baz" + + def test_format_option_with_target(self): + with pytest.raises(KeyError): + ui.CommonOptionsParser().add_format_option(target="thingy") + + parser = ui.CommonOptionsParser() + parser.add_format_option(target="item") + + config["format_item"].set("$item") + config["format_album"].set("$album") + + assert parser.parse_args(["-f", "$bar"]) == ({"format": "$bar"}, []) + + assert 
config["format_item"].as_str() == "$bar" + assert config["format_album"].as_str() == "$album" + + def test_format_option_with_album(self): + parser = ui.CommonOptionsParser() + parser.add_album_option() + parser.add_format_option() + + config["format_item"].set("$item") + config["format_album"].set("$album") + + parser.parse_args(["-f", "$bar"]) + assert config["format_item"].as_str() == "$bar" + assert config["format_album"].as_str() == "$album" + + parser.parse_args(["-a", "-f", "$foo"]) + assert config["format_item"].as_str() == "$bar" + assert config["format_album"].as_str() == "$foo" + + parser.parse_args(["-f", "$foo2", "-a"]) + assert config["format_album"].as_str() == "$foo2" + + def test_add_all_common_options(self): + parser = ui.CommonOptionsParser() + parser.add_all_common_options() + assert parser.parse_args([]) == ( + {"album": None, "path": None, "format": None}, + [], + ) + + +class EncodingTest(unittest.TestCase): + """Tests for the `terminal_encoding` config option and our + `_in_encoding` and `_out_encoding` utility functions. 
+ """ + + def out_encoding_overridden(self): + config["terminal_encoding"] = "fake_encoding" + assert ui._out_encoding() == "fake_encoding" + + def in_encoding_overridden(self): + config["terminal_encoding"] = "fake_encoding" + assert ui._in_encoding() == "fake_encoding" + + def out_encoding_default_utf8(self): + with patch("sys.stdout") as stdout: + stdout.encoding = None + assert ui._out_encoding() == "utf-8" + + def in_encoding_default_utf8(self): + with patch("sys.stdin") as stdin: + stdin.encoding = None + assert ui._in_encoding() == "utf-8" diff --git a/test/test_ui_importer.py b/test/ui/test_ui_importer.py similarity index 100% rename from test/test_ui_importer.py rename to test/ui/test_ui_importer.py diff --git a/test/test_ui_init.py b/test/ui/test_ui_init.py similarity index 75% rename from test/test_ui_init.py rename to test/ui/test_ui_init.py index a6f06c494..f6c9fe245 100644 --- a/test/test_ui_init.py +++ b/test/ui/test_ui_init.py @@ -16,19 +16,16 @@ import os import shutil +import unittest from copy import deepcopy from random import random from beets import config, ui from beets.test import _common -from beets.test.helper import BeetsTestCase, ItemInDBTestCase, control_stdin +from beets.test.helper import BeetsTestCase, IOMixin, control_stdin -class InputMethodsTest(BeetsTestCase): - def setUp(self): - super().setUp() - self.io.install() - +class InputMethodsTest(IOMixin, unittest.TestCase): def _print_helper(self, s): print(s) @@ -88,42 +85,6 @@ class InputMethodsTest(BeetsTestCase): assert items == ["1", "3"] -class InitTest(ItemInDBTestCase): - def test_human_bytes(self): - tests = [ - (0, "0.0 B"), - (30, "30.0 B"), - (pow(2, 10), "1.0 KiB"), - (pow(2, 20), "1.0 MiB"), - (pow(2, 30), "1.0 GiB"), - (pow(2, 40), "1.0 TiB"), - (pow(2, 50), "1.0 PiB"), - (pow(2, 60), "1.0 EiB"), - (pow(2, 70), "1.0 ZiB"), - (pow(2, 80), "1.0 YiB"), - (pow(2, 90), "1.0 HiB"), - (pow(2, 100), "big"), - ] - for i, h in tests: - assert h == ui.human_bytes(i) - - def 
test_human_seconds(self): - tests = [ - (0, "0.0 seconds"), - (30, "30.0 seconds"), - (60, "1.0 minutes"), - (90, "1.5 minutes"), - (125, "2.1 minutes"), - (3600, "1.0 hours"), - (86400, "1.0 days"), - (604800, "1.0 weeks"), - (31449600, "1.0 years"), - (314496000, "1.0 decades"), - ] - for i, h in tests: - assert h == ui.human_seconds(i) - - class ParentalDirCreation(BeetsTestCase): def test_create_yes(self): non_exist_path = _common.os.fsdecode( diff --git a/test/util/test_config.py b/test/util/test_config.py new file mode 100644 index 000000000..7105844dd --- /dev/null +++ b/test/util/test_config.py @@ -0,0 +1,38 @@ +import pytest + +from beets.util.config import sanitize_choices, sanitize_pairs + + +@pytest.mark.parametrize( + "input_choices, valid_choices, expected", + [ + (["A", "Z"], ("A", "B"), ["A"]), + (["A", "A"], ("A"), ["A"]), + (["D", "*", "A"], ("A", "B", "C", "D"), ["D", "B", "C", "A"]), + ], +) +def test_sanitize_choices(input_choices, valid_choices, expected): + assert sanitize_choices(input_choices, valid_choices) == expected + + +def test_sanitize_pairs(): + assert sanitize_pairs( + [ + ("foo", "baz bar"), + ("foo", "baz bar"), + ("key", "*"), + ("*", "*"), + ("discard", "bye"), + ], + [ + ("foo", "bar"), + ("foo", "baz"), + ("foo", "foobar"), + ("key", "value"), + ], + ) == [ + ("foo", "baz"), + ("foo", "bar"), + ("key", "value"), + ("foo", "foobar"), + ] diff --git a/test/util/test_id_extractors.py b/test/util/test_id_extractors.py new file mode 100644 index 000000000..4918b4361 --- /dev/null +++ b/test/util/test_id_extractors.py @@ -0,0 +1,61 @@ +from typing import NamedTuple + +import pytest + +from beets.util.id_extractors import extract_release_id + + +@pytest.mark.parametrize( + "source, id_string, expected", + [ + ("spotify", "39WqpoPgZxygo6YQjehLJJ", "39WqpoPgZxygo6YQjehLJJ"), + ("spotify", "blah blah", None), + ("spotify", "https://open.spotify.com/album/39WqpoPgZxygo6YQjehLJJ", "39WqpoPgZxygo6YQjehLJJ"), # noqa: E501 + ("deezer", 
"176356382", "176356382"), + ("deezer", "blah blah", None), + ("deezer", "https://www.deezer.com/album/176356382", "176356382"), + ("beatport", "3089651", "3089651"), + ("beatport", "blah blah", None), + ("beatport", "https://www.beatport.com/release/album-name/3089651", "3089651"), # noqa: E501 + ("discogs", "http://www.discogs.com/G%C3%BCnther-Lause-Meru-Ep/release/4354798", "4354798"), # noqa: E501 + ("discogs", "http://www.discogs.com/release/4354798-G%C3%BCnther-Lause-Meru-Ep", "4354798"), # noqa: E501 + ("discogs", "http://www.discogs.com/G%C3%BCnther-4354798Lause-Meru-Ep/release/4354798", "4354798"), # noqa: E501 + ("discogs", "http://www.discogs.com/release/4354798-G%C3%BCnther-4354798Lause-Meru-Ep/", "4354798"), # noqa: E501 + ("discogs", "[r4354798]", "4354798"), + ("discogs", "r4354798", "4354798"), + ("discogs", "4354798", "4354798"), + ("discogs", "yet-another-metadata-provider.org/foo/12345", None), + ("discogs", "005b84a0-ecd6-39f1-b2f6-6eb48756b268", None), + ("musicbrainz", "28e32c71-1450-463e-92bf-e0a46446fc11", "28e32c71-1450-463e-92bf-e0a46446fc11"), # noqa: E501 + ("musicbrainz", "blah blah", None), + ("musicbrainz", "https://musicbrainz.org/entity/28e32c71-1450-463e-92bf-e0a46446fc11", "28e32c71-1450-463e-92bf-e0a46446fc11"), # noqa: E501 + ("bandcamp", "https://nameofartist.bandcamp.com/album/nameofalbum", "https://nameofartist.bandcamp.com/album/nameofalbum"), # noqa: E501 + ], +) # fmt: skip +def test_extract_release_id(source, id_string, expected): + assert extract_release_id(source, id_string) == expected + + +class SourceWithURL(NamedTuple): + source: str + url: str + + +source_with_urls = [ + SourceWithURL("spotify", "https://open.spotify.com/album/39WqpoPgZxygo6YQjehLJJ"), + SourceWithURL("deezer", "https://www.deezer.com/album/176356382"), + SourceWithURL("beatport", "https://www.beatport.com/release/album-name/3089651"), + SourceWithURL("discogs", "http://www.discogs.com/G%C3%BCnther-Lause-Meru-Ep/release/4354798"), + 
SourceWithURL("musicbrainz", "https://musicbrainz.org/entity/28e32c71-1450-463e-92bf-e0a46446fc11"), +] # fmt: skip + + +@pytest.mark.parametrize("source", [s.source for s in source_with_urls]) +@pytest.mark.parametrize("source_with_url", source_with_urls) +def test_match_source_url(source, source_with_url): + if source == source_with_url.source: + assert extract_release_id(source, source_with_url.url) + else: + assert not extract_release_id(source, source_with_url.url), ( + f"Source {source} pattern should not match {source_with_url.source} URL" + ) diff --git a/test/util/test_units.py b/test/util/test_units.py new file mode 100644 index 000000000..26f4d3eca --- /dev/null +++ b/test/util/test_units.py @@ -0,0 +1,43 @@ +import pytest + +from beets.util.units import human_bytes, human_seconds + + +@pytest.mark.parametrize( + "input_bytes,expected", + [ + (0, "0.0 B"), + (30, "30.0 B"), + (pow(2, 10), "1.0 KiB"), + (pow(2, 20), "1.0 MiB"), + (pow(2, 30), "1.0 GiB"), + (pow(2, 40), "1.0 TiB"), + (pow(2, 50), "1.0 PiB"), + (pow(2, 60), "1.0 EiB"), + (pow(2, 70), "1.0 ZiB"), + (pow(2, 80), "1.0 YiB"), + (pow(2, 90), "1.0 HiB"), + (pow(2, 100), "big"), + ], +) +def test_human_bytes(input_bytes, expected): + assert human_bytes(input_bytes) == expected + + +@pytest.mark.parametrize( + "input_seconds,expected", + [ + (0, "0.0 seconds"), + (30, "30.0 seconds"), + (60, "1.0 minutes"), + (90, "1.5 minutes"), + (125, "2.1 minutes"), + (3600, "1.0 hours"), + (86400, "1.0 days"), + (604800, "1.0 weeks"), + (31449600, "1.0 years"), + (314496000, "1.0 decades"), + ], +) +def test_human_seconds(input_seconds, expected): + assert human_seconds(input_seconds) == expected