Remove support for Python 3.8 (#5508)

- Drop support for EOL Python 3.8, making Python 3.9 the minimum
  supported version

- Take advantage of Python 3.9+ type hint syntax (illustrated in the sketch
  below) by:
  - Using builtin generics such as `list[T]` instead of `List[T]` from the
    `typing` module
  - Using `Type | None` union syntax instead of `Union[Type, None]`
  - Importing collection type hints from `collections.abc` instead of `typing`
  - Guarding type-only imports behind `TYPE_CHECKING` so they are not
    executed at runtime

Note: in #5503 we found that we cannot support Python 3.12 unless we
raise our minimum supported Python version to 3.9.
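
A minimal sketch of the modernized annotation style described above; the
function and variable names here are invented for illustration and do not
come from this diff:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type-only import: never executed at runtime.
    from collections.abc import Iterable


# Previously: def field_names(weights: Dict[str, float]) -> List[str]:
def field_names(weights: dict[str, float]) -> list[str]:
    """Builtin generics (PEP 585) replace typing.Dict/typing.List."""
    return list(weights)


# Previously: def parse_year(raw: Optional[str] = None) -> Optional[int]:
def parse_year(raw: str | None = None) -> int | None:
    """PEP 604 unions; fine on 3.9 since annotations stay unevaluated."""
    return int(raw) if raw else None


def join_fields(fields: Iterable[str]) -> str:
    """Collection hints come from collections.abc, not typing."""
    return ", ".join(fields)
```

The `| None` unions remain valid on Python 3.9 because
`from __future__ import annotations` keeps annotations as strings, and the
`TYPE_CHECKING` block keeps type-only imports out of the runtime import graph.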
commit 22163d70a7 by Šarūnas Nejus, 2024-12-10 06:54:23 +00:00 (committed by GitHub)
31 changed files with 998 additions and 1021 deletions

View file

@ -14,10 +14,10 @@ jobs:
fail-fast: false
matrix:
platform: [ubuntu-latest, windows-latest]
python-version: ["3.8", "3.9"]
python-version: ["3.9"]
runs-on: ${{ matrix.platform }}
env:
IS_MAIN_PYTHON: ${{ matrix.python-version == '3.8' && matrix.platform == 'ubuntu-latest' }}
IS_MAIN_PYTHON: ${{ matrix.python-version == '3.9' && matrix.platform == 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
- name: Install Python tools

View file

@ -7,7 +7,7 @@ on:
- master
env:
PYTHON_VERSION: 3.8
PYTHON_VERSION: 3.9
jobs:
changed-files:
@ -131,7 +131,7 @@ jobs:
run: echo "::add-matcher::.github/sphinx-problem-matcher.json"
- name: Build docs
run: |
run: |-
poe docs |& tee /tmp/output
# fail the job if there are issues
grep -q " WARNING:" /tmp/output && exit 1 || exit 0

View file

@ -3,6 +3,6 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.0
rev: v0.8.1
hooks:
- id: ruff-format

View file

@ -118,10 +118,10 @@ command. Instead, you can activate the virtual environment in your shell with::
$ poetry shell
You should see ``(beets-py38)`` prefix in your shell prompt. Now you can run
You should see ``(beets-py3.9)`` prefix in your shell prompt. Now you can run
commands directly, for example::
$ (beets-py38) pytest
$ (beets-py3.9) pytest
Additionally, `poethepoet`_ task runner assists us with the most common
operations. Formatting, linting, testing are defined as ``poe`` tasks in

View file

@ -14,7 +14,8 @@
"""Facilities for automatically determining files' correct metadata."""
from typing import Mapping, Sequence, Union
from collections.abc import Mapping, Sequence
from typing import Union
from beets import config, logging
from beets.library import Album, Item

View file

@ -18,36 +18,27 @@ from __future__ import annotations
import re
from functools import total_ordering
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from typing import TYPE_CHECKING, Any, Callable, NamedTuple, TypeVar, cast
from jellyfish import levenshtein_distance
from unidecode import unidecode
from beets import config, logging, plugins
from beets.autotag import mb
from beets.library import Item
from beets.util import as_string, cached_classproperty
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator
from beets.library import Item
log = logging.getLogger("beets")
V = TypeVar("V")
# Classes used to represent candidate options.
class AttrDict(Dict[str, V]):
class AttrDict(dict[str, V]):
"""A dictionary that supports attribute ("dot") access, so `d.field`
is equivalent to `d['field']`.
"""
@ -82,47 +73,47 @@ class AlbumInfo(AttrDict):
# TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
tracks: List[TrackInfo],
album: Optional[str] = None,
album_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
artists: Optional[List[str]] = None,
artists_ids: Optional[List[str]] = None,
asin: Optional[str] = None,
albumtype: Optional[str] = None,
albumtypes: Optional[List[str]] = None,
tracks: list[TrackInfo],
album: str | None = None,
album_id: str | None = None,
artist: str | None = None,
artist_id: str | None = None,
artists: list[str] | None = None,
artists_ids: list[str] | None = None,
asin: str | None = None,
albumtype: str | None = None,
albumtypes: list[str] | None = None,
va: bool = False,
year: Optional[int] = None,
month: Optional[int] = None,
day: Optional[int] = None,
label: Optional[str] = None,
barcode: Optional[str] = None,
mediums: Optional[int] = None,
artist_sort: Optional[str] = None,
artists_sort: Optional[List[str]] = None,
releasegroup_id: Optional[str] = None,
release_group_title: Optional[str] = None,
catalognum: Optional[str] = None,
script: Optional[str] = None,
language: Optional[str] = None,
country: Optional[str] = None,
style: Optional[str] = None,
genre: Optional[str] = None,
albumstatus: Optional[str] = None,
media: Optional[str] = None,
albumdisambig: Optional[str] = None,
releasegroupdisambig: Optional[str] = None,
artist_credit: Optional[str] = None,
artists_credit: Optional[List[str]] = None,
original_year: Optional[int] = None,
original_month: Optional[int] = None,
original_day: Optional[int] = None,
data_source: Optional[str] = None,
data_url: Optional[str] = None,
discogs_albumid: Optional[str] = None,
discogs_labelid: Optional[str] = None,
discogs_artistid: Optional[str] = None,
year: int | None = None,
month: int | None = None,
day: int | None = None,
label: str | None = None,
barcode: str | None = None,
mediums: int | None = None,
artist_sort: str | None = None,
artists_sort: list[str] | None = None,
releasegroup_id: str | None = None,
release_group_title: str | None = None,
catalognum: str | None = None,
script: str | None = None,
language: str | None = None,
country: str | None = None,
style: str | None = None,
genre: str | None = None,
albumstatus: str | None = None,
media: str | None = None,
albumdisambig: str | None = None,
releasegroupdisambig: str | None = None,
artist_credit: str | None = None,
artists_credit: list[str] | None = None,
original_year: int | None = None,
original_month: int | None = None,
original_day: int | None = None,
data_source: str | None = None,
data_url: str | None = None,
discogs_albumid: str | None = None,
discogs_labelid: str | None = None,
discogs_artistid: str | None = None,
**kwargs,
):
self.album = album
@ -190,38 +181,38 @@ class TrackInfo(AttrDict):
# TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
title: Optional[str] = None,
track_id: Optional[str] = None,
release_track_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
artists: Optional[List[str]] = None,
artists_ids: Optional[List[str]] = None,
length: Optional[float] = None,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
medium_total: Optional[int] = None,
artist_sort: Optional[str] = None,
artists_sort: Optional[List[str]] = None,
disctitle: Optional[str] = None,
artist_credit: Optional[str] = None,
artists_credit: Optional[List[str]] = None,
data_source: Optional[str] = None,
data_url: Optional[str] = None,
media: Optional[str] = None,
lyricist: Optional[str] = None,
composer: Optional[str] = None,
composer_sort: Optional[str] = None,
arranger: Optional[str] = None,
track_alt: Optional[str] = None,
work: Optional[str] = None,
mb_workid: Optional[str] = None,
work_disambig: Optional[str] = None,
bpm: Optional[str] = None,
initial_key: Optional[str] = None,
genre: Optional[str] = None,
album: Optional[str] = None,
title: str | None = None,
track_id: str | None = None,
release_track_id: str | None = None,
artist: str | None = None,
artist_id: str | None = None,
artists: list[str] | None = None,
artists_ids: list[str] | None = None,
length: float | None = None,
index: int | None = None,
medium: int | None = None,
medium_index: int | None = None,
medium_total: int | None = None,
artist_sort: str | None = None,
artists_sort: list[str] | None = None,
disctitle: str | None = None,
artist_credit: str | None = None,
artists_credit: list[str] | None = None,
data_source: str | None = None,
data_url: str | None = None,
media: str | None = None,
lyricist: str | None = None,
composer: str | None = None,
composer_sort: str | None = None,
arranger: str | None = None,
track_alt: str | None = None,
work: str | None = None,
mb_workid: str | None = None,
work_disambig: str | None = None,
bpm: str | None = None,
initial_key: str | None = None,
genre: str | None = None,
album: str | None = None,
**kwargs,
):
self.title = title
@ -301,7 +292,7 @@ def _string_dist_basic(str1: str, str2: str) -> float:
return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1: Optional[str], str2: Optional[str]) -> float:
def string_dist(str1: str | None, str2: str | None) -> float:
"""Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
@ -368,10 +359,10 @@ class Distance:
def __init__(self):
self._penalties = {}
self.tracks: Dict[TrackInfo, Distance] = {}
self.tracks: dict[TrackInfo, Distance] = {}
@cached_classproperty
def _weights(cls) -> Dict[str, float]:
def _weights(cls) -> dict[str, float]:
"""A dictionary from keys to floating-point weights."""
weights_view = config["match"]["distance_weights"]
weights = {}
@ -407,7 +398,7 @@ class Distance:
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self) -> List[Tuple[str, float]]:
def items(self) -> list[tuple[str, float]]:
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
@ -457,16 +448,16 @@ class Distance:
return dist / dist_max
return 0.0
def __iter__(self) -> Iterator[Tuple[str, float]]:
def __iter__(self) -> Iterator[tuple[str, float]]:
return iter(self.items())
def __len__(self) -> int:
return len(self.items())
def keys(self) -> List[str]:
def keys(self) -> list[str]:
return [key for key, _ in self.items()]
def update(self, dist: "Distance"):
def update(self, dist: Distance):
"""Adds all the distance penalties from `dist`."""
if not isinstance(dist, Distance):
raise ValueError(
@ -477,7 +468,7 @@ class Distance:
# Adding components.
def _eq(self, value1: Union[re.Pattern[str], Any], value2: Any) -> bool:
def _eq(self, value1: re.Pattern[str] | Any, value2: Any) -> bool:
"""Returns True if `value1` is equal to `value2`. `value1` may
be a compiled regular expression, in which case it will be
matched against `value2`.
@ -501,7 +492,7 @@ class Distance:
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
options: list[Any] | tuple[Any, ...] | Any,
):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
@ -544,7 +535,7 @@ class Distance:
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
options: list[Any] | tuple[Any, ...] | Any,
):
"""Adds a distance penalty that corresponds to the position at
which `value` appears in `options`. A distance penalty of 0.0
@ -566,8 +557,8 @@ class Distance:
def add_ratio(
self,
key: str,
number1: Union[int, float],
number2: Union[int, float],
number1: int | float,
number2: int | float,
):
"""Adds a distance penalty for `number1` as a ratio of `number2`.
`number1` is bound at 0 and `number2`.
@ -579,7 +570,7 @@ class Distance:
dist = 0.0
self.add(key, dist)
def add_string(self, key: str, str1: Optional[str], str2: Optional[str]):
def add_string(self, key: str, str1: str | None, str2: str | None):
"""Adds a distance penalty based on the edit distance between
`str1` and `str2`.
"""
@ -593,9 +584,9 @@ class Distance:
class AlbumMatch(NamedTuple):
distance: Distance
info: AlbumInfo
mapping: Dict[Item, TrackInfo]
extra_items: List[Item]
extra_tracks: List[TrackInfo]
mapping: dict[Item, TrackInfo]
extra_items: list[Item]
extra_tracks: list[TrackInfo]
class TrackMatch(NamedTuple):
@ -606,7 +597,7 @@ class TrackMatch(NamedTuple):
# Aggregation of sources.
def album_for_mbid(release_id: str) -> Optional[AlbumInfo]:
def album_for_mbid(release_id: str) -> AlbumInfo | None:
"""Get an AlbumInfo object for a MusicBrainz release ID. Return None
if the ID is not found.
"""
@ -620,7 +611,7 @@ def album_for_mbid(release_id: str) -> Optional[AlbumInfo]:
return None
def track_for_mbid(recording_id: str) -> Optional[TrackInfo]:
def track_for_mbid(recording_id: str) -> TrackInfo | None:
"""Get a TrackInfo object for a MusicBrainz recording ID. Return None
if the ID is not found.
"""
@ -666,12 +657,12 @@ def invoke_mb(call_func: Callable, *args):
@plugins.notify_info_yielded("albuminfo_received")
def album_candidates(
items: List[Item],
items: list[Item],
artist: str,
album: str,
va_likely: bool,
extra_tags: Dict,
) -> Iterable[Tuple]:
extra_tags: dict,
) -> Iterable[tuple]:
"""Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective
names (strings), which may be derived from the item list or may be
@ -699,7 +690,7 @@ def album_candidates(
@plugins.notify_info_yielded("trackinfo_received")
def item_candidates(item: Item, artist: str, title: str) -> Iterable[Tuple]:
def item_candidates(item: Item, artist: str, title: str) -> Iterable[tuple]:
"""Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or
are specified by the user.

View file

@ -20,20 +20,9 @@ from __future__ import annotations
import datetime
import re
from collections.abc import Iterable, Sequence
from enum import IntEnum
from typing import (
Any,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar, Union, cast
from munkres import Munkres
@ -46,9 +35,11 @@ from beets.autotag import (
TrackMatch,
hooks,
)
from beets.library import Item
from beets.util import plurality
if TYPE_CHECKING:
from beets.library import Item
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
# release and also on the track level to to remove the penalty for
@ -88,7 +79,7 @@ class Proposal(NamedTuple):
def current_metadata(
items: Iterable[Item],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
) -> tuple[dict[str, Any], dict[str, Any]]:
"""Extract the likely current metadata for an album given a list of its
items. Return two dictionaries:
- The most common value for each field.
@ -127,7 +118,7 @@ def current_metadata(
def assign_items(
items: Sequence[Item],
tracks: Sequence[TrackInfo],
) -> Tuple[Dict[Item, TrackInfo], List[Item], List[TrackInfo]]:
) -> tuple[dict[Item, TrackInfo], list[Item], list[TrackInfo]]:
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
objects, a set of extra Items, and a set of extra TrackInfo
@ -135,7 +126,7 @@ def assign_items(
of objects of the two types.
"""
# Construct the cost matrix.
costs: List[List[Distance]] = []
costs: list[list[Distance]] = []
for item in items:
row = []
for track in tracks:
@ -221,7 +212,7 @@ def track_distance(
def distance(
items: Sequence[Item],
album_info: AlbumInfo,
mapping: Dict[Item, TrackInfo],
mapping: dict[Item, TrackInfo],
) -> Distance:
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
@ -425,7 +416,7 @@ def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]:
def _add_candidate(
items: Sequence[Item],
results: Dict[Any, AlbumMatch],
results: dict[Any, AlbumMatch],
info: AlbumInfo,
):
"""Given a candidate AlbumInfo object, attempt to add the candidate
@ -477,10 +468,10 @@ def _add_candidate(
def tag_album(
items,
search_artist: Optional[str] = None,
search_album: Optional[str] = None,
search_ids: List[str] = [],
) -> Tuple[str, str, Proposal]:
search_artist: str | None = None,
search_album: str | None = None,
search_ids: list[str] = [],
) -> tuple[str, str, Proposal]:
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
@ -505,7 +496,7 @@ def tag_album(
log.debug("Tagging {0} - {1}", cur_artist, cur_album)
# The output result, keys are the MB album ID.
candidates: Dict[Any, AlbumMatch] = {}
candidates: dict[Any, AlbumMatch] = {}
# Search by explicit ID.
if search_ids:
@ -569,9 +560,9 @@ def tag_album(
def tag_item(
item,
search_artist: Optional[str] = None,
search_title: Optional[str] = None,
search_ids: Optional[List[str]] = None,
search_artist: str | None = None,
search_title: str | None = None,
search_ids: list[str] | None = None,
) -> Proposal:
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.
@ -584,7 +575,7 @@ def tag_item(
# Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs.
candidates = {}
rec: Optional[Recommendation] = None
rec: Recommendation | None = None
# First, try matching by MusicBrainz ID.
trackids = search_ids or [t for t in [item.mb_trackid] if t]

View file

@ -19,8 +19,9 @@ from __future__ import annotations
import re
import traceback
from collections import Counter
from collections.abc import Iterator, Sequence
from itertools import product
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, cast
from typing import Any, cast
from urllib.parse import urljoin
import musicbrainzngs
@ -131,7 +132,7 @@ def configure():
)
def _preferred_alias(aliases: List):
def _preferred_alias(aliases: list):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
@ -166,7 +167,7 @@ def _preferred_alias(aliases: List):
return matches[0]
def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
def _preferred_release_event(release: dict[str, Any]) -> tuple[str, str]:
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
@ -186,8 +187,8 @@ def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
def _multi_artist_credit(
credit: List[Dict], include_join_phrase: bool
) -> Tuple[List[str], List[str], List[str]]:
credit: list[dict], include_join_phrase: bool
) -> tuple[list[str], list[str], list[str]]:
"""Given a list representing an ``artist-credit`` block, accumulate
data into a triple of joined artist name lists: canonical, sort, and
credit.
@ -234,7 +235,7 @@ def _multi_artist_credit(
)
def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
def _flatten_artist_credit(credit: list[dict]) -> tuple[str, str, str]:
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
@ -249,12 +250,12 @@ def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
)
def _artist_ids(credit: List[Dict]) -> List[str]:
def _artist_ids(credit: list[dict]) -> list[str]:
"""
Given a list representing an ``artist-credit``,
return a list of artist IDs
"""
artist_ids: List[str] = []
artist_ids: list[str] = []
for el in credit:
if isinstance(el, dict):
artist_ids.append(el["artist"]["id"])
@ -276,11 +277,11 @@ def _get_related_artist_names(relations, relation_type):
def track_info(
recording: Dict,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
medium_total: Optional[int] = None,
recording: dict,
index: int | None = None,
medium: int | None = None,
medium_index: int | None = None,
medium_total: int | None = None,
) -> beets.autotag.hooks.TrackInfo:
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
@ -400,7 +401,7 @@ def _set_date_str(
setattr(info, key, date_num)
def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
def album_info(release: dict) -> beets.autotag.hooks.AlbumInfo:
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
@ -661,8 +662,8 @@ def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
def match_album(
artist: str,
album: str,
tracks: Optional[int] = None,
extra_tags: Optional[Dict[str, Any]] = None,
tracks: int | None = None,
extra_tags: dict[str, Any] | None = None,
) -> Iterator[beets.autotag.hooks.AlbumInfo]:
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
@ -739,7 +740,7 @@ def match_track(
yield track_info(recording)
def _parse_id(s: str) -> Optional[str]:
def _parse_id(s: str) -> str | None:
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
@ -756,8 +757,8 @@ def _is_translation(r):
def _find_actual_release_from_pseudo_release(
pseudo_rel: Dict,
) -> Optional[Dict]:
pseudo_rel: dict,
) -> dict | None:
try:
relations = pseudo_rel["release"]["release-relation-list"]
except KeyError:
@ -776,7 +777,7 @@ def _find_actual_release_from_pseudo_release(
def _merge_pseudo_and_actual_album(
pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo
) -> Optional[beets.autotag.hooks.AlbumInfo]:
) -> beets.autotag.hooks.AlbumInfo | None:
"""
Merges a pseudo release with its actual release.
@ -814,7 +815,7 @@ def _merge_pseudo_and_actual_album(
return merged
def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
def album_for_id(releaseid: str) -> beets.autotag.hooks.AlbumInfo | None:
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
@ -852,7 +853,7 @@ def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
return release
def track_for_id(releaseid: str) -> Optional[beets.autotag.hooks.TrackInfo]:
def track_for_id(releaseid: str) -> beets.autotag.hooks.TrackInfo | None:
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""

View file

@ -24,28 +24,9 @@ import threading
import time
from abc import ABC
from collections import defaultdict
from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence
from sqlite3 import Connection
from types import TracebackType
from typing import (
Any,
AnyStr,
Callable,
DefaultDict,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing import TYPE_CHECKING, Any, AnyStr, Callable, Generic, TypeVar, cast
from unidecode import unidecode
@ -63,6 +44,9 @@ from .query import (
TrueQuery,
)
if TYPE_CHECKING:
from types import TracebackType
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
@ -120,7 +104,7 @@ class FormattedMapping(Mapping[str, str]):
def get( # type: ignore
self,
key: str,
default: Optional[str] = None,
default: str | None = None,
) -> str:
"""Similar to Mapping.get(key, default), but always formats to str."""
if default is None:
@ -158,14 +142,14 @@ class FormattedMapping(Mapping[str, str]):
class LazyConvertDict:
"""Lazily convert types for attributes fetched from the database"""
def __init__(self, model_cls: "Model"):
def __init__(self, model_cls: Model):
"""Initialize the object empty"""
# FIXME: Dict[str, SQLiteType]
self._data: Dict[str, Any] = {}
self._data: dict[str, Any] = {}
self.model_cls = model_cls
self._converted: Dict[str, Any] = {}
self._converted: dict[str, Any] = {}
def init(self, data: Dict[str, Any]):
def init(self, data: dict[str, Any]):
"""Set the base data that should be lazily converted"""
self._data = data
@ -195,7 +179,7 @@ class LazyConvertDict:
if key in self._data:
del self._data[key]
def keys(self) -> List[str]:
def keys(self) -> list[str]:
"""Get a list of available field names for this object."""
return list(self._converted.keys()) + list(self._data.keys())
@ -213,14 +197,14 @@ class LazyConvertDict:
for key, value in values.items():
self[key] = value
def items(self) -> Iterable[Tuple[str, Any]]:
def items(self) -> Iterable[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key: str, default: Optional[Any] = None):
def get(self, key: str, default: Any | None = None):
"""Get the value for a given key or `default` if it does not
exist.
"""
@ -286,7 +270,7 @@ class Model(ABC):
"""The flex field SQLite table name.
"""
_fields: Dict[str, types.Type] = {}
_fields: dict[str, types.Type] = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
@ -296,16 +280,16 @@ class Model(ABC):
terms.
"""
_types: Dict[str, types.Type] = {}
_types: dict[str, types.Type] = {}
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
_sorts: Dict[str, Type[Sort]] = {}
_sorts: dict[str, type[Sort]] = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
_queries: Dict[str, Type[FieldQuery]] = {}
_queries: dict[str, type[FieldQuery]] = {}
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
@ -348,7 +332,7 @@ class Model(ABC):
return cls._relation._fields.keys() - cls.shared_db_fields
@classmethod
def _getters(cls: Type["Model"]):
def _getters(cls: type[Model]):
"""Return a mapping from field names to getter functions."""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
@ -363,7 +347,7 @@ class Model(ABC):
# Basic operation.
def __init__(self, db: Optional[Database] = None, **values):
def __init__(self, db: Database | None = None, **values):
"""Create a new object with an optional Database association and
initial field values.
"""
@ -378,10 +362,10 @@ class Model(ABC):
@classmethod
def _awaken(
cls: Type[AnyModel],
db: Optional[Database] = None,
fixed_values: Dict[str, Any] = {},
flex_values: Dict[str, Any] = {},
cls: type[AnyModel],
db: Database | None = None,
fixed_values: dict[str, Any] = {},
flex_values: dict[str, Any] = {},
) -> AnyModel:
"""Create an object with values drawn from the database.
@ -421,7 +405,7 @@ class Model(ABC):
return self._db
def copy(self) -> "Model":
def copy(self) -> Model:
"""Create a copy of the model object.
The field values and other state is duplicated, but the new copy
@ -537,7 +521,7 @@ class Model(ABC):
for key, value in values.items():
self[key] = value
def items(self) -> Iterator[Tuple[str, Any]]:
def items(self) -> Iterator[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
@ -579,7 +563,7 @@ class Model(ABC):
# Database interaction (CRUD methods).
def store(self, fields: Optional[Iterable[str]] = None):
def store(self, fields: Iterable[str] | None = None):
"""Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
@ -653,7 +637,7 @@ class Model(ABC):
f"DELETE FROM {self._flex_table} WHERE entity_id=?", (self.id,)
)
def add(self, db: Optional["Database"] = None):
def add(self, db: Database | None = None):
"""Add the object to the library database. This object must be
associated with a database; you can provide one via the `db`
parameter or use the currently associated database.
@ -692,7 +676,7 @@ class Model(ABC):
def evaluate_template(
self,
template: Union[str, functemplate.Template],
template: str | functemplate.Template,
for_path: bool = False,
) -> str:
"""Evaluate a template (a string or a `Template` object) using
@ -730,16 +714,16 @@ class Model(ABC):
cls,
field,
pattern,
query_cls: Type[FieldQuery] = MatchQuery,
query_cls: type[FieldQuery] = MatchQuery,
) -> FieldQuery:
"""Get a `FieldQuery` for this model."""
return query_cls(field, pattern, field in cls._fields)
@classmethod
def all_fields_query(
cls: Type["Model"],
cls: type[Model],
pats: Mapping,
query_cls: Type[FieldQuery] = MatchQuery,
query_cls: type[FieldQuery] = MatchQuery,
):
"""Get a query that matches many fields with different patterns.
@ -764,11 +748,11 @@ class Results(Generic[AnyModel]):
def __init__(
self,
model_class: Type[AnyModel],
rows: List[Mapping],
db: "Database",
model_class: type[AnyModel],
rows: list[Mapping],
db: Database,
flex_rows,
query: Optional[Query] = None,
query: Query | None = None,
sort=None,
):
"""Create a result set that will construct objects of type
@ -800,7 +784,7 @@ class Results(Generic[AnyModel]):
# The materialized objects corresponding to rows that have been
# consumed.
self._objects: List[AnyModel] = []
self._objects: list[AnyModel] = []
def _get_objects(self) -> Iterator[AnyModel]:
"""Construct and generate Model objects for they query. The
@ -852,7 +836,7 @@ class Results(Generic[AnyModel]):
def _get_indexed_flex_attrs(self) -> Mapping:
"""Index flexible attributes by the entity id they belong to"""
flex_values: Dict[int, Dict[str, Any]] = {}
flex_values: dict[int, dict[str, Any]] = {}
for row in self.flex_rows:
if row["entity_id"] not in flex_values:
flex_values[row["entity_id"]] = {}
@ -861,7 +845,7 @@ class Results(Generic[AnyModel]):
return flex_values
def _make_model(self, row, flex_values: Dict = {}) -> AnyModel:
def _make_model(self, row, flex_values: dict = {}) -> AnyModel:
"""Create a Model object for the given row"""
cols = dict(row)
values = {k: v for (k, v) in cols.items() if not k[:4] == "flex"}
@ -912,7 +896,7 @@ class Results(Generic[AnyModel]):
except StopIteration:
raise IndexError(f"result index {n} out of range")
def get(self) -> Optional[AnyModel]:
def get(self) -> AnyModel | None:
"""Return the first matching object, or None if no objects
match.
"""
@ -933,10 +917,10 @@ class Transaction:
current transaction.
"""
def __init__(self, db: "Database"):
def __init__(self, db: Database):
self.db = db
def __enter__(self) -> "Transaction":
def __enter__(self) -> Transaction:
"""Begin a transaction. This transaction may be created while
another is active in a different thread.
"""
@ -951,7 +935,7 @@ class Transaction:
def __exit__(
self,
exc_type: Type[Exception],
exc_type: type[Exception],
exc_value: Exception,
traceback: TracebackType,
):
@ -970,7 +954,7 @@ class Transaction:
self._mutated = False
self.db._db_lock.release()
def query(self, statement: str, subvals: Sequence = ()) -> List:
def query(self, statement: str, subvals: Sequence = ()) -> list:
"""Execute an SQL statement with substitution values and return
a list of rows from the database.
"""
@ -1010,7 +994,7 @@ class Database:
the backend.
"""
_models: Sequence[Type[Model]] = ()
_models: Sequence[type[Model]] = ()
"""The Model subclasses representing tables in this database.
"""
@ -1031,9 +1015,9 @@ class Database:
self.path = path
self.timeout = timeout
self._connections: Dict[int, sqlite3.Connection] = {}
self._tx_stacks: DefaultDict[int, List[Transaction]] = defaultdict(list)
self._extensions: List[str] = []
self._connections: dict[int, sqlite3.Connection] = {}
self._tx_stacks: defaultdict[int, list[Transaction]] = defaultdict(list)
self._extensions: list[str] = []
# A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources.
@ -1110,7 +1094,7 @@ class Database:
value = value.decode()
return re.search(pattern, str(value)) is not None
def bytelower(bytestring: Optional[AnyStr]) -> Optional[AnyStr]:
def bytelower(bytestring: AnyStr | None) -> AnyStr | None:
"""A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion.
@ -1138,7 +1122,7 @@ class Database:
conn.close()
@contextlib.contextmanager
def _tx_stack(self) -> Generator[List, None, None]:
def _tx_stack(self) -> Generator[list]:
"""A context manager providing access to the current thread's
transaction stack. The context manager synchronizes access to
the stack map. Transactions should never migrate across threads.
@ -1231,9 +1215,9 @@ class Database:
def _fetch(
self,
model_cls: Type[AnyModel],
query: Optional[Query] = None,
sort: Optional[Sort] = None,
model_cls: type[AnyModel],
query: Query | None = None,
sort: Sort | None = None,
) -> Results[AnyModel]:
"""Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a
@ -1289,9 +1273,9 @@ class Database:
def _get(
self,
model_cls: Type[AnyModel],
model_cls: type[AnyModel],
id,
) -> Optional[AnyModel]:
) -> AnyModel | None:
"""Get a Model object by its id or None if the id does not
exist.
"""

View file

@ -19,26 +19,12 @@ from __future__ import annotations
import re
import unicodedata
from abc import ABC, abstractmethod
from collections.abc import Collection, Iterator, MutableSequence, Sequence
from datetime import datetime, timedelta
from functools import reduce
from operator import mul, or_
from typing import (
TYPE_CHECKING,
Any,
Collection,
Generic,
Iterator,
List,
MutableSequence,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from re import Pattern
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union
from beets import util
@ -83,11 +69,11 @@ class Query(ABC):
"""An abstract class representing a query into the database."""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return set()
def clause(self) -> Tuple[Optional[str], Sequence[Any]]:
def clause(self) -> tuple[str | None, Sequence[Any]]:
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
@ -141,7 +127,7 @@ class FieldQuery(Query, Generic[P]):
)
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return {self.field_name}
@ -150,10 +136,10 @@ class FieldQuery(Query, Generic[P]):
self.pattern = pattern
self.fast = fast
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field, ()
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
if self.fast:
return self.col_clause()
else:
@ -188,7 +174,7 @@ class FieldQuery(Query, Generic[P]):
class MatchQuery(FieldQuery[AnySQLiteType]):
"""A query that looks for exact matches in an Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " = ?", [self.pattern]
@classmethod
@ -202,7 +188,7 @@ class NoneQuery(FieldQuery[None]):
def __init__(self, field, fast: bool = True):
super().__init__(field, None, fast)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " IS NULL", ()
def match(self, obj: Model) -> bool:
@ -239,7 +225,7 @@ class StringFieldQuery(FieldQuery[P]):
class StringQuery(StringFieldQuery[str]):
"""A query that matches a whole string in a specific Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
search = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
@ -257,7 +243,7 @@ class StringQuery(StringFieldQuery[str]):
class SubstringQuery(StringFieldQuery[str]):
"""A query that matches a substring in a specific Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
pattern = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
@ -292,7 +278,7 @@ class RegexpQuery(StringFieldQuery[Pattern[str]]):
super().__init__(field_name, pattern_re, fast)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return f" regexp({self.field}, ?)", [self.pattern.pattern]
@staticmethod
@ -333,7 +319,7 @@ class BytesQuery(FieldQuery[bytes]):
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field_name: str, pattern: Union[bytes, str, memoryview]):
def __init__(self, field_name: str, pattern: bytes | str | memoryview):
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
@ -351,7 +337,7 @@ class BytesQuery(FieldQuery[bytes]):
super().__init__(field_name, bytes_pattern)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " = ?", [self.buf_pattern]
@classmethod
@ -368,7 +354,7 @@ class NumericQuery(FieldQuery[str]):
a float.
"""
def _convert(self, s: str) -> Union[float, int, None]:
def _convert(self, s: str) -> float | int | None:
"""Convert a string to a numeric type (float or int).
Return None if `s` is empty.
@ -416,7 +402,7 @@ class NumericQuery(FieldQuery[str]):
return False
return True
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
if self.point is not None:
return self.field + "=?", (self.point,)
else:
@ -444,7 +430,7 @@ class InQuery(Generic[AnySQLiteType], FieldQuery[Sequence[AnySQLiteType]]):
def subvals(self) -> Sequence[SQLiteType]:
return self.pattern
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
placeholders = ", ".join(["?"] * len(self.subvals))
return f"{self.field_name} IN ({placeholders})", self.subvals
@ -461,7 +447,7 @@ class CollectionQuery(Query):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return reduce(or_, (sq.field_names for sq in self.subqueries))
@ -485,7 +471,7 @@ class CollectionQuery(Query):
def clause_with_joiner(
self,
joiner: str,
) -> Tuple[Optional[str], Sequence[SQLiteType]]:
) -> tuple[str | None, Sequence[SQLiteType]]:
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
@ -521,11 +507,11 @@ class AnyFieldQuery(CollectionQuery):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return set(self.fields)
def __init__(self, pattern, fields, cls: Type[FieldQuery]):
def __init__(self, pattern, fields, cls: type[FieldQuery]):
self.pattern = pattern
self.fields = fields
self.query_class = cls
@ -536,7 +522,7 @@ class AnyFieldQuery(CollectionQuery):
# TYPING ERROR
super().__init__(subqueries)
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
return self.clause_with_joiner("or")
def match(self, obj: Model) -> bool:
@ -575,7 +561,7 @@ class MutableCollectionQuery(CollectionQuery):
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
return self.clause_with_joiner("and")
def match(self, obj: Model) -> bool:
@ -585,7 +571,7 @@ class AndQuery(MutableCollectionQuery):
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
return self.clause_with_joiner("or")
def match(self, obj: Model) -> bool:
@ -598,14 +584,14 @@ class NotQuery(Query):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return self.subquery.field_names
def __init__(self, subquery):
self.subquery = subquery
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
clause, subvals = self.subquery.clause()
if clause:
return f"not ({clause})", subvals
@ -630,7 +616,7 @@ class NotQuery(Query):
class TrueQuery(Query):
"""A query that always matches."""
def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "1", ()
def match(self, obj: Model) -> bool:
@ -640,7 +626,7 @@ class TrueQuery(Query):
class FalseQuery(Query):
"""A query that never matches."""
def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "0", ()
def match(self, obj: Model) -> bool:
@ -650,7 +636,7 @@ class FalseQuery(Query):
# Time/date queries.
def _parse_periods(pattern: str) -> Tuple[Optional[Period], Optional[Period]]:
def _parse_periods(pattern: str) -> tuple[Period | None, Period | None]:
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
@ -696,7 +682,7 @@ class Period:
self.precision = precision
@classmethod
def parse(cls: Type["Period"], string: str) -> Optional["Period"]:
def parse(cls: type[Period], string: str) -> Period | None:
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
@ -715,7 +701,7 @@ class Period:
def find_date_and_format(
string: str,
) -> Union[Tuple[None, None], Tuple[datetime, int]]:
) -> tuple[None, None] | tuple[datetime, int]:
for ord, format in enumerate(cls.date_formats):
for format_option in format:
try:
@ -729,7 +715,7 @@ class Period:
if not string:
return None
date: Optional[datetime]
date: datetime | None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
@ -789,7 +775,7 @@ class DateInterval:
A right endpoint of None means towards infinity.
"""
def __init__(self, start: Optional[datetime], end: Optional[datetime]):
def __init__(self, start: datetime | None, end: datetime | None):
if start is not None and end is not None and not start < end:
raise ValueError(
"start date {} is not before end date {}".format(start, end)
@ -800,8 +786,8 @@ class DateInterval:
@classmethod
def from_periods(
cls,
start: Optional[Period],
end: Optional[Period],
start: Period | None,
end: Period | None,
) -> DateInterval:
"""Create an interval with two Periods as the endpoints."""
end_date = end.open_right_endpoint() if end is not None else None
@ -843,7 +829,7 @@ class DateQuery(FieldQuery[str]):
_clause_tmpl = "{0} {1} ?"
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
clause_parts = []
subvals = []
@ -875,7 +861,7 @@ class DurationQuery(NumericQuery):
or M:SS time interval.
"""
def _convert(self, s: str) -> Optional[float]:
def _convert(self, s: str) -> float | None:
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
@ -902,13 +888,13 @@ class Sort:
the database.
"""
def order_clause(self) -> Optional[str]:
def order_clause(self) -> str | None:
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items: List) -> List:
def sort(self, items: list) -> list:
"""Sort the list of objects and return a list."""
return sorted(items)
@ -931,7 +917,7 @@ class Sort:
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts."""
def __init__(self, sorts: Optional[List[Sort]] = None):
def __init__(self, sorts: list[Sort] | None = None):
self.sorts = sorts or []
def add_sort(self, sort: Sort):
@ -1061,7 +1047,7 @@ class SlowFieldSort(FieldSort):
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items: List) -> List:
def sort(self, items: list) -> list:
return items
def __nonzero__(self) -> bool:

View file

@ -14,12 +14,18 @@
"""Parsing of strings into DBCore queries."""
from __future__ import annotations
import itertools
import re
from typing import Collection, Dict, List, Optional, Sequence, Tuple, Type
from typing import TYPE_CHECKING
from . import Model, query
from .query import Sort
if TYPE_CHECKING:
from collections.abc import Collection, Sequence
from .query import Sort
PARSE_QUERY_PART_REGEX = re.compile(
# Non-capturing optional segment for the keyword.
@ -35,10 +41,10 @@ PARSE_QUERY_PART_REGEX = re.compile(
def parse_query_part(
part: str,
query_classes: Dict[str, Type[query.FieldQuery]] = {},
prefixes: Dict = {},
default_class: Type[query.SubstringQuery] = query.SubstringQuery,
) -> Tuple[Optional[str], str, Type[query.FieldQuery], bool]:
query_classes: dict[str, type[query.FieldQuery]] = {},
prefixes: dict = {},
default_class: type[query.SubstringQuery] = query.SubstringQuery,
) -> tuple[str | None, str, type[query.FieldQuery], bool]:
"""Parse a single *query part*, which is a chunk of a complete query
string representing a single criterion.
@ -104,8 +110,8 @@ def parse_query_part(
def construct_query_part(
model_cls: Type[Model],
prefixes: Dict,
model_cls: type[Model],
prefixes: dict,
query_part: str,
) -> query.Query:
"""Parse a *query part* string and return a :class:`Query` object.
@ -127,7 +133,7 @@ def construct_query_part(
# Use `model_cls` to build up a map from field (or query) names to
# `Query` classes.
query_classes: Dict[str, Type[query.FieldQuery]] = {}
query_classes: dict[str, type[query.FieldQuery]] = {}
for k, t in itertools.chain(
model_cls._fields.items(), model_cls._types.items()
):
@ -171,9 +177,9 @@ def construct_query_part(
# TYPING ERROR
def query_from_strings(
query_cls: Type[query.CollectionQuery],
model_cls: Type[Model],
prefixes: Dict,
query_cls: type[query.CollectionQuery],
model_cls: type[Model],
prefixes: dict,
query_parts: Collection[str],
) -> query.Query:
"""Creates a collection query of type `query_cls` from a list of
@ -189,7 +195,7 @@ def query_from_strings(
def construct_sort_part(
model_cls: Type[Model],
model_cls: type[Model],
part: str,
case_insensitive: bool = True,
) -> Sort:
@ -220,7 +226,7 @@ def construct_sort_part(
def sort_from_strings(
model_cls: Type[Model],
model_cls: type[Model],
sort_parts: Sequence[str],
case_insensitive: bool = True,
) -> Sort:
@ -239,11 +245,11 @@ def sort_from_strings(
def parse_sorted_query(
model_cls: Type[Model],
parts: List[str],
prefixes: Dict = {},
model_cls: type[Model],
parts: list[str],
prefixes: dict = {},
case_insensitive: bool = True,
) -> Tuple[query.Query, Sort]:
) -> tuple[query.Query, Sort]:
"""Given a list of strings, create the `Query` and `Sort` that they
represent.
"""

View file

@ -14,9 +14,11 @@
"""Representation of type information for DBCore model fields."""
from __future__ import annotations
import typing
from abc import ABC
from typing import Any, Generic, List, TypeVar, Union, cast
from typing import Any, Generic, TypeVar, cast
from beets.util import str2bool
@ -49,11 +51,11 @@ class Type(ABC, Generic[T, N]):
"""The SQLite column type for the value.
"""
query: typing.Type[FieldQuery] = SubstringQuery
query: type[FieldQuery] = SubstringQuery
"""The `Query` subclass to be used when querying the field.
"""
model_type: typing.Type[T]
model_type: type[T]
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
@ -69,7 +71,7 @@ class Type(ABC, Generic[T, N]):
# have a field null_type similar to `model_type` and use that here.
return cast(N, self.model_type())
def format(self, value: Union[N, T]) -> str:
def format(self, value: N | T) -> str:
"""Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation.
"""
@ -83,7 +85,7 @@ class Type(ABC, Generic[T, N]):
else:
return str(value)
def parse(self, string: str) -> Union[T, N]:
def parse(self, string: str) -> T | N:
"""Parse a (possibly human-written) string and return the
indicated value of this type.
"""
@ -92,7 +94,7 @@ class Type(ABC, Generic[T, N]):
except ValueError:
return self.null
def normalize(self, value: Any) -> Union[T, N]:
def normalize(self, value: Any) -> T | N:
"""Given a value that will be assigned into a field of this
type, normalize the value to have the appropriate type. This
base implementation only reinterprets `None`.
@ -107,8 +109,8 @@ class Type(ABC, Generic[T, N]):
def from_sql(
self,
sql_value: Union[None, int, float, str, bytes],
) -> Union[T, N]:
sql_value: None | int | float | str | bytes,
) -> T | N:
"""Receives the value stored in the SQL backend and return the
value to be stored in the model.
@ -129,7 +131,7 @@ class Type(ABC, Generic[T, N]):
else:
return self.normalize(sql_value)
def to_sql(self, model_value: Any) -> Union[None, int, float, str, bytes]:
def to_sql(self, model_value: Any) -> None | int | float | str | bytes:
"""Convert a value as stored in the model object to a value used
by the database adapter.
"""
@ -154,7 +156,7 @@ class BaseInteger(Type[int, N]):
query = NumericQuery
model_type = int
def normalize(self, value: Any) -> Union[int, N]:
def normalize(self, value: Any) -> int | N:
try:
return self.model_type(round(float(value)))
except ValueError:
@ -183,7 +185,7 @@ class BasePaddedInt(BaseInteger[N]):
def __init__(self, digits: int):
self.digits = digits
def format(self, value: Union[int, N]) -> str:
def format(self, value: int | N) -> str:
return "{0:0{1}d}".format(value or 0, self.digits)
@ -232,13 +234,13 @@ class BaseFloat(Type[float, N]):
"""
sql = "REAL"
query: typing.Type[FieldQuery[Any]] = NumericQuery
query: type[FieldQuery[Any]] = NumericQuery
model_type = float
def __init__(self, digits: int = 1):
self.digits = digits
def format(self, value: Union[float, N]) -> str:
def format(self, value: float | N) -> str:
return "{0:.{1}f}".format(value or 0, self.digits)
@ -264,7 +266,7 @@ class BaseString(Type[T, N]):
sql = "TEXT"
query = SubstringQuery
def normalize(self, value: Any) -> Union[T, N]:
def normalize(self, value: Any) -> T | N:
if value is None:
return self.null
else:
@ -277,7 +279,7 @@ class String(BaseString[str, Any]):
model_type = str
class DelimitedString(BaseString[List[str], List[str]]):
class DelimitedString(BaseString[list[str], list[str]]):
"""A list of Unicode strings, represented in-database by a single string
containing delimiter-separated values.
"""
@ -287,7 +289,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
def __init__(self, delimiter: str):
self.delimiter = delimiter
def format(self, value: List[str]):
def format(self, value: list[str]):
return self.delimiter.join(value)
def parse(self, string: str):
@ -295,7 +297,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
return []
return string.split(self.delimiter)
def to_sql(self, model_value: List[str]):
def to_sql(self, model_value: list[str]):
return self.delimiter.join(model_value)

View file

@ -27,7 +27,7 @@ import sys
import textwrap
import traceback
from difflib import SequenceMatcher
from typing import Any, Callable, List
from typing import Any, Callable
import confuse
@ -1450,7 +1450,7 @@ class Subcommand:
invoked by a SubcommandOptionParser.
"""
func: Callable[[library.Library, optparse.Values, List[str]], Any]
func: Callable[[library.Library, optparse.Values, list[str]], Any]
def __init__(self, name, parser=None, help="", aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke

View file

@ -19,9 +19,10 @@ interface.
import os
import re
from collections import Counter
from collections.abc import Sequence
from itertools import chain
from platform import python_version
from typing import Any, NamedTuple, Sequence
from typing import Any, NamedTuple
import beets
from beets import autotag, config, importer, library, logging, plugins, ui, util

View file

@ -33,15 +33,13 @@ from enum import Enum
from importlib import import_module
from multiprocessing.pool import ThreadPool
from pathlib import Path
from re import Pattern
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Iterator,
NamedTuple,
Pattern,
Sequence,
TypeVar,
Union,
)
@ -51,6 +49,7 @@ from unidecode import unidecode
from beets.util import hidden
if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
from logging import Logger
if sys.version_info >= (3, 10):

View file

@ -16,14 +16,14 @@
libraries.
"""
from typing import Any, Dict, NamedTuple
from typing import Any, NamedTuple
from beets import util
class Node(NamedTuple):
files: Dict[str, Any]
dirs: Dict[str, Any]
files: dict[str, Any]
dirs: dict[str, Any]
def _insert(node, path, itemid):

View file

@ -17,9 +17,10 @@
import os
import re
import sys
from collections.abc import Mapping
from dataclasses import dataclass
from mimetypes import guess_type
from typing import ClassVar, Mapping, Type
from typing import ClassVar
from flask import (
Blueprint,
@ -127,7 +128,7 @@ ARTIST_ATTR_MAP = {
class AURADocument:
"""Base class for building AURA documents."""
model_cls: ClassVar[Type[LibModel]]
model_cls: ClassVar[type[LibModel]]
lib: Library
args: Mapping[str, str]
@ -153,7 +154,7 @@ class AURADocument:
return make_response(document, status)
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:
@ -374,7 +375,7 @@ class TrackDocument(AURADocument):
return self.lib.items(query, sort)
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:

View file

@ -15,15 +15,18 @@
from __future__ import annotations
from typing import Iterable
from collections.abc import Iterable
from typing import TYPE_CHECKING
import librosa
from beets.importer import ImportTask
from beets.library import Item, Library
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, should_write
if TYPE_CHECKING:
from beets.importer import ImportTask
from beets.library import Item, Library
class AutoBPMPlugin(BeetsPlugin):
def __init__(self) -> None:

View file

@ -26,7 +26,6 @@ import sys
import time
import traceback
from string import Template
from typing import List
from mediafile import MediaFile
@ -1059,7 +1058,7 @@ class Command:
raise BPDError(ERROR_SYSTEM, "server error", self.name)
class CommandList(List[Command]):
class CommandList(list[Command]):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.

View file

@ -15,7 +15,7 @@
import fnmatch
import os
import tempfile
from typing import Sequence
from collections.abc import Sequence
import beets
from beets.dbcore.query import InQuery

View file

@ -13,10 +13,11 @@
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import collections
import enum
import math
import optparse
import os
import queue
import signal
@ -25,32 +26,24 @@ import sys
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from logging import Logger
from multiprocessing.pool import ThreadPool
from threading import Event, Thread
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from confuse import ConfigView
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast
from beets import ui
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Item, Library
from beets.plugins import BeetsPlugin
from beets.util import command_output, displayable_path, syspath
if TYPE_CHECKING:
import optparse
from collections.abc import Sequence
from logging import Logger
from confuse import ConfigView
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Item, Library
# Utilities.
@ -69,7 +62,7 @@ class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
loading the required plugins."""
def call(args: List[Any], log: Logger, **kwargs: Any):
def call(args: list[Any], log: Logger, **kwargs: Any):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
@ -134,9 +127,9 @@ class RgTask:
def __init__(
self,
items: Sequence[Item],
album: Optional[Album],
album: Album | None,
target_level: float,
peak_method: Optional[PeakMethod],
peak_method: PeakMethod | None,
backend_name: str,
log: Logger,
):
@ -146,8 +139,8 @@ class RgTask:
self.peak_method = peak_method
self.backend_name = backend_name
self._log = log
self.album_gain: Optional[Gain] = None
self.track_gains: Optional[List[Gain]] = None
self.album_gain: Gain | None = None
self.track_gains: list[Gain] | None = None
def _store_track_gain(self, item: Item, track_gain: Gain):
"""Store track gain for a single item in the database."""
@ -236,7 +229,7 @@ class R128Task(RgTask):
def __init__(
self,
items: Sequence[Item],
album: Optional[Album],
album: Album | None,
target_level: float,
backend_name: str,
log: Logger,
@ -348,7 +341,7 @@ class FfmpegBackend(Backend):
# analyse tracks
# Gives a list of tuples (track_gain, track_n_blocks)
track_results: List[Tuple[Gain, int]] = [
track_results: list[tuple[Gain, int]] = [
self._analyse_item(
item,
task.target_level,
@ -358,7 +351,7 @@ class FfmpegBackend(Backend):
for item in task.items
]
track_gains: List[Gain] = [tg for tg, _nb in track_results]
track_gains: list[Gain] = [tg for tg, _nb in track_results]
# Album peak is maximum track peak
album_peak = max(tg.peak for tg in track_gains)
@ -409,8 +402,8 @@ class FfmpegBackend(Backend):
return task
def _construct_cmd(
self, item: Item, peak_method: Optional[PeakMethod]
) -> List[Union[str, bytes]]:
self, item: Item, peak_method: PeakMethod | None
) -> list[str | bytes]:
"""Construct the shell command to analyse items."""
return [
self._ffmpeg_path,
@ -433,9 +426,9 @@ class FfmpegBackend(Backend):
self,
item: Item,
target_level: float,
peak_method: Optional[PeakMethod],
peak_method: PeakMethod | None,
count_blocks: bool = True,
) -> Tuple[Gain, int]:
) -> tuple[Gain, int]:
"""Analyse item. Return a pair of a Gain object and the number
of gating blocks above the threshold.
@ -647,7 +640,7 @@ class CommandBackend(Backend):
items: Sequence[Item],
target_level: float,
is_album: bool,
) -> List[Gain]:
) -> list[Gain]:
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
@ -667,7 +660,7 @@ class CommandBackend(Backend):
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd: List[Union[bytes, str]] = [self.command, "-o", "-s", "s"]
cmd: list[bytes | str] = [self.command, "-o", "-s", "s"]
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + ["-k"]
@ -685,7 +678,7 @@ class CommandBackend(Backend):
output, len(items) + (1 if is_album else 0)
)
def parse_tool_output(self, text: bytes, num_lines: int) -> List[Gain]:
def parse_tool_output(self, text: bytes, num_lines: int) -> list[Gain]:
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
@ -771,7 +764,7 @@ class GStreamerBackend(Backend):
self._main_loop = self.GLib.MainLoop()
self._files: List[bytes] = []
self._files: list[bytes] = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
@ -811,7 +804,7 @@ class GStreamerBackend(Backend):
self._files = [i.path for i in items]
# FIXME: Turn this into DefaultDict[bytes, Gain]
self._file_tags: DefaultDict[bytes, Dict[str, float]] = (
self._file_tags: collections.defaultdict[bytes, dict[str, float]] = (
collections.defaultdict(dict)
)
@ -1192,20 +1185,20 @@ class ExceptionWatcher(Thread):
# whether `_stopevent` is set
pass
def join(self, timeout: Optional[float] = None):
def join(self, timeout: float | None = None):
self._stopevent.set()
Thread.join(self, timeout)
# Main plugin logic.
BACKEND_CLASSES: List[Type[Backend]] = [
BACKEND_CLASSES: list[type[Backend]] = [
CommandBackend,
GStreamerBackend,
AudioToolsBackend,
FfmpegBackend,
]
BACKENDS: Dict[str, Type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
BACKENDS: dict[str, type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
class ReplayGainPlugin(BeetsPlugin):
@ -1332,7 +1325,7 @@ class ReplayGainPlugin(BeetsPlugin):
self,
items: Sequence[Item],
use_r128: bool,
album: Optional[Album] = None,
album: Album | None = None,
) -> RgTask:
if use_r128:
return R128Task(
@ -1375,7 +1368,7 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.info("analyzing {0}", album)
discs: Dict[int, List[Item]] = {}
discs: dict[int, list[Item]] = {}
if self.config["per_disc"].get(bool):
for item in album.items():
if discs.get(item.disc) is None:
@ -1447,8 +1440,8 @@ class ReplayGainPlugin(BeetsPlugin):
def _apply(
self,
func: Callable[..., AnyRgTask],
args: List[Any],
kwds: Dict[str, Any],
args: list[Any],
kwds: dict[str, Any],
callback: Callable[[AnyRgTask], Any],
):
if self.pool is not None:
@ -1525,7 +1518,7 @@ class ReplayGainPlugin(BeetsPlugin):
self,
lib: Library,
opts: optparse.Values,
args: List[str],
args: list[str],
):
try:
write = ui.should_write(opts.write)
@ -1562,7 +1555,7 @@ class ReplayGainPlugin(BeetsPlugin):
# Silence interrupt exceptions
pass
def commands(self) -> List[ui.Subcommand]:
def commands(self) -> list[ui.Subcommand]:
"""Return the "replaygain" ui subcommand."""
cmd = ui.Subcommand("replaygain", help="analyze for ReplayGain")
cmd.parser.add_album_option()

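For context on `parse_tool_output` above: it turns the tab-delimited rows printed by mp3gain/aacgain into gain objects. A rough, hypothetical sketch of such a parser using the builtin-generic annotations this commit adopts — the column layout and peak scaling below are assumptions, not the plugin's exact behaviour:

```python
from __future__ import annotations

from typing import NamedTuple


class Gain(NamedTuple):
    gain: float  # dB adjustment
    peak: float  # normalised sample peak


def parse_tool_output(text: str, num_lines: int) -> list[Gain]:
    """Parse `num_lines` tab-separated rows of per-file analysis output."""
    gains: list[Gain] = []
    for line in text.splitlines()[:num_lines]:
        # Assumed columns: file, mp3 gain, dB gain, max amplitude, ...
        fields = line.split("\t")
        gains.append(Gain(gain=float(fields[2]), peak=float(fields[3]) / 32768))
    return gains


print(parse_tool_output("song.mp3\t2\t3.50\t16384.0\t0\t0", 1))
# [Gain(gain=3.5, peak=0.5)]
```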
View file

@ -15,7 +15,6 @@
"""Moves patterns in path formats (suitable for moving articles)."""
import re
from typing import List
from beets.plugins import BeetsPlugin
@ -28,7 +27,7 @@ FORMAT = "{0}, {1}"
class ThePlugin(BeetsPlugin):
patterns: List[str] = []
patterns: list[str] = []
def __init__(self):
super().__init__()

View file

@ -6,6 +6,9 @@ Changelog goes here! Please add your entry to the bottom of one of the lists bel
Unreleased
----------
Beets now requires Python 3.9 or later since support for EOL Python 3.8 has
been dropped.
New features:
Bug fixes:
@ -19,6 +22,8 @@ Bug fixes:
For packagers:
* The minimum supported Python version is now 3.9.
Other changes:
* Release workflow: fix the issue where the new release tag is created for the
@ -46,8 +51,6 @@ Bug fixes:
* Bring back test files and the manual to the source distribution tarball.
:bug:`5513`
For packagers:
Other changes:
* Changed `bitesize` label to `good first issue`. Our `contribute`_ page is now

poetry.lock generated

File diff suppressed because it is too large

View file

@ -17,7 +17,6 @@ classifiers = [
"Environment :: Web Environment",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
@ -40,7 +39,7 @@ Changelog = "https://github.com/beetbox/beets/blob/master/docs/changelog.rst"
"Bug Tracker" = "https://github.com/beetbox/beets/issues"
[tool.poetry.dependencies]
python = ">=3.8,<4"
python = ">=3.9,<4"
colorama = { version = "*", markers = "sys_platform == 'win32'" }
confuse = ">=1.5.0"
@ -241,7 +240,7 @@ done
interpreter = "zsh"
[tool.ruff]
target-version = "py38"
target-version = "py39"
line-length = 80
[tool.ruff.lint]
@ -256,6 +255,7 @@ select = [
"PT", # flake8-pytest-style
# "RUF", # ruff
# "UP", # pyupgrade
"TCH", # flake8-type-checking
"W", # pycodestyle
]
[tool.ruff.lint.per-file-ignores]

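Bumping `target-version` to `py39` (and enabling the `TCH` rules) lets ruff steer code toward the syntax this commit standardises on: PEP 585 builtin generics and PEP 604 unions in annotations, which are safe on 3.9 as long as `from __future__ import annotations` keeps them unevaluated. A small illustrative example:

```python
from __future__ import annotations


def first_track(titles: list[str] | None) -> str | None:
    # list[str] | None replaces Optional[List[str]] from the typing module.
    return titles[0] if titles else None


print(first_track(["Intro", "Outro"]))  # Intro
print(first_track(None))                # None
```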
View file

@ -87,23 +87,29 @@ class AdvancedRewritePluginTest(PluginTestCase):
assert item.artists == ["유빈", "미미"]
def test_fail_when_replacements_empty(self):
with pytest.raises(
UserError,
match="Advanced rewrites must have at least one replacement",
), self.configure_plugin([{"match": "artist:A", "replacements": {}}]):
with (
pytest.raises(
UserError,
match="Advanced rewrites must have at least one replacement",
),
self.configure_plugin([{"match": "artist:A", "replacements": {}}]),
):
pass
def test_fail_when_rewriting_single_valued_field_with_list(self):
with pytest.raises(
UserError,
match="Field artist is not a multi-valued field but a list was given: C, D",
), self.configure_plugin(
[
{
"match": "artist:'A & B'",
"replacements": {"artist": ["C", "D"]},
},
]
with (
pytest.raises(
UserError,
match="Field artist is not a multi-valued field but a list was given: C, D", # noqa: E501
),
self.configure_plugin(
[
{
"match": "artist:'A & B'",
"replacements": {"artist": ["C", "D"]},
},
]
),
):
pass

View file

@ -14,7 +14,7 @@
"""Tests for the 'albumtypes' plugin."""
from typing import Sequence, Tuple
from collections.abc import Sequence
from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.test.helper import PluginTestCase
@ -91,7 +91,7 @@ class AlbumTypesPluginTest(PluginTestCase):
def _set_config(
self,
types: Sequence[Tuple[str, str]],
types: Sequence[tuple[str, str]],
ignore_va: Sequence[str],
bracket: str,
):

View file

@ -1,7 +1,7 @@
import os
from http import HTTPStatus
from pathlib import Path
from typing import Any, Dict, Optional
from typing import Any, Optional
import pytest
from flask.testing import Client
@ -59,8 +59,8 @@ class TestAuraResponse:
"""Return a callback accepting `endpoint` and `params` parameters."""
def get(
endpoint: str, params: Dict[str, str]
) -> Optional[Dict[str, Any]]:
endpoint: str, params: dict[str, str]
) -> Optional[dict[str, Any]]:
"""Add additional `params` and GET the given endpoint.
`include` parameter is added to every call to check that the

View file

@ -19,11 +19,14 @@ import os.path
import sys
import unittest
from contextlib import contextmanager
from typing import Callable, Iterator
from typing import TYPE_CHECKING, Callable
from beets import plugins
from beets.test.helper import PluginTestCase, capture_log
if TYPE_CHECKING:
from collections.abc import Iterator
class HookTestCase(PluginTestCase):
plugin = "hook"

View file

@ -102,9 +102,12 @@ class ZeroPluginTest(PluginTestCase):
item.write()
item_id = item.id
with self.configure_plugin(
{"fields": ["comments"], "update_database": True, "auto": False}
), control_stdin("y"):
with (
self.configure_plugin(
{"fields": ["comments"], "update_database": True, "auto": False}
),
control_stdin("y"),
):
self.run_command("zero")
mf = MediaFile(syspath(item.path))
@ -122,9 +125,16 @@ class ZeroPluginTest(PluginTestCase):
item.write()
item_id = item.id
with self.configure_plugin(
{"fields": ["comments"], "update_database": False, "auto": False}
), control_stdin("y"):
with (
self.configure_plugin(
{
"fields": ["comments"],
"update_database": False,
"auto": False,
}
),
control_stdin("y"),
):
self.run_command("zero")
mf = MediaFile(syspath(item.path))
@ -193,9 +203,12 @@ class ZeroPluginTest(PluginTestCase):
item_id = item.id
with self.configure_plugin(
{"fields": ["year"], "keep_fields": ["comments"]}
), control_stdin("y"):
with (
self.configure_plugin(
{"fields": ["year"], "keep_fields": ["comments"]}
),
control_stdin("y"),
):
self.run_command("zero")
item = self.lib.get_item(item_id)
@ -242,9 +255,12 @@ class ZeroPluginTest(PluginTestCase):
)
item.write()
item_id = item.id
with self.configure_plugin(
{"fields": ["comments"], "update_database": True, "auto": False}
), control_stdin("n"):
with (
self.configure_plugin(
{"fields": ["comments"], "update_database": True, "auto": False}
),
control_stdin("n"),
):
self.run_command("zero")
mf = MediaFile(syspath(item.path))

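The reformatted tests above group several context managers inside one parenthesized `with` block; the parenthesized form was formally documented in Python 3.10, though CPython 3.9's parser already accepts it. A standalone sketch of the same style — these helpers are invented for illustration and are not beets test fixtures:

```python
from contextlib import contextmanager


@contextmanager
def fake_config(settings: dict):
    # Stand-in for configure_plugin(...).
    yield settings


@contextmanager
def fake_stdin(answer: str):
    # Stand-in for control_stdin(...).
    yield answer


with (
    fake_config({"fields": ["comments"], "auto": False}) as cfg,
    fake_stdin("y") as reply,
):
    print(cfg["fields"], reply)  # ['comments'] y
```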
View file

@ -112,9 +112,10 @@ class ConfigCommandTest(BeetsTestCase):
def test_config_editor_not_found(self):
msg_match = "Could not edit configuration.*here is problem"
with patch(
"os.execlp", side_effect=OSError("here is problem")
), pytest.raises(ui.UserError, match=msg_match):
with (
patch("os.execlp", side_effect=OSError("here is problem")),
pytest.raises(ui.UserError, match=msg_match),
):
self.run_command("config", "-e")
def test_edit_invalid_config_file(self):