Use PEP 585 lowercase collections typing annotations

Šarūnas Nejus 2024-11-17 05:57:54 +00:00
parent 7be8f9c97a
commit 51f9dd229e
15 changed files with 168 additions and 198 deletions
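For context, here is a minimal illustrative sketch of the style change this commit applies throughout; the function and variable names below are hypothetical and not taken from the diff. On Python 3.9+ (PEP 585) the built-in collection types are subscriptable, so the typing aliases Dict, List, Tuple, Set, Type and DefaultDict become unnecessary:

# Before (pre-PEP 585), the generic aliases had to come from typing:
#   from typing import Dict, List, Tuple
#   def sorted_weights(weights: Dict[str, float]) -> List[Tuple[str, float]]: ...

# After (Python 3.9+, PEP 585): the built-ins are used directly, and only
# non-collection helpers such as Optional still come from typing.
from typing import Optional

def sorted_weights(weights: dict[str, float]) -> list[tuple[str, float]]:
    # Return (key, weight) pairs ordered from heaviest to lightest.
    return sorted(weights.items(), key=lambda kv: kv[1], reverse=True)

def first_key(weights: dict[str, float]) -> Optional[str]:
    # Optional/Union are untouched by PEP 585; the `str | None` spelling
    # needs PEP 604 (Python 3.10), so they stay imported from typing here.
    return next(iter(weights), None)

print(sorted_weights({"artist": 3.0, "album": 6.0}))

This is why the diffs below drop Dict, List, Tuple, Set, Type and DefaultDict from the typing imports while leaving Optional, Union, Callable and friends in place.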

View file

@ -21,13 +21,10 @@ from functools import total_ordering
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Union,
cast,
@ -47,7 +44,7 @@ V = TypeVar("V")
# Classes used to represent candidate options.
class AttrDict(Dict[str, V]):
class AttrDict(dict[str, V]):
"""A dictionary that supports attribute ("dot") access, so `d.field`
is equivalent to `d['field']`.
"""
@ -82,16 +79,16 @@ class AlbumInfo(AttrDict):
# TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
tracks: List[TrackInfo],
tracks: list[TrackInfo],
album: Optional[str] = None,
album_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
artists: Optional[List[str]] = None,
artists_ids: Optional[List[str]] = None,
artists: Optional[list[str]] = None,
artists_ids: Optional[list[str]] = None,
asin: Optional[str] = None,
albumtype: Optional[str] = None,
albumtypes: Optional[List[str]] = None,
albumtypes: Optional[list[str]] = None,
va: bool = False,
year: Optional[int] = None,
month: Optional[int] = None,
@ -100,7 +97,7 @@ class AlbumInfo(AttrDict):
barcode: Optional[str] = None,
mediums: Optional[int] = None,
artist_sort: Optional[str] = None,
artists_sort: Optional[List[str]] = None,
artists_sort: Optional[list[str]] = None,
releasegroup_id: Optional[str] = None,
release_group_title: Optional[str] = None,
catalognum: Optional[str] = None,
@ -114,7 +111,7 @@ class AlbumInfo(AttrDict):
albumdisambig: Optional[str] = None,
releasegroupdisambig: Optional[str] = None,
artist_credit: Optional[str] = None,
artists_credit: Optional[List[str]] = None,
artists_credit: Optional[list[str]] = None,
original_year: Optional[int] = None,
original_month: Optional[int] = None,
original_day: Optional[int] = None,
@ -195,18 +192,18 @@ class TrackInfo(AttrDict):
release_track_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
artists: Optional[List[str]] = None,
artists_ids: Optional[List[str]] = None,
artists: Optional[list[str]] = None,
artists_ids: Optional[list[str]] = None,
length: Optional[float] = None,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
medium_total: Optional[int] = None,
artist_sort: Optional[str] = None,
artists_sort: Optional[List[str]] = None,
artists_sort: Optional[list[str]] = None,
disctitle: Optional[str] = None,
artist_credit: Optional[str] = None,
artists_credit: Optional[List[str]] = None,
artists_credit: Optional[list[str]] = None,
data_source: Optional[str] = None,
data_url: Optional[str] = None,
media: Optional[str] = None,
@ -368,10 +365,10 @@ class Distance:
def __init__(self):
self._penalties = {}
self.tracks: Dict[TrackInfo, Distance] = {}
self.tracks: dict[TrackInfo, Distance] = {}
@cached_classproperty
def _weights(cls) -> Dict[str, float]:
def _weights(cls) -> dict[str, float]:
"""A dictionary from keys to floating-point weights."""
weights_view = config["match"]["distance_weights"]
weights = {}
@ -407,7 +404,7 @@ class Distance:
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self) -> List[Tuple[str, float]]:
def items(self) -> list[tuple[str, float]]:
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
@ -457,13 +454,13 @@ class Distance:
return dist / dist_max
return 0.0
def __iter__(self) -> Iterator[Tuple[str, float]]:
def __iter__(self) -> Iterator[tuple[str, float]]:
return iter(self.items())
def __len__(self) -> int:
return len(self.items())
def keys(self) -> List[str]:
def keys(self) -> list[str]:
return [key for key, _ in self.items()]
def update(self, dist: "Distance"):
@ -501,7 +498,7 @@ class Distance:
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
options: Union[list[Any], tuple[Any, ...], Any],
):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
@ -544,7 +541,7 @@ class Distance:
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
options: Union[list[Any], tuple[Any, ...], Any],
):
"""Adds a distance penalty that corresponds to the position at
which `value` appears in `options`. A distance penalty of 0.0
@ -593,9 +590,9 @@ class Distance:
class AlbumMatch(NamedTuple):
distance: Distance
info: AlbumInfo
mapping: Dict[Item, TrackInfo]
extra_items: List[Item]
extra_tracks: List[TrackInfo]
mapping: dict[Item, TrackInfo]
extra_items: list[Item]
extra_tracks: list[TrackInfo]
class TrackMatch(NamedTuple):
@ -666,12 +663,12 @@ def invoke_mb(call_func: Callable, *args):
@plugins.notify_info_yielded("albuminfo_received")
def album_candidates(
items: List[Item],
items: list[Item],
artist: str,
album: str,
va_likely: bool,
extra_tags: Dict,
) -> Iterable[Tuple]:
extra_tags: dict,
) -> Iterable[tuple]:
"""Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective
names (strings), which may be derived from the item list or may be
@ -699,7 +696,7 @@ def album_candidates(
@plugins.notify_info_yielded("trackinfo_received")
def item_candidates(item: Item, artist: str, title: str) -> Iterable[Tuple]:
def item_candidates(item: Item, artist: str, title: str) -> Iterable[tuple]:
"""Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or
are specified by the user.

View file

@ -23,13 +23,10 @@ import re
from enum import IntEnum
from typing import (
Any,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
@ -88,7 +85,7 @@ class Proposal(NamedTuple):
def current_metadata(
items: Iterable[Item],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
) -> tuple[dict[str, Any], dict[str, Any]]:
"""Extract the likely current metadata for an album given a list of its
items. Return two dictionaries:
- The most common value for each field.
@ -127,7 +124,7 @@ def current_metadata(
def assign_items(
items: Sequence[Item],
tracks: Sequence[TrackInfo],
) -> Tuple[Dict[Item, TrackInfo], List[Item], List[TrackInfo]]:
) -> tuple[dict[Item, TrackInfo], list[Item], list[TrackInfo]]:
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
objects, a set of extra Items, and a set of extra TrackInfo
@ -135,7 +132,7 @@ def assign_items(
of objects of the two types.
"""
# Construct the cost matrix.
costs: List[List[Distance]] = []
costs: list[list[Distance]] = []
for item in items:
row = []
for track in tracks:
@ -221,7 +218,7 @@ def track_distance(
def distance(
items: Sequence[Item],
album_info: AlbumInfo,
mapping: Dict[Item, TrackInfo],
mapping: dict[Item, TrackInfo],
) -> Distance:
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
@ -425,7 +422,7 @@ def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]:
def _add_candidate(
items: Sequence[Item],
results: Dict[Any, AlbumMatch],
results: dict[Any, AlbumMatch],
info: AlbumInfo,
):
"""Given a candidate AlbumInfo object, attempt to add the candidate
@ -479,8 +476,8 @@ def tag_album(
items,
search_artist: Optional[str] = None,
search_album: Optional[str] = None,
search_ids: List[str] = [],
) -> Tuple[str, str, Proposal]:
search_ids: list[str] = [],
) -> tuple[str, str, Proposal]:
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
@ -505,7 +502,7 @@ def tag_album(
log.debug("Tagging {0} - {1}", cur_artist, cur_album)
# The output result, keys are the MB album ID.
candidates: Dict[Any, AlbumMatch] = {}
candidates: dict[Any, AlbumMatch] = {}
# Search by explicit ID.
if search_ids:
@ -571,7 +568,7 @@ def tag_item(
item,
search_artist: Optional[str] = None,
search_title: Optional[str] = None,
search_ids: Optional[List[str]] = None,
search_ids: Optional[list[str]] = None,
) -> Proposal:
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.

View file

@ -20,7 +20,7 @@ import re
import traceback
from collections import Counter
from itertools import product
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, cast
from typing import Any, Iterator, Optional, Sequence, cast
from urllib.parse import urljoin
import musicbrainzngs
@ -131,7 +131,7 @@ def configure():
)
def _preferred_alias(aliases: List):
def _preferred_alias(aliases: list):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
@ -166,7 +166,7 @@ def _preferred_alias(aliases: List):
return matches[0]
def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
def _preferred_release_event(release: dict[str, Any]) -> tuple[str, str]:
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
@ -186,8 +186,8 @@ def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
def _multi_artist_credit(
credit: List[Dict], include_join_phrase: bool
) -> Tuple[List[str], List[str], List[str]]:
credit: list[dict], include_join_phrase: bool
) -> tuple[list[str], list[str], list[str]]:
"""Given a list representing an ``artist-credit`` block, accumulate
data into a triple of joined artist name lists: canonical, sort, and
credit.
@ -234,7 +234,7 @@ def _multi_artist_credit(
)
def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
def _flatten_artist_credit(credit: list[dict]) -> tuple[str, str, str]:
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
@ -249,12 +249,12 @@ def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
)
def _artist_ids(credit: List[Dict]) -> List[str]:
def _artist_ids(credit: list[dict]) -> list[str]:
"""
Given a list representing an ``artist-credit``,
return a list of artist IDs
"""
artist_ids: List[str] = []
artist_ids: list[str] = []
for el in credit:
if isinstance(el, dict):
artist_ids.append(el["artist"]["id"])
@ -276,7 +276,7 @@ def _get_related_artist_names(relations, relation_type):
def track_info(
recording: Dict,
recording: dict,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
@ -400,7 +400,7 @@ def _set_date_str(
setattr(info, key, date_num)
def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
def album_info(release: dict) -> beets.autotag.hooks.AlbumInfo:
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
@ -662,7 +662,7 @@ def match_album(
artist: str,
album: str,
tracks: Optional[int] = None,
extra_tags: Optional[Dict[str, Any]] = None,
extra_tags: Optional[dict[str, Any]] = None,
) -> Iterator[beets.autotag.hooks.AlbumInfo]:
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
@ -756,8 +756,8 @@ def _is_translation(r):
def _find_actual_release_from_pseudo_release(
pseudo_rel: Dict,
) -> Optional[Dict]:
pseudo_rel: dict,
) -> Optional[dict]:
try:
relations = pseudo_rel["release"]["release-relation-list"]
except KeyError:

View file

@ -30,18 +30,13 @@ from typing import (
Any,
AnyStr,
Callable,
DefaultDict,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
@ -161,11 +156,11 @@ class LazyConvertDict:
def __init__(self, model_cls: "Model"):
"""Initialize the object empty"""
# FIXME: Dict[str, SQLiteType]
self._data: Dict[str, Any] = {}
self._data: dict[str, Any] = {}
self.model_cls = model_cls
self._converted: Dict[str, Any] = {}
self._converted: dict[str, Any] = {}
def init(self, data: Dict[str, Any]):
def init(self, data: dict[str, Any]):
"""Set the base data that should be lazily converted"""
self._data = data
@ -195,7 +190,7 @@ class LazyConvertDict:
if key in self._data:
del self._data[key]
def keys(self) -> List[str]:
def keys(self) -> list[str]:
"""Get a list of available field names for this object."""
return list(self._converted.keys()) + list(self._data.keys())
@ -213,7 +208,7 @@ class LazyConvertDict:
for key, value in values.items():
self[key] = value
def items(self) -> Iterable[Tuple[str, Any]]:
def items(self) -> Iterable[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
@ -286,7 +281,7 @@ class Model(ABC):
"""The flex field SQLite table name.
"""
_fields: Dict[str, types.Type] = {}
_fields: dict[str, types.Type] = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
@ -296,16 +291,16 @@ class Model(ABC):
terms.
"""
_types: Dict[str, types.Type] = {}
_types: dict[str, types.Type] = {}
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
_sorts: Dict[str, Type[Sort]] = {}
_sorts: dict[str, type[Sort]] = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
_queries: Dict[str, Type[FieldQuery]] = {}
_queries: dict[str, type[FieldQuery]] = {}
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
@ -348,7 +343,7 @@ class Model(ABC):
return cls._relation._fields.keys() - cls.shared_db_fields
@classmethod
def _getters(cls: Type["Model"]):
def _getters(cls: type["Model"]):
"""Return a mapping from field names to getter functions."""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
@ -378,10 +373,10 @@ class Model(ABC):
@classmethod
def _awaken(
cls: Type[AnyModel],
cls: type[AnyModel],
db: Optional[Database] = None,
fixed_values: Dict[str, Any] = {},
flex_values: Dict[str, Any] = {},
fixed_values: dict[str, Any] = {},
flex_values: dict[str, Any] = {},
) -> AnyModel:
"""Create an object with values drawn from the database.
@ -537,7 +532,7 @@ class Model(ABC):
for key, value in values.items():
self[key] = value
def items(self) -> Iterator[Tuple[str, Any]]:
def items(self) -> Iterator[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
@ -730,16 +725,16 @@ class Model(ABC):
cls,
field,
pattern,
query_cls: Type[FieldQuery] = MatchQuery,
query_cls: type[FieldQuery] = MatchQuery,
) -> FieldQuery:
"""Get a `FieldQuery` for this model."""
return query_cls(field, pattern, field in cls._fields)
@classmethod
def all_fields_query(
cls: Type["Model"],
cls: type["Model"],
pats: Mapping,
query_cls: Type[FieldQuery] = MatchQuery,
query_cls: type[FieldQuery] = MatchQuery,
):
"""Get a query that matches many fields with different patterns.
@ -764,8 +759,8 @@ class Results(Generic[AnyModel]):
def __init__(
self,
model_class: Type[AnyModel],
rows: List[Mapping],
model_class: type[AnyModel],
rows: list[Mapping],
db: "Database",
flex_rows,
query: Optional[Query] = None,
@ -800,7 +795,7 @@ class Results(Generic[AnyModel]):
# The materialized objects corresponding to rows that have been
# consumed.
self._objects: List[AnyModel] = []
self._objects: list[AnyModel] = []
def _get_objects(self) -> Iterator[AnyModel]:
"""Construct and generate Model objects for they query. The
@ -852,7 +847,7 @@ class Results(Generic[AnyModel]):
def _get_indexed_flex_attrs(self) -> Mapping:
"""Index flexible attributes by the entity id they belong to"""
flex_values: Dict[int, Dict[str, Any]] = {}
flex_values: dict[int, dict[str, Any]] = {}
for row in self.flex_rows:
if row["entity_id"] not in flex_values:
flex_values[row["entity_id"]] = {}
@ -861,7 +856,7 @@ class Results(Generic[AnyModel]):
return flex_values
def _make_model(self, row, flex_values: Dict = {}) -> AnyModel:
def _make_model(self, row, flex_values: dict = {}) -> AnyModel:
"""Create a Model object for the given row"""
cols = dict(row)
values = {k: v for (k, v) in cols.items() if not k[:4] == "flex"}
@ -951,7 +946,7 @@ class Transaction:
def __exit__(
self,
exc_type: Type[Exception],
exc_type: type[Exception],
exc_value: Exception,
traceback: TracebackType,
):
@ -970,7 +965,7 @@ class Transaction:
self._mutated = False
self.db._db_lock.release()
def query(self, statement: str, subvals: Sequence = ()) -> List:
def query(self, statement: str, subvals: Sequence = ()) -> list:
"""Execute an SQL statement with substitution values and return
a list of rows from the database.
"""
@ -1010,7 +1005,7 @@ class Database:
the backend.
"""
_models: Sequence[Type[Model]] = ()
_models: Sequence[type[Model]] = ()
"""The Model subclasses representing tables in this database.
"""
@ -1031,9 +1026,9 @@ class Database:
self.path = path
self.timeout = timeout
self._connections: Dict[int, sqlite3.Connection] = {}
self._tx_stacks: DefaultDict[int, List[Transaction]] = defaultdict(list)
self._extensions: List[str] = []
self._connections: dict[int, sqlite3.Connection] = {}
self._tx_stacks: defaultdict[int, list[Transaction]] = defaultdict(list)
self._extensions: list[str] = []
# A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources.
@ -1138,7 +1133,7 @@ class Database:
conn.close()
@contextlib.contextmanager
def _tx_stack(self) -> Generator[List, None, None]:
def _tx_stack(self) -> Generator[list, None, None]:
"""A context manager providing access to the current thread's
transaction stack. The context manager synchronizes access to
the stack map. Transactions should never migrate across threads.
@ -1231,7 +1226,7 @@ class Database:
def _fetch(
self,
model_cls: Type[AnyModel],
model_cls: type[AnyModel],
query: Optional[Query] = None,
sort: Optional[Sort] = None,
) -> Results[AnyModel]:
@ -1289,7 +1284,7 @@ class Database:
def _get(
self,
model_cls: Type[AnyModel],
model_cls: type[AnyModel],
id,
) -> Optional[AnyModel]:
"""Get a Model object by its id or None if the id does not

View file

@ -28,14 +28,10 @@ from typing import (
Collection,
Generic,
Iterator,
List,
MutableSequence,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
@ -83,11 +79,11 @@ class Query(ABC):
"""An abstract class representing a query into the database."""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return set()
def clause(self) -> Tuple[Optional[str], Sequence[Any]]:
def clause(self) -> tuple[Optional[str], Sequence[Any]]:
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
@ -141,7 +137,7 @@ class FieldQuery(Query, Generic[P]):
)
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return {self.field_name}
@ -150,10 +146,10 @@ class FieldQuery(Query, Generic[P]):
self.pattern = pattern
self.fast = fast
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field, ()
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[Optional[str], Sequence[SQLiteType]]:
if self.fast:
return self.col_clause()
else:
@ -188,7 +184,7 @@ class FieldQuery(Query, Generic[P]):
class MatchQuery(FieldQuery[AnySQLiteType]):
"""A query that looks for exact matches in an Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " = ?", [self.pattern]
@classmethod
@ -202,7 +198,7 @@ class NoneQuery(FieldQuery[None]):
def __init__(self, field, fast: bool = True):
super().__init__(field, None, fast)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " IS NULL", ()
def match(self, obj: Model) -> bool:
@ -239,7 +235,7 @@ class StringFieldQuery(FieldQuery[P]):
class StringQuery(StringFieldQuery[str]):
"""A query that matches a whole string in a specific Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
search = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
@ -257,7 +253,7 @@ class StringQuery(StringFieldQuery[str]):
class SubstringQuery(StringFieldQuery[str]):
"""A query that matches a substring in a specific Model field."""
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
pattern = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
@ -292,7 +288,7 @@ class RegexpQuery(StringFieldQuery[Pattern[str]]):
super().__init__(field_name, pattern_re, fast)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return f" regexp({self.field}, ?)", [self.pattern.pattern]
@staticmethod
@ -351,7 +347,7 @@ class BytesQuery(FieldQuery[bytes]):
super().__init__(field_name, bytes_pattern)
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return self.field + " = ?", [self.buf_pattern]
@classmethod
@ -416,7 +412,7 @@ class NumericQuery(FieldQuery[str]):
return False
return True
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
if self.point is not None:
return self.field + "=?", (self.point,)
else:
@ -444,7 +440,7 @@ class InQuery(Generic[AnySQLiteType], FieldQuery[Sequence[AnySQLiteType]]):
def subvals(self) -> Sequence[SQLiteType]:
return self.pattern
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
placeholders = ", ".join(["?"] * len(self.subvals))
return f"{self.field_name} IN ({placeholders})", self.subvals
@ -461,7 +457,7 @@ class CollectionQuery(Query):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return reduce(or_, (sq.field_names for sq in self.subqueries))
@ -485,7 +481,7 @@ class CollectionQuery(Query):
def clause_with_joiner(
self,
joiner: str,
) -> Tuple[Optional[str], Sequence[SQLiteType]]:
) -> tuple[Optional[str], Sequence[SQLiteType]]:
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
@ -521,11 +517,11 @@ class AnyFieldQuery(CollectionQuery):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return set(self.fields)
def __init__(self, pattern, fields, cls: Type[FieldQuery]):
def __init__(self, pattern, fields, cls: type[FieldQuery]):
self.pattern = pattern
self.fields = fields
self.query_class = cls
@ -536,7 +532,7 @@ class AnyFieldQuery(CollectionQuery):
# TYPING ERROR
super().__init__(subqueries)
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[Optional[str], Sequence[SQLiteType]]:
return self.clause_with_joiner("or")
def match(self, obj: Model) -> bool:
@ -575,7 +571,7 @@ class MutableCollectionQuery(CollectionQuery):
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[Optional[str], Sequence[SQLiteType]]:
return self.clause_with_joiner("and")
def match(self, obj: Model) -> bool:
@ -585,7 +581,7 @@ class AndQuery(MutableCollectionQuery):
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[Optional[str], Sequence[SQLiteType]]:
return self.clause_with_joiner("or")
def match(self, obj: Model) -> bool:
@ -598,14 +594,14 @@ class NotQuery(Query):
"""
@property
def field_names(self) -> Set[str]:
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return self.subquery.field_names
def __init__(self, subquery):
self.subquery = subquery
def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
def clause(self) -> tuple[Optional[str], Sequence[SQLiteType]]:
clause, subvals = self.subquery.clause()
if clause:
return f"not ({clause})", subvals
@ -630,7 +626,7 @@ class NotQuery(Query):
class TrueQuery(Query):
"""A query that always matches."""
def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "1", ()
def match(self, obj: Model) -> bool:
@ -640,7 +636,7 @@ class TrueQuery(Query):
class FalseQuery(Query):
"""A query that never matches."""
def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "0", ()
def match(self, obj: Model) -> bool:
@ -650,7 +646,7 @@ class FalseQuery(Query):
# Time/date queries.
def _parse_periods(pattern: str) -> Tuple[Optional[Period], Optional[Period]]:
def _parse_periods(pattern: str) -> tuple[Optional[Period], Optional[Period]]:
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
@ -696,7 +692,7 @@ class Period:
self.precision = precision
@classmethod
def parse(cls: Type["Period"], string: str) -> Optional["Period"]:
def parse(cls: type["Period"], string: str) -> Optional["Period"]:
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
@ -715,7 +711,7 @@ class Period:
def find_date_and_format(
string: str,
) -> Union[Tuple[None, None], Tuple[datetime, int]]:
) -> Union[tuple[None, None], tuple[datetime, int]]:
for ord, format in enumerate(cls.date_formats):
for format_option in format:
try:
@ -843,7 +839,7 @@ class DateQuery(FieldQuery[str]):
_clause_tmpl = "{0} {1} ?"
def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
clause_parts = []
subvals = []
@ -908,7 +904,7 @@ class Sort:
"""
return None
def sort(self, items: List) -> List:
def sort(self, items: list) -> list:
"""Sort the list of objects and return a list."""
return sorted(items)
@ -931,7 +927,7 @@ class Sort:
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts."""
def __init__(self, sorts: Optional[List[Sort]] = None):
def __init__(self, sorts: Optional[list[Sort]] = None):
self.sorts = sorts or []
def add_sort(self, sort: Sort):
@ -1061,7 +1057,7 @@ class SlowFieldSort(FieldSort):
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items: List) -> List:
def sort(self, items: list) -> list:
return items
def __nonzero__(self) -> bool:

View file

@ -16,7 +16,7 @@
import itertools
import re
from typing import Collection, Dict, List, Optional, Sequence, Tuple, Type
from typing import Collection, Optional, Sequence
from . import Model, query
from .query import Sort
@ -35,10 +35,10 @@ PARSE_QUERY_PART_REGEX = re.compile(
def parse_query_part(
part: str,
query_classes: Dict[str, Type[query.FieldQuery]] = {},
prefixes: Dict = {},
default_class: Type[query.SubstringQuery] = query.SubstringQuery,
) -> Tuple[Optional[str], str, Type[query.FieldQuery], bool]:
query_classes: dict[str, type[query.FieldQuery]] = {},
prefixes: dict = {},
default_class: type[query.SubstringQuery] = query.SubstringQuery,
) -> tuple[Optional[str], str, type[query.FieldQuery], bool]:
"""Parse a single *query part*, which is a chunk of a complete query
string representing a single criterion.
@ -104,8 +104,8 @@ def parse_query_part(
def construct_query_part(
model_cls: Type[Model],
prefixes: Dict,
model_cls: type[Model],
prefixes: dict,
query_part: str,
) -> query.Query:
"""Parse a *query part* string and return a :class:`Query` object.
@ -127,7 +127,7 @@ def construct_query_part(
# Use `model_cls` to build up a map from field (or query) names to
# `Query` classes.
query_classes: Dict[str, Type[query.FieldQuery]] = {}
query_classes: dict[str, type[query.FieldQuery]] = {}
for k, t in itertools.chain(
model_cls._fields.items(), model_cls._types.items()
):
@ -171,9 +171,9 @@ def construct_query_part(
# TYPING ERROR
def query_from_strings(
query_cls: Type[query.CollectionQuery],
model_cls: Type[Model],
prefixes: Dict,
query_cls: type[query.CollectionQuery],
model_cls: type[Model],
prefixes: dict,
query_parts: Collection[str],
) -> query.Query:
"""Creates a collection query of type `query_cls` from a list of
@ -189,7 +189,7 @@ def query_from_strings(
def construct_sort_part(
model_cls: Type[Model],
model_cls: type[Model],
part: str,
case_insensitive: bool = True,
) -> Sort:
@ -220,7 +220,7 @@ def construct_sort_part(
def sort_from_strings(
model_cls: Type[Model],
model_cls: type[Model],
sort_parts: Sequence[str],
case_insensitive: bool = True,
) -> Sort:
@ -239,11 +239,11 @@ def sort_from_strings(
def parse_sorted_query(
model_cls: Type[Model],
parts: List[str],
prefixes: Dict = {},
model_cls: type[Model],
parts: list[str],
prefixes: dict = {},
case_insensitive: bool = True,
) -> Tuple[query.Query, Sort]:
) -> tuple[query.Query, Sort]:
"""Given a list of strings, create the `Query` and `Sort` that they
represent.
"""

View file

@ -16,7 +16,7 @@
import typing
from abc import ABC
from typing import Any, Generic, List, TypeVar, Union, cast
from typing import Any, Generic, TypeVar, Union, cast
from beets.util import str2bool
@ -49,11 +49,11 @@ class Type(ABC, Generic[T, N]):
"""The SQLite column type for the value.
"""
query: typing.Type[FieldQuery] = SubstringQuery
query: type[FieldQuery] = SubstringQuery
"""The `Query` subclass to be used when querying the field.
"""
model_type: typing.Type[T]
model_type: type[T]
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
@ -232,7 +232,7 @@ class BaseFloat(Type[float, N]):
"""
sql = "REAL"
query: typing.Type[FieldQuery[Any]] = NumericQuery
query: type[FieldQuery[Any]] = NumericQuery
model_type = float
def __init__(self, digits: int = 1):
@ -277,7 +277,7 @@ class String(BaseString[str, Any]):
model_type = str
class DelimitedString(BaseString[List[str], List[str]]):
class DelimitedString(BaseString[list[str], list[str]]):
"""A list of Unicode strings, represented in-database by a single string
containing delimiter-separated values.
"""
@ -287,7 +287,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
def __init__(self, delimiter: str):
self.delimiter = delimiter
def format(self, value: List[str]):
def format(self, value: list[str]):
return self.delimiter.join(value)
def parse(self, string: str):
@ -295,7 +295,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
return []
return string.split(self.delimiter)
def to_sql(self, model_value: List[str]):
def to_sql(self, model_value: list[str]):
return self.delimiter.join(model_value)

View file

@ -27,7 +27,7 @@ import sys
import textwrap
import traceback
from difflib import SequenceMatcher
from typing import Any, Callable, List
from typing import Any, Callable
import confuse
@ -1450,7 +1450,7 @@ class Subcommand:
invoked by a SubcommandOptionParser.
"""
func: Callable[[library.Library, optparse.Values, List[str]], Any]
func: Callable[[library.Library, optparse.Values, list[str]], Any]
def __init__(self, name, parser=None, help="", aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke

View file

@ -16,14 +16,14 @@
libraries.
"""
from typing import Any, Dict, NamedTuple
from typing import Any, NamedTuple
from beets import util
class Node(NamedTuple):
files: Dict[str, Any]
dirs: Dict[str, Any]
files: dict[str, Any]
dirs: dict[str, Any]
def _insert(node, path, itemid):

View file

@ -19,7 +19,7 @@ import re
import sys
from dataclasses import dataclass
from mimetypes import guess_type
from typing import ClassVar, Mapping, Type
from typing import ClassVar, Mapping
from flask import (
Blueprint,
@ -127,7 +127,7 @@ ARTIST_ATTR_MAP = {
class AURADocument:
"""Base class for building AURA documents."""
model_cls: ClassVar[Type[LibModel]]
model_cls: ClassVar[type[LibModel]]
lib: Library
args: Mapping[str, str]
@ -153,7 +153,7 @@ class AURADocument:
return make_response(document, status)
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:
@ -374,7 +374,7 @@ class TrackDocument(AURADocument):
return self.lib.items(query, sort)
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:

View file

@ -26,7 +26,6 @@ import sys
import time
import traceback
from string import Template
from typing import List
from mediafile import MediaFile
@ -1059,7 +1058,7 @@ class Command:
raise BPDError(ERROR_SYSTEM, "server error", self.name)
class CommandList(List[Command]):
class CommandList(list[Command]):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.

View file

@ -28,20 +28,7 @@ from dataclasses import dataclass
from logging import Logger
from multiprocessing.pool import ThreadPool
from threading import Event, Thread
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing import Any, Callable, Optional, Sequence, TypeVar, Union, cast
from confuse import ConfigView
@ -69,7 +56,7 @@ class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
loading the required plugins."""
def call(args: List[Any], log: Logger, **kwargs: Any):
def call(args: list[Any], log: Logger, **kwargs: Any):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
@ -147,7 +134,7 @@ class RgTask:
self.backend_name = backend_name
self._log = log
self.album_gain: Optional[Gain] = None
self.track_gains: Optional[List[Gain]] = None
self.track_gains: Optional[list[Gain]] = None
def _store_track_gain(self, item: Item, track_gain: Gain):
"""Store track gain for a single item in the database."""
@ -348,7 +335,7 @@ class FfmpegBackend(Backend):
# analyse tracks
# Gives a list of tuples (track_gain, track_n_blocks)
track_results: List[Tuple[Gain, int]] = [
track_results: list[tuple[Gain, int]] = [
self._analyse_item(
item,
task.target_level,
@ -358,7 +345,7 @@ class FfmpegBackend(Backend):
for item in task.items
]
track_gains: List[Gain] = [tg for tg, _nb in track_results]
track_gains: list[Gain] = [tg for tg, _nb in track_results]
# Album peak is maximum track peak
album_peak = max(tg.peak for tg in track_gains)
@ -410,7 +397,7 @@ class FfmpegBackend(Backend):
def _construct_cmd(
self, item: Item, peak_method: Optional[PeakMethod]
) -> List[Union[str, bytes]]:
) -> list[Union[str, bytes]]:
"""Construct the shell command to analyse items."""
return [
self._ffmpeg_path,
@ -435,7 +422,7 @@ class FfmpegBackend(Backend):
target_level: float,
peak_method: Optional[PeakMethod],
count_blocks: bool = True,
) -> Tuple[Gain, int]:
) -> tuple[Gain, int]:
"""Analyse item. Return a pair of a Gain object and the number
of gating blocks above the threshold.
@ -647,7 +634,7 @@ class CommandBackend(Backend):
items: Sequence[Item],
target_level: float,
is_album: bool,
) -> List[Gain]:
) -> list[Gain]:
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
@ -667,7 +654,7 @@ class CommandBackend(Backend):
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd: List[Union[bytes, str]] = [self.command, "-o", "-s", "s"]
cmd: list[Union[bytes, str]] = [self.command, "-o", "-s", "s"]
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + ["-k"]
@ -685,7 +672,7 @@ class CommandBackend(Backend):
output, len(items) + (1 if is_album else 0)
)
def parse_tool_output(self, text: bytes, num_lines: int) -> List[Gain]:
def parse_tool_output(self, text: bytes, num_lines: int) -> list[Gain]:
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
@ -771,7 +758,7 @@ class GStreamerBackend(Backend):
self._main_loop = self.GLib.MainLoop()
self._files: List[bytes] = []
self._files: list[bytes] = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
@ -811,7 +798,7 @@ class GStreamerBackend(Backend):
self._files = [i.path for i in items]
# FIXME: Turn this into DefaultDict[bytes, Gain]
self._file_tags: DefaultDict[bytes, Dict[str, float]] = (
self._file_tags: collections.defaultdict[bytes, dict[str, float]] = (
collections.defaultdict(dict)
)
@ -1199,13 +1186,13 @@ class ExceptionWatcher(Thread):
# Main plugin logic.
BACKEND_CLASSES: List[Type[Backend]] = [
BACKEND_CLASSES: list[type[Backend]] = [
CommandBackend,
GStreamerBackend,
AudioToolsBackend,
FfmpegBackend,
]
BACKENDS: Dict[str, Type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
BACKENDS: dict[str, type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
class ReplayGainPlugin(BeetsPlugin):
@ -1375,7 +1362,7 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.info("analyzing {0}", album)
discs: Dict[int, List[Item]] = {}
discs: dict[int, list[Item]] = {}
if self.config["per_disc"].get(bool):
for item in album.items():
if discs.get(item.disc) is None:
@ -1447,8 +1434,8 @@ class ReplayGainPlugin(BeetsPlugin):
def _apply(
self,
func: Callable[..., AnyRgTask],
args: List[Any],
kwds: Dict[str, Any],
args: list[Any],
kwds: dict[str, Any],
callback: Callable[[AnyRgTask], Any],
):
if self.pool is not None:
@ -1525,7 +1512,7 @@ class ReplayGainPlugin(BeetsPlugin):
self,
lib: Library,
opts: optparse.Values,
args: List[str],
args: list[str],
):
try:
write = ui.should_write(opts.write)
@ -1562,7 +1549,7 @@ class ReplayGainPlugin(BeetsPlugin):
# Silence interrupt exceptions
pass
def commands(self) -> List[ui.Subcommand]:
def commands(self) -> list[ui.Subcommand]:
"""Return the "replaygain" ui subcommand."""
cmd = ui.Subcommand("replaygain", help="analyze for ReplayGain")
cmd.parser.add_album_option()

View file

@ -15,7 +15,6 @@
"""Moves patterns in path formats (suitable for moving articles)."""
import re
from typing import List
from beets.plugins import BeetsPlugin
@ -28,7 +27,7 @@ FORMAT = "{0}, {1}"
class ThePlugin(BeetsPlugin):
patterns: List[str] = []
patterns: list[str] = []
def __init__(self):
super().__init__()

View file

@ -14,7 +14,7 @@
"""Tests for the 'albumtypes' plugin."""
from typing import Sequence, Tuple
from typing import Sequence
from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.test.helper import PluginTestCase
@ -91,7 +91,7 @@ class AlbumTypesPluginTest(PluginTestCase):
def _set_config(
self,
types: Sequence[Tuple[str, str]],
types: Sequence[tuple[str, str]],
ignore_va: Sequence[str],
bracket: str,
):

View file

@ -1,7 +1,7 @@
import os
from http import HTTPStatus
from pathlib import Path
from typing import Any, Dict, Optional
from typing import Any, Optional
import pytest
from flask.testing import Client
@ -59,8 +59,8 @@ class TestAuraResponse:
"""Return a callback accepting `endpoint` and `params` parameters."""
def get(
endpoint: str, params: Dict[str, str]
) -> Optional[Dict[str, Any]]:
endpoint: str, params: dict[str, str]
) -> Optional[dict[str, Any]]:
"""Add additional `params` and GET the given endpoint.
`include` parameter is added to every call to check that the