Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Graham R. Cobb 2021-03-10 18:32:13 +00:00
commit 0aa333cd7b
13 changed files with 1500 additions and 108 deletions

View file

@ -56,10 +56,11 @@ class FormattedMapping(Mapping):
are replaced.
"""
def __init__(self, model, for_path=False):
def __init__(self, model, for_path=False, compute_keys=True):
self.for_path = for_path
self.model = model
self.model_keys = model.keys(True)
if compute_keys:
self.model_keys = model.keys(True)
def __getitem__(self, key):
if key in self.model_keys:
@ -257,6 +258,11 @@ class Model(object):
value is the same as the old value (e.g., `o.f = o.f`).
"""
_revision = -1
"""A revision number from when the model was loaded from or written
to the database.
"""
@classmethod
def _getters(cls):
"""Return a mapping from field names to getter functions.
@ -309,9 +315,11 @@ class Model(object):
def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to
the database).
the database). Also update the revision.
"""
self._dirty = set()
if self._db:
self._revision = self._db.revision
def _check_db(self, need_id=True):
"""Ensure that this object is associated with a database row: it
@ -351,9 +359,9 @@ class Model(object):
"""
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
def _get(self, key, default=None, raise_=False):
"""Get the value for a field, or `default`. Alternatively,
raise a KeyError if the field is not available.
"""
getters = self._getters()
if key in getters: # Computed.
@ -365,8 +373,18 @@ class Model(object):
return self._type(key).null
elif key in self._values_flex: # Flexible.
return self._values_flex[key]
else:
elif raise_:
raise KeyError(key)
else:
return default
get = _get
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
"""
return self._get(key, raise_=True)
def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value
@ -441,19 +459,10 @@ class Model(object):
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys(True)
return key in self.keys(computed=True)
def __iter__(self):
"""Iterate over the available field names (excluding computed
@ -538,8 +547,14 @@ class Model(object):
def load(self):
"""Refresh the object's metadata from the library database.
    The database is only queried when a transaction has been
    committed since the item was last loaded.
"""
self._check_db()
if not self._dirty and self._db.revision == self._revision:
# Exit early
return
stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, u"object {0} not in DB".format(self.id)
self._values_fixed = LazyConvertDict(self)
@ -794,6 +809,12 @@ class Transaction(object):
"""A context manager for safe, concurrent access to the database.
All SQL commands should be executed through a transaction.
"""
_mutated = False
"""A flag storing whether a mutation has been executed in the
current transaction.
"""
def __init__(self, db):
self.db = db
@ -815,12 +836,15 @@ class Transaction(object):
entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed.
"""
# Beware of races; currently secured by db._db_lock
self.db.revision += self._mutated
with self.db._tx_stack() as stack:
assert stack.pop() is self
empty = not stack
if empty:
# Ending a "root" transaction. End the SQLite transaction.
self.db._connection().commit()
self._mutated = False
self.db._db_lock.release()
def query(self, statement, subvals=()):
@ -836,7 +860,6 @@ class Transaction(object):
"""
try:
cursor = self.db._connection().execute(statement, subvals)
return cursor.lastrowid
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
@ -846,9 +869,14 @@ class Transaction(object):
raise DBAccessError(e.args[0])
else:
raise
else:
self._mutated = True
return cursor.lastrowid
def script(self, statements):
"""Execute a string containing multiple SQL statements."""
# We don't know whether this mutates, but quite likely it does.
self._mutated = True
self.db._connection().executescript(statements)
@ -864,6 +892,11 @@ class Database(object):
supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
"""Whether or not the current version of SQLite supports extensions"""
revision = 0
"""The current revision of the database. To be increased whenever
data is written in a transaction.
"""
def __init__(self, path, timeout=5.0):
self.path = path
self.timeout = timeout

View file

@ -786,7 +786,7 @@ class ImportTask(BaseImportTask):
if (not dup_item.album_id or
dup_item.album_id in replaced_album_ids):
continue
replaced_album = dup_item.get_album()
replaced_album = dup_item._cached_album
if replaced_album:
replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album

View file

@ -375,7 +375,11 @@ class FormattedItemMapping(dbcore.db.FormattedMapping):
"""
def __init__(self, item, for_path=False):
super(FormattedItemMapping, self).__init__(item, for_path)
# We treat album and item keys specially here,
# so exclude transitive album keys from the model's keys.
super(FormattedItemMapping, self).__init__(item, for_path,
compute_keys=False)
self.model_keys = item.keys(computed=True, with_album=False)
self.item = item
@lazy_property
@ -386,15 +390,15 @@ class FormattedItemMapping(dbcore.db.FormattedMapping):
def album_keys(self):
album_keys = []
if self.album:
for key in self.album.keys(True):
for key in self.album.keys(computed=True):
if key in Album.item_keys \
or key not in self.item._fields.keys():
album_keys.append(key)
return album_keys
@lazy_property
@property
def album(self):
return self.item.get_album()
return self.item._cached_album
def _get(self, key):
"""Get the value for a key, either from the album or the item.
@ -545,6 +549,29 @@ class Item(LibModel):
_format_config_key = 'format_item'
__album = None
"""Cached album object. Read-only."""
@property
def _cached_album(self):
"""The Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
The instance is cached and refreshed on access.
DO NOT MODIFY!
If you want a copy to modify, use :meth:`get_album`.
"""
if not self.__album and self._db:
self.__album = self._db.get_album(self)
elif self.__album:
self.__album.load()
return self.__album
@_cached_album.setter
def _cached_album(self, album):
self.__album = album
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
@ -571,12 +598,45 @@ class Item(LibModel):
value = bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
value = bytes(value)
elif key == 'album_id':
self._cached_album = None
changed = super(Item, self)._setitem(key, value)
if changed and key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
def __getitem__(self, key):
"""Get the value for a field, falling back to the album if
necessary. Raise a KeyError if the field is not available.
"""
try:
return super(Item, self).__getitem__(key)
except KeyError:
if self._cached_album:
return self._cached_album[key]
raise
def keys(self, computed=False, with_album=True):
"""Get a list of available field names. `with_album`
controls whether the album's fields are included.
"""
keys = super(Item, self).keys(computed=computed)
if with_album and self._cached_album:
keys += self._cached_album.keys(computed=computed)
return keys
def get(self, key, default=None, with_album=True):
"""Get the value for a given key or `default` if it does not
exist. Set `with_album` to false to skip album fallback.
"""
try:
return self._get(key, default, raise_=with_album)
except KeyError:
if self._cached_album:
return self._cached_album.get(key, default)
return default
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).

View file

@ -1155,8 +1155,13 @@ def _setup(options, lib=None):
plugins.send("library_opened", lib=lib)
# Add types and queries defined by plugins.
library.Item._types.update(plugins.types(library.Item))
library.Album._types.update(plugins.types(library.Album))
plugin_types_album = plugins.types(library.Album)
library.Album._types.update(plugin_types_album)
item_types = plugin_types_album.copy()
item_types.update(library.Item._types)
item_types.update(plugins.types(library.Item))
library.Item._types = item_types
library.Item._queries.update(plugins.named_queries(library.Item))
library.Album._queries.update(plugins.named_queries(library.Album))

966
beetsplug/aura.py Normal file
View file

@ -0,0 +1,966 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2020, Callum Brown.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""An AURA server using Flask."""
from __future__ import division, absolute_import, print_function

import os.path
import re
from mimetypes import guess_type
from os.path import isfile, getsize

from flask import (
    Blueprint,
    Flask,
    current_app,
    send_file,
    make_response,
    request,
)

from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, _open_library
from beets import config
from beets.util import py3_path
from beets.library import Item, Album
from beets.dbcore.query import (
    MatchQuery,
    NotQuery,
    RegexpQuery,
    AndQuery,
    FixedFieldSort,
    SlowFieldSort,
    MultipleSort,
)
# Constants

# AURA server information advertised by the /server endpoint.
# TODO: Add version information
SERVER_INFO = {
    "aura-version": "0",
    "server": "beets-aura",
    "server-version": "0.1",
    "auth-required": False,
    "features": ["albums", "artists", "images"],
}

# Maps AURA Track attribute to beets Item attribute
TRACK_ATTR_MAP = {
    # Required
    "title": "title",
    "artist": "artist",
    # Optional
    "album": "album",
    "track": "track",  # Track number on album
    "tracktotal": "tracktotal",
    "disc": "disc",
    "disctotal": "disctotal",
    "year": "year",
    "month": "month",
    "day": "day",
    "bpm": "bpm",
    "genre": "genre",
    "recording-mbid": "mb_trackid",  # beets trackid is MB recording
    "track-mbid": "mb_releasetrackid",
    "composer": "composer",
    "albumartist": "albumartist",
    "comments": "comments",
    # Optional for Audio Metadata
    # TODO: Support the mimetype attribute, format != mime type
    # "mimetype": track.format,
    "duration": "length",
    "framerate": "samplerate",
    # I don't think beets has a framecount field
    # "framecount": ???,
    "channels": "channels",
    "bitrate": "bitrate",
    "bitdepth": "bitdepth",
    "size": "filesize",
}

# Maps AURA Album attribute to beets Album attribute
ALBUM_ATTR_MAP = {
    # Required
    "title": "album",
    "artist": "albumartist",
    # Optional
    "tracktotal": "albumtotal",
    "disctotal": "disctotal",
    "year": "year",
    "month": "month",
    "day": "day",
    "genre": "genre",
    "release-mbid": "mb_albumid",
    "release-group-mbid": "mb_releasegroupid",
}

# Maps AURA Artist attribute to beets Item field
# Artists are not first-class in beets, so information is extracted from
# beets Items.
ARTIST_ATTR_MAP = {
    # Required
    "name": "artist",
    # Optional
    "artist-mbid": "mb_artistid",
}
class AURADocument:
    """Base class for building AURA (JSON:API) documents.

    Subclasses supply ``attribute_map``, ``get_collection``,
    ``get_attribute_converter`` and ``resource_object``; this base class
    implements the shared plumbing: filter/sort translation, pagination
    and inclusion of related resources.
    """

    @staticmethod
    def error(status, title, detail):
        """Make a response for an error following the JSON:API spec.

        Args:
            status: An HTTP status code string, e.g. "404 Not Found".
            title: A short, human-readable summary of the problem.
            detail: A human-readable explanation specific to this
                occurrence of the problem.
        """
        document = {
            "errors": [{"status": status, "title": title, "detail": detail}]
        }
        return make_response(document, status)

    def translate_filters(self):
        """Translate filters from request arguments to a beets Query."""
        # The format of each filter key in the request parameter is:
        # filter[<attribute>]. This regex extracts <attribute>.
        pattern = re.compile(r"filter\[(?P<attribute>[a-zA-Z0-9_-]+)\]")
        queries = []
        for key, value in request.args.items():
            match = pattern.match(key)
            if match:
                # Extract attribute name from key
                aura_attr = match.group("attribute")
                # Get the beets version of the attribute name
                beets_attr = self.attribute_map.get(aura_attr, aura_attr)
                converter = self.get_attribute_converter(beets_attr)
                value = converter(value)
                # Add exact match query to list
                # Use a slow query so it works with all fields
                queries.append(MatchQuery(beets_attr, value, fast=False))
        # NOTE: AURA doesn't officially support multiple queries
        return AndQuery(queries)

    def translate_sorts(self, sort_arg):
        """Translate an AURA sort parameter into a beets Sort.

        Args:
            sort_arg: The value of the 'sort' query parameter; a comma
                separated list of fields to sort by, in order.
                E.g. "-year,title".
        """
        # Change HTTP query parameter to a list
        aura_sorts = sort_arg.strip(",").split(",")
        sorts = []
        for aura_attr in aura_sorts:
            if aura_attr[0] == "-":
                # A leading "-" means descending order
                ascending = False
                # Remove leading "-"
                aura_attr = aura_attr[1:]
            else:
                # JSON:API default
                ascending = True
            # Get the beets version of the attribute name
            beets_attr = self.attribute_map.get(aura_attr, aura_attr)
            # Use slow sort so it works with all fields (inc. computed)
            sorts.append(SlowFieldSort(beets_attr, ascending=ascending))
        return MultipleSort(sorts)

    def paginate(self, collection):
        """Get a page of the collection and the URL to the next page.

        Args:
            collection: The raw data from which resource objects can be
                built. Could be an sqlite3.Cursor object (tracks and
                albums) or a list of strings (artists).
        """
        # Pages start from zero
        page = request.args.get("page", 0, int)
        # Use page limit defined in config by default.
        default_limit = config["aura"]["page_limit"].get(int)
        limit = request.args.get("limit", default_limit, int)
        # start = offset of first item to return
        start = page * limit
        # end = offset of last item + 1
        end = start + limit
        if end > len(collection):
            # Final (possibly partial) page: no next link.
            end = len(collection)
            next_url = None
        else:
            # Not the last page so work out links.next url
            if not request.args:
                # No existing arguments, so current page is 0
                next_url = request.url + "?page=1"
            elif not request.args.get("page", None):
                # No existing page argument, so add one to the end
                next_url = request.url + "&page=1"
            else:
                # Increment page token by 1
                next_url = request.url.replace(
                    "page={}".format(page), "page={}".format(page + 1)
                )
        # Get only the items in the page range
        data = [self.resource_object(collection[i]) for i in range(start, end)]
        return data, next_url

    def get_included(self, data, include_str):
        """Build a list of resource objects for inclusion.

        Args:
            data: An array of dicts in the form of resource objects.
            include_str: A comma separated list of resource types to
                include. E.g. "tracks,images".
        """
        # Change HTTP query parameter to a list
        to_include = include_str.strip(",").split(",")
        # Build a list of unique type and id combinations
        # For each resource object in the primary data, iterate over its
        # relationships. If a relationship matches one of the types
        # requested for inclusion (e.g. "albums") then add each type-id pair
        # under the "data" key to unique_identifiers, checking first that
        # it has not already been added. This ensures that no resources are
        # included more than once.
        unique_identifiers = []
        for res_obj in data:
            for rel_name, rel_obj in res_obj["relationships"].items():
                if rel_name in to_include:
                    # NOTE: Assumes relationship is to-many
                    for identifier in rel_obj["data"]:
                        if identifier not in unique_identifiers:
                            unique_identifiers.append(identifier)
        # TODO: I think this could be improved
        included = []
        for identifier in unique_identifiers:
            res_type = identifier["type"]
            if res_type == "track":
                track_id = int(identifier["id"])
                track = current_app.config["lib"].get_item(track_id)
                included.append(TrackDocument.resource_object(track))
            elif res_type == "album":
                album_id = int(identifier["id"])
                album = current_app.config["lib"].get_album(album_id)
                included.append(AlbumDocument.resource_object(album))
            elif res_type == "artist":
                artist_id = identifier["id"]
                included.append(ArtistDocument.resource_object(artist_id))
            elif res_type == "image":
                image_id = identifier["id"]
                included.append(ImageDocument.resource_object(image_id))
            else:
                raise ValueError("Invalid resource type: {}".format(res_type))
        return included

    def all_resources(self):
        """Build document for /tracks, /albums or /artists."""
        query = self.translate_filters()
        sort_arg = request.args.get("sort", None)
        if sort_arg:
            sort = self.translate_sorts(sort_arg)
            # For each sort field add a query which ensures all results
            # have a non-empty, non-zero value for that field.
            for s in sort.sorts:
                query.subqueries.append(
                    NotQuery(
                        # Match empty fields (^$) or zero fields, (^0$)
                        RegexpQuery(s.field, "(^$|^0$)", fast=False)
                    )
                )
        else:
            sort = None
        # Get information from the library
        collection = self.get_collection(query=query, sort=sort)
        # Convert info to AURA form and paginate it
        data, next_url = self.paginate(collection)
        document = {"data": data}
        # If there are more pages then provide a way to access them
        if next_url:
            document["links"] = {"next": next_url}
        # Include related resources for each element in "data"
        include_str = request.args.get("include", None)
        if include_str:
            document["included"] = self.get_included(data, include_str)
        return document

    def single_resource_document(self, resource_object):
        """Build document for a specific requested resource.

        Args:
            resource_object: A dictionary in the form of a JSON:API
                resource object.
        """
        document = {"data": resource_object}
        include_str = request.args.get("include", None)
        if include_str:
            # [document["data"]] is because arg needs to be list
            document["included"] = self.get_included(
                [document["data"]], include_str
            )
        return document
class TrackDocument(AURADocument):
    """Class for building documents for /tracks endpoints."""

    attribute_map = TRACK_ATTR_MAP

    def get_collection(self, query=None, sort=None):
        """Get Item objects from the library.

        Args:
            query: A beets Query object or a beets query string.
            sort: A beets Sort object.
        """
        return current_app.config["lib"].items(query, sort)

    def get_attribute_converter(self, beets_attr):
        """Work out what data type an attribute should be for beets.

        Args:
            beets_attr: The name of the beets attribute, e.g. "title".
        """
        # filesize is a special field (read from disk not db?)
        if beets_attr == "filesize":
            converter = int
        else:
            try:
                # Look for field in list of Item fields
                # and get python type of database type.
                # See beets.library.Item and beets.dbcore.types
                converter = Item._fields[beets_attr].model_type
            except KeyError:
                # Fall back to string (NOTE: probably not good)
                converter = str
        return converter

    @staticmethod
    def resource_object(track):
        """Construct a JSON:API resource object from a beets Item.

        Args:
            track: A beets Item object.
        """
        attributes = {}
        # Use aura => beets attribute map, e.g. size => filesize
        for aura_attr, beets_attr in TRACK_ATTR_MAP.items():
            a = getattr(track, beets_attr)
            # Only set attribute if it's not None, 0, "", etc.
            # NOTE: This could result in required attributes not being set
            if a:
                attributes[aura_attr] = a
        # JSON:API to-many relationship to the track's artist
        relationships = {
            "artists": {"data": [{"type": "artist", "id": track.artist}]}
        }
        # Only add the parent album relationship if not singleton
        if not track.singleton:
            relationships["albums"] = {
                "data": [{"type": "album", "id": str(track.album_id)}]
            }
        return {
            "type": "track",
            "id": str(track.id),
            "attributes": attributes,
            "relationships": relationships,
        }

    def single_resource(self, track_id):
        """Get track from the library and build a document.

        Args:
            track_id: The beets id of the track (integer).
        """
        track = current_app.config["lib"].get_item(track_id)
        if not track:
            return self.error(
                "404 Not Found",
                "No track with the requested id.",
                "There is no track with an id of {} in the library.".format(
                    track_id
                ),
            )
        return self.single_resource_document(self.resource_object(track))
class AlbumDocument(AURADocument):
    """Class for building documents for /albums endpoints."""

    attribute_map = ALBUM_ATTR_MAP

    def get_collection(self, query=None, sort=None):
        """Get Album objects from the library.

        Args:
            query: A beets Query object or a beets query string.
            sort: A beets Sort object.
        """
        return current_app.config["lib"].albums(query, sort)

    def get_attribute_converter(self, beets_attr):
        """Work out what data type an attribute should be for beets.

        Args:
            beets_attr: The name of the beets attribute, e.g. "title".
        """
        try:
            # Look for field in list of Album fields
            # and get python type of database type.
            # See beets.library.Album and beets.dbcore.types
            converter = Album._fields[beets_attr].model_type
        except KeyError:
            # Fall back to string (NOTE: probably not good)
            converter = str
        return converter

    @staticmethod
    def resource_object(album):
        """Construct a JSON:API resource object from a beets Album.

        Args:
            album: A beets Album object.
        """
        attributes = {}
        # Use aura => beets attribute name map
        for aura_attr, beets_attr in ALBUM_ATTR_MAP.items():
            a = getattr(album, beets_attr)
            # Only set attribute if it's not None, 0, "", etc.
            # NOTE: This could mean required attributes are not set
            if a:
                attributes[aura_attr] = a
        # Get beets Item objects for all tracks in the album sorted by
        # track number. Sorting is not required but it's nice.
        query = MatchQuery("album_id", album.id)
        sort = FixedFieldSort("track", ascending=True)
        tracks = current_app.config["lib"].items(query, sort)
        # JSON:API one-to-many relationship to tracks on the album
        relationships = {
            "tracks": {
                "data": [{"type": "track", "id": str(t.id)} for t in tracks]
            }
        }
        # Add images relationship if album has associated images
        if album.artpath:
            path = py3_path(album.artpath)
            # Use os.path.basename rather than splitting on "/" so the
            # image id is correct on platforms with other separators.
            filename = os.path.basename(path)
            image_id = "album-{}-{}".format(album.id, filename)
            relationships["images"] = {
                "data": [{"type": "image", "id": image_id}]
            }
        # Add artist relationship if artist name is same on tracks
        # Tracks are used to define artists so don't use albumartist
        # Check for all tracks in case some have featured artists
        if album.albumartist in [t.artist for t in tracks]:
            relationships["artists"] = {
                "data": [{"type": "artist", "id": album.albumartist}]
            }
        return {
            "type": "album",
            "id": str(album.id),
            "attributes": attributes,
            "relationships": relationships,
        }

    def single_resource(self, album_id):
        """Get album from the library and build a document.

        Args:
            album_id: The beets id of the album (integer).
        """
        album = current_app.config["lib"].get_album(album_id)
        if not album:
            return self.error(
                "404 Not Found",
                "No album with the requested id.",
                "There is no album with an id of {} in the library.".format(
                    album_id
                ),
            )
        return self.single_resource_document(self.resource_object(album))
class ArtistDocument(AURADocument):
    """Class for building documents for /artists endpoints."""

    attribute_map = ARTIST_ATTR_MAP

    def get_collection(self, query=None, sort=None):
        """Get a list of artist names from the library.

        Args:
            query: A beets Query object or a beets query string.
            sort: A beets Sort object.
        """
        # Gets only tracks with matching artist information
        tracks = current_app.config["lib"].items(query, sort)
        collection = []
        for track in tracks:
            # Do not add duplicates
            if track.artist not in collection:
                collection.append(track.artist)
        return collection

    def get_attribute_converter(self, beets_attr):
        """Work out what data type an attribute should be for beets.

        Args:
            beets_attr: The name of the beets attribute, e.g. "artist".
        """
        try:
            # Look for field in list of Item fields
            # and get python type of database type.
            # See beets.library.Item and beets.dbcore.types
            converter = Item._fields[beets_attr].model_type
        except KeyError:
            # Fall back to string (NOTE: probably not good)
            converter = str
        return converter

    @staticmethod
    def resource_object(artist_id):
        """Construct a JSON:API resource object for the given artist.

        Args:
            artist_id: A string which is the artist's name.
        """
        # Get tracks where artist field exactly matches artist_id
        query = MatchQuery("artist", artist_id)
        tracks = current_app.config["lib"].items(query)
        if not tracks:
            return None
        # Get artist information from the first track
        # NOTE: It could be that the first track doesn't have a
        # MusicBrainz id but later tracks do, which isn't ideal.
        attributes = {}
        # Use aura => beets attribute map, e.g. artist => name
        for aura_attr, beets_attr in ARTIST_ATTR_MAP.items():
            a = getattr(tracks[0], beets_attr)
            # Only set attribute if it's not None, 0, "", etc.
            # NOTE: This could mean required attributes are not set
            if a:
                attributes[aura_attr] = a
        # JSON:API to-many relationship to the artist's tracks
        relationships = {
            "tracks": {
                "data": [{"type": "track", "id": str(t.id)} for t in tracks]
            }
        }
        # Add albums where this artist is the albumartist, if any
        album_query = MatchQuery("albumartist", artist_id)
        albums = current_app.config["lib"].albums(query=album_query)
        if len(albums) != 0:
            relationships["albums"] = {
                "data": [{"type": "album", "id": str(a.id)} for a in albums]
            }
        return {
            "type": "artist",
            "id": artist_id,
            "attributes": attributes,
            "relationships": relationships,
        }

    def single_resource(self, artist_id):
        """Get info for the requested artist and build a document.

        Args:
            artist_id: A string which is the artist's name.
        """
        artist_resource = self.resource_object(artist_id)
        if not artist_resource:
            return self.error(
                "404 Not Found",
                "No artist with the requested id.",
                "There is no artist with an id of {} in the library.".format(
                    artist_id
                ),
            )
        return self.single_resource_document(artist_resource)
class ImageDocument(AURADocument):
    """Class for building documents for /images/(id) endpoints."""

    @staticmethod
    def get_image_path(image_id):
        """Works out the full path to the image with the given id.

        Returns None if there is no such image.

        Args:
            image_id: A string in the form
                "<parent_type>-<parent_id>-<img_filename>".
        """
        # Split image_id into its constituent parts
        id_split = image_id.split("-")
        if len(id_split) < 3:
            # image_id is not in the required format
            return None
        parent_type = id_split[0]
        parent_id = id_split[1]
        # The filename itself may contain "-" characters
        img_filename = "-".join(id_split[2:])
        # Get the path to the directory parent's images are in
        if parent_type == "album":
            album = current_app.config["lib"].get_album(int(parent_id))
            if not album or not album.artpath:
                return None
            # Cut the filename off of artpath
            # This is in preparation for supporting images in the same
            # directory that are not tracked by beets.
            artpath = py3_path(album.artpath)
            # Use os.path rather than splitting on "/" so paths are
            # handled correctly on platforms with other separators.
            dir_path = os.path.dirname(artpath)
        else:
            # Images for other resource types are not supported
            return None
        img_path = os.path.join(dir_path, img_filename)
        # Check the image actually exists
        if isfile(img_path):
            return img_path
        else:
            return None

    @staticmethod
    def resource_object(image_id):
        """Construct a JSON:API resource object for the given image.

        Args:
            image_id: A string in the form
                "<parent_type>-<parent_id>-<img_filename>".
        """
        # Could be called as a static method, so can't use
        # self.get_image_path()
        image_path = ImageDocument.get_image_path(image_id)
        if not image_path:
            return None
        attributes = {
            "role": "cover",
            "mimetype": guess_type(image_path)[0],
            "size": getsize(image_path),
        }
        try:
            from PIL import Image
        except ImportError:
            # Pillow is optional; width/height are simply omitted.
            pass
        else:
            im = Image.open(image_path)
            attributes["width"] = im.width
            attributes["height"] = im.height
        relationships = {}
        # Split id into [parent_type, parent_id, filename]
        id_split = image_id.split("-")
        relationships[id_split[0] + "s"] = {
            "data": [{"type": id_split[0], "id": id_split[1]}]
        }
        return {
            "id": image_id,
            "type": "image",
            # Remove attributes that are None, 0, "", etc.
            "attributes": {k: v for k, v in attributes.items() if v},
            "relationships": relationships,
        }

    def single_resource(self, image_id):
        """Get info for the requested image and build a document.

        Args:
            image_id: A string in the form
                "<parent_type>-<parent_id>-<img_filename>".
        """
        image_resource = self.resource_object(image_id)
        if not image_resource:
            return self.error(
                "404 Not Found",
                "No image with the requested id.",
                "There is no image with an id of {} in the library.".format(
                    image_id
                ),
            )
        return self.single_resource_document(image_resource)
# Initialise flask blueprint; view functions below register themselves
# on it and create_app() mounts it under the /aura URL prefix.
aura_bp = Blueprint("aura_bp", __name__)
@aura_bp.route("/server")
def server_info():
    """Respond with info about the server."""
    return {
        "data": {
            "type": "server",
            "id": "0",
            "attributes": SERVER_INFO,
        }
    }
# Track endpoints

@aura_bp.route("/tracks")
def all_tracks():
    """Respond with a list of all tracks and related information."""
    return TrackDocument().all_resources()
@aura_bp.route("/tracks/<int:track_id>")
def single_track(track_id):
    """Respond with info about the specified track.

    Args:
        track_id: The id of the track provided in the URL (integer).
    """
    return TrackDocument().single_resource(track_id)
@aura_bp.route("/tracks/<int:track_id>/audio")
def audio_file(track_id):
    """Supply an audio file for the specified track.

    Args:
        track_id: The id of the track provided in the URL (integer).
    """
    track = current_app.config["lib"].get_item(track_id)
    if not track:
        return AURADocument.error(
            "404 Not Found",
            "No track with the requested id.",
            "There is no track with an id of {} in the library.".format(
                track_id
            ),
        )
    path = py3_path(track.path)
    if not isfile(path):
        # The library row exists but the file is gone from disk.
        return AURADocument.error(
            "404 Not Found",
            "No audio file for the requested track.",
            (
                "There is no audio file for track {} at the expected location"
            ).format(track_id),
        )
    file_mimetype = guess_type(path)[0]
    if not file_mimetype:
        return AURADocument.error(
            "500 Internal Server Error",
            "Requested audio file has an unknown mimetype.",
            (
                "The audio file for track {} has an unknown mimetype. "
                "Its file extension is {}."
            ).format(track_id, path.split(".")[-1]),
        )
    # Check that the Accept header contains the file's mimetype
    # Takes into account */* and audio/*
    # Adding support for the bitrate parameter would require some effort so I
    # left it out. This means the client could be sent an error even if the
    # audio doesn't need transcoding.
    if not request.accept_mimetypes.best_match([file_mimetype]):
        return AURADocument.error(
            "406 Not Acceptable",
            "Unsupported MIME type or bitrate parameter in Accept header.",
            (
                "The audio file for track {} is only available as {} and "
                "bitrate parameters are not supported."
            ).format(track_id, file_mimetype),
        )
    return send_file(
        path,
        mimetype=file_mimetype,
        # Handles filename in Content-Disposition header
        as_attachment=True,
        # Tries to upgrade the stream to support range requests
        conditional=True,
    )
# Album endpoints

@aura_bp.route("/albums")
def all_albums():
    """Respond with a list of all albums and related information."""
    return AlbumDocument().all_resources()
@aura_bp.route("/albums/<int:album_id>")
def single_album(album_id):
    """Respond with info about the specified album.

    Args:
        album_id: The id of the album provided in the URL (integer).
    """
    return AlbumDocument().single_resource(album_id)
# Artist endpoints
# Artist ids are their names

@aura_bp.route("/artists")
def all_artists():
    """Respond with a list of all artists and related information."""
    return ArtistDocument().all_resources()
# Using the path converter allows slashes in artist_id
@aura_bp.route("/artists/<path:artist_id>")
def single_artist(artist_id):
    """Respond with info about the specified artist.

    Args:
        artist_id: The id of the artist provided in the URL. A string
            which is the artist's name.
    """
    return ArtistDocument().single_resource(artist_id)
# Image endpoints
# Image ids are in the form <parent_type>-<parent_id>-<img_filename>
# For example: album-13-cover.jpg
@aura_bp.route("/images/<string:image_id>")
def single_image(image_id):
    """Respond with info about the specified image.

    Args:
        image_id: The id of the image provided in the URL. A string in
            the form "<parent_type>-<parent_id>-<img_filename>".
    """
    return ImageDocument().single_resource(image_id)
@aura_bp.route("/images/<string:image_id>/file")
def image_file(image_id):
    """Supply an image file for the specified image.

    Args:
        image_id: The id of the image provided in the URL. A string in
            the form "<parent_type>-<parent_id>-<img_filename>".
    """
    img_path = ImageDocument.get_image_path(image_id)
    if img_path:
        return send_file(img_path)
    # Unknown id: reply with a JSON:API error document instead.
    return AURADocument.error(
        "404 Not Found",
        "No image with the requested id.",
        "There is no image with an id of {} in the library".format(
            image_id
        ),
    )
# WSGI app
def create_app():
"""An application factory for use by a WSGI server."""
config["aura"].add(
{
"host": u"127.0.0.1",
"port": 8337,
"cors": [],
"cors_supports_credentials": False,
"page_limit": 500,
}
)
app = Flask(__name__)
# Register AURA blueprint view functions under a URL prefix
app.register_blueprint(aura_bp, url_prefix="/aura")
# AURA specifies mimetype MUST be this
app.config["JSONIFY_MIMETYPE"] = "application/vnd.api+json"
# Disable auto-sorting of JSON keys
app.config["JSON_SORT_KEYS"] = False
# Provide a way to access the beets library
# The normal method of using the Library and config provided in the
# command function is not used because create_app() could be called
# by an external WSGI server.
# NOTE: this uses a 'private' function from beets.ui.__init__
app.config["lib"] = _open_library(config)
# Enable CORS if required
cors = config["aura"]["cors"].as_str_seq(list)
if cors:
from flask_cors import CORS
# "Accept" is the only header clients use
app.config["CORS_ALLOW_HEADERS"] = "Accept"
app.config["CORS_RESOURCES"] = {r"/aura/*": {"origins": cors}}
app.config["CORS_SUPPORTS_CREDENTIALS"] = config["aura"][
"cors_supports_credentials"
].get(bool)
CORS(app)
return app
# Beets Plugin Hook
class AURAPlugin(BeetsPlugin):
    """The BeetsPlugin subclass for the AURA server plugin."""

    def __init__(self):
        """Add configuration options for the AURA plugin."""
        super(AURAPlugin, self).__init__()

    def commands(self):
        """Expose the ``beet aura`` subcommand that runs the server."""

        def run_aura(lib, opts, args):
            """Serve the AURA app using Flask's built-in server.

            Args:
                lib: A beets Library object (not used; the app opens
                    its own).
                opts: Command line options. An optparse.Values object.
                args: The list of arguments to process (not used).
            """
            # The built-in server is not intended for production use,
            # but is fine for small deployments.
            create_app().run(
                host=self.config["host"].get(str),
                port=self.config["port"].get(int),
                debug=opts.debug,
                threaded=True,
            )

        cmd = Subcommand("aura", help=u"run an AURA server")
        cmd.parser.add_option(
            u"-d",
            u"--debug",
            action="store_true",
            default=False,
            help=u"use Flask debug mode",
        )
        cmd.func = run_aura
        return [cmd]

View file

@ -358,7 +358,7 @@ class ConvertPlugin(BeetsPlugin):
item.store() # Store new path and audio data.
if self.config['embed'] and not linked:
album = item.get_album()
album = item._cached_album
if album and album.artpath:
self._log.debug(u'embedding album art from {}',
util.displayable_path(album.artpath))

View file

@ -4,84 +4,104 @@ Changelog
1.5.0 (in development)
----------------------
New features:
This long overdue release of beets includes far too many exciting and useful
features than could ever be satisfactorily enumerated.
As a technical detail, it also introduces two new external libraries:
`MediaFile`_ and `Confuse`_ used to be part of beets but are now reusable
dependencies---packagers, please take note.
Finally, this is the last version of beets where we intend to support Python
2.x; future releases will soon require Python 3.5.
Major new features:
* :doc:`/plugins/mpdstats`: Add strip_path option to help build the right local path
from MPD information
* Submitting acoustID information on tracks which already have a fingerprint
:bug:`3834`
* conversion uses par_map to parallelize conversion jobs in python3
* Add ``title_case`` config option to lastgenre to make TitleCasing optional.
* When config is printed with no available configuration a new message is printed.
:bug:`3779`
* When importing a duplicate album, the importer now asks whether it should "Keep all" instead of "Keep both".
:bug:`3569`
* :doc:`/plugins/chroma`: Update file metadata after generating fingerprints through the `submit` command.
* :doc:`/plugins/lastgenre`: Added more heavy metal genres: https://en.wikipedia.org/wiki/Heavy_metal_genres to genres.txt and genres-tree.yaml
* :doc:`/plugins/subsonicplaylist`: import playlist from a subsonic server.
* :doc:`/plugins/subsonicupdate`: Automatically choose between token and
password-based authentication based on server version
* A new :ref:`reflink` config option instructs the importer to create fast,
copy-on-write file clones on filesystems that support them. Thanks to
:user:`rubdos`.
* A new :ref:`extra_tags` configuration option allows more tagged metadata
to be included in MusicBrainz queries.
* A new :doc:`/plugins/fish` adds `Fish shell`_ tab autocompletion to beets
* :doc:`plugins/fetchart` and :doc:`plugins/embedart`: Added a new ``quality``
option that controls the quality of the image output when the image is
resized.
* :doc:`plugins/keyfinder`: Added support for `keyfinder-cli`_
Thanks to :user:`BrainDamage`.
* :doc:`plugins/fetchart`: Added a new ``high_resolution`` config option to
allow downloading of higher resolution iTunes artwork (at the expense of
file size).
:bug:`3391`
* :doc:`plugins/discogs` now adds two extra fields: `discogs_labelid` and
`discogs_artistid`
:bug:`3413`
* :doc:`/plugins/export`: Added new ``-f`` (``--format``) flag;
which allows for the ability to export in json, jsonlines, csv and xml.
Thanks to :user:`austinmm`.
:bug:`3402`
* :doc:`/plugins/unimported`: lets you find untracked files in your library directory.
* A new :doc:`/plugins/unimported` lets you find untracked files in your
library directory.
* We now fetch information about `works`_ from MusicBrainz.
MusicBrainz matches provide the fields ``work`` (the title), ``mb_workid``
(the MBID), and ``work_disambig`` (the disambiguation string).
Thanks to :user:`dosoe`.
:bug:`2580` :bug:`3272`
* :doc:`/plugins/convert`: Added new ``-l`` (``--link``) flag and ``link``
option as well as the ``-H`` (``--hardlink``) flag and ``hardlink``
option which symlinks or hardlinks files that do not need to
be converted instead of copying them.
:bug:`2324`
* :doc:`/plugins/bpd`: BPD now supports most of the features of version 0.16
of the MPD protocol. This is enough to get it talking to more complicated
clients like ncmpcpp, but there are still some incompatibilities, largely due
to MPD commands we don't support yet. Let us know if you find an MPD client
that doesn't get along with BPD!
:bug:`3214` :bug:`800`
* :doc:`/plugins/replaygain`: The plugin now supports a ``per_disc`` option
which enables calculation of album ReplayGain on disc level instead of album
level.
Thanks to :user:`samuelnilsson`
:bug:`293`
* :doc:`/plugins/replaygain`: The new ``ffmpeg`` ReplayGain backend supports
``R128_`` tags.
:bug:`3056`
* :doc:`plugins/replaygain`: ``r128_targetlevel`` is a new configuration option
for the ReplayGain plugin: It defines the reference volume for files using
``R128_`` tags. ``targetlevel`` only configures the reference volume for
``REPLAYGAIN_`` files.
:bug:`3065`
* A new :doc:`/plugins/parentwork` gets information about the original work,
which is useful for classical music.
Thanks to :user:`dosoe`.
:bug:`2580` :bug:`3279`
* :doc:`/plugins/discogs`: The field now collects the "style" field.
* :doc:`/plugins/bpd`: BPD now supports most of the features of version 0.16
of the MPD protocol. This is enough to get it talking to more complicated
clients like ncmpcpp, but there are still some incompatibilities, largely due
to MPD commands we don't support yet. (Let us know if you find an MPD client
that doesn't get along with BPD!)
:bug:`3214` :bug:`800`
* A new :doc:`/plugins/deezer` can autotag tracks and albums using the
`Deezer`_ database.
Thanks to :user:`rhlahuja`.
:bug:`3355`
Other new things:
* :doc:`/plugins/mpdstats`: Add a new `strip_path` option to help build the
right local path from MPD information.
* :doc:`/plugins/convert`: Conversion can now parallelize conversion jobs on
Python 3.
* :doc:`/plugins/lastgenre`: Add a new `title_case` config option to make
title-case formatting optional.
* There's a new message when running ``beet config`` when there's no available
configuration file.
:bug:`3779`
* When importing a duplicate album, the prompt now says "keep all" instead of
"keep both" to reflect that there may be more than two albums involved.
:bug:`3569`
* :doc:`/plugins/chroma`: The plugin now updates file metadata after
generating fingerprints through the `submit` command.
* :doc:`/plugins/lastgenre`: Added more heavy metal genres to the built-in
genre filter lists.
* A new :doc:`/plugins/subsonicplaylist` can import playlists from a Subsonic
server.
* :doc:`/plugins/subsonicupdate`: The plugin now automatically chooses between
token- and password-based authentication based on server version.
* A new :ref:`extra_tags` configuration option lets you use more metadata in
MusicBrainz queries to further narrow the search.
* A new :doc:`/plugins/fish` adds `Fish shell`_ tab autocompletion to beets.
* :doc:`plugins/fetchart` and :doc:`plugins/embedart`: Added a new ``quality``
option that controls the quality of the image output when the image is
resized.
* :doc:`plugins/keyfinder`: Added support for `keyfinder-cli`_.
Thanks to :user:`BrainDamage`.
* :doc:`plugins/fetchart`: Added a new ``high_resolution`` config option to
allow downloading of higher resolution iTunes artwork (at the expense of
file size).
:bug:`3391`
* :doc:`plugins/discogs`: The plugin applies two new fields: `discogs_labelid`
and `discogs_artistid`.
:bug:`3413`
* :doc:`/plugins/export`: Added a new ``-f`` (``--format``) flag,
which can export your data as JSON, JSON lines, CSV, or XML.
Thanks to :user:`austinmm`.
:bug:`3402`
* :doc:`/plugins/convert`: Added a new ``-l`` (``--link``) flag and ``link``
option as well as the ``-H`` (``--hardlink``) flag and ``hardlink``
option, which symlink or hardlink files that do not need to
be converted (instead of copying them).
:bug:`2324`
* :doc:`/plugins/replaygain`: The plugin now supports a ``per_disc`` option
that enables calculation of album ReplayGain on disc level instead of album
level.
Thanks to :user:`samuelnilsson`.
:bug:`293`
* :doc:`/plugins/replaygain`: The new ``ffmpeg`` ReplayGain backend supports
``R128_`` tags.
:bug:`3056`
* :doc:`plugins/replaygain`: A new ``r128_targetlevel`` configuration option
defines the reference volume for files using ``R128_`` tags. ``targetlevel``
only configures the reference volume for ``REPLAYGAIN_`` files.
:bug:`3065`
* :doc:`/plugins/discogs`: The plugin now collects the "style" field.
Thanks to :user:`thedevilisinthedetails`.
:bug:`2579` :bug:`3251`
* :doc:`/plugins/absubmit`: By default, the plugin now avoids re-analyzing
files that already have AB data.
files that already have AcousticBrainz data.
There are new ``force`` and ``pretend`` options to help control this new
behavior.
Thanks to :user:`SusannaMaria`.
@ -99,24 +119,21 @@ New features:
Windows.
Thanks to :user:`MartyLake`.
:bug:`3331` :bug:`3334`
* The 'data_source' field is now also applied as an album-level flexible
attribute during imports, allowing for more refined album level searches.
* The `data_source` field, which indicates which metadata source was used
during an autotagging import, is now also applied as an album-level flexible
attribute.
:bug:`3350` :bug:`1693`
* :doc:`/plugins/deezer`: Added Deezer plugin as an import metadata provider:
you can now match tracks and albums using the `Deezer`_ database.
Thanks to :user:`rhlahuja`.
:bug:`3355`
* :doc:`/plugins/beatport`: The plugin now gets the musical key, BPM and the
* :doc:`/plugins/beatport`: The plugin now gets the musical key, BPM, and
genre for each track.
:bug:`2080`
* :doc:`/plugins/beatport`: Fix default assignment of the musical key.
* :doc:`/plugins/beatport`: Fix the default assignment of the musical key.
:bug:`3377`
* :doc:`/plugins/bpsync`: Add `bpsync` plugin to sync metadata changes
from the Beatport database.
* :doc:`/plugins/beatport`: Fix assignment of `genre` and rename `musical_key`
to `initial_key`.
:bug:`3387`
* :doc:`/plugins/hook` now treats non-zero exit codes as errors.
* :doc:`/plugins/hook`: The plugin now treats non-zero exit codes as errors.
:bug:`3409`
* :doc:`/plugins/subsonicupdate`: A new ``url`` configuration replaces the
older (and now deprecated) separate ``host``, ``port``, and ``contextpath``
@ -131,27 +148,24 @@ New features:
:bug:`3459`
* :doc:`/plugins/fetchart`: Album art can now be fetched from `last.fm`_.
:bug:`3530`
* The classes ``AlbumInfo`` and ``TrackInfo`` now have flexible attributes,
which made it possible to solve :bug:`1547`.
Thanks to :user:`dosoe`.
* :doc:`/plugins/web`: The query API now interprets backslashes as path
separators to support path queries.
Thanks to :user:`nmeum`.
:bug:`3567`
* ``beet import`` now handles tar archives with bzip2 or gzip compression.
:bug:`3606`
* :doc:`/plugins/plexupdate`: Add option to use secure connection to Plex
server, and to ignore certificate validation errors if necessary.
* :doc:`/plugins/plexupdate`: Added an option to use a secure connection to
Plex server, and to ignore certificate validation errors if necessary.
:bug:`2871`
* :doc:`/plugins/lyrics`: Improved searching Genius backend when artist
contained special characters.
* :doc:`/plugins/lyrics`: Improved searching on the Genius backend when the
artist contains special characters.
:bug:`3634`
* :doc:`/plugins/parentwork`: Also get the composition date of the parent work,
instead of just the child work.
Thanks to :user:`aereaux`.
:bug:`3650`
* :doc:`/plugins/lyrics`: Fix a bug in the heuristic for detecting valid
lyrics in the Google source of the lyrics plugin
lyrics in the Google source.
:bug:`2969`
* :doc:`/plugins/thumbnails`: Fix a bug where pathlib expected a string instead
of bytes for a path.
@ -178,11 +192,17 @@ New features:
* :doc:`/plugins/replaygain` now does its analysis in parallel when using
the ``command`` or ``ffmpeg`` backends.
:bug:`3478`
* Fields in queries now fall back to an item's album and check its fields too.
Notably, this allows querying items by an album flex attribute, also in path
configuration.
Thanks to :user:`FichteFoll`.
:bug:`2797` :bug:`2988`
* Removes usage of the bs1770gain replaygain backend.
Thanks to :user:`SamuelCook`.
* Added ``trackdisambig`` which stores the recording disambiguation from
MusicBrainz for each track.
:bug:`1904`
* The :doc:`/plugins/aura` has arrived!
Fixes:
@ -310,6 +330,9 @@ Fixes:
information. Thanks to :user:`dosoe`.
* :doc:`/plugins/discogs`: Replace deprecated discogs-client library with community
supported python3-discogs-client library. :bug:`3608`
* :doc:`/plugins/chroma`: Fixed submitting AcoustID information for tracks
that already have a fingerprint.
:bug:`3834`
For plugin developers:
@ -344,6 +367,16 @@ For plugin developers:
:bug:`3355`
* The autotag hooks have been modified such that they now take 'bpm',
'musical_key' and a per-track based 'genre' as attributes.
* Item (and attribute) access on an item now falls back to the album's
attributes as well. If you specifically want to access an item's attributes,
use ``Item.get(key, with_album=False)``. :bug:`2988`
* ``Item.keys`` also has a ``with_album`` argument now, defaulting to ``True``.
* A ``revision`` attribute has been added to ``Database``. It is increased on
every transaction that mutates it. :bug:`2988`
* The classes ``AlbumInfo`` and ``TrackInfo`` now convey arbitrary attributes
instead of a fixed, built-in set of field names (which was important to
address :bug:`1547`).
Thanks to :user:`dosoe`.
For packagers:

198
docs/plugins/aura.rst Normal file
View file

@ -0,0 +1,198 @@
AURA Plugin
===========
This plugin is a server implementation of the `AURA`_ specification using the
`Flask`_ framework. AURA is still a work in progress and doesn't yet have a
stable version, but this server should be kept up to date. You are advised to
read the :ref:`aura-issues` section.
.. _AURA: https://auraspec.readthedocs.io
.. _Flask: https://palletsprojects.com/p/flask/
Install
-------
The ``aura`` plugin depends on `Flask`_, which can be installed using
``python -m pip install flask``. Then you can enable the ``aura`` plugin in
your configuration (see :ref:`using-plugins`).
It is likely that you will need to enable :ref:`aura-cors`, which introduces
an additional dependency: `flask-cors`_. This can be installed with
``python -m pip install flask-cors``.
If `Pillow`_ is installed (``python -m pip install Pillow``) then the optional
``width`` and ``height`` attributes are included in image resource objects.
.. _flask-cors: https://flask-cors.readthedocs.io
.. _Pillow: https://pillow.readthedocs.io
Usage
-----
Use ``beet aura`` to start the AURA server.
By default Flask's built-in server is used, which will give a warning about
using it in a production environment. It is safe to ignore this warning if the
server will have only a few users.
Alternatively, you can use ``beet aura -d`` to start the server in
`development mode`_, which will reload the server every time the AURA plugin
file is changed.
You can specify the hostname and port number used by the server in your
:doc:`configuration file </reference/config>`. For more detail see the
:ref:`configuration` section below.
If you would prefer to use a different WSGI server, such as gunicorn or uWSGI,
then see :ref:`aura-external-server`.
AURA is designed to separate the client and server functionality. This plugin
provides the server but not the client, so unless you like looking at JSON you
will need a separate client. Currently the only client is `AURA Web Client`_.
By default the API is served under http://127.0.0.1:8337/aura/. For example
information about the track with an id of 3 can be obtained at
http://127.0.0.1:8337/aura/tracks/3.
**Note the absence of a trailing slash**:
http://127.0.0.1:8337/aura/tracks/3/ returns a ``404 Not Found`` error.
.. _development mode: https://flask.palletsprojects.com/en/1.1.x/server
.. _AURA Web Client: https://sr.ht/~callum/aura-web-client/
.. _configuration:
Configuration
-------------
To configure the plugin, make an ``aura:`` section in your
configuration file. The available options are:
- **host**: The server hostname. Set this to ``0.0.0.0`` to bind to all
interfaces. Default: ``127.0.0.1``.
- **port**: The server port.
Default: ``8337``.
- **cors**: A YAML list of origins to allow CORS requests from (see
:ref:`aura-cors`, below).
Default: disabled.
- **cors_supports_credentials**: Allow authenticated requests when using CORS.
Default: disabled.
- **page_limit**: The number of items responses should be truncated to if the
client does not specify. Default: ``500``.
.. _aura-cors:
Cross-Origin Resource Sharing (CORS)
------------------------------------
`CORS`_ allows browser clients to make requests to the AURA server. You should
set the ``cors`` configuration option to a YAML list of allowed origins.
For example::
aura:
cors:
- http://www.example.com
- https://aura.example.org
Alternatively you can set it to ``'*'`` to enable access from all origins.
Note that there are security implications if you set the origin to ``'*'``,
so please research this before using it. Note the use of quote marks when
allowing all origins. Quote marks are also required when the origin is
``null``, for example when using ``file:///``.
If the server is behind a proxy that uses credentials, you might want to set
the ``cors_supports_credentials`` configuration option to true to let
in-browser clients log in. Note that this option has not been tested, so it
may not work.
.. _CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing
.. _aura-external-server:
Using an External WSGI Server
-----------------------------
If you would like to use a different WSGI server (not Flask's built-in one),
then you can! The ``beetsplug.aura`` module provides a WSGI callable called
``create_app()`` which can be used by many WSGI servers.
For example to run the AURA server using `gunicorn`_ use
``gunicorn 'beetsplug.aura:create_app()'``, or for `uWSGI`_ use
``uwsgi --http :8337 --module 'beetsplug.aura:create_app()'``.
Note that these commands just show how to use the AURA app and you would
probably use something a bit different in a production environment. Read the
relevant server's documentation to figure out what you need.
.. _gunicorn: https://gunicorn.org
.. _uWSGI: https://uwsgi-docs.readthedocs.io
Reverse Proxy Support
---------------------
The plugin should work behind a reverse proxy without further configuration,
however this has not been tested extensively. For details of what headers must
be rewritten and a sample NGINX configuration see `Flask proxy setups`_.
It is (reportedly) possible to run the application under a URL prefix (for
example so you could have ``/foo/aura/server`` rather than ``/aura/server``),
but you'll have to work it out for yourself :-)
If using NGINX, do **not** add a trailing slash (``/``) to the URL where the
application is running, otherwise you will get a 404. However if you are using
Apache then you **should** add a trailing slash.
.. _Flask proxy setups: https://flask.palletsprojects.com/en/1.1.x/deploying/wsgi-standalone/#proxy-setups
.. _aura-issues:
Issues
------
As of writing there are some differences between the specification and this
implementation:
- Compound filters are not specified in AURA, but this server interprets
multiple ``filter`` parameters as AND. See `issue #19`_ for discussion.
- The ``bitrate`` parameter used for content negotiation is not supported.
Adding support for this is doable, but the way Flask handles acceptable MIME
types means it's a lot easier not to bother with it. This means an error
could be returned even if no transcoding was required.
It is possible that some attributes required by AURA could be absent from the
server's response if beets does not have a saved value for them. However, this
has not happened so far.
Beets fields (including flexible fields) that do not have an AURA equivalent
are not provided in any resource's attributes section, however these fields may
be used for filtering.
The ``mimetype`` and ``framecount`` attributes for track resources are not
supported. The first is due to beets storing the file type (e.g. ``MP3``), so
it is hard to filter by MIME type. The second is because there is no
corresponding beets field.
Artists are defined by the ``artist`` field on beets Items, which means some
albums have no ``artists`` relationship. Albums only have related artists
when their beets ``albumartist`` field is the same as the ``artist`` field on
at least one of its constituent tracks.
The only art tracked by beets is a single cover image, so only albums have
related images at the moment. This could be expanded to looking in the same
directory for other images, and relating tracks to their album's image.
There are likely to be some performance issues, especially with larger
libraries. Sorting, pagination and inclusion (most notably of images) are
probably the main offenders. On a related note, the program attempts to import
Pillow every time it constructs an image resource object, which is not good.
The beets library is accessed using a so called private function (with a single
leading underscore) ``beets.ui.__init__._open_library()``. This shouldn't cause
any issues but it is probably not best practice.
.. _issue #19: https://github.com/beetbox/aura/issues/19

View file

@ -61,6 +61,7 @@ following to your configuration::
absubmit
acousticbrainz
aura
badfiles
beatport
bpd
@ -184,6 +185,7 @@ Path Formats
Interoperability
----------------
* :doc:`aura`: A server implementation of the `AURA`_ specification.
* :doc:`badfiles`: Check audio file integrity.
* :doc:`embyupdate`: Automatically notifies `Emby`_ whenever the beets library changes.
* :doc:`fish`: Adds `Fish shell`_ tab autocompletion to ``beet`` commands.
@ -205,6 +207,7 @@ Interoperability
library changes.
.. _AURA: https://auraspec.readthedocs.io
.. _Emby: https://emby.media
.. _Fish shell: https://fishshell.com/
.. _Plex: https://plex.tv

View file

@ -225,6 +225,31 @@ class MigrationTest(unittest.TestCase):
self.fail("select failed")
class TransactionTest(unittest.TestCase):
    """Check that the database revision counter reacts to transactions."""

    def setUp(self):
        self.db = DatabaseFixture1(':memory:')

    def tearDown(self):
        self.db._connection().close()

    def test_mutate_increase_revision(self):
        # A mutating statement must bump the database revision.
        before = self.db.revision
        with self.db.transaction() as tx:
            tx.mutate(
                'INSERT INTO {0} '
                '(field_one) '
                'VALUES (?);'.format(ModelFixture1._table),
                (111,),
            )
        self.assertGreater(self.db.revision, before)

    def test_query_no_increase_revision(self):
        # A read-only query must leave the revision untouched.
        before = self.db.revision
        with self.db.transaction() as tx:
            tx.query('PRAGMA table_info(%s)' % ModelFixture1._table)
        self.assertEqual(self.db.revision, before)
class ModelTest(unittest.TestCase):
def setUp(self):
self.db = DatabaseFixture1(':memory:')
@ -246,6 +271,30 @@ class ModelTest(unittest.TestCase):
row = self.db._connection().execute('select * from test').fetchone()
self.assertEqual(row['field_one'], 123)
def test_revision(self):
    """Storing bumps the model revision; load() only re-reads stale models."""
    old_rev = self.db.revision
    model = ModelFixture1()
    model.add(self.db)
    model.store()
    # A store synchronizes the model with the database revision.
    self.assertEqual(model._revision, self.db.revision)
    self.assertGreater(self.db.revision, old_rev)

    mid_rev = self.db.revision
    other = ModelFixture1()
    other.add(self.db)
    other.store()
    self.assertGreater(other._revision, mid_rev)
    self.assertGreater(self.db.revision, model._revision)

    # The database revision moved past the first model's, so it
    # should be re-loaded from the database.
    model.load()
    self.assertEqual(model._revision, self.db.revision)

    # The second model is already current, so load() skips the re-read.
    prev = other._revision
    other.load()
    self.assertEqual(other._revision, prev)
def test_retrieve_by_id(self):
model = ModelFixture1()
model.add(self.db)

View file

@ -49,7 +49,7 @@ class IPFSPluginTest(unittest.TestCase, TestHelper):
want_item = test_album.items()[2]
for check_item in added_album.items():
try:
if check_item.ipfs:
if check_item.get('ipfs', with_album=False):
ipfs_item = os.path.basename(want_item.path).decode(
_fsencoding(),
)
@ -57,7 +57,8 @@ class IPFSPluginTest(unittest.TestCase, TestHelper):
ipfs_item)
want_path = bytestring_path(want_path)
self.assertEqual(check_item.path, want_path)
self.assertEqual(check_item.ipfs, want_item.ipfs)
self.assertEqual(check_item.get('ipfs', with_album=False),
want_item.ipfs)
self.assertEqual(check_item.title, want_item.title)
found = True
except AttributeError:

View file

@ -132,6 +132,21 @@ class GetSetTest(_common.TestCase):
def test_invalid_field_raises_attributeerror(self):
    # Attribute access for an unknown field must fail loudly.
    with self.assertRaises(AttributeError):
        getattr(self.i, u'xyzzy')
def test_album_fallback(self):
    """Integration test of the item-to-album attribute fallback."""
    lib = beets.library.Library(':memory:')
    i = item(lib)
    album = lib.add_album([i])
    album['flex'] = u'foo'
    album.store()

    # The album flex attribute is visible through the item...
    self.assertTrue('flex' in i)
    self.assertEqual(i['flex'], u'foo')
    self.assertEqual(i.get('flex'), u'foo')
    # ...but not when the album fallback is disabled.
    self.assertFalse('flex' in i.keys(with_album=False))
    self.assertEqual(i.get('flex', with_album=False), None)
    # Unknown fields still fall back to the default.
    self.assertEqual(i.get('flexx'), None)
class DestinationTest(_common.TestCase):
def setUp(self):
@ -491,6 +506,24 @@ class DestinationTest(_common.TestCase):
dest = self.i.destination()
self.assertEqual(dest[-2:], b'XX')
def test_album_field_query(self):
    # A path-format query on an album flex attribute selects the
    # matching format once the attribute is set.
    self.lib.directory = b'one'
    self.lib.path_formats = [(u'default', u'two'),
                             (u'flex:foo', u'three')]
    album = self.lib.add_album([self.i])
    self.assertEqual(self.i.destination(), np('one/two'))
    album['flex'] = u'foo'
    album.store()
    self.assertEqual(self.i.destination(), np('one/three'))
def test_album_field_in_template(self):
    # An album flex attribute can be interpolated in a path template.
    self.lib.directory = b'one'
    self.lib.path_formats = [(u'default', u'$flex/two')]
    album = self.lib.add_album([self.i])
    album['flex'] = u'foo'
    album.store()
    self.assertEqual(self.i.destination(), np('one/foo/two'))
class ItemFormattedMappingTest(_common.LibTestCase):
def test_formatted_item_value(self):

View file

@ -109,7 +109,7 @@ class DummyDataTestCase(_common.TestCase, AssertsMixin):
items[2].comp = False
for item in items:
self.lib.add(item)
self.lib.add_album(items[:2])
self.album = self.lib.add_album(items[:2])
def assert_items_matched_all(self, results):
self.assert_items_matched(results, [
@ -300,6 +300,17 @@ class GetTest(DummyDataTestCase):
results = self.lib.items(q)
self.assertFalse(results)
def test_album_field_fallback(self):
    # Items can be queried by an attribute stored only on their album.
    self.album['albumflex'] = u'foo'
    self.album.store()
    results = self.lib.items(u'albumflex:foo')
    self.assert_items_matched(results, [
        u'foo bar',
        u'baz qux',
    ])
def test_invalid_query(self):
with self.assertRaises(InvalidQueryArgumentValueError) as raised:
dbcore.query.NumericQuery('year', u'199a')