benchmarking for slow matches

Investigating #609. The culprit seems to be repeated reading of the config's
weight settings.
Adrian Sampson 2014-04-03 11:56:26 -07:00
parent d116e03bed
commit cd57c8da7f
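
For context, a minimal sketch of the suspected pattern described in the commit message, not the actual beets code (compare(), score_candidates_slow(), and the 'track_weight' key are hypothetical names): if the weight settings are looked up in the config for every track/candidate comparison, the lookup cost is multiplied by the number of comparisons, whereas reading them once outside the loop makes it a one-time cost.

def compare(track, candidate):
    # Placeholder similarity measure for the sketch.
    return abs(len(track) - len(candidate))

def score_candidates_slow(tracks, candidates, config):
    scores = []
    for candidate in candidates:
        for track in tracks:
            # Config read repeated inside the innermost loop.
            weight = float(config.get('track_weight', 1.0))
            scores.append(weight * compare(track, candidate))
    return scores

def score_candidates_cached(tracks, candidates, config):
    # Read the weight once and reuse it for every comparison.
    weight = float(config.get('track_weight', 1.0))
    return [weight * compare(track, candidate)
            for candidate in candidates
            for track in tracks]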


@@ -1,5 +1,5 @@
 # This file is part of beets.
-# Copyright 2013, Adrian Sampson.
+# Copyright 2014, Adrian Sampson.
 #
 # Permission is hereby granted, free of charge, to any person obtaining
 # a copy of this software and associated documentation files (the
@@ -21,10 +21,14 @@ from beets import ui
 from beets import vfs
 from beets import library
 from beets.util.functemplate import Template
+from beets.autotag import match
+from beets import plugins
+from beets import importer
 import cProfile
 import timeit
 
-def benchmark(lib, prof):
+def aunique_benchmark(lib, prof):
     def _build_tree():
         vfs.libtree(lib)
@@ -52,15 +56,45 @@ def benchmark(lib, prof):
     interval = timeit.timeit(_build_tree, number=1)
     print('Without %aunique:', interval)
 
+def match_benchmark(lib, prof, query=None,
+                    album_id='9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc'):
+    # Get an album from the library to use as the source for the match.
+    items = lib.albums(query).get().items()
+
+    # Ensure fingerprinting is invoked (if enabled).
+    plugins.send('import_task_start',
+                 task=importer.ImportTask(None, None, items),
+                 session=importer.ImportSession(lib, None, None, None))
+
+    # Run the match.
+    def _run_match():
+        match.tag_album(items, search_id=album_id)
+    if prof:
+        cProfile.runctx('_run_match()', {}, {'_run_match': _run_match},
+                        'match.prof')
+    else:
+        interval = timeit.timeit(_run_match, number=1)
+        print('match duration:', interval)
+
 class BenchmarkPlugin(BeetsPlugin):
     """A plugin for performing some simple performance benchmarks.
     """
     def commands(self):
-        def bench_func(lib, opts, args):
-            benchmark(lib, opts.profile)
-        bench_cmd = ui.Subcommand('bench', help='benchmark')
-        bench_cmd.parser.add_option('-p', '--profile',
-                                    action='store_true', default=False,
-                                    help='performance profiling')
-        bench_cmd.func = bench_func
-        return [bench_cmd]
+        aunique_bench_cmd = ui.Subcommand('bench_aunique',
+                                          help='benchmark for %aunique{}')
+        aunique_bench_cmd.parser.add_option('-p', '--profile',
+                                            action='store_true', default=False,
+                                            help='performance profiling')
+        aunique_bench_cmd.func = lambda lib, opts, args: \
+            aunique_benchmark(lib, opts.profile)
+
+        match_bench_cmd = ui.Subcommand('bench_match',
+                                        help='benchmark for track matching')
+        match_bench_cmd.parser.add_option('-p', '--profile',
+                                          action='store_true', default=False,
+                                          help='performance profiling')
+        match_bench_cmd.func = lambda lib, opts, args: \
+            match_benchmark(lib, opts.profile, ui.decargs(args))
+
+        return [aunique_bench_cmd, match_bench_cmd]
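
As a usage note, one plausible way to work with the new subcommand: running "beet bench_match -p" (or "beet bench_aunique -p") writes cProfile data instead of a single timing; the diff above uses match.prof as the output filename, and the standard-library pstats module can read it. The sort key and entry count below are just reasonable defaults, not anything prescribed by the plugin.

import pstats

# Load the cProfile output written by the bench_match command and show
# the 20 entries with the highest cumulative time.
stats = pstats.Stats('match.prof')
stats.sort_stats('cumulative').print_stats(20)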