python3: apply various fixes from python-modernize "dict_six"

Imports were then fixed and consolidated so that they derive from the
internal polyglot.builtins module.
Eli Schwartz 2019-03-23 23:09:38 -04:00
parent 13d236f556
commit a623717d96
305 changed files with 1489 additions and 1391 deletions
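
For context: python-modernize's "dict_six" fixer rewrites Python 2 dictionary
method calls such as d.iteritems(), d.itervalues() and d.iterkeys() into
six-style function calls (iteritems(d), and so on), which can be given a
single definition that works on both Python 2 and 3. calibre routes these
calls through its internal polyglot.builtins module rather than depending on
six. A minimal sketch of the kind of shim involved (the actual
polyglot.builtins implementation may differ in detail):

    # Sketch of Python 2/3 dict-iteration shims, in the spirit of
    # polyglot.builtins; not calibre's actual implementation.
    import sys

    if sys.version_info.major >= 3:
        def iteritems(d):
            return iter(d.items())

        def itervalues(d):
            return iter(d.values())

        def iterkeys(d):
            return iter(d.keys())
    else:
        # On Python 2 the native iterator methods are used directly.
        def iteritems(d):
            return d.iteritems()

        def itervalues(d):
            return d.itervalues()

        def iterkeys(d):
            return d.iterkeys()

With this indirection, a call site like "for k, v in iteritems(m):" behaves
identically on either interpreter, which is exactly the transformation
applied to every file in the diff below.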


@@ -15,6 +15,7 @@
 from calibre.ebooks.oeb.polish.check.links import check_links, UnreferencedResource
 from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_opf
 from calibre.utils.imghdr import identify
+from polyglot.builtins import iteritems
 class EPUBHelpBuilder(EpubBuilder):
@@ -28,7 +29,7 @@ def build_epub(self, outdir, outname):
 def fix_epub(self, container):
 ' Fix all the brokenness that sphinx\'s epub builder creates '
-for name, mt in container.mime_map.iteritems():
+for name, mt in iteritems(container.mime_map):
 if mt in OEB_DOCS:
 self.workaround_ade_quirks(container, name)
 pretty_html_tree(container, container.parsed(name))
@@ -49,9 +50,9 @@ def workaround_ade_quirks(self, container, name):
 def fix_opf(self, container):
 spine_names = {n for n, l in container.spine_names}
 spine = container.opf_xpath('//opf:spine')[0]
-rmap = {v:k for k, v in container.manifest_id_map.iteritems()}
+rmap = {v:k for k, v in iteritems(container.manifest_id_map)}
 # Add unreferenced text files to the spine
-for name, mt in container.mime_map.iteritems():
+for name, mt in iteritems(container.mime_map):
 if mt in OEB_DOCS and name not in spine_names:
 spine_names.add(name)
 container.insert_into_xml(spine, spine.makeelement(OPF('itemref'), idref=rmap[name]))


@@ -33,6 +33,7 @@
 from functools import partial
 from multiprocessing.pool import ThreadPool
 from xml.sax.saxutils import escape, quoteattr
+from polyglot.builtins import iteritems, itervalues
 # }}}
 USER_AGENT = 'calibre mirror'
@@ -292,7 +293,7 @@ def get_plugin_info(raw, check_for_qt5=False):
 metadata = names[inits[0]]
 else:
 # Legacy plugin
-for name, val in names.iteritems():
+for name, val in iteritems(names):
 if name.endswith('plugin.py'):
 metadata = val
 break
@@ -331,7 +332,7 @@ def update_plugin_from_entry(plugin, entry):
 def fetch_plugin(old_index, entry):
-lm_map = {plugin['thread_id']:plugin for plugin in old_index.itervalues()}
+lm_map = {plugin['thread_id']:plugin for plugin in itervalues(old_index)}
 raw = read(entry.url)
 url, name = parse_plugin_zip_url(raw)
 if url is None:
@@ -403,7 +404,7 @@ def fetch_plugins(old_index):
 log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
 log(plugin)
 # Move staged files
-for plugin in ans.itervalues():
+for plugin in itervalues(ans):
 if plugin['file'].startswith('staging_'):
 src = plugin['file']
 plugin['file'] = src.partition('_')[-1]
@@ -411,7 +412,7 @@ def fetch_plugins(old_index):
 raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
 atomic_write(raw, PLUGINS)
 # Cleanup any extra .zip files
-all_plugin_files = {p['file'] for p in ans.itervalues()}
+all_plugin_files = {p['file'] for p in itervalues(ans)}
 extra = set(glob.glob('*.zip')) - all_plugin_files
 for x in extra:
 os.unlink(x)
@@ -498,7 +499,7 @@ def plugin_stats(x):
 name, count = x
 return '<tr><td>%s</td><td>%s</td></tr>\n' % (escape(name), count)
-pstats = map(plugin_stats, sorted(stats.iteritems(), reverse=True, key=lambda x:x[1]))
+pstats = map(plugin_stats, sorted(iteritems(stats), reverse=True, key=lambda x:x[1]))
 stats = '''\
 <!DOCTYPE html>
 <html>


@@ -4,7 +4,8 @@
 __docformat__ = 'restructuredtext en'
 import sys, os, re, time, random, warnings
-from polyglot.builtins import builtins, codepoint_to_chr, unicode_type, range
+from polyglot.builtins import (builtins, codepoint_to_chr, iteritems,
+itervalues, unicode_type, range)
 builtins.__dict__['dynamic_property'] = lambda func: func(None)
 from math import floor
 from functools import partial
@@ -706,7 +707,7 @@ def remove_bracketed_text(src,
 counts = Counter()
 buf = []
 src = force_unicode(src)
-rmap = dict([(v, k) for k, v in brackets.iteritems()])
+rmap = dict([(v, k) for k, v in iteritems(brackets)])
 for char in src:
 if char in brackets:
 counts[char] += 1
@@ -714,7 +715,7 @@ def remove_bracketed_text(src,
 idx = rmap[char]
 if counts[idx] > 0:
 counts[idx] -= 1
-elif sum(counts.itervalues()) < 1:
+elif sum(itervalues(counts)) < 1:
 buf.append(char)
 return u''.join(buf)


@@ -23,6 +23,7 @@
 plugin_dir, OptionParser)
 from calibre.ebooks.metadata.sources.base import Source
 from calibre.constants import DEBUG, numeric_version
+from polyglot.builtins import iteritems, itervalues
 builtin_names = frozenset(p.name for p in builtin_plugins)
 BLACKLISTED_PLUGINS = frozenset({'Marvin XD', 'iOS reader applications'})
@@ -347,7 +348,7 @@ def key(plugin):
 return (1 if plugin.plugin_path is None else 0), plugin.name
 for group in (_metadata_readers, _metadata_writers):
-for plugins in group.itervalues():
+for plugins in itervalues(group):
 if len(plugins) > 1:
 plugins.sort(key=key)
@@ -640,7 +641,7 @@ def patch_metadata_plugins(possibly_updated_plugins):
 # Metadata source plugins dont use initialize() but that
 # might change in the future, so be safe.
 patches[i].initialize()
-for i, pup in patches.iteritems():
+for i, pup in iteritems(patches):
 _initialized_plugins[i] = pup
 # }}}


@@ -2,7 +2,6 @@
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
 from __future__ import (unicode_literals, division, absolute_import,
 print_function)
-from polyglot.builtins import map, unicode_type
 __license__ = 'GPL v3'
 __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -15,7 +14,8 @@
 from calibre import as_unicode
 from calibre.customize import (Plugin, numeric_version, platform,
 InvalidPlugin, PluginNotFound)
-from polyglot.builtins import string_or_bytes
+from polyglot.builtins import (itervalues, iterkeys, map,
+string_or_bytes, unicode_type)
 # PEP 302 based plugin loading mechanism, works around the bug in zipimport in
 # python 2.x that prevents importing from zip files in locations whose paths
@@ -202,7 +202,7 @@ def load(self, path_to_zip_file):
 else:
 m = importlib.import_module(plugin_module)
 plugin_classes = []
-for obj in m.__dict__.itervalues():
+for obj in itervalues(m.__dict__):
 if isinstance(obj, type) and issubclass(obj, Plugin) and \
 obj.name != 'Trivial Plugin':
 plugin_classes.append(obj)
@@ -281,7 +281,7 @@ def _locate_code(self, zf, path_to_zip_file):
 # Legacy plugins
 if '__init__' not in names:
-for name in list(names.iterkeys()):
+for name in list(iterkeys(names)):
 if '.' not in name and name.endswith('plugin'):
 names['__init__'] = names[name]
 break


@@ -10,7 +10,7 @@
 SPOOL_SIZE = 30*1024*1024
 import numbers
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, range
 def _get_next_series_num_for_list(series_indices, unwrap=True):
@@ -82,7 +82,7 @@ def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, conve
 'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
 'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
 'languages']).union(set(fdata))
-for x, data in fdata.iteritems():
+for x, data in iteritems(fdata):
 if data['datatype'] == 'series':
 FIELDS.add('%d_index'%x)
 data = []


@@ -8,7 +8,7 @@
 import os, time, re
 from collections import defaultdict
-from polyglot.builtins import map, unicode_type
+from polyglot.builtins import itervalues, map, unicode_type
 from contextlib import contextmanager
 from functools import partial
@@ -137,7 +137,7 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
 if allow_path(path, ext, compiled_rules):
 formats[ext] = path
 if formats_ok(formats):
-yield list(formats.itervalues())
+yield list(itervalues(formats))
 else:
 books = defaultdict(dict)
 for path in listdir_impl(dirpath, sort_by_mtime=True):
@@ -145,9 +145,9 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
 if allow_path(path, ext, compiled_rules):
 books[icu_lower(key) if isinstance(key, unicode_type) else key.lower()][ext] = path
-for formats in books.itervalues():
+for formats in itervalues(books):
 if formats_ok(formats):
-yield list(formats.itervalues())
+yield list(itervalues(formats))
 def create_format_map(formats):


@@ -12,7 +12,8 @@
 from functools import partial
 import apsw
-from polyglot.builtins import unicode_type, reraise, string_or_bytes
+from polyglot.builtins import (iteritems, iterkeys, itervalues,
+unicode_type, reraise, string_or_bytes)
 from calibre import isbytestring, force_unicode, prints, as_unicode
 from calibre.constants import (iswindows, filesystem_encoding,
@@ -222,7 +223,7 @@ def step(ctxt, ndx, value):
 def finalize(ctxt):
 if len(ctxt) == 0:
 return None
-return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))
+return sep.join(map(ctxt.get, sorted(iterkeys(ctxt))))
 return ({}, step, finalize)
@@ -247,7 +248,7 @@ def step(ctxt, ndx, author, sort, link):
 ctxt[ndx] = ':::'.join((author, sort, link))
 def finalize(ctxt):
-keys = list(ctxt.iterkeys())
+keys = list(iterkeys(ctxt))
 l = len(keys)
 if l == 0:
 return None
@@ -733,7 +734,7 @@ def adapt_number(x, d):
 }
 # Create Tag Browser categories for custom columns
-for k in sorted(self.custom_column_label_map.iterkeys()):
+for k in sorted(iterkeys(self.custom_column_label_map)):
 v = self.custom_column_label_map[k]
 if v['normalized']:
 is_category = True
@@ -786,10 +787,10 @@ def initialize_tables(self): # {{{
 'last_modified':19, 'identifiers':20, 'languages':21,
 }
-for k,v in self.FIELD_MAP.iteritems():
+for k,v in iteritems(self.FIELD_MAP):
 self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
-base = max(self.FIELD_MAP.itervalues())
+base = max(itervalues(self.FIELD_MAP))
 for label_ in sorted(self.custom_column_label_map):
 data = self.custom_column_label_map[label_]
@@ -1263,7 +1264,7 @@ def read_tables(self):
 '''
 with self.conn: # Use a single transaction, to ensure nothing modifies the db while we are reading
-for table in self.tables.itervalues():
+for table in itervalues(self.tables):
 try:
 table.read(self)
 except:
@@ -1327,7 +1328,7 @@ def has_format(self, book_id, fmt, fname, path):
 def remove_formats(self, remove_map):
 paths = []
-for book_id, removals in remove_map.iteritems():
+for book_id, removals in iteritems(remove_map):
 for fmt, fname, path in removals:
 path = self.format_abspath(book_id, fmt, fname, path)
 if path is not None:
@@ -1585,7 +1586,7 @@ def update_path(self, book_id, title, author, path_field, formats_field):
 if samefile(spath, tpath):
 # The format filenames may have changed while the folder
 # name remains the same
-for fmt, opath in original_format_map.iteritems():
+for fmt, opath in iteritems(original_format_map):
 npath = format_map.get(fmt, None)
 if npath and os.path.abspath(npath.lower()) != os.path.abspath(opath.lower()) and samefile(opath, npath):
 # opath and npath are different hard links to the same file
@@ -1648,7 +1649,7 @@ def read_backup(self, path):
 def remove_books(self, path_map, permanent=False):
 self.executemany(
 'DELETE FROM books WHERE id=?', [(x,) for x in path_map])
-paths = {os.path.join(self.library_path, x) for x in path_map.itervalues() if x}
+paths = {os.path.join(self.library_path, x) for x in itervalues(path_map) if x}
 paths = {x for x in paths if os.path.exists(x) and self.is_deletable(x)}
 if permanent:
 for path in paths:
@@ -1663,7 +1664,7 @@ def add_custom_data(self, name, val_map, delete_first):
 self.executemany(
 'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
 [(book_id, name, json.dumps(val, default=to_json))
-for book_id, val in val_map.iteritems()])
+for book_id, val in iteritems(val_map)])
 def get_custom_book_data(self, name, book_ids, default=None):
 book_ids = frozenset(book_ids)
@@ -1722,7 +1723,7 @@ def delete_conversion_options(self, book_ids, fmt):
 def set_conversion_options(self, options, fmt):
 options = [(book_id, fmt.upper(), buffer(pickle_binary_string(data.encode('utf-8') if isinstance(data, unicode_type) else data)))
-for book_id, data in options.iteritems()]
+for book_id, data in iteritems(options)]
 self.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
 def get_top_level_move_items(self, all_paths):


@@ -11,7 +11,7 @@
 from io import BytesIO
 from collections import defaultdict, Set, MutableSet
 from functools import wraps, partial
-from polyglot.builtins import unicode_type, zip, string_or_bytes
+from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip, string_or_bytes
 from time import time
 from calibre import isbytestring, as_unicode
@@ -170,7 +170,7 @@ def _initialize_dynamic_categories(self):
 # Reconstruct the user categories, putting them into field_metadata
 fm = self.field_metadata
 fm.remove_dynamic_categories()
-for user_cat in sorted(self._pref('user_categories', {}).iterkeys(), key=sort_key):
+for user_cat in sorted(iterkeys(self._pref('user_categories', {})), key=sort_key):
 cat_name = '@' + user_cat # add the '@' to avoid name collision
 while cat_name:
 try:
@@ -181,7 +181,7 @@ def _initialize_dynamic_categories(self):
 # add grouped search term user categories
 muc = frozenset(self._pref('grouped_search_make_user_categories', []))
-for cat in sorted(self._pref('grouped_search_terms', {}).iterkeys(), key=sort_key):
+for cat in sorted(iterkeys(self._pref('grouped_search_terms', {})), key=sort_key):
 if cat in muc:
 # There is a chance that these can be duplicates of an existing
 # user category. Print the exception and continue.
@@ -200,7 +200,7 @@ def initialize_dynamic(self):
 self.dirtied_cache = {x:i for i, (x,) in enumerate(
 self.backend.execute('SELECT book FROM metadata_dirtied'))}
 if self.dirtied_cache:
-self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1
+self.dirtied_sequence = max(itervalues(self.dirtied_cache))+1
 self._initialize_dynamic_categories()
 @write_api
@@ -213,7 +213,7 @@ def set_user_template_functions(self, user_template_functions):
 @write_api
 def clear_composite_caches(self, book_ids=None):
-for field in self.composites.itervalues():
+for field in itervalues(self.composites):
 field.clear_caches(book_ids=book_ids)
 @write_api
@@ -229,7 +229,7 @@ def last_modified(self):
 def clear_caches(self, book_ids=None, template_cache=True, search_cache=True):
 if template_cache:
 self._initialize_template_cache() # Clear the formatter template cache
-for field in self.fields.itervalues():
+for field in itervalues(self.fields):
 if hasattr(field, 'clear_caches'):
 field.clear_caches(book_ids=book_ids) # Clear the composite cache and ondevice caches
 if book_ids:
@@ -247,7 +247,7 @@ def reload_from_db(self, clear_caches=True):
 with self.backend.conn: # Prevent other processes, such as calibredb from interrupting the reload by locking the db
 self.backend.prefs.load_from_db()
 self._search_api.saved_searches.load_from_db()
-for field in self.fields.itervalues():
+for field in itervalues(self.fields):
 if hasattr(field, 'table'):
 field.table.read(self.backend) # Reread data from metadata.db
@@ -358,7 +358,7 @@ def init(self):
 self.backend.read_tables()
 bools_are_tristate = self.backend.prefs['bools_are_tristate']
-for field, table in self.backend.tables.iteritems():
+for field, table in iteritems(self.backend.tables):
 self.fields[field] = create_field(field, table, bools_are_tristate,
 self.backend.get_template_functions)
 if table.metadata['datatype'] == 'composite':
@@ -368,7 +368,7 @@ def init(self):
 VirtualTable('ondevice'), bools_are_tristate,
 self.backend.get_template_functions)
-for name, field in self.fields.iteritems():
+for name, field in iteritems(self.fields):
 if name[0] == '#' and name.endswith('_index'):
 field.series_field = self.fields[name[:-len('_index')]]
 self.fields[name[:-len('_index')]].index_field = field
@@ -494,7 +494,7 @@ def all_field_names(self, field):
 return frozenset(self.fields[field].table.col_book_map)
 try:
-return frozenset(self.fields[field].table.id_map.itervalues())
+return frozenset(itervalues(self.fields[field].table.id_map))
 except AttributeError:
 raise ValueError('%s is not a many-one or many-many field' % field)
@@ -503,7 +503,7 @@ def get_usage_count_by_id(self, field):
 ''' Return a mapping of id to usage count for all values of the specified
 field, which must be a many-one or many-many field. '''
 try:
-return {k:len(v) for k, v in self.fields[field].table.col_book_map.iteritems()}
+return {k:len(v) for k, v in iteritems(self.fields[field].table.col_book_map)}
 except AttributeError:
 raise ValueError('%s is not a many-one or many-many field' % field)
@@ -528,13 +528,13 @@ def get_item_name(self, field, item_id):
 @read_api
 def get_item_id(self, field, item_name):
 ' Return the item id for item_name (case-insensitive) '
-rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
+rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
 return rmap.get(icu_lower(item_name) if isinstance(item_name, unicode_type) else item_name, None)
 @read_api
 def get_item_ids(self, field, item_names):
 ' Return the item id for item_name (case-insensitive) '
-rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
+rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
 return {name:rmap.get(icu_lower(name) if isinstance(name, unicode_type) else name, None) for name in item_names}
 @read_api
@@ -1038,13 +1038,13 @@ def mark_as_dirty(self, book_ids):
 new_dirtied = book_ids - already_dirtied
 already_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(already_dirtied)}
 if already_dirtied:
-self.dirtied_sequence = max(already_dirtied.itervalues()) + 1
+self.dirtied_sequence = max(itervalues(already_dirtied)) + 1
 self.dirtied_cache.update(already_dirtied)
 if new_dirtied:
 self.backend.executemany('INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
 ((x,) for x in new_dirtied))
 new_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(new_dirtied)}
-self.dirtied_sequence = max(new_dirtied.itervalues()) + 1
+self.dirtied_sequence = max(itervalues(new_dirtied)) + 1
 self.dirtied_cache.update(new_dirtied)
 @write_api
@@ -1075,7 +1075,7 @@ def set_field(self, name, book_id_to_val_map, allow_case_change=True, do_path_up
 if is_series:
 bimap, simap = {}, {}
 sfield = self.fields[name + '_index']
-for k, v in book_id_to_val_map.iteritems():
+for k, v in iteritems(book_id_to_val_map):
 if isinstance(v, string_or_bytes):
 v, sid = get_series_values(v)
 else:
@@ -1117,7 +1117,7 @@ def update_path(self, book_ids, mark_as_dirtied=True):
 @read_api
 def get_a_dirtied_book(self):
 if self.dirtied_cache:
-return random.choice(tuple(self.dirtied_cache.iterkeys()))
+return random.choice(tuple(iterkeys(self.dirtied_cache)))
 return None
 @read_api
@@ -1220,7 +1220,7 @@ def set_cover(self, book_id_data_map):
 QPixmap, file object or bytestring. It can also be None, in which
 case any existing cover is removed. '''
-for book_id, data in book_id_data_map.iteritems():
+for book_id, data in iteritems(book_id_data_map):
 try:
 path = self._field_for('path', book_id).replace('/', os.sep)
 except AttributeError:
@@ -1231,7 +1231,7 @@ def set_cover(self, book_id_data_map):
 for cc in self.cover_caches:
 cc.invalidate(book_id_data_map)
 return self._set_field('cover', {
-book_id:(0 if data is None else 1) for book_id, data in book_id_data_map.iteritems()})
+book_id:(0 if data is None else 1) for book_id, data in iteritems(book_id_data_map)})
 @write_api
 def add_cover_cache(self, cover_cache):
@@ -1332,14 +1332,14 @@ def protected_set_field(name, val):
 protected_set_field('identifiers', mi_idents)
 elif mi_idents:
 identifiers = self._field_for('identifiers', book_id, default_value={})
-for key, val in mi_idents.iteritems():
+for key, val in iteritems(mi_idents):
 if val and val.strip(): # Don't delete an existing identifier
 identifiers[icu_lower(key)] = val
 protected_set_field('identifiers', identifiers)
 user_mi = mi.get_all_user_metadata(make_copy=False)
 fm = self.field_metadata
-for key in user_mi.iterkeys():
+for key in iterkeys(user_mi):
 if (key in fm and user_mi[key]['datatype'] == fm[key]['datatype'] and (
 user_mi[key]['datatype'] != 'text' or (
 user_mi[key]['is_multiple'] == fm[key]['is_multiple']))):
@@ -1433,15 +1433,15 @@ def remove_formats(self, formats_map, db_only=False):
 :param db_only: If True, only remove the record for the format from the db, do not delete the actual format file from the filesystem.
 '''
 table = self.fields['formats'].table
-formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in formats_map.iteritems()}
+formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in iteritems(formats_map)}
-for book_id, fmts in formats_map.iteritems():
+for book_id, fmts in iteritems(formats_map):
 for fmt in fmts:
 self.format_metadata_cache[book_id].pop(fmt, None)
 if not db_only:
 removes = defaultdict(set)
-for book_id, fmts in formats_map.iteritems():
+for book_id, fmts in iteritems(formats_map):
 try:
 path = self._field_for('path', book_id).replace('/', os.sep)
 except:
@@ -1458,7 +1458,7 @@ def remove_formats(self, formats_map, db_only=False):
 size_map = table.remove_formats(formats_map, self.backend)
 self.fields['size'].table.update_sizes(size_map)
-self._update_last_modified(tuple(formats_map.iterkeys()))
+self._update_last_modified(tuple(iterkeys(formats_map)))
 @read_api
 def get_next_series_num_for(self, series, field='series', current_indices=False):
@@ -1481,7 +1481,7 @@ def get_next_series_num_for(self, series, field='series', current_indices=False)
 index_map = {book_id:self._fast_field_for(idf, book_id, default_value=1.0) for book_id in books}
 if current_indices:
 return index_map
-series_indices = sorted(index_map.itervalues())
+series_indices = sorted(itervalues(index_map))
 return _get_next_series_num_for_list(tuple(series_indices), unwrap=False)
 @read_api
@@ -1491,7 +1491,7 @@ def author_sort_from_authors(self, authors, key_func=icu_lower):
 string. '''
 table = self.fields['authors'].table
 result = []
-rmap = {key_func(v):k for k, v in table.id_map.iteritems()}
+rmap = {key_func(v):k for k, v in iteritems(table.id_map)}
 for aut in authors:
 aid = rmap.get(key_func(aut), None)
 result.append(author_to_author_sort(aut) if aid is None else table.asort_map[aid])
@@ -1503,10 +1503,10 @@ def data_for_has_book(self):
 implementation of :meth:`has_book` in a worker process without access to the
 db. '''
 try:
-return {icu_lower(title) for title in self.fields['title'].table.book_col_map.itervalues()}
+return {icu_lower(title) for title in itervalues(self.fields['title'].table.book_col_map)}
 except TypeError:
 # Some non-unicode titles in the db
-return {icu_lower(as_unicode(title)) for title in self.fields['title'].table.book_col_map.itervalues()}
+return {icu_lower(as_unicode(title)) for title in itervalues(self.fields['title'].table.book_col_map)}
 @read_api
 def has_book(self, mi):
@@ -1518,7 +1518,7 @@ def has_book(self, mi):
 if isbytestring(title):
 title = title.decode(preferred_encoding, 'replace')
 q = icu_lower(title).strip()
-for title in self.fields['title'].table.book_col_map.itervalues():
+for title in itervalues(self.fields['title'].table.book_col_map):
 if q == icu_lower(title):
 return True
 return False
@@ -1599,7 +1599,7 @@ def add_books(self, books, add_duplicates=True, apply_import_tags=True, preserve
 duplicates.append((mi, format_map))
 else:
 ids.append(book_id)
-for fmt, stream_or_path in format_map.iteritems():
+for fmt, stream_or_path in iteritems(format_map):
 if self.add_format(book_id, fmt, stream_or_path, dbapi=dbapi, run_hooks=run_hooks):
 fmt_map[fmt.lower()] = getattr(stream_or_path, 'name', stream_or_path) or '<stream>'
 run_plugins_on_postadd(dbapi or self, book_id, fmt_map)
@@ -1618,11 +1618,11 @@ def remove_books(self, book_ids, permanent=False):
 path = None
 path_map[book_id] = path
 if iswindows:
-paths = (x.replace(os.sep, '/') for x in path_map.itervalues() if x)
+paths = (x.replace(os.sep, '/') for x in itervalues(path_map) if x)
 self.backend.windows_check_if_files_in_use(paths)
 self.backend.remove_books(path_map, permanent=permanent)
-for field in self.fields.itervalues():
+for field in itervalues(self.fields):
 try:
 table = field.table
 except AttributeError:
@@ -1665,7 +1665,7 @@ def rename_items(self, field, item_id_to_new_name_map, change_index=True, restri
 restrict_to_book_ids = frozenset(restrict_to_book_ids)
 id_map = {}
 default_process_map = {}
-for old_id, new_name in item_id_to_new_name_map.iteritems():
+for old_id, new_name in iteritems(item_id_to_new_name_map):
 new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
 # Get a list of books in the VL with the item
 books_with_id = f.books_for(old_id)
@@ -1720,7 +1720,7 @@ def rename_items(self, field, item_id_to_new_name_map, change_index=True, restri
 raise ValueError('Cannot rename items for one-one fields: %s' % field)
 moved_books = set()
 id_map = {}
-for item_id, new_name in item_id_to_new_name_map.iteritems():
+for item_id, new_name in iteritems(item_id_to_new_name_map):
 new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
 books, new_id = func(item_id, new_names[0], self.backend)
 affected_books.update(books)
@@ -1735,7 +1735,7 @@ def rename_items(self, field, item_id_to_new_name_map, change_index=True, restri
 if affected_books:
 if field == 'authors':
 self._set_field('author_sort',
-{k:' & '.join(v) for k, v in self._author_sort_strings_for_books(affected_books).iteritems()})
+{k:' & '.join(v) for k, v in iteritems(self._author_sort_strings_for_books(affected_books))})
 self._update_path(affected_books, mark_as_dirtied=False)
 elif change_index and hasattr(f, 'index_field') and tweaks['series_index_auto_increment'] != 'no_change':
 for book_id in moved_books:
@@ -1835,7 +1835,7 @@ def tags_older_than(self, tag, delta=None, must_have_tag=None, must_have_authors
 insensitive).
 '''
-tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').iteritems()}
+tag_map = {icu_lower(v):k for k, v in iteritems(self._get_id_map('tags'))}
 tag = icu_lower(tag.strip())
 mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
 tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
@@ -1848,7 +1848,7 @@ def tags_older_than(self, tag, delta=None, must_have_tag=None, must_have_authors
 tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
 if tagged_books:
 if must_have_authors is not None:
-amap = {icu_lower(v):k for k, v in self._get_id_map('authors').iteritems()}
+amap = {icu_lower(v):k for k, v in iteritems(self._get_id_map('authors'))}
 books = None
 for author in must_have_authors:
 abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
@@ -1934,7 +1934,7 @@ def data_for_find_identical_books(self):
 db. See db.utils for an implementation. '''
 at = self.fields['authors'].table
 author_map = defaultdict(set)
-for aid, author in at.id_map.iteritems():
+for aid, author in iteritems(at.id_map):
 author_map[icu_lower(author)].add(aid)
 return (author_map, at.col_book_map.copy(), self.fields['title'].table.book_col_map.copy(), self.fields['languages'].book_value_map.copy())
@@ -2079,12 +2079,12 @@ def restore_book(self, book_id, mi, last_modified, path, formats):
 def virtual_libraries_for_books(self, book_ids):
 libraries = self._pref('virtual_libraries', {})
 ans = {book_id:[] for book_id in book_ids}
-for lib, expr in libraries.iteritems():
+for lib, expr in iteritems(libraries):
 books = self._search(expr) # We deliberately dont use book_ids as we want to use the search cache
 for book in book_ids:
 if book in books:
 ans[book].append(lib)
-return {k:tuple(sorted(v, key=sort_key)) for k, v in ans.iteritems()}
+return {k:tuple(sorted(v, key=sort_key)) for k, v in iteritems(ans)}
 @read_api
 def user_categories_for_books(self, book_ids, proxy_metadata_map=None):
@@ -2101,7 +2101,7 @@ def user_categories_for_books(self, book_ids, proxy_metadata_map=None):
 for book_id in book_ids:
 proxy_metadata = pmm.get(book_id) or self._get_proxy_metadata(book_id)
 user_cat_vals = ans[book_id] = {}
-for ucat, categories in user_cats.iteritems():
+for ucat, categories in iteritems(user_cats):
 user_cat_vals[ucat] = res = []
 for name, cat, ign in categories:
 try:
@@ -2240,15 +2240,15 @@ def import_library(library_key, importer, library_path, progress=None, abort=Non
 src.close()
 cache = Cache(DB(library_path, load_user_formatter_functions=False))
 cache.init()
-format_data = {int(book_id):data for book_id, data in metadata['format_data'].iteritems()}
-for i, (book_id, fmt_key_map) in enumerate(format_data.iteritems()):
+format_data = {int(book_id):data for book_id, data in iteritems(metadata['format_data'])}
+for i, (book_id, fmt_key_map) in enumerate(iteritems(format_data)):
 if abort is not None and abort.is_set():
 return
 title = cache._field_for('title', book_id)
 if progress is not None:
 progress(title, i + 1, total)
 cache._update_path((book_id,), mark_as_dirtied=False)
-for fmt, fmtkey in fmt_key_map.iteritems():
+for fmt, fmtkey in iteritems(fmt_key_map):
 if fmt == '.cover':
 stream = importer.start_file(fmtkey, _('Cover for %s') % title)
 path = cache._field_for('path', book_id).replace('/', os.sep)


@@ -9,7 +9,7 @@
 import copy
 from functools import partial
-from polyglot.builtins import unicode_type, map
+from polyglot.builtins import iteritems, iterkeys, unicode_type, map
 from calibre.constants import ispy3
 from calibre.ebooks.metadata import author_to_author_sort
@@ -75,7 +75,7 @@ def from_dict(cls, d):
 def find_categories(field_metadata):
-for category, cat in field_metadata.iteritems():
+for category, cat in iteritems(field_metadata):
 if (cat['is_category'] and cat['kind'] not in {'user', 'search'}):
 yield (category, cat['is_multiple'].get('cache_to_list', None), False)
 elif (cat['datatype'] == 'composite' and
@@ -215,11 +215,11 @@ def get_metadata(book_id):
 # do the verification in the category loop much faster, at the cost of
 # temporarily duplicating the categories lists.
 taglist = {}
-for c, items in categories.iteritems():
+for c, items in iteritems(categories):
 taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))
 # Add the category values to the user categories
-for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
+for user_cat in sorted(iterkeys(user_categories), key=sort_key):
 items = []
 names_seen = {}
 user_cat_is_gst = user_cat in gst


@@ -7,6 +7,7 @@
 from pprint import pformat
 from calibre import prints
+from polyglot.builtins import iteritems
 readonly = True
 version = 0 # change this if you change signature of implementation()
@@ -37,7 +38,7 @@ def option_parser(get_parser, args):
 def main(opts, args, dbctx):
-for col, data in dbctx.run('custom_columns').iteritems():
+for col, data in iteritems(dbctx.run('custom_columns')):
 if opts.details:
 prints(col)
 print()


@@ -13,6 +13,7 @@
 from calibre.db.cli.utils import str_width
 from calibre.ebooks.metadata import authors_to_string
 from calibre.utils.date import isoformat
+from polyglot.builtins import iteritems
 readonly = True
 version = 0 # change this if you change signature of implementation()
@@ -64,7 +65,7 @@ def implementation(
 continue
 if field == 'isbn':
 x = db.all_field_for('identifiers', book_ids, default_value={})
-data[field] = {k: v.get('isbn') or '' for k, v in x.iteritems()}
+data[field] = {k: v.get('isbn') or '' for k, v in iteritems(x)}
 continue
 field = field.replace('*', '#')
 metadata[field] = fm[field]
@@ -80,37 +81,37 @@ def implementation(
 def stringify(data, metadata, for_machine):
-for field, m in metadata.iteritems():
+for field, m in iteritems(metadata):
 if field == 'authors':
 data[field] = {
 k: authors_to_string(v)
-for k, v in data[field].iteritems()
+for k, v in iteritems(data[field])
 }
 else:
 dt = m['datatype']
 if dt == 'datetime':
 data[field] = {
 k: isoformat(v, as_utc=for_machine) if v else 'None'
-for k, v in data[field].iteritems()
+for k, v in iteritems(data[field])
 }
 elif not for_machine:
 ism = m['is_multiple']
 if ism:
 data[field] = {
 k: ism['list_to_ui'].join(v)
-for k, v in data[field].iteritems()
+for k, v in iteritems(data[field])
 }
 if field == 'formats':
 data[field] = {
 k: '[' + v + ']'
-for k, v in data[field].iteritems()
+for k, v in iteritems(data[field])
 }
 def as_machine_data(book_ids, data, metadata):
 for book_id in book_ids:
 ans = {'id': book_id}
-for field, val_map in data.iteritems():
+for field, val_map in iteritems(data):
 val = val_map.get(book_id)
 if val is not None:
 ans[field.replace('#', '*')] = val


@@ -9,6 +9,7 @@
 from calibre import prints
 from calibre.srv.changes import saved_searches
+from polyglot.builtins import iteritems
 def implementation(db, notify_changes, action, *args):
@@ -56,7 +57,7 @@ def option_parser(get_parser, args):
 def main(opts, args, dbctx):
 args = args or ['list']
 if args[0] == 'list':
-for name, value in dbctx.run('saved_searches', 'list').iteritems():
+for name, value in iteritems(dbctx.run('saved_searches', 'list')):
 prints(_('Name:'), name)
 prints(_('Search string:'), value)
 print()


@@ -11,7 +11,7 @@
 from calibre.ebooks.metadata.book.serialize import read_cover
 from calibre.ebooks.metadata.opf import get_metadata
 from calibre.srv.changes import metadata
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
 readonly = False
 version = 0 # change this if you change signature of implementation()
@@ -170,7 +170,7 @@ def verify_int(x):
 vals[field] = val
 fvals = []
 for field, val in sorted( # ensure series_index fields are set last
-vals.iteritems(), key=lambda k: 1 if k[0].endswith('_index') else 0):
+iteritems(vals), key=lambda k: 1 if k[0].endswith('_index') else 0):
 if field.endswith('_index'):
 try:
 val = float(val)


@@ -13,14 +13,14 @@
 import unittest
 from cStringIO import StringIO
 from calibre.db.cli.cmd_check_library import _print_check_library_results
+from polyglot.builtins import iteritems
 class Checker(object):
 def __init__(self, kw):
-for k, v in kw.iteritems():
+for k, v in iteritems(kw):
 setattr(self, k, v)


@@ -2,7 +2,6 @@
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
 from __future__ import (unicode_literals, division, absolute_import,
 print_function)
-# from polyglot.builtins import map
 __license__ = 'GPL v3'
 __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -20,6 +19,7 @@
 from calibre.utils.icu import sort_key
 from calibre.utils.date import UNDEFINED_DATE, clean_date_for_sort, parse_date
 from calibre.utils.localization import calibre_langcode_to_name
+from polyglot.builtins import iteritems, iterkeys
 def bool_sort_key(bools_are_tristate):
@@ -150,7 +150,7 @@ def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
 id_map = self.table.id_map
 special_sort = hasattr(self, 'category_sort_value')
-for item_id, item_book_ids in self.table.col_book_map.iteritems():
+for item_id, item_book_ids in iteritems(self.table.col_book_map):
 if book_ids is not None:
 item_book_ids = item_book_ids.intersection(book_ids)
 if item_book_ids:
@@ -184,7 +184,7 @@ def books_for(self, item_id):
 return {item_id}
 def __iter__(self):
-return self.table.book_col_map.iterkeys()
+return iterkeys(self.table.book_col_map)
 def sort_keys_for_books(self, get_metadata, lang_map):
 bcmg = self.table.book_col_map.get
@@ -315,7 +315,7 @@ def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 for v in vals:
 if v:
 val_map[v].add(book_id)
-for val, book_ids in val_map.iteritems():
+for val, book_ids in iteritems(val_map):
 yield val, book_ids
 def get_composite_categories(self, tag_class, book_rating_map, book_ids,
@@ -328,7 +328,7 @@ def get_composite_categories(self, tag_class, book_rating_map, book_ids,
 for val in vals:
 if val:
 id_map[val].add(book_id)
-for item_id, item_book_ids in id_map.iteritems():
+for item_id, item_book_ids in iteritems(id_map):
 ratings = tuple(r for r in (book_rating_map.get(book_id, 0) for
 book_id in item_book_ids) if r > 0)
 avg = sum(ratings)/len(ratings) if ratings else 0
@@ -409,7 +409,7 @@ def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 val_map = defaultdict(set)
 for book_id in candidates:
 val_map[self.for_book(book_id, default_value=default_value)].add(book_id)
-for val, book_ids in val_map.iteritems():
+for val, book_ids in iteritems(val_map):
 yield val, book_ids
@@ -456,7 +456,7 @@ def books_for(self, item_id):
 return self.table.col_book_map.get(item_id, set())
 def __iter__(self):
-return self.table.id_map.iterkeys()
+return iterkeys(self.table.id_map)
 def sort_keys_for_books(self, get_metadata, lang_map):
 sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@@ -466,7 +466,7 @@ def sort_keys_for_books(self, get_metadata, lang_map):
 def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 cbm = self.table.col_book_map
 empty = set()
-for item_id, val in self.table.id_map.iteritems():
+for item_id, val in iteritems(self.table.id_map):
 book_ids = cbm.get(item_id, empty).intersection(candidates)
 if book_ids:
 yield val, book_ids
@@ -475,7 +475,7 @@ def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 def book_value_map(self):
 try:
 return {book_id:self.table.id_map[item_id] for book_id, item_id in
-self.table.book_col_map.iteritems()}
+iteritems(self.table.book_col_map)}
 except KeyError:
 raise InvalidLinkTable(self.name)
@@ -507,7 +507,7 @@ def books_for(self, item_id):
 return self.table.col_book_map.get(item_id, set())
 def __iter__(self):
-return self.table.id_map.iterkeys()
+return iterkeys(self.table.id_map)
 def sort_keys_for_books(self, get_metadata, lang_map):
 sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@@ -524,7 +524,7 @@ def sk(book_id):
 def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 cbm = self.table.col_book_map
 empty = set()
-for item_id, val in self.table.id_map.iteritems():
+for item_id, val in iteritems(self.table.id_map):
 book_ids = cbm.get(item_id, empty).intersection(candidates)
 if book_ids:
 yield val, book_ids
@@ -534,14 +534,14 @@ def iter_counts(self, candidates):
 cbm = self.table.book_col_map
 for book_id in candidates:
 val_map[len(cbm.get(book_id, ()))].add(book_id)
-for count, book_ids in val_map.iteritems():
+for count, book_ids in iteritems(val_map):
 yield count, book_ids
 @property
 def book_value_map(self):
 try:
 return {book_id:tuple(self.table.id_map[item_id] for item_id in item_ids)
-for book_id, item_ids in self.table.book_col_map.iteritems()}
+for book_id, item_ids in iteritems(self.table.book_col_map)}
 except KeyError:
 raise InvalidLinkTable(self.name)
@@ -561,7 +561,7 @@ def sort_keys_for_books(self, get_metadata, lang_map):
 'Sort by identifier keys'
 bcmg = self.table.book_col_map.get
 dv = {self._default_sort_key:None}
-return lambda book_id: tuple(sorted(bcmg(book_id, dv).iterkeys()))
+return lambda book_id: tuple(sorted(iterkeys(bcmg(book_id, dv))))
 def iter_searchable_values(self, get_metadata, candidates, default_value=()):
 bcm = self.table.book_col_map
@@ -573,7 +573,7 @@ def iter_searchable_values(self, get_metadata, candidates, default_value=()):
 def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
 ans = []
-for id_key, item_book_ids in self.table.col_book_map.iteritems():
+for id_key, item_book_ids in iteritems(self.table.col_book_map):
 if book_ids is not None:
 item_book_ids = item_book_ids.intersection(book_ids)
 if item_book_ids:
@@ -618,13 +618,13 @@ def iter_searchable_values(self, get_metadata, candidates, default_value=None):
 for val in vals:
 val_map[val].add(book_id)
-for val, book_ids in val_map.iteritems():
+for val, book_ids in iteritems(val_map):
 yield val, book_ids
 def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
 ans = []
-for fmt, item_book_ids in self.table.col_book_map.iteritems():
+for fmt, item_book_ids in iteritems(self.table.col_book_map):
 if book_ids is not None:
 item_book_ids = item_book_ids.intersection(book_ids)
 if item_book_ids:
@@ -665,7 +665,7 @@ def sk(val, lang):
 return ssk(ts(val, order=sso, lang=lang))
 sk_map = LazySeriesSortMap(self._default_sort_key, sk, self.table.id_map)
 bcmg = self.table.book_col_map.get
-lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
+lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
 def key(book_id):
 lang = lang_map.get(book_id, None)
@@ -694,8 +694,8 @@ def iter_searchable_values_for_sort(self, candidates, lang_map, default_value=No
 sso = tweaks['title_series_sorting']
 ts = title_sort
 empty = set()
-lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
-for item_id, val in self.table.id_map.iteritems():
+lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
+for item_id, val in iteritems(self.table.id_map):
 book_ids = cbm.get(item_id, empty).intersection(candidates)
 if book_ids:
 lang_counts = Counter()
@@ -712,7 +712,7 @@ class TagsField(ManyToManyField):
 def get_news_category(self, tag_class, book_ids=None):
 news_id = None
 ans = []
-for item_id, val in self.table.id_map.iteritems():
+for item_id, val in iteritems(self.table.id_map):
 if val == _('News'):
 news_id = item_id
 break
@@ -724,7 +724,7 @@ def get_news_category(self, tag_class, book_ids=None):
 news_books = news_books.intersection(book_ids)
 if not news_books:
 return ans
-for item_id, item_book_ids in self.table.col_book_map.iteritems():
+for item_id, item_book_ids in iteritems(self.table.col_book_map):
 item_book_ids = item_book_ids.intersection(news_books)
 if item_book_ids:
 name = self.category_formatter(self.table.id_map[item_id])


@@ -15,7 +15,7 @@
 from calibre.ebooks.metadata.book.base import Metadata, SIMPLE_GET, TOP_LEVEL_IDENTIFIERS, NULL_VALUES, ALL_METADATA_FIELDS
 from calibre.ebooks.metadata.book.formatter import SafeFormat
 from calibre.utils.date import utcnow
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iterkeys, unicode_type
 # Lazy format metadata retrieval {{{
 '''
@@ -393,7 +393,7 @@ def get_standard_metadata(self, field, make_copy=False):
 def all_field_keys(self):
 um = ga(self, '_user_metadata')
-return frozenset(ALL_METADATA_FIELDS.union(um.iterkeys()))
+return frozenset(ALL_METADATA_FIELDS.union(iterkeys(um)))
 @property
 def _proxy_metadata(self):


@@ -7,7 +7,7 @@
 __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
 import os, traceback, types
-from polyglot.builtins import zip
+from polyglot.builtins import iteritems, zip
 from calibre import force_unicode, isbytestring
 from calibre.constants import preferred_encoding
@@ -171,14 +171,14 @@ def is_empty(self):
 return not bool(self.new_api.fields['title'].table.book_col_map)
 def get_usage_count_by_id(self, field):
-return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()]
+return [[k, v] for k, v in iteritems(self.new_api.get_usage_count_by_id(field))]
 def field_id_map(self, field):
-return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()]
+return [(k, v) for k, v in iteritems(self.new_api.get_id_map(field))]
 def get_custom_items_with_ids(self, label=None, num=None):
 try:
-return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()]
+return [[k, v] for k, v in iteritems(self.new_api.get_id_map(self.custom_field_name(label, num)))]
 except ValueError:
 return []
@@ -233,7 +233,7 @@ def add_books(self, paths, formats, metadata, add_duplicates=True, return_ids=Fa
 paths, formats, metadata = [], [], []
 for mi, format_map in duplicates:
 metadata.append(mi)
-for fmt, path in format_map.iteritems():
+for fmt, path in iteritems(format_map):
 formats.append(fmt)
 paths.append(path)
 duplicates = (paths, formats, metadata)
@@ -416,7 +416,7 @@ def books_with_same_title(self, mi, all_matches=True):
 ans = set()
 if title:
 title = icu_lower(force_unicode(title))
-for book_id, x in self.new_api.get_id_map('title').iteritems():
+for book_id, x in iteritems(self.new_api.get_id_map('title')):
 if icu_lower(x) == title:
 ans.add(book_id)
 if not all_matches:
@@ -521,7 +521,7 @@ def delete_tag(self, tag):
 def delete_tags(self, tags):
 with self.new_api.write_lock:
-tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()}
+tag_map = {icu_lower(v):k for k, v in iteritems(self.new_api._get_id_map('tags'))}
 tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
 tag_ids = tuple(tid for tid in tag_ids if tid is not None)
 if tag_ids:
@@ -547,7 +547,7 @@ def format_path(self, index, fmt, index_is_id=False):
 def format_files(self, index, index_is_id=False):
 book_id = index if index_is_id else self.id(index)
-return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()]
+return [(v, k) for k, v in iteritems(self.new_api.format_files(book_id))]
 def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False):
 return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db)
@@ -632,7 +632,7 @@ def rename_custom_item(self, old_id, new_name, label=None, num=None):
 def delete_item_from_multiple(self, item, label=None, num=None):
 field = self.custom_field_name(label, num)
 existing = self.new_api.get_id_map(field)
-rmap = {icu_lower(v):k for k, v in existing.iteritems()}
+rmap = {icu_lower(v):k for k, v in iteritems(existing)}
 item_id = rmap.get(icu_lower(item), None)
 if item_id is None:
 return []
@@ -854,7 +854,7 @@ def func(self):
 LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
 LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num)))
-for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems():
+for func, field in iteritems({'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}):
 def getter(field):
 def func(self):
 return self.field_id_map(field)
@@ -864,16 +864,16 @@ def func(self):
 LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names()))
 LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types()))
 LibraryDatabase.get_authors_with_ids = MT(
-lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()])
+lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in iteritems(self.new_api.author_data())])
 LibraryDatabase.get_author_id = MT(
-lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None))
+lambda self, author: {icu_lower(v):k for k, v in iteritems(self.new_api.get_id_map('authors'))}.get(icu_lower(author), None))
 for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
 def getter(field):
 fname = field[:-1] if field in {'publishers', 'ratings'} else field
 def func(self):
-return [[tid, tag] for tid, tag in self.new_api.get_id_map(fname).iteritems()]
+return [[tid, tag] for tid, tag in iteritems(self.new_api.get_id_map(fname))]
 return func
 setattr(LibraryDatabase, 'get_%s_with_ids' % field, MT(getter(field)))


@@ -16,6 +16,7 @@
 from calibre.constants import filesystem_encoding
 from calibre.utils.date import utcfromtimestamp
 from calibre import isbytestring, force_unicode
+from polyglot.builtins import iteritems
 NON_EBOOK_EXTENSIONS = frozenset([
 'jpg', 'jpeg', 'gif', 'png', 'bmp',
@@ -206,7 +207,7 @@ def process_dir(self, dirpath, filenames, book_id):
 self.mismatched_dirs.append(dirpath)
 alm = mi.get('author_link_map', {})
-for author, link in alm.iteritems():
+for author, link in iteritems(alm):
 existing_link, timestamp = self.authors_links.get(author, (None, None))
 if existing_link is None or existing_link != link and timestamp < mi.timestamp:
 self.authors_links[author] = (link, mi.timestamp)
@@ -259,7 +260,7 @@ def restore_books(self):
 self.progress_callback(book['mi'].title, i+1)
 id_map = db.get_item_ids('authors', [author for author in self.authors_links])
-link_map = {aid:self.authors_links[name][0] for name, aid in id_map.iteritems() if aid is not None}
+link_map = {aid:self.authors_links[name][0] for name, aid in iteritems(id_map) if aid is not None}
 if link_map:
 db.set_link_for_authors(link_map)
 db.close()


@@ -11,7 +11,7 @@
 from calibre import prints
 from calibre.utils.date import isoformat, DEFAULT_DATE
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iterkeys, itervalues, unicode_type
 class SchemaUpgrade(object):
@@ -299,7 +299,7 @@ def create_tag_browser_view(table_name, column_name, view_column_name):
 '''.format(tn=table_name, cn=column_name, vcn=view_column_name))
 self.db.execute(script)
-for field in self.field_metadata.itervalues():
+for field in itervalues(self.field_metadata):
 if field['is_category'] and not field['is_custom'] and 'link_column' in field:
 table = self.db.get(
 'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@@ -375,7 +375,7 @@ def create_cust_tag_browser_view(table_name, link_table_name):
 '''.format(lt=link_table_name, table=table_name)
 self.db.execute(script)
-for field in self.field_metadata.itervalues():
+for field in itervalues(self.field_metadata):
 if field['is_category'] and not field['is_custom'] and 'link_column' in field:
 table = self.db.get(
 'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@@ -596,7 +596,7 @@ def upgrade_version_19(self):
 custom_recipe_filename)
 bdir = os.path.dirname(custom_recipes.file_path)
 for id_, title, script in recipes:
-existing = frozenset(map(int, custom_recipes.iterkeys()))
+existing = frozenset(map(int, iterkeys(custom_recipes)))
 if id_ in existing:
 id_ = max(existing) + 1000
 id_ = str(id_)


@@ -19,7 +19,7 @@
 from calibre.utils.icu import primary_contains, sort_key
 from calibre.utils.localization import lang_map, canonicalize_lang
 from calibre.utils.search_query_parser import SearchQueryParser, ParseException
-from polyglot.builtins import unicode_type, string_or_bytes
+from polyglot.builtins import iteritems, iterkeys, unicode_type, string_or_bytes
 CONTAINS_MATCH = 0
 EQUALS_MATCH = 1
@@ -167,7 +167,7 @@ def __call__(self, query, field_iter):
 matches |= book_ids
 return matches
-for k, relop in self.operators.iteritems():
+for k, relop in iteritems(self.operators):
 if query.startswith(k):
 query = query[len(k):]
 break
@@ -254,7 +254,7 @@ def __call__(self, query, field_iter, location, datatype, candidates, is_many=Fa
 else:
 relop = lambda x,y: x is not None
 else:
-for k, relop in self.operators.iteritems():
+for k, relop in iteritems(self.operators):
 if query.startswith(k):
 query = query[len(k):]
 break
@@ -372,7 +372,7 @@ def __call__(self, query, field_iter, candidates, use_primary_find):
 return found if valq == 'true' else candidates - found
 for m, book_ids in field_iter():
-for key, val in m.iteritems():
+for key, val in iteritems(m):
 if (keyq and not _match(keyq, (key,), keyq_mkind,
 use_primary_find_in_search=use_primary_find)):
 continue
@@ -445,7 +445,7 @@ def set_all(self, smap):
 db._set_pref(self.opt_name, smap)
 def names(self):
-return sorted(self.queries.iterkeys(), key=sort_key)
+return sorted(iterkeys(self.queries), key=sort_key)
 # }}}
@@ -632,7 +632,7 @@ def fi(default_value=None):
 text_fields = set()
 field_metadata = {}
-for x, fm in self.field_metadata.iteritems():
+for x, fm in iteritems(self.field_metadata):
 if x.startswith('@'):
 continue
 if fm['search_terms'] and x not in {'series_sort', 'id'}:
@@ -670,7 +670,7 @@ def fi(default_value=None):
 q = canonicalize_lang(query)
 if q is None:
 lm = lang_map()
-rm = {v.lower():k for k,v in lm.iteritems()}
+rm = {v.lower():k for k,v in iteritems(lm)}
 q = rm.get(query, query)
 if matchkind == CONTAINS_MATCH and q.lower() in {'true', 'false'}:
@@ -799,7 +799,7 @@ def __getitem__(self, key):
 return self.get(key)
 def __iter__(self):
-return self.item_map.iteritems()
+return iteritems(self.item_map)
 # }}}


@ -14,7 +14,7 @@
from calibre.constants import plugins
from calibre.utils.date import parse_date, UNDEFINED_DATE, utc_tz
from calibre.ebooks.metadata import author_to_author_sort
from polyglot.builtins import range
from polyglot.builtins import iteritems, itervalues, range
_c_speedup = plugins['speedup'][0].parse_date
@ -154,10 +154,10 @@ class UUIDTable(OneToOneTable):
def read(self, db):
OneToOneTable.read(self, db)
self.uuid_to_id_map = {v:k for k, v in self.book_col_map.iteritems()}
self.uuid_to_id_map = {v:k for k, v in iteritems(self.book_col_map)}
def update_uuid_cache(self, book_id_val_map):
for book_id, uuid in book_id_val_map.iteritems():
for book_id, uuid in iteritems(book_id_val_map):
self.uuid_to_id_map.pop(self.book_col_map.get(book_id, None), None) # discard old uuid
self.uuid_to_id_map[uuid] = book_id
@ -226,7 +226,7 @@ def read_maps(self, db):
bcm[book] = item_id
def fix_link_table(self, db):
linked_item_ids = {item_id for item_id in self.book_col_map.itervalues()}
linked_item_ids = {item_id for item_id in itervalues(self.book_col_map)}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@ -238,10 +238,10 @@ def fix_link_table(self, db):
def fix_case_duplicates(self, db):
case_map = defaultdict(set)
for item_id, val in self.id_map.iteritems():
for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
for v in case_map.itervalues():
for v in itervalues(case_map):
if len(v) > 1:
main_id = min(v)
v.discard(main_id)
@ -322,7 +322,7 @@ def remove_items(self, item_ids, db, restrict_to_book_ids=None):
return affected_books
def rename_item(self, item_id, new_name, db):
rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@ -353,9 +353,9 @@ def read_id_maps(self, db):
ManyToOneTable.read_id_maps(self, db)
# Ensure there are no records with rating=0 in the table. These should
# be represented as rating:None instead.
bad_ids = {item_id for item_id, rating in self.id_map.iteritems() if rating == 0}
bad_ids = {item_id for item_id, rating in iteritems(self.id_map) if rating == 0}
if bad_ids:
self.id_map = {item_id:rating for item_id, rating in self.id_map.iteritems() if rating != 0}
self.id_map = {item_id:rating for item_id, rating in iteritems(self.id_map) if rating != 0}
db.executemany('DELETE FROM {0} WHERE {1}=?'.format(self.link_table, self.metadata['link_column']),
tuple((x,) for x in bad_ids))
db.execute('DELETE FROM {0} WHERE {1}=0'.format(
@ -382,10 +382,10 @@ def read_maps(self, db):
cbm[item_id].add(book)
bcm[book].append(item_id)
self.book_col_map = {k:tuple(v) for k, v in bcm.iteritems()}
self.book_col_map = {k:tuple(v) for k, v in iteritems(bcm)}
def fix_link_table(self, db):
linked_item_ids = {item_id for item_ids in self.book_col_map.itervalues() for item_id in item_ids}
linked_item_ids = {item_id for item_ids in itervalues(self.book_col_map) for item_id in item_ids}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@ -461,7 +461,7 @@ def remove_items(self, item_ids, db, restrict_to_book_ids=None):
return affected_books
def rename_item(self, item_id, new_name, db):
rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@ -490,10 +490,10 @@ def rename_item(self, item_id, new_name, db):
def fix_case_duplicates(self, db):
from calibre.db.write import uniq
case_map = defaultdict(set)
for item_id, val in self.id_map.iteritems():
for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
for v in case_map.itervalues():
for v in itervalues(case_map):
if len(v) > 1:
done_books = set()
main_id = min(v)
@ -541,19 +541,19 @@ def read_id_maps(self, db):
lm[aid] = link
def set_sort_names(self, aus_map, db):
aus_map = {aid:(a or '').strip() for aid, a in aus_map.iteritems()}
aus_map = {aid:a for aid, a in aus_map.iteritems() if a != self.asort_map.get(aid, None)}
aus_map = {aid:(a or '').strip() for aid, a in iteritems(aus_map)}
aus_map = {aid:a for aid, a in iteritems(aus_map) if a != self.asort_map.get(aid, None)}
self.asort_map.update(aus_map)
db.executemany('UPDATE authors SET sort=? WHERE id=?',
[(v, k) for k, v in aus_map.iteritems()])
[(v, k) for k, v in iteritems(aus_map)])
return aus_map
def set_links(self, link_map, db):
link_map = {aid:(l or '').strip() for aid, l in link_map.iteritems()}
link_map = {aid:l for aid, l in link_map.iteritems() if l != self.alink_map.get(aid, None)}
link_map = {aid:(l or '').strip() for aid, l in iteritems(link_map)}
link_map = {aid:l for aid, l in iteritems(link_map) if l != self.alink_map.get(aid, None)}
self.alink_map.update(link_map)
db.executemany('UPDATE authors SET link=? WHERE id=?',
[(v, k) for k, v in link_map.iteritems()])
[(v, k) for k, v in iteritems(link_map)])
return link_map
def remove_books(self, book_ids, db):
@ -602,7 +602,7 @@ def read_maps(self, db):
fnm[book][fmt] = name
sm[book][fmt] = sz
self.book_col_map = {k:tuple(sorted(v)) for k, v in bcm.iteritems()}
self.book_col_map = {k:tuple(sorted(v)) for k, v in iteritems(bcm)}
def remove_books(self, book_ids, db):
clean = ManyToManyTable.remove_books(self, book_ids, db)
@ -617,21 +617,21 @@ def set_fname(self, book_id, fmt, fname, db):
(fname, book_id, fmt))
def remove_formats(self, formats_map, db):
for book_id, fmts in formats_map.iteritems():
for book_id, fmts in iteritems(formats_map):
self.book_col_map[book_id] = [fmt for fmt in self.book_col_map.get(book_id, []) if fmt not in fmts]
for m in (self.fname_map, self.size_map):
m[book_id] = {k:v for k, v in m[book_id].iteritems() if k not in fmts}
m[book_id] = {k:v for k, v in iteritems(m[book_id]) if k not in fmts}
for fmt in fmts:
try:
self.col_book_map[fmt].discard(book_id)
except KeyError:
pass
db.executemany('DELETE FROM data WHERE book=? AND format=?',
[(book_id, fmt) for book_id, fmts in formats_map.iteritems() for fmt in fmts])
[(book_id, fmt) for book_id, fmts in iteritems(formats_map) for fmt in fmts])
def zero_max(book_id):
try:
return max(self.size_map[book_id].itervalues())
return max(itervalues(self.size_map[book_id]))
except ValueError:
return 0
@ -661,7 +661,7 @@ def update_fmt(self, book_id, fmt, fname, size, db):
self.size_map[book_id][fmt] = size
db.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(book_id, fmt, size, fname))
return max(self.size_map[book_id].itervalues())
return max(itervalues(self.size_map[book_id]))
class IdentifiersTable(ManyToManyTable):
@ -702,4 +702,4 @@ def rename_item(self, item_id, new_name, db):
raise NotImplementedError('Cannot rename identifiers')
def all_identifier_types(self):
return frozenset(k for k, v in self.col_book_map.iteritems() if v)
return frozenset(k for k, v in iteritems(self.col_book_map) if v)
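All of the iteritems(), itervalues() and iterkeys() calls introduced in this commit are free functions imported from polyglot.builtins instead of the Python 2 bound methods. As a rough sketch of what such six-style wrappers typically look like (hypothetical; the actual polyglot.builtins implementation may differ):

    import sys

    if sys.version_info.major >= 3:
        def iteritems(d):
            return iter(d.items())

        def itervalues(d):
            return iter(d.values())

        def iterkeys(d):
            return iter(d)
    else:  # Python 2: delegate to the bound methods being replaced above
        def iteritems(d):
            return d.iteritems()

        def itervalues(d):
            return d.itervalues()

        def iterkeys(d):
            return d.iterkeys()

Either spelling gives the call sites the same lazy stream of items on both interpreters; d.iteritems() itself raises AttributeError on Python 3.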
View file
@ -15,6 +15,7 @@
from calibre.db.tests.base import BaseTest, IMG
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import now, UNDEFINED_DATE
from polyglot.builtins import iteritems, itervalues
def import_test(replacement_data, replacement_fmt=None):
@ -217,14 +218,14 @@ def test_remove_books(self): # {{{
authors = cache.fields['authors'].table
# Delete a single book, with no formats and check cleaning
self.assertIn(_('Unknown'), set(authors.id_map.itervalues()))
self.assertIn(_('Unknown'), set(itervalues(authors.id_map)))
olen = len(authors.id_map)
item_id = {v:k for k, v in authors.id_map.iteritems()}[_('Unknown')]
item_id = {v:k for k, v in iteritems(authors.id_map)}[_('Unknown')]
cache.remove_books((3,))
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(3, c.all_book_ids())
self.assertNotIn(_('Unknown'), set(table.id_map.itervalues()))
self.assertNotIn(_('Unknown'), set(itervalues(table.id_map)))
self.assertNotIn(item_id, table.asort_map)
self.assertNotIn(item_id, table.alink_map)
ae(len(table.id_map), olen-1)
@ -235,17 +236,17 @@ def test_remove_books(self): # {{{
authorpath = os.path.dirname(bookpath)
os.mkdir(os.path.join(authorpath, '.DS_Store'))
open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close()
item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,), permanent=True)
for x in (fmtpath, bookpath, authorpath):
af(os.path.exists(x), 'The file %s exists, when it should not' % x)
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(1, c.all_book_ids())
self.assertNotIn('Author Two', set(table.id_map.itervalues()))
self.assertNotIn(6, set(c.fields['rating'].table.id_map.itervalues()))
self.assertIn('A Series One', set(c.fields['series'].table.id_map.itervalues()))
self.assertNotIn('My Series Two', set(c.fields['#series'].table.id_map.itervalues()))
self.assertNotIn('Author Two', set(itervalues(table.id_map)))
self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map)))
self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map)))
self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map)))
self.assertNotIn(item_id, c.fields['#series'].table.col_book_map)
self.assertNotIn(1, c.fields['#series'].table.book_col_map)
@ -264,7 +265,7 @@ def test_remove_books(self): # {{{
fmtpath = cache.format_abspath(1, 'FMT1')
bookpath = os.path.dirname(fmtpath)
authorpath = os.path.dirname(bookpath)
item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,))
delete_service().wait()
for x in (fmtpath, bookpath, authorpath):
View file
@ -13,6 +13,7 @@
from calibre.constants import iswindows
from calibre.db.tests.base import BaseTest
from calibre.ptempfile import TemporaryDirectory
from polyglot.builtins import iterkeys
class FilesystemTest(BaseTest):
@ -55,7 +56,7 @@ def test_metadata_move(self):
cache2 = self.init_cache(cl)
for c in (cache, cache2):
data = self.get_filesystem_data(c, 1)
ae(set(orig_data.iterkeys()), set(data.iterkeys()))
ae(set(iterkeys(orig_data)), set(iterkeys(data)))
ae(orig_data, data, 'Filesystem data does not match')
ae(c.field_for('path', 1), 'Moved/Moved (1)')
ae(c.field_for('path', 3), 'Moved1/Moved1 (3)')
View file
@ -14,7 +14,7 @@
from calibre.library.field_metadata import fm_as_dict
from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, range
# Utils {{{
@ -81,7 +81,7 @@ def to_unicode(x):
# We ignore the key rec_index, since it is not stable for
# custom columns (it is created by iterating over a dict)
return {k.decode('utf-8') if isinstance(k, bytes) else k:to_unicode(v)
for k, v in x.iteritems() if k != 'rec_index'}
for k, v in iteritems(x) if k != 'rec_index'}
return x
def get_props(db):
@ -108,7 +108,7 @@ def test_get_property(self): # {{{
'Test the get_property interface for reading data'
def get_values(db):
ans = {}
for label, loc in db.FIELD_MAP.iteritems():
for label, loc in iteritems(db.FIELD_MAP):
if isinstance(label, numbers.Integral):
label = '#'+db.custom_column_num_map[label]['label']
label = type('')(label)
@ -186,7 +186,7 @@ def test_legacy_direct(self): # {{{
self.assertEqual(dict(db.prefs), dict(ndb.prefs))
for meth, args in {
for meth, args in iteritems({
'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
'get_next_series_num_for': [('A Series One',)],
@ -251,7 +251,7 @@ def test_legacy_direct(self): # {{{
'book_on_device_string':[(1,), (2,), (3,)],
'books_in_series_of':[(0,), (1,), (2,)],
'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
}.iteritems():
}):
fmt = lambda x: x
if meth[0] in {'!', '@'}:
fmt = {'!':dict, '@':frozenset}[meth[0]]
@ -277,8 +277,8 @@ def f(x, y): # get_top_level_move_items is broken in the old db on case-insensi
old = db.get_data_as_dict(prefix='test-prefix')
new = ndb.get_data_as_dict(prefix='test-prefix')
for o, n in zip(old, new):
o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in o.iteritems()}
n = {k:set(v) if isinstance(v, list) else v for k, v in n.iteritems()}
o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in iteritems(o)}
n = {k:set(v) if isinstance(v, list) else v for k, v in iteritems(n)}
self.assertEqual(o, n)
ndb.search('title:Unknown')
@ -316,9 +316,9 @@ def test_legacy_delete_using(self): # {{{
db = self.init_old()
cache = ndb.new_api
tmap = cache.get_id_map('tags')
t = next(tmap.iterkeys())
t = next(iterkeys(tmap))
pmap = cache.get_id_map('publisher')
p = next(pmap.iterkeys())
p = next(iterkeys(pmap))
run_funcs(self, db, ndb, (
('delete_tag_using_id', t),
('delete_publisher_using_id', p),
@ -647,10 +647,10 @@ def test_legacy_setters(self): # {{{
ndb = self.init_legacy(self.cloned_library)
db = self.init_old(self.cloned_library)
a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('authors'))}['Author One']
t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('tags'))}['Tag One']
s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('series'))}['A Series One']
p = {v:k for k, v in iteritems(ndb.new_api.get_id_map('publisher'))}['Publisher One']
run_funcs(self, db, ndb, (
('rename_author', a, 'Author Two'),
('rename_tag', t, 'News'),
@ -688,11 +688,11 @@ def test_legacy_custom(self): # {{{
run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
# Test renaming/deleting
t = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag One']
t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag Two']
a = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['My Author Two']
a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['Custom One']
s = {v:k for k, v in ndb.new_api.get_id_map('#series').iteritems()}['My Series One']
t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag One']
t2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag Two']
a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['My Author Two']
a2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['Custom One']
s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#series'))}['My Series One']
run_funcs(self, db, ndb, (
('delete_custom_item_using_id', t, 'tags'),
('delete_custom_item_using_id', a, 'authors'),
View file
@ -13,7 +13,7 @@
from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, itervalues, range
class ReadingTest(BaseTest):
@ -116,8 +116,8 @@ def test_read(self): # {{{
},
}
for book_id, test in tests.iteritems():
for field, expected_val in test.iteritems():
for book_id, test in iteritems(tests):
for field, expected_val in iteritems(test):
val = cache.field_for(field, book_id)
if isinstance(val, tuple) and 'authors' not in field and 'languages' not in field:
val, expected_val = set(val), set(expected_val)
@ -130,7 +130,7 @@ def test_sorting(self): # {{{
'Test sorting'
cache = self.init_cache()
ae = self.assertEqual
for field, order in {
for field, order in iteritems({
'title' : [2, 1, 3],
'authors': [2, 1, 3],
'series' : [3, 1, 2],
@ -154,7 +154,7 @@ def test_sorting(self): # {{{
'#yesno':[2, 1, 3],
'#comments':[3, 2, 1],
'id': [1, 2, 3],
}.iteritems():
}):
x = list(reversed(order))
ae(order, cache.multisort([(field, True)],
ids_to_sort=x),
@ -222,7 +222,7 @@ def test_get_metadata(self): # {{{
old_metadata = {i:old.get_metadata(
i, index_is_id=True, get_cover=True, cover_as_data=True) for i in
range(1, 4)}
for mi in old_metadata.itervalues():
for mi in itervalues(old_metadata):
mi.format_metadata = dict(mi.format_metadata)
if mi.formats:
mi.formats = tuple(mi.formats)
@ -234,7 +234,7 @@ def test_get_metadata(self): # {{{
new_metadata = {i:cache.get_metadata(
i, get_cover=True, cover_as_data=True) for i in range(1, 4)}
cache = None
for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
for mi2, mi1 in zip(list(new_metadata.values()), list(old_metadata.values())):
self.compare_metadata(mi1, mi2)
# }}}
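The list() wrapping of the two .values() calls above is python-modernize being conservative: on Python 3, dict.values() returns a lazy view rather than the list Python 2 returned. For a plain zip the views would also work, but the explicit list() restores Python 2's snapshot semantics exactly. A small illustration with toy data:

    d = {1: 'a', 2: 'b'}
    vals = d.values()   # Python 3: a live view; Python 2: a list snapshot
    d[3] = 'c'
    print(len(vals))    # 3 on Python 3 (the view tracks the dict), 2 on Python 2
    print(list(vals))   # wrapping in list() pins down an explicit snapshot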
@ -262,7 +262,7 @@ def test_get_cover(self): # {{{
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
for book_id, cdata in covers.iteritems():
for book_id, cdata in iteritems(covers):
self.assertEqual(cdata, cache.cover(book_id), 'Reading of cover failed')
f = cache.cover(book_id, as_file=True)
self.assertEqual(cdata, f.read() if f else f, 'Reading of cover as file failed')
@ -325,7 +325,7 @@ def test_searching(self): # {{{
old = None
cache = self.init_cache(self.cloned_library)
for query, ans in oldvals.iteritems():
for query, ans in iteritems(oldvals):
nr = cache.search(query, '')
self.assertEqual(ans, nr,
'Old result: %r != New result: %r for search: %s'%(
@ -407,11 +407,11 @@ def test_get_formats(self): # {{{
lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats(
i, index_is_id=True) else set() for i in ids}
formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts} for
i, fmts in lf.iteritems()}
i, fmts in iteritems(lf)}
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
for book_id, fmts in lf.iteritems():
for book_id, fmts in iteritems(lf):
self.assertEqual(fmts, set(cache.formats(book_id)),
'Set of formats is not the same')
for fmt in fmts:
@ -439,9 +439,9 @@ def test_author_sort_for_authors(self): # {{{
'Test getting the author sort for authors from the db'
cache = self.init_cache()
table = cache.fields['authors'].table
table.set_sort_names({next(table.id_map.iterkeys()): 'Fake Sort'}, cache.backend)
table.set_sort_names({next(iterkeys(table.id_map)): 'Fake Sort'}, cache.backend)
authors = tuple(table.id_map.itervalues())
authors = tuple(itervalues(table.id_map))
nval = cache.author_sort_from_authors(authors)
self.assertIn('Fake Sort', nval)
@ -458,7 +458,7 @@ def test_get_next_series_num(self): # {{{
cache.set_field('series', {3:'test series'})
cache.set_field('series_index', {3:13})
table = cache.fields['series'].table
series = tuple(table.id_map.itervalues())
series = tuple(itervalues(table.id_map))
nvals = {s:cache.get_next_series_num_for(s) for s in series}
db = self.init_old()
self.assertEqual({s:db.get_next_series_num_for(s) for s in series}, nvals)
@ -471,7 +471,7 @@ def test_has_book(self): # {{{
from calibre.ebooks.metadata.book.base import Metadata
cache = self.init_cache()
db = self.init_old()
for title in cache.fields['title'].table.book_col_map.itervalues():
for title in itervalues(cache.fields['title'].table.book_col_map):
for x in (db, cache):
self.assertTrue(x.has_book(Metadata(title)))
self.assertTrue(x.has_book(Metadata(title.upper())))
View file
@ -14,6 +14,7 @@
from calibre.ebooks.metadata import author_to_author_sort
from calibre.utils.date import UNDEFINED_DATE
from calibre.db.tests.base import BaseTest, IMG
from polyglot.builtins import iteritems, itervalues
class WritingTest(BaseTest):
@ -166,7 +167,7 @@ def test_many_one_basic(self): # {{{
self.assertEqual(cache.set_field('#enum', {1:None}), {1})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
for i, val in iteritems({1:None, 2:'One', 3:'Three'}):
self.assertEqual(c.field_for('#enum', i), val)
del cache2
@ -176,9 +177,9 @@ def test_many_one_basic(self): # {{{
self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:4, 3:2}.iteritems():
for i, val in iteritems({1:None, 2:4, 3:2}):
self.assertEqual(c.field_for('rating', i), val)
for i, val in {1:None, 2:4, 3:8}.iteritems():
for i, val in iteritems({1:None, 2:4, 3:8}):
self.assertEqual(c.field_for('#rating', i), val)
del cache2
@ -191,14 +192,14 @@ def test_many_one_basic(self): # {{{
self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), {2})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
for i, val in iteritems({1:'A Series One', 2:'A Series One', 3:'Series'}):
self.assertEqual(c.field_for('series', i), val)
cs_indices = {1:c.field_for('#series_index', 1), 3:c.field_for('#series_index', 3)}
for i in (1, 2, 3):
self.assertEqual(c.field_for('#series', i), 'Series')
for i, val in {1:2, 2:1, 3:3}.iteritems():
for i, val in iteritems({1:2, 2:1, 3:3}):
self.assertEqual(c.field_for('series_index', i), val)
for i, val in {1:cs_indices[1], 2:0, 3:cs_indices[3]}.iteritems():
for i, val in iteritems({1:cs_indices[1], 2:0, 3:cs_indices[3]}):
self.assertEqual(c.field_for('#series_index', i), val)
del cache2
@ -461,13 +462,13 @@ def test_remove_items(self): # {{{
tmap = cache.get_id_map('tags')
self.assertEqual(cache.remove_items('tags', tmap), {1, 2})
tmap = cache.get_id_map('#tags')
t = {v:k for k, v in tmap.iteritems()}['My Tag Two']
t = {v:k for k, v in iteritems(tmap)}['My Tag Two']
self.assertEqual(cache.remove_items('#tags', (t,)), {1, 2})
smap = cache.get_id_map('series')
self.assertEqual(cache.remove_items('series', smap), {1, 2})
smap = cache.get_id_map('#series')
s = {v:k for k, v in smap.iteritems()}['My Series Two']
s = {v:k for k, v in iteritems(smap)}['My Series Two']
self.assertEqual(cache.remove_items('#series', (s,)), {1})
for c in (cache, self.init_cache()):
@ -507,7 +508,7 @@ def test_remove_items(self): # {{{
for c in (cache, c2):
self.assertEqual(c.field_for('tags', 1), ())
self.assertEqual(c.field_for('tags', 2), ('b', 'a'))
self.assertNotIn('c', set(c.get_id_map('tags').itervalues()))
self.assertNotIn('c', set(itervalues(c.get_id_map('tags'))))
self.assertEqual(c.field_for('series', 1), None)
self.assertEqual(c.field_for('series', 2), 'a')
self.assertEqual(c.field_for('series_index', 1), 1.0)
@ -520,9 +521,9 @@ def test_rename_items(self): # {{{
cl = self.cloned_library
cache = self.init_cache(cl)
# Check that renaming authors updates author sort and path
a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Unknown']
a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Unknown']
self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Author One']
a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Author One']
self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
@ -531,7 +532,7 @@ def test_rename_items(self): # {{{
self.assertEqual(c.field_for('authors', 1), ('Author Two',))
self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')
t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
# Test case change
self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
for c in (cache, self.init_cache(cl)):
@ -551,14 +552,14 @@ def test_rename_items(self): # {{{
self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
# Test on a custom column
t = {v:k for k, v in cache.get_id_map('#tags').iteritems()}['My Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('#tags'))}['My Tag One']
self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})
# Test a Many-one field
s = {v:k for k, v in cache.get_id_map('series').iteritems()}['A Series One']
s = {v:k for k, v in iteritems(cache.get_id_map('series'))}['A Series One']
# Test case change
self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
for c in (cache, self.init_cache(cl)):
@ -574,7 +575,7 @@ def test_rename_items(self): # {{{
self.assertEqual(c.field_for('series', 2), 'series')
self.assertEqual(c.field_for('series_index', 1), 2.0)
s = {v:k for k, v in cache.get_id_map('#series').iteritems()}['My Series One']
s = {v:k for k, v in iteritems(cache.get_id_map('#series'))}['My Series One']
# Test custom column with rename to existing
self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
@ -585,7 +586,7 @@ def test_rename_items(self): # {{{
# Test renaming many-many items to multiple items
cache = self.init_cache(self.cloned_library)
t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
affected_books, id_map = cache.rename_items('tags', {t:'Something, Else, Entirely'})
self.assertEqual({1, 2}, affected_books)
tmap = cache.get_id_map('tags')
@ -600,7 +601,7 @@ def test_rename_items(self): # {{{
# Test with restriction
cache = self.init_cache()
cache.set_field('tags', {1:'a,b,c', 2:'x,y,z', 3:'a,x,z'})
tmap = {v:k for k, v in cache.get_id_map('tags').iteritems()}
tmap = {v:k for k, v in iteritems(cache.get_id_map('tags'))}
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r'}, restrict_to_book_ids=()), (set(), {}))
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r', tmap['b']:'q'}, restrict_to_book_ids=(1,))[0], {1})
self.assertEqual(cache.rename_items('tags', {tmap['x']:'X'}, restrict_to_book_ids=(2,))[0], {2})
@ -657,7 +658,7 @@ def test_set_author_data(self): # {{{
ldata = {aid:str(aid) for aid in adata}
self.assertEqual({1,2,3}, cache.set_link_for_authors(ldata))
for c in (cache, self.init_cache()):
self.assertEqual(ldata, {aid:d['link'] for aid, d in c.author_data().iteritems()})
self.assertEqual(ldata, {aid:d['link'] for aid, d in iteritems(c.author_data())})
self.assertEqual({3}, cache.set_link_for_authors({aid:'xxx' if aid == max(adata) else str(aid) for aid in adata}),
'Setting the author link to the same value as before, incorrectly marked some books as dirty')
sdata = {aid:'%s, changed' % aid for aid in adata}
@ -709,7 +710,7 @@ def test_fix_case_duplicates(self): # {{{
conn.execute('INSERT INTO tags (name) VALUES ("t")')
norm = conn.last_insert_rowid()
conn.execute('DELETE FROM books_tags_link')
for book_id, vals in {1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}.iteritems():
for book_id, vals in iteritems({1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}):
conn.executemany('INSERT INTO books_tags_link (book,tag) VALUES (?,?)',
tuple((book_id, x) for x in vals))
cache.reload_from_db()
View file
@ -9,7 +9,7 @@
import os, errno, sys, re
from locale import localeconv
from collections import OrderedDict, namedtuple
from polyglot.builtins import map, unicode_type, string_or_bytes
from polyglot.builtins import iteritems, itervalues, map, unicode_type, string_or_bytes
from threading import Lock
from calibre import as_unicode, prints
@ -208,7 +208,7 @@ def record(line):
def _invalidate_sizes(self):
if self.size_changed:
size = self.thumbnail_size
remove = (key for key, entry in self.items.iteritems() if size != entry.thumbnail_size)
remove = (key for key, entry in iteritems(self.items) if size != entry.thumbnail_size)
for key in remove:
self._remove(key)
self.size_changed = False
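A caveat with the generator form above: if self._remove() deletes entries from self.items, as its name suggests, iterating the dict lazily while it shrinks raises RuntimeError: dictionary changed size during iteration. Snapshotting the keys first is the safe variant (toy sketch, assuming that mutation):

    d = {1: 'a', 2: 'b', 3: 'c'}
    # Lazy form -- fails once d changes size mid-iteration:
    #     for k in (k for k, v in d.items() if v != 'b'):
    #         del d[k]
    for k in [k for k, v in d.items() if v != 'b']:  # keys snapshotted up front
        del d[k]
    print(d)  # {2: 'b'}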
@ -365,7 +365,7 @@ def empty(self):
pass
if not hasattr(self, 'total_size'):
self._load_index()
for entry in self.items.itervalues():
for entry in itervalues(self.items):
self._do_delete(entry.path)
self.total_size = 0
self.items = OrderedDict()
View file
@ -9,7 +9,8 @@
import weakref, operator, numbers
from functools import partial
from polyglot.builtins import map, unicode_type, range, zip
from polyglot.builtins import (iteritems, iterkeys, itervalues, map,
unicode_type, range, zip)
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks, prefs
@ -71,7 +72,7 @@ def format_is_multiple(x, sep=',', repl=None):
def format_identifiers(x):
if not x:
return None
return ','.join('%s:%s'%(k, v) for k, v in x.iteritems())
return ','.join('%s:%s'%(k, v) for k, v in iteritems(x))
class View(object):
@ -88,7 +89,7 @@ def __init__(self, cache):
self.search_restriction_name = self.base_restriction_name = ''
self._field_getters = {}
self.column_count = len(cache.backend.FIELD_MAP)
for col, idx in cache.backend.FIELD_MAP.iteritems():
for col, idx in iteritems(cache.backend.FIELD_MAP):
label, fmt = col, lambda x:x
func = {
'id': self._get_id,
@ -373,14 +374,14 @@ def set_marked_ids(self, id_dict):
self.marked_ids = dict.fromkeys(id_dict, u'true')
else:
# Ensure that all the items in the dict are text
self.marked_ids = dict(zip(id_dict.iterkeys(), map(unicode_type,
id_dict.itervalues())))
self.marked_ids = dict(zip(iterkeys(id_dict), map(unicode_type,
itervalues(id_dict))))
# This invalidates all searches in the cache even though the cache may
# be shared by multiple views. This is not ideal, but...
cmids = set(self.marked_ids)
self.cache.clear_search_caches(old_marked_ids | cmids)
if old_marked_ids != cmids:
for funcref in self.marked_listeners.itervalues():
for funcref in itervalues(self.marked_listeners):
func = funcref()
if func is not None:
func(old_marked_ids, cmids)
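The funcref() call followed by the None check above is the usual idiom for callbacks held as weak references, so a garbage-collected listener silently drops out instead of being kept alive. A self-contained sketch of that pattern (Listener and on_marked_change are illustrative names, not calibre's):

    import weakref

    class Listener(object):
        def on_marked_change(self, old_ids, new_ids):
            print('marked ids changed:', old_ids, '->', new_ids)

    listener = Listener()
    listeners = {'gui': weakref.WeakMethod(listener.on_marked_change)}

    for funcref in listeners.values():
        func = funcref()  # None once the listener has been collected
        if func is not None:
            func(set(), {1, 2})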
View file
@ -10,7 +10,7 @@
import re
from functools import partial
from datetime import datetime
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, itervalues, unicode_type, zip
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata import author_to_author_sort, title_sort
@ -131,7 +131,7 @@ def adapt_identifiers(to_tuple, x):
if not isinstance(x, dict):
x = {k:v for k, v in (y.partition(':')[0::2] for y in to_tuple(x))}
ans = {}
for k, v in x.iteritems():
for k, v in iteritems(x):
k, v = clean_identifier(k, v)
if k and v:
ans[k] = v
@ -194,7 +194,7 @@ def get_adapter(name, metadata):
def one_one_in_books(book_id_val_map, db, field, *args):
'Set a one-one field in the books table'
if book_id_val_map:
sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
sequence = ((sqlite_datetime(v), k) for k, v in iteritems(book_id_val_map))
db.executemany(
'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
field.table.book_col_map.update(book_id_val_map)
@ -210,23 +210,23 @@ def set_title(book_id_val_map, db, field, *args):
ans = one_one_in_books(book_id_val_map, db, field, *args)
# Set the title sort field
field.title_sort_field.writer.set_books(
{k:title_sort(v) for k, v in book_id_val_map.iteritems()}, db)
{k:title_sort(v) for k, v in iteritems(book_id_val_map)}, db)
return ans
def one_one_in_other(book_id_val_map, db, field, *args):
'Set a one-one field in the non-books table, like comments'
deleted = tuple((k,) for k, v in book_id_val_map.iteritems() if v is None)
deleted = tuple((k,) for k, v in iteritems(book_id_val_map) if v is None)
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
deleted)
for book_id in deleted:
field.table.book_col_map.pop(book_id[0], None)
updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
updated = {k:v for k, v in iteritems(book_id_val_map) if v is not None}
if updated:
db.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
field.metadata['table'], field.metadata['column']),
((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
((k, sqlite_datetime(v)) for k, v in iteritems(updated)))
field.table.book_col_map.update(updated)
return set(book_id_val_map)
@ -234,7 +234,7 @@ def one_one_in_other(book_id_val_map, db, field, *args):
def custom_series_index(book_id_val_map, db, field, *args):
series_field = field.series_field
sequence = []
for book_id, sidx in book_id_val_map.iteritems():
for book_id, sidx in iteritems(book_id_val_map):
if sidx is None:
sidx = 1.0
ids = series_field.ids_for_book(book_id)
@ -285,12 +285,12 @@ def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
if is_authors:
vals = ((val.replace(',', '|'), item_id) for item_id, val in
case_changes.iteritems())
iteritems(case_changes))
else:
vals = ((val, item_id) for item_id, val in case_changes.iteritems())
vals = ((val, item_id) for item_id, val in iteritems(case_changes))
db.executemany(
'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
for item_id, val in case_changes.iteritems():
for item_id, val in iteritems(case_changes):
table.id_map[item_id] = val
dirtied.update(table.col_book_map[item_id])
if is_authors:
@ -306,14 +306,14 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {None:None}
case_changes = {}
for val in book_id_val_map.itervalues():
for val in itervalues(book_id_val_map):
if val is not None:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map)
@ -321,17 +321,17 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m)
book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}
book_id_item_id_map = {k:val_map[v] for k, v in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_id in book_id_item_id_map.iteritems():
for book_id, item_id in iteritems(book_id_item_id_map):
old_item_id = table.book_col_map.get(book_id, None)
if old_item_id is not None:
table.col_book_map[old_item_id].discard(book_id)
@ -355,7 +355,7 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
)
db.executemany(sql.format(table.link_table, m['link_column']),
((book_id, book_id, item_id) for book_id, item_id in
updated.iteritems()))
iteritems(updated)))
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
@ -392,15 +392,15 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt == 'text' else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {}
case_changes = {}
book_id_val_map = {k:uniq(vals, kmap) for k, vals in book_id_val_map.iteritems()}
for vals in book_id_val_map.itervalues():
book_id_val_map = {k:uniq(vals, kmap) for k, vals in iteritems(book_id_val_map)}
for vals in itervalues(book_id_val_map):
for val in vals:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=is_authors)
@ -408,7 +408,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m, is_authors=is_authors)
if is_authors:
for item_id, val in case_changes.iteritems():
for item_id, val in iteritems(case_changes):
for book_id in table.col_book_map[item_id]:
current_sort = field.db_author_sort_for_book(book_id)
new_sort = field.author_sort_for_book(book_id)
@ -418,17 +418,17 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
field.author_sort_field.writer.set_books({book_id:new_sort}, db)
book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
for k, vals in book_id_val_map.iteritems()}
for k, vals in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_ids in book_id_item_id_map.iteritems():
for book_id, item_ids in iteritems(book_id_item_id_map):
old_item_ids = table.book_col_map.get(book_id, None)
if old_item_ids:
for old_item_id in old_item_ids:
@ -448,7 +448,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
((k,) for k in deleted))
if updated:
vals = (
(book_id, val) for book_id, vals in updated.iteritems()
(book_id, val) for book_id, vals in iteritems(updated)
for val in vals
)
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
@ -481,7 +481,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
def identifiers(book_id_val_map, db, field, *args): # {{{
table = field.table
updates = set()
for book_id, identifiers in book_id_val_map.iteritems():
for book_id, identifiers in iteritems(book_id_val_map):
if book_id not in table.book_col_map:
table.book_col_map[book_id] = {}
current_ids = table.book_col_map[book_id]
@ -490,7 +490,7 @@ def identifiers(book_id_val_map, db, field, *args): # {{{
table.col_book_map.get(key, set()).discard(book_id)
current_ids.pop(key, None)
current_ids.update(identifiers)
for key, val in identifiers.iteritems():
for key, val in iteritems(identifiers):
if key not in table.col_book_map:
table.col_book_map[key] = set()
table.col_book_map[key].add(book_id)
@ -538,7 +538,7 @@ def __init__(self, field):
def set_books(self, book_id_val_map, db, allow_case_change=True):
book_id_val_map = {k:self.adapter(v) for k, v in
book_id_val_map.iteritems() if self.accept_vals(v)}
iteritems(book_id_val_map) if self.accept_vals(v)}
if not book_id_val_map:
return set()
dirtied = self.set_books_func(book_id_val_map, db, self.field,
@ -548,7 +548,7 @@ def set_books(self, book_id_val_map, db, allow_case_change=True):
def set_books_for_enum(self, book_id_val_map, db, field,
allow_case_change):
allowed = set(field.metadata['display']['enum_values'])
book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
book_id_val_map = {k:v for k, v in iteritems(book_id_val_map) if v is
None or v in allowed}
if not book_id_val_map:
return set()
View file
@ -32,7 +32,7 @@
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import DEBUG
from calibre.utils.config_base import prefs
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import iteritems, itervalues, unicode_type, string_or_bytes
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
@ -407,7 +407,7 @@ def update_booklist(prefix, path, title, authors, mime, date, ContentType, Image
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
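The reverse-sorted deletion above recurs in several drivers touched by this commit: deleting from the highest list index downwards keeps the not-yet-deleted indices valid. Toy illustration:

    bl = ['book-a', 'book-b', 'book-c', 'book-d']
    bl_cache = {'a': None, 'b': 1, 'd': 3}  # None means the book is still on disk
    for idx in sorted((i for i in bl_cache.values() if i is not None), reverse=True):
        del bl[idx]  # highest index first, so lower indices do not shift
    print(bl)  # ['book-a', 'book-c']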
@ -908,13 +908,13 @@ def update_device_database_collections(self, booklists, collections_attributes,
ContentID = self.contentid_from_path(book.path, ContentType)
if category in readstatuslist.keys():
if category in list(readstatuslist.keys()):
# Manage ReadStatus
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
elif category == 'Shortlist' and self.dbversion >= 14:
# Manage FavouritesIndex/Shortlist
self.set_favouritesindex(connection, ContentID)
elif category in accessibilitylist.keys():
elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
else: # No collections
@ -1964,7 +1964,7 @@ def get_bookshelvesforbook(connection, ContentID):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))) or not bl[idx].contentID:
need_sync = True
@ -2138,7 +2138,7 @@ def _modify_epub(self, book_file, metadata, container=None):
from calibre.ebooks.oeb.base import OEB_STYLES
is_dirty = False
for cssname, mt in container.mime_map.iteritems():
for cssname, mt in iteritems(container.mime_map):
if mt in OEB_STYLES:
newsheet = container.parsed(cssname)
oldrules = len(newsheet.cssRules)
@ -2447,7 +2447,7 @@ def update_device_database_collections(self, booklists, collections_attributes,
debug_print(' Setting bookshelf on device')
self.set_bookshelf(connection, book, category)
category_added = True
elif category in readstatuslist.keys():
elif category in list(readstatuslist.keys()):
debug_print("KoboTouch:update_device_database_collections - about to set_readstatus - category='%s'"%(category, ))
# Manage ReadStatus
self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
@ -2462,7 +2462,7 @@ def update_device_database_collections(self, booklists, collections_attributes,
debug_print(' and about to set it - %s'%book.title)
self.set_favouritesindex(connection, book.contentID)
category_added = True
elif category in accessibilitylist.keys():
elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
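The list(...) around the .keys() calls in this file is the mechanical dict_six fix; for a pure membership test the copy is redundant, since `category in d` behaves identically on Python 2 and 3:

    readstatuslist = {'Read': 2, 'Reading': 1, 'Closed': 3}
    category = 'Read'
    print(category in readstatuslist)               # True, no intermediate copy
    print(category in list(readstatuslist.keys()))  # also True, via a throwaway list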
View file
@ -10,6 +10,7 @@
import traceback, re
from calibre.constants import iswindows
from polyglot.builtins import iteritems
class DeviceDefaults(object):
@ -47,7 +48,7 @@ def __call__(self, device, driver):
for rule in self.rules:
tests = rule[0]
matches = True
for k, v in tests.iteritems():
for k, v in iteritems(tests):
if k == 'vendor' and v != vid:
matches = False
break
View file
@ -17,7 +17,7 @@
from calibre.devices.mtp.defaults import DeviceDefaults
from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory
from calibre.utils.filenames import shorten_components_to
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, itervalues, unicode_type, zip
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
'windows' if iswindows else 'unix')).MTP_DEVICE
@ -276,7 +276,7 @@ def books(self, oncard=None, end_session=True):
book.path = mtp_file.mtp_id_path
# Remove books in the cache that no longer exist
for idx in sorted(relpath_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(relpath_cache), reverse=True):
del bl[idx]
need_sync = True
@ -546,7 +546,7 @@ def save_template(self):
def get_user_blacklisted_devices(self):
bl = frozenset(self.prefs['blacklist'])
ans = {}
for dev, x in self.prefs['history'].iteritems():
for dev, x in iteritems(self.prefs['history']):
name = x[0]
if dev in bl:
ans[dev] = name
View file
@ -10,7 +10,7 @@
import weakref, sys, json
from collections import deque
from operator import attrgetter
from polyglot.builtins import map, unicode_type
from polyglot.builtins import itervalues, map, unicode_type
from datetime import datetime
from calibre import human_readable, prints, force_unicode
@ -201,7 +201,7 @@ def __init__(self, all_storage, entries):
for entry in entries:
FileOrFolder(entry, self)
for item in self.id_map.itervalues():
for item in itervalues(self.id_map):
try:
p = item.parent
except KeyError:
@ -227,7 +227,7 @@ def storage(self, storage_id):
return e
def iterebooks(self, storage_id):
for x in self.id_map.itervalues():
for x in itervalues(self.id_map):
if x.storage_id == storage_id and x.is_ebook:
if x.parent_id == storage_id and x.name.lower().endswith('.txt'):
continue # Ignore .txt files in the root
View file
@ -9,7 +9,7 @@
import time, threading, traceback
from functools import wraps, partial
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip
from itertools import chain
from calibre import as_unicode, prints, force_unicode
@ -107,7 +107,7 @@ def detect_managed_devices(self, devices_on_system, force_refresh=False):
# Get device data for detected devices. If there is an error, we will
# try again for that device the next time this method is called.
for dev in tuple(self.detected_devices.iterkeys()):
for dev in tuple(iterkeys(self.detected_devices)):
data = self.detected_devices.get(dev, None)
if data is None or data is False:
try:
@ -130,7 +130,7 @@ def detect_managed_devices(self, devices_on_system, force_refresh=False):
self.currently_connected_pnp_id in self.detected_devices
else None)
for dev, data in self.detected_devices.iteritems():
for dev, data in iteritems(self.detected_devices):
if dev in self.blacklisted_devices or dev in self.ejected_devices:
# Ignore blacklisted and ejected devices
continue
@ -267,10 +267,10 @@ def filesystem_cache(self):
self._currently_getting_sid = unicode_type(storage_id)
id_map = self.dev.get_filesystem(storage_id, partial(
self._filesystem_callback, {}))
for x in id_map.itervalues():
for x in itervalues(id_map):
x['storage_id'] = storage_id
all_storage.append(storage)
items.append(id_map.itervalues())
items.append(itervalues(id_map))
self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
time.time()-st, len(self._filesystem_cache)))
View file
@ -13,7 +13,7 @@
from calibre import prints, as_unicode
from calibre.constants import (iswindows, isosx, plugins, islinux, isfreebsd,
isnetbsd)
from polyglot.builtins import range
from polyglot.builtins import iterkeys, range
osx_scanner = linux_scanner = freebsd_scanner = netbsd_scanner = None
@ -77,7 +77,7 @@ def __call__(self):
dev = USBDevice(*dev)
dev.busnum, dev.devnum = fingerprint[:2]
ans.add(dev)
extra = set(self.libusb.cache.iterkeys()) - seen
extra = set(iterkeys(self.libusb.cache)) - seen
for x in extra:
self.libusb.cache.pop(x, None)
return ans
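Here set(iterkeys(self.libusb.cache)) is equivalent to set(self.libusb.cache); both snapshot the keys, so popping stale entries afterwards is safe. Sketch with toy fingerprints:

    cache = {'dev1': 'fp1', 'dev2': 'fp2', 'dev3': 'fp3'}
    seen = {'dev1', 'dev3'}
    for stale in set(cache) - seen:  # keys snapshotted before mutating
        cache.pop(stale, None)
    print(sorted(cache))  # ['dev1', 'dev3']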
View file
@ -23,7 +23,7 @@
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.constants import iswindows, islinux, isosx, isfreebsd, plugins
from calibre.utils.filenames import ascii_filename as sanitize
from polyglot.builtins import string_or_bytes
from polyglot.builtins import iteritems, string_or_bytes
if isosx:
usbobserver, usbobserver_err = plugins['usbobserver']
@ -404,7 +404,7 @@ def open_osx(self):
bsd_drives = self.osx_bsd_names()
drives = self.osx_sort_names(bsd_drives.copy())
mount_map = usbobserver.get_mounted_filesystems()
drives = {k: mount_map.get(v) for k, v in drives.iteritems()}
drives = {k: mount_map.get(v) for k, v in iteritems(drives)}
if DEBUG:
print()
from pprint import pprint
View file
@ -20,7 +20,7 @@
from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import itervalues, unicode_type, string_or_bytes
BASE_TIME = None
@ -281,7 +281,7 @@ def update_booklist(filename, path, prefix):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
View file
@ -15,7 +15,7 @@
)
from ctypes.wintypes import DWORD, WORD, ULONG, LPCWSTR, HWND, BOOL, LPWSTR, UINT, BYTE, HANDLE, USHORT
from pprint import pprint, pformat
from polyglot.builtins import map
from polyglot.builtins import iteritems, itervalues, map
from calibre import prints, as_unicode
@ -652,13 +652,13 @@ def get_volume_information(drive_letter):
'max_component_length': max_component_length.value,
}
for name, num in {'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
for name, num in iteritems({'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
'FILE_NAMED_STREAMS':0x00040000, 'FILE_PERSISTENT_ACLS':0x00000008, 'FILE_READ_ONLY_VOLUME':0x00080000,
'FILE_SEQUENTIAL_WRITE_ONCE':0x00100000, 'FILE_SUPPORTS_ENCRYPTION':0x00020000, 'FILE_SUPPORTS_EXTENDED_ATTRIBUTES':0x00800000,
'FILE_SUPPORTS_HARD_LINKS':0x00400000, 'FILE_SUPPORTS_OBJECT_IDS':0x00010000, 'FILE_SUPPORTS_OPEN_BY_FILE_ID':0x01000000,
'FILE_SUPPORTS_REPARSE_POINTS':0x00000080, 'FILE_SUPPORTS_SPARSE_FILES':0x00000040, 'FILE_SUPPORTS_TRANSACTIONS':0x00200000,
'FILE_SUPPORTS_USN_JOURNAL':0x02000000, 'FILE_UNICODE_ON_DISK':0x00000004, 'FILE_VOLUME_IS_COMPRESSED':0x00008000,
'FILE_VOLUME_QUOTAS':0x00000020}.iteritems():
'FILE_VOLUME_QUOTAS':0x00000020}):
ans[name] = bool(num & flags)
return ans
@ -809,7 +809,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
' Get a mapping of drive letters to storage numbers for all drives on system (of the specified types) '
mask = GetLogicalDrives()
type_map = {letter:GetDriveType(letter + ':' + os.sep) for i, letter in enumerate(string.ascii_uppercase) if mask & (1 << i)}
drives = (letter for letter, dt in type_map.iteritems() if dt in drive_types)
drives = (letter for letter, dt in iteritems(type_map) if dt in drive_types)
ans = defaultdict(list)
for letter in drives:
try:
@ -819,7 +819,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (letter, as_unicode(err)))
continue
for val in ans.itervalues():
for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)
@ -859,7 +859,7 @@ def get_storage_number_map_alt(debug=False):
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (name[0], as_unicode(err)))
continue
for val in ans.itervalues():
for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)
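Both storage-number helpers end the same way: each drive letter's list of tuples is sorted in place by its first element before the defaultdict is frozen into a plain dict. Minimal reconstruction of that shape (values are made up):

    from collections import defaultdict
    from operator import itemgetter

    ans = defaultdict(list)
    ans['C'].append((1, 'partition-1'))
    ans['C'].append((0, 'partition-0'))
    for val in ans.values():
        val.sort(key=itemgetter(0))  # order each drive's entries by storage number
    print(dict(ans))  # {'C': [(0, 'partition-0'), (1, 'partition-1')]}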
View file
@ -17,6 +17,7 @@
from calibre import patheq
from calibre.ebooks.conversion import ConversionUserFeedBack
from calibre.utils.localization import localize_user_manual_link
from polyglot.builtins import iteritems
USAGE = '%prog ' + _('''\
input_file output_file [options]
@ -254,7 +255,7 @@ def add_pipeline_options(parser, plumber):
))
for group, (desc, options) in groups.iteritems():
for group, (desc, options) in iteritems(groups):
if group:
group = OptionGroup(parser, group, desc)
parser.add_option_group(group)
View file
@ -8,7 +8,7 @@
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from calibre import guess_type
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
FB2NS = 'http://www.gribuser.ru/xml/fictionbook/2.0'
FB21NS = 'http://www.gribuser.ru/xml/fictionbook/2.1'
@ -103,7 +103,7 @@ def convert(self, stream, options, file_ext, log,
notes = {a.get('href')[1:]: a for a in result.xpath('//a[@link_note and @href]') if a.get('href').startswith('#')}
cites = {a.get('link_cite'): a for a in result.xpath('//a[@link_cite]') if not a.get('href', '')}
all_ids = {x for x in result.xpath('//*/@id')}
for cite, a in cites.iteritems():
for cite, a in iteritems(cites):
note = notes.get(cite, None)
if note:
c = 1
View file
@ -14,7 +14,7 @@
from calibre.customize.conversion import (OutputFormatPlugin,
OptionRecommendation)
from calibre.ptempfile import TemporaryDirectory
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
UNITS = ['millimeter', 'centimeter', 'point', 'inch' , 'pica' , 'didot',
'cicero', 'devicepixel']
@ -263,7 +263,7 @@ def convert_text(self, oeb_book):
self.process_fonts()
if self.opts.pdf_use_document_margins and self.stored_page_margins:
import json
for href, margins in self.stored_page_margins.iteritems():
for href, margins in iteritems(self.stored_page_margins):
item = oeb_book.manifest.hrefs.get(href)
if item is not None:
root = item.data
View file
@ -5,6 +5,7 @@
import os, glob, re, textwrap
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from polyglot.builtins import iteritems
border_style_map = {
'single' : 'solid',
@ -145,7 +146,7 @@ def extract_images(self, picts):
def convert_images(self, imap):
self.default_img = None
for count, val in imap.iteritems():
for count, val in iteritems(imap):
try:
imap[count] = self.convert_image(val)
except:
@ -210,7 +211,7 @@ def write_inline_css(self, ic, border_styles):
css += '\n'+'\n'.join(font_size_classes)
css += '\n' +'\n'.join(color_classes)
for cls, val in border_styles.iteritems():
for cls, val in iteritems(border_styles):
css += '\n\n.%s {\n%s\n}'%(cls, val)
with open(u'styles.css', 'ab') as f:
View file
@ -10,7 +10,7 @@
from collections import namedtuple
from contextlib import contextmanager
from math import ceil, sqrt, cos, sin, atan2
from polyglot.builtins import map, zip, string_or_bytes
from polyglot.builtins import iteritems, itervalues, map, zip, string_or_bytes
from itertools import chain
from PyQt5.Qt import (
@ -282,7 +282,7 @@ def preserve_fields(obj, fields):
try:
yield
finally:
for f, val in mem.iteritems():
for f, val in iteritems(mem):
if val is null:
delattr(obj, f)
else:
@ -324,10 +324,10 @@ def load_color_themes(prefs):
t = default_color_themes.copy()
t.update(prefs.color_themes)
disabled = frozenset(prefs.disabled_color_themes)
ans = [theme_to_colors(v) for k, v in t.iteritems() if k not in disabled]
ans = [theme_to_colors(v) for k, v in iteritems(t) if k not in disabled]
if not ans:
# Ignore disabled and return only the builtin color themes
ans = [theme_to_colors(v) for k, v in default_color_themes.iteritems()]
ans = [theme_to_colors(v) for k, v in iteritems(default_color_themes)]
return ans
@ -557,14 +557,14 @@ def __call__(self, painter, rect, color_theme, title_block, subtitle_block, foot
def all_styles():
return set(
x.NAME for x in globals().itervalues() if
x.NAME for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style
)
def load_styles(prefs, respect_disabled=True):
disabled = frozenset(prefs.disabled_styles) if respect_disabled else ()
ans = tuple(x for x in globals().itervalues() if
ans = tuple(x for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style and x.NAME not in disabled)
if not ans and disabled:
# If all styles have been disabled, ignore the disabling and return all
View file
@ -13,6 +13,7 @@
from calibre import force_unicode
from calibre.ebooks import parse_css_length
from calibre.ebooks.oeb.normalize_css import normalizers, safe_parser
from polyglot.builtins import iteritems
def compile_pat(pat):
@ -44,7 +45,7 @@ def __iter__(self):
yield p, None
else:
if p not in self.expanded_properties:
self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in n(p.name, p.propertyValue).iteritems()]
self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in iteritems(n(p.name, p.propertyValue))]
for ep in self.expanded_properties[p]:
yield ep, p
@ -338,7 +339,7 @@ def export_rules(serialized_rules):
lines = []
for rule in serialized_rules:
lines.extend('# ' + l for l in rule_to_text(rule).splitlines())
lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in rule.iteritems() if k in allowed_keys)
lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in iteritems(rule) if k in allowed_keys)
lines.append('')
return '\n'.join(lines).encode('utf-8')
View file
@ -8,6 +8,7 @@
import numbers
from collections import OrderedDict
from polyglot.builtins import iteritems
class Inherit:
@ -115,11 +116,11 @@ def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'
for border in XPath('./w:' + name)(parent):
for edge in border_edges:
for prop, val in read_single_border(border, edge, XPath, get).iteritems():
for prop, val in iteritems(read_single_border(border, edge, XPath, get)):
if val is not None:
vals[prop % edge] = val
for key, val in vals.iteritems():
for key, val in iteritems(vals):
setattr(dest, key, val)
View file
@ -7,7 +7,7 @@
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from polyglot.builtins import range
from polyglot.builtins import iterkeys, itervalues, range
NBSP = '\xa0'
@ -54,7 +54,7 @@ def merge_run(run):
def liftable(css):
# A <span> is liftable if all its styling would work just as well if it is
# specified on the parent element.
prefixes = {x.partition('-')[0] for x in css.iterkeys()}
prefixes = {x.partition('-')[0] for x in iterkeys(css)}
return not (prefixes - {'text', 'font', 'letter', 'color', 'background'})
@ -134,7 +134,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
current_run = [span]
# Process dir attributes
class_map = dict(styles.classes.itervalues())
class_map = dict(itervalues(styles.classes))
parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
# Ensure that children of rtl parents that are not rtl have an
View file
@ -9,6 +9,7 @@
import re
from calibre.ebooks.docx.index import process_index, polish_index_markup
from polyglot.builtins import iteritems
class Field(object):
@ -222,7 +223,7 @@ def parse_index(self, field, parse_func, log):
def polish_markup(self, object_map):
if not self.index_fields:
return
rmap = {v:k for k, v in object_map.iteritems()}
rmap = {v:k for k, v in iteritems(object_map)}
for idx, blocks in self.index_fields:
polish_index_markup(idx, [rmap[b] for b in blocks])
View file
@ -14,7 +14,7 @@
from calibre.utils.fonts.scanner import font_scanner, NoFonts
from calibre.utils.fonts.utils import panose_to_css_generic_family, is_truetype_font
from calibre.utils.icu import ord_string
from polyglot.builtins import codepoint_to_chr, range
from polyglot.builtins import codepoint_to_chr, iteritems, range
Embed = namedtuple('Embed', 'name key subsetted')
@ -172,7 +172,7 @@ def embed_fonts(self, dest_dir, docx):
d['font-weight'] = 'bold'
if 'Italic' in variant:
d['font-style'] = 'italic'
d = ['%s: %s' % (k, v) for k, v in d.iteritems()]
d = ['%s: %s' % (k, v) for k, v in iteritems(d)]
d = ';\n\t'.join(d)
defs.append('@font-face {\n\t%s\n}\n' % d)
return '\n'.join(defs)
View file
@ -7,6 +7,7 @@
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
from polyglot.builtins import iteritems
class Note(object):
@ -57,10 +58,9 @@ def get_ref(self, ref):
return None, None
def __iter__(self):
for anchor, (counter, note) in self.notes.iteritems():
for anchor, (counter, note) in iteritems(self.notes):
yield anchor, counter, note
@property
def has_notes(self):
return bool(self.notes)
View file
@ -15,6 +15,7 @@
from calibre.utils.filenames import ascii_filename
from calibre.utils.img import resize_to_fit, image_to_data
from calibre.utils.imghdr import what
from polyglot.builtins import iteritems, itervalues
class LinkedImageNotFound(ValueError):
@ -66,7 +67,7 @@ def get_image_properties(parent, XPath, get):
def get_image_margins(elem):
ans = {}
for w, css in {'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}.iteritems():
for w, css in iteritems({'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}):
val = elem.get('dist%s' % w, None)
if val is not None:
try:
@ -157,7 +158,7 @@ def read_image_data(self, fname, base=None):
return raw, base
def unique_name(self, base):
exists = frozenset(self.used.itervalues())
exists = frozenset(itervalues(self.used))
c = 1
name = base
while name in exists:
@ -242,7 +243,7 @@ def drawing_to_html(self, drawing, page):
ans = self.pic_to_img(pic, alt, inline, title)
if ans is not None:
if style:
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
# Now process the floats
@ -253,7 +254,7 @@ def drawing_to_html(self, drawing, page):
ans = self.pic_to_img(pic, alt, anchor, title)
if ans is not None:
if style:
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
def pict_to_html(self, pict, page):
@ -275,7 +276,7 @@ def pict_to_html(self, pict, page):
style['margin-left'] = '0' if align == 'left' else 'auto'
style['margin-right'] = 'auto' if align == 'left' else '0'
if style:
hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in style.iteritems())))
hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in iteritems(style))))
yield hr
for imagedata in XPath('descendant::v:imagedata[@r:id]')(pict):

View file

@ -11,7 +11,7 @@
from lxml import etree
from calibre.utils.icu import partition_by_first_letter, sort_key
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def get_applicable_xe_fields(index, xe_fields, XPath, expand):
@ -103,7 +103,7 @@ def process_index(field, index, xe_fields, log, XPath, expand):
if heading_text is not None:
groups = partition_by_first_letter(xe_fields, key=itemgetter('text'))
items = []
for key, fields in groups.iteritems():
for key, fields in iteritems(groups):
items.append(key), items.extend(fields)
if styles:
heading_style = styles[0]

View file

@ -11,6 +11,7 @@
from lxml.etree import XPath as X
from calibre.utils.filenames import ascii_text
from polyglot.builtins import iteritems
# Names {{{
TRANSITIONAL_NAMES = {
@ -32,7 +33,7 @@
STRICT_NAMES = {
k:v.replace('http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument')
for k, v in TRANSITIONAL_NAMES.iteritems()
for k, v in iteritems(TRANSITIONAL_NAMES)
}
TRANSITIONAL_NAMESPACES = {
@ -72,7 +73,7 @@
'http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument').replace(
'http://schemas.openxmlformats.org/wordprocessingml/2006', 'http://purl.oclc.org/ooxml/wordprocessingml').replace(
'http://schemas.openxmlformats.org/drawingml/2006', 'http://purl.oclc.org/ooxml/drawingml')
for k, v in TRANSITIONAL_NAMESPACES.iteritems()
for k, v in iteritems(TRANSITIONAL_NAMESPACES)
}
# }}}
@ -138,7 +139,7 @@ def descendants(self, elem, *args):
return self.XPath('|'.join('descendant::%s' % a for a in args))(elem)
def makeelement(self, root, tag, append=True, **attrs):
ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in attrs.iteritems()})
ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in iteritems(attrs)})
if append:
root.append(ans)
return ans

View file

@ -15,6 +15,7 @@
from calibre.ebooks.docx.block_styles import ParagraphStyle
from calibre.ebooks.docx.char_styles import RunStyle, inherit
from calibre.ebooks.metadata import roman
from polyglot.builtins import iteritems
STYLE_MAP = {
'aiueo': 'hiragana',
@ -168,7 +169,7 @@ def __init__(self, namespace, parent=None, an_id=None):
def copy(self):
ans = NumberingDefinition(self.namespace, an_id=self.abstract_numbering_definition_id)
for l, lvl in self.levels.iteritems():
for l, lvl in iteritems(self.levels):
ans.levels[l] = lvl.copy()
return ans
@ -224,7 +225,7 @@ def create_instance(n, definition):
if alvl is None:
alvl = Level(self.namespace)
alvl.read_from_xml(lvl, override=True)
for ilvl, so in start_overrides.iteritems():
for ilvl, so in iteritems(start_overrides):
try:
nd.levels[ilvl].start = so
except KeyError:
@ -244,22 +245,22 @@ def create_instance(n, definition):
self.instances[num_id] = create_instance(n, d)
numbering_links = styles.numbering_style_links
for an_id, style_link in lazy_load.iteritems():
for an_id, style_link in iteritems(lazy_load):
num_id = numbering_links[style_link]
self.definitions[an_id] = self.instances[num_id].copy()
for num_id, (an_id, n) in next_pass.iteritems():
for num_id, (an_id, n) in iteritems(next_pass):
d = self.definitions.get(an_id, None)
if d is not None:
self.instances[num_id] = create_instance(n, d)
for num_id, d in self.instances.iteritems():
for num_id, d in iteritems(self.instances):
self.starts[num_id] = {lvl:d.levels[lvl].start for lvl in d.levels}
def get_pstyle(self, num_id, style_id):
d = self.instances.get(num_id, None)
if d is not None:
for ilvl, lvl in d.levels.iteritems():
for ilvl, lvl in iteritems(d.levels):
if lvl.para_link == style_id:
return ilvl
@ -271,7 +272,7 @@ def get_para_style(self, num_id, lvl):
def update_counter(self, counter, levelnum, levels):
counter[levelnum] += 1
for ilvl, lvl in levels.iteritems():
for ilvl, lvl in iteritems(levels):
restart = lvl.restart
if (restart is None and ilvl == levelnum + 1) or restart == levelnum + 1:
counter[ilvl] = lvl.start
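
A related detail visible in the starts/levels code above: iterating a dict directly yields its keys on both Python 2 and 3, so comprehensions like {lvl: d.levels[lvl].start for lvl in d.levels} never needed a wrapper in the first place. A quick check:

# Iterating a mapping yields keys on both major Python versions, so
# `for lvl in d.levels` is already portable without iterkeys().
levels = {0: 10, 1: 20}
assert {lvl: levels[lvl] for lvl in levels} == {0: 10, 1: 20}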

View file

@ -12,6 +12,7 @@
from calibre.ebooks.docx.block_styles import ParagraphStyle, inherit, twips
from calibre.ebooks.docx.char_styles import RunStyle
from calibre.ebooks.docx.tables import TableStyle
from polyglot.builtins import iteritems, itervalues
class PageProperties(object):
@ -124,7 +125,7 @@ def __init__(self, namespace, tables):
self.default_paragraph_style = self.default_character_style = None
def __iter__(self):
for s in self.id_map.itervalues():
for s in itervalues(self.id_map):
yield s
def __getitem__(self, key):
@ -341,7 +342,7 @@ def promote_property(char_styles, block_style, prop):
setattr(s, prop, inherit)
setattr(block_style, prop, next(iter(vals)))
for p, runs in layers.iteritems():
for p, runs in iteritems(layers):
has_links = '1' in {r.get('is-link', None) for r in runs}
char_styles = [self.resolve_run(r) for r in runs]
block_style = self.resolve_paragraph(p)
@ -421,7 +422,7 @@ def apply_section_page_breaks(self, paras):
ps.pageBreakBefore = True
def register(self, css, prefix):
h = hash(frozenset(css.iteritems()))
h = hash(frozenset(iteritems(css)))
ans, _ = self.classes.get(h, (None, None))
if ans is None:
self.counter[prefix] += 1
@ -430,17 +431,17 @@ def register(self, css, prefix):
return ans
def generate_classes(self):
for bs in self.para_cache.itervalues():
for bs in itervalues(self.para_cache):
css = bs.css
if css:
self.register(css, 'block')
for bs in self.run_cache.itervalues():
for bs in itervalues(self.run_cache):
css = bs.css
if css:
self.register(css, 'text')
def class_name(self, css):
h = hash(frozenset(css.iteritems()))
h = hash(frozenset(iteritems(css)))
return self.classes.get(h, (None, None))[0]
def generate_css(self, dest_dir, docx, notes_nopb, nosupsub):
@ -495,8 +496,8 @@ def generate_css(self, dest_dir, docx, notes_nopb, nosupsub):
prefix = ef + '\n' + prefix
ans = []
for (cls, css) in sorted(self.classes.itervalues(), key=lambda x:x[0]):
b = ('\t%s: %s;' % (k, v) for k, v in css.iteritems())
for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]):
b = ('\t%s: %s;' % (k, v) for k, v in iteritems(css))
b = '\n'.join(b)
ans.append('.%s {\n%s\n}\n' % (cls, b.rstrip(';')))
return prefix + '\n' + '\n'.join(ans)
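
The register/class_name pair above keys the class cache on hash(frozenset(iteritems(css))). A frozenset of items is insertion-order independent, so two dicts holding the same CSS properties hash identically no matter how they were built; a standalone illustration (using plain .items(), which iteritems() mirrors):

# Two dicts with equal contents but different construction order produce
# the same frozenset of items, hence the same cache key.
a = {'font-weight': 'bold', 'color': 'red'}
b = {'color': 'red', 'font-weight': 'bold'}
assert hash(frozenset(a.items())) == hash(frozenset(b.items()))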

View file

@ -10,7 +10,7 @@
from calibre.ebooks.docx.block_styles import inherit, read_shd as rs, read_border, binary_property, border_props, ParagraphStyle, border_to_css
from calibre.ebooks.docx.char_styles import RunStyle
from polyglot.builtins import range
from polyglot.builtins import iteritems, itervalues, range
# Read from XML {{{
read_shd = rs
@ -86,7 +86,7 @@ def read_spacing(parent, dest, XPath, get):
def read_float(parent, dest, XPath, get):
ans = inherit
for x in XPath('./w:tblpPr')(parent):
ans = {k.rpartition('}')[-1]: v for k, v in x.attrib.iteritems()}
ans = {k.rpartition('}')[-1]: v for k, v in iteritems(x.attrib)}
setattr(dest, 'float', ans)
@ -618,7 +618,7 @@ def handle_merged_cells(self):
def __iter__(self):
for p in self.paragraphs:
yield p
for t in self.sub_tables.itervalues():
for t in itervalues(self.sub_tables):
for p in t:
yield p
@ -665,7 +665,7 @@ def apply_markup(self, rmap, page, parent=None):
table_style = self.table_style.css
if table_style:
table.set('class', self.styles.register(table_style, 'table'))
for elem, style in style_map.iteritems():
for elem, style in iteritems(style_map):
css = style.css
if css:
elem.set('class', self.styles.register(css, elem.tag))
@ -686,7 +686,7 @@ def register(self, tbl, styles):
self.sub_tables |= set(self.tables[-1].sub_tables)
def apply_markup(self, object_map, page_map):
rmap = {v:k for k, v in object_map.iteritems()}
rmap = {v:k for k, v in iteritems(object_map)}
for table in self.tables:
table.apply_markup(rmap, page_map[table.tbl])

View file

@ -29,6 +29,8 @@
from calibre.ebooks.docx.settings import Settings
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import iteritems, itervalues
NBSP = '\xa0'
@ -122,7 +124,7 @@ def __call__(self):
self.read_page_properties(doc)
self.resolve_alternate_content(doc)
self.current_rels = relationships_by_id
for wp, page_properties in self.page_map.iteritems():
for wp, page_properties in iteritems(self.page_map):
self.current_page = page_properties
if wp.tag.endswith('}p'):
p = self.convert_p(wp)
@ -162,7 +164,7 @@ def __call__(self):
self.styles.apply_contextual_spacing(paras)
self.mark_block_runs(paras)
for p, wp in self.object_map.iteritems():
for p, wp in iteritems(self.object_map):
if len(p) > 0 and not p.text and len(p[0]) > 0 and not p[0].text and p[0][0].get('class', None) == 'tab':
# Paragraph uses tabs for indentation, convert to text-indent
parent = p[0]
@ -192,7 +194,7 @@ def __call__(self):
self.tables.apply_markup(self.object_map, self.page_map)
numbered = []
for html_obj, obj in self.object_map.iteritems():
for html_obj, obj in iteritems(self.object_map):
raw = obj.get('calibre_num_id', None)
if raw is not None:
lvl, num_id = raw.partition(':')[0::2]
@ -212,7 +214,7 @@ def __call__(self):
self.log.debug('Converting styles to CSS')
self.styles.generate_classes()
for html_obj, obj in self.object_map.iteritems():
for html_obj, obj in iteritems(self.object_map):
style = self.styles.resolve(obj)
if style is not None:
css = style.css
@ -220,7 +222,7 @@ def __call__(self):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
for html_obj, css in self.framed_map.iteritems():
for html_obj, css in iteritems(self.framed_map):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
@ -407,13 +409,13 @@ def read_block_anchors(self, doc):
doc_anchors = frozenset(self.namespace.XPath('./w:body/w:bookmarkStart[@w:name]')(doc))
if doc_anchors:
current_bm = set()
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for p in self.namespace.descendants(doc, 'w:p', 'w:bookmarkStart[@w:name]'):
if p.tag.endswith('}p'):
if current_bm and p in rmap:
para = rmap[p]
if 'id' not in para.attrib:
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(self.anchor_map.itervalues())))
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(itervalues(self.anchor_map))))
for name in current_bm:
self.anchor_map[name] = para.get('id')
current_bm = set()
@ -469,10 +471,10 @@ def p_parent(x):
# _GoBack is a special bookmark inserted by Word 2010 for
# the return to previous edit feature, we ignore it
old_anchor = current_anchor
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(self.anchor_map.itervalues()))
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(itervalues(self.anchor_map)))
if old_anchor is not None:
# The previous anchor was not applied to any element
for a, t in tuple(self.anchor_map.iteritems()):
for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
elif x.tag.endswith('}hyperlink'):
@ -480,11 +482,11 @@ def p_parent(x):
elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
old_anchor = current_anchor
anchor = str(uuid.uuid4())
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(self.anchor_map.itervalues()))
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(itervalues(self.anchor_map)))
self.toc_anchor = current_anchor
if old_anchor is not None:
# The previous anchor was not applied to any element
for a, t in tuple(self.anchor_map.iteritems()):
for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
if current_anchor is not None:
@ -559,7 +561,7 @@ def wrap_elems(self, elems, wrapper):
def resolve_links(self):
self.resolved_link_map = {}
for hyperlink, spans in self.link_map.iteritems():
for hyperlink, spans in iteritems(self.link_map):
relationships_by_id = self.link_source_map[hyperlink]
span = spans[0]
if len(spans) > 1:
@ -585,7 +587,7 @@ def resolve_links(self):
# hrefs that point nowhere give epubcheck a hernia. The element
# should be styled explicitly by Word anyway.
# span.set('href', '#')
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for hyperlink, runs in self.fields.hyperlink_fields:
spans = [rmap[r] for r in runs if r in rmap]
if not spans:
@ -744,7 +746,7 @@ def apply_frames(self):
if not self.block_runs:
return
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for border_style, blocks in self.block_runs:
paras = tuple(rmap[p] for p in blocks)
for p in paras:
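
The rmap = {v:k for k, v in iteritems(object_map)} idiom recurs throughout this converter: object_map maps generated HTML elements to their source DOCX nodes, and inverting it lets each pass look up the HTML element for a given DOCX node. The inversion is only safe because the mapping is one-to-one:

# Inverting a one-to-one mapping with a dict comprehension; duplicate
# values would silently collapse to a single key, so uniqueness matters.
object_map = {'html_p1': 'docx_p1', 'html_p2': 'docx_p2'}
rmap = {v: k for k, v in object_map.items()}
assert rmap['docx_p1'] == 'html_p1'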

View file

@ -13,7 +13,7 @@
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
def from_headings(body, log, namespace):
@ -25,7 +25,7 @@ def from_headings(body, log, namespace):
level_prev = {i+1:None for i in range(len(xpaths))}
level_prev[0] = tocroot
level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)}
item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
idcount = count()
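
The item_level_map comprehension above flattens a one-to-many map (heading level to elements) into its element-to-level inverse; the nested for clause enumerates each element under its level:

# Flattening level -> elements into element -> level, as in from_headings.
level_item_map = {1: frozenset({'h1a', 'h1b'}), 2: frozenset({'h2a'})}
item_level_map = {e: i for i, elems in level_item_map.items() for e in elems}
assert item_level_map == {'h1a': 1, 'h1b': 1, 'h2a': 2}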

View file

@ -19,6 +19,7 @@
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile
from calibre.ebooks.pdf.render.common import PAPER_SIZES
from polyglot.builtins import iteritems
def xml2str(root, pretty_print=False, with_tail=False):
@ -55,7 +56,7 @@ def create_skeleton(opts, namespaces=None):
def w(x):
return '{%s}%s' % (namespaces['w'], x)
dn = {k:v for k, v in namespaces.iteritems() if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
dn = {k:v for k, v in iteritems(namespaces) if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
doc = E.document()
body = E.body()
@ -73,7 +74,7 @@ def margin(which):
E.docGrid(**{w('linePitch'):"360"}),
))
dn = {k:v for k, v in namespaces.iteritems() if k in tuple('wra') + ('wp',)}
dn = {k:v for k, v in iteritems(namespaces) if k in tuple('wra') + ('wp',)}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
styles = E.styles(
E.docDefaults(
@ -120,12 +121,12 @@ class DocumentRelationships(object):
def __init__(self, namespace):
self.rmap = {}
self.namespace = namespace
for typ, target in {
for typ, target in iteritems({
namespace.names['STYLES']: 'styles.xml',
namespace.names['NUMBERING']: 'numbering.xml',
namespace.names['WEB_SETTINGS']: 'webSettings.xml',
namespace.names['FONTS']: 'fontTable.xml',
}.iteritems():
}):
self.add_relationship(target, typ)
def get_relationship_id(self, target, rtype, target_mode=None):
@ -145,7 +146,7 @@ def serialize(self):
namespaces = self.namespace.namespaces
E = ElementMaker(namespace=namespaces['pr'], nsmap={None:namespaces['pr']})
relationships = E.Relationships()
for (target, rtype, target_mode), rid in self.rmap.iteritems():
for (target, rtype, target_mode), rid in iteritems(self.rmap):
r = E.Relationship(Id=rid, Type=rtype, Target=target)
if target_mode is not None:
r.set('TargetMode', target_mode)
@ -172,7 +173,7 @@ def __init__(self, opts, log):
def contenttypes(self):
E = ElementMaker(namespace=self.namespace.namespaces['ct'], nsmap={None:self.namespace.namespaces['ct']})
types = E.Types()
for partname, mt in {
for partname, mt in iteritems({
"/word/footnotes.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml",
"/word/document.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml",
"/word/numbering.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml",
@ -184,15 +185,15 @@ def contenttypes(self):
"/word/webSettings.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml",
"/docProps/core.xml": "application/vnd.openxmlformats-package.core-properties+xml",
"/docProps/app.xml": "application/vnd.openxmlformats-officedocument.extended-properties+xml",
}.iteritems():
}):
types.append(E.Override(PartName=partname, ContentType=mt))
added = {'png', 'gif', 'jpeg', 'jpg', 'svg', 'xml'}
for ext in added:
types.append(E.Default(Extension=ext, ContentType=guess_type('a.'+ext)[0]))
for ext, mt in {
for ext, mt in iteritems({
"rels": "application/vnd.openxmlformats-package.relationships+xml",
"odttf": "application/vnd.openxmlformats-officedocument.obfuscatedFont",
}.iteritems():
}):
added.add(ext)
types.append(E.Default(Extension=ext, ContentType=mt))
for fname in self.images:
@ -270,9 +271,9 @@ def write(self, path_or_stream, mi, create_empty_document=False):
zf.writestr('word/fontTable.xml', xml2str(self.font_table))
zf.writestr('word/_rels/document.xml.rels', self.document_relationships.serialize())
zf.writestr('word/_rels/fontTable.xml.rels', xml2str(self.embedded_fonts))
for fname, data_getter in self.images.iteritems():
for fname, data_getter in iteritems(self.images):
zf.writestr(fname, data_getter())
for fname, data in self.fonts.iteritems():
for fname, data in iteritems(self.fonts):
zf.writestr(fname, data)
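
Several hunks in this file wrap dict literals, e.g. for typ, target in iteritems({...}). On an inline literal there is nothing lazy to gain over .items(); the fixer applies the rewrite uniformly so one idiom works under both interpreters. A sketch, assuming the shim from earlier is importable:

from polyglot.builtins import iteritems  # assumes the shim sketched above

# Iterating a literal mapping; functionally identical to .items() here.
for ext, mt in iteritems({
    'rels': 'application/vnd.openxmlformats-package.relationships+xml',
    'odttf': 'application/vnd.openxmlformats-officedocument.obfuscatedFont',
}):
    print(ext, mt)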

View file

@ -10,7 +10,7 @@
import posixpath
from collections import namedtuple
from functools import partial
from polyglot.builtins import map
from polyglot.builtins import iteritems, itervalues, map
from lxml import etree
@ -131,7 +131,7 @@ def create_image_markup(self, html_img, stylizer, href, as_block=False):
if fake_margins:
# DOCX does not support setting margins for inline images, so we
# fake it by using effect extents to simulate margins
makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in get_image_margins(style).iteritems()})
makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in iteritems(get_image_margins(style))})
else:
makeelement(parent, 'wp:effectExtent', l='0', r='0', t='0', b='0')
if floating is not None:
@ -175,7 +175,7 @@ def create_filename(self, href, fmt):
return fname
def serialize(self, images_map):
for img in self.images.itervalues():
for img in itervalues(self.images):
images_map['word/' + img.fname] = partial(self.get_data, img.item)
def get_data(self, item):

View file

@ -9,6 +9,8 @@
from collections import defaultdict
from operator import attrgetter
from polyglot.builtins import iteritems, itervalues
LIST_STYLES = frozenset(
'disc circle square decimal decimal-leading-zero lower-roman upper-roman'
' lower-greek lower-alpha lower-latin upper-alpha upper-latin hiragana hebrew'
@ -62,7 +64,7 @@ def finalize(self):
items_for_level = defaultdict(list)
container_for_level = {}
type_for_level = {}
for ilvl, items in self.level_map.iteritems():
for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
items_for_level[ilvl].append(list_tag)
container_for_level[ilvl] = container
@ -76,7 +78,7 @@ def __hash__(self):
return hash(self.levels)
def link_blocks(self):
for ilvl, items in self.level_map.iteritems():
for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
block.numbering_id = (self.num_id + 1, ilvl)
@ -148,16 +150,16 @@ def finalize(self, all_blocks):
ilvl = len(container_tags) - 1
l.level_map[ilvl].append((container_tags[0], list_tag, block, list_type, tag_style))
[nd.finalize() for nd in lists.itervalues()]
[nd.finalize() for nd in itervalues(lists)]
definitions = {}
for defn in lists.itervalues():
for defn in itervalues(lists):
try:
defn = definitions[defn]
except KeyError:
definitions[defn] = defn
defn.num_id = len(definitions) - 1
defn.link_blocks()
self.definitions = sorted(definitions.itervalues(), key=attrgetter('num_id'))
self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))
def serialize(self, parent):
for defn in self.definitions:

View file

@ -15,7 +15,7 @@
from calibre.ebooks import parse_css_length
from calibre.ebooks.docx.writer.utils import convert_color, int_or_zero
from calibre.utils.localization import lang_as_iso639_1
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, unicode_type
from tinycss.css21 import CSS21Parser
css_parser = CSS21Parser()
@ -158,7 +158,7 @@ def calculate_hash(self):
getattr(self, x) for x in self.ALL_PROPS))
def makeelement(self, parent, name, **attrs):
return parent.makeelement(self.w(name), **{self.w(k):v for k, v in attrs.iteritems()})
return parent.makeelement(self.w(name), **{self.w(k):v for k, v in iteritems(attrs)})
def __hash__(self):
return self._hash
@ -365,7 +365,7 @@ def __init__(self, parent_style, child_style):
p = []
def add(name, **props):
p.append((name, frozenset(props.iteritems())))
p.append((name, frozenset(iteritems(props))))
def vals(attr):
return getattr(parent_style, attr), getattr(child_style, attr)
@ -562,7 +562,7 @@ def serialize(self, styles, normal_style):
def serialize_properties(self, pPr, normal_style):
makeelement, w = self.makeelement, self.w
spacing = makeelement(pPr, 'spacing')
for edge, attr in {'top':'before', 'bottom':'after'}.iteritems():
for edge, attr in iteritems({'top':'before', 'bottom':'after'}):
getter = attrgetter('css_margin_' + edge)
css_val, css_unit = parse_css_length(getter(self))
if css_unit in ('em', 'ex'):
@ -696,7 +696,7 @@ def finalize(self, all_blocks):
counts = Counter()
smap = {}
for (bs, rs), blocks in used_pairs.iteritems():
for (bs, rs), blocks in iteritems(used_pairs):
s = CombinedStyle(bs, rs, blocks, self.namespace)
smap[(bs, rs)] = s
counts[s] += sum(1 for b in blocks if not b.is_empty())
@ -721,7 +721,7 @@ def finalize(self, all_blocks):
heading_styles.append(style)
style.id = style.name = val
style.seq = i
self.combined_styles = sorted(counts.iterkeys(), key=attrgetter('seq'))
self.combined_styles = sorted(iterkeys(counts), key=attrgetter('seq'))
[ls.apply() for ls in self.combined_styles]
descendant_style_map = {}

View file

@ -10,7 +10,7 @@
from calibre.ebooks.docx.writer.utils import convert_color
from calibre.ebooks.docx.writer.styles import read_css_block_borders as rcbb, border_edges
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
class Dummy(object):
@ -125,7 +125,7 @@ def serialize(self, parent, makeelement):
makeelement(tcPr, 'w:shd', w_val="clear", w_color="auto", w_fill=bc)
b = makeelement(tcPr, 'w:tcBorders', append=False)
for edge, border in self.borders.iteritems():
for edge, border in iteritems(self.borders):
if border is not None and border.width > 0 and border.style != 'none':
makeelement(b, 'w:' + edge, w_val=border.style, w_sz=str(border.width), w_color=border.color)
if len(b) > 0:

View file

@ -10,7 +10,7 @@
from polyglot.builtins import map
from calibre.ebooks.epub.cfi.parse import parser, cfi_sort_key, decode_cfi
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
class Tests(unittest.TestCase):
@ -61,7 +61,7 @@ def a(before=None, after=None, **params):
if after is not None:
ta['after'] = after
if params:
ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in params.iteritems()}
ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in iteritems(params)}
if ta:
step['text_assertion'] = ta
return ans

View file

@ -10,6 +10,7 @@
import os
from pylrfopt import tagListOptimizer
from polyglot.builtins import iteritems
PYLRF_VERSION = "1.0"
@ -526,7 +527,7 @@ def appendTagDict(self, tagDict, genClass=None):
# belongs somewhere, so here it is.
#
composites = {}
for name, value in tagDict.iteritems():
for name, value in iteritems(tagDict):
if name == 'rubyAlignAndAdjust':
continue
if name in {

View file

@ -14,7 +14,7 @@
TOP_LEVEL_IDENTIFIERS, ALL_METADATA_FIELDS)
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.icu import sort_key
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, unicode_type
# Special sets used to optimize the performance of getting and setting
# attributes on Metadata objects
@ -137,7 +137,7 @@ def __getattribute__(self, field):
return object.__getattribute__(self, field)
except AttributeError:
pass
if field in _data['user_metadata'].iterkeys():
if field in iterkeys(_data['user_metadata']):
d = _data['user_metadata'][field]
val = d['#value#']
if d['datatype'] != 'composite':
@ -180,7 +180,7 @@ def __setattr__(self, field, val, extra=None):
if val and val.lower() != 'und':
langs = [val]
_data['languages'] = langs
elif field in _data['user_metadata'].iterkeys():
elif field in iterkeys(_data['user_metadata']):
_data['user_metadata'][field]['#value#'] = val
_data['user_metadata'][field]['#extra#'] = extra
else:
@ -190,7 +190,7 @@ def __setattr__(self, field, val, extra=None):
self.__dict__[field] = val
def __iter__(self):
return object.__getattribute__(self, '_data').iterkeys()
return iterkeys(object.__getattribute__(self, '_data'))
def has_key(self, key):
return key in object.__getattribute__(self, '_data')
@ -219,7 +219,7 @@ def get(self, field, default=None):
def get_extra(self, field, default=None):
_data = object.__getattribute__(self, '_data')
if field in _data['user_metadata'].iterkeys():
if field in iterkeys(_data['user_metadata']):
try:
return _data['user_metadata'][field]['#extra#']
except:
@ -255,7 +255,7 @@ def set_identifiers(self, identifiers):
Set all identifiers. Note that if you previously set ISBN, calling
this method will delete it.
'''
cleaned = {ck(k):cv(v) for k, v in identifiers.iteritems() if k and v}
cleaned = {ck(k):cv(v) for k, v in iteritems(identifiers) if k and v}
object.__getattribute__(self, '_data')['identifiers'] = cleaned
def set_identifier(self, typ, val):
@ -287,14 +287,14 @@ def custom_field_keys(self):
'''
return a list of the custom fields in this book
'''
return object.__getattribute__(self, '_data')['user_metadata'].iterkeys()
return iterkeys(object.__getattribute__(self, '_data')['user_metadata'])
def all_field_keys(self):
'''
All field keys known by this instance, even if their value is None
'''
_data = object.__getattribute__(self, '_data')
return frozenset(ALL_METADATA_FIELDS.union(_data['user_metadata'].iterkeys()))
return frozenset(ALL_METADATA_FIELDS.union(iterkeys(_data['user_metadata'])))
def metadata_for_field(self, key):
'''
@ -320,7 +320,7 @@ def all_non_none_fields(self):
v = self.get(attr, None)
if v is not None:
result[attr] = v
for attr in _data['user_metadata'].iterkeys():
for attr in iterkeys(_data['user_metadata']):
v = self.get(attr, None)
if v is not None:
result[attr] = v
@ -396,7 +396,7 @@ def set_all_user_metadata(self, metadata):
return
um = {}
for key, meta in metadata.iteritems():
for key, meta in iteritems(metadata):
m = meta.copy()
if '#value#' not in m:
if m['datatype'] == 'text' and m['is_multiple']:
@ -576,7 +576,7 @@ def copy_not_none(dest, src, attr):
if callable(getattr(other, 'get_identifiers', None)):
d = self.get_identifiers()
s = other.get_identifiers()
d.update([v for v in s.iteritems() if v[1] is not None])
d.update([v for v in iteritems(s) if v[1] is not None])
self.set_identifiers(d)
else:
# other structure not Metadata. Copy the top-level identifiers
@ -749,7 +749,7 @@ def fmt(x, y):
fmt('Rights', unicode_type(self.rights))
if self.identifiers:
fmt('Identifiers', u', '.join(['%s:%s'%(k, v) for k, v in
self.identifiers.iteritems()]))
iteritems(self.identifiers)]))
if self.comments:
fmt('Comments', self.comments)
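
One pattern the mechanical rewrite preserves is field in iterkeys(_data['user_metadata']). For a pure membership test the wrapper buys nothing: field in d performs the same check as a constant-time hash lookup, whereas in on an iterator scans linearly. The fixer keeps the original shape rather than optimizing:

# Membership on the dict itself is O(1); membership on an iterator of its
# keys walks the iterator. Both answer the same question.
um = {'#mytag': {'#value#': 'x'}}
assert ('#mytag' in um) == ('#mytag' in iter(um.keys()))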

View file

@ -13,6 +13,7 @@
from calibre.constants import filesystem_encoding, preferred_encoding
from calibre.library.field_metadata import FieldMetadata
from calibre import isbytestring
from polyglot.builtins import iteritems, itervalues
# Translate datetimes to and from strings. The string form is the datetime in
# UTC. The returned date is also UTC
@ -149,7 +150,7 @@ def encode_book_metadata(self, book):
def encode_metadata_attr(self, book, key):
if key == 'user_metadata':
meta = book.get_all_user_metadata(make_copy=True)
for fm in meta.itervalues():
for fm in itervalues(meta):
if fm['datatype'] == 'datetime':
fm['#value#'] = datetime_to_string(fm['#value#'])
encode_is_multiple(fm)
@ -184,7 +185,7 @@ def decode_from_file(self, file_, booklist, book_class, prefix):
def raw_to_book(self, json_book, book_class, prefix):
try:
book = book_class(prefix, json_book.get('lpath', None))
for key,val in json_book.iteritems():
for key,val in iteritems(json_book):
meta = self.decode_metadata(key, val)
if key == 'user_metadata':
book.set_all_user_metadata(meta)
@ -201,7 +202,7 @@ def decode_metadata(self, key, value):
if key == 'classifiers':
key = 'identifiers'
if key == 'user_metadata':
for fm in value.itervalues():
for fm in itervalues(value):
if fm['datatype'] == 'datetime':
fm['#value#'] = string_to_datetime(fm['#value#'])
decode_is_multiple(fm)

View file

@ -10,7 +10,7 @@
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def ensure_unicode(obj, enc=preferred_encoding):
@ -21,7 +21,7 @@ def ensure_unicode(obj, enc=preferred_encoding):
if isinstance(obj, (list, tuple)):
return [ensure_unicode(x) for x in obj]
if isinstance(obj, dict):
return {ensure_unicode(k): ensure_unicode(v) for k, v in obj.iteritems()}
return {ensure_unicode(k): ensure_unicode(v) for k, v in iteritems(obj)}
return obj
@ -63,7 +63,7 @@ def metadata_as_dict(mi, encode_cover_data=False):
def metadata_from_dict(src):
ans = Metadata('Unknown')
for key, value in src.iteritems():
for key, value in iteritems(src):
if key == 'user_metadata':
ans.set_all_user_metadata(value)
else:

View file

@ -16,7 +16,7 @@
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre import prints
from calibre.utils.date import parse_date
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
USAGE=_('%prog ebook_file [options]\n') + \
_('''
@ -150,7 +150,7 @@ def do_set_metadata(opts, mi, stream, stream_type):
if val:
orig = mi.get_identifiers()
orig.update(val)
val = {k:v for k, v in orig.iteritems() if k and v}
val = {k:v for k, v in iteritems(orig) if k and v}
mi.set_identifiers(val)
if getattr(opts, 'cover', None) is not None:

View file

@ -16,6 +16,7 @@
from calibre.ebooks.chardet import xml_to_unicode
from calibre import replace_entities, isbytestring
from calibre.utils.date import parse_date, is_date_undefined
from polyglot.builtins import iteritems, itervalues
def get_metadata(stream):
@ -60,7 +61,7 @@ def get_metadata(stream):
def parse_meta_tags(src):
rmap = {}
for field, names in META_NAMES.iteritems():
for field, names in iteritems(META_NAMES):
for name in names:
rmap[name.lower()] = field
all_names = '|'.join(rmap)
@ -89,8 +90,8 @@ def parse_meta_tags(src):
def parse_comment_tags(src):
all_names = '|'.join(COMMENT_NAMES.itervalues())
rmap = {v:k for k, v in COMMENT_NAMES.iteritems()}
all_names = '|'.join(itervalues(COMMENT_NAMES))
rmap = {v:k for k, v in iteritems(COMMENT_NAMES)}
ans = {}
for match in re.finditer(r'''<!--\s*(?P<name>%s)\s*=\s*%s''' % (all_names, attr_pat), src):
field = rmap[match.group('name')]

View file

@ -11,6 +11,7 @@
from calibre.ebooks.metadata.opf3 import apply_metadata, read_metadata
from calibre.ebooks.metadata.utils import parse_opf, normalize_languages, create_manifest_item, parse_opf_version
from calibre.ebooks.metadata import MetaInformation
from polyglot.builtins import iteritems
class DummyFile(object):
@ -61,7 +62,7 @@ def set_metadata_opf2(root, cover_prefix, mi, opf_version,
else:
orig = opf.get_identifiers()
orig.update(mi.get_identifiers())
opf.set_identifiers({k:v for k, v in orig.iteritems() if k and v})
opf.set_identifiers({k:v for k, v in iteritems(orig) if k and v})
if update_timestamp and mi.timestamp is not None:
opf.timestamp = mi.timestamp
raster_cover = opf.raster_cover

View file

@ -23,7 +23,7 @@
from calibre import prints, guess_type
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.config import tweaks
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
from polyglot.urllib import unquote, urlparse
pretty_print_opf = False
@ -977,7 +977,7 @@ def get_identifiers(self):
'descendant::*[local-name() = "identifier" and text()]')(
self.metadata):
found_scheme = False
for attr, val in x.attrib.iteritems():
for attr, val in iteritems(x.attrib):
if attr.endswith('scheme'):
typ = icu_lower(val)
val = etree.tostring(x, with_tail=False, encoding=unicode_type,
@ -1010,7 +1010,7 @@ def set_identifiers(self, identifiers):
self.metadata):
xid = x.get('id', None)
is_package_identifier = uuid_id is not None and uuid_id == xid
typ = {val for attr, val in x.attrib.iteritems() if attr.endswith('scheme')}
typ = {val for attr, val in iteritems(x.attrib) if attr.endswith('scheme')}
if is_package_identifier:
typ = tuple(typ)
if typ and typ[0].lower() in identifiers:
@ -1019,7 +1019,7 @@ def set_identifiers(self, identifiers):
if typ and not (typ & {'calibre', 'uuid'}):
x.getparent().remove(x)
for typ, val in identifiers.iteritems():
for typ, val in iteritems(identifiers):
attrib = {'{%s}scheme'%self.NAMESPACES['opf']: typ.upper()}
self.set_text(self.create_metadata_element(
'identifier', attrib=attrib), unicode_type(val))
@ -1155,7 +1155,7 @@ def unique_identifier(self):
def page_progression_direction(self):
spine = self.XPath('descendant::*[re:match(name(), "spine", "i")][1]')(self.root)
if spine:
for k, v in spine[0].attrib.iteritems():
for k, v in iteritems(spine[0].attrib):
if k == 'page-progression-direction' or k.endswith('}page-progression-direction'):
return v
@ -1525,7 +1525,7 @@ def CAL_ELEM(name, content):
a(DC_ELEM('description', self.comments))
if self.publisher:
a(DC_ELEM('publisher', self.publisher))
for key, val in self.get_identifiers().iteritems():
for key, val in iteritems(self.get_identifiers()):
a(DC_ELEM('identifier', val, opf_attrs={'scheme':icu_upper(key)}))
if self.rights:
a(DC_ELEM('rights', self.rights))
@ -1651,7 +1651,7 @@ def factory(tag, text=None, sort=None, role=None, scheme=None, name=None,
try:
elem = metadata.makeelement(tag, attrib=attrib)
except ValueError:
elem = metadata.makeelement(tag, attrib={k:clean_xml_chars(v) for k, v in attrib.iteritems()})
elem = metadata.makeelement(tag, attrib={k:clean_xml_chars(v) for k, v in iteritems(attrib)})
elem.tail = '\n'+(' '*8)
if text:
try:
@ -1672,7 +1672,7 @@ def factory(tag, text=None, sort=None, role=None, scheme=None, name=None,
factory(DC('description'), clean_ascii_chars(mi.comments))
if mi.publisher:
factory(DC('publisher'), mi.publisher)
for key, val in mi.get_identifiers().iteritems():
for key, val in iteritems(mi.get_identifiers()):
factory(DC('identifier'), val, scheme=icu_upper(key))
if mi.rights:
factory(DC('rights'), mi.rights)

View file

@ -8,7 +8,7 @@
import re
from collections import defaultdict, namedtuple
from functools import wraps
from polyglot.builtins import map
from polyglot.builtins import iteritems, map
from lxml import etree
@ -190,9 +190,9 @@ def ensure_prefix(root, prefixes, prefix, value=None):
if prefixes is None:
prefixes = read_prefixes(root)
prefixes[prefix] = value or reserved_prefixes[prefix]
prefixes = {k:v for k, v in prefixes.iteritems() if reserved_prefixes.get(k) != v}
prefixes = {k:v for k, v in iteritems(prefixes) if reserved_prefixes.get(k) != v}
if prefixes:
root.set('prefix', ' '.join('%s: %s' % (k, v) for k, v in prefixes.iteritems()))
root.set('prefix', ' '.join('%s: %s' % (k, v) for k, v in iteritems(prefixes)))
else:
root.attrib.pop('prefix', None)
@ -299,7 +299,7 @@ def set_identifiers(root, prefixes, refines, new_identifiers, force_identifiers=
remove_element(ident, refines)
continue
metadata = XPath('./opf:metadata')(root)[0]
for scheme, val in new_identifiers.iteritems():
for scheme, val in iteritems(new_identifiers):
ident = metadata.makeelement(DC('identifier'))
ident.text = '%s:%s' % (scheme, val)
if package_identifier is None:
@ -854,7 +854,7 @@ def writer(root, prefixes, refines, val):
def deserialize_user_metadata(val):
val = json.loads(val, object_hook=from_json)
ans = {}
for name, fm in val.iteritems():
for name, fm in iteritems(val):
decode_is_multiple(fm)
ans[name] = fm
return ans
@ -969,7 +969,7 @@ def read_metadata(root, ver=None, return_extra_data=False):
prefixes, refines = read_prefixes(root), read_refines(root)
identifiers = read_identifiers(root, prefixes, refines)
ids = {}
for key, vals in identifiers.iteritems():
for key, vals in iteritems(identifiers):
if key == 'calibre':
ans.application_id = vals[0]
elif key == 'uuid':
@ -1007,7 +1007,7 @@ def read_metadata(root, ver=None, return_extra_data=False):
ans.series, ans.series_index = s, si
ans.author_link_map = read_author_link_map(root, prefixes, refines) or ans.author_link_map
ans.user_categories = read_user_categories(root, prefixes, refines) or ans.user_categories
for name, fm in (read_user_metadata(root, prefixes, refines) or {}).iteritems():
for name, fm in iteritems((read_user_metadata(root, prefixes, refines) or {})):
ans.set_user_metadata(name, fm)
if return_extra_data:
ans = ans, ver, read_raster_cover(root, prefixes, refines), first_spine_item(root, prefixes, refines)
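
Note the iteritems((read_user_metadata(root, prefixes, refines) or {})) form above: the or {} guard tolerates a helper that returns None when nothing is found, so the loop simply runs zero times. A minimal sketch with a hypothetical helper:

def read_user_metadata():  # hypothetical stand-in for the real reader
    return None  # nothing found

# The guard turns None into an empty mapping; the loop body never runs.
for name, fm in (read_user_metadata() or {}).items():
    raise AssertionError('unreachable')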

View file

@ -13,6 +13,7 @@
set_refines, set_user_metadata3
)
from calibre.ebooks.metadata.utils import parse_opf, pretty_print_opf
from polyglot.builtins import itervalues
class Data(object):
@ -140,7 +141,7 @@ def upgrade_series(root, data):
def upgrade_custom(root, data):
m = read_user_metadata2(root, remove_tags=True)
if m:
for fm in m.itervalues():
for fm in itervalues(m):
encode_is_multiple(fm)
set_user_metadata3(root, data.prefixes, data.refines, m)

View file

@ -12,7 +12,7 @@
from calibre.ebooks.metadata import (
MetaInformation, string_to_authors, check_isbn, check_doi)
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def get_tools():
@ -153,9 +153,9 @@ def get_metadata(stream, cover=True):
# Look for recognizable identifiers in the info dict, if they were not
# found in the XMP metadata
for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.iteritems():
for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
if scheme not in mi.get_identifiers():
for k, v in info.iteritems():
for k, v in iteritems(info):
if k != 'xmp_metadata':
val = check_func(v)
if val:

View file

@ -4,6 +4,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from polyglot.builtins import iteritems
from polyglot.urllib import quote_plus
AUTHOR_SEARCHES = {
@ -54,7 +55,7 @@ def qquote(val):
def url_for(template, data):
return template.format(**{k: qquote(v) for k, v in data.iteritems()})
return template.format(**{k: qquote(v) for k, v in iteritems(data)})
def url_for_author_search(key, **kw):

View file

@ -14,6 +14,7 @@
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.author_mapper import cap_author_token
from calibre.utils.localization import canonicalize_lang, get_lang
from polyglot.builtins import iteritems
def create_log(ostream=None):
@ -65,7 +66,7 @@ class InternalMetadataCompareKeyGen(object):
def __init__(self, mi, source_plugin, title, authors, identifiers):
same_identifier = 2
idents = mi.get_identifiers()
for k, v in identifiers.iteritems():
for k, v in iteritems(identifiers):
if idents.get(k) == v:
same_identifier = 1
break
@ -280,7 +281,7 @@ def browser(self):
def get_related_isbns(self, id_):
with self.cache_lock:
for isbn, q in self._isbn_to_identifier_cache.iteritems():
for isbn, q in iteritems(self._isbn_to_identifier_cache):
if q == id_:
yield isbn
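
get_related_isbns above does a reverse lookup by scanning iteritems(self._isbn_to_identifier_cache). Unlike the one-to-one rmap inversion seen earlier, a scan tolerates many keys sharing one value:

# Collect every key that maps to a given value; safe for many-to-one data.
cache = {'isbn-a': 'id1', 'isbn-b': 'id1', 'isbn-c': 'id2'}
related = sorted(isbn for isbn, q in cache.items() if q == 'id1')
assert related == ['isbn-a', 'isbn-b']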

View file

@ -27,7 +27,7 @@
from calibre.utils.icu import lower
from calibre.utils.date import UNDEFINED_DATE
from calibre.utils.formatter import EvalFormatter
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type
# Download worker {{{
@ -99,7 +99,7 @@ def __init__(self, log):
def isbn_in_pool(self, isbn):
if isbn:
for isbns, pool in self.pools.iteritems():
for isbns, pool in iteritems(self.pools):
if isbn in isbns:
return pool
return None
@ -147,7 +147,7 @@ def add_result(self, result):
def finalize(self):
has_isbn_result = False
for results in self.pools.itervalues():
for results in itervalues(self.pools):
if results:
has_isbn_result = True
break
@ -192,7 +192,7 @@ def merge_metadata_results(self, merge_on_identifiers=False):
if len(groups) != len(self.results):
self.results = []
for rgroup in groups.itervalues():
for rgroup in itervalues(groups):
rel = [r.average_source_relevance for r in rgroup]
if len(rgroup) > 1:
result = self.merge(rgroup, None, do_asr=False)
@ -206,7 +206,7 @@ def merge_metadata_results(self, merge_on_identifiers=False):
groups, empty = {}, []
for result in self.results:
key = set()
for typ, val in result.identifiers.iteritems():
for typ, val in iteritems(result.identifiers):
if typ and val:
key.add((typ, val))
if key:
@ -227,7 +227,7 @@ def merge_metadata_results(self, merge_on_identifiers=False):
if len(groups) != len(self.results):
self.results = []
for rgroup in groups.itervalues():
for rgroup in itervalues(groups):
rel = [r.average_source_relevance for r in rgroup]
if len(rgroup) > 1:
result = self.merge(rgroup, None, do_asr=False)
@ -244,7 +244,7 @@ def merge_metadata_results(self, merge_on_identifiers=False):
def merge_isbn_results(self):
self.results = []
sources = set()
for min_year, results in self.pools.itervalues():
for min_year, results in itervalues(self.pools):
if results:
for r in results:
sources.add(r.identify_plugin)
@ -362,7 +362,7 @@ def merge(self, results, min_year, do_asr=True):
def merge_identify_results(result_map, log):
isbn_merge = ISBNMerge(log)
for plugin, results in result_map.iteritems():
for plugin, results in iteritems(result_map):
for result in results:
isbn_merge.add_result(result)
@ -439,12 +439,12 @@ def get_results():
pass
sort_kwargs = dict(kwargs)
for k in list(sort_kwargs.iterkeys()):
for k in list(iterkeys(sort_kwargs)):
if k not in ('title', 'authors', 'identifiers'):
sort_kwargs.pop(k)
longest, lp = -1, ''
for plugin, presults in results.iteritems():
for plugin, presults in iteritems(results):
presults.sort(key=plugin.identify_results_keygen(**sort_kwargs))
# Throw away lower priority results from the same source that have exactly the same
@ -542,7 +542,7 @@ def swap_to_ln_fn(a):
def urls_from_identifiers(identifiers): # {{{
identifiers = {k.lower():v for k, v in identifiers.iteritems()}
identifiers = {k.lower():v for k, v in iteritems(identifiers)}
ans = []
keys_left = set(identifiers)
@ -553,7 +553,7 @@ def add(name, k, val, url):
rules = msprefs['id_link_rules']
if rules:
formatter = EvalFormatter()
for k, val in identifiers.iteritems():
for k, val in iteritems(identifiers):
val = val.replace('|', ',')
vals = {'id':quote(val if isinstance(val, bytes) else val.encode('utf-8')).decode('ascii')}
items = rules.get(k) or ()
@ -592,7 +592,7 @@ def add(name, k, val, url):
add(issn, 'issn', issn,
'https://www.worldcat.org/issn/'+issn)
q = {'http', 'https', 'file'}
for k, url in identifiers.iteritems():
for k, url in iteritems(identifiers):
if url and re.match(r'ur[il]\d*$', k) is not None:
url = url[:8].replace('|', ':') + url[8:].replace('|', ',')
if url.partition(':')[0].lower() in q:
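
The for k in list(iterkeys(sort_kwargs)) hunk above is one place the list() wrapper is load-bearing: the loop pops keys out of the very dict it iterates, and on Python 3 mutating a dict while iterating a live key view raises RuntimeError. Materializing the keys first sidesteps that:

# Snapshot the keys before mutating; iterating the dict directly while
# popping would raise "dictionary changed size during iteration" on py3.
sort_kwargs = {'title': 't', 'authors': 'a', 'timeout': 30}
for k in list(sort_kwargs):
    if k not in ('title', 'authors', 'identifiers'):
        sort_kwargs.pop(k)
assert sort_kwargs == {'title': 't', 'authors': 'a'}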

View file

@ -17,6 +17,7 @@
from calibre.ebooks.metadata.sources.base import Source
from calibre.utils.config import JSONConfig
from calibre.utils.https import get_https_resource_securely
from polyglot.builtins import iteritems, itervalues
cache = JSONConfig('metadata-sources-cache.json')
@ -38,7 +39,7 @@ def load_plugin(src):
src = src.encode('utf-8')
ns = {}
exec(src, ns)
for x in ns.itervalues():
for x in itervalues(ns):
if isinstance(x, type) and issubclass(x, Source) and x is not Source:
return x
@ -76,7 +77,7 @@ def patch_search_engines(src):
def patch_plugins():
from calibre.customize.ui import patch_metadata_plugins
patches = {}
for name, val in cache.iteritems():
for name, val in iteritems(cache):
if name == 'hashes':
continue
if name == 'search_engines':
@ -94,7 +95,7 @@ def update_needed():
'https://code.calibre-ebook.com/metadata-sources/hashes.json')
hashes = bz2.decompress(hashes)
hashes = json.loads(hashes)
for k, v in hashes.iteritems():
for k, v in iteritems(hashes):
if current_hashes.get(k) != v:
needed[k] = v
remove = set(current_hashes) - set(hashes)
@ -132,7 +133,7 @@ def main(report_error=prints, report_action=prints):
cache.touch()
return
updated = {}
for name, expected_hash in needed.iteritems():
for name, expected_hash in iteritems(needed):
report_action('Updating metadata source {}...'.format(name))
try:
update_plugin(name, updated, expected_hash)
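
load_plugin above executes downloaded source into a fresh namespace and then scans itervalues(ns) for the one class that subclasses Source. A simplified, self-contained sketch of that discovery pattern (Source swapped for a hypothetical base class):

class Base(object):  # hypothetical stand-in for calibre's Source
    pass

src = '''
class MyPlugin(Base):
    pass
'''
ns = {'Base': Base}
exec(src, ns)
plugin_cls = next(x for x in ns.values()
                  if isinstance(x, type) and issubclass(x, Base) and x is not Base)
assert plugin_cls.__name__ == 'MyPlugin'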

View file

@ -18,6 +18,7 @@
from calibre.utils.date import as_utc
from calibre.utils.logging import GUILog
from polyglot.queue import Empty, Queue
from polyglot.builtins import iteritems
def merge_result(oldmi, newmi, ensure_fields=None):
@ -54,7 +55,7 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
log = GUILog()
patch_plugins()
for book_id, mi in metadata.iteritems():
for book_id, mi in iteritems(metadata):
mi = OPF(BytesIO(mi), basedir=tdir,
populate_spine=False).to_book_metadata()
title, authors, identifiers = mi.title, mi.authors, mi.identifiers

View file

@ -19,7 +19,7 @@
from calibre.ebooks.metadata.opf2 import dump_dict
from calibre.utils.date import parse_date, isoformat, now
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import string_or_bytes
from polyglot.builtins import iteritems, string_or_bytes
_xml_declaration = re.compile(r'<\?xml[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>', re.IGNORECASE)
@ -323,7 +323,7 @@ def metadata_from_xmp_packet(raw_bytes):
identifiers[scheme] = val
# Check Dublin Core for recognizable identifier types
for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.iteritems():
for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
if scheme not in identifiers:
val = check_func(first_simple('//dc:identifier', root))
if val:
@ -407,7 +407,7 @@ def create_identifiers(xmp, identifiers):
xmp.append(xmpid)
bag = xmpid.makeelement(expand('rdf:Bag'))
xmpid.append(bag)
for scheme, value in identifiers.iteritems():
for scheme, value in iteritems(identifiers):
li = bag.makeelement(expand('rdf:li'))
li.set(expand('rdf:parseType'), 'Resource')
bag.append(li)
@ -443,7 +443,7 @@ def create_user_metadata(calibre, all_user_metadata):
calibre.append(s)
bag = s.makeelement(expand('rdf:Bag'))
s.append(bag)
for name, fm in all_user_metadata.iteritems():
for name, fm in iteritems(all_user_metadata):
try:
fm = copy.copy(fm)
encode_is_multiple(fm)
@ -473,12 +473,12 @@ def metadata_to_xmp_packet(mi):
dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
dc.set(expand('rdf:about'), '')
rdf.append(dc)
for prop, tag in {'title':'dc:title', 'comments':'dc:description'}.iteritems():
for prop, tag in iteritems({'title':'dc:title', 'comments':'dc:description'}):
val = mi.get(prop) or ''
create_alt_property(dc, tag, val)
for prop, (tag, ordered) in {
for prop, (tag, ordered) in iteritems({
'authors':('dc:creator', True), 'tags':('dc:subject', False), 'publisher':('dc:publisher', False),
}.iteritems():
}):
val = mi.get(prop) or ()
if isinstance(val, string_or_bytes):
val = [val]
@ -502,9 +502,9 @@ def metadata_to_xmp_packet(mi):
identifiers = mi.get_identifiers()
if identifiers:
create_identifiers(xmp, identifiers)
for scheme, val in identifiers.iteritems():
for scheme, val in iteritems(identifiers):
if scheme in {'isbn', 'doi'}:
for prefix, parent in extra_ids.iteritems():
for prefix, parent in iteritems(extra_ids):
ie = parent.makeelement(expand('%s:%s'%(prefix, scheme)))
ie.text = val
parent.append(ie)
@ -552,7 +552,7 @@ def find_used_namespaces(elem):
def find_preferred_prefix(namespace, elems):
for elem in elems:
ans = {v:k for k, v in elem.nsmap.iteritems()}.get(namespace, None)
ans = {v:k for k, v in iteritems(elem.nsmap)}.get(namespace, None)
if ans is not None:
return ans
return find_preferred_prefix(namespace, elem.iterchildren(etree.Element))
@ -564,7 +564,7 @@ def find_nsmap(elems):
used_namespaces |= find_used_namespaces(elem)
ans = {}
used_namespaces -= {NS_MAP['xml'], NS_MAP['x'], None, NS_MAP['rdf']}
rmap = {v:k for k, v in NS_MAP.iteritems()}
rmap = {v:k for k, v in iteritems(NS_MAP)}
i = 0
for ns in used_namespaces:
if ns in rmap:

View file

@ -14,7 +14,7 @@
from calibre.ebooks.mobi.langcodes import main_language, sub_language
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.utils import get_trailing_data
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
# PalmDB {{{
@ -597,7 +597,7 @@ def __init__(self, idx, record, extra_data_flags, decompress):
self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
self.trailing_data['raw_bytes'] = raw_trailing_bytes
for typ, val in self.trailing_data.iteritems():
for typ, val in iteritems(self.trailing_data):
if isinstance(typ, numbers.Integral):
print ('Record %d has unknown trailing data of type: %d : %r'%
(idx, typ, val))
@ -609,7 +609,7 @@ def dump(self, folder):
with open(os.path.join(folder, name+'.txt'), 'wb') as f:
f.write(self.raw)
with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f:
for k, v in self.trailing_data.iteritems():
for k, v in iteritems(self.trailing_data):
raw = '%s : %r\n\n'%(k, v)
f.write(raw.encode('utf-8'))

View file

@ -15,7 +15,7 @@
from calibre.ebooks.mobi.reader.index import (CNCX, parse_indx_header,
parse_tagx_section, parse_index_record, INDEX_HEADER_FIELDS)
from calibre.ebooks.mobi.reader.ncx import (tag_fieldname_map, default_entry)
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, range
File = namedtuple('File',
'file_number name divtbl_count start_position length')
@ -110,13 +110,13 @@ def render(self):
if self.cncx:
a('*'*10 + ' CNCX ' + '*'*10)
for offset, val in self.cncx.iteritems():
for offset, val in iteritems(self.cncx):
a('%10s: %s'%(offset, val))
ans.extend(['', ''])
if self.table is not None:
a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10)
for k, v in self.table.iteritems():
for k, v in iteritems(self.table):
a('%s: %r'%(k, v))
if self.records:
@ -140,11 +140,11 @@ def __init__(self, skelidx, records, codec):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) != {1, 6}:
if set(iterkeys(tag_map)) != {1, 6}:
raise ValueError('SKEL Index has unknown tags: %s'%
(set(tag_map.iterkeys())-{1,6}))
(set(iterkeys(tag_map))-{1,6}))
self.records.append(File(
i, # file_number
text, # name
@ -161,11 +161,11 @@ def __init__(self, sectidx, records, codec):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) != {2, 3, 4, 6}:
if set(iterkeys(tag_map)) != {2, 3, 4, 6}:
raise ValueError('Chunk Index has unknown tags: %s'%
(set(tag_map.iterkeys())-{2, 3, 4, 6}))
(set(iterkeys(tag_map))-{2, 3, 4, 6}))
toc_text = self.cncx[tag_map[2][0]]
self.records.append(Elem(
@ -186,9 +186,9 @@ def __init__(self, guideidx, records, codec):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) not in ({1, 6}, {1, 2, 3}):
if set(iterkeys(tag_map)) not in ({1, 6}, {1, 2, 3}):
raise ValueError('Guide Index has unknown tags: %s'%
tag_map)
@ -211,13 +211,13 @@ def __init__(self, ncxidx, records, codec):
NCXEntry = namedtuple('NCXEntry', 'index start length depth parent '
'first_child last_child title pos_fid kind')
for num, x in enumerate(self.table.iteritems()):
for num, x in enumerate(iteritems(self.table)):
text, tag_map = x
entry = e = default_entry.copy()
entry['name'] = text
entry['num'] = num
for tag in tag_fieldname_map.iterkeys():
for tag in iterkeys(tag_fieldname_map):
fieldname, i = tag_fieldname_map[tag]
if tag in tag_map:
fieldvalue = tag_map[tag][i]
@ -226,9 +226,9 @@ def __init__(self, ncxidx, records, codec):
# offset
fieldvalue = tuple(tag_map[tag])
entry[fieldname] = fieldvalue
for which, name in {3:'text', 5:'kind', 70:'description',
for which, name in iteritems({3:'text', 5:'kind', 70:'description',
71:'author', 72:'image_caption',
73:'image_attribution'}.iteritems():
73:'image_attribution'}):
if tag == which:
entry[name] = self.cncx.get(fieldvalue,
default_entry[name])

View file

@ -12,7 +12,7 @@
from calibre.ebooks.mobi.utils import (decint, count_set_bits,
decode_string)
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
TagX = namedtuple('TagX', 'tag num_of_values bitmask eof')
PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
@ -123,7 +123,7 @@ def __bool__(self):
__nonzero__ = __bool__
def iteritems(self):
return self.records.iteritems()
return iteritems(self.records)
# }}}
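
The CNCX hunk above is the one genuinely subtle rewrite in this group: the method is itself named iteritems, and its body calls the module-level iteritems() imported from polyglot.builtins. That works because a bare name inside a method body resolves through the module's globals, never through the instance, so there is no accidental recursion. A demonstration under that assumption:

def iteritems(d):  # module-level helper, standing in for polyglot.builtins
    return iter(d.items())

class CNCX(object):
    def __init__(self, records):
        self.records = records

    def iteritems(self):
        # Bare `iteritems` resolves to the module-level function above,
        # not to this method, so this is delegation, not recursion.
        return iteritems(self.records)

assert dict(CNCX({'a': 1}).iteritems()) == {'a': 1}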

View file

@ -23,7 +23,7 @@
from calibre.ebooks.mobi.reader.headers import BookHeader
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
class TopazError(ValueError):
@ -498,7 +498,7 @@ def barename(x):
try:
float(sz)
except ValueError:
if sz in size_map.keys():
if sz in list(size_map.keys()):
attrib['size'] = size_map[sz]
elif tag.tag == 'img':
recindex = None
@ -892,7 +892,7 @@ def extract_images(self, processed_records, output_dir):
def test_mbp_regex():
for raw, m in {
for raw, m in iteritems({
'<mbp:pagebreak></mbp:pagebreak>':'',
'<mbp:pagebreak xxx></mbp:pagebreak>yyy':' xxxyyy',
'<mbp:pagebreak> </mbp:pagebreak>':'',
@ -903,7 +903,7 @@ def test_mbp_regex():
'</mbp:pagebreak>':'',
'</mbp:pagebreak sdf>':' sdf',
'</mbp:pagebreak><mbp:pagebreak></mbp:pagebreak>xxx':'xxx',
}.iteritems():
}):
ans = MobiReader.PAGE_BREAK_PAT.sub(r'\1', raw)
if ans != m:
raise Exception('%r != %r for %r'%(ans, m, raw))
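
The if sz in list(size_map.keys()) hunk in this file shows the fixer at its most conservative: it cannot prove the .keys() result is only used for a membership test, so it wraps it in list() to guarantee Python 2 semantics. As with the iterkeys() membership note earlier, testing against the dict itself would be equivalent and cheaper:

size_map = {'xx-small': '0', 'x-small': '1'}
sz = 'x-small'
# All three spellings answer the same membership question; the bare dict
# test is the idiomatic and fastest one.
assert (sz in size_map) == (sz in list(size_map.keys())) == (sz in size_map.keys())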

View file

@ -24,7 +24,7 @@
from calibre.ebooks.mobi.utils import read_font_record
from calibre.ebooks.oeb.parse_utils import parse_html
from calibre.ebooks.oeb.base import XPath, XHTML, xml2text
from polyglot.builtins import range, zip
from polyglot.builtins import iterkeys, range, zip
from polyglot.urllib import urldefrag
Part = namedtuple('Part',
@ -134,7 +134,7 @@ def read_indices(self):
File = namedtuple('File',
'file_number name divtbl_count start_position length')
for i, text in enumerate(table.iterkeys()):
for i, text in enumerate(iterkeys(table)):
tag_map = table[text]
self.files.append(File(i, text, tag_map[1][0],
tag_map[6][0], tag_map[6][1]))
@ -143,7 +143,7 @@ def read_indices(self):
if self.header.dividx != NULL_INDEX:
table, cncx = read_index(self.kf8_sections, self.header.dividx,
self.header.codec)
for i, text in enumerate(table.iterkeys()):
for i, text in enumerate(iterkeys(table)):
tag_map = table[text]
toc_text = cncx[tag_map[2][0]]
self.elems.append(Elem(int(text), toc_text, tag_map[3][0],
@ -156,14 +156,14 @@ def read_indices(self):
Item = namedtuple('Item',
'type title pos_fid')
for i, ref_type in enumerate(table.iterkeys()):
for i, ref_type in enumerate(iterkeys(table)):
tag_map = table[ref_type]
# ref_type, ref_title, div/frag number
title = cncx[tag_map[1][0]]
fileno = None
if 3 in tag_map.keys():
if 3 in list(tag_map.keys()):
fileno = tag_map[3][0]
if 6 in tag_map.keys():
if 6 in list(tag_map.keys()):
fileno = tag_map[6]
self.guide.append(Item(ref_type.decode(self.header.codec),
title, fileno))

View file

@ -13,6 +13,7 @@
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import read_index
from polyglot.builtins import iteritems, iterkeys
tag_fieldname_map = {
1: ['pos',0],
@ -56,13 +57,13 @@ def read_ncx(sections, index, codec):
if index != NULL_INDEX:
table, cncx = read_index(sections, index, codec)
for num, x in enumerate(table.iteritems()):
for num, x in enumerate(iteritems(table)):
text, tag_map = x
entry = default_entry.copy()
entry['name'] = text
entry['num'] = num
for tag in tag_fieldname_map.iterkeys():
for tag in iterkeys(tag_fieldname_map):
fieldname, i = tag_fieldname_map[tag]
if tag in tag_map:
fieldvalue = tag_map[tag][i]
@ -71,9 +72,9 @@ def read_ncx(sections, index, codec):
# offset
fieldvalue = tuple(tag_map[tag])
entry[fieldname] = fieldvalue
for which, name in {3:'text', 5:'kind', 70:'description',
for which, name in iteritems({3:'text', 5:'kind', 70:'description',
71:'author', 72:'image_caption',
73:'image_attribution'}.iteritems():
73:'image_attribution'}):
if tag == which:
entry[name] = cncx.get(fieldvalue,
default_entry[name])
@ -100,4 +101,3 @@ def build_toc(index_entries):
item.play_order = i
return ans
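
The iterkeys() loop in read_ncx immediately looks each key back up (fieldname, i = tag_fieldname_map[tag]). Iterating items and unpacking would avoid the second lookup; a possible simplification, not what this commit does, with the second table entry invented for illustration:

    tag_fieldname_map = {1: ['pos', 0], 2: ['len', 0]}  # abridged
    for tag, (fieldname, i) in tag_fieldname_map.items():
        print(tag, fieldname, i)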

View file

@ -14,7 +14,7 @@
from calibre.utils.img import save_cover_data_to, scale_image, image_to_data, image_from_data, resize_image
from calibre.utils.imghdr import what
from calibre.ebooks import normalize
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iterkeys, unicode_type, range
from tinycss.color3 import parse_color_string
IMAGE_MAX_SIZE = 10 * 1024 * 1024
@ -589,7 +589,7 @@ def __init__(self, strings=()):
offset = 0
buf = BytesIO()
RECORD_LIMIT = 0x10000 - 1024 # kindlegen appears to use 1024, PDB limit is 0x10000
for key in self.strings.iterkeys():
for key in iterkeys(self.strings):
utf8 = utf8_text(key[:self.MAX_STRING_LENGTH])
l = len(utf8)
sz_bytes = encint(l)

View file

@ -2,7 +2,6 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from polyglot.builtins import filter, map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@ -15,7 +14,7 @@
from calibre.ebooks.mobi.utils import (encint, encode_number_as_hex,
encode_tbs, align_block, RECORD_SIZE, CNCX as CNCX_)
from polyglot.builtins import range
from polyglot.builtins import filter, iteritems, iterkeys, itervalues, map, range
class CNCX(CNCX_): # {{{
@ -109,7 +108,7 @@ class IndexEntry(object):
'author_offset': 71,
}
RTAG_MAP = {v:k for k, v in TAG_VALUES.iteritems()} # noqa
RTAG_MAP = {v:k for k, v in iteritems(TAG_VALUES)} # noqa
def __init__(self, offset, label_offset):
self.offset, self.label_offset = offset, label_offset
@ -227,7 +226,7 @@ def __init__(self, index):
# The values for this index entry
# I don't know what the 5 means, it is not the number of entries
self.secondary = [5 if tag == min(
self.INDEX_MAP.itervalues()) else 0, 0, tag]
itervalues(self.INDEX_MAP)) else 0, 0, tag]
@property
def tag_nums(self):
@ -239,7 +238,7 @@ def entry_type(self):
@classmethod
def entries(cls):
rmap = {v:k for k,v in cls.INDEX_MAP.iteritems()}
rmap = {v:k for k,v in iteritems(cls.INDEX_MAP)}
for tag in sorted(rmap, reverse=True):
yield cls(rmap[tag])
@ -284,7 +283,7 @@ def __init__(self, data, is_periodical, first=False, section_map={},
for x in ('starts', 'ends', 'completes'):
for idx in data[x]:
depth_map[idx.depth].append(idx)
for l in depth_map.itervalues():
for l in itervalues(depth_map):
l.sort(key=lambda x:x.offset)
self.periodical_tbs(data, first, depth_map)
else:
@ -318,7 +317,7 @@ def periodical_tbs(self, data, first, depth_map):
if first_node is not None and first_node.depth > 0:
parent_section_index = (first_node.index if first_node.depth == 1 else first_node.parent_index)
else:
parent_section_index = max(self.section_map.iterkeys())
parent_section_index = max(iterkeys(self.section_map))
else:
# Non terminal record

View file

@ -19,7 +19,7 @@
from calibre.ebooks.mobi.utils import (encint, encode_trailing_data,
align_block, detect_periodical, RECORD_SIZE, create_text_record)
from calibre.ebooks.mobi.writer2.indexer import Indexer
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
# Disabled as I don't care about uncrossable breaks
WRITE_UNCROSSABLE_BREAKS = False
@ -425,10 +425,10 @@ def generate_joint_record0(self): # {{{
extra_data_flags |= 0b10
header_fields['extra_data_flags'] = extra_data_flags
for k, v in {'last_text_record':'last_text_record_idx',
for k, v in iteritems({'last_text_record':'last_text_record_idx',
'first_non_text_record':'first_non_text_record_idx',
'ncx_index':'primary_index_record_idx',
}.iteritems():
}):
header_fields[k] = getattr(self, v)
if header_fields['ncx_index'] is None:
header_fields['ncx_index'] = NULL_INDEX
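
Here iteritems() wraps a throwaway dict literal, where laziness buys nothing; the fixer rewrites such sites purely for uniformity, and plain items() would be equally correct on both Pythons:

    header_fields = {}
    for k, v in {'last_text_record': 'last_text_record_idx',
                 'first_non_text_record': 'first_non_text_record_idx'}.items():
        header_fields[k] = v  # the real loop stores getattr(self, v)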

View file

@ -16,7 +16,7 @@
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\xf0\x00\x00\x00\x00\x00\xff\xff\xff!\xf9\x04\x01\x00\x00\x00\x00!\xfe calibre-placeholder-gif-for-azw3\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;' # noqa
@ -149,7 +149,7 @@ def add_extra_images(self):
def serialize(self, records, used_images):
used_image_indices = self.used_image_indices | {
v-1 for k, v in self.item_map.iteritems() if k in used_images}
v-1 for k, v in iteritems(self.item_map) if k in used_images}
for i in self.image_indices-used_image_indices:
self.records[i] = PLACEHOLDER_GIF
records.extend(self.records)

View file

@ -15,7 +15,7 @@
from calibre.ebooks.mobi.utils import (utf8_text, to_base)
from calibre.utils.localization import lang_as_iso639_1
from calibre.ebooks.metadata import authors_to_sort_string
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
EXTH_CODES = {
'creator': 100,
@ -163,7 +163,7 @@ def build_exth(metadata, prefer_author_sort=False, is_periodical=False,
else:
# Pretend to be kindlegen 1.2
vals = {204:201, 205:1, 206:2, 207:33307}
for code, val in vals.iteritems():
for code, val in iteritems(vals):
exth.write(pack(b'>III', code, 12, val))
nrecs += 1
if be_kindlegen2:
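
The loop above emits one fixed-size EXTH record per numeric code. Each record packed as '>III' is 12 bytes: record type, total record length (always 12 here), value. A quick check using the kindlegen-1.2 values from the hunk:

    from struct import pack

    rec = pack(b'>III', 204, 12, 201)  # creator-software record from the vals table
    assert len(rec) == 12
    assert rec.hex() == '000000cc0000000c000000c9'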

View file

@ -13,6 +13,7 @@
from struct import pack
from calibre.ebooks.mobi.utils import align_block
from polyglot.builtins import iteritems
NULL = 0xffffffff
zeroes = lambda x: b'\0'*x
@ -51,18 +52,18 @@ def __init__(self):
@property
def dynamic_fields(self):
return tuple(k for k, v in self.iteritems() if v is None)
return tuple(k for k, v in iteritems(self) if v is None)
def __call__(self, **kwargs):
positions = {}
for name, val in kwargs.iteritems():
for name, val in iteritems(kwargs):
if name not in self:
raise KeyError('Not a valid header field: %r'%name)
self[name] = val
buf = BytesIO()
buf.write(bytes(self.HEADER_NAME))
for name, val in self.iteritems():
for name, val in iteritems(self):
val = self.format_value(name, val)
positions[name] = buf.tell()
if val is None:
@ -72,7 +73,7 @@ def __call__(self, **kwargs):
val = pack(b'>'+fmt, val)
buf.write(val)
for pos_field, field in self.POSITIONS.iteritems():
for pos_field, field in iteritems(self.POSITIONS):
buf.seek(positions[pos_field])
buf.write(pack(b'>I', positions[field]))
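
Note that iteritems(self) treats the header object itself as a mapping; the hunks only show that it supports 'name not in self' and item assignment, so presumably the class subclasses an ordered dict. A sketch under that assumption:

    from collections import OrderedDict

    class Header(OrderedDict):  # hypothetical base class, inferred from usage
        HEADER_NAME = b'HDR '

    h = Header([('magic', 1), ('version', None)])
    # The free-function shims accept any mapping, including subclasses:
    dynamic = tuple(k for k, v in h.items() if v is None)
    assert dynamic == ('version',)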

View file

@ -31,7 +31,7 @@
from calibre.ebooks.mobi.writer8.mobi import KF8Book
from calibre.ebooks.mobi.writer8.tbs import apply_trailing_byte_sequences
from calibre.ebooks.mobi.writer8.toc import TOCAdder
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
XML_DOCS = OEB_DOCS | {SVG_MIME}
@ -133,7 +133,7 @@ def pointer(item, oref):
if item.media_type in XML_DOCS:
root = self.data(item)
for tag in XPath('//h:img|//svg:image')(root):
for attr, ref in tag.attrib.iteritems():
for attr, ref in iteritems(tag.attrib):
if attr.split('}')[-1].lower() in {'src', 'href'}:
tag.attrib[attr] = pointer(item, ref)
@ -206,7 +206,7 @@ def fix_import_rules(sheet):
extract(tag)
inlines[raw].append(repl)
for raw, elems in inlines.iteritems():
for raw, elems in iteritems(inlines):
idx = to_ref(len(self.flows))
self.flows.append(raw)
for link in elems:
@ -320,7 +320,7 @@ def in_table(elem):
def chunk_it_up(self):
placeholder_map = {}
for placeholder, x in self.link_map.iteritems():
for placeholder, x in iteritems(self.link_map):
href, frag = x
aid = self.id_map.get(x, None)
if aid is None:

View file

@ -17,7 +17,7 @@
from calibre.ebooks.oeb.base import XHTML_NS, extract
from calibre.constants import ispy3
from calibre.ebooks.mobi.utils import to_base
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
CHUNK_SIZE = 8192
@ -214,7 +214,7 @@ def __init__(self, oeb, data_func, placeholder_map):
def remove_namespaces(self, root):
lang = None
for attr, val in root.attrib.iteritems():
for attr, val in iteritems(root.attrib):
if attr.rpartition('}')[-1] == 'lang':
lang = val
@ -248,11 +248,11 @@ def remove_namespaces(self, root):
tn = tag.tag
if tn is not None:
tn = tn.rpartition('}')[-1]
attrib = {k.rpartition('}')[-1]:v for k, v in tag.attrib.iteritems()}
attrib = {k.rpartition('}')[-1]:v for k, v in iteritems(tag.attrib)}
try:
elem = nroot.makeelement(tn, attrib=attrib)
except ValueError:
attrib = {k:v for k, v in attrib.iteritems() if ':' not in k}
attrib = {k:v for k, v in iteritems(attrib) if ':' not in k}
elem = nroot.makeelement(tn, attrib=attrib)
elem.text = tag.text
elem.tail = tag.tail
@ -402,7 +402,7 @@ def to_placeholder(aid):
return bytes(':off:'.join((pos, fid)))
placeholder_map = {bytes(k):to_placeholder(v) for k, v in
self.placeholder_map.iteritems()}
iteritems(self.placeholder_map)}
# Now update the links
def sub(match):

View file

@ -23,6 +23,7 @@
from calibre.ebooks.mobi.utils import (encode_trailing_data,
encode_tbs)
from polyglot.builtins import iteritems, itervalues
Entry = namedtuple('IndexEntry', 'index start length depth parent '
'first_child last_child title action start_offset length_offset '
@ -122,7 +123,7 @@ def encode_strands_as_sequences(strands, tbs_type=8):
max_length_offset = 0
first_entry = None
for strand in strands:
for entries in strand.itervalues():
for entries in itervalues(strand):
for entry in entries:
if first_entry is None:
first_entry = entry
@ -131,7 +132,7 @@ def encode_strands_as_sequences(strands, tbs_type=8):
for strand in strands:
strand_seqs = []
for depth, entries in strand.iteritems():
for depth, entries in iteritems(strand):
extra = {}
if entries[-1].action == 'spans':
extra[0b1] = 0
@ -207,9 +208,7 @@ def apply_trailing_byte_sequences(index_table, records, text_record_lengths):
except NegativeStrandIndex:
rmap = calculate_all_tbs(indexing_data, tbs_type=5)
for i, tbs_bytes in rmap.iteritems():
for i, tbs_bytes in iteritems(rmap):
records[i] += encode_trailing_data(tbs_bytes)
return True

View file

@ -20,7 +20,7 @@
namespace, XHTML, parse_html, NotHTML)
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.short_uuid import uuid4
from polyglot.builtins import unicode_type, string_or_bytes, range
from polyglot.builtins import iteritems, unicode_type, string_or_bytes, range
from polyglot.urllib import unquote, urldefrag, urljoin, urlparse, urlunparse
XML_NS = 'http://www.w3.org/XML/1998/namespace'
@ -1479,7 +1479,7 @@ def remove(self, type):
return self.refs.pop(type, None)
def remove_by_href(self, href):
remove = [r for r, i in self.refs.iteritems() if i.href == href]
remove = [r for r, i in iteritems(self.refs) if i.href == href]
for r in remove:
self.remove(r)
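
remove_by_href is a case where the list comprehension around iteritems() is essential rather than cosmetic: it snapshots the matching keys before self.remove() starts popping entries out of self.refs. The same shape with a plain dict standing in for self.refs:

    refs = {'cover': 'a.html', 'toc': 'a.html', 'text': 'b.html'}  # invented values
    # Materialize matches first; mutating a dict while iterating a live
    # view of it is unsafe:
    remove = [r for r, href in refs.items() if href == 'a.html']
    for r in remove:
        refs.pop(r)
    assert set(refs) == {'text'}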

View file

@ -10,6 +10,7 @@
import re
from calibre import guess_type
from polyglot.builtins import iteritems
class EntityDeclarationProcessor(object): # {{{
@ -21,7 +22,7 @@ def __init__(self, html):
if len(tokens) > 1:
self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '')
self.processed_html = html
for key, val in self.declared_entities.iteritems():
for key, val in iteritems(self.declared_entities):
self.processed_html = self.processed_html.replace('&%s;'%key, val)
# }}}
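
The loop makes one str.replace() pass per declared entity, which is fine for the handful of declarations a document carries. The shape, with invented declarations:

    declared = {'nbsp': '\u00a0', 'mdash': '\u2014'}
    html = 'a&nbsp;b&mdash;c'
    processed = html
    for key, val in declared.items():  # one pass per entity, as above
        processed = processed.replace('&%s;' % key, val)
    assert processed == 'a\u00a0b\u2014c'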

View file

@ -7,7 +7,7 @@
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from polyglot.builtins import zip, string_or_bytes
from polyglot.builtins import iteritems, zip, string_or_bytes
from functools import wraps
from css_parser.css import PropertyValue
@ -140,7 +140,7 @@ def normalize_border(name, cssvalue):
style = normalizers['border-' + EDGES[0]]('border-' + EDGES[0], cssvalue)
vals = style.copy()
for edge in EDGES[1:]:
style.update({k.replace(EDGES[0], edge):v for k, v in vals.iteritems()})
style.update({k.replace(EDGES[0], edge):v for k, v in iteritems(vals)})
return style
@ -254,7 +254,7 @@ def condense_rule(style):
if prop.name and prop.name.startswith(x):
expanded[x].append(prop)
break
for prefix, vals in expanded.iteritems():
for prefix, vals in iteritems(expanded):
if len(vals) > 1 and {x.priority for x in vals} == {''}:
condensers[prefix[:-1]](style, vals)
@ -280,7 +280,7 @@ def font_dict(expected):
ans.update(expected)
return ans
for raw, expected in {
for raw, expected in iteritems({
'some_font': {'font-family':'some_font'}, 'inherit':{k:'inherit' for k in font_composition},
'1.2pt/1.4 A_Font': {'font-family':'A_Font', 'font-size':'1.2pt', 'line-height':'1.4'},
'bad font': {'font-family':'"bad font"'}, '10% serif': {'font-family':'serif', 'font-size':'10%'},
@ -291,7 +291,7 @@ def font_dict(expected):
{'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'larger',
'line-height':'normal', 'font-variant':'small-caps'},
'2em A B': {'font-family': '"A B"', 'font-size': '2em'},
}.iteritems():
}):
val = tuple(parseStyle('font: %s' % raw, validate=False))[0].cssValue
style = normalizers['font']('font', val)
self.assertDictEqual(font_dict(expected), style, raw)
@ -299,7 +299,7 @@ def font_dict(expected):
def test_border_normalization(self):
def border_edge_dict(expected, edge='right'):
ans = {'border-%s-%s' % (edge, x): DEFAULTS['border-%s-%s' % (edge, x)] for x in ('style', 'width', 'color')}
for x, v in expected.iteritems():
for x, v in iteritems(expected):
ans['border-%s-%s' % (edge, x)] = v
return ans
@ -315,39 +315,39 @@ def border_val_dict(expected, val='color'):
ans['border-%s-%s' % (edge, val)] = expected
return ans
for raw, expected in {
for raw, expected in iteritems({
'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
'1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
'2em groove': {'width':'2em', 'style':'groove'},
}.iteritems():
}):
for edge in EDGES:
br = 'border-%s' % edge
val = tuple(parseStyle('%s: %s' % (br, raw), validate=False))[0].cssValue
self.assertDictEqual(border_edge_dict(expected, edge), normalizers[br](br, val))
for raw, expected in {
for raw, expected in iteritems({
'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
'1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
'thin groove': {'width':'thin', 'style':'groove'},
}.iteritems():
}):
val = tuple(parseStyle('%s: %s' % ('border', raw), validate=False))[0].cssValue
self.assertDictEqual(border_dict(expected), normalizers['border']('border', val))
for name, val in {
for name, val in iteritems({
'width': '10%', 'color': 'rgb(0, 1, 1)', 'style': 'double',
}.iteritems():
}):
cval = tuple(parseStyle('border-%s: %s' % (name, val), validate=False))[0].cssValue
self.assertDictEqual(border_val_dict(val, name), normalizers['border-'+name]('border-'+name, cval))
def test_edge_normalization(self):
def edge_dict(prefix, expected):
return {'%s-%s' % (prefix, edge) : x for edge, x in zip(EDGES, expected)}
for raw, expected in {
for raw, expected in iteritems({
'2px': ('2px', '2px', '2px', '2px'),
'1em 2em': ('1em', '2em', '1em', '2em'),
'1em 2em 3em': ('1em', '2em', '3em', '2em'),
'1 2 3 4': ('1', '2', '3', '4'),
}.iteritems():
}):
for prefix in ('margin', 'padding'):
cval = tuple(parseStyle('%s: %s' % (prefix, raw), validate=False))[0].cssValue
self.assertDictEqual(edge_dict(prefix, expected), normalizers[prefix](prefix, cval))
@ -355,14 +355,14 @@ def edge_dict(prefix, expected):
def test_list_style_normalization(self):
def ls_dict(expected):
ans = {'list-style-%s' % x : DEFAULTS['list-style-%s' % x] for x in ('type', 'image', 'position')}
for k, v in expected.iteritems():
for k, v in iteritems(expected):
ans['list-style-%s' % k] = v
return ans
for raw, expected in {
for raw, expected in iteritems({
'url(http://www.example.com/images/list.png)': {'image': 'url(http://www.example.com/images/list.png)'},
'inside square': {'position':'inside', 'type':'square'},
'upper-roman url(img) outside': {'position':'outside', 'type':'upper-roman', 'image':'url(img)'},
}.iteritems():
}):
cval = tuple(parseStyle('list-style: %s' % raw, validate=False))[0].cssValue
self.assertDictEqual(ls_dict(expected), normalizers['list-style']('list-style', cval))
@ -382,7 +382,7 @@ def test_filter_css_normalization(self):
ae({'list-style', 'list-style-image', 'list-style-type', 'list-style-position'}, normalize_filter_css({'list-style'}))
def test_edge_condensation(self):
for s, v in {
for s, v in iteritems({
(1, 1, 3) : None,
(1, 2, 3, 4) : '2pt 3pt 4pt 1pt',
(1, 2, 3, 2) : '2pt 3pt 2pt 1pt',
@ -391,10 +391,10 @@ def test_edge_condensation(self):
(1, 1, 1, 1) : '1pt',
('2%', '2%', '2%', '2%') : '2%',
tuple('0 0 0 0'.split()) : '0',
}.iteritems():
}):
for prefix in ('margin', 'padding'):
css = {'%s-%s' % (prefix, x) : str(y)+'pt' if isinstance(y, numbers.Number) else y for x, y in zip(('left', 'top', 'right', 'bottom'), s)}
css = '; '.join(('%s:%s' % (k, v) for k, v in css.iteritems()))
css = '; '.join(('%s:%s' % (k, v) for k, v in iteritems(css)))
style = parseStyle(css)
condense_rule(style)
val = getattr(style.getProperty(prefix), 'value', None)
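
The expected strings in the edge-condensation table look shuffled only because of an order mismatch: the test zips its values with ('left', 'top', 'right', 'bottom'), while the CSS shorthand is emitted top-right-bottom-left. Checking the (1, 2, 3, 4) row:

    vals = dict(zip(('left', 'top', 'right', 'bottom'), (1, 2, 3, 4)))
    shorthand = '%(top)spt %(right)spt %(bottom)spt %(left)spt' % vals
    assert shorthand == '2pt 3pt 4pt 1pt'  # matches the table row above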

View file

@ -14,7 +14,7 @@
from calibre import xml_replace_entities, force_unicode
from calibre.constants import filesystem_encoding
from calibre.ebooks.chardet import xml_to_unicode, strip_encoding_declarations
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import iteritems, itervalues, unicode_type, string_or_bytes
RECOVER_PARSER = etree.XMLParser(recover=True, no_network=True)
XHTML_NS = 'http://www.w3.org/1999/xhtml'
@ -148,8 +148,8 @@ def clean_word_doc(data, log):
def ensure_namespace_prefixes(node, nsmap):
namespace_uris = frozenset(nsmap.itervalues())
fnsmap = {k:v for k, v in node.nsmap.iteritems() if v not in namespace_uris}
namespace_uris = frozenset(itervalues(nsmap))
fnsmap = {k:v for k, v in iteritems(node.nsmap) if v not in namespace_uris}
fnsmap.update(nsmap)
if fnsmap != dict(node.nsmap):
node = clone_element(node, nsmap=fnsmap, in_context=False)
@ -205,7 +205,7 @@ def parse_html(data, log=None, decoder=None, preprocessor=None,
val = val[1:-1]
user_entities[match.group(1)] = val
if user_entities:
pat = re.compile(r'&(%s);'%('|'.join(user_entities.keys())))
pat = re.compile(r'&(%s);'%('|'.join(list(user_entities.keys()))))
data = pat.sub(lambda m:user_entities[m.group(1)], data)
if preprocessor is not None:
@ -245,7 +245,7 @@ def parse_html(data, log=None, decoder=None, preprocessor=None,
for x in data.iterdescendants():
try:
x.tag = x.tag.lower()
for key, val in list(x.attrib.iteritems()):
for key, val in list(iteritems(x.attrib)):
del x.attrib[key]
key = key.lower()
x.attrib[key] = val
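
In this last hunk the list() around iteritems(x.attrib) is load-bearing rather than fixer noise: the loop deletes each attribute and re-adds it lowercased, so it must iterate a snapshot. The same shape with a plain dict standing in for the lxml attrib proxy:

    attrib = {'ID': 'x', 'Class': 'y'}  # stand-in for x.attrib
    for key, val in list(attrib.items()):  # snapshot before mutating
        del attrib[key]
        attrib[key.lower()] = val
    assert attrib == {'id': 'x', 'class': 'y'}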

Some files were not shown because too many files have changed in this diff.