mirror of https://github.com/JimmXinu/FanFicFare.git
Fixes for epub update.
commit 5e66aabb97
parent fa10cd36d1
3 changed files with 13 additions and 11 deletions
@@ -355,9 +355,9 @@ def do_download(arg,
             output_filename = writer.getOutputFileName()
             noturl, chaptercount = get_dcsource_chaptercount(output_filename)
             print('Updating %s, URL: %s' % (output_filename, url))
-        except Exception:
+        except Exception as e:
+            print("Failed to read epub for update: (%s) Continuing with update=false"%e)
             options.update = False
             pass

     # Check for include_images without no_image_processing. In absence of PIL, give warning.
     if adapter.getConfig('include_images') and not adapter.getConfig('no_image_processing'):
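A note on this hunk: the old except clause fell back to update=false silently; binding the exception (except Exception as e) lets the CLI report why the existing epub could not be read before degrading to a fresh download. A minimal sketch of that pattern, assuming get_dcsource_chaptercount is importable from fanficfare.epubutils as in this repo; probe_for_update, options, and url are illustrative stand-ins, not FanFicFare's actual structure:

    # Hypothetical wrapper around the fallback pattern in the hunk above.
    from fanficfare.epubutils import get_dcsource_chaptercount

    def probe_for_update(output_filename, options, url):
        try:
            noturl, chaptercount = get_dcsource_chaptercount(output_filename)
            print('Updating %s, URL: %s' % (output_filename, url))
            return chaptercount
        except Exception as e:
            # Report the reason, then degrade to a full download rather than crash.
            print("Failed to read epub for update: (%s) Continuing with update=false" % e)
            options.update = False
            return None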
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
+from __future__ import absolute_import

 __license__ = 'GPL v3'
-__copyright__ = '2017, Jim Miller'
+__copyright__ = '2018, Jim Miller'
 __docformat__ = 'restructuredtext en'

 import logging
@@ -11,7 +12,11 @@ import re, os, traceback
 from collections import defaultdict
 from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED
 from xml.dom.minidom import parseString
 from six import StringIO

+# py2 vs py3 transition
+from six import text_type as unicode
+from six import string_types as basestring
+from six import BytesIO # StringIO under py2

 import bs4
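These six aliases keep the py2 spellings usable in code that must run on both interpreters: text_type is unicode on py2 and str on py3, string_types covers the platform's string flavors, and BytesIO is the binary buffer that StringIO used to double as under py2. A self-contained sketch, assuming only that six is installed; to_text is an illustrative name:

    from six import text_type as unicode        # str on py3, unicode on py2
    from six import string_types as basestring  # tuple of the platform's string types
    from six import BytesIO                     # binary in-memory buffer on both

    def to_text(value):
        # Shared code can keep using the old py2 names after the aliasing above.
        if isinstance(value, basestring):
            return unicode(value)
        return unicode(repr(value))

    buf = BytesIO()
    buf.write(b'PK\x03\x04')  # zip magic bytes; a text buffer would reject these
    print(to_text(buf.getvalue()))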
@@ -158,7 +163,7 @@ def get_update_data(inputio,
         chapurl = soup.find('meta',{'name':'chapterurl'})
         if chapurl:
             if chapurl['content'] not in urlsoups: # keep first found if more than one.
-                #print("Found chapurl['content']:%s"%chapurl['content'])
+                # print("Found chapurl['content']:%s"%chapurl['content'])
                 currenturl = chapurl['content']
                 urlsoups[chapurl['content']] = bodysoup
         else:
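The rule this hunk touches, "keep first found if more than one", reduced to its core; the tuples below are made-up placeholders for the chapter URLs and BeautifulSoup body soups the real loop extracts:

    urlsoups = {}
    for currenturl, bodysoup in [('u1', 'soup-a'), ('u2', 'soup-b'), ('u1', 'soup-c')]:
        if currenturl not in urlsoups: # keep first found if more than one.
            urlsoups[currenturl] = bodysoup
    print(urlsoups)  # {'u1': 'soup-a', 'u2': 'soup-b'}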
@@ -188,7 +193,7 @@ def get_update_data(inputio,

     #for k in images.keys():
         #print("\tlongdesc:%s\n\tData len:%s\n"%(k,len(images[k])))
-    # print("datamaps:%s"%datamaps)
+    print("datamaps:%s"%datamaps)
     return (source,filecount,soups,images,oldcover,calibrebookmark,logfile,urlsoups,datamaps)

 def get_path_part(n):
@@ -274,7 +279,7 @@ def reset_orig_chapters_epub(inputio,outfile):
     inputepub = ZipFile(inputio, 'r') # works equally well with a path or a blob

     ## build zip in memory in case updating in place(CLI).
-    zipio = StringIO()
+    zipio = BytesIO()

     ## Write mimetype file, must be first and uncompressed.
     ## Older versions of python(2.4/5) don't allow you to specify
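The StringIO-to-BytesIO swap above matters because ZipFile writes bytes, and the io.StringIO that six.StringIO resolves to on py3 rejects them with a TypeError. A runnable sketch of the in-memory, mimetype-first zip construction these comments describe; the entry contents are placeholders:

    from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED
    from six import BytesIO

    zipio = BytesIO()  # binary buffer; StringIO() here breaks on py3

    # epub requires mimetype as the first entry, stored uncompressed, so the
    # archive is opened with ZIP_STORED first, then reopened for deflated entries.
    outputepub = ZipFile(zipio, 'w', compression=ZIP_STORED)
    outputepub.writestr('mimetype', 'application/epub+zip')
    outputepub.close()

    outputepub = ZipFile(zipio, 'a', compression=ZIP_DEFLATED)
    outputepub.writestr('META-INF/container.xml', '<container/>')  # placeholder
    outputepub.close()

    data = zipio.getvalue()  # finished archive as bytes, ready to write to disk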
@@ -579,14 +579,11 @@ div { margin: 0pt; padding: 0pt; }
         package.appendChild(guide)

         # write content.opf to zip.
-        contentxml = contentdom.toxml() # encoding='utf-8'
+        contentxml = contentdom.toxml(encoding='utf-8')
+        # Causes py2 vs py3 issues with encoding nonsense. Skip for now.
         # tweak for brain damaged Nook STR. Nook insists on name before content.
         # contentxml = contentxml.replace('<meta content="%s" name="cover"/>'%coverimgid,
         #                                 '<meta name="cover" content="%s"/>'%coverimgid)

         outputepub.writestr("content.opf",contentxml)

         contentdom.unlink()
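On this last hunk: toxml() with no argument returns a text string, while toxml(encoding='utf-8') returns encoded bytes, the unambiguous form to hand to ZipFile.writestr once non-ASCII metadata appears. It is also why the text-based Nook replace() tweak stays commented out: a str/bytes replace mismatch is exactly the "py2 vs py3 encoding nonsense" the new comment mentions. A minimal standalone demonstration:

    from xml.dom.minidom import parseString

    contentdom = parseString('<package><title>example</title></package>')
    as_text  = contentdom.toxml()                  # text: str on py3, unicode on py2
    as_bytes = contentdom.toxml(encoding='utf-8')  # UTF-8 bytes on both pythons
    assert isinstance(as_bytes, bytes)             # safe for ZipFile.writestr
    contentdom.unlink()                            # free the DOM, as the code above does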