Mirror of https://github.com/JimmXinu/FanFicFare.git, synced 2026-05-09 05:21:13 +02:00.
Compare commits
45 commits
Commit SHA1s (author and date columns empty in this mirror):

a172a7bd2b, ab103dce6e, 892e9207f0, b4e392fae1, d9525d9726, cb77b12754, b41a633821, 50c8db2992, ef6dd99bfe, 59796ff537, 8ee0a6e898, c53fc362bd, c87cfc1057, 6ee151c90a, db01c828a0, 4d03874f06, 36f56483e6, 18e45a403b, 2e25172ba3, 65e3fd562b, 7089bf6689, 061dc1333f, 0a7fb5c090, cf02f729ae, 730c4f77f9, c02da29cbd, b87d796221, 436370fe5b, ac77f31bc2, 16f2c74e4b, af5c2aa0bc, 31dec5b62d, 97d37fcfc1, c730aa2f68, 4e2e359dee, bb96049934, 84965ef25f, 348d129a1e, 4794e9bc51, d46dc76ae1, 08bae8d9be, 405c37aeb5, 270e01c3c7, 12d57f5950, 562b3a4ecd
46 changed files with 12024 additions and 11539 deletions
Calibre plugin version bump:

```diff
@@ -33,7 +33,7 @@ except NameError:
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 56, 0)
+__version__ = (4, 57, 7)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
```
Plugin config GUI (`ConfigWidget` / `StandardColumnsTab`): new "Fix Series Case?" option:

```diff
@@ -371,6 +371,7 @@ class ConfigWidget(QWidget):
         prefs['suppresstitlesort'] = self.std_columns_tab.suppresstitlesort.isChecked()
         prefs['authorcase'] = self.std_columns_tab.authorcase.isChecked()
         prefs['titlecase'] = self.std_columns_tab.titlecase.isChecked()
+        prefs['seriescase'] = self.std_columns_tab.seriescase.isChecked()
         prefs['setanthologyseries'] = self.std_columns_tab.setanthologyseries.isChecked()
 
         prefs['set_author_url'] =self.std_columns_tab.set_author_url.isChecked()
@@ -1637,6 +1638,11 @@ class StandardColumnsTab(QWidget):
         self.setanthologyseries.setChecked(prefs['setanthologyseries'])
         row.append(self.setanthologyseries)
 
+        self.seriescase = QCheckBox(_('Fix Series Case?'),self)
+        self.seriescase.setToolTip(_("If checked, Calibre's routine for correcting the capitalization of title will be applied.")
+                                   +"\n"+_("This effects Calibre metadata only, not FanFicFare metadata in title page."))
+        self.seriescase.setChecked(prefs['seriescase'])
+        row.append(self.seriescase)
         grid = QGridLayout()
         for rownum, row in enumerate(rows):
             for colnum, col in enumerate(row):
```
Plugin dialogs: `NotGoingToDownload` moves out to `fanficfare.exceptions`:

```diff
@@ -38,6 +38,7 @@ from calibre.gui2 import gprefs
 show_download_options = 'fff:add new/update dialogs:show_download_options'
 from calibre.gui2.dialogs.confirm_delete import confirm
 from calibre.gui2.complete2 import EditWithComplete
+from fanficfare.exceptions import NotGoingToDownload
 from fanficfare.six import text_type as unicode, ensure_text
 
 # pulls in translation files for _() strings
@@ -155,15 +156,6 @@ class RejectUrlEntry:
 
         return retval
 
-class NotGoingToDownload(Exception):
-    def __init__(self,error,icon='dialog_error.png',showerror=True):
-        self.error=error
-        self.icon=icon
-        self.showerror=showerror
-
-    def __str__(self):
-        return self.error
-
 class DroppableQTextEdit(QTextEdit):
     def __init__(self,parent):
         QTextEdit.__init__(self,parent)
```
Main plugin action (`FanFicFarePlugin`): the profiling decorator now comes from `fanficfare.fff_profile`, `get_cover_data` is replaced by `get_cover_img`, and `NotGoingToDownload` is raised via `fanficfare.exceptions`:

```diff
@@ -10,27 +10,6 @@ __docformat__ = 'restructuredtext en'
 import fanficfare.six as six
 from fanficfare.six import ensure_text, string_types, text_type as unicode
 
-# from io import StringIO
-# import cProfile, pstats
-# from pstats import SortKey
-
-# def do_cprofile(func):
-#     def profiled_func(*args, **kwargs):
-#         profile = cProfile.Profile()
-#         try:
-#             profile.enable()
-#             result = func(*args, **kwargs)
-#             profile.disable()
-#             return result
-#         finally:
-#             # profile.print_stats()
-#             s = StringIO()
-#             sortby = SortKey.CUMULATIVE
-#             ps = pstats.Stats(profile, stream=s).sort_stats(sortby)
-#             ps.print_stats(20)
-#             print(s.getvalue())
-#     return profiled_func
-
 import logging
 logger = logging.getLogger(__name__)
 
@@ -85,12 +64,14 @@ from fanficfare import adapters, exceptions
 
 from fanficfare.epubutils import (
     get_dcsource, get_dcsource_chaptercount, get_story_url_from_epub_html,
-    get_story_url_from_zip_html, reset_orig_chapters_epub, get_cover_data)
+    get_story_url_from_zip_html, reset_orig_chapters_epub, get_cover_img)
 
 from fanficfare.geturls import (
     get_urls_from_page, get_urls_from_text,get_urls_from_imap,
     get_urls_from_mime)
 
+from fanficfare.fff_profile import do_cprofile
+
 from calibre_plugins.fanficfare_plugin.fff_util import (
     get_fff_adapter, get_fff_config, get_fff_personalini,
     get_common_elements)
@@ -118,7 +99,7 @@ from calibre_plugins.fanficfare_plugin.dialogs import (
     LoopProgressDialog, UserPassDialog, AboutDialog, CollectURLDialog,
     RejectListDialog, EmailPassDialog, TOTPDialog,
     save_collisions, question_dialog_all,
-    NotGoingToDownload, RejectUrlEntry, IniTextDialog,
+    RejectUrlEntry, IniTextDialog,
     EditTextDialog)
 
 # because calibre immediately transforms html into zip and don't want
@@ -1172,9 +1153,9 @@ class FanFicFarePlugin(InterfaceAction):
         ## Aug2024 moved site specific search changes to adapters as
         ## classmethod
         regexp = adapters.get_url_search(url)
-        logger.debug(regexp)
+        # logger.debug(regexp)
         retval = self.gui.current_db.search_getting_ids(regexp,None,use_virtual_library=False)
-        logger.debug(retval)
+        # logger.debug(retval)
         return retval
 
     def prep_downloads(self, options, books, merge=False, extrapayload=None):
@@ -1304,7 +1285,7 @@ class FanFicFarePlugin(InterfaceAction):
         # let other exceptions percolate up.
         return adapter.getStoryMetadataOnly(get_cover=False)
 
-    # @do_cprofile
+    @do_cprofile
     def prep_download_loop(self,book,
                            options={'fileform':'epub',
                                     'collision':ADDNEW,
@@ -1343,11 +1324,11 @@ class FanFicFarePlugin(InterfaceAction):
         ## network hit.
         identicalbooks = self.do_id_search(url)
         if collision == SKIP and identicalbooks:
-            raise NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
+            raise exceptions.NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
 
         # Dialogs should prevent this case now.
         if collision in (UPDATE,UPDATEALWAYS) and fileform != 'epub':
-            raise NotGoingToDownload(_("Cannot update non-epub format."))
+            raise exceptions.NotGoingToDownload(_("Cannot update non-epub format."))
 
         if not book['good']:
             # book has already been flagged bad for whatever reason.
@@ -1541,7 +1522,7 @@ class FanFicFarePlugin(InterfaceAction):
                 logger.debug("existing found by identifier URL")
 
         if collision == SKIP and identicalbooks:
-            raise NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
+            raise exceptions.NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
 
         if len(identicalbooks) > 1:
             identicalbooks_msg = _("More than one identical book by Identifier URL or title/author(s)--can't tell which book to update/overwrite.")
@@ -1572,7 +1553,7 @@ class FanFicFarePlugin(InterfaceAction):
                 identicalbooks = []
                 collision = book['collision'] = ADDNEW
             else:
-                raise NotGoingToDownload(identicalbooks_msg,"minusminus.png")
+                raise exceptions.NotGoingToDownload(identicalbooks_msg,"minusminus.png")
 
         ## changed: add new book when CALIBREONLY if none found.
         if collision in (CALIBREONLY, CALIBREONLYSAVECOL) and not identicalbooks:
@@ -1659,11 +1640,11 @@ class FanFicFarePlugin(InterfaceAction):
             # returns int adjusted for start-end range.
             urlchaptercount = story.getChapterCount()
             if chaptercount == urlchaptercount and collision == UPDATE:
-                raise NotGoingToDownload(_("Already contains %d chapters.")%chaptercount,'edit-undo.png',showerror=False)
+                raise exceptions.NotGoingToDownload(_("Already contains %d chapters.")%chaptercount,'edit-undo.png',showerror=False)
             elif chaptercount > urlchaptercount and not (collision == UPDATEALWAYS and adapter.getConfig('force_update_epub_always')):
-                raise NotGoingToDownload(_("Existing epub contains %d chapters, web site only has %d. Use Overwrite or force_update_epub_always to force update.") % (chaptercount,urlchaptercount),'dialog_error.png')
+                raise exceptions.NotGoingToDownload(_("Existing epub contains %d chapters, web site only has %d. Use Overwrite or force_update_epub_always to force update.") % (chaptercount,urlchaptercount),'dialog_error.png')
             elif chaptercount == 0:
-                raise NotGoingToDownload(_("FanFicFare doesn't recognize chapters in existing epub, epub is probably from a different source. Use Overwrite to force update."),'dialog_error.png')
+                raise exceptions.NotGoingToDownload(_("FanFicFare doesn't recognize chapters in existing epub, epub is probably from a different source. Use Overwrite to force update."),'dialog_error.png')
 
         if collision == OVERWRITE and \
                 db.has_format(book_id,formmapping[fileform],index_is_id=True):
@@ -1680,7 +1661,7 @@ class FanFicFarePlugin(InterfaceAction):
             # updated does have time, use full timestamps.
             if (lastupdated.time() == time.min and fileupdated.date() > lastupdated.date()) or \
                     (lastupdated.time() != time.min and fileupdated > lastupdated):
-                raise NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)
+                raise exceptions.NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)
 
         # For update, provide a tmp file copy of the existing epub so
         # it can't change underneath us. Now also overwrite for logpage preserve.
@@ -1900,7 +1881,7 @@ class FanFicFarePlugin(InterfaceAction):
         else:
             return None
 
-    # @do_cprofile
+    @do_cprofile
     def update_books_loop(self,book,db=None,
                           options={'fileform':'epub',
                                    'collision':ADDNEW,
```
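The recurring change through these hunks is that `NotGoingToDownload` now comes from `fanficfare.exceptions` instead of a plugin-local class, so library code and GUI code can raise and catch the same type. A minimal sketch of that flow, assuming the fanficfare package is importable; `prep_one_book` is a hypothetical stand-in for the plugin's per-book prep step:

```python
# Sketch: one shared exception class for library and plugin code.
# The (error, icon=..., showerror=...) signature is the one shown in
# the dialogs.py hunk above.
from fanficfare import exceptions

def prep_one_book(book, collision, identicalbooks):   # hypothetical
    if collision == 'Skip' and identicalbooks:
        raise exceptions.NotGoingToDownload("Skipping duplicate story.",
                                            "list_remove.png")

try:
    prep_one_book({'url': 'http://example.com?sid=1'}, 'Skip', [42])
except exceptions.NotGoingToDownload as e:
    print("not downloading:", e)   # __str__ returns the error text
```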
`FanFicFarePlugin`, continued: anthology cover selection reworked around a `write_image()` helper, plus series-case fixing, the anthology series URL, and status tags gated by `include_subject_tags`:

```diff
@@ -2232,30 +2213,45 @@ class FanFicFarePlugin(InterfaceAction):
         ## start with None. If no subbook covers, don't force one
         ## here. User can configure FFF to always create/polish a
         ## cover if they want. This is about when we force it.
-        coverpath = None
+        coverimgpath = None
+        coverimgtype = None
+        had_cover = False
 
-        ## first, look for covers inside the subbooks. Stop at the
-        ## first one, which will be used if there isn't a pre-existing
+        # epubmerge wants a path to cover img on disk
+        def write_image(imgtype,imgdata):
+            tmp = PersistentTemporaryFile(prefix='cover_',
+                                          suffix='.'+imagetypes[imgtype],
+                                          dir=options['tdir'])
+            tmp.write(imgdata)
+            tmp.flush()
+            tmp.close()
+            return tmp.name
+
+        ## if prior epub had a cover, we should use it again.
+        if mergebook['calibre_id'] and db.has_format(mergebook['calibre_id'],'EPUB',index_is_id=True):
+            (covertype,coverdata) = get_cover_img(db.format(mergebook['calibre_id'],'EPUB',index_is_id=True,as_file=True))
+            if coverdata:
+                had_cover = True
+                coverimgpath = write_image(covertype,coverdata)
+                coverimgtype = covertype
+                logger.debug("prior anthology cover found")
+
+        ## look for covers inside the subbooks. Stop at the first
+        ## one, which will be used if there isn't a pre-existing
         ## calibre cover.
-        if not coverpath:
+        if not coverimgpath:
             for book in good_list:
-                coverdata = get_cover_data(book['outfile'])
+                (covertype,coverdata) = get_cover_img(book['outfile'])
                 if coverdata: # found a cover.
-                    (coverimgtype,coverimgdata) = coverdata[4:6]
-                    # logger.debug('coverimgtype:%s [%s]'%(coverimgtype,imagetypes[coverimgtype]))
-                    tmpcover = PersistentTemporaryFile(suffix='.'+imagetypes[coverimgtype],
-                                                       dir=options['tdir'])
-                    tmpcover.write(coverimgdata)
-                    tmpcover.flush()
-                    tmpcover.close()
-                    coverpath = tmpcover.name
+                    coverimgpath = write_image(covertype,coverdata)
+                    coverimgtype = covertype
+                    logger.debug('from subbook coverimgpath:%s'%coverimgpath)
                     break
-            # logger.debug('coverpath:%s'%coverpath)
 
         ## if updating an existing book and there is at least one
         ## subbook cover:
-        if coverpath and mergebook['calibre_id']:
+        if not had_cover and coverimgpath and mergebook['calibre_id']:
             logger.debug("anth cover: using cal cover")
             # Couldn't find a better way to get the cover path.
             calcoverpath = os.path.join(db.library_path,
                                         db.path(mergebook['calibre_id'], index_is_id=True),
@@ -2263,9 +2259,11 @@ class FanFicFarePlugin(InterfaceAction):
             ## if there's an existing cover, use it. Calibre will set
             ## it for us during lots of different actions anyway.
             if os.path.exists(calcoverpath):
-                coverpath = calcoverpath
+                coverimgpath = calcoverpath
 
-        # logger.debug('coverpath:%s'%coverpath)
+        ## Note that this cover will be replaced if 'inject
+        ## generated' cover is on
+        logger.debug('coverimgpath:%s'%coverimgpath)
         mrg_args = [tmp.name,
                     [ x['outfile'] for x in good_list ],]
         mrg_kwargs = {
@@ -2273,7 +2271,7 @@ class FanFicFarePlugin(InterfaceAction):
             'titleopt':mergebook['title'],
             'keepmetadatafiles':True,
             'source':mergebook['url'],
-            'coverjpgpath':coverpath
+            'coverjpgpath':coverimgpath
             }
         logger.debug('anthology_merge_keepsingletocs:%s'%
                      mergebook['anthology_merge_keepsingletocs'])
@@ -2651,7 +2649,6 @@ class FanFicFarePlugin(InterfaceAction):
             db.new_api.set_link_for_authors(author_id_to_link_map)
 
         # set series link if found.
-        logger.debug("has link_map:%s"%(hasattr(db.new_api,'set_link_map')))
         ## new_api.set_link_map added in Calibre v6.15
         if hasattr(db.new_api,'set_link_map') and \
                 prefs['set_series_url'] and \
@@ -2660,6 +2657,7 @@ class FanFicFarePlugin(InterfaceAction):
             series = book['series']
             if '[' in series: # a few can have a series w/o number
                 series = series[:series.rindex(' [')]
+            logger.debug("Setting series link:%s"%book['all_metadata']['seriesUrl'])
            db.new_api.set_link_map('series',{series:
                                               book['all_metadata']['seriesUrl']})
 
@@ -2849,6 +2847,9 @@ class FanFicFarePlugin(InterfaceAction):
         mi.pubdate = book['pubdate']
         mi.timestamp = book['timestamp']
         mi.comments = book['comments']
+        if prefs['seriescase']:
+            from calibre.ebooks.metadata.sources.base import fixcase
+            book['series'] = fixcase(book['series'])
         mi.series = book['series']
         return mi
 
@@ -3200,6 +3201,7 @@ The previously downloaded book is still in the anthology, but FFF doesn't have t
 
         if prefs['setanthologyseries'] and book['title'] == series:
             book['series'] = series+' [0]'
+            book['all_metadata']['seriesUrl'] = options.get('anthology_url','')
 
         # logger.debug("anthology_title_pattern:%s"%configuration.getConfig('anthology_title_pattern'))
         if configuration.getConfig('anthology_title_pattern'):
@@ -3220,7 +3222,9 @@ The previously downloaded book is still in the anthology, but FFF doesn't have t
         s = options.get('frompage',{}).get('status','')
         if s:
             book['all_metadata']['status'] = s
-            book['tags'].append(s)
+            ## status into tags only if in include_subject_tags
+            if 'status' in configuration.getConfigList('include_subject_tags'):
+                book['tags'].append(s)
         book['tags'].extend(configuration.getConfigList('anthology_tags'))
         book['all_metadata']['anthology'] = "true"
```
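The anthology-cover hunk above boils down to a priority order: reuse a cover found in the prior anthology epub, else take the first subbook cover, else fall back to the existing calibre cover on disk. A simplified, runnable sketch of that order, using `tempfile` in place of calibre's `PersistentTemporaryFile` and plain `(ext, data)` tuples in place of `get_cover_img()` results:

```python
import os
import tempfile

def write_image(ext, imgdata, tdir=None):
    # epubmerge wants a path to a cover image on disk
    tmp = tempfile.NamedTemporaryFile(prefix='cover_', suffix='.' + ext,
                                      dir=tdir, delete=False)
    tmp.write(imgdata)
    tmp.close()
    return tmp.name

def pick_cover(prior_epub_cover, subbook_covers, calibre_cover_path):
    # 1) a cover already in the prior anthology epub wins outright
    if prior_epub_cover:
        return write_image(*prior_epub_cover)
    # 2) otherwise the first subbook cover found...
    path = None
    for ext, data in subbook_covers:
        path = write_image(ext, data)
        break
    # 3) ...unless an existing calibre cover is already on disk
    if path and calibre_cover_path and os.path.exists(calibre_cover_path):
        return calibre_cover_path
    return path

print(pick_cover(None, [('jpg', b'...jpegdata...')], '/no/such/cover.jpg'))
```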
Background jobs: the single-site download loop is wrapped in an inner `profiled_func()` so `@do_cprofile` covers it, the unused `do_download_site()` is removed, and `NotGoingToDownload` is imported from `fanficfare.exceptions`:

```diff
@@ -44,34 +44,44 @@ def do_download_worker_single(site,
     print_basic_debug_info(sys.stderr)
 
     notification(0.01, _('Downloading FanFiction Stories'))
     from calibre_plugins.fanficfare_plugin import FanFicFareBase
     fffbase = FanFicFareBase(options['plugin_path'])
     with fffbase: # so the sys.path was modified while loading the
                   # plug impl.
+        from fanficfare.fff_profile import do_cprofile
+
-        count = 0
-        totals = {}
-        # can't do direct assignment in list comprehension? I'm sure it
-        # makes sense to some pythonista.
-        # [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
-        [ totals.update({x['url']:0.0}) for x in book_list if x['good'] ]
-        # logger.debug(sites_lists.keys())
+        ## extra function just so I can easily use the same
+        ## @do_cprofile decorator
+        @do_cprofile
+        def profiled_func():
+            count = 0
+            totals = {}
+            # can't do direct assignment in list comprehension? I'm sure it
+            # makes sense to some pythonista.
+            # [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
+            [ totals.update({x['url']:0.0}) for x in book_list if x['good'] ]
+            # logger.debug(sites_lists.keys())
 
-        def do_indiv_notif(percent,msg):
-            totals[msg] = percent/len(totals)
-            notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
+            def do_indiv_notif(percent,msg):
+                totals[msg] = percent/len(totals)
+                notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
 
-        do_list = []
-        done_list = []
-        logger.info("\n\n"+_("Downloading FanFiction Stories")+"\n%s\n"%("\n".join([ "%(status)s %(url)s %(comment)s" % book for book in book_list])))
-        ## pass failures from metadata through bg job so all results are
-        ## together.
-        for book in book_list:
-            if book['good']:
-                do_list.append(book)
-            else:
-                done_list.append(book)
-        for book in do_list:
-            # logger.info("%s"%book['url'])
-            done_list.append(do_download_for_worker(book,options,merge,do_indiv_notif))
-            count += 1
-        return finish_download(done_list)
+            do_list = []
+            done_list = []
+            logger.info("\n\n"+_("Downloading FanFiction Stories")+"\n%s\n"%("\n".join([ "%(status)s %(url)s %(comment)s" % book for book in book_list])))
+            ## pass failures from metadata through bg job so all results are
+            ## together.
+            for book in book_list:
+                if book['good']:
+                    do_list.append(book)
+                else:
+                    done_list.append(book)
+            for book in do_list:
+                # logger.info("%s"%book['url'])
+                done_list.append(do_download_for_worker(book,options,merge,do_indiv_notif))
+                count += 1
+            return finish_download(done_list)
+        return profiled_func()
 
 def finish_download(donelist):
     book_list = sorted(donelist,key=lambda x : x['listorder'])
@@ -114,15 +124,6 @@ def finish_download(donelist):
     # return the book list as the job result
     return book_list
 
-def do_download_site(site,book_list,options,merge,notification=lambda x,y:x):
-    # logger.info(_("Started job for %s")%site)
-    retval = []
-    for book in book_list:
-        # logger.info("%s"%book['url'])
-        retval.append(do_download_for_worker(book,options,merge,notification))
-        notification(10.0,book['url'])
-    return retval
-
 def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
     '''
     Child job, to download story when run as a worker job
@@ -132,13 +133,13 @@ def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
     fffbase = FanFicFareBase(options['plugin_path'])
     with fffbase: # so the sys.path was modified while loading the
                   # plug impl.
-        from calibre_plugins.fanficfare_plugin.dialogs import NotGoingToDownload
         from calibre_plugins.fanficfare_plugin.prefs import (
             SAVE_YES, SAVE_YES_UNLESS_SITE, OVERWRITE, OVERWRITEALWAYS, UPDATE,
             UPDATEALWAYS, ADDNEW, SKIP, CALIBREONLY, CALIBREONLYSAVECOL)
         from calibre_plugins.fanficfare_plugin.wordcount import get_word_count
         from fanficfare import adapters, writers
         from fanficfare.epubutils import get_update_data
+        from fanficfare.exceptions import NotGoingToDownload
         from fanficfare.six import text_type as unicode
 
         from calibre_plugins.fanficfare_plugin.fff_util import get_fff_config
```
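The first jobs hunk exists only to reuse a decorator on a block of existing code: wrap the body in an inner function, decorate it, call it once. The same pattern with a stand-in timing decorator (the real `fanficfare.fff_profile.do_cprofile` presumably wraps `cProfile`, per the commented-out code removed elsewhere in this compare):

```python
import time
from functools import wraps

def do_cprofile(func):                 # stand-in for fff_profile's version
    @wraps(func)
    def profiled_func(*args, **kwargs):
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            print("%s took %.3fs" % (func.__name__, time.time() - start))
    return profiled_func

def do_download_worker_single(book_list):
    ## extra inner function just so the decorator covers the whole body
    @do_cprofile
    def profiled_func():               # closes over book_list, as above
        return [b for b in book_list if b.get('good')]
    return profiled_func()

print(do_download_worker_single([{'good': True}, {'good': False}]))
```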
Plugin `plugin-defaults.ini`: `genre`/`category` prepended to `include_in_genre` / `include_in_category`, plus new adult-fanfiction.org entries and Japanese-site genre lists:

```diff
@@ -1599,6 +1599,8 @@ chaptertitles:Prologue,Chapter 1\, Xenos on Cinnabar,Chapter 2\, Sinmay on Kinti
 
 
 [adult-fanfiction.org]
 use_basic_cache:true
 
+extra_valid_entries:eroticatags,disclaimer
 eroticatags_label:Erotica Tags
+disclaimer_label:Disclaimer
@@ -1717,13 +1719,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## AO3 uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -1932,7 +1934,7 @@ make_linkhtml_entries:translators,betas
 ## For most sites, 'category' is the fandom, but fanfics.me has
 ## fandoms and a separate category. By making it configurable, users
 ## can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 [fanfictalk.com]
 use_basic_cache:true
@@ -2708,13 +2710,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3015,13 +3017,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3150,8 +3152,8 @@ bookmarkmemo_label:ブックマークメモ
 bookmarkprivate_label:非公開ブックマーク
 subscribed_label:更新通知
 
-include_in_genre: fullgenre
-#include_in_genre: biggenre, smallgenre
+include_in_genre: genre, fullgenre
+#include_in_genre: genre, biggenre, smallgenre
 
 ## adds to titlepage_entries instead of replacing it.
 #extra_titlepage_entries: fullgenre,biggenre,smallgenre,imprint,freeformtags,comments,reviews,bookmarks,ratingpoints,overallpoints,bookmarked,bookmarkcategory,bookmarkmemo,bookmarkprivate,subscribed
@@ -3394,13 +3396,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3531,7 +3533,7 @@ upvotes_label:Upvotes
 subscribers_label:Subscribers
 views_label:Views
 
-include_in_category:tags
+include_in_category:category,tags
 
 #extra_titlepage_entries:upvotes,subscribers,views
 
@@ -3667,13 +3669,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
```
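Since `genre` and `category` are now prepended to these defaults, a user who wants the previous lists back can override them per site in personal.ini. A hypothetical override for AO3 (the other sections shown take the same keys):

```ini
[archiveofourown.org]
include_in_genre: freeformtags, ao3categories
include_in_category: fandoms
```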
Plugin prefs: default for the new `seriescase` option:

```diff
@@ -126,6 +126,7 @@ default_prefs['suppressauthorsort'] = False
 default_prefs['suppresstitlesort'] = False
 default_prefs['authorcase'] = False
 default_prefs['titlecase'] = False
+default_prefs['seriescase'] = False
 default_prefs['setanthologyseries'] = False
 default_prefs['mark'] = False
 default_prefs['mark_success'] = True
```
(20 file diffs suppressed because they are too large.)
fanficauthors.net adapter: the site renamed its `.nsns` subdomains to `-nsns`:

```diff
@@ -53,6 +53,9 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
         #Setting the 'Zone' for each "Site"
         self.zone = self.parsedUrl.netloc.replace('.fanficauthors.net','')
 
+        # site change .nsns to -nsns
+        self.zone = self.zone.replace('.nsns','-nsns')
+
         # normalized story URL.
         self._setURL('https://{0}.{1}/{2}/'.format(
             self.zone, self.getBaseDomain(), self.story.getMetadata('storyId')))
@@ -79,7 +82,10 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
     @classmethod
     def getAcceptDomains(cls):
 
+        # need both .nsns(old) and -nsns(new) because it's a domain
+        # change, not just URL change.
         return ['aaran-st-vines.nsns.fanficauthors.net',
+                'aaran-st-vines-nsns.fanficauthors.net',
                 'abraxan.fanficauthors.net',
                 'bobmin.fanficauthors.net',
                 'canoncansodoff.fanficauthors.net',
@@ -95,9 +101,12 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
                 'jeconais.fanficauthors.net',
                 'kinsfire.fanficauthors.net',
                 'kokopelli.nsns.fanficauthors.net',
+                'kokopelli-nsns.fanficauthors.net',
                 'ladya.nsns.fanficauthors.net',
+                'ladya-nsns.fanficauthors.net',
                 'lorddwar.fanficauthors.net',
                 'mrintel.nsns.fanficauthors.net',
+                'mrintel-nsns.fanficauthors.net',
                 'musings-of-apathy.fanficauthors.net',
                 'ruskbyte.fanficauthors.net',
                 'seelvor.fanficauthors.net',
@@ -108,7 +117,7 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
     ################################################################################################
     @classmethod
     def getSiteExampleURLs(self):
-        return ("https://aaran-st-vines.nsns.fanficauthors.net/A_Story_Name/ "
+        return ("https://aaran-st-vines-nsns.fanficauthors.net/A_Story_Name/ "
                 + "https://abraxan.fanficauthors.net/A_Story_Name/ "
                 + "https://bobmin.fanficauthors.net/A_Story_Name/ "
                 + "https://canoncansodoff.fanficauthors.net/A_Story_Name/ "
@@ -123,10 +132,10 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
                 + "https://jbern.fanficauthors.net/A_Story_Name/ "
                 + "https://jeconais.fanficauthors.net/A_Story_Name/ "
                 + "https://kinsfire.fanficauthors.net/A_Story_Name/ "
-                + "https://kokopelli.nsns.fanficauthors.net/A_Story_Name/ "
-                + "https://ladya.nsns.fanficauthors.net/A_Story_Name/ "
+                + "https://kokopelli-nsns.fanficauthors.net/A_Story_Name/ "
+                + "https://ladya-nsns.fanficauthors.net/A_Story_Name/ "
                 + "https://lorddwar.fanficauthors.net/A_Story_Name/ "
-                + "https://mrintel.nsns.fanficauthors.net/A_Story_Name/ "
+                + "https://mrintel-nsns.fanficauthors.net/A_Story_Name/ "
                 + "https://musings-of-apathy.fanficauthors.net/A_Story_Name/ "
                 + "https://ruskbyte.fanficauthors.net/A_Story_Name/ "
                 + "https://seelvor.fanficauthors.net/A_Story_Name/ "
@@ -136,8 +145,16 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
 
     ################################################################################################
     def getSiteURLPattern(self):
+        ## .nsns kept here to match both . and -
        return r'https?://(aaran-st-vines.nsns|abraxan|bobmin|canoncansodoff|chemprof|copperbadge|crys|deluded-musings|draco664|fp|frenchsession|ishtar|jbern|jeconais|kinsfire|kokopelli.nsns|ladya.nsns|lorddwar|mrintel.nsns|musings-of-apathy|ruskbyte|seelvor|tenhawk|viridian|whydoyouneedtoknow)\.fanficauthors\.net/([a-zA-Z0-9_]+)/'
 
+    @classmethod
+    def get_section_url(cls,url):
+        ## only changing .nsns to -nsns and only when part of the
+        ## domain.
+        url = url.replace('.nsns.fanficauthors.net','-nsns.fanficauthors.net')
+        return url
+
     ################################################################################################
     def doExtractChapterUrlsAndMetadata(self, get_cover=True):
```
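The new `get_section_url()` is a one-line normalization; a standalone sketch of its effect:

```python
def get_section_url(url):
    ## only changes .nsns to -nsns, and only as part of the domain
    return url.replace('.nsns.fanficauthors.net', '-nsns.fanficauthors.net')

print(get_section_url('https://kokopelli.nsns.fanficauthors.net/A_Story_Name/'))
# https://kokopelli-nsns.fanficauthors.net/A_Story_Name/
print(get_section_url('https://abraxan.fanficauthors.net/A_Story_Name/'))
# unchanged: https://abraxan.fanficauthors.net/A_Story_Name/
```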
ficwad.com adapter: recognize an additional failed-login response:

```diff
@@ -66,7 +66,8 @@ class FicwadComSiteAdapter(BaseSiteAdapter):
                                                 params['username']))
         d = self.post_request(loginUrl,params,usecache=False)
 
-        if "Login attempt failed..." in d:
+        if "Login attempt failed..." in d or \
+                '<div id="error">Please enter your username and password.</div>' in d:
             logger.info("Failed to login to URL %s as %s" % (loginUrl,
                                                              params['username']))
             raise exceptions.FailedToLogin(url,params['username'])
```
kakuyomu.jp adapter: table of contents moved to `tableOfContentsV2`:

```diff
@@ -163,7 +163,7 @@ class KakuyomuJpAdapter(BaseSiteAdapter):
         titles = []
         nestingLevel = 0
         newSection = False
-        for tocNodeRef in info[workKey]['tableOfContents']:
+        for tocNodeRef in info[workKey]['tableOfContentsV2']:
             tocNode = info[tocNodeRef['__ref']]
 
             if tocNode['chapter'] is not None:
```
Literotica adapter: chapter tags taken with `unicode()` instead of `stripHTML()`:

```diff
@@ -395,7 +395,7 @@ class LiteroticaSiteAdapter(BaseSiteAdapter):
 
                     ## Collect tags from series/story page if tags_from_chapters is enabled
                     if self.getConfig("tags_from_chapters"):
-                        self.story.extendList('eroticatags', [ stripHTML(t['tag']).title() for t in chap['tags'] ])
+                        self.story.extendList('eroticatags', [ unicode(t['tag']).title() for t in chap['tags'] ])
 
 
             except Exception as e:
```
scribblehub.com adapter: switch to the `wi_getreleases_pagination` endpoint and anchor-based TOC parsing:

```diff
@@ -197,33 +197,20 @@ class ScribbleHubComAdapter(BaseSiteAdapter): # XXX
 
         # Get the contents list from scribblehub, iterate through and add to chapters
         # Can be fairly certain this will not 404 - we know the story id is valid
-        contents_payload = {"action": "wi_gettocchp",
-                            "strSID": self.story.getMetadata('storyId'),
-                            "strmypostid": 0,
-                            "strFic": "yes"}
-
-        # 14/12/22 - Looks like it should follow this format now (below), but still returns a 400
-        # but not a 403. tested in browser getting rid of all other cookies to try and get a 400 and nopes.
-
-        # contents_payload = {"action": "wi_getreleases_pagination",
-        #                     "pagenum": 1,
-        #                     "mypostid": 421879}
-        # contents_payload = "action=wi_getreleases_pagination&pagenum=1&mypostid=421879"
+        contents_payload = {"action": "wi_getreleases_pagination",
+                            "pagenum": -1,
+                            "mypostid": self.story.getMetadata('storyId')}
 
         contents_data = self.post_request("https://www.scribblehub.com/wp-admin/admin-ajax.php", contents_payload)
 
         # logger.debug(contents_data)
         contents_soup = self.make_soup(contents_data)
 
-        for i in range(1, int(contents_soup.find('ol',{'id':'ol_toc'}).get('count')) + 1):
-            chapter_url = contents_soup.find('li',{'cnt':str(i)}).find('a').get('href')
-            chapter_name = contents_soup.find('li',{'cnt':str(i)}).find('a').get('title')
-            # logger.debug("Found Chapter " + str(i) + ", name: " + chapter_name + ", url: " + chapter_url)
+        for toca in contents_soup.select('a.toc_a'):
+            chapter_url = toca['href']
+            chapter_name = stripHTML(toca)
+            # logger.debug("Found Chapter: " + chapter_name + ", url: " + chapter_url)
             self.add_chapter(chapter_name, chapter_url)
 
 
 # eFiction sites don't help us out a lot with their meta data
 # formating, so it's a little ugly.
 
 # utility method
 def defaultGetattr(d,k):
     try:
```
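A hedged sketch of the new TOC fetch outside the adapter: the endpoint, action, and `pagenum: -1` (which appears to return the whole release list at once) come from the hunk above; `requests` stands in for the adapter's own `post_request()` helper, and the post id below is the example id from the old commented-out payload:

```python
import requests
from bs4 import BeautifulSoup

payload = {"action": "wi_getreleases_pagination",
           "pagenum": -1,          # -1: all releases at once
           "mypostid": 421879}     # example id from the old comment
resp = requests.post("https://www.scribblehub.com/wp-admin/admin-ajax.php",
                     data=payload, timeout=30)
soup = BeautifulSoup(resp.text, "html.parser")
for toca in soup.select("a.toc_a"):  # one anchor per chapter
    print(toca.get_text(strip=True), toca["href"])
```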
Test adapter: series metadata gains a `status`:

```diff
@@ -491,6 +491,7 @@ Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
         desc = '<div><p>The Great Test Series of '+self.getSiteDomain()+'!</p><p>Now with two lines!</p></div>'
         return {'name':'The Great Test',
                 'desc':desc,
+                'status':'AStatus',
                 'urllist':['http://'+self.getSiteDomain()+'?sid=1',
                            'http://'+self.getSiteDomain()+'?sid=2',
                            'http://'+self.getSiteDomain()+'?sid=3',
```
Internal-anchor matching now accepts `name=` as well as `id=` anchors (#966):

```diff
@@ -833,7 +833,9 @@ try to download.</p>
                     ## handle identifiers that otherwise appear to be
                     ## selectors themselves. #966
                     try:
-                        if href[0] == "#" and soup.select_one("[id='%s']"%href[1:]):
+                        # logger.debug("Search for internal link anchor href:(%s)"%href)
+                        if href[0] == "#" and soup.select_one("[id='%s'], [name='%s']"%(href[1:],href[1:])):
+                            # logger.debug("Found internal link anchor href:(%s)"%href)
                             hrefurl = href
                     except Exception as e:
                         logger.debug("Search for internal link anchor failed href:(%s)"%href)
```
Browser cache front end: register the new `SqldbCache` backend and log skipped backends:

```diff
@@ -22,6 +22,7 @@ from .base_browsercache import BaseBrowserCache, CACHE_DIR_CONFIG
 from .browsercache_simple import SimpleCache
 from .browsercache_blockfile import BlockfileCache
 from .browsercache_firefox2 import FirefoxCache2
+from .browsercache_sqldb import SqldbCache
 
 import logging
 logger = logging.getLogger(__name__)
@@ -34,12 +35,13 @@ class BrowserCache(object):
     def __init__(self, site, getConfig_fn, getConfigList_fn):
         """Constructor for BrowserCache"""
         # import of child classes have to be inside the def to avoid circular import error
-        for browser_cache_class in [SimpleCache, BlockfileCache, FirefoxCache2]:
+        for browser_cache_class in [SimpleCache, BlockfileCache, FirefoxCache2, SqldbCache]:
             self.browser_cache_impl = browser_cache_class.new_browser_cache(site,
                                                                             getConfig_fn,
                                                                             getConfigList_fn)
             if self.browser_cache_impl is not None:
                 break
+            logger.debug("Not using Browser Cache Class %s"%browser_cache_class)
         if self.browser_cache_impl is None:
             raise BrowserCacheException("%s is not set, or directory does not contain a known browser cache type: '%s'"%
                                         (CACHE_DIR_CONFIG,getConfig_fn(CACHE_DIR_CONFIG)))
```
Blockfile (Chromium) cache: debug logging for each detection failure:

```diff
@@ -90,18 +90,23 @@ class BlockfileCache(BaseChromiumCache):
     def is_cache_dir(cache_dir):
         """Return True only if a directory is a valid Cache for this class"""
         if not os.path.isdir(cache_dir):
+            logger.debug("Cache dir not found")
             return False
         index_path = os.path.join(cache_dir, "index")
         if not os.path.isfile(index_path):
+            logger.debug("index file not found")
             return False
         with share_open(index_path, 'rb') as index_file:
             if struct.unpack('I', index_file.read(4))[0] != INDEX_MAGIC_NUMBER:
+                logger.debug("index file failed magic number check")
                 return False
         data0_path = os.path.join(cache_dir, "data_0")
         if not os.path.isfile(data0_path):
+            logger.debug("data_0 file not found")
             return False
         with share_open(data0_path, 'rb') as data0_file:
             if struct.unpack('I', data0_file.read(4))[0] != BLOCK_MAGIC_NUMBER:
+                logger.debug("data_0 failed magic number check")
                 return False
         return True
 
```
Firefox cache2: debug logging for detection failures:

```diff
@@ -68,6 +68,7 @@ class FirefoxCache2(BaseBrowserCache):
         """Return True only if a directory is a valid Cache for this class"""
         # logger.debug("\n\n1Starting cache check\n\n")
         if not os.path.isdir(cache_dir):
+            logger.debug("Cache dir not found")
             return False
         ## check at least one entry file exists.
         for en_fl in glob.iglob(os.path.join(cache_dir, 'entries', '????????????????????????????????????????')):
@@ -75,6 +76,7 @@ class FirefoxCache2(BaseBrowserCache):
             k = _validate_entry_file(en_fl)
             if k is not None:
                 return True
+        logger.debug("No valid cache files found")
         return False
 
     def make_keys(self,url):
```
Chromium simple cache: looser index-size check and more debug logging:

```diff
@@ -76,15 +76,19 @@ class SimpleCache(BaseChromiumCache):
     def is_cache_dir(cache_dir):
         """Return True only if a directory is a valid Cache for this class"""
         if not os.path.isdir(cache_dir):
+            logger.debug("Cache dir not found")
             return False
         index_file = os.path.join(cache_dir, "index")
-        if not (os.path.isfile(index_file) and os.path.getsize(index_file) == 24):
+        if not os.path.isfile(index_file) or os.path.getsize(index_file) > 24:
+            logger.debug("index file not found or too big(%s)"%os.path.getsize(index_file))
             return False
         real_index_file = os.path.join(cache_dir, "index-dir", "the-real-index")
         if not os.path.isfile(real_index_file):
+            logger.debug("real_index_file not found")
             return False
         with share_open(real_index_file, 'rb') as index_file:
             if struct.unpack('QQ', index_file.read(16))[1] != THE_REAL_INDEX_MAGIC_NUMBER:
+                logger.debug("real_index_file failed magic number check")
                 return False
         try:
             # logger.debug("\n\nStarting cache check\n\n")
@@ -92,9 +96,11 @@ class SimpleCache(BaseChromiumCache):
                 k = _validate_entry_file(en_fl)
                 if k is not None:
                     return True
-        except SimpleCacheException:
+        except SimpleCacheException as sce:
             # raise
+            logger.debug(sce)
             return False
+        logger.debug("No valid cache files found")
         return False
 
     def get_data_key_impl(self, url, key):
```
New file `fanficfare/browsercache/browsercache_sqldb.py` (185 lines):

```python
# -*- coding: utf-8 -*-

# Copyright 2026 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import absolute_import
import os
import apsw
import ctypes

# note share_open (on windows CLI) is implicitly readonly.
from .share_open import share_open
from .base_chromium import BaseChromiumCache
from .chromagnon import SuperFastHash

import logging
logger = logging.getLogger(__name__)

class SqldbCache(BaseChromiumCache):
    """Class to access data stream in Chrome Disk Sqldb Cache format cache files"""

    def __init__(self, *args, **kargs):
        """Constructor for SqldbCache"""
        super(SqldbCache,self).__init__(*args, **kargs)
        logger.debug("Using SqldbCache")

    # def scan_cache_keys(self):
    ## XXX will impl a scan if and when needed. It's a lot easier
    ## to peek inside an sqlite

    @staticmethod
    def is_cache_dir(cache_dir):
        """Return True only if a directory is a valid Cache for this class"""
        if not os.path.isdir(cache_dir):
            logger.debug("Cache dir not found")
            return False
        index_path = os.path.join(cache_dir, "index")
        if not os.path.isfile(index_path):
            logger.debug("index file not found")
            return False
        sqldb0_path = os.path.join(cache_dir, "sqldb0")
        if not os.path.isfile(sqldb0_path):
            logger.debug("sqldb0 file not found")
            return False
        ## XXX check schema of db?
        return True

    ## XXX others uses share_open() - will sqlite open work concurrently?

    def get_data_key_impl(self, url, key):
        """
        returns location, entry age(unix epoch), content-encoding and
        raw(compressed) data
        """
        location, age, encoding, data = '', None, None, None
        qstr = 'SELECT last_used, head, blob FROM resources as r join blobs as b on b.res_id=r.res_id where cache_key_hash=?'
        cache_key_hash = _key_hash(key)
        logger.debug("           key:%s"%key)
        logger.debug("cache_key_hash:%s"%cache_key_hash)
        ## XXX worth optimizing to keep sql conn open?

        from ..six.moves.urllib.request import pathname2url
        fileuri = os.path.join(self.cache_dir, "sqldb0")# pathname2url()

        logger.debug(fileuri)
        shareopenVFS = ShareOpenVFS()
        logger.debug("VFS available %s"% apsw.vfs_names())
        with apsw.Connection("file:"+fileuri+"?immutable=1",
                             flags=apsw.SQLITE_OPEN_READONLY | apsw.SQLITE_OPEN_URI,
                             vfs=shareopenVFS.vfs_name
                             ) as db:
            logger.debug("db flags:%xd"%db.open_flags)
            logger.debug("db vfs:%s"%db.open_vfs)
            for last, head, blob in db.execute(qstr,[cache_key_hash]):

                row_age = self.make_age(last)
                if age and row_age < age:
                    logger.debug("skipping an older row for same hash")
                    break

                age = row_age
                logger.debug("age from last_used:%s"%age)

                ## cheesy way to pull out the http headers, inspired
                ## by equal cheese in chromagnon/cacheData.py. Only
                ## actually care about location &content-encoding,
                ## ignore the rest.
                head = head[head.index(b'HTTP'):]
                head = head[:head.index(b'\x00\x00')]
                # logger.debug(head)
                for line in head.split(b'\0'):
                    logger.debug(line)
                    if b'content-encoding' in line.lower():
                        encoding = line.split(b':')[1].strip().lower()
                        logger.debug("encoding from header:%s"%encoding)
                    if b'location' in line.lower():
                        location = b':'.join(line.split(b':')[1:]).strip()
                        logger.debug("location from header:%s"%encoding)
                ## XXX might need entry age from header, too.
                ## Hoping db last_used is equiv.
                data = blob
        if data:
            return (location, age, encoding, data)
        else:
            return None

## calculate SuperFashHash, but the sql saved it signed.
def _key_hash(key):
    unsigned_hash = SuperFastHash.superFastHash(key)
    number = unsigned_hash & 0xFFFFFFFF
    return ctypes.c_int32(number).value


class ShareOpenVFS(apsw.VFS):
    def __init__(self):
        self.vfs_name = 'shareopen'
        super().__init__(name=self.vfs_name, base='')

    def xAccess(self, pathname, flags):
        return True

    def xFullPathname(self, filename):
        return filename

    def xDelete(self, filename, syncdir):
        logger.debug("xDelete NOT DELETING")
        pass

    def xOpen(self, name, flags):
        return ShareOpenVFSFile(name, flags)

class ShareOpenVFSFile:
    def __init__(self, name, flags):
        self.filename = name.filename() if isinstance(name, apsw.URIFilename) else name
        self.filename = os.path.normpath(self.filename)
        logger.debug("Doing share open(%s)"%self.filename)
        self.file = share_open(self.filename, 'rb')

    def xRead(self, amount, offset):
        self.file.seek(offset, 0)
        return self.file.read(amount)

    def xFileSize(self):
        return os.stat(self.filename).st_size

    def xClose(self):
        self.file.close()

    def xSectorSize(self):
        return 0

    def xFileControl(self, *args):
        return False

    def xCheckReservedLock(self):
        return False

    def xLock(self, level):
        pass

    def xUnlock(self, level):
        pass

    def xSync(self, flags):
        return True

    def xTruncate(self, newsize):
        logger.debug("xTruncate NOT TRUNCING")
        pass

    def xWrite(self, data, offset):
        logger.debug("xWrite NOT WRITING")
        pass
```
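`_key_hash()` above exists because Chromium stores SuperFastHash results in the sqlite column as signed 32-bit integers. The conversion, isolated with example values:

```python
import ctypes

def to_signed32(unsigned_hash):
    return ctypes.c_int32(unsigned_hash & 0xFFFFFFFF).value

print(to_signed32(0x12345678))  # high bit clear: unchanged, 305419896
print(to_signed32(0xDEADBEEF))  # high bit set: wraps to -559038737
```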
CLI: version bump and `@do_cprofile` on `do_download()`:

```diff
@@ -27,8 +27,7 @@ import pprint
 import string
 import os, sys, platform
 
-
-version="4.56.0"
+version="4.57.7"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
@@ -51,6 +50,8 @@ from fanficfare.geturls import get_urls_from_page, get_urls_from_imap
 from fanficfare.six.moves import configparser
 from fanficfare.six import text_type as unicode
 
+from fanficfare.fff_profile import do_cprofile
+
 def write_story(config, adapter, writeformat,
                 metaonly=False, nooutput=False,
                 outstream=None):
@@ -346,6 +347,7 @@ def main(argv=None,
         dispatch(options, urls, passed_defaultsini, passed_personalini, warn, fail)
 
 # make rest a function and loop on it.
+@do_cprofile
 def do_download(arg,
                 options,
                 passed_defaultsini,
```
`Configuration`: `get_config()` results are now cached:

```diff
@@ -606,6 +606,9 @@ class Configuration(ConfigParser):
 
         self.url_config_set = False
 
+        ## to improve performance, cache config values.
+        self.cached_config = {}
+
     def section_url_names(self,domain,section_url_f):
         ## domain is passed as a method to limit the damage if/when an
         ## adapter screws up _section_url
@@ -683,6 +686,10 @@ class Configuration(ConfigParser):
         return self.get_config(self.sectionslist,key,default)
 
     def get_config(self, sections, key, default=""):
+        try:
+            return self.cached_config[(tuple(sections),key)]
+        except KeyError as ke:
+            pass
         val = default
 
         val_files = []
@@ -727,6 +734,7 @@ class Configuration(ConfigParser):
             except (configparser.NoOptionError, configparser.NoSectionError) as e:
                 pass
 
+        self.cached_config[(tuple(sections),key)] = val
         return val
 
     # split and strip each.
```
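The caching added to `get_config()` is plain memoization keyed on `(tuple(sections), key)`; the tuple conversion matters because section lists aren't hashable. A self-contained sketch of the same shape, with a stand-in for the slow ConfigParser walk:

```python
class CachedLookup:
    def __init__(self, slow_lookup):
        self.slow_lookup = slow_lookup       # stand-in for the section walk
        self.cached_config = {}

    def get_config(self, sections, key, default=""):
        cache_key = (tuple(sections), key)   # lists aren't hashable
        try:
            return self.cached_config[cache_key]
        except KeyError:
            pass
        val = self.slow_lookup(sections, key, default)
        self.cached_config[cache_key] = val
        return val

lookup = CachedLookup(lambda s, k, d: "computed:%s/%s" % (s[-1], k))
print(lookup.get_config(['defaults', 'site'], 'include_in_genre'))
print(lookup.get_config(['defaults', 'site'], 'include_in_genre'))  # cached
```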
The same `include_in_genre` / `include_in_category` changes in fanficfare's own `defaults.ini`:

```diff
@@ -1712,13 +1712,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## AO3 uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -1927,7 +1927,7 @@ make_linkhtml_entries:translators,betas
 ## For most sites, 'category' is the fandom, but fanfics.me has
 ## fandoms and a separate category. By making it configurable, users
 ## can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 [fanfictalk.com]
 use_basic_cache:true
@@ -2703,13 +2703,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3010,13 +3010,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3145,8 +3145,8 @@ bookmarkmemo_label:ブックマークメモ
 bookmarkprivate_label:非公開ブックマーク
 subscribed_label:更新通知
 
-include_in_genre: fullgenre
-#include_in_genre: biggenre, smallgenre
+include_in_genre: genre, fullgenre
+#include_in_genre: genre, biggenre, smallgenre
 
 ## adds to titlepage_entries instead of replacing it.
 #extra_titlepage_entries: fullgenre,biggenre,smallgenre,imprint,freeformtags,comments,reviews,bookmarks,ratingpoints,overallpoints,bookmarked,bookmarkcategory,bookmarkmemo,bookmarkprivate,subscribed
@@ -3389,13 +3389,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
@@ -3526,7 +3526,7 @@ upvotes_label:Upvotes
 subscribers_label:Subscribers
 views_label:Views
 
-include_in_category:tags
+include_in_category:category,tags
 
 #extra_titlepage_entries:upvotes,subscribers,views
 
@@ -3662,13 +3662,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
 ## hardcoded to include the site specific metadata freeformtags &
 ## ao3categories in the standard metadata field genre. By making it
 ## configurable, users can change it.
-include_in_genre: freeformtags, ao3categories
+include_in_genre: genre, freeformtags, ao3categories
 
 ## OTW uses the word 'category' differently than most sites. The
 ## adapter used to be hardcoded to include the site specific metadata
 ## fandom in the standard metadata field category. By making it
 ## configurable, users can change it.
-include_in_category:fandoms
+include_in_category:category,fandoms
 
 ## freeformtags was previously typo'ed as freefromtags. This way,
 ## freefromtags will still work for people who've used it.
```
|
||||
|
|
|
|||
|
|
@@ -22,25 +22,7 @@ from io import BytesIO

 FONT_EXTS = ('ttf','otf','woff','woff2')

-# from io import StringIO
-# import cProfile, pstats
-# from pstats import SortKey
-# def do_cprofile(func):
-#     def profiled_func(*args, **kwargs):
-#         profile = cProfile.Profile()
-#         try:
-#             profile.enable()
-#             result = func(*args, **kwargs)
-#             profile.disable()
-#             return result
-#         finally:
-#             # profile.sort_stats(SortKey.CUMULATIVE).print_stats(20)
-#             s = StringIO()
-#             sortby = SortKey.CUMULATIVE
-#             ps = pstats.Stats(profile, stream=s).sort_stats(sortby)
-#             ps.print_stats(20)
-#             print(s.getvalue())
-#     return profiled_func
+from fanficfare.fff_profile import do_cprofile

 import bs4

@@ -51,9 +33,52 @@ def get_dcsource_chaptercount(inputio):
     ## getsoups=True to check for continue_on_chapter_error chapters.
     return get_update_data(inputio,getfilecount=True,getsoups=True)[:2] # (source,filecount)

 def get_cover_data(inputio):
     # (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
     return get_update_data(inputio,getfilecount=True,getsoups=False)[4]

+## only finds and returns cover image type and data, not cover page.
+## should work on any epub. Added for anthology cover issues.
+def get_cover_img(inputio):
+    # (oldcoverimgtype,oldcoverimgdata)
+    epub = ZipFile(inputio, 'r') # works equally well with inputio as a path or a blob
+
+    ## Find the .opf file.
+    container = epub.read("META-INF/container.xml")
+    containerdom = parseString(container)
+    rootfilenodelist = containerdom.getElementsByTagName("rootfile")
+    rootfilename = rootfilenodelist[0].getAttribute("full-path")
+
+    contentdom = parseString(epub.read(rootfilename))
+    firstmetadom = contentdom.getElementsByTagName("metadata")[0]
+
+    ## Save the path to the .opf file--hrefs inside it are relative to it.
+    relpath = get_path_part(rootfilename)
+    # logger.debug("relpath:%s"%relpath)
+
+    # <meta name="cover" content="cover"/>
+
+    coverid = None
+    covertype = None
+    coverdata = None
+
+    for metatag in firstmetadom.getElementsByTagName("meta"):
+        if metatag.getAttribute('name') == 'cover':
+            coverid = metatag.getAttribute('content')
+            # logger.debug("coverid:%s"%coverid)
+            break
+    if coverid:
+        for item in contentdom.getElementsByTagName("item"):
+            if item.getAttribute('id') == coverid:
+                coverhref = relpath+item.getAttribute("href")
+                ## remove .. and the part it obviates
+                coverhref = re.sub(r"([^/]+/\.\./)","",coverhref)
+                covertype = item.getAttribute('media-type')
+                # logger.debug("covertype:%s coverhref:%s"%(covertype,coverhref))
+                try:
+                    coverdata = epub.read(coverhref)
+                    # logger.debug("coverdatalen:%s"%len(coverdata))
+                except Exception as e:
+                    logger.info("Failed to read cover (%s): %s"%(coverhref,e))
+                    covertype, coverdata = None, None
+                break
+    return covertype, coverdata
+
 def get_oldcover(epub,relpath,contentdom,item):
     href=relpath+item.getAttribute("href")
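A quick usage sketch for the new helper. Hedged: this assumes the hunk above is fanficfare/epubutils.py, where the surrounding helpers live, and 'story.epub' is a placeholder path:

    from fanficfare.epubutils import get_cover_img

    covertype, coverdata = get_cover_img('story.epub')  # path or file-like blob
    if coverdata:
        # covertype is the OPF media-type, e.g. 'image/jpeg'
        ext = covertype.split('/')[-1]
        with open('cover.' + ext, 'wb') as out:
            out.write(coverdata)
    else:
        print('epub declares no readable cover image')

Because it only walks container.xml and the .opf manifest, it works on any conforming epub, not just FanFicFare output, which is the point for anthology covers.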
@@ -407,7 +432,7 @@ def get_story_url_from_zip_html(inputio,_is_good_url=None):
                 return ahref
     return None

-# @do_cprofile
+@do_cprofile
 def reset_orig_chapters_epub(inputio,outfile):
     inputepub = ZipFile(inputio, 'r') # works equally well with a path or a blob
@@ -460,28 +485,50 @@ def reset_orig_chapters_epub(inputio,outfile):
         if re.match(r'.*/file\d+\.xhtml',zf):
             #logger.debug("zf:%s"%zf)
             data = data.decode('utf-8')
-            # should be re-reading an FFF file, single soup should
-            # be good enough and halve processing time.
-            soup = make_soup(data,dblsoup=False)
-
-            chapterorigtitle = None
-            tag = soup.find('meta',{'name':'chapterorigtitle'})
-            if tag:
-                chapterorigtitle = tag['content']
+            ## For higher performance checking, don't need to
+            ## make_soup if not different
+            header = data[0:data.find("</head>")]
+            '''
+            <meta name="chapterorigtitle" content="8. Chapter 7" />
+            <meta name="chaptertoctitle" content="8. Chapter 7" />
+            <meta name="chaptertitle" content="8. (new) Chapter 7" />
+            '''
+            # logger.debug(header)
+            def get_meta_content(n,d):
+                m = re.match(r'.*<meta( name="%s"| content="(?P<found>[^"]+))+".*'%n,d,re.DOTALL)
+                if m:
+                    # logger.debug("%s -> %s"%(n,m.groupdict().get('found',None)))
+                    return m.groupdict().get('found',None)

-            # toctitle is separate for add_chapter_numbers:toconly users.
-            chaptertoctitle = None
-            tag = soup.find('meta',{'name':'chaptertoctitle'})
-            if tag:
-                chaptertoctitle = tag['content']
-            else:
-                chaptertoctitle = chapterorigtitle
+            chapterorigtitle = get_meta_content('chapterorigtitle',header)
+            chaptertoctitle = get_meta_content('chaptertoctitle',header)
+            chaptertitle = get_meta_content('chaptertitle',header)

-            chaptertitle = None
-            tag = soup.find('meta',{'name':'chaptertitle'})
-            if tag:
-                chaptertitle = tag['content']
-                chaptertitle_tag = tag
+            if not (chapterorigtitle and chaptertoctitle and chaptertitle \
+                    and chapterorigtitle == chaptertitle):
+                # should be re-reading an FFF file, single soup should
+                # be good enough and halve processing time.
+                soup = make_soup(data,dblsoup=False)
+
+                chapterorigtitle = None
+                tag = soup.find('meta',{'name':'chapterorigtitle'})
+                if tag:
+                    chapterorigtitle = tag['content']
+
+                # toctitle is separate for add_chapter_numbers:toconly users.
+                chaptertoctitle = None
+                tag = soup.find('meta',{'name':'chaptertoctitle'})
+                if tag:
+                    chaptertoctitle = tag['content']
+                else:
+                    chaptertoctitle = chapterorigtitle
+
+                chaptertitle = None
+                tag = soup.find('meta',{'name':'chaptertitle'})
+                if tag:
+                    chaptertitle = tag['content']
+                    chaptertitle_tag = tag

             #logger.debug("chaptertitle:(%s) chapterorigtitle:(%s)"%(chaptertitle, chapterorigtitle))
             if chaptertitle and chapterorigtitle and chapterorigtitle != chaptertitle:
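The point of the hunk above is to avoid building a BeautifulSoup tree for every chapter file: the three <meta> titles are first pulled straight out of the raw <head> text with a regex, and soup is only built when the titles actually differ, i.e. when the chapter needs restoring. A standalone illustration of the fast path, using the same regex as the diff; the sample header string is made up:

    import re

    header = '<head><meta name="chapterorigtitle" content="8. Chapter 7" /></head>'

    def get_meta_content(n, d):
        m = re.match(r'.*<meta( name="%s"| content="(?P<found>[^"]+))+".*' % n, d, re.DOTALL)
        if m:
            return m.groupdict().get('found', None)

    print(get_meta_content('chapterorigtitle', header))  # -> 8. Chapter 7

The alternation group repeats so the name and content attributes can match in either order; the named group captures the content value of the meta tag whose name matched.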
@@ -148,3 +148,12 @@ class HTTPErrorFFF(Exception):

 class BrowserCacheException(Exception):
     pass
+
+class NotGoingToDownload(Exception):
+    def __init__(self,error,icon='dialog_error.png',showerror=True):
+        self.error=error
+        self.icon=icon
+        self.showerror=showerror
+
+    def __str__(self):
+        return self.error
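A short usage sketch for the exception class added above. Hedged: this assumes the hunk is fanficfare/exceptions.py, alongside HTTPErrorFFF and BrowserCacheException, and check_url is a made-up example function:

    from fanficfare.exceptions import NotGoingToDownload

    def check_url(url):  # hypothetical caller
        if not url.startswith('http'):
            # icon and showerror are hints for GUI callers; defaults shown explicitly
            raise NotGoingToDownload("Not a story URL.", icon='dialog_error.png', showerror=True)

    try:
        check_url('ftp://example.com/story')
    except NotGoingToDownload as e:
        print(e)  # __str__ returns the error message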
fanficfare/fff_profile.py  (new file, 44 lines)
@@ -0,0 +1,44 @@
+# Copyright 2026 FanFicFare team
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+## not compatible with py2, SortKey not available.
+import sys
+DO_PROFILING = False
+if DO_PROFILING and sys.version_info >= (3, 7):
+    from io import StringIO
+    import cProfile, pstats
+    from pstats import SortKey
+    def do_cprofile(func):
+        def profiled_func(*args, **kwargs):
+            profile = cProfile.Profile()
+            try:
+                profile.enable()
+                result = func(*args, **kwargs)
+                profile.disable()
+                return result
+            finally:
+                # profile.print_stats()
+                s = StringIO()
+                sortby = SortKey.CUMULATIVE
+                ps = pstats.Stats(profile, stream=s).sort_stats(sortby)
+                ps.print_stats(20)
+                print(s.getvalue())
+        return profiled_func
+else:
+    ## do-nothing for py2
+    def do_cprofile(func):
+        def profiled_func(*args, **kwargs):
+            return func(*args, **kwargs)
+        return profiled_func
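How the new module is meant to be used, going by its own code: flip DO_PROFILING to True (under Python 3.7+, since pstats.SortKey is unavailable earlier) and every call to a decorated function prints the top 20 entries by cumulative time; with the flag off, or on py2, the decorator is a pass-through. A small sketch; slow_operation is a made-up workload:

    from fanficfare.fff_profile import do_cprofile

    @do_cprofile
    def slow_operation(n):  # hypothetical workload
        return sum(i * i for i in range(n))

    slow_operation(1000000)  # prints pstats output only when DO_PROFILING is True

Keeping the flag and both decorator variants in one module lets callers leave @do_cprofile in place permanently at zero cost.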
@@ -16,7 +16,7 @@ name = "FanFicFare" # Required
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.56.0"
+version = "4.57.7"

 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field: