Compare commits

..

No commits in common. "main" and "v4.55.0" have entirely different histories.

47 changed files with 11540 additions and 12066 deletions

View file

@ -33,7 +33,7 @@ except NameError:
from calibre.customize import InterfaceActionBase
# pulled out from FanFicFareBase for saving in prefs.py
__version__ = (4, 57, 7)
__version__ = (4, 55, 0)
## Apparently the name for this class doesn't matter--it was still
## 'demo' for the first few versions.

View file

@ -371,7 +371,6 @@ class ConfigWidget(QWidget):
prefs['suppresstitlesort'] = self.std_columns_tab.suppresstitlesort.isChecked()
prefs['authorcase'] = self.std_columns_tab.authorcase.isChecked()
prefs['titlecase'] = self.std_columns_tab.titlecase.isChecked()
prefs['seriescase'] = self.std_columns_tab.seriescase.isChecked()
prefs['setanthologyseries'] = self.std_columns_tab.setanthologyseries.isChecked()
prefs['set_author_url'] =self.std_columns_tab.set_author_url.isChecked()
@ -761,7 +760,6 @@ class BasicTab(QWidget):
tooltip=_("One URL per line:\n<b>http://...,note</b>\n<b>http://...,title by author - note</b>"),
rejectreasons=rejecturllist.get_reject_reasons(),
reasonslabel=_('Add this reason to all URLs added:'),
accept_storyurls=True,
save_size_name='fff:Add Reject List')
d.exec_()
if d.result() == d.Accepted:
@ -1638,11 +1636,6 @@ class StandardColumnsTab(QWidget):
self.setanthologyseries.setChecked(prefs['setanthologyseries'])
row.append(self.setanthologyseries)
self.seriescase = QCheckBox(_('Fix Series Case?'),self)
self.seriescase.setToolTip(_("If checked, Calibre's routine for correcting the capitalization of title will be applied.")
+"\n"+_("This effects Calibre metadata only, not FanFicFare metadata in title page."))
self.seriescase.setChecked(prefs['seriescase'])
row.append(self.seriescase)
grid = QGridLayout()
for rownum, row in enumerate(rows):
for colnum, col in enumerate(row):

View file

@ -38,7 +38,6 @@ from calibre.gui2 import gprefs
show_download_options = 'fff:add new/update dialogs:show_download_options'
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.complete2 import EditWithComplete
from fanficfare.exceptions import NotGoingToDownload
from fanficfare.six import text_type as unicode, ensure_text
# pulls in translation files for _() strings
@ -156,6 +155,15 @@ class RejectUrlEntry:
return retval
class NotGoingToDownload(Exception):
def __init__(self,error,icon='dialog_error.png',showerror=True):
self.error=error
self.icon=icon
self.showerror=showerror
def __str__(self):
return self.error
class DroppableQTextEdit(QTextEdit):
def __init__(self,parent):
QTextEdit.__init__(self,parent)
@ -1320,7 +1328,6 @@ class EditTextDialog(SizePersistedDialog):
icon=None, title=None, label=None, tooltip=None,
read_only=False,
rejectreasons=[],reasonslabel=None,
accept_storyurls=False,
save_size_name='fff:edit text dialog',
):
SizePersistedDialog.__init__(self, parent, save_size_name)
@ -1334,10 +1341,7 @@ class EditTextDialog(SizePersistedDialog):
self.setWindowIcon(icon)
self.l.addWidget(self.label)
if accept_storyurls:
self.textedit = DroppableQTextEdit(self)
else:
self.textedit = QTextEdit(self)
self.textedit = QTextEdit(self)
self.textedit.setLineWrapMode(QTextEditNoWrap)
self.textedit.setReadOnly(read_only)
self.textedit.setText(text)

View file

@ -10,6 +10,20 @@ __docformat__ = 'restructuredtext en'
import fanficfare.six as six
from fanficfare.six import ensure_text, string_types, text_type as unicode
# import cProfile
# def do_cprofile(func):
# def profiled_func(*args, **kwargs):
# profile = cProfile.Profile()
# try:
# profile.enable()
# result = func(*args, **kwargs)
# profile.disable()
# return result
# finally:
# profile.print_stats()
# return profiled_func
import logging
logger = logging.getLogger(__name__)
@ -64,14 +78,12 @@ from fanficfare import adapters, exceptions
from fanficfare.epubutils import (
get_dcsource, get_dcsource_chaptercount, get_story_url_from_epub_html,
get_story_url_from_zip_html, reset_orig_chapters_epub, get_cover_img)
get_story_url_from_zip_html, reset_orig_chapters_epub, get_cover_data)
from fanficfare.geturls import (
get_urls_from_page, get_urls_from_text,get_urls_from_imap,
get_urls_from_mime)
from fanficfare.fff_profile import do_cprofile
from calibre_plugins.fanficfare_plugin.fff_util import (
get_fff_adapter, get_fff_config, get_fff_personalini,
get_common_elements)
@ -99,8 +111,7 @@ from calibre_plugins.fanficfare_plugin.dialogs import (
LoopProgressDialog, UserPassDialog, AboutDialog, CollectURLDialog,
RejectListDialog, EmailPassDialog, TOTPDialog,
save_collisions, question_dialog_all,
RejectUrlEntry, IniTextDialog,
EditTextDialog)
NotGoingToDownload, RejectUrlEntry, IniTextDialog)
# because calibre immediately transforms html into zip and don't want
# to have an 'if html'. db.has_format is cool with the case mismatch,
@ -194,6 +205,20 @@ class FanFicFarePlugin(InterfaceAction):
prefs,
self.qaction.icon())
## Kludgey, yes, but with the real configuration inside the
## library now, how else would a user be able to change this
## setting if it's crashing calibre?
def check_macmenuhack(self):
try:
return self.macmenuhack
except:
file_path = os.path.join(calibre_config_dir,
*("plugins/fanficfare_macmenuhack.txt".split('/')))
file_path = os.path.abspath(file_path)
logger.debug("Plugin %s macmenuhack file_path:%s"%(self.name,file_path))
self.macmenuhack = os.access(file_path, os.F_OK)
return self.macmenuhack
accepts_drops = True
def accept_enter_event(self, event, mime_data):
@ -418,38 +443,30 @@ class FanFicFarePlugin(InterfaceAction):
self.reject_list_action = self.create_menu_item_ex(self.menu, _('Reject Selected Books'),
unique_name='Reject Selected Books', image='rotate-right.png',
triggered=self.reject_list_urls)
# self.menu.addSeparator()
self.add_reject_urls_action = self.create_menu_item_ex(self.menu, _('Add Reject URLs'),
image='rotate-right.png',
unique_name='Add Reject URLs',
shortcut_name=_('Add Reject URLs'),
triggered=self.add_reject_urls)
# print("platform.system():%s"%platform.system())
# print("platform.mac_ver()[0]:%s"%platform.mac_ver()[0])
if not self.check_macmenuhack(): # not platform.mac_ver()[0]: # Some macs crash on these menu items for unknown reasons.
self.menu.addSeparator()
self.editpersonalini_action = self.create_menu_item_ex(self.menu, _('Edit personal.ini'),
image= 'config.png',
unique_name='Edit personal.ini',
shortcut_name=_('Edit personal.ini'),
triggered=self.editpersonalini)
self.edit_reject_urls_action = self.create_menu_item_ex(self.menu, _('Edit Reject URLs'),
image='rotate-right.png',
unique_name='Edit Reject URLs',
shortcut_name=_('Edit Reject URLs'),
triggered=self.edit_reject_urls)
self.config_action = self.create_menu_item_ex(self.menu, _('&Configure FanFicFare'),
image= 'config.png',
unique_name='Configure FanFicFare',
shortcut_name=_('Configure FanFicFare'),
triggered=do_user_config)
self.menu.addSeparator()
self.about_action = self.create_menu_item_ex(self.menu, _('About FanFicFare'),
image= 'images/icon.png',
unique_name='About FanFicFare',
shortcut_name=_('About FanFicFare'),
triggered=self.about)
self.editpersonalini_action = self.create_menu_item_ex(self.menu, _('Edit personal.ini'),
image= 'config.png',
unique_name='Edit personal.ini',
shortcut_name=_('Edit personal.ini'),
triggered=self.editpersonalini)
self.config_action = self.create_menu_item_ex(self.menu, _('&Configure FanFicFare'),
image= 'config.png',
unique_name='Configure FanFicFare',
shortcut_name=_('Configure FanFicFare'),
triggered=do_user_config)
self.about_action = self.create_menu_item_ex(self.menu, _('About FanFicFare'),
image= 'images/icon.png',
unique_name='About FanFicFare',
shortcut_name=_('About FanFicFare'),
triggered=self.about)
self.gui.keyboard.finalize()
def about(self,checked):
@ -485,35 +502,6 @@ class FanFicFarePlugin(InterfaceAction):
prefs['personal.ini'] = get_resources('plugin-example.ini')
prefs.save_to_db()
def add_reject_urls(self):
d = EditTextDialog(self.gui,
"http://example.com/story.php?sid=5,"+_("Reason why I rejected it")+"\nhttp://example.com/story.php?sid=6,"+_("Title by Author")+" - "+_("Reason why I rejected it"),
# icon=self.windowIcon(),
title=_("FanFicFare"),
label=_("Add Reject URLs. Use: <b>http://...,note</b> or <b>http://...,title by author - note</b><br>Invalid story URLs will be ignored."),
tooltip=_("One URL per line:\n<b>http://...,note</b>\n<b>http://...,title by author - note</b>"),
rejectreasons=rejecturllist.get_reject_reasons(),
reasonslabel=_('Add this reason to all URLs added:'),
accept_storyurls=True,
save_size_name='fff:Add Reject List')
d.exec_()
if d.result() == d.Accepted:
rejecturllist.add_text(d.get_plain_text(),d.get_reason_text())
def edit_reject_urls(self):
with busy_cursor():
d = RejectListDialog(self.gui,
rejecturllist.get_list(),
rejectreasons=rejecturllist.get_reject_reasons(),
header=_("Edit Reject URLs List"),
show_delete=False,
show_all_reasons=False)
d.exec_()
if d.result() != d.Accepted:
return
with busy_cursor():
rejecturllist.add(d.get_reject_list(),clear=True)
def create_menu_item_ex(self, parent_menu, menu_text, image=None, tooltip=None,
shortcut=None, triggered=None, is_checked=None, shortcut_name=None,
unique_name=None):
@ -1153,9 +1141,9 @@ class FanFicFarePlugin(InterfaceAction):
## Aug2024 moved site specific search changes to adapters as
## classmethod
regexp = adapters.get_url_search(url)
# logger.debug(regexp)
logger.debug(regexp)
retval = self.gui.current_db.search_getting_ids(regexp,None,use_virtual_library=False)
# logger.debug(retval)
logger.debug(retval)
return retval
def prep_downloads(self, options, books, merge=False, extrapayload=None):
@ -1285,7 +1273,7 @@ class FanFicFarePlugin(InterfaceAction):
# let other exceptions percolate up.
return adapter.getStoryMetadataOnly(get_cover=False)
@do_cprofile
# @do_cprofile
def prep_download_loop(self,book,
options={'fileform':'epub',
'collision':ADDNEW,
@ -1324,11 +1312,11 @@ class FanFicFarePlugin(InterfaceAction):
## network hit.
identicalbooks = self.do_id_search(url)
if collision == SKIP and identicalbooks:
raise exceptions.NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
raise NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
# Dialogs should prevent this case now.
if collision in (UPDATE,UPDATEALWAYS) and fileform != 'epub':
raise exceptions.NotGoingToDownload(_("Cannot update non-epub format."))
raise NotGoingToDownload(_("Cannot update non-epub format."))
if not book['good']:
# book has already been flagged bad for whatever reason.
@ -1522,7 +1510,7 @@ class FanFicFarePlugin(InterfaceAction):
logger.debug("existing found by identifier URL")
if collision == SKIP and identicalbooks:
raise exceptions.NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
raise NotGoingToDownload(_("Skipping duplicate story."),"list_remove.png")
if len(identicalbooks) > 1:
identicalbooks_msg = _("More than one identical book by Identifier URL or title/author(s)--can't tell which book to update/overwrite.")
@ -1553,7 +1541,7 @@ class FanFicFarePlugin(InterfaceAction):
identicalbooks = []
collision = book['collision'] = ADDNEW
else:
raise exceptions.NotGoingToDownload(identicalbooks_msg,"minusminus.png")
raise NotGoingToDownload(identicalbooks_msg,"minusminus.png")
## changed: add new book when CALIBREONLY if none found.
if collision in (CALIBREONLY, CALIBREONLYSAVECOL) and not identicalbooks:
@ -1640,11 +1628,11 @@ class FanFicFarePlugin(InterfaceAction):
# returns int adjusted for start-end range.
urlchaptercount = story.getChapterCount()
if chaptercount == urlchaptercount and collision == UPDATE:
raise exceptions.NotGoingToDownload(_("Already contains %d chapters.")%chaptercount,'edit-undo.png',showerror=False)
raise NotGoingToDownload(_("Already contains %d chapters.")%chaptercount,'edit-undo.png',showerror=False)
elif chaptercount > urlchaptercount and not (collision == UPDATEALWAYS and adapter.getConfig('force_update_epub_always')):
raise exceptions.NotGoingToDownload(_("Existing epub contains %d chapters, web site only has %d. Use Overwrite or force_update_epub_always to force update.") % (chaptercount,urlchaptercount),'dialog_error.png')
raise NotGoingToDownload(_("Existing epub contains %d chapters, web site only has %d. Use Overwrite or force_update_epub_always to force update.") % (chaptercount,urlchaptercount),'dialog_error.png')
elif chaptercount == 0:
raise exceptions.NotGoingToDownload(_("FanFicFare doesn't recognize chapters in existing epub, epub is probably from a different source. Use Overwrite to force update."),'dialog_error.png')
raise NotGoingToDownload(_("FanFicFare doesn't recognize chapters in existing epub, epub is probably from a different source. Use Overwrite to force update."),'dialog_error.png')
if collision == OVERWRITE and \
db.has_format(book_id,formmapping[fileform],index_is_id=True):
@ -1661,7 +1649,7 @@ class FanFicFarePlugin(InterfaceAction):
# updated does have time, use full timestamps.
if (lastupdated.time() == time.min and fileupdated.date() > lastupdated.date()) or \
(lastupdated.time() != time.min and fileupdated > lastupdated):
raise exceptions.NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)
raise NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)
# For update, provide a tmp file copy of the existing epub so
# it can't change underneath us. Now also overwrite for logpage preserve.
@ -1881,7 +1869,6 @@ class FanFicFarePlugin(InterfaceAction):
else:
return None
@do_cprofile
def update_books_loop(self,book,db=None,
options={'fileform':'epub',
'collision':ADDNEW,
@ -2213,45 +2200,30 @@ class FanFicFarePlugin(InterfaceAction):
## start with None. If no subbook covers, don't force one
## here. User can configure FFF to always create/polish a
## cover if they want. This is about when we force it.
coverimgpath = None
coverpath = None
coverimgtype = None
had_cover = False
# epubmerge wants a path to cover img on disk
def write_image(imgtype,imgdata):
tmp = PersistentTemporaryFile(prefix='cover_',
suffix='.'+imagetypes[imgtype],
dir=options['tdir'])
tmp.write(imgdata)
tmp.flush()
tmp.close()
return tmp.name
## if prior epub had a cover, we should use it again.
if mergebook['calibre_id'] and db.has_format(mergebook['calibre_id'],'EPUB',index_is_id=True):
(covertype,coverdata) = get_cover_img(db.format(mergebook['calibre_id'],'EPUB',index_is_id=True,as_file=True))
if coverdata:
had_cover = True
coverimgpath = write_image(covertype,coverdata)
coverimgtype = covertype
logger.debug("prior anthology cover found")
## look for covers inside the subbooks. Stop at the first
## one, which will be used if there isn't a pre-existing
## first, look for covers inside the subbooks. Stop at the
## first one, which will be used if there isn't a pre-existing
## calibre cover.
if not coverimgpath:
if not coverpath:
for book in good_list:
(covertype,coverdata) = get_cover_img(book['outfile'])
coverdata = get_cover_data(book['outfile'])
if coverdata: # found a cover.
coverimgpath = write_image(covertype,coverdata)
coverimgtype = covertype
logger.debug('from subbook coverimgpath:%s'%coverimgpath)
(coverimgtype,coverimgdata) = coverdata[4:6]
# logger.debug('coverimgtype:%s [%s]'%(coverimgtype,imagetypes[coverimgtype]))
tmpcover = PersistentTemporaryFile(suffix='.'+imagetypes[coverimgtype],
dir=options['tdir'])
tmpcover.write(coverimgdata)
tmpcover.flush()
tmpcover.close()
coverpath = tmpcover.name
break
# logger.debug('coverpath:%s'%coverpath)
## if updating an existing book and there is at least one
## subbook cover:
if not had_cover and coverimgpath and mergebook['calibre_id']:
logger.debug("anth cover: using cal cover")
if coverpath and mergebook['calibre_id']:
# Couldn't find a better way to get the cover path.
calcoverpath = os.path.join(db.library_path,
db.path(mergebook['calibre_id'], index_is_id=True),
@ -2259,11 +2231,9 @@ class FanFicFarePlugin(InterfaceAction):
## if there's an existing cover, use it. Calibre will set
## it for us during lots of different actions anyway.
if os.path.exists(calcoverpath):
coverimgpath = calcoverpath
coverpath = calcoverpath
## Note that this cover will be replaced if 'inject
## generated' cover is on
logger.debug('coverimgpath:%s'%coverimgpath)
# logger.debug('coverpath:%s'%coverpath)
mrg_args = [tmp.name,
[ x['outfile'] for x in good_list ],]
mrg_kwargs = {
@ -2271,7 +2241,7 @@ class FanFicFarePlugin(InterfaceAction):
'titleopt':mergebook['title'],
'keepmetadatafiles':True,
'source':mergebook['url'],
'coverjpgpath':coverimgpath
'coverjpgpath':coverpath
}
logger.debug('anthology_merge_keepsingletocs:%s'%
mergebook['anthology_merge_keepsingletocs'])
@ -2649,6 +2619,7 @@ class FanFicFarePlugin(InterfaceAction):
db.new_api.set_link_for_authors(author_id_to_link_map)
# set series link if found.
logger.debug("has link_map:%s"%(hasattr(db.new_api,'set_link_map')))
## new_api.set_link_map added in Calibre v6.15
if hasattr(db.new_api,'set_link_map') and \
prefs['set_series_url'] and \
@ -2657,7 +2628,6 @@ class FanFicFarePlugin(InterfaceAction):
series = book['series']
if '[' in series: # a few can have a series w/o number
series = series[:series.rindex(' [')]
logger.debug("Setting series link:%s"%book['all_metadata']['seriesUrl'])
db.new_api.set_link_map('series',{series:
book['all_metadata']['seriesUrl']})
@ -2847,9 +2817,6 @@ class FanFicFarePlugin(InterfaceAction):
mi.pubdate = book['pubdate']
mi.timestamp = book['timestamp']
mi.comments = book['comments']
if prefs['seriescase']:
from calibre.ebooks.metadata.sources.base import fixcase
book['series'] = fixcase(book['series'])
mi.series = book['series']
return mi
@ -3201,7 +3168,6 @@ The previously downloaded book is still in the anthology, but FFF doesn't have t
if prefs['setanthologyseries'] and book['title'] == series:
book['series'] = series+' [0]'
book['all_metadata']['seriesUrl'] = options.get('anthology_url','')
# logger.debug("anthology_title_pattern:%s"%configuration.getConfig('anthology_title_pattern'))
if configuration.getConfig('anthology_title_pattern'):
@ -3222,9 +3188,7 @@ The previously downloaded book is still in the anthology, but FFF doesn't have t
s = options.get('frompage',{}).get('status','')
if s:
book['all_metadata']['status'] = s
## status into tags only if in include_subject_tags
if 'status' in configuration.getConfigList('include_subject_tags'):
book['tags'].append(s)
book['tags'].append(s)
book['tags'].extend(configuration.getConfigList('anthology_tags'))
book['all_metadata']['anthology'] = "true"

View file

@ -44,44 +44,33 @@ def do_download_worker_single(site,
print_basic_debug_info(sys.stderr)
notification(0.01, _('Downloading FanFiction Stories'))
from calibre_plugins.fanficfare_plugin import FanFicFareBase
fffbase = FanFicFareBase(options['plugin_path'])
with fffbase: # so the sys.path was modified while loading the
# plug impl.
from fanficfare.fff_profile import do_cprofile
## extra function just so I can easily use the same
## @do_cprofile decorator
@do_cprofile
def profiled_func():
count = 0
totals = {}
# can't do direct assignment in list comprehension? I'm sure it
# makes sense to some pythonista.
# [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
[ totals.update({x['url']:0.0}) for x in book_list if x['good'] ]
# logger.debug(sites_lists.keys())
count = 0
totals = {}
# can't do direct assignment in list comprehension? I'm sure it
# makes sense to some pythonista.
# [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
[ totals.update({x['url']:0.0}) for x in book_list if x['good'] ]
# logger.debug(sites_lists.keys())
def do_indiv_notif(percent,msg):
totals[msg] = percent/len(totals)
notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
def do_indiv_notif(percent,msg):
totals[msg] = percent/len(totals)
notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
do_list = []
done_list = []
logger.info("\n\n"+_("Downloading FanFiction Stories")+"\n%s\n"%("\n".join([ "%(status)s %(url)s %(comment)s" % book for book in book_list])))
## pass failures from metadata through bg job so all results are
## together.
for book in book_list:
if book['good']:
do_list.append(book)
else:
done_list.append(book)
for book in do_list:
# logger.info("%s"%book['url'])
done_list.append(do_download_for_worker(book,options,merge,do_indiv_notif))
count += 1
return finish_download(done_list)
return profiled_func()
do_list = []
done_list = []
## pass failures from metadata through bg job so all results are
## together.
for book in book_list:
if book['good']:
do_list.append(book)
else:
done_list.append(book)
for book in do_list:
# logger.info("%s"%book['url'])
done_list.append(do_download_for_worker(book,options,merge,do_indiv_notif))
count += 1
return finish_download(done_list)
def finish_download(donelist):
book_list = sorted(donelist,key=lambda x : x['listorder'])
@ -124,6 +113,15 @@ def finish_download(donelist):
# return the book list as the job result
return book_list
def do_download_site(site,book_list,options,merge,notification=lambda x,y:x):
# logger.info(_("Started job for %s")%site)
retval = []
for book in book_list:
# logger.info("%s"%book['url'])
retval.append(do_download_for_worker(book,options,merge,notification))
notification(10.0,book['url'])
return retval
def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
'''
Child job, to download story when run as a worker job
@ -133,13 +131,13 @@ def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
fffbase = FanFicFareBase(options['plugin_path'])
with fffbase: # so the sys.path was modified while loading the
# plug impl.
from calibre_plugins.fanficfare_plugin.dialogs import NotGoingToDownload
from calibre_plugins.fanficfare_plugin.prefs import (
SAVE_YES, SAVE_YES_UNLESS_SITE, OVERWRITE, OVERWRITEALWAYS, UPDATE,
UPDATEALWAYS, ADDNEW, SKIP, CALIBREONLY, CALIBREONLYSAVECOL)
from calibre_plugins.fanficfare_plugin.wordcount import get_word_count
from fanficfare import adapters, writers
from fanficfare.epubutils import get_update_data
from fanficfare.exceptions import NotGoingToDownload
from fanficfare.six import text_type as unicode
from calibre_plugins.fanficfare_plugin.fff_util import get_fff_config

View file

@ -1599,8 +1599,6 @@ chaptertitles:Prologue,Chapter 1\, Xenos on Cinnabar,Chapter 2\, Sinmay on Kinti
[adult-fanfiction.org]
use_basic_cache:true
extra_valid_entries:eroticatags,disclaimer
eroticatags_label:Erotica Tags
disclaimer_label:Disclaimer
@ -1719,13 +1717,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## AO3 uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -1934,7 +1932,7 @@ make_linkhtml_entries:translators,betas
## For most sites, 'category' is the fandom, but fanfics.me has
## fandoms and a separate category. By making it configurable, users
## can change it.
include_in_category:category,fandoms
include_in_category:fandoms
[fanfictalk.com]
use_basic_cache:true
@ -2710,13 +2708,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3017,13 +3015,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3152,8 +3150,8 @@ bookmarkmemo_label:ブックマークメモ
bookmarkprivate_label:非公開ブックマーク
subscribed_label:更新通知
include_in_genre: genre, fullgenre
#include_in_genre: genre, biggenre, smallgenre
include_in_genre: fullgenre
#include_in_genre: biggenre, smallgenre
## adds to titlepage_entries instead of replacing it.
#extra_titlepage_entries: fullgenre,biggenre,smallgenre,imprint,freeformtags,comments,reviews,bookmarks,ratingpoints,overallpoints,bookmarked,bookmarkcategory,bookmarkmemo,bookmarkprivate,subscribed
@ -3396,13 +3394,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3533,7 +3531,7 @@ upvotes_label:Upvotes
subscribers_label:Subscribers
views_label:Views
include_in_category:category,tags
include_in_category:tags
#extra_titlepage_entries:upvotes,subscribers,views
@ -3669,13 +3667,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.

View file

@ -126,7 +126,6 @@ default_prefs['suppressauthorsort'] = False
default_prefs['suppresstitlesort'] = False
default_prefs['authorcase'] = False
default_prefs['titlecase'] = False
default_prefs['seriescase'] = False
default_prefs['setanthologyseries'] = False
default_prefs['mark'] = False
default_prefs['mark_success'] = True

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -53,9 +53,6 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
#Setting the 'Zone' for each "Site"
self.zone = self.parsedUrl.netloc.replace('.fanficauthors.net','')
# site change .nsns to -nsns
self.zone = self.zone.replace('.nsns','-nsns')
# normalized story URL.
self._setURL('https://{0}.{1}/{2}/'.format(
self.zone, self.getBaseDomain(), self.story.getMetadata('storyId')))
@ -82,10 +79,7 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
@classmethod
def getAcceptDomains(cls):
# need both .nsns(old) and -nsns(new) because it's a domain
# change, not just URL change.
return ['aaran-st-vines.nsns.fanficauthors.net',
'aaran-st-vines-nsns.fanficauthors.net',
'abraxan.fanficauthors.net',
'bobmin.fanficauthors.net',
'canoncansodoff.fanficauthors.net',
@ -101,12 +95,9 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
'jeconais.fanficauthors.net',
'kinsfire.fanficauthors.net',
'kokopelli.nsns.fanficauthors.net',
'kokopelli-nsns.fanficauthors.net',
'ladya.nsns.fanficauthors.net',
'ladya-nsns.fanficauthors.net',
'lorddwar.fanficauthors.net',
'mrintel.nsns.fanficauthors.net',
'mrintel-nsns.fanficauthors.net',
'musings-of-apathy.fanficauthors.net',
'ruskbyte.fanficauthors.net',
'seelvor.fanficauthors.net',
@ -117,7 +108,7 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
################################################################################################
@classmethod
def getSiteExampleURLs(self):
return ("https://aaran-st-vines-nsns.fanficauthors.net/A_Story_Name/ "
return ("https://aaran-st-vines.nsns.fanficauthors.net/A_Story_Name/ "
+ "https://abraxan.fanficauthors.net/A_Story_Name/ "
+ "https://bobmin.fanficauthors.net/A_Story_Name/ "
+ "https://canoncansodoff.fanficauthors.net/A_Story_Name/ "
@ -132,10 +123,10 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
+ "https://jbern.fanficauthors.net/A_Story_Name/ "
+ "https://jeconais.fanficauthors.net/A_Story_Name/ "
+ "https://kinsfire.fanficauthors.net/A_Story_Name/ "
+ "https://kokopelli-nsns.fanficauthors.net/A_Story_Name/ "
+ "https://ladya-nsns.fanficauthors.net/A_Story_Name/ "
+ "https://kokopelli.nsns.fanficauthors.net/A_Story_Name/ "
+ "https://ladya.nsns.fanficauthors.net/A_Story_Name/ "
+ "https://lorddwar.fanficauthors.net/A_Story_Name/ "
+ "https://mrintel-nsns.fanficauthors.net/A_Story_Name/ "
+ "https://mrintel.nsns.fanficauthors.net/A_Story_Name/ "
+ "https://musings-of-apathy.fanficauthors.net/A_Story_Name/ "
+ "https://ruskbyte.fanficauthors.net/A_Story_Name/ "
+ "https://seelvor.fanficauthors.net/A_Story_Name/ "
@ -145,16 +136,8 @@ class FanficAuthorsNetAdapter(BaseSiteAdapter):
################################################################################################
def getSiteURLPattern(self):
## .nsns kept here to match both . and -
return r'https?://(aaran-st-vines.nsns|abraxan|bobmin|canoncansodoff|chemprof|copperbadge|crys|deluded-musings|draco664|fp|frenchsession|ishtar|jbern|jeconais|kinsfire|kokopelli.nsns|ladya.nsns|lorddwar|mrintel.nsns|musings-of-apathy|ruskbyte|seelvor|tenhawk|viridian|whydoyouneedtoknow)\.fanficauthors\.net/([a-zA-Z0-9_]+)/'
@classmethod
def get_section_url(cls,url):
## only changing .nsns to -nsns and only when part of the
## domain.
url = url.replace('.nsns.fanficauthors.net','-nsns.fanficauthors.net')
return url
################################################################################################
def doExtractChapterUrlsAndMetadata(self, get_cover=True):

View file

@ -66,8 +66,7 @@ class FicwadComSiteAdapter(BaseSiteAdapter):
params['username']))
d = self.post_request(loginUrl,params,usecache=False)
if "Login attempt failed..." in d or \
'<div id="error">Please enter your username and password.</div>' in d:
if "Login attempt failed..." in d:
logger.info("Failed to login to URL %s as %s" % (loginUrl,
params['username']))
raise exceptions.FailedToLogin(url,params['username'])

View file

@ -163,7 +163,7 @@ class KakuyomuJpAdapter(BaseSiteAdapter):
titles = []
nestingLevel = 0
newSection = False
for tocNodeRef in info[workKey]['tableOfContentsV2']:
for tocNodeRef in info[workKey]['tableOfContents']:
tocNode = info[tocNodeRef['__ref']]
if tocNode['chapter'] is not None:

View file

@ -241,7 +241,7 @@ class LiteroticaSiteAdapter(BaseSiteAdapter):
self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div#tabpanel-tags a.av_as') ])
if soup.select('div[class^="_widget__tags_"]'):
# logger.debug("tags2")
self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div[class^="_widget__tags_"] a[class^="_tag_item_"]') ])
self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div[class^="_widget__tags_"] a[class^="_tags__link_"]') ])
# logger.debug(self.story.getList('eroticatags'))
## look first for 'Series Introduction', then Info panel short desc
@ -395,7 +395,7 @@ class LiteroticaSiteAdapter(BaseSiteAdapter):
## Collect tags from series/story page if tags_from_chapters is enabled
if self.getConfig("tags_from_chapters"):
self.story.extendList('eroticatags', [ unicode(t['tag']).title() for t in chap['tags'] ])
self.story.extendList('eroticatags', [ stripHTML(t['tag']).title() for t in chap['tags'] ])
except Exception as e:

View file

@ -197,20 +197,33 @@ class ScribbleHubComAdapter(BaseSiteAdapter): # XXX
# Get the contents list from scribblehub, iterate through and add to chapters
# Can be fairly certain this will not 404 - we know the story id is valid
contents_payload = {"action": "wi_getreleases_pagination",
"pagenum": -1,
"mypostid": self.story.getMetadata('storyId')}
contents_payload = {"action": "wi_gettocchp",
"strSID": self.story.getMetadata('storyId'),
"strmypostid": 0,
"strFic": "yes"}
# 14/12/22 - Looks like it should follow this format now (below), but still returns a 400
# but not a 403. tested in browser getting rid of all other cookies to try and get a 400 and nopes.
# contents_payload = {"action": "wi_getreleases_pagination",
# "pagenum": 1,
# "mypostid": 421879}
# contents_payload = "action=wi_getreleases_pagination&pagenum=1&mypostid=421879"
contents_data = self.post_request("https://www.scribblehub.com/wp-admin/admin-ajax.php", contents_payload)
# logger.debug(contents_data)
contents_soup = self.make_soup(contents_data)
for toca in contents_soup.select('a.toc_a'):
chapter_url = toca['href']
chapter_name = stripHTML(toca)
# logger.debug("Found Chapter: " + chapter_name + ", url: " + chapter_url)
for i in range(1, int(contents_soup.find('ol',{'id':'ol_toc'}).get('count')) + 1):
chapter_url = contents_soup.find('li',{'cnt':str(i)}).find('a').get('href')
chapter_name = contents_soup.find('li',{'cnt':str(i)}).find('a').get('title')
# logger.debug("Found Chapter " + str(i) + ", name: " + chapter_name + ", url: " + chapter_url)
self.add_chapter(chapter_name, chapter_url)
# eFiction sites don't help us out a lot with their meta data
# formating, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:

View file

@ -491,7 +491,6 @@ Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
desc = '<div><p>The Great Test Series of '+self.getSiteDomain()+'!</p><p>Now with two lines!</p></div>'
return {'name':'The Great Test',
'desc':desc,
'status':'AStatus',
'urllist':['http://'+self.getSiteDomain()+'?sid=1',
'http://'+self.getSiteDomain()+'?sid=2',
'http://'+self.getSiteDomain()+'?sid=3',

View file

@ -782,7 +782,7 @@ try to download.</p>
(img['src'],longdesc)=self.story.addImgUrl(url,self.img_url_trans(img['src']),fetch,
coverexclusion=self.getConfig('cover_exclusion_regexp'))
if longdesc:
# logger.debug("---set longdesc:%s"%longdesc)
logger.debug("---set longdesc:%s"%longdesc)
img['longdesc'] = longdesc
except AttributeError as ae:
logger.info("Parsing for img tags failed--probably poor input HTML. Skipping img(%s)"%img)
@ -833,9 +833,7 @@ try to download.</p>
## handle identifiers that otherwise appear to be
## selectors themselves. #966
try:
# logger.debug("Search for internal link anchor href:(%s)"%href)
if href[0] == "#" and soup.select_one("[id='%s'], [name='%s']"%(href[1:],href[1:])):
# logger.debug("Found internal link anchor href:(%s)"%href)
if href[0] == "#" and soup.select_one("[id='%s']"%href[1:]):
hrefurl = href
except Exception as e:
logger.debug("Search for internal link anchor failed href:(%s)"%href)

View file

@ -22,7 +22,6 @@ from .base_browsercache import BaseBrowserCache, CACHE_DIR_CONFIG
from .browsercache_simple import SimpleCache
from .browsercache_blockfile import BlockfileCache
from .browsercache_firefox2 import FirefoxCache2
from .browsercache_sqldb import SqldbCache
import logging
logger = logging.getLogger(__name__)
@ -35,13 +34,12 @@ class BrowserCache(object):
def __init__(self, site, getConfig_fn, getConfigList_fn):
"""Constructor for BrowserCache"""
# import of child classes have to be inside the def to avoid circular import error
for browser_cache_class in [SimpleCache, BlockfileCache, FirefoxCache2, SqldbCache]:
for browser_cache_class in [SimpleCache, BlockfileCache, FirefoxCache2]:
self.browser_cache_impl = browser_cache_class.new_browser_cache(site,
getConfig_fn,
getConfigList_fn)
if self.browser_cache_impl is not None:
break
logger.debug("Not using Browser Cache Class %s"%browser_cache_class)
if self.browser_cache_impl is None:
raise BrowserCacheException("%s is not set, or directory does not contain a known browser cache type: '%s'"%
(CACHE_DIR_CONFIG,getConfig_fn(CACHE_DIR_CONFIG)))

View file

@ -90,23 +90,18 @@ class BlockfileCache(BaseChromiumCache):
def is_cache_dir(cache_dir):
"""Return True only if a directory is a valid Cache for this class"""
if not os.path.isdir(cache_dir):
logger.debug("Cache dir not found")
return False
index_path = os.path.join(cache_dir, "index")
if not os.path.isfile(index_path):
logger.debug("index file not found")
return False
with share_open(index_path, 'rb') as index_file:
if struct.unpack('I', index_file.read(4))[0] != INDEX_MAGIC_NUMBER:
logger.debug("index file failed magic number check")
return False
data0_path = os.path.join(cache_dir, "data_0")
if not os.path.isfile(data0_path):
logger.debug("data_0 file not found")
return False
with share_open(data0_path, 'rb') as data0_file:
if struct.unpack('I', data0_file.read(4))[0] != BLOCK_MAGIC_NUMBER:
logger.debug("data_0 failed magic number check")
return False
return True

View file

@ -68,7 +68,6 @@ class FirefoxCache2(BaseBrowserCache):
"""Return True only if a directory is a valid Cache for this class"""
# logger.debug("\n\n1Starting cache check\n\n")
if not os.path.isdir(cache_dir):
logger.debug("Cache dir not found")
return False
## check at least one entry file exists.
for en_fl in glob.iglob(os.path.join(cache_dir, 'entries', '????????????????????????????????????????')):
@ -76,7 +75,6 @@ class FirefoxCache2(BaseBrowserCache):
k = _validate_entry_file(en_fl)
if k is not None:
return True
logger.debug("No valid cache files found")
return False
def make_keys(self,url):

View file

@ -76,19 +76,15 @@ class SimpleCache(BaseChromiumCache):
def is_cache_dir(cache_dir):
"""Return True only if a directory is a valid Cache for this class"""
if not os.path.isdir(cache_dir):
logger.debug("Cache dir not found")
return False
index_file = os.path.join(cache_dir, "index")
if not os.path.isfile(index_file) or os.path.getsize(index_file) > 24:
logger.debug("index file not found or too big(%s)"%os.path.getsize(index_file))
if not (os.path.isfile(index_file) and os.path.getsize(index_file) == 24):
return False
real_index_file = os.path.join(cache_dir, "index-dir", "the-real-index")
if not os.path.isfile(real_index_file):
logger.debug("real_index_file not found")
return False
with share_open(real_index_file, 'rb') as index_file:
if struct.unpack('QQ', index_file.read(16))[1] != THE_REAL_INDEX_MAGIC_NUMBER:
logger.debug("real_index_file failed magic number check")
return False
try:
# logger.debug("\n\nStarting cache check\n\n")
@ -96,11 +92,9 @@ class SimpleCache(BaseChromiumCache):
k = _validate_entry_file(en_fl)
if k is not None:
return True
except SimpleCacheException as sce:
except SimpleCacheException:
# raise
logger.debug(sce)
return False
logger.debug("No valid cache files found")
return False
def get_data_key_impl(self, url, key):

View file

@ -1,185 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2026 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import os
import apsw
import ctypes
# note share_open (on windows CLI) is implicitly readonly.
from .share_open import share_open
from .base_chromium import BaseChromiumCache
from .chromagnon import SuperFastHash
import logging
logger = logging.getLogger(__name__)
class SqldbCache(BaseChromiumCache):
"""Class to access data stream in Chrome Disk Sqldb Cache format cache files"""
def __init__(self, *args, **kargs):
"""Constructor for SqldbCache"""
super(SqldbCache,self).__init__(*args, **kargs)
logger.debug("Using SqldbCache")
# def scan_cache_keys(self):
## XXX will impl a scan if and when needed. It's a lot easier
## to peek inside an sqlite
@staticmethod
def is_cache_dir(cache_dir):
"""Return True only if a directory is a valid Cache for this class"""
if not os.path.isdir(cache_dir):
logger.debug("Cache dir not found")
return False
index_path = os.path.join(cache_dir, "index")
if not os.path.isfile(index_path):
logger.debug("index file not found")
return False
sqldb0_path = os.path.join(cache_dir, "sqldb0")
if not os.path.isfile(sqldb0_path):
logger.debug("sqldb0 file not found")
return False
## XXX check schema of db?
return True
## XXX others uses share_open() - will sqlite open work concurrently?
def get_data_key_impl(self, url, key):
"""
returns location, entry age(unix epoch), content-encoding and
raw(compressed) data
"""
location, age, encoding, data = '', None, None, None
qstr = 'SELECT last_used, head, blob FROM resources as r join blobs as b on b.res_id=r.res_id where cache_key_hash=?'
cache_key_hash = _key_hash(key)
logger.debug(" key:%s"%key)
logger.debug("cache_key_hash:%s"%cache_key_hash)
## XXX worth optimizing to keep sql conn open?
from ..six.moves.urllib.request import pathname2url
fileuri = os.path.join(self.cache_dir, "sqldb0")# pathname2url()
logger.debug(fileuri)
shareopenVFS = ShareOpenVFS()
logger.debug("VFS available %s"% apsw.vfs_names())
with apsw.Connection("file:"+fileuri+"?immutable=1",
flags=apsw.SQLITE_OPEN_READONLY | apsw.SQLITE_OPEN_URI,
vfs=shareopenVFS.vfs_name
) as db:
logger.debug("db flags:%xd"%db.open_flags)
logger.debug("db vfs:%s"%db.open_vfs)
for last, head, blob in db.execute(qstr,[cache_key_hash]):
row_age = self.make_age(last)
if age and row_age < age:
logger.debug("skipping an older row for same hash")
break
age = row_age
logger.debug("age from last_used:%s"%age)
## cheesy way to pull out the http headers, inspired
## by equal cheese in chromagnon/cacheData.py. Only
## actually care about location &content-encoding,
## ignore the rest.
head = head[head.index(b'HTTP'):]
head = head[:head.index(b'\x00\x00')]
# logger.debug(head)
for line in head.split(b'\0'):
logger.debug(line)
if b'content-encoding' in line.lower():
encoding = line.split(b':')[1].strip().lower()
logger.debug("encoding from header:%s"%encoding)
if b'location' in line.lower():
location = b':'.join(line.split(b':')[1:]).strip()
logger.debug("location from header:%s"%encoding)
## XXX might need entry age from header, too.
## Hoping db last_used is equiv.
data = blob
if data:
return (location, age, encoding, data)
else:
return None
## the sql stores SuperFastHash as a signed 32-bit value.
def _key_hash(key):
    """Return the signed 32-bit SuperFastHash of *key* as stored in the db."""
    unsigned = SuperFastHash.superFastHash(key) & 0xFFFFFFFF
    return ctypes.c_int32(unsigned).value
class ShareOpenVFS(apsw.VFS):
    """Read-only apsw VFS whose files are opened via share_open().

    Registered under the name 'shareopen' so Connection(vfs=...) can
    select it.  Intended to let SQLite read a cache db that another
    process (the browser) may still have open -- TODO confirm on all
    platforms.
    """
    def __init__(self):
        # name used by callers as Connection(..., vfs=self.vfs_name)
        self.vfs_name = 'shareopen'
        # base='' inherits default-VFS behavior for anything not
        # overridden below.
        super().__init__(name=self.vfs_name, base='')
    def xAccess(self, pathname, flags):
        # Claim every path is accessible; real failures surface in xOpen.
        return True
    def xFullPathname(self, filename):
        # Pass paths through unchanged; normalization happens in
        # ShareOpenVFSFile.__init__.
        return filename
    def xDelete(self, filename, syncdir):
        # Read-only VFS: never delete journal/temp files.
        logger.debug("xDelete NOT DELETING")
        pass
    def xOpen(self, name, flags):
        # Hand back our share_open()-backed file object.
        return ShareOpenVFSFile(name, flags)
class ShareOpenVFSFile:
    """apsw VFS file object backed by share_open(), read-only.

    Write, lock, truncate and delete-adjacent callbacks are no-op stubs
    so SQLite can read a database file this process does not own.
    """
    def __init__(self, name, flags):
        # apsw may pass a URIFilename wrapper instead of a plain string.
        self.filename = name.filename() if isinstance(name, apsw.URIFilename) else name
        self.filename = os.path.normpath(self.filename)
        logger.debug("Doing share open(%s)"%self.filename)
        # share_open: shared-access (on windows, implicitly read-only) open.
        self.file = share_open(self.filename, 'rb')
    def xRead(self, amount, offset):
        # Absolute seek then read -- sqlite always supplies the offset.
        self.file.seek(offset, 0)
        return self.file.read(amount)
    def xFileSize(self):
        return os.stat(self.filename).st_size
    def xClose(self):
        self.file.close()
    def xSectorSize(self):
        # 0: let SQLite assume its default sector size.
        return 0
    def xFileControl(self, *args):
        # No file-control opcodes handled.
        return False
    def xCheckReservedLock(self):
        # Never report a reserved lock; we only ever read.
        return False
    def xLock(self, level):
        # Locking is a no-op for this read-only, immutable use.
        pass
    def xUnlock(self, level):
        pass
    def xSync(self, flags):
        # Nothing is ever written, so sync trivially succeeds.
        return True
    def xTruncate(self, newsize):
        logger.debug("xTruncate NOT TRUNCING")
        pass
    def xWrite(self, data, offset):
        logger.debug("xWrite NOT WRITING")
        pass

View file

@ -27,7 +27,8 @@ import pprint
import string
import os, sys, platform
version="4.57.7"
version="4.55.0"
os.environ['CURRENT_VERSION_ID']=version
global_cache = 'global_cache'
@ -50,8 +51,6 @@ from fanficfare.geturls import get_urls_from_page, get_urls_from_imap
from fanficfare.six.moves import configparser
from fanficfare.six import text_type as unicode
from fanficfare.fff_profile import do_cprofile
def write_story(config, adapter, writeformat,
metaonly=False, nooutput=False,
outstream=None):
@ -347,7 +346,6 @@ def main(argv=None,
dispatch(options, urls, passed_defaultsini, passed_personalini, warn, fail)
# make rest a function and loop on it.
@do_cprofile
def do_download(arg,
options,
passed_defaultsini,

View file

@ -526,6 +526,7 @@ def get_immutable_entries():
return list([
'authorId',
'authorUrl',
'seriesUrl',
'storyId',
'storyUrl',
'langcode',
@ -606,9 +607,6 @@ class Configuration(ConfigParser):
self.url_config_set = False
## to improve performance, cache config values.
self.cached_config = {}
def section_url_names(self,domain,section_url_f):
## domain is passed as a method to limit the damage if/when an
## adapter screws up _section_url
@ -686,10 +684,6 @@ class Configuration(ConfigParser):
return self.get_config(self.sectionslist,key,default)
def get_config(self, sections, key, default=""):
try:
return self.cached_config[(tuple(sections),key)]
except KeyError as ke:
pass
val = default
val_files = []
@ -734,7 +728,6 @@ class Configuration(ConfigParser):
except (configparser.NoOptionError, configparser.NoSectionError) as e:
pass
self.cached_config[(tuple(sections),key)] = val
return val
# split and strip each.

View file

@ -1712,13 +1712,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## AO3 uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -1927,7 +1927,7 @@ make_linkhtml_entries:translators,betas
## For most sites, 'category' is the fandom, but fanfics.me has
## fandoms and a separate category. By making it configurable, users
## can change it.
include_in_category:category,fandoms
include_in_category:fandoms
[fanfictalk.com]
use_basic_cache:true
@ -2703,13 +2703,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3010,13 +3010,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3145,8 +3145,8 @@ bookmarkmemo_label:ブックマークメモ
bookmarkprivate_label:非公開ブックマーク
subscribed_label:更新通知
include_in_genre: genre, fullgenre
#include_in_genre: genre, biggenre, smallgenre
include_in_genre: fullgenre
#include_in_genre: biggenre, smallgenre
## adds to titlepage_entries instead of replacing it.
#extra_titlepage_entries: fullgenre,biggenre,smallgenre,imprint,freeformtags,comments,reviews,bookmarks,ratingpoints,overallpoints,bookmarked,bookmarkcategory,bookmarkmemo,bookmarkprivate,subscribed
@ -3389,13 +3389,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.
@ -3526,7 +3526,7 @@ upvotes_label:Upvotes
subscribers_label:Subscribers
views_label:Views
include_in_category:category,tags
include_in_category:tags
#extra_titlepage_entries:upvotes,subscribers,views
@ -3662,13 +3662,13 @@ make_linkhtml_entries:series00,series01,series02,series03,collections
## hardcoded to include the site specific metadata freeformtags &
## ao3categories in the standard metadata field genre. By making it
## configurable, users can change it.
include_in_genre: genre, freeformtags, ao3categories
include_in_genre: freeformtags, ao3categories
## OTW uses the word 'category' differently than most sites. The
## adapter used to be hardcoded to include the site specific metadata
## fandom in the standard metadata field category. By making it
## configurable, users can change it.
include_in_category:category,fandoms
include_in_category:fandoms
## freeformtags was previously typo'ed as freefromtags. This way,
## freefromtags will still work for people who've used it.

View file

@ -22,7 +22,25 @@ from io import BytesIO
FONT_EXTS = ('ttf','otf','woff','woff2')
from fanficfare.fff_profile import do_cprofile
# from io import StringIO
# import cProfile, pstats
# from pstats import SortKey
# def do_cprofile(func):
# def profiled_func(*args, **kwargs):
# profile = cProfile.Profile()
# try:
# profile.enable()
# result = func(*args, **kwargs)
# profile.disable()
# return result
# finally:
# # profile.sort_stats(SortKey.CUMULATIVE).print_stats(20)
# s = StringIO()
# sortby = SortKey.CUMULATIVE
# ps = pstats.Stats(profile, stream=s).sort_stats(sortby)
# ps.print_stats(20)
# print(s.getvalue())
# return profiled_func
import bs4
@ -33,52 +51,9 @@ def get_dcsource_chaptercount(inputio):
## getsoups=True to check for continue_on_chapter_error chapters.
return get_update_data(inputio,getfilecount=True,getsoups=True)[:2] # (source,filecount)
## only finds and returns cover image type and data, not cover page.
## should work on any epub. Added for anthology cover issues.
def get_cover_img(inputio):
    """Return (covertype, coverdata) for the epub's declared cover image.

    inputio: path or file-like/blob accepted by ZipFile.
    covertype is the item's media-type string, coverdata the raw image
    bytes; both are None when no <meta name="cover"> is declared or the
    image cannot be read.
    """
    # (oldcoverimgtype,oldcoverimgdata)
    epub = ZipFile(inputio, 'r') # works equally well with inputio as a path or a blob
    ## Find the .opf file.
    container = epub.read("META-INF/container.xml")
    containerdom = parseString(container)
    rootfilenodelist = containerdom.getElementsByTagName("rootfile")
    # first rootfile's full-path points at the package (.opf) document
    rootfilename = rootfilenodelist[0].getAttribute("full-path")
    contentdom = parseString(epub.read(rootfilename))
    firstmetadom = contentdom.getElementsByTagName("metadata")[0]
    ## Save the path to the .opf file--hrefs inside it are relative to it.
    relpath = get_path_part(rootfilename)
    # logger.debug("relpath:%s"%relpath)
    # <meta name="cover" content="cover"/>
    # content attr is the manifest id of the cover image item.
    coverid = None
    covertype = None
    coverdata = None
    for metatag in firstmetadom.getElementsByTagName("meta"):
        if metatag.getAttribute('name') == 'cover':
            coverid = metatag.getAttribute('content')
            # logger.debug("coverid:%s"%coverid)
            break
    if coverid:
        # find the manifest <item> with that id and read its href from
        # the zip, resolved relative to the .opf location.
        for item in contentdom.getElementsByTagName("item"):
            if item.getAttribute('id') == coverid:
                coverhref = relpath+item.getAttribute("href")
                ## remove .. and the part it obviates
                coverhref = re.sub(r"([^/]+/\.\./)","",coverhref)
                covertype = item.getAttribute('media-type')
                # logger.debug("covertype:%s coverhref:%s"%(covertype,coverhref))
                try:
                    coverdata = epub.read(coverhref)
                    # logger.debug("coverdatalen:%s"%len(coverdata))
                except Exception as e:
                    # best-effort: a missing/corrupt image is reported,
                    # not raised.
                    logger.info("Failed to read cover (%s): %s"%(coverhref,e))
                    covertype, coverdata = None, None
                break
    return covertype, coverdata
def get_cover_data(inputio):
# (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
return get_update_data(inputio,getfilecount=True,getsoups=False)[4]
def get_oldcover(epub,relpath,contentdom,item):
href=relpath+item.getAttribute("href")
@ -329,10 +304,6 @@ def get_update_data(inputio,
for item in contentdom.getElementsByTagName("item"):
href=relpath+item.getAttribute("href")
if item.getAttribute("media-type").startswith("image/") and getsoups:
if oldcover and href == oldcover[3]:
# don't include cover image, already handled by
# oldcover code and can trip de-dup unintentionally.
continue
img_url = href.replace("OEBPS/","")
# logger.debug("-->img img:%s"%img_url)
if img_url not in images:
@ -432,7 +403,7 @@ def get_story_url_from_zip_html(inputio,_is_good_url=None):
return ahref
return None
@do_cprofile
# @do_cprofile
def reset_orig_chapters_epub(inputio,outfile):
inputepub = ZipFile(inputio, 'r') # works equally well with a path or a blob
@ -485,50 +456,28 @@ def reset_orig_chapters_epub(inputio,outfile):
if re.match(r'.*/file\d+\.xhtml',zf):
#logger.debug("zf:%s"%zf)
data = data.decode('utf-8')
# should be re-reading an FFF file, single soup should
# be good enough and halve processing time.
soup = make_soup(data,dblsoup=False)
## For higher performance checking, don't need to
## make_soup if not different
header = data[0:data.find("</head>")]
'''
<meta name="chapterorigtitle" content="8. Chapter 7" />
<meta name="chaptertoctitle" content="8. Chapter 7" />
<meta name="chaptertitle" content="8. (new) Chapter 7" />
'''
# logger.debug(header)
def get_meta_content(n,d):
m = re.match(r'.*<meta( name="%s"| content="(?P<found>[^"]+))+".*'%n,d,re.DOTALL)
if m:
# logger.debug("%s -> %s"%(n,m.groupdict().get('found',None)))
return m.groupdict().get('found',None)
chapterorigtitle = None
tag = soup.find('meta',{'name':'chapterorigtitle'})
if tag:
chapterorigtitle = tag['content']
chapterorigtitle = get_meta_content('chapterorigtitle',header)
chaptertoctitle =get_meta_content('chaptertoctitle',header)
chaptertitle = get_meta_content('chaptertitle',header)
# toctitle is separate for add_chapter_numbers:toconly users.
chaptertoctitle = None
tag = soup.find('meta',{'name':'chaptertoctitle'})
if tag:
chaptertoctitle = tag['content']
else:
chaptertoctitle = chapterorigtitle
if not (chapterorigtitle and chaptertoctitle and chaptertitle \
and chapterorigtitle == chaptertitle):
# should be re-reading an FFF file, single soup should
# be good enough and halve processing time.
soup = make_soup(data,dblsoup=False)
chapterorigtitle = None
tag = soup.find('meta',{'name':'chapterorigtitle'})
if tag:
chapterorigtitle = tag['content']
# toctitle is separate for add_chapter_numbers:toconly users.
chaptertoctitle = None
tag = soup.find('meta',{'name':'chaptertoctitle'})
if tag:
chaptertoctitle = tag['content']
else:
chaptertoctitle = chapterorigtitle
chaptertitle = None
tag = soup.find('meta',{'name':'chaptertitle'})
if tag:
chaptertitle = tag['content']
chaptertitle_tag = tag
chaptertitle = None
tag = soup.find('meta',{'name':'chaptertitle'})
if tag:
chaptertitle = tag['content']
chaptertitle_tag = tag
#logger.debug("chaptertitle:(%s) chapterorigtitle:(%s)"%(chaptertitle, chapterorigtitle))
if chaptertitle and chapterorigtitle and chapterorigtitle != chaptertitle:

View file

@ -148,12 +148,3 @@ class HTTPErrorFFF(Exception):
class BrowserCacheException(Exception):
pass
class NotGoingToDownload(Exception):
    """Raised to skip downloading a story.

    Carries the error text plus presentation hints for the calibre
    plugin GUI: an icon resource name and whether to pop up an error
    dialog.
    """

    def __init__(self, error, icon='dialog_error.png', showerror=True):
        self.error = error
        self.icon = icon
        self.showerror = showerror

    def __str__(self):
        return self.error

View file

@ -1,44 +0,0 @@
# Copyright 2026 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
## not compatible with py2, SortKey not available.
import sys

# master switch: profiling is opt-in at edit time only
DO_PROFILING = False

if DO_PROFILING and sys.version_info >= (3, 7):
    from io import StringIO
    import cProfile, pstats
    from pstats import SortKey

    def do_cprofile(func):
        """Decorator: profile *func* and print the top cumulative stats."""
        def profiled_func(*args, **kwargs):
            prof = cProfile.Profile()
            try:
                prof.enable()
                return_value = func(*args, **kwargs)
                prof.disable()
                return return_value
            finally:
                # dump the 20 most expensive calls (by cumulative time),
                # even when func raises.
                out = StringIO()
                stats = pstats.Stats(prof, stream=out)
                stats.sort_stats(SortKey.CUMULATIVE).print_stats(20)
                print(out.getvalue())
        return profiled_func
else:
    ## do-nothing decorator for py2 / profiling disabled
    def do_cprofile(func):
        """Pass-through decorator used when profiling is off."""
        def profiled_func(*args, **kwargs):
            return func(*args, **kwargs)
        return profiled_func

View file

@ -658,7 +658,7 @@ class ImageStore:
if failure:
info['newsrc'] = 'failedtoload'
info['actuallyused'] = False
# logger.debug("add_img(%s,%s,%s,%s,%s,used:%s)"%(url,ext,mime,uuid,info['newsrc'],info['actuallyused']))
logger.debug("add_img(%s,%s,%s,%s,%s,used:%s)"%(url,ext,mime,uuid,info['newsrc'],info['actuallyused']))
return info
def cache_failed_url(self,url):
@ -1639,7 +1639,7 @@ class Story(Requestable):
## likely changed to jpg.
(src,data)=oldimgs[url]
ext = src.split('.')[-1]
# logger.debug("load_oldimgs:(%s,%s,%s)"%(url,ext,imagetypes[ext]))
logger.debug("load_oldimgs:(%s,%s,%s)"%(url,ext,imagetypes[ext]))
self.img_store.add_img(url,
ext,
imagetypes[ext],
@ -1746,7 +1746,7 @@ class Story(Requestable):
(data,ext,mime) = no_convert_image(imgurl,
imgdata)
else:
# logger.debug("Doing image processing on (%s)"%imgurl)
logger.debug("Doing image processing on (%s)"%imgurl)
try:
sizes = [ int(x) for x in self.getConfigList('image_max_size',['580', '725']) ]
except Exception as e:

View file

@ -16,7 +16,7 @@ name = "FanFicFare" # Required
#
# For a discussion on single-sourcing the version, see
# https://packaging.python.org/guides/single-sourcing-package-version/
version = "4.57.7"
version = "4.55.0"
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field: