Mirror of https://github.com/JimmXinu/FanFicFare.git, synced 2025-12-22 08:44:13 +01:00
fix deprecation warnings for logger.warn()
The correct function since 2003 has been .warning(); .warn() is merely a compatibility wrapper over it. It wasn't documented until https://bugs.python.org/issue13235 added documentation stating that it exists but is obsolete. Nevertheless, the whole world is full of code that mysteriously uses it anyway. Let's at least remove it here.
This commit is contained in:
parent
dda3c591b6
commit
d226f4791f
18 changed files with 38 additions and 38 deletions
|
|
@ -442,7 +442,7 @@ class ArchiveOfOurOwnOrgAdapter(BaseSiteAdapter):
|
|||
## sanity check just in case.
|
||||
self.use_full_work_soup = False
|
||||
self.full_work_soup = None
|
||||
logger.warn("chapter count in view_full_work(%s) disagrees with num of chapters(%s)--ending use_view_full_work"%(len(self.full_work_chapters),self.num_chapters()))
|
||||
logger.warning("chapter count in view_full_work(%s) disagrees with num of chapters(%s)--ending use_view_full_work"%(len(self.full_work_chapters),self.num_chapters()))
|
||||
whole_dl_soup = self.full_work_soup
|
||||
|
||||
if whole_dl_soup:
|
||||
|
|
|
|||
|
|
@ -331,7 +331,7 @@ class FanFicsMeAdapter(BaseSiteAdapter):
|
|||
chapter_div = whole_dl_soup.find('div',{'id':'c%s'%(index)})
|
||||
if not chapter_div:
|
||||
self.use_full_work_soup = False
|
||||
logger.warn("c%s not found in view_full_work--ending use_view_full_work"%(index))
|
||||
logger.warning("c%s not found in view_full_work--ending use_view_full_work"%(index))
|
||||
if chapter_div == None:
|
||||
whole_dl_soup = self.make_soup(self._fetchUrl(url))
|
||||
chapter_div = whole_dl_soup.find('div',{'id':'c%s'%(index)})
|
||||
|
|
|
|||
|
|
@ -143,7 +143,7 @@ class FanFictionNetSiteAdapter(BaseSiteAdapter):
|
|||
if e.code == 503:
|
||||
raise e
|
||||
except Exception as e:
|
||||
logger.warn("Caught an exception reading URL: %s Exception %s."%(unicode(url),unicode(e)))
|
||||
logger.warning("Caught an exception reading URL: %s Exception %s."%(unicode(url),unicode(e)))
|
||||
pass
|
||||
|
||||
# Find authorid and URL from... author url.
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ except ImportError:
|
|||
# A version of feedparser is available in the Calibre plugin version
|
||||
from calibre.web.feeds import feedparser
|
||||
except ImportError:
|
||||
# logger.warn('No version of feedparser module available, falling back to naive published and updated date')
|
||||
# logger.warning('No version of feedparser module available, falling back to naive published and updated date')
|
||||
feedparser = None
|
||||
|
||||
# py2 vs py3 transition
|
||||
|
|
|
|||
|
|
@ -161,7 +161,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
# pages after story found.
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.warn("series parsing failed(%s)"%e)
|
||||
logger.warning("series parsing failed(%s)"%e)
|
||||
pass
|
||||
|
||||
# Find the chapters by regexing urls
|
||||
|
|
@ -187,7 +187,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
self.story.addToList('characters',char.string)
|
||||
|
||||
except Exception as e:
|
||||
logger.warn("character parsing failed(%s)"%e)
|
||||
logger.warning("character parsing failed(%s)"%e)
|
||||
|
||||
#get warnings
|
||||
try:
|
||||
|
|
@ -196,7 +196,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
self.story.addToList('warnings', warn.string)
|
||||
|
||||
except Exception as e:
|
||||
logger.warn("warning parsing failed(%s)"%e)
|
||||
logger.warning("warning parsing failed(%s)"%e)
|
||||
|
||||
#get genres
|
||||
try:
|
||||
|
|
@ -205,7 +205,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
self.story.addToList('genre', genre.string)
|
||||
|
||||
except Exception as e:
|
||||
logger.warn("genre parsing failed(%s)"%e)
|
||||
logger.warning("genre parsing failed(%s)"%e)
|
||||
|
||||
# no convenient way to extract remaining metadata so bodge it by finding relevant identifier string and using next element as the data source
|
||||
|
||||
|
|
@ -220,7 +220,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
#logger.debug(summaryValue)
|
||||
self.setDescription(url,summaryValue)
|
||||
except Exception as e:
|
||||
logger.warn("summary parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("summary parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
|
||||
#get rating
|
||||
|
|
@ -228,7 +228,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
rating = workDetails.find('strong',text='Rated:').next_sibling.string
|
||||
self.story.setMetadata('rating', rating)
|
||||
except Exception as e:
|
||||
logger.warn("rating parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("rating parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
#get completion status and correct for consistency with other adapters
|
||||
try:
|
||||
|
|
@ -240,14 +240,14 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
|
||||
self.story.setMetadata('status', status)
|
||||
except Exception as e:
|
||||
logger.warn("status parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("status parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
#get wordcount
|
||||
try:
|
||||
wordCount = workDetails.find('strong',text='Word count:').next_sibling.string
|
||||
self.story.setMetadata('numWords', wordCount)
|
||||
except Exception as e:
|
||||
logger.warn("wordcount parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("wordcount parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
#get published date, this works for some reason yet doesn't without the spaces in it
|
||||
try:
|
||||
|
|
@ -255,7 +255,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
self.story.setMetadata('datePublished', makeDate(datePublished, self.dateformat))
|
||||
|
||||
except Exception as e:
|
||||
logger.warn("datePublished parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("datePublished parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
#get updated date
|
||||
try:
|
||||
|
|
@ -263,7 +263,7 @@ class SilmarillionWritersGuildOrgAdapter(BaseSiteAdapter):
|
|||
self.story.setMetadata('dateUpdated', makeDate(dateUpdated, self.dateformat))
|
||||
|
||||
except Exception as e:
|
||||
logger.warn("dateUpdated parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("dateUpdated parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
# grab the text for an individual chapter.
|
||||
def getChapterText(self, url):
|
||||
|
|
|
|||
|
|
@ -131,7 +131,7 @@ class StoriesOfArdaComAdapter(BaseSiteAdapter):
|
|||
self.story.setMetadata('status', td[2].string.split(': ')[1])
|
||||
self.story.setMetadata('datePublished', makeDate(stripHTML(td[4]).split(': ')[1], self.dateformat))
|
||||
except Exception as e:
|
||||
logger.warn("rating, status and/or datePublished parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
logger.warning("rating, status and/or datePublished parsing failed(%s) -- This can be caused by bad HTML in story description."%e)
|
||||
|
||||
# grab the text for an individual chapter.
|
||||
def getChapterText(self, url):
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ class TestSiteAdapter(BaseSiteAdapter):
|
|||
self.do_sleep()
|
||||
|
||||
if idnum >= 1000:
|
||||
logger.warn("storyId:%s - Custom INI data will be used."%idstr)
|
||||
logger.warning("storyId:%s - Custom INI data will be used."%idstr)
|
||||
|
||||
sections = ['teststory:%s'%idstr,'teststory:defaults']
|
||||
#print("self.get_config_list(sections,'valid_entries'):%s"%self.get_config_list(sections,'valid_entries'))
|
||||
|
|
@ -98,7 +98,7 @@ class TestSiteAdapter(BaseSiteAdapter):
|
|||
idnum = int(idstr)
|
||||
|
||||
if idstr == '665' and not (self.is_adult or self.getConfig("is_adult")):
|
||||
logger.warn("self.is_adult:%s"%self.is_adult)
|
||||
logger.warning("self.is_adult:%s"%self.is_adult)
|
||||
raise exceptions.AdultCheckRequired(self.url)
|
||||
|
||||
if idstr == '666':
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ class WattpadComAdapter(BaseSiteAdapter):
|
|||
try:
|
||||
WattpadComAdapter.CATEGORY_DEFs = json.loads(self._fetchUrl(WattpadComAdapter.API_GETCATEGORIES))
|
||||
except:
|
||||
logger.warn('API_GETCATEGORIES failed.')
|
||||
logger.warning('API_GETCATEGORIES failed.')
|
||||
WattpadComAdapter.CATEGORY_DEFs = []
|
||||
|
||||
@staticmethod
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ class WhoficComSiteAdapter(BaseSiteAdapter):
|
|||
# <li class="list-inline-item small"><b>Word count:</b> 6363</li>
|
||||
# </ul>
|
||||
# </div>
|
||||
# logger.warn(metadata)
|
||||
# logger.warning(metadata)
|
||||
|
||||
cat_as = metadata.find_all('a', href=re.compile(r'categories.php'))
|
||||
for cat_a in cat_as:
|
||||
|
|
@ -207,7 +207,7 @@ class WhoficComSiteAdapter(BaseSiteAdapter):
|
|||
self.setSeries(series_name, 0)
|
||||
self.story.setMetadata('seriesUrl',series_url)
|
||||
|
||||
# logger.warn(metadata) #.find_all('p')
|
||||
# logger.warning(metadata) #.find_all('p')
|
||||
|
||||
ps = metadata.find_all('p')
|
||||
|
||||
|
|
@ -230,7 +230,7 @@ class WhoficComSiteAdapter(BaseSiteAdapter):
|
|||
if chars != 'None':
|
||||
self.story.extendList('characters',chars.split(', '))
|
||||
|
||||
# logger.warn(metadata)
|
||||
# logger.warning(metadata)
|
||||
|
||||
def getChapterText(self, url):
|
||||
|
||||
|
|
|
|||
|
|
@ -174,7 +174,7 @@ class BaseXenForo2ForumAdapter(BaseXenForoForumAdapter):
|
|||
for p in posts:
|
||||
if 'sticky-container' not in p['class']:
|
||||
return p
|
||||
logger.warn("First X posts all sticky? Using first-first post.")
|
||||
logger.warning("First X posts all sticky? Using first-first post.")
|
||||
return posts[0]
|
||||
|
||||
def get_first_post_body(self,topsoup):
|
||||
|
|
@ -246,7 +246,7 @@ class BaseXenForo2ForumAdapter(BaseXenForoForumAdapter):
|
|||
# not paying any attention to TZ issues.
|
||||
return datetime.fromtimestamp(float(datetag['data-time']))
|
||||
except:
|
||||
# logger.warn('No date found in %s'%parenttag,exc_info=True)
|
||||
# logger.warning('No date found in %s'%parenttag,exc_info=True)
|
||||
return None
|
||||
|
||||
def make_reader_url(self,tmcat_num,reader_page_num):
|
||||
|
|
|
|||
|
|
@ -590,7 +590,7 @@ class Configuration(ConfigParser):
|
|||
self._sections = self._dict((section_url_f(k) if (domain in k and 'http' in k) else k, v) for k, v in six.viewitems(self._sections))
|
||||
# logger.debug(self._sections.keys())
|
||||
except Exception as e:
|
||||
logger.warn("Failed to perform section_url_names: %s"%e)
|
||||
logger.warning("Failed to perform section_url_names: %s"%e)
|
||||
|
||||
def addUrlConfigSection(self,url):
|
||||
if not self.lightweight: # don't need when just checking for normalized URL.
|
||||
|
|
@ -671,7 +671,7 @@ class Configuration(ConfigParser):
|
|||
except:
|
||||
pass
|
||||
if not file_val:
|
||||
logger.warn("All files for (%s) failed! Using (%s) instead. Filelist: (%s)"%
|
||||
logger.warning("All files for (%s) failed! Using (%s) instead. Filelist: (%s)"%
|
||||
(key+"_filelist",key,val_files))
|
||||
|
||||
if not file_val:
|
||||
|
|
@ -1040,7 +1040,7 @@ class Configuration(ConfigParser):
|
|||
try:
|
||||
return reduce_zalgo(data,max_zalgo)
|
||||
except Exception as e:
|
||||
logger.warn("reduce_zalgo failed(%s), continuing."%e)
|
||||
logger.warning("reduce_zalgo failed(%s), continuing."%e)
|
||||
return data
|
||||
|
||||
# Assumes application/x-www-form-urlencoded. parameters, headers are dict()s
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ def parse_relative_date_string(string_):
|
|||
else:
|
||||
# This is "just as wrong" as always returning the currentq
|
||||
# date, but prevents unneeded updates each time
|
||||
logger.warn('Failed to parse relative date string: %r, falling back to unix epoche', string_)
|
||||
logger.warning('Failed to parse relative date string: %r, falling back to unix epoche', string_)
|
||||
return UNIX_EPOCHE
|
||||
|
||||
kwargs = {unit: int(value)}
|
||||
|
|
|
|||
|
|
@ -82,8 +82,8 @@ def get_update_data(inputio,
|
|||
continue
|
||||
except Exception as e:
|
||||
## Calibre's Polish Book corrupts sub-book covers.
|
||||
logger.warn("Cover (x)html file %s not found"%href)
|
||||
logger.warn("Exception: %s"%(unicode(e)))
|
||||
logger.warning("Cover (x)html file %s not found"%href)
|
||||
logger.warning("Exception: %s"%(unicode(e)))
|
||||
|
||||
try:
|
||||
# remove all .. and the path part above it, if present.
|
||||
|
|
@ -98,8 +98,8 @@ def get_update_data(inputio,
|
|||
break
|
||||
oldcover = (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
|
||||
except Exception as e:
|
||||
logger.warn("Cover Image %s not found"%src)
|
||||
logger.warn("Exception: %s"%(unicode(e)))
|
||||
logger.warning("Cover Image %s not found"%src)
|
||||
logger.warning("Exception: %s"%(unicode(e)))
|
||||
|
||||
filecount = 0
|
||||
soups = [] # list of xhmtl blocks
|
||||
|
|
@ -142,8 +142,8 @@ def get_update_data(inputio,
|
|||
# it indicates a failed download
|
||||
# originally.
|
||||
if newsrc != u'OEBPS/failedtoload':
|
||||
logger.warn("Image %s not found!\n(originally:%s)"%(newsrc,longdesc))
|
||||
logger.warn("Exception: %s"%(unicode(e)),exc_info=True)
|
||||
logger.warning("Image %s not found!\n(originally:%s)"%(newsrc,longdesc))
|
||||
logger.warning("Exception: %s"%(unicode(e)),exc_info=True)
|
||||
bodysoup = soup.find('body')
|
||||
# ffdl epubs have chapter title h3
|
||||
h3 = bodysoup.find('h3')
|
||||
|
|
|
|||
|
|
@ -235,7 +235,7 @@ def get_urls_from_imap(srv,user,passwd,folder,markread=True):
|
|||
folders.append(m.group("folder").replace("\\",""))
|
||||
# logger.debug(folders[-1])
|
||||
else:
|
||||
logger.warn("Failed to parse IMAP folder line(%s)"%ensure_str(f))
|
||||
logger.warning("Failed to parse IMAP folder line(%s)"%ensure_str(f))
|
||||
except:
|
||||
folders = []
|
||||
logger.warning("Failed to parse IMAP folder list, continuing without list.")
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ def _unirepl(match):
|
|||
except:
|
||||
# This way, at least if there's more of entities out there
|
||||
# that fail, it doesn't blow the entire download.
|
||||
logger.warn("Numeric entity translation failed, skipping: &#x%s%s"%(match.group(1),match.group(2)))
|
||||
logger.warning("Numeric entity translation failed, skipping: &#x%s%s"%(match.group(1),match.group(2)))
|
||||
retval = ""
|
||||
return retval
|
||||
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ class HtmlProcessor:
|
|||
# TODO(chatham): Using regexes and looking for name= would be better.
|
||||
newpos = assembled_text.find(b'name="'+ensure_binary(ref)) # .encode('utf-8')
|
||||
if newpos == -1:
|
||||
logger.warn('Could not find anchor "%s"' % original_ref)
|
||||
logger.warning('Could not find anchor "%s"' % original_ref)
|
||||
continue
|
||||
# instead of somewhere slightly *after* the <a> tag pointed to,
|
||||
# let's go right in front of it instead by looking for the page
|
||||
|
|
|
|||
|
|
@ -765,7 +765,7 @@ class Story(Configurable):
|
|||
try:
|
||||
value = commaGroups(unicode(value))
|
||||
except Exception as e:
|
||||
logger.warn("Failed to add commas to %s value:(%s) exception(%s)"%(key,value,e))
|
||||
logger.warning("Failed to add commas to %s value:(%s) exception(%s)"%(key,value,e))
|
||||
if key in ("dateCreated"):
|
||||
value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d %H:%M:%S"))
|
||||
if key in ("datePublished","dateUpdated"):
|
||||
|
|
|
|||
|
|
@ -195,7 +195,7 @@ class BaseStoryWriter(Configurable):
|
|||
lastupdated=self.story.getMetadataRaw('dateUpdated').date()
|
||||
fileupdated=datetime.datetime.fromtimestamp(os.stat(outfilename)[8]).date()
|
||||
if fileupdated > lastupdated:
|
||||
logger.warn("File(%s) Updated(%s) more recently than Story(%s) - Skipping" % (outfilename,fileupdated,lastupdated))
|
||||
logger.warning("File(%s) Updated(%s) more recently than Story(%s) - Skipping" % (outfilename,fileupdated,lastupdated))
|
||||
return
|
||||
if not metaonly:
|
||||
self.story = self.adapter.getStory() # get full story
|
||||
|
|
|
|||
Loading…
Reference in a new issue