mirror of https://github.com/kemayo/leech

Minor readability improvement: use f-strings

David Lynch 2019-10-15 11:14:27 -05:00
parent c584988994
commit 7208cfdaaf
7 changed files with 21 additions and 21 deletions
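The change is mechanical throughout: each str.format() call is replaced by an equivalent f-string (PEP 498, available since Python 3.6). A minimal sketch of the pattern, using a hypothetical workid value:

    workid = '12345'
    # before: positional placeholders filled in by str.format()
    url = 'http://archiveofourown.org/works/{}?view_adult=true'.format(workid)
    # after: the same expression interpolated inline
    url = f'http://archiveofourown.org/works/{workid}?view_adult=true'
    assert url == 'http://archiveofourown.org/works/12345?view_adult=true'

One wrinkle: before Python 3.12, an f-string expression cannot reuse the string's own quote character, which is why currc['ct'] becomes currc["ct"] in the fiction.live hunk below.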


@@ -76,19 +76,19 @@ class CoverOptions:
 def chapter_html(story, titleprefix=None):
     chapters = []
     for i, chapter in enumerate(story):
-        title = chapter.title or '#{}'.format(i)
+        title = chapter.title or f'#{i}'
         if hasattr(chapter, '__iter__'):
             # This is a Section
             chapters.extend(chapter_html(chapter, titleprefix=title))
         else:
-            title = titleprefix and '{}: {}'.format(titleprefix, title) or title
+            title = titleprefix and f'{titleprefix}: {title}' or title
             chapters.append((
                 title,
-                '{}/chapter{}.html'.format(story.id, i + 1),
+                f'{story.id}/chapter{i + 1}.html',
                 html_template.format(title=title, text=chapter.contents)
             ))
     if story.footnotes:
-        chapters.append(("Footnotes", '{}/footnotes.html'.format(story.id), html_template.format(title="Footnotes", text='\n\n'.join(story.footnotes))))
+        chapters.append(("Footnotes", f'{story.id}/footnotes.html', html_template.format(title="Footnotes", text='\n\n'.join(story.footnotes))))
     return chapters


@@ -164,14 +164,14 @@ class Site:
         # epub spec footnotes are all about epub:type on the footnote and the link
         # http://www.idpf.org/accessibility/guidelines/content/semantics/epub-type.php
         contents.name = 'div'
-        contents.attrs['id'] = "footnote{}".format(idx)
+        contents.attrs['id'] = f'footnote{idx}'
         contents.attrs['epub:type'] = 'rearnote'

         # a backlink is essential for Kindle to think of this as a footnote
         # otherwise it doesn't get the inline-popup treatment
         # http://kindlegen.s3.amazonaws.com/AmazonKindlePublishingGuidelines.pdf
         # section 3.9.10
-        backlink = self._new_tag('a', href="chapter{}.html#noteback{}".format(chapterid, idx))
+        backlink = self._new_tag('a', href=f'chapter{chapterid}.html#noteback{idx}')
         backlink.string = '^'
         contents.insert(0, backlink)
@@ -181,8 +181,8 @@ class Site:
         # epub annotations.
         spoiler_link = self._new_tag('a')
         spoiler_link.attrs = {
-            'id': 'noteback{}'.format(idx),
-            'href': "footnotes.html#footnote{}".format(idx),
+            'id': f'noteback{idx}',
+            'href': f'footnotes.html#footnote{idx}',
             'epub:type': 'noteref',
         }
         spoiler_link.string = str(idx)
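For context on the two hunks above: they pair an epub:type="rearnote" block with an epub:type="noteref" link, which is what triggers the inline footnote popup. Roughly, with hypothetical values idx = 3 and chapterid = 2, the resulting markup is:

    <!-- footnotes.html: the note body, opened by a backlink -->
    <div id="footnote3" epub:type="rearnote">
      <a href="chapter2.html#noteback3">^</a>
      ...footnote contents...
    </div>

    <!-- chapter2.html: the in-text reference -->
    <a id="noteback3" href="footnotes.html#footnote3" epub:type="noteref">3</a>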


@@ -48,7 +48,7 @@ class ArchiveOfOurOwn(Site):
     def _extract_work(self, workid):
         # Fetch the full work
-        url = 'http://archiveofourown.org/works/{}?view_adult=true&view_full_work=true'.format(workid)
+        url = f'http://archiveofourown.org/works/{workid}?view_adult=true&view_full_work=true'
         logger.info("Extracting full work @ %s", url)

         soup = self._soup(url)
@@ -56,11 +56,11 @@ class ArchiveOfOurOwn(Site):
             title=soup.select('#workskin > .preface .title')[0].text.strip(),
             author=soup.select('#workskin .preface .byline a')[0].text.strip(),
             summary=soup.select('#workskin .preface .summary blockquote')[0].prettify(),
-            url='http://archiveofourown.org/works/{}'.format(workid)
+            url=f'http://archiveofourown.org/works/{workid}'
         )

         # Fetch the chapter list as well because it contains info that's not in the full work
-        nav_soup = self._soup('https://archiveofourown.org/works/{}/navigate'.format(workid))
+        nav_soup = self._soup(f'https://archiveofourown.org/works/{workid}/navigate')

         for index, chapter in enumerate(nav_soup.select('#main ol[role="navigation"] li')):
             link = chapter.find('a')
@@ -73,7 +73,7 @@ class ArchiveOfOurOwn(Site):
             story.add(Chapter(
                 title=link.string,
-                contents=self._chapter(soup.find(id='chapter-{}'.format(index + 1))),
+                contents=self._chapter(soup.find(id=f'chapter-{index + 1}')),
                 date=updated
             ))
@@ -109,12 +109,12 @@ class ArchiveOfOurOwnSeries(ArchiveOfOurOwn):
     def extract(self, url):
         seriesid = re.match(r'^https?://archiveofourown\.org/series/(\d+)/?.*', url).group(1)
-        soup = self._soup('http://archiveofourown.org/series/{}?view_adult=true'.format(seriesid))
+        soup = self._soup(f'http://archiveofourown.org/series/{seriesid}?view_adult=true')

         story = Section(
             title=soup.select('#main h2.heading')[0].text.strip(),
             author=soup.select('#main dl.series.meta a[rel="author"]')[0].string,
-            url='http://archiveofourown.org/series/{}'.format(seriesid)
+            url=f'http://archiveofourown.org/series/{seriesid}'
         )

         for work in soup.select('#main ul.series li.work'):


@@ -22,7 +22,7 @@ class FictionLive(Site):
     def extract(self, url):
         workid = re.match(r'^https?://fiction\.live/stories/[^\/]+/([0-9a-zA-Z]+)/?.*', url).group(1)

-        response = self.session.get('https://fiction.live/api/node/{}'.format(workid)).json()
+        response = self.session.get(f'https://fiction.live/api/node/{workid}').json()

         story = Section(
             title=response['t'],
@@ -42,7 +42,7 @@ class FictionLive(Site):
            # https://fiction.live/api/anonkun/chapters/SBBA49fQavNQMWxFT/1449266444062/1449615394752
            # https://fiction.live/api/anonkun/chapters/SBBA49fQavNQMWxFT/1502823848216/9999999999999998
            # i.e. format is [current timestamp] / [next timestamp - 1]
-            chapter_url = 'https://fiction.live/api/anonkun/chapters/{}/{}/{}'.format(workid, currc['ct'], nextc['ct'] - 1)
+            chapter_url = f'https://fiction.live/api/anonkun/chapters/{workid}/{currc["ct"]}/{nextc["ct"] - 1}'
            logger.info("Extracting chapter \"%s\" @ %s", currc['title'], chapter_url)
            data = self.session.get(chapter_url).json()
            html = []
@@ -70,7 +70,7 @@ class FictionLive(Site):
                 choices.sort(reverse=True)
                 html.append('<hr/><ul>')
                 for votecount, choice in choices:
-                    html.append('<li>{}: {}</li>'.format(choice, votecount))
+                    html.append(f'<li>{choice}: {votecount}</li>')
                 html.append('</ul><hr/>')

             story.add(Chapter(
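A quick illustration of the vote-rendering loop above, with hypothetical data: choices holds (votecount, choice) tuples, so sort(reverse=True) orders by count descending before rendering:

    choices = [(12, 'Go left'), (30, 'Go right')]
    choices.sort(reverse=True)
    # renders: <li>Go right: 30</li>, then <li>Go left: 12</li>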


@@ -23,7 +23,7 @@ class RoyalRoad(Site):
     def extract(self, url):
         workid = re.match(r'^https?://(?:www\.)?%s\.com/fiction/(\d+)/?.*' % self.domain, url).group(1)
-        soup = self._soup('https://www.{}.com/fiction/{}'.format(self.domain, workid))
+        soup = self._soup(f'https://www.{self.domain}.com/fiction/{workid}')
         # should have gotten redirected, for a valid title

         original_maxheaders = http.client._MAXHEADERS


@@ -178,7 +178,7 @@ class XenForo(Site):
            # Note: the fetched threadmarks can contain more placeholder elements to fetch. Ergo, loop.
            # Good test case: https://forums.sufficientvelocity.com/threads/ignition-mtg-multicross-planeswalker-pc.26099/threadmarks
            # e.g.: <li class="primaryContent threadmarkListItem ThreadmarkFetcher _depth0 filler" data-range-min="0" data-range-max="306" data-thread-id="26099" data-category-id="1" title="305 hidden">
-            response = self.session.post('https://{}/index.php?threads/threadmarks/load-range'.format(self.domain), data={
+            response = self.session.post(f'https://{self.domain}/index.php?threads/threadmarks/load-range', data={
                # I did try a fetch on min/data-min+data-max, but there seems
                # to be an absolute limit which the API fetch won't override
                'min': fetcher.get('data-range-min'),
@@ -275,7 +275,7 @@ class XenForo(Site):
                link.string = spoiler_title.get_text()
            else:
                if spoiler_title:
-                    link = '[SPOILER: {}]'.format(spoiler_title.get_text())
+                    link = f'[SPOILER: {spoiler_title.get_text()}]'
                else:
                    link = '[SPOILER]'
            new_spoiler = self._new_tag('div')


@@ -44,7 +44,7 @@ class XenForo2(XenForo):
                link.string = spoiler_title.get_text()
            else:
                if spoiler_title:
-                    link = '[SPOILER: {}]'.format(spoiler_title.get_text())
+                    link = f'[SPOILER: {spoiler_title.get_text()}]'
                else:
                    link = '[SPOILER]'
            new_spoiler = self._new_tag('div')