Mirror of git://github.com/kovidgoyal/calibre.git, synced 2026-05-09 00:33:26 +02:00
Merge branch 'cover' of https://github.com/xxyzz/calibre
This commit is contained in: commit bdfb061ac9
2 changed files with 8 additions and 28 deletions
|
|
@@ -264,20 +264,10 @@ def parse_index(self):
|
|||
return ans
|
||||
|
||||
def economist_parse_index(self, soup):
|
||||
img = None
|
||||
if edition_date:
|
||||
archive_url = "https://www.economist.com/weeklyedition/archive?year={}".format(edition_date[:4])
|
||||
archive = self.index_to_soup(archive_url)
|
||||
q = edition_date.replace('-', '')
|
||||
q = '/print-covers/{}_'.format(q)
|
||||
img = archive.find('img', srcset=lambda x: x and q in x)
|
||||
else:
|
||||
archive = self.index_to_soup("https://www.economist.com/weeklyedition/archive")
|
||||
div = archive.find(attrs={'class': 'edition-teaser__image'})
|
||||
if div is not None:
|
||||
img = div.find('img', srcset=True)
|
||||
if img:
|
||||
self.cover_url = img['srcset'].split(',')[-1].split()[0]
|
||||
script_tag = soup.select_one("script#__NEXT_DATA__")
|
||||
if script_tag is not None:
|
||||
data = json.loads(script_tag.string)
|
||||
self.cover_url = data['props']['pageProps']['content']['image']['main']['url']['canonical']
|
||||
self.log('Got cover:', self.cover_url)
|
||||
feeds = []
|
||||
for section in soup.findAll(**classes('layout-weekly-edition-section')):
|
||||
|
|
|
|||
|
|
@@ -264,20 +264,10 @@ def parse_index(self):
|
|||
return ans
|
||||
|
||||
def economist_parse_index(self, soup):
|
||||
img = None
|
||||
if edition_date:
|
||||
archive_url = "https://www.economist.com/weeklyedition/archive?year={}".format(edition_date[:4])
|
||||
archive = self.index_to_soup(archive_url)
|
||||
q = edition_date.replace('-', '')
|
||||
q = '/print-covers/{}_'.format(q)
|
||||
img = archive.find('img', srcset=lambda x: x and q in x)
|
||||
else:
|
||||
archive = self.index_to_soup("https://www.economist.com/weeklyedition/archive")
|
||||
div = archive.find(attrs={'class': 'edition-teaser__image'})
|
||||
if div is not None:
|
||||
img = div.find('img', srcset=True)
|
||||
if img:
|
||||
self.cover_url = img['srcset'].split(',')[-1].split()[0]
|
||||
script_tag = soup.select_one("script#__NEXT_DATA__")
|
||||
if script_tag is not None:
|
||||
data = json.loads(script_tag.string)
|
||||
self.cover_url = data['props']['pageProps']['content']['image']['main']['url']['canonical']
|
||||
self.log('Got cover:', self.cover_url)
|
||||
feeds = []
|
||||
for section in soup.findAll(**classes('layout-weekly-edition-section')):
|
||||
|
|
|
|||
Loading…
Reference in a new issue