mirror of
https://github.com/kemayo/leech
synced 2026-02-16 03:32:49 +01:00
Add support for sta.sh
This commit is contained in:
parent
37f20415ec
commit
d1a60dbb9c
2 changed files with 66 additions and 6 deletions
10
leech.py
10
leech.py
|
|
@ -27,11 +27,11 @@ def leech(url, filename=None):
|
|||
# check a bunch of things which are completely ff.n specific, to get text from it
|
||||
site = _get_site(url)
|
||||
if not site:
|
||||
return
|
||||
raise Exception("No site handler found")
|
||||
|
||||
story = site.extract(url, fetch)
|
||||
if not story:
|
||||
return
|
||||
raise Exception("Couldn't extract story")
|
||||
|
||||
metadata = {
|
||||
'title': story['title'],
|
||||
|
|
@ -74,7 +74,5 @@ if __name__ == '__main__':
|
|||
args = parser.parse_args()
|
||||
|
||||
filename = leech(args.url, filename=args.filename)
|
||||
if filename:
|
||||
print("File created:", filename)
|
||||
else:
|
||||
print("A problem occurred.")
|
||||
print("File created:", filename)
|
||||
|
||||
|
|
|
|||
62
sites/stash.py
Normal file
62
sites/stash.py
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
import re
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
|
||||
def match(url):
    """Return a regex match object if *url* is a sta.sh stack page, else None.

    Only stack pages (paths beginning with '2') are supported.
    """
    # Need a stack page
    stack_pattern = re.compile(r'^https?://sta\.sh/2.+/?.*')
    return stack_pattern.match(url)
|
||||
|
||||
|
||||
def extract(url, fetch):
    """Extract a story from a sta.sh stack page.

    Fetches *url* via the *fetch* callable and parses it with BeautifulSoup.
    Returns a dict with 'title', 'author', and 'chapters' (a list of
    per-chapter results from _extract_chapter), or None if the page does
    not look like a stash stack or has no thumbnails.
    """
    page = fetch(url)
    soup = BeautifulSoup(page, 'html5lib')
    content = soup.find(id="stash-body")
    if not content:
        return

    story = {}
    chapters = []

    # metadata = content.find(id='profile_top')
    story['title'] = str(soup.find(class_="stash-folder-name").h2.string)
    # The logo span reads "<author>'s"; strip only the possessive suffix.
    # (rstrip("'s") would strip any trailing run of the characters ' and s,
    # mangling names such as "Chris's" -> "Chri".)
    author = str(soup.find('span', class_="oh-stashlogo-name").string)
    if author.endswith("'s"):
        author = author[:-2]
    story['author'] = author

    thumbs = content.select(".stash-folder-stream .thumb")
    if not thumbs:
        return
    for thumb in thumbs:
        try:
            # Placeholder thumbs link to '#'; compare by value, not identity
            # ("is not" on a string literal is never a reliable comparison).
            if thumb['href'] != '#':
                chapters.append(_extract_chapter(thumb['href'], fetch))
        except Exception as e:
            # Best-effort: report and skip a failing chapter rather than
            # aborting the whole story.
            print(e)

    story['chapters'] = chapters

    return story
|
||||
|
||||
|
||||
def _extract_chapter(url, fetch):
    """Fetch one chapter page and return a (title, html_text) tuple.

    *fetch* is a callable taking the URL and returning the page body.
    Raises Exception when the page has no journal content or when the
    attribute cleanup fails.
    """
    print("Extracting chapter from", url)
    page = fetch(url)
    soup = BeautifulSoup(page, 'html5lib')

    content = soup.find(class_="journal-wrapper")
    if not content:
        raise Exception("No content")

    title = str(content.find(class_="gr-top").find(class_='metadata').h2.a.string)

    text = content.find(class_="text")

    # clean up some invalid xhtml attributes
    # TODO: be more selective about this somehow
    try:
        for tag in text.find_all(True):
            # Reset to an empty dict, not None: bs4 iterates tag.attrs when
            # rendering (e.g. in prettify below), and None breaks that.
            tag.attrs = {}
    except Exception as e:
        raise Exception("Trouble cleaning attributes", e)

    return (title, text.prettify())
|
||||
Loading…
Reference in a new issue