mirror of https://github.com/JimmXinu/FanFicFare.git
synced 2025-12-30 12:42:30 +01:00

commit b5a2fa1395: Merge in changes from master
4 changed files with 1847 additions and 1400 deletions
2450  defaults.ini
File diff suppressed because it is too large

fanficdownloader/adapters/__init__.py
@@ -1,179 +1,182 @@
# -*- coding: utf-8 -*-

# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os, re, sys, glob, types
from os.path import dirname, basename, normpath
import logging
import urlparse as up

logger = logging.getLogger(__name__)

from .. import exceptions as exceptions

## must import each adapter here.

import adapter_test1
import adapter_fanfictionnet
import adapter_castlefansorg
import adapter_fictionalleyorg
import adapter_fictionpresscom
import adapter_ficwadcom
import adapter_fimfictionnet
import adapter_harrypotterfanfictioncom
import adapter_mediaminerorg
import adapter_potionsandsnitchesnet
import adapter_tenhawkpresentscom
import adapter_adastrafanficcom
import adapter_thewriterscoffeeshopcom
import adapter_tthfanficorg
import adapter_twilightednet
import adapter_twiwritenet
import adapter_whoficcom
import adapter_siyecouk
import adapter_archiveofourownorg
import adapter_ficbooknet
import adapter_portkeyorg
import adapter_mugglenetcom
import adapter_hpfandomnet
import adapter_thequidditchpitchorg
import adapter_nfacommunitycom
import adapter_midnightwhispersca
import adapter_ksarchivecom
import adapter_archiveskyehawkecom
import adapter_squidgeorgpeja
import adapter_libraryofmoriacom
import adapter_wraithbaitcom
import adapter_checkmatedcom
import adapter_chaossycophanthexcom
import adapter_dramioneorg
import adapter_erosnsapphosycophanthexcom
import adapter_lumossycophanthexcom
import adapter_occlumencysycophanthexcom
import adapter_phoenixsongnet
import adapter_walkingtheplankorg
import adapter_ashwindersycophanthexcom
import adapter_thehexfilesnet
import adapter_dokugacom
import adapter_iketernalnet
import adapter_onedirectionfanfictioncom
import adapter_prisonbreakficnet
import adapter_storiesofardacom
import adapter_yourfanfictioncom
import adapter_samdeanarchivenu
import adapter_destinysgatewaycom
import adapter_ncisfictioncom
import adapter_stargateatlantisorg
import adapter_thealphagatecom
import adapter_fanfiktionde
import adapter_ponyfictionarchivenet
import adapter_sg1heliopoliscom
import adapter_ncisficcom
import adapter_nationallibrarynet
import adapter_themasquenet
import adapter_pretendercentrecom
import adapter_darksolaceorg
import adapter_finestoriescom
import adapter_hpfanficarchivecom
import adapter_svufictioncom
import adapter_twilightarchivescom
import adapter_wizardtalesnet
import adapter_nhamagicalworldsus
import adapter_hlfictionnet
import adapter_grangerenchantedcom
import adapter_dracoandginnycom
import adapter_scarvesandcoffeenet
import adapter_thepetulantpoetesscom
import adapter_wolverineandroguecom
import adapter_sinfuldesireorg
import adapter_merlinficdtwinscouk
import adapter_thehookupzonenet
import adapter_bloodtiesfancom
+import adapter_indeathnet
+import adapter_jlaunlimitedcom
import adapter_qafficcom
+

## This bit of complexity allows adapters to be added by just adding
## an import above.  It eliminates the long if/else clauses we used
## to need to pick out the adapter.

## List of registered site adapters.
__class_list = []

def imports():
    for name, val in globals().items():
        if isinstance(val, types.ModuleType):
            yield val.__name__

for x in imports():
    if "fanficdownloader.adapters.adapter_" in x:
        #print x
        __class_list.append(sys.modules[x].getClass())

def getAdapter(config,url):

    logger.debug("trying url:"+url)
    (cls,fixedurl) = getClassFor(url)
    logger.debug("fixedurl:"+fixedurl)
    if cls:
        adapter = cls(config,fixedurl) # raises InvalidStoryURL
        return adapter
    # No adapter found.
    raise exceptions.UnknownSite( url, [cls.getSiteDomain() for cls in __class_list] )

def getConfigSections():
    return [cls.getConfigSection() for cls in __class_list]

def getConfigSectionFor(url):
    (cls,fixedurl) = getClassFor(url)
    if cls:
        return cls.getConfigSection()

    # No adapter found.
    raise exceptions.UnknownSite( url, [cls.getSiteDomain() for cls in __class_list] )

def getClassFor(url):
    ## fix up leading protocol.
    fixedurl = re.sub(r"(?i)^[htps]+[:/]+","http://",url.strip())
    if not fixedurl.startswith("http"):
        fixedurl = "http://%s"%url
    ## remove any trailing '#' locations.
    fixedurl = re.sub(r"#.*$","",fixedurl)

    ## remove any trailing '&' parameters--?sid=999 will be left.
    ## that's all that any of the current adapters need or want.
    fixedurl = re.sub(r"&.*$","",fixedurl)

    parsedUrl = up.urlparse(fixedurl)
    domain = parsedUrl.netloc.lower()
    if( domain != parsedUrl.netloc ):
        fixedurl = fixedurl.replace(parsedUrl.netloc,domain)

    cls = getClassFromList(domain)
    if not cls and domain.startswith("www."):
        domain = domain.replace("www.","")
        logger.debug("trying site:without www: "+domain)
        cls = getClassFromList(domain)
        fixedurl = fixedurl.replace("http://www.","http://")
    if not cls:
        logger.debug("trying site:www."+domain)
        cls = getClassFromList("www."+domain)
        fixedurl = fixedurl.replace("http://","http://www.")

    return (cls,fixedurl)

def getClassFromList(domain):
    for cls in __class_list:
        if cls.matchesSite(domain):
            return cls
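
The registration trick above is worth seeing in isolation: imports() walks globals() for module objects, and any module whose dotted name contains "adapters.adapter_" contributes its getClass() result to __class_list, so adding a site really is just one import line. A minimal self-contained sketch; the two fake adapter modules are hypothetical stand-ins for real adapter_*.py files:

import sys, types

def make_fake_adapter(name, domain):
    # Stand-in for an adapter_*.py module that defines getClass().
    mod = types.ModuleType(name)
    cls = type('Adapter', (), {'domain': domain})
    mod.getClass = lambda c=cls: c
    sys.modules[name] = mod
    return mod

adapter_examplecom = make_fake_adapter('adapters.adapter_examplecom', 'example.com')
adapter_samplenet = make_fake_adapter('adapters.adapter_samplenet', 'sample.net')

__class_list = []
for name, val in list(globals().items()):    # the same scan imports() does
    if isinstance(val, types.ModuleType) and 'adapters.adapter_' in val.__name__:
        __class_list.append(sys.modules[val.__name__].getClass())

print([c.domain for c in __class_list])      # ['example.com', 'sample.net']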
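getClassFor() also normalizes the URL before any adapter sees it. Re-running just its regex fix-ups shows the effect; the regexes are copied from the function, the sample URL is hypothetical, and the later urlparse-based netloc lowercasing and www./non-www retries are omitted:

import re

def fixup(url):
    # collapse any mangled leading protocol ("htp:/", "HTTPS://", ...)
    fixed = re.sub(r"(?i)^[htps]+[:/]+", "http://", url.strip())
    if not fixed.startswith("http"):
        fixed = "http://%s" % url
    fixed = re.sub(r"#.*$", "", fixed)   # drop trailing #fragment
    fixed = re.sub(r"&.*$", "", fixed)   # drop &params; ?sid=999 survives
    return fixed

print(fixup("HTTPS://www.example.com/viewstory.php?sid=999&chapter=3#review"))
# -> http://www.example.com/viewstory.php?sid=999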
178  fanficdownloader/adapters/adapter_indeathnet.py  Normal file
@@ -0,0 +1,178 @@
# -*- coding: utf-8 -*-

# Copyright 2012 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2

from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions

from base_adapter import BaseSiteAdapter, makeDate

def getClass():
    return InDeathNetAdapter

# Class name has to be unique.  Our convention is camel case the
# sitename with Adapter at the end.  www is skipped.
class InDeathNetAdapter(BaseSiteAdapter):

    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)

        self.decode = ["Windows-1252",
                       "utf8"] # 1252 is a superset of iso-8859-1.
                               # Most sites that claim to be
                               # iso-8859-1 (and some that claim to be
                               # utf8) are really windows-1252.
        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
        self.password = ""
        self.is_adult=False

        # get storyId from url--url validation guarantees query correct
        m = re.match(self.getSiteURLPattern(),url)
        if m:
            self.story.setMetadata('storyId',m.group('id'))
            logger.debug("storyId: (%s)"%self.story.getMetadata('storyId'))
            # normalized story URL.
            self._setURL('http://www.' + self.getSiteDomain() + '/blog/archive/'+self.story.getMetadata('storyId')+'-'+m.group('name')+'/')
        else:
            raise exceptions.InvalidStoryURL(url,
                                             self.getSiteDomain(),
                                             self.getSiteExampleURLs())

        # Each adapter needs to have a unique site abbreviation.
        self.story.setMetadata('siteabbrev','idn')

        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%d %B %Y"

    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        # The site domain.  Does have www here, if it uses it.
        return 'indeath.net'

    def getSiteExampleURLs(self):
        return "http://"+self.getSiteDomain()+"/blog/archive/123-story-in-death/"

    def getSiteURLPattern(self):
        # http://www.indeath.net/blog/archive/169-ransom-in-death/
        return re.escape("http://")+re.escape(self.getSiteDomain())+r"/blog/(archive/)?(?P<id>\d+)\-(?P<name>[a-z0-9\-]*)/?$"

    def getDateFromComponents(self, postmonth, postday):
        ym = re.search("Entries\ in\ (?P<mon>January|February|March|April|May|June|July|August|September|October|November|December)\ (?P<year>\d{4})",postmonth)
        d = re.search("(?P<day>\d{2})\ (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)",postday)
        postdate = makeDate(d.group('day')+' '+ym.group('mon')+' '+ym.group('year'),self.dateformat)
        return postdate

    ## Getting the chapter list and the meta data, plus 'is adult' checking.
    def extractChapterUrlsAndMetadata(self):

        url = self.url
        try:
            data = self._fetchUrl(url)
        except urllib2.HTTPError, e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist(self.url)
            else:
                raise e

        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = bs.BeautifulSoup(data)

        # Now go hunting for all the meta data and the chapter list.

        ## Title
        h = soup.find('a', id="blog_title")
        t = h.find('span')
        self.story.setMetadata('title',t.contents[0].string.strip())

        s = t.find('div')
        if s != None:
            self.setDescription(url,s)

        # Find authorid and URL from the first author link (don't yet
        # anchor on 'Recent Entries'--let's see if that is required)
        a = soup.find('a', href=re.compile(r"http://www.indeath.net/user/\d+\-[a-z0-9]+/$")) #http://www.indeath.net/user/9083-cyrex/
        m = re.search('http://www.indeath.net/user/(?P<id>\d+)\-(?P<name>[a-z0-9]*)/$',a['href'])
        self.story.setMetadata('authorId',m.group('id'))
        self.story.setMetadata('authorUrl',a['href'])
        self.story.setMetadata('author',m.group('name'))

        # Find the chapters:
        chapters=soup.findAll('a', title="View entry", href=re.compile(r'http://www.indeath.net/blog/'+self.story.getMetadata('storyId')+"/entry\-(\d+)\-([^/]*)/$"))

        #reverse the list since newest at the top
        chapters.reverse()

        # Get date published & updated from first & last entries
        posttable=soup.find('div', id="main_column")

        postmonths=posttable.findAll('th', text=re.compile(r'Entries\ in\ '))
        postmonths.reverse()

        postdates=posttable.findAll('span', _class="desc", text=re.compile('\d{2}\ (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'))
        postdates.reverse()

        self.story.setMetadata('datePublished',self.getDateFromComponents(postmonths[0],postdates[0]))
        self.story.setMetadata('dateUpdated',self.getDateFromComponents(postmonths[len(postmonths)-1],postdates[len(postdates)-1]))

        # Process List of Chapters
        self.story.setMetadata('numChapters',len(chapters))
        logger.debug("numChapters: (%s)"%self.story.getMetadata('numChapters'))
        for x in range(0,len(chapters)):
            # just in case there's tags, like <i> in chapter titles.
            chapter=chapters[x]
            if len(chapters)==1:
                self.chapterUrls.append((self.story.getMetadata('title'),chapter['href']))
            else:
                ct = stripHTML(chapter)
                tnew = re.match("(?i)"+self.story.getMetadata('title')+r" - (?P<newtitle>.*)$",ct)
                if tnew:
                    chaptertitle = tnew.group('newtitle')
                else:
                    chaptertitle = ct
                self.chapterUrls.append((chaptertitle,chapter['href']))

    # grab the text for an individual chapter.
    def getChapterText(self, url):
        logger.debug('Getting chapter text from: %s' % url)

        #chapter=bs.BeautifulSoup('<div class="story"></div>')
        data = self._fetchUrl(url)
        soup = bs.BeautifulSoup(data,selfClosingTags=('br','hr','span','center'))

        chapter = soup.find("div", "entry_content")

        if None == chapter:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s!  Missing required element!" % url)

        return self.utf8FromSoup(url,chapter)
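
getDateFromComponents() above stitches a date together from two different page fragments: a month heading ("Entries in September 2012") and a per-entry day span ("14 Sep"). A condensed sketch with hypothetical inputs and simplified regexes, using datetime.strptime directly where the adapter calls its makeDate() helper:

import re
from datetime import datetime

postmonth = "Entries in September 2012"   # from the <th> heading
postday = "14 Sep"                        # from the entry's date span

ym = re.search(r"Entries in (?P<mon>\w+) (?P<year>\d{4})", postmonth)
d = re.search(r"(?P<day>\d{2}) ", postday)
# "%d %B %Y" is the adapter's self.dateformat
postdate = datetime.strptime(
    d.group('day')+' '+ym.group('mon')+' '+ym.group('year'), "%d %B %Y")
print(postdate.date())    # 2012-09-14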
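Both new adapters set self.decode = ["Windows-1252", "utf8"], and the comment's point is that sites lie about their encoding. That translates to a try-in-order decode; a sketch under that assumption (the helper name is hypothetical; cp1252 is Python's codec for Windows-1252, and since it accepts almost any byte string, its position in the list is a per-site judgment call):

def decode_page(raw):
    # Try each claimed/likely encoding in order, like the adapters'
    # self.decode list; cp1252 decodes every iso-8859-1 page and also
    # the 0x93/0x94 "smart quote" bytes such pages actually contain.
    for enc in ("cp1252", "utf-8"):
        try:
            return raw.decode(enc)
        except UnicodeDecodeError:
            continue
    return raw.decode("utf-8", "replace")    # last resort

print(decode_page(b"\x93smart quotes\x94"))  # “smart quotes”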
258  fanficdownloader/adapters/adapter_jlaunlimitedcom.py  Normal file
@@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-

# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2

from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions

from base_adapter import BaseSiteAdapter, makeDate


def getClass():
    return JLAUnlimitedComAdapter


class JLAUnlimitedComAdapter(BaseSiteAdapter):

    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)

        self.decode = ["Windows-1252",
                       "utf8"] # 1252 is a superset of iso-8859-1.
                               # Most sites that claim to be
                               # iso-8859-1 (and some that claim to be
                               # utf8) are really windows-1252.
        self.username = "" # if left empty, site doesn't return any message at all.
        self.password = ""
        self.is_adult=False

        # get storyId from url--url validation guarantees query is only sid=1234
        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
        logger.debug("storyId: (%s)"%self.story.getMetadata('storyId'))

        self._setURL('http://' + self.getSiteDomain() + '/eFiction1.1/viewstory.php?sid='+self.story.getMetadata('storyId'))

        # Each adapter needs to have a unique site abbreviation.
        self.story.setMetadata('siteabbrev','jla')

        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%m/%d/%y"

    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        # The site domain.  Does have www here, if it uses it.
        return 'www.jlaunlimited.com'

    def getSiteExampleURLs(self):
        return "http://"+self.getSiteDomain()+"/eFiction1.1/viewstory.php?sid=1234"

    def getSiteURLPattern(self):
        return re.escape("http://"+self.getSiteDomain()+"/eFiction1.1/viewstory.php?sid=")+r"\d+$"

    ## Getting the chapter list and the meta data, plus 'is adult' checking.
    def extractChapterUrlsAndMetadata(self):

        if self.is_adult or self.getConfig("is_adult"):
            # Weirdly, different sites use different warning numbers.
            # If the title search below fails, there's a good chance
            # you need a different number.  print data at that point
            # and see what the 'click here to continue' url says.
            addurl = "&ageconsent=ok&warning=4" # XXX
        else:
            addurl=""

        # index=1 makes sure we see the story chapter index.  Some
        # sites skip that for one-chapter stories.
        url = self.url+'&index=1'+addurl
        logger.debug("URL: "+url)

        try:
            data = self._fetchUrl(url)
        except urllib2.HTTPError, e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist(self.url)
            else:
                raise e

        # Assume that if there is a url with 'warning=#' in the page,
        # then it is a 'check' page.
        m = re.search(r"'viewstory.php\?sid=\d+((?:&amp;ageconsent=ok)?&amp;warning=\d+)",data)
        if m != None:
            if self.is_adult or self.getConfig("is_adult"):
                # We tried the default and still got a warning, so
                # let's pull the warning number from the 'continue'
                # link and reload data.
                addurl = m.group(1)
                # correct stupid &amp; error in url.
                addurl = addurl.replace("&amp;","&")
                url = self.url+'&index=1'+addurl
                logger.debug("URL 2nd try: "+url)

                try:
                    data = self._fetchUrl(url)
                except urllib2.HTTPError, e:
                    if e.code == 404:
                        raise exceptions.StoryDoesNotExist(self.url)
                    else:
                        raise e
            else:
                raise exceptions.AdultCheckRequired(self.url)

        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = bs.BeautifulSoup(data)
        # print data

        # Now go hunting for all the meta data and the chapter list.

        ## Title
        a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')))
        self.story.setMetadata('title',a.string)

        # Find authorid and URL from... author url.
        a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
        self.story.setMetadata('authorId',a['href'].split('=')[1])
        self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
        self.story.setMetadata('author',a.string)

        # Find the chapters:
        for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+")):
            # just in case there's tags, like <i> in chapter titles.
            self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/eFiction1.1/'+chapter['href']+addurl))

        self.story.setMetadata('numChapters',len(self.chapterUrls))

        # eFiction sites don't help us out a lot with their meta data
        # formatting, so it's a little ugly.

        # utility method
        def defaultGetattr(d,k):
            try:
                return d[k]
            except:
                return ""

        # <span class="label">Rated:</span> NC-17<br /> etc
        labels = soup.findAll('span',{'class':'label'})
        for labelspan in labels:
            value = labelspan.nextSibling
            label = labelspan.string

            if 'Summary' in label:
                ## Everything until the next span class='label'
                svalue = ""
                while not defaultGetattr(value,'class') == 'label':
                    svalue += str(value)
                    value = value.nextSibling
                self.setDescription(url,svalue)
                #self.story.setMetadata('description',stripHTML(svalue))

            if 'Rated' in label:
                self.story.setMetadata('rating', value)

            if 'Word count' in label:
                self.story.setMetadata('numWords', value)

            if 'Categories' in label:
                cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
                catstext = [cat.string for cat in cats]
                for cat in catstext:
                    self.story.addToList('category',cat.string)

            if 'Characters' in label:
                chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
                charstext = [char.string for char in chars]
                for char in charstext:
                    self.story.addToList('characters',char.string)

            ## Not all sites use Genre, but there's no harm to
            ## leaving it in.  Check to make sure the type_id number
            ## is correct, though--it's site specific.
            if 'Genre' in label:
                genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1')) # XXX
                genrestext = [genre.string for genre in genres]
                self.genre = ', '.join(genrestext)
                for genre in genrestext:
                    self.story.addToList('genre',genre.string)

            ## Not all sites use Warnings, but there's no harm to
            ## leaving it in.  Check to make sure the type_id number
            ## is correct, though--it's site specific.
            if 'Warnings' in label:
                warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')) # XXX
                warningstext = [warning.string for warning in warnings]
                self.warning = ', '.join(warningstext)
                for warning in warningstext:
                    self.story.addToList('warnings',warning.string)

            if 'Completed' in label:
                if 'Yes' in value:
                    self.story.setMetadata('status', 'Completed')
                else:
                    self.story.setMetadata('status', 'In-Progress')

            if 'Published' in label:
                self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))

            if 'Updated' in label:
                # there's a stray [ at the end.
                #value = value[0:-1]
                self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))

        try:
            # Find Series name from series URL.
            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
            series_name = a.string
            series_url = 'http://'+self.host+'/eFiction1.1/'+a['href']

            # use BeautifulSoup HTML parser to make everything easier to find.
            seriessoup = bs.BeautifulSoup(self._fetchUrl(series_url))
            storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
            i=1
            for a in storyas:
                if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
                    self.setSeries(series_name, i)
                    break
                i+=1

        except:
            # I find it hard to care if the series parsing fails
            pass

    # grab the text for an individual chapter.
    def getChapterText(self, url):

        logger.debug('Getting chapter text from: %s' % url)

        soup = bs.BeautifulSoup(self._fetchUrl(url),
                                selfClosingTags=('br','hr')) # otherwise soup eats the br/hr tags.

        div = soup.find('div', {'id' : 'story'})

        if None == div:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s!  Missing required element!" % url)

        return self.utf8FromSoup(url,div)
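
The two-step adult check in extractChapterUrlsAndMetadata() above is the subtle part of this adapter: guess "&warning=4" first, and if the response still carries a 'click here to continue' link, harvest the site's real warning number from that link and refetch. A sketch of just the harvesting step; the page snippet is hypothetical, the regex is the one from the diff:

import re

def adult_url(base_url, page_source):
    m = re.search(r"'viewstory.php\?sid=\d+((?:&amp;ageconsent=ok)?&amp;warning=\d+)", page_source)
    if m:
        # correct the &amp; HTML escaping so the query string is usable.
        return base_url + '&index=1' + m.group(1).replace("&amp;", "&")
    return None

page = "<a href='viewstory.php?sid=99&amp;ageconsent=ok&amp;warning=5'>continue</a>"
print(adult_url("http://www.jlaunlimited.com/eFiction1.1/viewstory.php?sid=99", page))
# -> http://www.jlaunlimited.com/eFiction1.1/viewstory.php?sid=99&index=1&ageconsent=ok&warning=5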
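The eFiction metadata loop keys everything off <span class="label"> elements and reads each value from the text node that follows. A sketch of the same walk against a hypothetical fragment, written with bs4 for convenience here while the adapter itself uses the project's bundled BeautifulSoup 3:

from bs4 import BeautifulSoup

html = ('<span class="label">Rated:</span> NC-17<br/>'
        '<span class="label">Word count:</span> 4200<br/>'
        '<span class="label">Completed:</span> Yes<br/>')

soup = BeautifulSoup(html, 'html.parser')
meta = {}
for labelspan in soup.find_all('span', {'class': 'label'}):
    label = labelspan.string.rstrip(':')
    value = labelspan.next_sibling.strip()   # the text node after the span
    meta[label] = value

print(meta)   # {'Rated': 'NC-17', 'Word count': '4200', 'Completed': 'Yes'}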