diff --git a/defaults.ini b/defaults.ini
index 95dc19c8..1eccd9e5 100644
--- a/defaults.ini
+++ b/defaults.ini
@@ -923,6 +923,15 @@ extracategories:Harry Potter
 ## personal.ini, not defaults.ini.
 #is_adult:true
 
+[tokra.fandomnet.com]
+## Site dedicated to these categories/characters/ships
+extracategories:Stargate: SG-1
+
+## Some sites do not require a login, but do require the user to
+## confirm they are adult for adult content. In commandline version,
+## this should go in your personal.ini, not defaults.ini.
+#is_adult:true
+
 [www.adastrafanfic.com]
 ## Site dedicated to these categories/characters/ships
 extracategories:Star Trek
diff --git a/fanficdownloader/adapters/__init__.py b/fanficdownloader/adapters/__init__.py
index 1db14208..45a8647c 100644
--- a/fanficdownloader/adapters/__init__.py
+++ b/fanficdownloader/adapters/__init__.py
@@ -113,6 +113,7 @@ import adapter_imagineeficcom
 import adapter_buffynfaithnet
 import adapter_psychficcom
 import adapter_hennethannunnet
+import adapter_tokrafandomnetcom
 
 ## This bit of complexity allows adapters to be added by just adding
 ## importing. It eliminates the long if/else clauses we used to need
diff --git a/fanficdownloader/adapters/adapter_tokrafandomnetcom.py b/fanficdownloader/adapters/adapter_tokrafandomnetcom.py
new file mode 100644
index 00000000..87036da4
--- /dev/null
+++ b/fanficdownloader/adapters/adapter_tokrafandomnetcom.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Fanficdownloader team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import time
+import logging
+logger = logging.getLogger(__name__)
+import re
+import urllib2
+
+from .. import BeautifulSoup as bs
+from ..htmlcleanup import stripHTML
+from .. import exceptions as exceptions
+
+from base_adapter import BaseSiteAdapter, makeDate
+
+def getClass():
+    return TokraFandomnetComAdapter
+
+# Class name has to be unique. Our convention is camel case the
+# sitename with Adapter at the end. www is skipped.
+class TokraFandomnetComAdapter(BaseSiteAdapter):
+
+    def __init__(self, config, url):
+        BaseSiteAdapter.__init__(self, config, url)
+
+        self.decode = ["Windows-1252",
+                       "utf8"] # 1252 is a superset of iso-8859-1.
+                               # Most sites that claim to be
+                               # iso-8859-1 (and some that claim to be
+                               # utf8) are really windows-1252.
+        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
+        self.password = ""
+        self.is_adult=False
+
+        # get storyId from url--url validation guarantees query is only sid=1234
+        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
+        logger.debug("storyId: (%s)"%self.story.getMetadata('storyId'))
+
+        # normalized story URL.
+        self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
+
+        # Each adapter needs to have a unique site abbreviation.
+        self.story.setMetadata('siteabbrev','tokra')
+
+        # The date format will vary from site to site.
+        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
+        self.dateformat = "%m/%d/%Y"
+
+    @staticmethod # must be @staticmethod, don't remove it.
+    def getSiteDomain():
+        # The site domain. Does have www here, if it uses it. But it
+        # doesn't matter too much anymore.
+        return 'tokra.fandomnet.com'
+
+    def getSiteExampleURLs(self):
+        return "http://"+self.getSiteDomain()+"/viewstory.php?sid=1234"
+
+    def getSiteURLPattern(self):
+        return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
+
+    ## Getting the chapter list and the meta data, plus 'is adult' checking.
+    def extractChapterUrlsAndMetadata(self):
+
+        if self.is_adult or self.getConfig("is_adult"):
+            # Weirdly, different sites use different warning numbers.
+            # If the title search below fails, there's a good chance
+            # you need a different number. print data at that point
+            # and see what the 'click here to continue' url says.
+            addurl = "&ageconsent=ok&warning=3"
+        else:
+            addurl=""
+
+        # index=1 makes sure we see the story chapter index. Some
+        # sites skip that for one-chapter stories.
+        url = self.url+'&index=1'+addurl
+        logger.debug("URL: "+url)
+
+        try:
+            data = self._fetchUrl(url)
+        except urllib2.HTTPError, e:
+            if e.code == 404:
+                raise exceptions.StoryDoesNotExist(self.url)
+            else:
+                raise e
+
+        m = re.search(r"'viewstory.php\?sid=\d+((?:&amp;ageconsent=ok)?&amp;warning=\d+)'",data)
+        if m != None:
+            if self.is_adult or self.getConfig("is_adult"):
+                # We tried the default and still got a warning, so
+                # let's pull the warning number from the 'continue'
+                # link and reload data.
+                addurl = m.group(1)
+                # correct stupid &amp; error in url.
+                addurl = addurl.replace("&amp;","&")
+                url = self.url+'&index=1'+addurl
+                logger.debug("URL 2nd try: "+url)
+
+                try:
+                    data = self._fetchUrl(url)
+                except urllib2.HTTPError, e:
+                    if e.code == 404:
+                        raise exceptions.StoryDoesNotExist(self.url)
+                    else:
+                        raise e
+            else:
+                raise exceptions.AdultCheckRequired(self.url)
+
+        if "Access denied. This story has not been validated by the adminstrators of this site." in data:
+            raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
+
+        # use BeautifulSoup HTML parser to make everything easier to find.
+        soup = bs.BeautifulSoup(data)
+        #print data
+
+        # Now go hunting for all the meta data and the chapter list.
+
+        ## Title
+        a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
+        self.story.setMetadata('title',a.string)
+
+        # Find authorid and URL from... author url.
+        a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
+        self.story.setMetadata('authorId',a['href'].split('=')[1])
+        self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
+        self.story.setMetadata('author',a.string)
+
+        # Rating
+        rate = stripHTML(soup.find('div',{'id':'pagetitle'}))
+        rate = rate[rate.rindex('[')+1:rate.rindex(']')]
+        self.story.setMetadata('rating', rate)
+
+        # Find the chapters:
+        for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
+            # just in case there's tags, like in chapter titles.
+            self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']+addurl))
+
+        self.story.setMetadata('numChapters',len(self.chapterUrls))
+
+        # eFiction sites don't help us out a lot with their meta data
+        # formatting, so it's a little ugly.
+
+        metadiv = soup.find('div',{'class':'content'})
+        smalldiv = metadiv.find('div',{'class':'small'})
+
+        # tokra categories -> genre
+        # categories will be filled from ini.
+        genres = smalldiv.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
+        for genre in genres:
+            self.story.addToList('genre',genre.string)
+
+        chars = smalldiv.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
+        for char in chars:
+            self.story.addToList('characters',char.string)
+
+        metatext = stripHTML(smalldiv)
+
+        if 'Completed: Yes' in metatext:
+            self.story.setMetadata('status', 'Completed')
+        else:
+            self.story.setMetadata('status', 'In-Progress')
+
+        wordstart=metatext.rindex('Word count:')+12
+        words = metatext[wordstart:metatext.index(' ',wordstart)]
+        self.story.setMetadata('numWords', words)
+
+        datesdiv = soup.find('div',{'class':'bottom'})
+        dates = stripHTML(datesdiv).split()
+        # Published: 04/26/2011 Updated: 03/06/2013
+        self.story.setMetadata('datePublished', makeDate(dates[1], self.dateformat))
+        self.story.setMetadata('dateUpdated', makeDate(dates[3], self.dateformat))
+
+        try:
+            # Find Series name from series URL.
+            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
+            series_name = a.string
+            series_url = 'http://'+self.host+'/'+a['href']
+
+            # use BeautifulSoup HTML parser to make everything easier to find.
+            seriessoup = bs.BeautifulSoup(self._fetchUrl(series_url))
+            # can't use ^viewstory...$ in case of higher rated stories with javascript href.
+            storyas = seriessoup.findAll('a', href=re.compile(r'viewstory.php\?sid=\d+'))
+            i=1
+            for a in storyas:
+                # skip 'report this' and 'TOC' links
+                if 'contact.php' not in a['href'] and 'index' not in a['href']:
+                    if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
+                        self.setSeries(series_name, i)
+                        self.story.setMetadata('seriesUrl',series_url)
+                        break
+                    i+=1
+
+        except:
+            # I find it hard to care if the series parsing fails
+            pass
+
+        # remove 'small' leaving only summary.
+        smalldiv.extract()
+        self.setDescription(url,metadiv)
+
+    # grab the text for an individual chapter.
+    def getChapterText(self, url):
+
+        logger.debug('Getting chapter text from: %s' % url)
+
+        soup = bs.BeautifulSoup(self._fetchUrl(url))
+
+        div = soup.find('div', {'class' : 'content'})
+
+        if None == div:
+            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
+
+        # remove some decorations while keeping notes.
+        remove = div.find('div', {'id' : 'pagetitle'})
+        remove.extract()
+
+        for remove in div.findAll('div', {'class' : 'right'}):
+            remove.extract()
+
+        for remove in div.findAll('div', {'class' : 'left'}):
+            remove.extract()
+
+        return self.utf8FromSoup(url,div)
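
Illustration only, not part of the patch: a minimal standalone sketch of the URL handling the new adapter performs in __init__() and getSiteURLPattern(), namely validating a viewstory link, pulling out the sid value, and rebuilding the normalized story URL. Only the tokra.fandomnet.com domain and the example URL come from the patch; the helper name normalize_story_url and the rest of the structure are assumptions made for demonstration, using just the Python 2 standard library to match the adapter's urllib2-era code.

# Standalone sketch, assuming Python 2 (same era as the adapter's urllib2 /
# except-comma syntax). The helper below is hypothetical; it only mirrors how
# the adapter derives storyId and the normalized story URL.
import re
import urlparse

SITE = 'tokra.fandomnet.com'
STORY_URL_PATTERN = re.escape("http://" + SITE + "/viewstory.php?sid=") + r"\d+$"

def normalize_story_url(url):
    # Reject anything that is not exactly a viewstory.php?sid=<digits> URL.
    if not re.match(STORY_URL_PATTERN, url):
        raise ValueError("not a recognized story URL: %s" % url)
    # The pattern guarantees the query string is exactly sid=<digits>.
    story_id = urlparse.urlparse(url).query.split('=')[1]
    return story_id, "http://%s/viewstory.php?sid=%s" % (SITE, story_id)

print(normalize_story_url("http://tokra.fandomnet.com/viewstory.php?sid=1234"))
# ('1234', 'http://tokra.fandomnet.com/viewstory.php?sid=1234')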