diff --git a/defaults.ini b/defaults.ini
index d8d29f56..da1fe27f 100644
--- a/defaults.ini
+++ b/defaults.ini
@@ -1085,6 +1085,27 @@ extracategories:Stargate: SG-1
## this should go in your personal.ini, not defaults.ini.
#is_adult:true
+[trekiverse.org]
+## Site dedicated to these categories/characters/ships
+extracategories:Star Trek
+
+## Some sites require login (or login for some rated stories). The
+## program can prompt you, or you can save it in config. In
+## commandline version, this should go in your personal.ini, not
+## defaults.ini.
+#username:YourName
+#password:yourpassword
+
+## Some sites also require the user to confirm they are adult for
+## adult content. In commandline version, this should go in your
+## personal.ini, not defaults.ini.
+#is_adult:true
+
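+## 'awards' isn't one of the standard metadata entries;
+## extra_valid_entries registers it so the Awards list this site's
+## adapter collects can appear in output, labeled as set below.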
+extra_valid_entries:awards
+awards_label:Awards
+
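+## Keep award banner images from being picked as the cover image;
+## e.g. a (hypothetical) art/2012Awards.jpg would be excluded.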
+cover_exclusion_regexp:art/.*Awards.jpg
+
[www.adastrafanfic.com]
## Site dedicated to these categories/characters/ships
extracategories:Star Trek
diff --git a/fanficdownloader/adapters/__init__.py b/fanficdownloader/adapters/__init__.py
index 67b20a19..2096cfd6 100644
--- a/fanficdownloader/adapters/__init__.py
+++ b/fanficdownloader/adapters/__init__.py
@@ -121,6 +121,7 @@ import adapter_simplyundeniablecom
import adapter_scarheadnet
import adapter_fictionpadcom
import adapter_storiesonlinenet
+import adapter_trekiverseorg
## This bit of complexity allows adapters to be added by just adding
## an import. It eliminates the long if/else clauses we used to need
diff --git a/fanficdownloader/adapters/adapter_trekiverseorg.py b/fanficdownloader/adapters/adapter_trekiverseorg.py
new file mode 100644
index 00000000..58852aa1
--- /dev/null
+++ b/fanficdownloader/adapters/adapter_trekiverseorg.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Fanficdownloader team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import time
+import logging
+logger = logging.getLogger(__name__)
+import re
+import urllib2
+
+from .. import BeautifulSoup as bs
+from ..htmlcleanup import stripHTML
+from .. import exceptions as exceptions
+
+from base_adapter import BaseSiteAdapter, makeDate
+
+def getClass():
+ return TrekiverseOrgAdapter
+
+# Class name has to be unique. Our convention is camel case the
+# sitename with Adapter at the end. www is skipped.
+class TrekiverseOrgAdapter(BaseSiteAdapter):
+
+ def __init__(self, config, url):
+ BaseSiteAdapter.__init__(self, config, url)
+
+ self.decode = ["iso-8859-1",
+ "Windows-1252"] # 1252 is a superset of iso-8859-1.
+ # Most sites that claim to be
+ # iso-8859-1 (and some that claim to be
+ # utf8) are really windows-1252.
+ self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
+ self.password = ""
+ self.is_adult=False
+
+ # get storyId from url--url validation guarantees query is only sid=1234
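+        # e.g. query 'sid=1234' yields storyId '1234'.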
+ self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
+
+ # normalized story URL.
+ self._setURL("http://"+self.getSiteDomain()\
+ +"/efiction/viewstory.php?sid="+self.story.getMetadata('storyId'))
+
+ # Each adapter needs to have a unique site abbreviation.
+ self.story.setMetadata('siteabbrev','trkvs')
+
+ # The date format will vary from site to site.
+ # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
+ self.dateformat = "%d/%m/%Y"
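+        # e.g. 25/12/2013.  (Note: the Published/Updated fields below
+        # are parsed with an explicit "%d %b %Y" format instead.)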
+
+ @staticmethod # must be @staticmethod, don't remove it.
+ def getSiteDomain():
+ # The site domain.
+ return 'trekiverse.org'
+
+ @classmethod
+    def getSiteExampleURLs(cls):
+        return "http://"+cls.getSiteDomain()+"/efiction/viewstory.php?sid=1234"
+
+ def getSiteURLPattern(self):
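+        # Matches e.g. http://trekiverse.org/efiction/viewstory.php?sid=1234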
+ return r'http://trekiverse\.org/efiction/viewstory\.php\?sid=\d+'
+
+ ## Login seems to be reasonably standard across eFiction sites.
+ def needToLoginCheck(self, data):
+ if 'Registered Users Only' in data \
+ or 'There is no such account on our website' in data \
+ or "That password doesn't match the one in our database" in data:
+ return True
+ else:
+ return False
+
+ def performLogin(self, url):
+ params = {}
+
+ if self.password:
+ params['penname'] = self.username
+ params['password'] = self.password
+ else:
+ params['penname'] = self.getConfig("username")
+ params['password'] = self.getConfig("password")
+ params['cookiecheck'] = '1'
+ params['submit'] = 'Submit'
+
+ loginUrl = 'http://' + self.getSiteDomain() + '/efiction/user.php?action=login'
+ logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
+ params['penname']))
+
+ d = self._fetchUrl(loginUrl, params)
+
+ if "Member Account" not in d : #Member Account
+ logger.info("Failed to login to URL %s as %s" % (loginUrl,
+ params['penname']))
+            raise exceptions.FailedToLogin(url,params['penname'])
+ else:
+ return True
+
+
+ ## Getting the chapter list and the meta data, plus 'is adult' checking.
+ def extractChapterUrlsAndMetadata(self):
+
+ if self.is_adult or self.getConfig("is_adult"):
+ # Weirdly, different sites use different warning numbers.
+ # If the title search below fails, there's a good chance
+ # you need a different number. print data at that point
+ # and see what the 'click here to continue' url says.
+ addurl = "&index=1&ageconsent=ok&warning=5"
+ else:
+ addurl="&index=1"
+
+ # index=1 makes sure we see the story chapter index. Some
+ # sites skip that for one-chapter stories.
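+        # e.g. the fetched URL becomes viewstory.php?sid=1234&index=1
+        # (plus &ageconsent=ok&warning=5 when adult is set).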
+ url = self.url+addurl
+ logger.debug("URL: "+url)
+
+ try:
+ data = self._fetchUrl(url)
+ except urllib2.HTTPError, e:
+ if e.code == 404:
+ raise exceptions.StoryDoesNotExist(self.url)
+ else:
+ raise e
+
+ if self.needToLoginCheck(data):
+ # need to log in for this one.
+ self.performLogin(url)
+ data = self._fetchUrl(url)
+
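+        # If the page still shows an adult warning, its 'continue' link
+        # looks like viewstory.php?sid=1234&amp;ageconsent=ok&warning=5;
+        # capture the ageconsent/warning part so we can retry with it.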
+ m = re.search(r"'viewstory.php\?sid=\d+((?:&ageconsent=ok)?&warning=\d+)'",data)
+ if m != None:
+ if self.is_adult or self.getConfig("is_adult"):
+ # We tried the default and still got a warning, so
+ # let's pull the warning number from the 'continue'
+ # link and reload data.
+ addurl = m.group(1)
+                # correct stupid &amp; error in url.
+                addurl = addurl.replace("&amp;","&")
+ url = self.url+'&index=1'+addurl
+ logger.debug("URL 2nd try: "+url)
+
+ try:
+ data = self._fetchUrl(url)
+ except urllib2.HTTPError, e:
+ if e.code == 404:
+ raise exceptions.StoryDoesNotExist(self.url)
+ else:
+ raise e
+ else:
+ raise exceptions.AdultCheckRequired(self.url)
+
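+        # Note: 'adminstrators' (sic) below is presumably the site's own
+        # misspelling; don't correct it or the match will fail.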
+ if "Access denied. This story has not been validated by the adminstrators of this site." in data:
+ raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
+
+ # use BeautifulSoup HTML parser to make everything easier to find.
+ soup = bs.BeautifulSoup(data)
+
+ # Now go hunting for all the meta data and the chapter list.
+
+ ## Title and author
+ a = soup.find('div', {'id' : 'pagetitle'})
+ aut = a.find('a', href=re.compile(r"^viewuser\.php\?uid="))
+ self.story.setMetadata('authorId',aut['href'].split('=')[1])
+ self.story.setMetadata('authorUrl','http://'+self.host+'/efiction/'+aut['href'])
+ self.story.setMetadata('author',aut.string)
+
+ ttl = a.find('a', href=re.compile(r'^viewstory.php\?sid=%s$'%self.story.getMetadata('storyId')))
+ self.story.setMetadata('title',ttl.string)
+
+ # Find the chapters:
+ outputdiv = soup.find('div', {'id':'output'})
+        # (amp;)? because it should be &, but is escaped to &amp; in URL.
+ # viewstory.php?sid=35&chapter=3
+ chapters=outputdiv.findAll('a', href=re.compile(r'^viewstory.php\?sid=%s&(amp;)?chapter=\d+$'%self.story.getMetadata('storyId')))
+ if len(chapters)==0:
+ raise exceptions.FailedToDownload(self.getSiteDomain() +" says: No php/html chapters found.")
+ if len(chapters)==1:
+ self.chapterUrls.append((self.story.getMetadata('title'),'http://'+self.host+'/efiction/'+chapters[0]['href']))
+ else:
+ for chapter in chapters:
+ # just in case there's tags, like in chapter titles.
+ self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/efiction/'+chapter['href']))
+
+ self.story.setMetadata('numChapters',len(self.chapterUrls))
+
+ # eFiction sites don't help us out a lot with their meta data
+        # formatting, so it's a little ugly.
+
+ def defaultGetattr(d,k):
+ try:
+ return d[k]
+ except:
+ return ""
+
+        # <span class="label">Rated:</span> NC-17<br /> etc
+ labels = soup.findAll('span',{'class':'label'})
+ for labelspan in labels:
+ value = labelspan.nextSibling
+ label = labelspan.string
+
+ if 'Summary' in label:
+ ## Everything until the next span class='label'
+ svalue = ''
+ while value and not defaultGetattr(value,'class') == 'label':
+ svalue += str(value)
+ value = value.nextSibling
+                # sometimes poorly formatted desc (<p> w/o </p>) leads
+                # to all labels being included.
+                svalue=svalue[:svalue.find('<span class="label">')]
+                self.setDescription(url,svalue)
+                #self.story.setMetadata('description',stripHTML(svalue))
+
+            if 'Rated' in label:
+                self.story.setMetadata('rating', value)
+
+            if 'Word count' in label:
+                self.story.setMetadata('numWords', value)
+
+            if 'Categories' in label:
+                cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
+                catstext = [cat.string for cat in cats]
+                for cat in catstext:
+                    self.story.addToList('category',cat.string)
+
+            if 'Characters' in label:
+                chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
+                charstext = [char.string for char in chars]
+                for char in charstext:
+                    self.story.addToList('characters',char.string)
+
+            if 'Genre' in label:
+                genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&amp;type_id=9'))
+                genrestext = [genre.string for genre in genres]
+                self.genre = ', '.join(genrestext)
+                for genre in genrestext:
+                    self.story.addToList('genre',genre.string)
+
+            if 'Awards' in label:
+                awards = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&amp;type_id=12'))
+                awardstext = [award.string for award in awards]
+                self.award = ', '.join(awardstext)
+                for award in awardstext:
+                    self.story.addToList('awards',award.string)
+
+            if 'Pairing' in label:
+                ships = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&amp;type_id=10'))
+                shipstext = [ship.string for ship in ships]
+                self.ship = ', '.join(shipstext)
+                for ship in shipstext:
+                    self.story.addToList('ships',ship.string)
+
+            if 'Warnings' in label:
+                warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&amp;type_id=11'))
+                warningstext = [warning.string for warning in warnings]
+                self.warning = ', '.join(warningstext)
+                for warning in warningstext:
+                    self.story.addToList('warnings',warning.string)
+
+            if 'Completed' in label:
+                if 'Yes' in value:
+                    self.story.setMetadata('status', 'Completed')
+                else:
+                    self.story.setMetadata('status', 'In-Progress')
+
+            if 'Published' in label:
+                self.story.setMetadata('datePublished', makeDate(value.strip(), "%d %b %Y"))
+
+            if 'Updated' in label:
+                self.story.setMetadata('dateUpdated', makeDate(value.strip(), "%d %b %Y"))
+
+        try:
+            # Find Series name from series URL.
+            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
+            series_name = a.string
+            series_url = 'http://'+self.host+'/efiction/'+a['href']
+
+            # use BeautifulSoup HTML parser to make everything easier to find.
+            seriessoup = bs.BeautifulSoup(self._fetchUrl(series_url))
+            storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
+            i=1
+            for a in storyas:
+                if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
+                    self.setSeries(series_name, i)
+                    self.story.setMetadata('seriesUrl',series_url)
+                    break
+                i+=1
+
+        except:
+            # I find it hard to care if the series parsing fails
+            pass
+
+    # grab the text for an individual chapter.
+    def getChapterText(self, url):
+
+        logger.debug('Getting chapter text from: %s' % url)
+
+        soup = bs.BeautifulSoup(self._fetchUrl(url),
+                                selfClosingTags=('br','hr')) # otherwise soup eats the br/hr tags.
+
+        div = soup.find('div', {'id' : 'story'})
+
+        if None == div:
+            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
+
+        notesdiv = soup.find('div', {'class':'noteinfo'})
+        if notesdiv != None:
+            div.insert(0,"<hr/>")
+            div.insert(0,notesdiv)
+
+        return self.utf8FromSoup(url,div)