diff --git a/calibre-plugin/plugin-defaults.ini b/calibre-plugin/plugin-defaults.ini
index 6b66eb9d..7f436d57 100644
--- a/calibre-plugin/plugin-defaults.ini
+++ b/calibre-plugin/plugin-defaults.ini
@@ -1820,13 +1820,6 @@ reader_posts_per_page:30
 #username:YourName
 #password:yourpassword
 
-[gravitytales.com]
-## Extra metadata that this adapter knows about. See [archiveofourown.org]
-## for examples of how to use them.
-extra_valid_entries:translator
-translator_label: Translator
-extra_titlepage_entries: translator
-
 [harrypotterfanfiction.com]
 extra_valid_entries:reviews,era
 
diff --git a/fanficfare/adapters/__init__.py b/fanficfare/adapters/__init__.py
index c5c110d4..a5aceda5 100644
--- a/fanficfare/adapters/__init__.py
+++ b/fanficfare/adapters/__init__.py
@@ -152,7 +152,6 @@ from . import adapter_tasteofpoisoninkubationnet
 from . import adapter_thedelphicexpansecom
 from . import adapter_thundercatsfansorg
 from . import adapter_wwwaneroticstorycom
-from . import adapter_gravitytalescom
 from . import adapter_lcfanficcom
 from . import adapter_noveltrovecom
 from . import adapter_inkbunnynet
diff --git a/fanficfare/adapters/adapter_gravitytalescom.py b/fanficfare/adapters/adapter_gravitytalescom.py
deleted file mode 100644
index d55019de..00000000
--- a/fanficfare/adapters/adapter_gravitytalescom.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014 Fanficdownloader team, 2018 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-####################################################################################################
-## Adapted by GComyn on April 21, 2017
-####################################################################################################
-
-from __future__ import absolute_import
-import logging
-import re
-import time
-from datetime import datetime
-
-logger = logging.getLogger(__name__)
-
-try:
-    # If feedparser ever becomes an included dependency for FanFicFare
-    import feedparser
-except ImportError:
-    try:
-        # A version of feedparser is available in the Calibre plugin version
-        from calibre.web.feeds import feedparser
-    except ImportError:
-        # logger.warning('No version of feedparser module available, falling back to naive published and updated date')
-        feedparser = None
-
-# py2 vs py3 transition
-from ..six import text_type as unicode
-from ..six.moves.urllib.error import HTTPError
-
-from .base_adapter import BaseSiteAdapter
-
-from .. import exceptions as exceptions
-from ..htmlcleanup import stripHTML
-
-
-####################################################################################################
-def getClass():
-    return GravityTalesComSiteAdapter
-
-
-####################################################################################################
-class GravityTalesComSiteAdapter(BaseSiteAdapter):
-
-    def __init__(self, config, url):
-        BaseSiteAdapter.__init__(self, config, url)
-
-        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
-        self.password = ""
-        self.is_adult = False
-
-        # get storyId from url
-        # http://gravitytales.com/novel/a-dragons-curiosity
-        self.story.setMetadata('storyId',self.parsedUrl.path.split('/')[2])
-
-        # normalized story URL.
-        self._setURL("http://"+self.getSiteDomain()\
-                     +"/novel/"+self.story.getMetadata('storyId'))
-
-        # Each adapter needs to have a unique site abbreviation.
-        self.story.setMetadata('siteabbrev','gtcom')
-
-        # The date format will vary from site to site.
-        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
-        ## There are no dates listed on this site, so am commenting this out
-        #self.dateformat = "%Y-%b-%d"
-
-####################################################################################################
-    @staticmethod # must be @staticmethod, don't remove it.
-    def getSiteDomain():
-        # The site domain. Does have www here, if it uses it.
-        return 'gravitytales.com'
-
-####################################################################################################
-    @classmethod
-    def getSiteExampleURLs(cls):
-        return "http://"+cls.getSiteDomain()+"/novel/a-story-name"
-
-####################################################################################################
-    def getSiteURLPattern(self):
-        return r"http://"+re.escape(self.getSiteDomain())+r"/(novel|post)/*(?P<id>[^/]+)"
-
-####################################################################################################
-    ## Getting the chapter list and the meta data, plus 'is adult' checking.
-    def doExtractChapterUrlsAndMetadata(self, get_cover=True):
-
-        url = self.url
-
-        try:
-            data = self._fetchUrl(url)
-        except HTTPError as e:
-            if e.code == 404:
-                raise exceptions.StoryDoesNotExist('Error 404: {0}'.format(self.url))
-            else:
-                raise e
-
-        # use BeautifulSoup HTML parser to make everything easier to find.
-        soup = self.make_soup(data)
-
-        # Now go hunting for all the meta data and the chapter list.
-
-        ## This is the block that holds the metadata
-        bookdetails = soup.find('div', {'class':'main-content'})
-
-        ## Title
-        title = bookdetails.h3
-        for tag in title.find_all('span'):
-            tag.extract()
-        self.story.setMetadata('title',stripHTML(title))
-
-        author = stripHTML(bookdetails.h4)
-        self.story.setMetadata('author', author)
-        self.story.setMetadata('authorId', author)
-        self.story.setMetadata('authorUrl', url)
-
-        # Find authorid and URL from... author url.
-        bookdesc = bookdetails.find('div', {'class':'desc'})
-        addtosys = False
-        paras = bookdesc.find_all()
-        synopsis = ''
-        for para in paras:
-            parat = stripHTML(para)
-            ## I had a section of code that took the author name from the list, and added it to
-            ## the author name from the <h4>... and a section that took the title from the list,
-            ## and added it to the title from the <h3>...
-            ## but decided to remove them and let it be added to the synopsis.
-            if parat[:7] == 'Genres:' and unicode(para)[:2] == '<p':
-                addtosys = True
-            if addtosys:
-                synopsis += unicode(para)
-
-        while '<br/><br/>' in synopsis:
-            synopsis = synopsis.replace('<br/><br/>', '<br/>')
-
-        self.setDescription(url, unicode(synopsis))
-
-        ## this is constantly being forbidden, so I'm commenting it out for now.
-#        if get_cover:
-#            cover_meta = soup.find('div', {'id':'coverImg'})
-#            cover_url = cover_meta['style'].replace('background-image: url(', '').replace(');', '')
-#            self.setCoverImage(url, cover_url)
-
-        ## Getting the ChapterUrls
-        ## fetch from separate chapters url.
-        chap_url = self.story.getMetadata('storyUrl')+"/chapters"
-        chap_soup = self.make_soup(self._fetchUrl(chap_url))
-        found_chaps = {}
-        for alink in chap_soup.find_all('a',href=re.compile(self.getSiteDomain())): # ignore anchor links
-            ## Some stories have that same chapters in different sections
-            if alink['href'] not in found_chaps:
-                self.add_chapter(alink,alink['href'])
-                found_chaps[alink['href']] = alink['href']
-
-        if feedparser:
-            # Parse published and updated date from latest RSS feed entry. The RSS feed urls seems to appear due to
-            # some JavaScript on the page, so get the URL by mangling the URL (this is not very robust, but probably
-            # good enough)
-            rss_feed_url = url.replace('/novel/', '/feed/')
-            feed = feedparser.parse(rss_feed_url)
-            date_updated = datetime.fromtimestamp(
-                time.mktime(feed.entries[0].published_parsed)) if feed.entries else datetime.now()
-        else:
-            # Fall back to the previous method of generating the published and update date...
-            date_updated = datetime.now()
-
-        # Since the original published date isn't available, we'll simply use the updated date
-        self.story.setMetadata('datePublished', date_updated)
-        self.story.setMetadata('dateUpdated', date_updated)
-
-    # grab the text for an individual chapter.
-    def getChapterText(self, url):
-        logger.debug('Getting chapter text from: %s' % url)
-
-        data = self._fetchUrl(url)
-        html = self.make_soup(data)
-
-        story = html.find('div', {'id':'chapterContent'})
-
-        if story == None:
-            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
-
-        return self.utf8FromSoup(url,story)
diff --git a/fanficfare/defaults.ini b/fanficfare/defaults.ini
index bdfea26d..83f2294c 100644
--- a/fanficfare/defaults.ini
+++ b/fanficfare/defaults.ini
@@ -1853,13 +1853,6 @@ reader_posts_per_page:30
 #username:YourName
 #password:yourpassword
 
-[gravitytales.com]
-## Extra metadata that this adapter knows about. See [archiveofourown.org]
-## for examples of how to use them.
-extra_valid_entries:translator
-translator_label: Translator
-extra_titlepage_entries: translator
-
 [harrypotterfanfiction.com]
 extra_valid_entries:reviews,era