Fix rebase errors.

This commit is contained in:
Jim Miller 2021-01-20 19:39:12 -06:00
parent ea4cf245ac
commit 9572c25c0b
5 changed files with 7 additions and 33 deletions

View file

@@ -170,6 +170,7 @@ class FanFictionNetSiteAdapter(BaseSiteAdapter):
if "Please check to see you are not using an outdated url." in data:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! 'Chapter not found. Please check to see you are not using an outdated url.'" % url)
# <link rel="canonical" href="//www.fanfiction.net/s/13551154/100/Haze-Gray">
canonicalurl = soup.select_one('link[rel=canonical]')['href']
self.set_story_idurl(canonicalurl)
@@ -185,9 +186,10 @@ class FanFictionNetSiteAdapter(BaseSiteAdapter):
# get chapter part of url.
except:
chapcount = 1
tryurl = "https://%s/s/%s/%d/"%(self.getSiteDomain(),
self.story.getMetadata('storyId'),
chapcount+1)
tryurl = "https://%s/s/%s/%d/%s"%(self.getSiteDomain(),
self.story.getMetadata('storyId'),
chapcount+1,
self.urltitle)
logger.debug('=Trying newer chapter: %s' % tryurl)
newdata = self._fetchUrl(tryurl)
if "not found. Please check to see you are not using an outdated url." not in newdata \

View file

@@ -55,10 +55,7 @@ class CacheBlock():
# Read Magic Number
magic = struct.unpack('I', header.read(4))[0]
<<<<<<< HEAD
# print("magic number:%s"%hex(magic))
=======
>>>>>>> e94a36a7 (Rebasing onto master)
if magic == CacheBlock.BLOCK_MAGIC:
self.type = CacheBlock.BLOCK
header.seek(2, 1)

View file

@@ -113,11 +113,7 @@ class CacheData():
else:
block = open(os.path.join(self.address.path,self.address.fileSelector), 'rb')
block.seek(8192 + self.address.blockNumber*self.address.entrySize)
<<<<<<< HEAD
data = block.read(self.size)
=======
data = block.read(self.size).decode('utf-8')
>>>>>>> e94a36a7 (Rebasing onto master)
data = block.read(self.size)#.decode('utf-8',errors='ignore')
block.close()
return data

View file

@@ -118,7 +118,7 @@ class CacheEntry():
if self.keyAddress == 0:
return self.key
else:
return self.key.data()
return self.key.data().decode('utf-8')
def __str__(self):
string = "Hash: 0x%08x" % self.hash + '\n'

View file

@@ -39,7 +39,6 @@ import os
import struct
import sys
import re
<<<<<<< HEAD
import time
def do_cprofile(func):
@@ -67,10 +66,6 @@ except:
def brotli_decompress(inbuf):
# wants the output, too, but returns it
return brotlidec(inbuf,[])
import time
=======
import brotli
>>>>>>> e94a36a7 (Rebasing onto master)
from . import csvOutput
from . import SuperFastHash
@@ -143,24 +138,16 @@ class ChromeCache(object):
def __init__(self,path):
self.cache = parse(path)
self.hash_cache = {}
<<<<<<< HEAD
# t = time.time()
for entry in self.cache:
key = entry.keyToStr()
if 'fanfiction.net' not in key:
continue
=======
for entry in self.cache:
key = entry.keyToStr()
>>>>>>> e94a36a7 (Rebasing onto master)
self.hash_cache[key] = entry
normkey = re.sub(r'^(https://www.fanfiction.net/s/\d+/\d+/).+$',r'\1',key)
## either overwrites (no harm), or adds new.
self.hash_cache[normkey] = entry
<<<<<<< HEAD
# print("======:%s"%(time.time()-t))
=======
>>>>>>> e94a36a7 (Rebasing onto master)
def get_cached_file(self,url):
if url in self.hash_cache:
@@ -176,11 +163,7 @@ class ChromeCache(object):
if entry.httpHeader.headers[b'content-encoding'] == b"gzip":
data = gzip.decompress(data)
elif entry.httpHeader.headers[b'content-encoding'] == b"br":
<<<<<<< HEAD
data = brotli_decompress(data)
=======
data = brotli.decompress(data)
>>>>>>> e94a36a7 (Rebasing onto master)
return data
return None
@@ -259,10 +242,6 @@ def exportToHTML(cache, outpath):
# print("unbrotli'ed:%s"%name)
except IOError:
page.write("Something wrong happened while unzipping")
<<<<<<< HEAD
=======
brotli
>>>>>>> e94a36a7 (Rebasing onto master)
else:
page.write('<a href="%s">%s</a>'%(name ,
entry.keyToStr().split('/')[-1]))