
Add a bit more messaging around logging in to sites

David Lynch 2025-06-09 20:06:54 -05:00
parent 5bfd1b40a0
commit 6fddf628fb
2 changed files with 20 additions and 16 deletions

@@ -108,6 +108,7 @@ def open_story(site, url, session, login, options):
     )
     if login:
+        logger.info("Attempting to log in as %s", login[0])
         handler.login(login)
     try:

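The added line uses the logging module's deferred %-style formatting: login[0] is passed as an argument rather than interpolated up front, so the string is only built if the record is actually emitted. A minimal, self-contained sketch of that pattern (the logger name and credential below are illustrative, not leech's actual configuration):

    import logging

    # Illustrative setup; leech configures its own logger elsewhere.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("leech.example")

    username = "reader@example.com"  # hypothetical credential
    # The argument is interpolated by the logging framework only if the
    # message passes the current log level.
    logger.info("Attempting to log in as %s", username)
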
@@ -21,22 +21,25 @@ class ArchiveOfOurOwn(Site):
     def login(self, login_details):
         with requests_cache.disabled():
-            # Can't just pass this url to _soup because I need the cookies later
-            login = self.session.get('https://archiveofourown.org/users/login')
-            soup, nobase = self._soup(login.text)
-            post, action, method = self._form_data(soup.find(id='new_user'))
-            post['user[login]'] = login_details[0]
-            post['user[password]'] = login_details[1]
-            # I feel the session *should* handle this cookies bit for me. But
-            # it doesn't. And I don't know why.
-            result = self.session.post(
-                self._join_url(login.url, action),
-                data=post, cookies=login.cookies
-            )
-            if result.ok:
-                logger.info("Logged in as %s", login_details[0])
-            else:
-                logger.error("Failed to log in as %s", login_details[0])
+            try:
+                # Can't just pass this url to _soup because I need the cookies later
+                login = self.session.get('https://archiveofourown.org/users/login')
+                soup, nobase = self._soup(login.text)
+                post, action, method = self._form_data(soup.find(id='new_user'))
+                post['user[login]'] = login_details[0]
+                post['user[password]'] = login_details[1]
+                # I feel the session *should* handle this cookies bit for me. But
+                # it doesn't. And I don't know why.
+                result = self.session.post(
+                    self._join_url(login.url, action),
+                    data=post, cookies=login.cookies
+                )
+                if result.ok:
+                    logger.info("Logged in as %s", login_details[0])
+                else:
+                    logger.error("Failed to log in as %s", login_details[0])
+            except Exception as e:
+                logger.error("Failed to log in as %s, but so hard that it threw an exception %s", login_details[0], e)
 
     def extract(self, url):
         workid = re.match(r'^https?://(?:www\.)?archiveofourown\.org/works/(\d+)/?.*', url).group(1)
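In effect, the second hunk wraps the whole fetch-form, fill-credentials, and POST sequence in a try/except so that any network or parsing error during login is logged instead of crashing the run. A rough, standalone sketch of that pattern using plain requests and logging (the URL, form field names, and function shape are assumptions for illustration, not leech's Site API):

    import logging
    import requests

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("login.example")

    def log_in(session, username, password):
        # Hypothetical standalone version of the pattern: POST credentials,
        # log success or failure, and log (rather than raise) unexpected errors.
        try:
            result = session.post(
                "https://example.org/users/login",  # placeholder URL
                data={"user[login]": username, "user[password]": password},
            )
            if result.ok:
                logger.info("Logged in as %s", username)
            else:
                logger.error("Failed to log in as %s", username)
        except Exception as e:
            logger.error("Failed to log in as %s: %s", username, e)

    log_in(requests.Session(), "reader@example.com", "hunter2")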