summaryrefslogtreecommitdiff
path: root/dl.py
diff options
context:
space:
mode:
authorEvgeny Zinoviev <me@ch1p.io>2024-06-16 00:04:44 +0300
committerEvgeny Zinoviev <me@ch1p.io>2024-06-16 00:31:39 +0300
commit5fd7512f903522a47c416ebcda3b6acc6b080e49 (patch)
treed758fb0d5432e09edb44a1e39b92fd6724e7c4d4 /dl.py
initialHEADmaster
Diffstat (limited to 'dl.py')
-rwxr-xr-xdl.py59
1 files changed, 59 insertions, 0 deletions
diff --git a/dl.py b/dl.py
new file mode 100755
index 0000000..0741d77
--- /dev/null
+++ b/dl.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+import os
+import requests
+
+from fb import Database
+from fb.util import get_dl_url, get_long_book_id
+from argparse import ArgumentParser
+
+
def download_file(url, dst, cookie, user_agent):
    """Stream the file at *url* into local path *dst*.

    Authentication is via a raw Cookie header; *user_agent* is sent as-is.

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status
            (previously the error page body was silently written to *dst*,
            which made a failed download look like a finished one).
    """
    headers = {
        'User-Agent': user_agent,
        'Cookie': cookie,
    }
    # stream=True keeps the body out of memory; the `with` block guarantees
    # the connection is released even if writing to disk fails.
    # timeout prevents a stalled server from hanging the whole run.
    with requests.get(url, stream=True, headers=headers, timeout=60) as response:
        response.raise_for_status()
        with open(dst, 'wb') as file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
+
def remove_from_my_books(bid, cookie, user_agent):
    """Remove book *bid* from the account's "my books" list on forgottenbooks.com.

    *bid* is the short numeric id; the site expects the long form, produced
    by get_long_book_id().

    Raises:
        requests.HTTPError: if the removal request is rejected by the server.
    """
    r = requests.post('https://www.forgottenbooks.com/books-remove', data={
        'p': get_long_book_id(bid),
    }, headers={
        'User-Agent': user_agent,
        'Cookie': cookie
    }, timeout=30)  # requests has no default timeout; don't hang forever
    r.raise_for_status()
+
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--from-id', type=int, required=True)
    parser.add_argument('--to-id', type=int, required=True)
    parser.add_argument('--output-directory', type=str, required=True)
    parser.add_argument('--cookie', type=str, required=True)
    parser.add_argument('--user-agent', type=str,
                        default='Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0')
    args = parser.parse_args()

    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(args.output_directory, exist_ok=True)

    db = Database()
    for book_id in db.get_ids(args.from_id, args.to_id):
        dst = os.path.join(args.output_directory, f'{book_id}.pdf')
        already_downloaded = os.path.exists(dst)
        if already_downloaded:
            print(f'{book_id} already downloaded, removing from my books')
        else:
            # get_dl_url() is only needed when we actually download.
            download_file(get_dl_url(book_id), dst, args.cookie, args.user_agent)

        # Removal happens in both branches so re-running the script converges:
        # every processed id ends up downloaded AND off the "my books" list.
        remove_from_my_books(book_id, args.cookie, args.user_agent)
        if not already_downloaded:
            print(f'saved {book_id}')