#!/usr/bin/env python3
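"""Bulk-download books from forgottenbooks.com for a range of numeric book IDs.

Relies on the local `fb` package for the book-ID database and the URL/ID
helpers, and on a logged-in session cookie supplied on the command line.

Example invocation (the script name, ID range, and cookie value below are
placeholders, not values taken from the original project):

    python3 download_books.py --from-id 1000 --to-id 1100 \
        --output-directory ./books --cookie '<session cookie>'
"""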
import os
from argparse import ArgumentParser

import requests

from fb import Database
from fb.util import get_dl_url, get_long_book_id


def download_file(url, dst, cookie, user_agent):
    """Stream the file at `url` to the local path `dst`."""
    streamed_response = requests.get(url, stream=True,
                                     headers={
                                         'User-Agent': user_agent,
                                         'Cookie': cookie,
                                     })
    # Fail early instead of writing an HTML error page into the .pdf file.
    streamed_response.raise_for_status()
    with open(dst, 'wb') as file:
        for chunk in streamed_response.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)


def remove_from_my_books(bid, cookie, user_agent):
    """Remove a book from the account's "My Books" list once it has been handled."""
    r = requests.post('https://www.forgottenbooks.com/books-remove', data={
        'p': get_long_book_id(bid),
    }, headers={
        'User-Agent': user_agent,
        'Cookie': cookie,
    })
    r.raise_for_status()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--from-id', type=int, required=True)
    parser.add_argument('--to-id', type=int, required=True)
    parser.add_argument('--output-directory', type=str, required=True)
    parser.add_argument('--cookie', type=str, required=True)
    parser.add_argument('--user-agent', type=str,
                        default='Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0')
    args = parser.parse_args()

    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    db = Database()
    ids = db.get_ids(args.from_id, args.to_id)
    for i in ids:
        url = get_dl_url(i)
        skip = False
        dst = os.path.join(args.output_directory, f'{i}.pdf')
        if not os.path.exists(dst):
            download_file(url, dst, args.cookie, args.user_agent)
        else:
            print(f'{i} already downloaded, removing from my books')
            skip = True
        # Remove the book from "My Books" whether it was downloaded just now
        # or was already present on disk.
        remove_from_my_books(i, args.cookie, args.user_agent)
        if not skip:
            print(f'saved {i}')