#!/usr/bin/env python3
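"""Download full-resolution page images from the Bodleian Library's IIIF
service.

Each page is fetched as a grid of tiles via the IIIF Image API, stitched
back together with Pillow, and saved as a high-quality JPEG, one file per
manifest page.
"""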
import os
import requests
import tempfile
import re

from argparse import ArgumentParser
from PIL import Image


def extract_uuid(url):
    """Return the first UUID found in `url`, or None if there is none."""
    uuid_pattern = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
    match = uuid_pattern.search(url)
    return match.group(0) if match else None


def fetch_manifest(uuid):
    """Fetch the IIIF Presentation manifest for `uuid`, or None on failure."""
    url = f'https://iiif.bodleian.ox.ac.uk/iiif/manifest/{uuid}.json'
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error fetching manifest: {e}")
        return None
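# A IIIF Presentation manifest lists an object's pages as "canvases" inside
# "sequences"; each canvas carries its pixel dimensions and an @id URL
# containing the image UUID, which is everything this script needs below.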


def calculate_chunks(image_width, image_height, chunk_size=4000):
    """Split the image into a grid of (x, y, width, height) tiles.

    Edge tiles are clipped so the grid exactly covers the image.
    """
    chunks = []
    for y in range(0, image_height, chunk_size):
        for x in range(0, image_width, chunk_size):
            width = min(chunk_size, image_width - x)
            height = min(chunk_size, image_height - y)
            chunks.append((x, y, width, height))
    return chunks
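# Example: a 9000x5000 image with the default chunk_size of 4000 is split
# into a 3x2 grid of six tiles; the rightmost column of tiles is 1000 px
# wide and the bottom row is 1000 px tall.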


def generate_url(uuid, x, y, width, height):
    return f'https://iiif.bodleian.ox.ac.uk/iiif/image/{uuid}/{x},{y},{width},{height}/{width},{height}/0/default.jpg'
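# The URL follows the IIIF Image API pattern
# {server}/{identifier}/{region}/{size}/{rotation}/{quality}.{format}:
# the region is the requested tile, the size repeats the tile's own
# dimensions (i.e. no scaling), rotation is 0, and the quality/format
# is default.jpg.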


def download_chunk(url, save_path):
    response = requests.get(url, stream=True, timeout=60)
    # Raise requests.HTTPError for 4xx/5xx responses instead of silently
    # writing an error page to disk.
    response.raise_for_status()
    with open(save_path, 'wb') as f:
        for chunk in response.iter_content(1024):
            f.write(chunk)


def merge_chunks(image_width, image_height, chunk_paths):
    full_image = Image.new('RGB', (image_width, image_height))
    for (x, y, width, height), path in chunk_paths:
        # Close each tile after pasting so file handles are released.
        with Image.open(path) as chunk:
            full_image.paste(chunk, (x, y))
    return full_image


def download_and_merge_image(image_width, image_height, image_uuid, output_file):
    """Download all tiles of one page and stitch them into `output_file`."""
    chunks = calculate_chunks(image_width, image_height)
    chunk_paths = []

    with tempfile.TemporaryDirectory() as temp_dir:
        for i, (x, y, width, height) in enumerate(chunks):
            url = generate_url(image_uuid, x, y, width, height)
            chunk_path = os.path.join(temp_dir, f"chunk_{i}.jpg")
            download_chunk(url, chunk_path)
            chunk_paths.append(((x, y, width, height), chunk_path))

        full_image = merge_chunks(image_width, image_height, chunk_paths)
        # subsampling=0 and quality=99 keep the recompressed JPEG as close
        # to lossless as Pillow allows.
        full_image.save(output_file, subsampling=0, quality=99)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--uuid', type=str, required=True,
                        help='UUID of the Bodleian manifest to download')
    parser.add_argument('--output-dir', type=str, required=True,
                        help='directory to write the page JPEGs to')
    parser.add_argument('--skip-pages', type=int, default=0,
                        help='number of pages to skip')
    args = parser.parse_args()

    manifest = fetch_manifest(args.uuid)
    if manifest is None:
        raise SystemExit('Could not fetch the manifest; aborting.')

    os.makedirs(args.output_dir, exist_ok=True)

    for page, canvas in enumerate(manifest['sequences'][0]['canvases']):
        # Skip already-downloaded pages if requested.
        if page < args.skip_pages:
            continue
        download_and_merge_image(canvas['width'], canvas['height'],
                                 extract_uuid(canvas['@id']),
                                 os.path.join(args.output_dir, f'{page + 1}.jpg'))
        print(f'page {page + 1} done')
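
# Example invocation (the UUID below is a placeholder, not a real object):
#   python3 main.py --uuid 01234567-89ab-cdef-0123-456789abcdef \
#       --output-dir ./pages --skip-pages 0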