# mosgorsud/parser.py

import requests
import textract
import re
import os
import tempfile
import random
import string
import logging

from bs4 import BeautifulSoup
from typing import List, Dict

logger = logging.getLogger(__name__)

BASE_URL = "https://mos-gorsud.ru/mgs/defend"

headers = {
    'Referer': 'https://mos-gorsud.ru/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0'
}

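# URL-matching pattern (appears to be a variant of John Gruber's "liberal, accurate" URL regex):
# it captures http(s) URLs as well as bare domains ending in a known TLD, and is used by
# get_links() to pull URLs out of the downloaded decision texts.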
regex = r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:\'\".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""


def strgen(n: int) -> str:
    """Return a random lowercase alphanumeric string of length n (used for temporary file names)."""
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(n))
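# Example (illustrative): strgen(10) might return something like '3kf9x0qwp2'.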


def get_links(s: str) -> List[str]:
    """Return the unique URLs/domains found in s, matched by the URL regex above."""
    return list(set(re.findall(regex, s)))
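# Example (illustrative):
#   get_links('see http://example.com/page and example.org.')
#   -> ['http://example.com/page', 'example.org']  (set-based, so order is not guaranteed)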


def get_full_url(url: str) -> str:
    """Make a relative link absolute against https://mos-gorsud.ru; absolute URLs are returned unchanged."""
    if not url.startswith('http:') and not url.startswith('https:'):
        if not url.startswith('/'):
            url = '/' + url
        url = 'https://mos-gorsud.ru' + url
    return url
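# Example (illustrative, hypothetical path):
#   get_full_url('mgs/defend/doc/123') -> 'https://mos-gorsud.ru/mgs/defend/doc/123'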


def get_document_text(url: str) -> str:
    """Download a decision document and return its plain text, extracted with textract."""
    logger.info('downloading %s', url)

    r = requests.get(url, allow_redirects=True, headers=headers)

    # The document is served as an attachment; take the original extension from Content-Disposition
    # so that textract can pick the right backend for the file type.
    content_disposition = r.headers['Content-Disposition']
    filename, file_extension = os.path.splitext(
        re.search('attachment; filename="(.*?)"', content_disposition).group(1))

    tempname = os.path.join(tempfile.gettempdir(), strgen(10) + file_extension)

    with open(tempname, 'wb') as f:
        f.write(r.content)

    text = textract.process(tempname).decode('utf-8')
    os.unlink(tempname)

    return text


def get_cases(from_page: int, to_page: int) -> List[Dict]:
    """Scrape registry pages from_page..to_page (inclusive) and return a list of case dicts."""
    cases = []

    for page in range(from_page, to_page + 1):
        url = f'{BASE_URL}?page={page}'
        logger.info('page %s (%s)', page, url)

        r = requests.get(url, headers=headers)

        soup = BeautifulSoup(r.text, "html.parser")
        rows = soup.select('.searchResultContainer table.custom_table tbody tr')

        for row in rows:
            cols = row.find_all('td')

            try:
                date = cols[0].get_text().strip()
                statement_number = cols[1].get_text().strip()
                applicant = cols[3].get_text().strip()
                case_object = cols[4].get_text().strip()  # renamed locally to avoid shadowing the built-in `object`
                link = get_full_url(cols[5].find('a')['href'])

                decision_text = get_document_text(link)
                violation_links = '\n'.join(get_links(decision_text))

                cases.append(dict(
                    date=date,
                    statement_number=statement_number,
                    applicant=applicant,
                    object=case_object,
                    doc_link=link,
                    violation_links=violation_links,
                    decision_text=decision_text
                ))

            except (TypeError, KeyError, IndexError, AttributeError, requests.RequestException):
                # Skip rows that are malformed or whose document cannot be fetched/parsed.
                logger.exception('failed to process row on page %s', page)

    return cases
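

# Minimal usage sketch (illustrative, not part of the original module): fetch one registry
# page and print a short summary of each case. Assumes the module is run directly and that
# network access to mos-gorsud.ru is available.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    for case in get_cases(1, 1):
        print(case['date'], case['statement_number'], case['doc_link'])
        for found in case['violation_links'].split('\n'):
            if found:
                print('   ', found)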