# wikisource-crawler/crawler.py
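"""Crawler for Polish Wikisource proofread pages.

Walks one of the proofreading-status categories (Uwierzytelniona, Skorygowana,
Przepisana), fetches every listed page and stores its title, URL, scan image URL
and transcribed text as rows of a TSV file.
"""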
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from tqdm import tqdm
import time
import argparse

# No trailing slash: the hrefs scraped below already start with "/".
MAIN_URL = "https://pl.wikisource.org"

def get_page_data(page_element):
    """Fetch one proofread page and return its title, URL, scan image URL and text."""
    time.sleep(0.5)
    doc = requests.get(MAIN_URL + page_element['href'])
    doc_soup = BeautifulSoup(doc.text, 'lxml')
    # The transcription is the first child of the "pagetext" div.
    text = doc_soup.find("div", {"class": "pagetext"}).next_element
    image_url = doc_soup.find("div", {"class": "prp-page-image"}).find("img")['src']
    return {"title": page_element['title'], "href": MAIN_URL + page_element['href'], "image_url": image_url, "text": text.text}

def save_data(file_name, data, args):
    # In testing mode nothing is written to disk.
    if not args.testing:
        df = pd.DataFrame(data)
        df.to_csv(f"./{file_name}.tsv", sep="\t")

def main(args):
    category_dict = {"green": "Uwierzytelniona", "yellow": "Skorygowana", "red": "Przepisana"}
    if args.start_file_name and args.start_page_number:
        # Resume the category listing from a given page title.
        CATEGORY_URL = f"{MAIN_URL}/w/index.php?title=Kategoria:{category_dict[args.wiki_type]}&pagefrom={args.start_file_name}"
    else:
        CATEGORY_URL = f"{MAIN_URL}/wiki/Kategoria:{category_dict[args.wiki_type]}"
    r = requests.get(CATEGORY_URL)
    soup = BeautifulSoup(r.text, 'lxml')
    page_number = 1 if not args.start_page_number else args.start_page_number
    # Each category listing page holds up to 200 entries.
    data_number = 0 if not args.start_page_number else args.start_page_number * 200
    result_list = []
    # The "mw-pages" summary reads e.g. "Poniżej wyświetlono 200 stron spośród 12 345 ...";
    # dropping the first three digits (the per-page count of 200) leaves the category total.
    max_len = int("".join(re.findall(r"\d", re.sub("\xa0", '', soup.find("div", {"id": "mw-pages"}).find("p").text))[3:]))
    try:
        with tqdm(total=max_len) as pbar:
            if args.start_page_number:
                pbar.update(data_number)
                pbar.refresh()
            while data_number < max_len:
                pbar.set_description(f"Page number: {page_number}")
                time.sleep(5)
                next_link = soup.find("a", {"href": re.compile(r"\/w\/index.php.*")}, string="następna strona")  # "next page" link
                next_page = next_link.get('href') if next_link else None
                if next_page and page_number != 1:
                    r = requests.get(MAIN_URL + next_page)
                elif not next_page:
                    break
                if r.status_code != 200:
                    # Back off once on an HTTP error, then give up.
                    print(r.__dict__)
                    time.sleep(60)
                    r = requests.get(MAIN_URL + next_page)
                    if r.status_code != 200:
                        break
                soup = BeautifulSoup(r.text, 'lxml')
                links = soup.find_all("a", {"href": re.compile(r"\/wiki\/Strona:.*")})
                page_number += 1
                for link in links:
                    result_list.append(get_page_data(link))
                    data_number += 1
                    pbar.update(1)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        print("Error:", e)
    finally:
        # Always write out whatever was collected, even on errors or Ctrl-C.
        save_data(f"{args.output_file_name}-{args.wiki_type}", result_list, args)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--wiki_type", type=str, choices=["green", "yellow", "red"], required=True)
    parser.add_argument("--output_file_name", type=str, required=True)
    parser.add_argument("--start_file_name", type=str, required=False)
    parser.add_argument("--start_page_number", type=int, required=False)
    # store_true instead of type=bool: bool("False") would still be truthy.
    parser.add_argument("--testing", action="store_true")
    args, left_argv = parser.parse_known_args()
    main(args)
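
# Example invocations (output file name and resume values below are illustrative):
#   python crawler.py --wiki_type green --output_file_name pages
#   python crawler.py --wiki_type green --output_file_name pages \
#       --start_file_name "Strona:Przykład.djvu/001" --start_page_number 5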