wikisource-crawler/crawler.py

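"""Crawler for proofread page scans on Polish Wikisource.

Walks one proofreading-status category (Uwierzytelniona, Skorygowana or
Przepisana), opens every listed "Strona:" page and saves its title, URL,
scan image URL and transcription text to a tab-separated file.
"""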
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from tqdm import tqdm
import time
import argparse
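
# Base URL of Polish Wikisource; every path appended to it below already starts with "/".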
MAIN_URL = "https://pl.wikisource.org"


def main(args):
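    # Map the CLI colour flag to the Wikisource proofreading-status category name
    # (Uwierzytelniona = validated, Skorygowana = corrected, Przepisana = transcribed).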
    category_dict = {"green": "Uwierzytelniona", "yellow": "Skorygowana", "red": "Przepisana"}
    CATEGORY_URL = f"{MAIN_URL}/wiki/Kategoria:{category_dict[args.type]}"
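
    # Fetch a single "Strona:" page and extract its transcription text and scan image URL.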
    def get_page_data(page_element):
        time.sleep(0.5)
        doc = requests.get(MAIN_URL + page_element['href'])
        doc_soup = BeautifulSoup(doc.text, 'lxml', from_encoding="utf-8")
        text_elem = doc_soup.find("div", {"class": "pagetext"}).next_element
        text = text_elem.text
        image_url = doc_soup.find("div", {"class": "prp-page-image"}).next_element['src']
        return {"title": page_element['title'], "href": MAIN_URL + page_element['href'], "image_url": image_url, "text": text}

    r = requests.get(CATEGORY_URL)
    soup = BeautifulSoup(r.text, 'lxml')
    page_number = 1
    data_number = 0
    result_list = []
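    # Category size parsed from the "mw-pages" summary paragraph: non-breaking spaces are
    # stripped and the first three digits (presumably the per-listing count) are skipped.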
    max_len = int("".join(re.findall(r"\d", re.sub("\xa0", '', soup.find("div", {"id": "mw-pages"}).find("p").text))[3:]))

    try:
        with tqdm(total=max_len) as pbar:
            while data_number < max_len:
                pbar.set_description(f"Page number: {page_number}")
                time.sleep(5)
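                # The "następna strona" ("next page") link points to the next category listing;
                # it is absent on the last page.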
                next_page_elem = soup.find("a", {"href": re.compile(r"\/w\/index.php.*")}, string="następna strona")
                next_page = next_page_elem.get('href') if next_page_elem else None
                if next_page and page_number != 1:
                    r = requests.get(MAIN_URL + next_page)
                elif not next_page:
                    break
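
                # Back off and retry once on a non-200 response; stop crawling if it still fails.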
                if r.status_code != 200:
                    print(r.__dict__)
                    time.sleep(30)
                    r = requests.get(MAIN_URL + next_page)
                    if r.status_code != 200:
                        break
                soup = BeautifulSoup(r.text, 'lxml')
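                # Every transcribed page in the listing links into the "Strona:" (Page:) namespace.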
                links = soup.find_all("a", {"href": re.compile(r"\/wiki\/Strona:.*")})
                page_number += 1
                for link in links:
                    result_list.append(get_page_data(link))
                    data_number += 1
                    pbar.update(1)
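    # If the crawl fails partway through, save whatever has been collected so far.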
    except Exception as e:
        print(e)
        df = pd.DataFrame(result_list)
        df.to_csv(f"./{args.type}.tsv", sep="\t")
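    # Save the collected pages as a tab-separated file named after the chosen colour.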
    df = pd.DataFrame(result_list)
    df.to_csv(f"./{args.type}.tsv", sep="\t")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--type", type=str, default='green', choices=["green", "yellow", "red"])
    args, left_argv = parser.parse_known_args()
    main(args)