scrape n images depending on argv param

Mateusz 2023-01-15 14:17:33 +01:00
parent a27a7e612a
commit 1f4a2d9dfc
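This commit makes the number of scraped images configurable from the command line: sys.argv[1], when present, sets IMG_N (defaulting to 50), and the scraper then walks IMG_N // 50 pages of the MyAnimeList character listing, 50 characters per page. A minimal sketch of the added logic and its pagination arithmetic (the script's filename is not shown in the commit, so run it as e.g. "python script.py 150" with your actual filename):

    import sys

    # Sketch of the logic this commit adds, not the full script.
    IMG_N = int(sys.argv[1]) if len(sys.argv) > 1 else 50
    n = IMG_N // 50  # whole pages of 50 characters each
    for i in range(n):
        url = f'https://myanimelist.net/character.php?limit={i * 50}'
        # IMG_N = 150 -> n = 3 -> pages at limit=0, 50, 100

Note that the floor division drops any remainder: IMG_N = 75 gives n = 1, so only 50 images are scraped.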


@@ -3,8 +3,12 @@ from bs4 import BeautifulSoup
 from lxml import etree
 import os
 import csv
+import sys
 
-ROOT_URL = "https://myanimelist.net/character.php"
+if len(sys.argv) > 1:
+    IMG_N = int(sys.argv[1])
+else:
+    IMG_N = 50
 
 def get_page_xpath_result(url, xpath_str):
     HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
@@ -19,20 +23,26 @@ def createDirectory(path):
         os.makedirs(path)
         print(f"The {path} is created!")
 
-character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
-character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
-character_names = [link.text for link in character_names]
-character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
-                      for link in character_links]
+n = IMG_N // 50
 
-createDirectory('./data')
-createDirectory('./data/images')
+for i in range(n):
+    ROOT_URL = f'https://myanimelist.net/character.php?limit={(i)*50}'
+    print(ROOT_URL)
+    character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
+    character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
+    character_names = [link.text for link in character_names]
+    character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
+                          for link in character_links]
 
-csv_header = ['name', 'file', 'link']
-data_rows = []
+    createDirectory('./data')
+    createDirectory('./data/images')
 
-for name, url, link in zip(character_names, character_img_urls, character_links):
-    img_data = requests.get(url).content
-    img_path = f'./data/images/{name}.jpg'
-    img_path_exist = os.path.exists(img_path)
+    csv_header = ['name', 'file', 'link']
+    data_rows = []
+
+    for name, url, link in zip(character_names, character_img_urls, character_links):
+        img_data = requests.get(url).content
+        img_path = f'./data/images/{name}.jpg'
+        img_path_exist = os.path.exists(img_path)
@@ -46,8 +56,8 @@ for name, url, link in zip(character_names, character_img_urls, character_links)
-    data_row = [name, f'{name}.jpg', link]
-    data_rows.append(data_row)
+        data_row = [name, f'{name}.jpg', link]
+        data_rows.append(data_row)
 
-csv_path = './data/anime-faces.csv'
-if len(data_rows) > 0:
-    csv_file_exist = os.path.exists(csv_path)
-    with open(csv_path, 'a', encoding='UTF8') as f:
-        writer = csv.writer(f)
+    csv_path = './data/anime-faces.csv'
+    if len(data_rows) > 0:
+        csv_file_exist = os.path.exists(csv_path)
+        with open(csv_path, 'a', encoding='UTF8') as f:
+            writer = csv.writer(f)
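The body of get_page_xpath_result is cut off by the hunk boundary; only its User-Agent header is visible. For context, a sketch of what such a requests + lxml helper typically looks like (an assumption based on the imports and the call sites, not the commit's actual body):

    import requests
    from lxml import etree

    def get_page_xpath_result(url, xpath_str):
        # Fetch the page with a browser-like User-Agent and evaluate the XPath on it.
        HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        response = requests.get(url, headers=HEADERS)
        dom = etree.HTML(response.content)
        return dom.xpath(xpath_str)

The return value is whatever the XPath yields: href strings for the '.../a/@href' query, element objects (with a .text attribute) for the '.../a' query.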