"""Scrape MyAnimeList's most-popular-characters listing: download each
character's portrait to ./data/images and log (name, file, link) rows to
./data/anime-faces.csv."""
import csv
import os
import sys

import requests
from bs4 import BeautifulSoup
from lxml import etree

# Number of images to fetch (first CLI argument, default 50). MAL lists
# 50 characters per page, so IMG_N is effectively rounded down to a
# multiple of 50.
if len(sys.argv) > 1:
    IMG_N = int(sys.argv[1])
else:
    IMG_N = 50

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/39.0.2171.95 Safari/537.36'
}


def get_page_xpath_result(url, xpath_str):
    """Fetch url and return the nodes matching xpath_str."""
    page = requests.get(url, headers=HEADERS)
    soup = BeautifulSoup(page.content, "html.parser")
    dom = etree.HTML(str(soup))
    return dom.xpath(xpath_str)


def createDirectory(path):
    """Create path if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)
        print(f"{path} is created!")


createDirectory('./data')
createDirectory('./data/images')

csv_header = ['name', 'file', 'link']
csv_path = './data/anime-faces.csv'

n = IMG_N // 50  # number of 50-character listing pages to scrape
for i in range(n):
    ROOT_URL = f'https://myanimelist.net/character.php?limit={i * 50}'
    print(ROOT_URL)

    # Each entry's link and name live in the same <div>; collect both.
    character_links = get_page_xpath_result(
        ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
    character_names = get_page_xpath_result(
        ROOT_URL, '//div[@class="information di-ib mt24"]/a')
    character_names = [link.text for link in character_names]

    # The portrait on each character page is lazy-loaded, so its URL is
    # in the <img> tag's data-src attribute rather than src.
    character_img_urls = [
        get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
        for link in character_links
    ]

    data_rows = []
    for name, url, link in zip(character_names, character_img_urls, character_links):
        img_path = f'./data/images/{name}.jpg'
        # Skip (and don't re-fetch) portraits already downloaded on a
        # previous run; the with-block closes the file automatically.
        if not os.path.exists(img_path):
            img_data = requests.get(url).content
            with open(img_path, 'wb') as handler:
                handler.write(img_data)
            print(f'{name}.jpg downloaded')
            data_rows.append([name, f'{name}.jpg', link])

    # Append this page's rows; write the header only when the CSV is new.
    # newline='' keeps csv.writer from emitting blank lines on Windows.
    if data_rows:
        csv_file_exist = os.path.exists(csv_path)
        with open(csv_path, 'a', encoding='UTF8', newline='') as f:
            writer = csv.writer(f)
            if not csv_file_exist:
                writer.writerow(csv_header)
            writer.writerows(data_rows)

print("finish!")
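
# Usage sketch (the script filename below is an assumption for
# illustration, not from the original source):
#   python scrape_characters.py        # top 50 characters (one page)
#   python scrape_characters.py 200    # top 200 characters (four pages)
#
# Note: a character name containing a path separator such as '/' would
# break the open() call above; sanitizing `name` before building
# img_path is a possible hardening step.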