# Scrapes the top characters' portrait images from MyAnimeList into ./images/.
# Standard library first, then third-party, per PEP 8 grouping.
import os

import requests
from bs4 import BeautifulSoup
from lxml import etree

# Landing page listing MyAnimeList's most popular characters.
ROOT_URL = "https://myanimelist.net/character.php"
def get_page_xpath_result(url, xpath_str):
    """Fetch *url* and return the list of matches for *xpath_str*.

    Depending on the expression, the result items are either lxml element
    nodes (e.g. ``//a``) or plain strings (e.g. ``//a/@href``).

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if no response arrives within the timeout.
    """
    # A browser-like User-Agent; MyAnimeList rejects the default requests UA.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    # timeout prevents the script from hanging forever on a stalled server;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    page = requests.get(url, headers=headers, timeout=30)
    page.raise_for_status()
    # Round-trip through BeautifulSoup to tolerate malformed HTML before
    # handing a cleaned-up document to lxml for XPath evaluation.
    soup = BeautifulSoup(page.content, "html.parser")
    dom = etree.HTML(str(soup))
    return dom.xpath(xpath_str)
# Fetch the anchor nodes once and derive both the profile links and the
# display names from them — the original double request to ROOT_URL for
# the same elements is redundant.
_character_anchors = get_page_xpath_result(
    ROOT_URL, '//div[@class="information di-ib mt24"]/a')
character_links = [anchor.get('href') for anchor in _character_anchors]
character_names = [anchor.text for anchor in _character_anchors]

# One request per character page to pull the portrait image URL.
# NOTE(review): assumes every character page has at least one matching
# <img data-src>; an empty result would raise IndexError — confirm.
character_img_urls = [
    get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
    for link in character_links
]
path = './images'
if not os.path.exists(path):
    # exist_ok=True closes the race between the existence check above and
    # the creation here (e.g. a concurrent run creating the directory).
    os.makedirs(path, exist_ok=True)
    print("The images directory is created!")
# Download each character portrait and save it under the character's name.
# NOTE(review): names containing os path separators (e.g. '/') would break
# the open() below — confirm MAL names never contain them, or sanitize.
for name, url in zip(character_names, character_img_urls):
    # timeout/raise_for_status: fail loudly instead of hanging or silently
    # writing an HTML error page into the .jpg file.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    # The with-statement closes the file; the original's explicit
    # handler.close() inside the block was redundant.
    with open(f'images/{name}.jpg', 'wb') as handler:
        handler.write(response.content)
    print(f'{name}.jpg downloaded')