Marcin Kostrzewski 2023-01-15 12:40:38 +01:00
commit c94a3c916d
2 changed files with 20 additions and 10 deletions

.gitignore

@@ -1,3 +1,5 @@
+images
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -157,4 +159,4 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/

Python scraper script (filename not shown in this view)

@@ -1,7 +1,7 @@
 import requests
 from bs4 import BeautifulSoup
 from lxml import etree
-import re
+import os
 
 ROOT_URL = "https://myanimelist.net/character.php"
@@ -12,16 +12,24 @@ def get_page_xpath_result(url, xpath_str):
     dom = etree.HTML(str(soup))
     return dom.xpath(xpath_str)
 
-# 1. face image
-# 2. character name
-# 3. link
 character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
 character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
 character_names = [link.text for link in character_names]
 
-print("character_links")
-print(character_links)
-
-print("character_names")
-print(character_names)
+character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
+                      for link in character_links]
+
+path = './images'
+isExist = os.path.exists(path)
+if not isExist:
+    os.makedirs(path)
+    print("The images directory is created!")
+
+for name, url in zip(character_names, character_img_urls):
+    img_data = requests.get(url).content
+    with open(f'images/{name}.jpg', 'wb') as handler:
+        handler.write(img_data)
+        handler.close()
+        print(f'{name}.jpg downloaded')
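
For context, here is a sketch of how the whole script might read after this commit, assembled from the fragments visible in the diff above. The opening lines of get_page_xpath_result fall outside the hunk, so the requests/BeautifulSoup fetch inside it is an assumption; two small cleanups (os.makedirs with exist_ok=True instead of the explicit existence check, and letting the with-block close the file instead of calling handler.close()) are also not part of the commit itself.

import os

import requests
from bs4 import BeautifulSoup
from lxml import etree

ROOT_URL = "https://myanimelist.net/character.php"


def get_page_xpath_result(url, xpath_str):
    # The start of this function is outside the diff hunk; fetching with
    # requests and parsing with BeautifulSoup here is an assumption.
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    dom = etree.HTML(str(soup))
    return dom.xpath(xpath_str)


# Profile links, display names, and thumbnail URLs for the characters listed at ROOT_URL.
character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
character_names = [link.text for link in character_names]
character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
                      for link in character_links]

# exist_ok=True replaces the explicit os.path.exists() check used in the commit.
os.makedirs('./images', exist_ok=True)

for name, url in zip(character_names, character_img_urls):
    img_data = requests.get(url).content
    # The with-block closes the file on exit, so no explicit handler.close() is needed.
    with open(f'images/{name}.jpg', 'wb') as handler:
        handler.write(img_data)
    print(f'{name}.jpg downloaded')

One thing the committed code does not handle: a character name containing characters that are invalid in file names (a slash, for instance) would make the open() call fail, so sanitizing name before building the path could be a reasonable follow-up.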