Scrape n images depending on the argv parameter
This commit is contained in:
parent
a27a7e612a
commit
1f4a2d9dfc
@ -3,8 +3,12 @@ from bs4 import BeautifulSoup
|
||||
from lxml import etree
|
||||
import os
|
||||
import csv
|
||||
import sys
|
||||
|
||||
ROOT_URL = "https://myanimelist.net/character.php"
|
||||
if len(sys.argv) > 1:
|
||||
IMG_N = int(sys.argv[1])
|
||||
else:
|
||||
IMG_N = 50
|
||||
|
||||
def get_page_xpath_result(url, xpath_str):
|
||||
HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
|
||||
@ -19,40 +23,46 @@ def createDirectory(path):
|
||||
os.makedirs(path)
|
||||
print(f"The {path} is created!")
|
||||
|
||||
character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
|
||||
character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
|
||||
character_names = [link.text for link in character_names]
|
||||
n = IMG_N // 50
|
||||
|
||||
character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
|
||||
for link in character_links]
|
||||
for i in range(n):
|
||||
ROOT_URL = f'https://myanimelist.net/character.php?limit={(i)*50}'
|
||||
print(ROOT_URL)
|
||||
|
||||
character_links = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a/@href')
|
||||
character_names = get_page_xpath_result(ROOT_URL, '//div[@class="information di-ib mt24"]/a')
|
||||
character_names = [link.text for link in character_names]
|
||||
|
||||
createDirectory('./data')
|
||||
createDirectory('./data/images')
|
||||
character_img_urls = [get_page_xpath_result(link, '//td/div/a/img/@data-src')[0]
|
||||
for link in character_links]
|
||||
|
||||
csv_header = ['name', 'file', 'link']
|
||||
data_rows = []
|
||||
createDirectory('./data')
|
||||
createDirectory('./data/images')
|
||||
|
||||
for name, url, link in zip(character_names, character_img_urls, character_links):
|
||||
img_data = requests.get(url).content
|
||||
img_path = f'./data/images/{name}.jpg'
|
||||
img_path_exist = os.path.exists(img_path)
|
||||
csv_header = ['name', 'file', 'link']
|
||||
data_rows = []
|
||||
|
||||
if not img_path_exist:
|
||||
with open(img_path, 'wb') as handler:
|
||||
handler.write(img_data)
|
||||
handler.close()
|
||||
print(f'{name}.jpg downloaded')
|
||||
for name, url, link in zip(character_names, character_img_urls, character_links):
|
||||
img_data = requests.get(url).content
|
||||
img_path = f'./data/images/{name}.jpg'
|
||||
img_path_exist = os.path.exists(img_path)
|
||||
|
||||
data_row = [name, f'{name}.jpg', link]
|
||||
data_rows.append(data_row)
|
||||
if not img_path_exist:
|
||||
with open(img_path, 'wb') as handler:
|
||||
handler.write(img_data)
|
||||
handler.close()
|
||||
print(f'{name}.jpg downloaded')
|
||||
|
||||
csv_path = './data/anime-faces.csv'
|
||||
if len(data_rows) > 0:
|
||||
csv_file_exist = os.path.exists(csv_path)
|
||||
with open(csv_path, 'a', encoding='UTF8') as f:
|
||||
writer = csv.writer(f)
|
||||
if not csv_file_exist:
|
||||
writer.writerow(csv_header)
|
||||
writer.writerows(data_rows)
|
||||
data_row = [name, f'{name}.jpg', link]
|
||||
data_rows.append(data_row)
|
||||
|
||||
csv_path = './data/anime-faces.csv'
|
||||
if len(data_rows) > 0:
|
||||
csv_file_exist = os.path.exists(csv_path)
|
||||
with open(csv_path, 'a', encoding='UTF8') as f:
|
||||
writer = csv.writer(f)
|
||||
if not csv_file_exist:
|
||||
writer.writerow(csv_header)
|
||||
writer.writerows(data_rows)
|
||||
|
||||
print("finish!")
|
Loading…
Reference in New Issue
Block a user