added link scraper
This commit is contained in:
parent
7409f60137
commit
776ed9c75c
@ -6,6 +6,7 @@ import urlparse
|
|||||||
email_re = re.compile(r'([\w\.,]+@[\w\.,]+\.\w+)')
|
email_re = re.compile(r'([\w\.,]+@[\w\.,]+\.\w+)')
|
||||||
link_re = re.compile(r'href="(.*?)"')
|
link_re = re.compile(r'href="(.*?)"')
|
||||||
|
|
||||||
|
|
||||||
def crawl(url, maxlevel):
|
def crawl(url, maxlevel):
|
||||||
|
|
||||||
result = set()
|
result = set()
|
||||||
@ -25,16 +26,16 @@ def crawl(url, maxlevel):
|
|||||||
# Get an absolute URL for a link
|
# Get an absolute URL for a link
|
||||||
link = urlparse.urljoin(url, link)
|
link = urlparse.urljoin(url, link)
|
||||||
|
|
||||||
# Find all emails on current page
|
# Find all emails on current page
|
||||||
result.update(email_re.findall(req.text))
|
result.update(email_re.findall(req.text))
|
||||||
|
|
||||||
print "Crawled level: {}".format(maxlevel)
|
print "Crawled level: {}".format(maxlevel)
|
||||||
|
|
||||||
# new level
|
# new level
|
||||||
maxlevel -= 1
|
maxlevel -= 1
|
||||||
|
|
||||||
# recurse
|
# recurse
|
||||||
crawl(link, maxlevel)
|
crawl(link, maxlevel)
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -42,4 +43,4 @@ emails = crawl('http://www.website_goes_here_dot_com', 2)
|
|||||||
|
|
||||||
print "\nScrapped e-mail addresses:"
|
print "\nScrapped e-mail addresses:"
|
||||||
for email in emails:
|
for email in emails:
|
||||||
print email
|
print email
|
||||||
|
44
09_basic_link_web_crawler.py
Normal file
44
09_basic_link_web_crawler.py
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
import requests
|
||||||
|
import re
|
||||||
|
import urlparse
|
||||||
|
|
||||||
# regex
# Pre-compiled pattern capturing the value inside href="..." attributes.
# NOTE(review): naive — matches hrefs anywhere in the raw HTML text,
# including inside comments or scripts; fine for this basic crawler.
link_re = re.compile(r'href="(.*?)"')
||||||
|
|
||||||
|
|
||||||
|
def crawl(url, maxlevel):
|
||||||
|
|
||||||
|
result = set()
|
||||||
|
|
||||||
|
while maxlevel > 0:
|
||||||
|
|
||||||
|
# Get the webpage
|
||||||
|
req = requests.get(url)
|
||||||
|
|
||||||
|
# Check if successful
|
||||||
|
if(req.status_code != 200):
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Find and follow all the links
|
||||||
|
links = link_re.findall(req.text)
|
||||||
|
for link in links:
|
||||||
|
# Get an absolute URL for a link
|
||||||
|
link = urlparse.urljoin(url, link)
|
||||||
|
# add links to result set
|
||||||
|
result.update(link)
|
||||||
|
|
||||||
|
print "Crawled level: {}".format(maxlevel)
|
||||||
|
|
||||||
|
# new level
|
||||||
|
maxlevel -= 1
|
||||||
|
|
||||||
|
# recurse
|
||||||
|
crawl(link, maxlevel)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
emails = crawl('http://www.website_goes_here_dot_com', 2)
|
||||||
|
|
||||||
|
print "\nScrapped links:"
|
||||||
|
for link in links:
|
||||||
|
print link
|
Loading…
Reference in New Issue
Block a user