added link scraper

Michael Herman 2014-05-18 07:29:23 -07:00
parent 7409f60137
commit 776ed9c75c
2 changed files with 53 additions and 8 deletions

View File

@@ -6,6 +6,7 @@ import urlparse

email_re = re.compile(r'([\w\.,]+@[\w\.,]+\.\w+)')
link_re = re.compile(r'href="(.*?)"')


def crawl(url, maxlevel):

    result = set()
@@ -25,16 +26,16 @@ def crawl(url, maxlevel):
            # Get an absolute URL for a link
            link = urlparse.urljoin(url, link)

        # Find all emails on current page
        result.update(email_re.findall(req.text))

        print "Crawled level: {}".format(maxlevel)

        # new level
        maxlevel -= 1

        # recurse, keeping the emails found on the lower level
        result.update(crawl(link, maxlevel))

    return result
@@ -42,4 +43,4 @@ emails = crawl('http://www.website_goes_here_dot_com', 2)

print "\nScraped e-mail addresses:"
for email in emails:
    print email
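
Both files rely on the same two building blocks: the href regex that pulls links out of raw HTML, and urlparse.urljoin, which turns relative links into absolute ones. A quick interpreter check of both (Python 2, matching the scripts; the example HTML and URLs here are made up):

import re
import urlparse

link_re = re.compile(r'href="(.*?)"')

html = '<a href="about.html">About</a> <a href="/contact">Contact</a>'

# The non-greedy (.*?) captures only the attribute value
print link_re.findall(html)
# ['about.html', '/contact']

# urljoin resolves each href against the URL of the page it came from
print urlparse.urljoin('http://example.com/a/page.html', 'about.html')
# http://example.com/a/about.html
print urlparse.urljoin('http://example.com/a/page.html', '/contact')
# http://example.com/contact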

View File

@@ -0,0 +1,44 @@

import requests
import re
import urlparse

# regex
link_re = re.compile(r'href="(.*?)"')


def crawl(url, maxlevel):

    result = set()

    while maxlevel > 0:

        # Get the webpage
        req = requests.get(url)

        # Check if successful
        if req.status_code != 200:
            return result

        # Find and follow all the links
        links = link_re.findall(req.text)
        for link in links:
            # Get an absolute URL for a link
            link = urlparse.urljoin(url, link)
            # add the absolute link to the result set
            result.add(link)

        print "Crawled level: {}".format(maxlevel)

        # new level
        maxlevel -= 1

        # recurse, keeping the links found on the lower level
        result.update(crawl(link, maxlevel))

    return result


links = crawl('http://www.website_goes_here_dot_com', 2)

print "\nScraped links:"
for link in links:
    print link
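
As structured, the scraper recurses only into the last link found on a page, and the while loop fetches the starting URL once per level. A minimal sketch of an alternative shape, recursing into every link instead (the visited set is an addition of this sketch, not part of the commit, used to avoid fetching the same page twice):

import requests
import re
import urlparse

link_re = re.compile(r'href="(.*?)"')


def crawl(url, maxlevel, visited=None):
    # visited stops the crawler from re-fetching a page
    # when two pages link to each other
    if visited is None:
        visited = set()
    result = set()
    if maxlevel <= 0 or url in visited:
        return result
    visited.add(url)
    req = requests.get(url)
    if req.status_code != 200:
        return result
    for link in link_re.findall(req.text):
        link = urlparse.urljoin(url, link)
        result.add(link)
        # recurse into every link, not only the last one
        result.update(crawl(link, maxlevel - 1, visited))
    return result

Depth is carried per call here, so each page is requested at most once per crawl.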