import requests
import re
import urlparse
# regexes for e-mail addresses and links
email_re = re.compile(r'([\w\.,]+@[\w\.,]+\.\w+)')
link_re = re.compile(r'href="(.*?)"')
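
# Quick, illustrative sanity check of what the two patterns above match;
# the sample strings below are made up purely for this check.
assert email_re.findall('contact foo@example.com today') == ['foo@example.com']
assert link_re.findall('<a href="/about">About</a>') == ['/about']
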
def crawl(url, maxlevel):
    """Collect e-mail addresses from `url` and from pages linked from it,
    following links recursively until `maxlevel` levels have been crawled."""

    result = set()

    # Stop once the requested number of levels has been crawled
    if maxlevel <= 0:
        return result

    # Get the webpage
    req = requests.get(url)

    # Check if successful
    if req.status_code != 200:
        return result

    # Find all emails on the current page
    result.update(email_re.findall(req.text))

    print "Crawled level: {}".format(maxlevel)

    # Find all the links on the current page
    links = link_re.findall(req.text)

    # Follow each link one level deeper and collect the emails found there
    for link in links:
        # Get an absolute URL for the link
        link = urlparse.urljoin(url, link)
        result.update(crawl(link, maxlevel - 1))

    return result

emails = crawl('http://www.website_goes_here_dot_com', 2)
print "\nScrapped e-mail addresses:"
for email in emails:
    print email