python-scripts/08_basic_email_web_crawler.py

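"""Crawl a start page, follow its links, and scrape e-mail addresses.

A minimal sketch built on requests and regular expressions; regex-based
HTML parsing is fragile and only suitable for simple pages.
"""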
import requests
import re
from urllib.parse import urljoin

# rough patterns for e-mail addresses and href links (good enough for simple pages)
email_re = re.compile(r'([\w\.,]+@[\w\.,]+\.\w+)')
link_re = re.compile(r'href="(.*?)"')

def crawl(url):
    result = set()

    req = requests.get(url)

    # Bail out if the start page could not be fetched
    if req.status_code != 200:
        return result

    # Find links on the start page
    links = link_re.findall(req.text)

    print("\nFound {} links".format(len(links)))

    # Search each linked page for e-mail addresses
    for link in links:

        # Resolve relative links against the start URL
        link = urljoin(url, link)

        # Fetch the linked page and collect any addresses on it
        try:
            page = requests.get(link)
        except requests.exceptions.RequestException:
            continue
        result.update(email_re.findall(page.text))

    return result
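
# Note: crawl() follows links only one level deep from the start page;
# a deeper crawler would need to recurse and track visited URLs to avoid loops.
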
if __name__ == '__main__':
    emails = crawl('http://www.realpython.com')

    print("\nScraped e-mail addresses:")
    for email in emails:
        print(email)
    print("\n")
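
# Usage: python 08_basic_email_web_crawler.py
# The start URL is hard-coded above; point it at any page you are allowed to scrape.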