webcrawler.py

  1. """Recursive webcrawler example.
  2. For asynchronous DNS lookups install the `dnspython` package:
  3. $ pip install dnspython
  4. Requires the `pybloom` module for the bloom filter which is used
  5. to ensure a lower chance of recrawling an URL previously seen.
  6. Since the bloom filter is not shared, but only passed as an argument
  7. to each subtask, it would be much better to have this as a centralized
  8. service. Redis sets could also be a practical solution.
  9. A BloomFilter with a capacity of 100_000 members and an error rate
  10. of 0.001 is 2.8MB pickled, but if compressed with zlib it only takes
  11. up 2.9kB(!).
  12. We don't have to do compression manually, just set the tasks compression
  13. to "zlib", and the serializer to "pickle".
  14. """
from __future__ import with_statement

import re
import time
import urlparse

from celery import task, group
from eventlet import Timeout
from eventlet.green import urllib2
from pybloom import BloomFilter
# http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
url_regex = re.compile(
    r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))')


def domain(url):
    """Return the domain part of a URL."""
    return urlparse.urlsplit(url)[1].split(':')[0]
@task(ignore_result=True, serializer='pickle', compression='zlib')
def crawl(url, seen=None):
    print('crawling: %r' % (url, ))
    if not seen:
        seen = BloomFilter(capacity=50000, error_rate=0.0001)

    data = None
    with Timeout(5, False):
        try:
            data = urllib2.urlopen(url).read()
        except (urllib2.HTTPError, IOError):
            return
    if data is None:
        # Timeout(5, False) suppresses the timeout exception, so if the
        # request timed out there is simply nothing to parse.
        return

    location = domain(url)
    wanted_urls = []
    for url_match in url_regex.finditer(data):
        url = url_match.group(0)
        # To not destroy the internet, we only fetch URLs on the same domain.
        if url not in seen and location in domain(url):
            wanted_urls.append(url)
            seen.add(url)

    subtasks = group(crawl.s(url, seen) for url in wanted_urls)
    subtasks.apply_async()
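
The task is meant to be picked up by a Celery worker; a minimal way to kick
off the first crawl from a separate script might look like the sketch below
(the module name and URL are assumptions for the example, and a running
worker plus a configured broker are presumed):

# Hypothetical entry point, not part of the original file: enqueue the first
# URL; the worker then fans out recursively via the group of subtasks.
from webcrawler import crawl

if __name__ == '__main__':
    # .delay() is the Celery shorthand for .apply_async() with plain args.
    crawl.delay('http://example.com/')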
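
The pickled-vs-compressed figures quoted in the docstring can be
sanity-checked with a few lines; this assumes the BloomFilter pickles the
way the docstring implies, and the exact numbers will vary with the pybloom
version and how full the filter is:

# Rough size check for the docstring's compression claim (a sketch only).
import pickle
import zlib

from pybloom import BloomFilter

flt = BloomFilter(capacity=100000, error_rate=0.001)
raw = pickle.dumps(flt, pickle.HIGHEST_PROTOCOL)
print('pickled: %d bytes, zlib-compressed: %d bytes' % (
    len(raw), len(zlib.compress(raw))))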
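
And a sketch of the centralized alternative the docstring hints at,
replacing the per-task BloomFilter with a shared Redis set; redis-py, a
Redis server on localhost, and the key name are all assumptions here:

# Sketch only: SADD returns 1 the first time a member is added and 0 after
# that, so the membership test and the insert happen in one atomic step.
from redis import StrictRedis

redis_client = StrictRedis(host='localhost', port=6379)


def is_new(url):
    """Return True the first time `url` is seen, False afterwards."""
    return bool(redis_client.sadd('crawler:seen', url))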