Browse files

Merge branch 'master' into statistics

Conflicts:
	celery/bin/celeryd.py
Ask Solem 16 years ago
parent
commit
9d8e79f6f8
12 changed files with 197 additions and 151 deletions
  1. README (+23 -2)
  2. celery/bin/celeryd.py (+46 -14)
  3. celery/conf.py (+1 -25)
  4. celery/managers.py (+2 -0)
  5. celery/task.py (+13 -4)
  6. celery/tests/test_conf.py (+0 -4)
  7. celery/tests/test_models.py (+4 -3)
  8. celery/tests/test_task.py (+3 -1)
  9. celery/views.py (+15 -7)
  10. celery/worker.py (+45 -89)
  11. docs/links.rst (+44 -0)
  12. setup.py (+1 -2)

+ 23 - 2
README

@@ -100,7 +100,7 @@ Using the development version
 
 You can clone the repository by doing the following::
 
-    $ git clone git@github.com:ask/celery.git celery
+    $ git clone git://github.com/ask/celery.git celery
 
 
 Usage
@@ -144,7 +144,7 @@ You only need three simple steps to use celery with your Django project.
     3. Configure celery to use the AMQP user and virtual host we created
         before, by adding the following to your ``settings.py``::
 
-            AMQP_HOST = "localhost"
+            AMQP_SERVER = "localhost"
             AMQP_PORT = 5672
             AMQP_USER = "myuser"
             AMQP_PASSWORD = "mypassword"
@@ -204,6 +204,7 @@ advanced features of celery later.
 This is a task that does nothing except take some arguments
 and return a value:
 
+    >>> from celery.task import Task, tasks
     >>> class MyTask(Task):
     ...     name = "myapp.mytask"
     ...     def run(self, some_arg, **kwargs):
@@ -277,6 +278,26 @@ Here's an example of a periodic task:
 **Note:** Periodic tasks do not support arguments, as this doesn't
 really make sense.
 
+Getting Help
+============
+
+Mailing list
+------------
+
+Join the `celery-users`_ mailing list for discussions about the usage
+and development of celery.
+
+.. _`celery-users`: http://groups.google.com/group/celery-users/
+
+IRC
+---
+
+Come chat with us on IRC. The `#celery`_ channel is on the `Freenode`_
+network.
+
+.. _`#celery`: irc://irc.freenode.net/celery
+.. _`Freenode`: http://freenode.net
+
 
 Bug tracker
 ===========

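Before moving on to the next file: a hedged sketch of how the ``MyTask``
example above would be registered and invoked, using names this diff itself
imports or defines (``Task``, ``tasks``, ``apply_async``); ``tasks.register``
is assumed from the registry import the README hunk adds, so treat this as
illustrative of the 0.x-era API, not a verbatim README excerpt::

    >>> from celery.task import Task, tasks, apply_async
    >>> class MyTask(Task):
    ...     name = "myapp.mytask"
    ...     def run(self, some_arg, **kwargs):
    ...         return some_arg
    >>> tasks.register(MyTask)  # let workers look the task up by name
    >>> result = apply_async(MyTask, args=["hello"])  # returns an AsyncResult
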
+ 46 - 14
celery/bin/celeryd.py

@@ -5,7 +5,8 @@
 
 .. cmdoption:: -c, --concurrency
 
-    Number of child processes processing the queue.
+    Number of child processes processing the queue. The default
+    is the number of CPUs available on your system.
 
 .. cmdoption:: -f, --logfile
 
@@ -20,6 +21,7 @@
 
     Path to pidfile.
 
+<<<<<<< HEAD:celery/bin/celeryd.py
 .. cmdoption:: -s, --statistics
 
     Turn on reporting of statistics (remember to flush the statistics message
@@ -31,6 +33,8 @@
     daemon sleeps until it wakes up to check if there's any
     new messages on the queue.
 
+=======
+>>>>>>> master:celery/bin/celeryd.py
 .. cmdoption:: -d, --detach, --daemon
 
     Run in the background as a daemon.
@@ -73,10 +77,11 @@ from django.conf import settings
 from celery.log import emergency_error
 from celery.conf import LOG_LEVELS, DAEMON_LOG_FILE, DAEMON_LOG_LEVEL
 from celery.conf import DAEMON_CONCURRENCY, DAEMON_PID_FILE
-from celery.conf import QUEUE_WAKEUP_AFTER
+from celery import conf
 from celery import discovery
 from celery.task import discard_all
 from celery.worker import WorkController
+import multiprocessing
 import traceback
 import optparse
 import atexit
@@ -85,8 +90,17 @@ from daemon.pidlockfile import PIDLockFile
 import errno
 
 USE_STATISTICS = getattr(settings, "CELERY_STATISTICS", False)
+# Make sure the setting exists.
 settings.CELERY_STATISTICS = USE_STATISTICS
 
+STARTUP_INFO_FMT = """
+    * Celery loading with the following configuration
+        * Broker -> amqp://%(vhost)s@%(host)s:%(port)s 
+        * Exchange -> %(exchange)s (%(exchange_type)s)
+        * Consumer -> Queue:%(consumer_queue)s Routing:%(consumer_rkey)s
+        * Concurrency:%(concurrency)s
+""".strip()
+
 
 def acquire_pidlock(pidfile):
     """Get the :class:`daemon.pidlockfile.PIDLockFile` handler for
@@ -115,21 +129,23 @@ def acquire_pidlock(pidfile):
                 "ERROR: Pidfile (%s) already exists.\n"
                 "Seems celeryd is already running? (PID: %d)" % (
                     pidfile, pid))
-    return pidlock        
+    return pidlock
 
 
 def run_worker(concurrency=DAEMON_CONCURRENCY, daemon=False,
         loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
-        pidfile=DAEMON_PID_FILE, queue_wakeup_after=QUEUE_WAKEUP_AFTER,
-        umask=0, uid=None, gid=None, working_directory=None, chroot=None,
-        statistics=None, **kwargs):
-    """Run the celery daemon."""
+        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
+        working_directory=None, chroot=None, statistics=None, **kwargs):
+    """Start a celery worker server."""
 
-    print(">>> Launching celery, please hold on to something...")
+    print(". Launching celery, please hold on to something...")
 
     if statistics:
         settings.CELERY_STATISTICS = statistics
 
+    if not concurrency:
+        concurrency = multiprocessing.cpu_count()
+
     if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
         import warnings
         warnings.warn("The sqlite3 database engine doesn't support "
@@ -142,13 +158,27 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, daemon=False,
 
     if discard:
         discarded_count = discard_all()
-        what = "message"
-        if discarded_count > 1:
-            what = "messages"
-        sys.stderr.write("* Discard: Erased %d %s from the queue.\n" % (
-            discarded_count, what))
+        what = discarded_count > 1 and "messages" or "message"
+        print("* Discard: Erased %d %s from the queue." % (
+                discarded_count, what))
+    
+    startup_info = STARTUP_INFO_FMT % {
+            "vhost": settings.AMQP_VHOST,
+            "host": settings.AMQP_SERVER,
+            "port": settings.AMQP_PORT,
+            "exchange": conf.AMQP_EXCHANGE,
+            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
+            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
+            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
+            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
+            "concurrency": concurrency,
+            "loglevel": loglevel,
+            "pidfile": pidfile,
+    }
+    print(startup_info)
     print("* Reporting of statistics is %s..." % (
         settings.CELERY_STATISTICS and "ON" or "OFF"))
+
     context = None
     if daemon:
         # Since without stderr any errors will be silently suppressed,
@@ -176,7 +206,6 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, daemon=False,
     celeryd = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
-                               queue_wakeup_after=queue_wakeup_after,
                                is_detached=daemon)
     try:
         celeryd.run()
@@ -207,6 +236,7 @@ OPTION_LIST = (
     optparse.make_option('-p', '--pidfile', default=DAEMON_PID_FILE,
             action="store", dest="pidfile",
             help="Path to pidfile."),
+<<<<<<< HEAD:celery/bin/celeryd.py
     optparse.make_option('-s', '--statistics', default=USE_STATISTICS,
             action="store_true", dest="statistics",
             help="Turn on reporting of statistics (remember to flush the "
@@ -216,6 +246,8 @@ OPTION_LIST = (
             help="If the queue is empty, this is the time *in seconds* the "
                  "daemon sleeps until it wakes up to check if there's any "
                  "new messages on the queue."),
+=======
+>>>>>>> master:celery/bin/celeryd.py
     optparse.make_option('-d', '--detach', '--daemon', default=False,
             action="store_true", dest="daemon",
             help="Run in the background as a daemon."),

+ 1 - 25
celery/conf.py

@@ -7,9 +7,7 @@ DEFAULT_AMQP_PUBLISHER_ROUTING_KEY = "celery"
 DEFAULT_AMQP_CONSUMER_ROUTING_KEY = "celery"
 DEFAULT_AMQP_CONSUMER_QUEUE = "celery"
 DEFAULT_AMQP_EXCHANGE_TYPE = "direct"
-DEFAULT_DAEMON_CONCURRENCY = 10
-DEFAULT_QUEUE_WAKEUP_AFTER = 0.1
-DEFAULT_EMPTY_MSG_EMIT_EVERY = 5
+DEFAULT_DAEMON_CONCURRENCY = 0 # defaults to cpu count
 DEFAULT_DAEMON_PID_FILE = "celeryd.pid"
 DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'
 DEFAULT_DAEMON_LOG_LEVEL = "INFO"
@@ -61,28 +59,6 @@ DAEMON_LOG_FILE = getattr(settings, "CELERYD_LOG_FILE",
 DAEMON_LOG_LEVEL = LOG_LEVELS[getattr(settings, "CELERYD_DAEMON_LOG_LEVEL",
                                       DEFAULT_DAEMON_LOG_LEVEL).upper()]
 
-"""
-.. data:: QUEUE_WAKEUP_AFTER
-
-    The time (in seconds) the celery worker should sleep when there's
-    no messages left on the queue. After the time is slept, the worker
-    wakes up and checks the queue again.
-
-"""
-QUEUE_WAKEUP_AFTER = getattr(settings, "CELERYD_QUEUE_WAKEUP_AFTER",
-                             DEFAULT_QUEUE_WAKEUP_AFTER)
-
-"""
-.. data:: EMPTY_MSG_EMIT_EVERY
-
-    How often the celery daemon should write a log message saying there are no
-    messages in the queue. If this is ``None`` or ``0``, it will never print
-    this message.
-
-"""
-EMPTY_MSG_EMIT_EVERY = getattr(settings, "CELERYD_EMPTY_MSG_EMIT_EVERY",
-                               DEFAULT_EMPTY_MSG_EMIT_EVERY)
-
 """
 .. data:: DAEMON_PID_FILE
 

+ 2 - 0
celery/managers.py

@@ -62,6 +62,8 @@ class PeriodicTaskManager(models.Manager):
         """
         periodic_tasks = tasks.get_all_periodic()
         waiting = []
+        # XXX This will become a lot of queries. Maybe just only create
+        # the rows at init, and then select all later.
         for task_name, task in periodic_tasks.items():
             task_meta, created = self.get_or_create(name=task_name)
             # task_run.every must be a timedelta object.

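The ``XXX`` note hints at a two-query alternative: create any missing rows up
front, then select everything at once. A minimal sketch of that idea
(a hypothetical helper, assuming Django 1.0's ``values_list``; not part of
this commit)::

    def get_registered_task_metas(self):
        periodic_tasks = tasks.get_all_periodic()
        names = list(periodic_tasks.keys())
        # One query to learn which rows already exist...
        known = set(self.filter(name__in=names).values_list("name", flat=True))
        # ...create only the missing ones...
        for name in names:
            if name not in known:
                self.create(name=name)
        # ...and one query to load them all.
        return self.filter(name__in=names)
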
+ 13 - 4
celery/task.py

@@ -15,7 +15,8 @@ import pickle
 
 
 def apply_async(task, args=None, kwargs=None, routing_key=None,
-        immediate=None, mandatory=None, connect_timeout=None, priority=None):
+        immediate=None, mandatory=None, connection=None,
+        connect_timeout=None, priority=None):
     """Run a task asynchronously by the celery daemon(s).
 
     :param task: The task to run (a callable object, or a :class:`Task`
@@ -35,6 +36,9 @@ def apply_async(task, args=None, kwargs=None, routing_key=None,
     :keyword mandatory: Mandatory routing. Raises an exception if there's
         no running workers able to take on this task.
 
+    :keyword connection: Re-use existing AMQP connection.
+        The ``connect_timeout`` argument is not respected if this is set.
+
     :keyword connect_timeout: The timeout in seconds, before we give up
         on establishing a connection to the AMQP server.
 
@@ -52,11 +56,16 @@ def apply_async(task, args=None, kwargs=None, routing_key=None,
     for option_name, option_value in message_opts.items():
         message_opts[option_name] = getattr(task, option_name, option_value)
 
-    conn = DjangoAMQPConnection(connect_timeout=connect_timeout)
-    publisher = TaskPublisher(connection=conn)
+    need_to_close_connection = False
+    if not connection:
+        connection = DjangoAMQPConnection(connect_timeout=connect_timeout)
+        need_to_close_connection = True
+
+    publisher = TaskPublisher(connection=connection)
     task_id = publisher.delay_task(task.name, args, kwargs, **message_opts)
     publisher.close()
-    conn.close()
+    if need_to_close_connection:
+        connection.close()
     return AsyncResult(task_id)
 
 

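The new ``connection`` keyword lets callers publish several tasks over a
single AMQP connection instead of opening and closing one per call. A hedged
usage sketch (``MyTask`` as in the README; the connection class is the one
``celery.task`` already uses)::

    from carrot.connection import DjangoAMQPConnection
    from celery.task import apply_async

    connection = DjangoAMQPConnection()
    try:
        # apply_async skips connection setup/teardown when one is passed in.
        results = [apply_async(MyTask, args=[i], connection=connection)
                       for i in range(10)]
    finally:
        connection.close()  # the caller owns the connection and must close it
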
+ 0 - 4
celery/tests/test_conf.py

@@ -18,10 +18,6 @@ SETTING_VARS = (
         "DEFAULT_DAEMON_CONCURRENCY"),
     ("CELERYD_PID_FILE", "DAEMON_PID_FILE",
         "DEFAULT_DAEMON_PID_FILE"),
-    ("CELERYD_EMPTY_MSG_EMIT_EVERY", "EMPTY_MSG_EMIT_EVERY",
-        "DEFAULT_EMPTY_MSG_EMIT_EVERY"),
-    ("CELERYD_QUEUE_WAKEUP_AFTER", "QUEUE_WAKEUP_AFTER",
-        "DEFAULT_QUEUE_WAKEUP_AFTER"),
     ("CELERYD_LOG_FILE", "DAEMON_LOG_FILE",
         "DEFAULT_DAEMON_LOG_FILE"),
     ("CELERYD_DAEMON_LOG_FORMAT", "LOG_FORMAT",

+ 4 - 3
celery/tests/test_models.py

@@ -8,7 +8,7 @@ from celery.registry import tasks
 
 class TestPeriodicTask(PeriodicTask):
     name = "celery.unittest.test_models.test_periodic_task"
-    run_every = timedelta(days=1)
+    run_every = timedelta(minutes=30)
 
 
 class TestModels(unittest.TestCase):
@@ -57,8 +57,9 @@ class TestModels(unittest.TestCase):
         self.assertTrue(unicode(p).startswith("<PeriodicTask:"))
         self.assertFalse(p in PeriodicTaskMeta.objects.get_waiting_tasks())
         # Have to avoid save() because it applies the auto_now=True.
-        PeriodicTaskMeta.objects.filter(name=p.name).update(
-                last_run_at=datetime.now() - TestPeriodicTask.run_every)
+        PeriodicTaskMeta.objects.filter(name=p.name).update(
+                last_run_at=datetime.now() - (TestPeriodicTask.run_every +
+                timedelta(seconds=10)))
         self.assertTrue(p in PeriodicTaskMeta.objects.get_waiting_tasks())
         self.assertTrue(isinstance(p.task, TestPeriodicTask))
 

+ 3 - 1
celery/tests/test_task.py

@@ -121,7 +121,9 @@ class TestTaskSet(unittest.TestCase):
         self.assertEquals(ts.task_name, IncrementCounterTask.name)
         self.assertEquals(ts.total, 9)
 
-        taskset_id, subtask_ids = ts.run()
+        taskset_res = ts.run()
+        subtask_ids = taskset_res.subtask_ids
+        taskset_id = taskset_res.taskset_id
 
         consumer = IncrementCounterTask().get_consumer()
         for subtask_id in subtask_ids:

+ 15 - 7
celery/views.py

@@ -8,15 +8,23 @@ from carrot.serialization import serialize as JSON_dump
 def is_task_done(request, task_id):
     """Returns task execute status in JSON format."""
     response_data = {"task": {"id": task_id, "executed": is_done(task_id)}}
-    return HttpResponse(JSON_dump(response_data))
+    return HttpResponse(JSON_dump(response_data), mimetype="application/json")
 
 
 def task_status(request, task_id):
     """Returns task status and result in JSON format."""
     async_result = AsyncResult(task_id)
-    response_data = {"task": {
-                        "id": task_id,
-                        "status": async_result.status,
-                        "result": async_result.result,
-    }}
-    return HttpResponse(JSON_dump(response_data))
+    status = async_result.status
+    if status == "FAILURE":
+        response_data = {
+            "id": task_id,
+            "status": status,
+            "result": async_result.result.args[0],
+        }
+    else:
+        response_data = {
+            "id": task_id,
+            "status": status,
+            "result": async_result.result,
+        }
+    return HttpResponse(JSON_dump({"task": response_data}), mimetype="application/json")

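With the ``mimetype`` fix and the new ``FAILURE`` branch above, the two
response shapes come out roughly as follows (illustrative id and values; on
failure the ``result`` field carries the exception's first argument)::

    {"task": {"id": "f27e6e58-...", "status": "SUCCESS", "result": 42}}
    {"task": {"id": "f27e6e58-...", "status": "FAILURE", "result": "error message"}}
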
+ 45 - 89
celery/worker.py

@@ -2,7 +2,6 @@
 from carrot.connection import DjangoAMQPConnection
 from celery.messaging import TaskConsumer
 from celery.conf import DAEMON_CONCURRENCY, DAEMON_LOG_FILE
-from celery.conf import QUEUE_WAKEUP_AFTER, EMPTY_MSG_EMIT_EVERY
 from celery.conf import SEND_CELERY_TASK_ERROR_EMAILS
 from celery.log import setup_logger
 from celery.registry import tasks
@@ -16,6 +15,7 @@ import multiprocessing
 import traceback
 import threading
 import logging
+import signal
 import socket
 import time
 import sys
@@ -31,16 +31,12 @@ The contents of the full traceback was:
 
 %%(traceback)s
 
-%%(EMAIL_SIGNATURE_SEP)s
+%(EMAIL_SIGNATURE_SEP)s
 Just thought I'd let you know!
 celeryd at %%(hostname)s.
 """ % {"EMAIL_SIGNATURE_SEP": EMAIL_SIGNATURE_SEP}
 
 
-class EmptyQueue(Exception):
-    """The message queue is currently empty."""
-
-
 class UnknownTask(Exception):
     """Got an unknown task in the queue. The message is requeued and
     ignored."""
@@ -93,9 +89,6 @@ def jail(task_id, task_name, func, args, kwargs):
     # Backend process cleanup
     default_backend.process_cleanup()
 
-    # Convert any unicode keys in the keyword arguments to ascii.
-    kwargs = dict([(k.encode("utf-8"), v)
-                        for k, v in kwargs.items()])
     try:
         result = func(*args, **kwargs)
     except (SystemExit, KeyboardInterrupt):
@@ -191,6 +184,11 @@ class TaskWrapper(object):
         task_id = message_data["id"]
         args = message_data["args"]
         kwargs = message_data["kwargs"]
+
+        # Convert any unicode keys in the keyword arguments to ascii.
+        kwargs = dict([(key.encode("utf-8"), value)
+                    for key, value in kwargs.items()])
+
         if task_name not in tasks:
             raise UnknownTask(task_name)
         task_func = tasks[task_name]
@@ -286,7 +284,7 @@ class PeriodicWorkController(threading.Thread):
         super(PeriodicWorkController, self).__init__()
         self._shutdown = threading.Event()
         self._stopped = threading.Event()
-    
+
     def run(self):
         """Don't use :meth:`run`. use :meth:`start`."""
         while True:
@@ -310,8 +308,6 @@ class WorkController(object):
 
     :param loglevel: see :attr:`loglevel`.
 
-    :param queue_wakeup_after: see :attr:`queue_wakeup_after`.
-
 
     .. attribute:: concurrency
 
@@ -327,18 +323,6 @@ class WorkController(object):
         The logfile used, if no logfile is specified it uses ``stderr``
         (default: :const:`celery.conf.DAEMON_LOG_FILE`).
 
-    .. attribute:: queue_wakeup_after
-
-        The time it takes for the daemon to wake up after the queue is empty,
-        so it can check for more work
-        (default: :const:`celery.conf.QUEUE_WAKEUP_AFTER`).
-
-    .. attribute:: empty_msg_emit_every
-
-        How often the daemon emits the ``"Waiting for queue..."`` message.
-        If this is ``None``, the message will never be logged.
-        (default: :const:`celery.conf.EMPTY_MSG_EMIT_EVERY`)
-
     .. attribute:: logger
 
         The :class:`logging.Logger` instance used for logging.
@@ -355,23 +339,24 @@ class WorkController(object):
     loglevel = logging.ERROR
     concurrency = DAEMON_CONCURRENCY
     logfile = DAEMON_LOG_FILE
-    queue_wakeup_after = QUEUE_WAKEUP_AFTER
-    empty_msg_emit_every = EMPTY_MSG_EMIT_EVERY
 
     def __init__(self, concurrency=None, logfile=None, loglevel=None,
-            queue_wakeup_after=None, is_detached=False):
+            is_detached=False):
         self.loglevel = loglevel or self.loglevel
         self.concurrency = concurrency or self.concurrency
         self.logfile = logfile or self.logfile
-        self.queue_wakeup_after = queue_wakeup_after or \
-                                    self.queue_wakeup_after
         self.logger = setup_logger(loglevel, logfile)
         self.pool = TaskPool(self.concurrency, logger=self.logger)
         self.periodicworkcontroller = PeriodicWorkController()
-        self.task_consumer = None
-        self.task_consumer_it = None
         self.is_detached = is_detached
-        self.reset_connection()
+        self.amqp_connection = None
+        self.task_consumer = None
+
+    def close_connection(self):
+        if self.task_consumer:
+            self.task_consumer.close()
+        if self.amqp_connection:
+            self.amqp_connection.close()
 
     def reset_connection(self):
         """Reset the AMQP connection, and reinitialize the
@@ -380,11 +365,10 @@ class WorkController(object):
         Resets the task consumer in :attr:`task_consumer`.
 
         """
-        if self.task_consumer:
-            self.task_consumer.connection.close()
-        amqp_connection = DjangoAMQPConnection()
-        self.task_consumer = TaskConsumer(connection=amqp_connection)
-        self.task_consumer_it = self.task_consumer.iterqueue(infinite=True)
+        self.close_connection()
+        self.amqp_connection = DjangoAMQPConnection()
+        self.task_consumer = TaskConsumer(connection=self.amqp_connection)
+        return self.task_consumer
 
     def connection_diagnostics(self):
         """Diagnose the AMQP connection, and reset connection if
@@ -396,19 +380,24 @@ class WorkController(object):
                     "AMQP Connection has died, restoring connection.")
             self.reset_connection()
 
-    def receive_message(self):
-        """Receive the next message from the message broker.
-
-        Tries to reset the AMQP connection if not available.
-        Returns ``None`` if no message is waiting on the queue.
-
-        :rtype: :class:`carrot.messaging.Message` instance.
-
-        """
-        message = self.task_consumer_it.next()
-        if not message:
-            raise EmptyQueue()
-        return message
+    def _message_callback(self, message_data, message):
+        try:
+            try:
+                self.process_task(message)
+            except ValueError:
+                # process_task didn't return a r/name/id tuple,
+                # probably because it got an exception.
+                pass
+            except UnknownTask, exc:
+                self.logger.info("Unknown task ignored: %s" % (exc))
+            except Exception, exc:
+                self.logger.critical("Message queue raised %s: %s\n%s" % (
+                                exc.__class__, exc, traceback.format_exc()))
+            except:
+                self.shutdown()
+                raise
+        except (SystemExit, KeyboardInterrupt):
+            self.shutdown()
 
     def process_task(self, message):
         """Process task message by passing it to the pool of workers."""
@@ -424,31 +413,17 @@ class WorkController(object):
 
         return result
 
-    def execute_next_task(self):
-        """Execute the next task on the queue using the multiprocessing pool.
-
-        Catches all exceptions and logs them with level
-        :const:`logging.CRITICAL`.
-
-        Raises :exc:`EmptyQueue` exception if there is no message
-        waiting on the queue.
-
-        """
-        self.process_task(self.receive_message())
-
-    def schedule_retry_tasks(self):
-        """Reschedule all requeued tasks waiting for retry."""
-        pass
-    
     def shutdown(self):
         # shut down the periodic work controller thread
         self.periodicworkcontroller.stop()
         self.pool.terminate()
+        self.close_connection()
 
     def run(self):
         """Starts the workers main loop."""
-        log_wait = lambda: self.logger.debug("Waiting for queue...")
-        ev_msg_waiting = EventTimer(log_wait, self.empty_msg_emit_every)
+        task_consumer = self.reset_connection()
+        task_consumer.register_callback(self._message_callback)
+        it = task_consumer.iterconsume(limit=None)
 
         self.pool.run()
         self.periodicworkcontroller.start()
@@ -462,26 +437,7 @@ class WorkController(object):
                 time.sleep(1)
         
         try:
-            while True:
-                try:
-                    self.execute_next_task()
-                except ValueError:
-                    # execute_next_task didn't return a r/name/id tuple,
-                    # probably because it got an exception.
-                    continue
-                except EmptyQueue:
-                    ev_msg_waiting.tick()
-                    time.sleep(self.queue_wakeup_after)
-                    continue
-                except UnknownTask, exc:
-                    self.logger.info("Unknown task ignored: %s" % (exc))
-                    continue
-                except Exception, exc:
-                    self.logger.critical("Message queue raised %s: %s\n%s" % (
-                                 exc.__class__, exc, traceback.format_exc()))
-                    continue
-                except:
-                    self.shutdown()
-                    raise
+            while True:
+                it.next()
         except (SystemExit, KeyboardInterrupt):
             self.shutdown()

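The refactor replaces the pull-style ``receive_message``/``EmptyQueue``
polling loop with carrot's push-style delivery: a callback is registered on
the consumer and each step of ``iterconsume()`` blocks until the broker
delivers a message, which is why the wakeup/sleep settings can be dropped.
Reduced to its essentials, a sketch using only the carrot calls that appear
in this diff (the callback body here is illustrative)::

    def callback(message_data, message):
        print("Got message: %r" % (message_data, ))
        message.ack()

    consumer = TaskConsumer(connection=DjangoAMQPConnection())
    consumer.register_callback(callback)   # called as (message_data, message)
    it = consumer.iterconsume(limit=None)  # an iterator over deliveries
    while True:
        it.next()                          # blocks until a message arrives
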
+ 44 - 0
docs/links.rst

@@ -0,0 +1,44 @@
+===================
+ Interesting Links
+===================
+
+
+celery
+------
+
+* IRC logs from ``#celery`` (Freenode):
+    http://botland.oebfare.com/logger/celery/
+
+AMQP
+----
+
+* `Shovel`_: An AMQP Relay
+
+.. _`Shovel`: http://botland.oebfare.com/logger/celery/
+
+RabbitMQ
+--------
+
+* `Trixx`_: Administration and Monitoring tool for `RabbitMQ`_ (in
+    development).
+
+.. _`Trixx`: http://github.com/aaronfeng/trixx/tree/master
+.. _`RabbitMQ`: http://rabbitmq.com/
+
+
+Non-relational databases
+------------------------
+
+* `CouchDB`_: Document database.
+
+* `Disco`_: `Map/Reduce`_ framework.
+
+* `Tokyo Cabinet`_/`Tokyo Tyrant`_: A modern and fast (networked) DBM
+    implementation.
+
+.. _`CouchDB`: http://couchdb.org
+.. _`Disco`: http://discoproject.org
+.. _`Map/Reduce`: http://en.wikipedia.org/wiki/MapReduce
+.. _`Tokyo Cabinet`: http://tokyocabinet.sourceforge.net/
+.. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/tyrantdoc/
+

+ 1 - 2
setup.py

@@ -40,7 +40,7 @@ class RunTests(Command):
         os.chdir(this_dir)
 
 
-install_requires = ["carrot", "django"]
+install_requires = ["carrot"]
 py_version_info = sys.version_info
 py_major_version = py_version_info[0]
 py_minor_version = py_version_info[1]
@@ -65,7 +65,6 @@ setup(
     install_requires=[
         'carrot>=0.4.1',
         'python-daemon',
-        'django',
     ],
     cmdclass = {"test": RunTests},
     classifiers=[