
Merge branch 'master' into pertask-time-limits

Conflicts:
	celery/concurrency/processes/__init__.py
	celery/concurrency/processes/pool.py
	celery/task/base.py
	celery/worker/job.py
Author: Ask Solem
Commit: 72ab0930fa
100 changed files with 8200 additions and 3812 deletions
  1. .gitignore (+2, -0)
  2. AUTHORS (+24, -0)
  3. Changelog (+1216, -1)
  4. FAQ (+134, -118)
  5. INSTALL (+7, -7)
  6. LICENSE (+1, -1)
  7. README.rst (+59, -41)
  8. bin/camqadm (+0, -9)
  9. bin/celerybeat (+0, -8)
  10. bin/celeryctl (+0, -8)
  11. bin/celeryd (+0, -31)
  12. bin/celeryd-multi (+0, -5)
  13. bin/celeryev (+0, -5)
  14. celery/__init__.py (+30, -1)
  15. celery/app/__init__.py (+247, -0)
  16. celery/app/amqp.py (+372, -0)
  17. celery/app/base.py (+361, -0)
  18. celery/app/defaults.py (+163, -0)
  19. celery/apps/beat.py (+37, -41)
  20. celery/apps/worker.py (+162, -115)
  21. celery/backends/__init__.py (+15, -31)
  22. celery/backends/amqp.py (+192, -142)
  23. celery/backends/base.py (+60, -18)
  24. celery/backends/cache.py (+39, -29)
  25. celery/backends/cassandra.py (+39, -28)
  26. celery/backends/database.py (+33, -17)
  27. celery/backends/mongodb.py (+6, -8)
  28. celery/backends/pyredis.py (+62, -82)
  29. celery/backends/tyrant.py (+9, -9)
  30. celery/beat.py (+176, -121)
  31. celery/bin/base.py (+159, -20)
  32. celery/bin/camqadm.py (+33, -30)
  33. celery/bin/celerybeat.py (+37, -15)
  34. celery/bin/celeryctl.py (+57, -50)
  35. celery/bin/celeryd.py (+65, -17)
  36. celery/bin/celeryd_detach.py (+13, -22)
  37. celery/bin/celeryd_multi.py (+32, -14)
  38. celery/bin/celeryev.py (+77, -49)
  39. celery/concurrency/__init__.py (+13, -0)
  40. celery/concurrency/base.py (+129, -0)
  41. celery/concurrency/evg.py (+106, -0)
  42. celery/concurrency/evlet.py (+123, -0)
  43. celery/concurrency/processes/__init__.py (+23, -66)
  44. celery/concurrency/processes/pool.py (+184, -65)
  45. celery/concurrency/solo.py (+22, -0)
  46. celery/concurrency/threads.py (+18, -54)
  47. celery/conf.py (+93, -289)
  48. celery/contrib/abortable.py (+4, -4)
  49. celery/contrib/batches.py (+150, -36)
  50. celery/contrib/rdb.py (+154, -0)
  51. celery/datastructures.py (+162, -123)
  52. celery/db/a805d4bd.py (+4, -4)
  53. celery/db/models.py (+6, -4)
  54. celery/db/session.py (+1, -2)
  55. celery/decorators.py (+25, -72)
  56. celery/events/__init__.py (+138, -40)
  57. celery/events/cursesmon.py (+131, -54)
  58. celery/events/dumper.py (+5, -5)
  59. celery/events/snapshot.py (+36, -30)
  60. celery/events/state.py (+68, -49)
  61. celery/exceptions.py (+7, -20)
  62. celery/execute/__init__.py (+16, -195)
  63. celery/execute/trace.py (+29, -8)
  64. celery/loaders/__init__.py (+2, -1)
  65. celery/loaders/app.py (+10, -0)
  66. celery/loaders/base.py (+119, -27)
  67. celery/loaders/default.py (+6, -5)
  68. celery/local.py (+114, -0)
  69. celery/log.py (+173, -129)
  70. celery/management/commands/celeryd.py (+0, -16)
  71. celery/messaging.py (+8, -311)
  72. celery/models.py (+0, -56)
  73. celery/platforms.py (+21, -3)
  74. celery/registry.py (+13, -18)
  75. celery/result.py (+296, -170)
  76. celery/routes.py (+26, -27)
  77. celery/schedules.py (+21, -14)
  78. celery/signals.py (+281, -0)
  79. celery/states.py (+16, -3)
  80. celery/task/__init__.py (+72, -54)
  81. celery/task/base.py (+481, -342)
  82. celery/task/builtins.py (+0, -68)
  83. celery/task/chord.py (+46, -0)
  84. celery/task/control.py (+156, -141)
  85. celery/task/http.py (+5, -13)
  86. celery/task/schedules.py (+4, -0)
  87. celery/task/sets.py (+65, -84)
  88. celery/tests/__init__.py (+62, -7)
  89. celery/tests/config.py (+17, -2)
  90. celery/tests/functional/case.py (+14, -13)
  91. celery/tests/functional/tasks.py (+1, -1)
  92. celery/tests/test_app/__init__.py (+0, -0)
  93. celery/tests/test_app/test_app.py (+218, -0)
  94. celery/tests/test_app/test_app_amqp.py (+2, -2)
  95. celery/tests/test_app/test_beat.py (+79, -16)
  96. celery/tests/test_app/test_celery.py (+1, -1)
  97. celery/tests/test_app/test_loaders.py (+257, -0)
  98. celery/tests/test_app/test_routes.py (+45, -12)
  99. celery/tests/test_backends/__init__.py (+3, -3)
  100. celery/tests/test_backends/disabled_amqp.py (+0, -60)

+ 2 - 0
.gitignore

@@ -1,5 +1,6 @@
 .DS_Store
 *.pyc
+*$py.class
 *~
 .*.sw[po]
 dist/
@@ -15,3 +16,4 @@ erl_crash.dump
 *.db
 Documentation/
 .tox/
+.ropeproject/

+ 24 - 0
AUTHORS

@@ -44,3 +44,27 @@ Ordered by date of first contribution:
   Noah Kantrowitz <noah@coderanger.net>
   Gert Van Gool <gertvangool@gmail.com>
   sdcooke
+  David Cramer <dcramer@gmail.com>
+  Bryan Berg <bryan@mixedmedialabs.com>
+  Piotr Sikora <piotr.sikora@frickle.com>
+  Sam Cooke <sam@mixcloud.com>
+  John Watson <johnw@mahalo.com>
+  Martin Galpin <m@66laps.com>
+  Chris Rose <offby1@offby1.net>
+  Christopher Peplin <peplin@bueda.com>
+  David White <dpwhite2@ncsu.edu>
+  Vladimir Kryachko <vladimir.kryachko@etvnet.com>
+  Simon Josi <simon.josi@atizo.com>
+  jpellerin
+  Norman Richards <orb@nostacktrace.com>
+  Christoph Burgmer <christoph@nwebs.de>
+  Allan Caffee <allan.caffee@gmail.com>
+  Ales Zoulek <ales.zoulek@gmail.com>
+  Roberto Gaiser <gaiser@geekbunker.org>
+  Balachandran C <balachandran.c@gramvaani.org>
+  Kevin Tran <hekevintran@gmail.com>
+  Branko Čibej <brane@apache.org>
+  Jeff Terrace <jterrace@gmail.com>
+  Ryan Petrello <lists@ryanpetrello.com>
+  Marcin Kuźmiński <marcin@python-works.com>
+  Adriano Petrich <petrich@gmail.com>

File diff suppressed because it is too large
+ 1216 - 1
Changelog


+ 134 - 118
FAQ

@@ -27,7 +27,7 @@ These are some common use cases:
 
 * Running something in the background. For example, to finish the web request
   as soon as possible, then update the users page incrementally.
-  This gives the user the impression of good performane and "snappiness", even
+  This gives the user the impression of good performance and "snappiness", even
   though the real work might actually take some time.
 
 * Running something after the web request has finished.
@@ -56,10 +56,14 @@ Is Celery dependent on pickle?
 **Answer:** No.
 
 Celery can support any serialization scheme and has support for JSON/YAML and
-Pickle by default. You can even send one task using pickle, and another one
-with JSON seamlessly, this is because every task is associated with a
-content-type. The default serialization scheme is pickle because it's the most
-used, and it has support for sending complex objects as task arguments.
+Pickle by default. And as every task is associated with a content type, you
+can even send one task using pickle, and another using JSON.
+
+The default serialization format is pickle simply because it is
+convenient as it supports sending complex Python objects as task arguments.
+
+If you need to communicate with other languages you should change
+to a serialization format that is suitable for that.
 
 You can set a global default serializer, the default serializer for a
 particular Task, or even what serializer to use when sending a single task
@@ -84,8 +88,11 @@ Do I have to use AMQP/RabbitMQ?
 
 **Answer**: No.
 
-You can also use Redis or an SQL database, see `Using other
-queues`_.
+You can also use Redis, Beanstalk, CouchDB, MongoDB or an SQL database,
+see `Using other queues`_.
+
+These "virtual transports" may have limited broadcast and event functionality.
+For example, remote control commands only work with AMQP and Redis.
 
 .. _`Using other queues`:
     http://ask.github.com/celery/tutorials/otherqueues.html
@@ -112,7 +119,7 @@ language has an AMQP client, there shouldn't be much work to create a worker
 in your language.  A Celery worker is just a program connecting to the broker
 to process messages.
 
-Also, there's another way to be language indepedent, and that is to use REST
+Also, there's another way to be language independent, and that is to use REST
 tasks, instead of your tasks being functions, they're URLs. With this
 information you can even create simple web servers that enable preloading of
 code. See: `User Guide: Remote Tasks`_.
@@ -130,14 +137,14 @@ Troubleshooting
 MySQL is throwing deadlock errors, what can I do?
 -------------------------------------------------
 
-**Answer:** MySQL has default isolation level set to ``REPEATABLE-READ``,
-if you don't really need that, set it to ``READ-COMMITTED``.
+**Answer:** MySQL has default isolation level set to `REPEATABLE-READ`,
+if you don't really need that, set it to `READ-COMMITTED`.
 You can do that by adding the following to your :file:`my.cnf`::
 
     [mysqld]
     transaction-isolation = READ-COMMITTED
 
-For more information about InnoDBs transaction model see `MySQL - The InnoDB
+For more information about InnoDB's transaction model see `MySQL - The InnoDB
 Transaction Model and Locking`_ in the MySQL user manual.
 
 (Thanks to Honza Kral and Anton Tsigularov for this solution)
@@ -168,7 +175,7 @@ most systems), it usually contains a message describing the reason.
 Why won't celeryd run on FreeBSD?
 ---------------------------------
 
-**Answer:** multiprocessing.Pool requires a working POSIX semaphore
+**Answer:** The multiprocessing pool requires a working POSIX semaphore
 implementation which isn't enabled in FreeBSD by default. You have to enable
 POSIX semaphores in the kernel and manually recompile multiprocessing.
 
@@ -178,7 +185,7 @@ http://www.playingwithwire.com/2009/10/how-to-get-celeryd-to-work-on-freebsd/
 
 .. _faq-duplicate-key-errors:
 
-I'm having ``IntegrityError: Duplicate Key`` errors. Why?
+I'm having `IntegrityError: Duplicate Key` errors. Why?
 ---------------------------------------------------------
 
 **Answer:** See `MySQL is throwing deadlock errors, what can I do?`_.
@@ -203,7 +210,7 @@ One reason that the queue is never emptied could be that you have a stale
 worker process taking the messages hostage. This could happen if celeryd
 wasn't properly shut down.
 
-When a message is recieved by a worker the broker waits for it to be
+When a message is received by a worker the broker waits for it to be
 acknowledged before marking the message as processed. The broker will not
 re-send that message to another consumer until the consumer is shut down
 properly.
@@ -232,7 +239,7 @@ task manually:
     >>> from myapp.tasks import MyPeriodicTask
     >>> MyPeriodicTask.delay()
 
-Watch celeryds logfile to see if it's able to find the task, or if some
+Watch celeryd's log file to see if it's able to find the task, or if some
 other error is happening.
 
 .. _faq-periodic-task-does-not-run:
@@ -247,16 +254,25 @@ Why won't my Periodic Task run?
 How do I discard all waiting tasks?
 ------------------------------------
 
-**Answer:** Use :func:`~celery.task.control.discard_all`, like this:
+**Answer:** You can use celeryctl to purge all configured task queues::
+
+        $ celeryctl purge
+
+or programmatically::
+
+        >>> from celery.task.control import discard_all
+        >>> discard_all()
+        1753
 
-    >>> from celery.task.control import discard_all
-    >>> discard_all()
-    1753
+If you only want to purge messages from a specific queue
+you have to use the AMQP API or the :program:`camqadm` utility::
+
+    $ camqadm queue.purge <queue name>
 
 The number 1753 is the number of messages deleted.
 
 You can also start :mod:`~celery.bin.celeryd` with the
-:option:`--discard` argument which will accomplish the same thing.
+:option:`--purge` argument, to purge messages when the worker starts.
 
 .. _faq-messages-left-after-purge:
 
@@ -268,7 +284,7 @@ as they are actually executed. After the worker has received a task, it will
 take some time until it is actually executed, especially if there are a lot
 of tasks already waiting for execution. Messages that are not acknowledged are
 held on to by the worker until it closes the connection to the broker (AMQP
-server). When that connection is closed (e.g because the worker was stopped)
+server). When that connection is closed (e.g. because the worker was stopped)
 the tasks will be re-sent by the broker to the next available worker (or the
 same worker when it has been restarted), so to properly purge the queue of
 waiting tasks you have to stop all the workers, and then discard the tasks
@@ -284,7 +300,7 @@ Results
 How do I get the result of a task if I have the ID that points there?
 ----------------------------------------------------------------------
 
-**Answer**: Use ``Task.AsyncResult``::
+**Answer**: Use `Task.AsyncResult`::
 
     >>> result = MyTask.AsyncResult(task_id)
     >>> result.get()
@@ -299,6 +315,48 @@ If you need to specify a custom result backend you should use
     >>> result = BaseAsyncResult(task_id, backend=...)
     >>> result.get()
 
+.. _faq-security:
+
+Security
+========
+
+Isn't using `pickle` a security concern?
+----------------------------------------
+
+**Answer**: Yes, indeed it is.
+
+You are right to have a security concern, as this can indeed be a real issue.
+It is essential that you protect against unauthorized
+access to your broker, databases and other services transmitting pickled
+data.
+
+For the task messages you can set the :setting:`CELERY_TASK_SERIALIZER`
+setting to "json" or "yaml" instead of pickle. There is
+currently no alternative solution for task results (but writing a
+custom result backend using JSON is a simple task).
+
+Note that this is not just something you should be aware of with Celery; for
+example, Django also uses pickle for its cache client.
+
+Can messages be encrypted?
+--------------------------
+
+**Answer**: Some AMQP brokers support using SSL (including RabbitMQ).
+You can enable this using the :setting:`BROKER_USE_SSL` setting.
+
+It is also possible to add additional encryption and security to messages;
+if you have a need for this, you should contact the :ref:`mailing-list`.
+
+Is it safe to run :program:`celeryd` as root?
+---------------------------------------------
+
+**Answer**: No!
+
+We're not currently aware of any security issues, but it would
+be incredibly naive to assume that they don't exist, so running
+the Celery services (:program:`celeryd`, :program:`celerybeat`,
+:program:`celeryev`, etc) as an unprivileged user is recommended.
+
 .. _faq-brokers:
 
 Brokers
@@ -307,7 +365,7 @@ Brokers
 Why is RabbitMQ crashing?
 -------------------------
 
-RabbitMQ will crash if it runs out of memory. This will be fixed in a
+**Answer:** RabbitMQ will crash if it runs out of memory. This will be fixed in a
 future release of RabbitMQ. please refer to the RabbitMQ FAQ:
 http://www.rabbitmq.com/faq.html#node-runs-out-of-memory
 
@@ -320,10 +378,10 @@ http://www.rabbitmq.com/faq.html#node-runs-out-of-memory
     If you're still running an older version of RabbitMQ and experience
     crashes, then please upgrade!
 
-Some common Celery misconfigurations can eventually lead to a crash
-on older version of RabbitMQ. Even if it doesn't crash, these
-misconfigurations can still consume a lot of resources, so it is very
-important that you are aware of them.
+Misconfiguration of Celery can eventually lead to a crash
+on older versions of RabbitMQ. Even if it doesn't crash, this
+can still consume a lot of resources, so it is very
+important that you are aware of the common pitfalls.
 
 * Events.
 
@@ -340,7 +398,7 @@ as a message. If you don't collect these results, they will build up and
 RabbitMQ will eventually run out of memory.
 
 If you don't use the results for a task, make sure you set the
-``ignore_result`` option:
+`ignore_result` option:
 
 .. code-block python
 
@@ -367,69 +425,23 @@ Results can also be disabled globally using the
 Can I use Celery with ActiveMQ/STOMP?
 -------------------------------------
 
-**Answer**: Yes, but this is somewhat experimental for now.
-It is working ok in a test configuration, but it has not
-been tested in production. If you have any problems
-using STOMP with Celery, please report an issue here::
+**Answer**: No.  It used to be supported by Carrot,
+but is not currently supported in Kombu.
 
-    http://github.com/ask/celery/issues/
+.. _faq-non-amqp-missing-features:
 
-The STOMP carrot backend requires the `stompy`_ library::
-
-    $ pip install stompy
-    $ cd python-stomp
-    $ sudo python setup.py install
-    $ cd ..
-
-.. _`stompy`: http://pypi.python.org/pypi/stompy
-
-In this example we will use a queue called ``celery`` which we created in
-the ActiveMQ web admin interface.
-
-**Note**: When using ActiveMQ the queue name needs to have ``"/queue/"``
-prepended to it. i.e. the queue ``celery`` becomes ``/queue/celery``.
-
-Since STOMP doesn't have exchanges and the routing capabilities of AMQP,
-you need to set ``exchange`` name to the same as the queue name. This is
-a minor inconvenience since carrot needs to maintain the same interface
-for both AMQP and STOMP.
-
-Use the following settings in your :file:`celeryconfig.py`/
-django :file:`settings.py`:
-
-.. code-block:: python
-
-    # Use the stomp carrot backend.
-    CARROT_BACKEND = "stomp"
-
-    # STOMP hostname and port settings.
-    BROKER_HOST = "localhost"
-    BROKER_PORT = 61613
-
-    # The queue name to use (the exchange *must* be set to the
-    # same as the queue name when using STOMP)
-    CELERY_DEFAULT_QUEUE = "/queue/celery"
-    CELERY_DEFAULT_EXCHANGE = "/queue/celery" 
-
-    CELERY_QUEUES = {
-        "/queue/celery": {"exchange": "/queue/celery"}
-    }
-
-.. _faq-stomp-missing-features:
-
-What features are not supported when using ghettoq/STOMP?
----------------------------------------------------------
-
-This is a (possible incomplete) list of features not available when
-using the STOMP backend:
+What features are not supported when not using an AMQP broker?
+--------------------------------------------------------------
 
-    * routing keys
+This is an incomplete list of features not available when
+using the virtual transports:
 
-    * exchange types (direct, topic, headers, etc)
+    * Remote control commands (supported only by Redis).
 
-    * immediate
+    * Monitoring with events may not work in all virtual transports.
 
-    * mandatory
+    * The `headers` and `fanout` exchange types
+        (`fanout` is supported by Redis).
 
 .. _faq-tasks:
 
@@ -461,27 +473,30 @@ that has an AMQP client.
 How can I get the task id of the current task?
 ----------------------------------------------
 
-**Answer**: Celery does set some default keyword arguments if the task
-accepts them (you can accept them by either using ``**kwargs``, or list them
-specifically)::
+**Answer**: The current id and more is available in the task request::
 
     @task
-    def mytask(task_id=None):
-        cache.set(task_id, "Running")
+    def mytask():
+        cache.set(mytask.request.id, "Running")
 
-The default keyword arguments are documented here:
-http://celeryq.org/docs/userguide/tasks.html#default-keyword-arguments
+For more information see :ref:`task-request-info`.
 
 .. _faq-custom-task-ids:
 
 Can I specify a custom task_id?
 -------------------------------
 
-**Answer**: Yes. Use the ``task_id`` argument to
+**Answer**: Yes.  Use the `task_id` argument to
 :meth:`~celery.execute.apply_async`::
 
     >>> task.apply_async(args, kwargs, task_id="...")
 
+
+Can I use decorators with tasks?
+--------------------------------
+
+**Answer**: Yes.  But please see note at :ref:`tasks-decorating`.
+
 .. _faq-natural-task-ids:
 
 Can I use natural task ids?
@@ -526,7 +541,7 @@ See :doc:`userguide/tasksets` for more information.
 
 Can I cancel the execution of a task?
 -------------------------------------
-**Answer**: Yes. Use ``result.revoke``::
+**Answer**: Yes. Use `result.revoke`::
 
     >>> result = add.apply_async(args=[2, 2], countdown=120)
     >>> result.revoke()
@@ -542,17 +557,17 @@ Why aren't my remote control commands received by all workers?
 --------------------------------------------------------------
 
 **Answer**: To receive broadcast remote control commands, every worker node
-uses its hostname to create a unique queue name to listen to,
-so if you have more than one worker with the same hostname, the
-control commands will be recieved in round-robin between them.
+uses its host name to create a unique queue name to listen to,
+so if you have more than one worker with the same host name, the
+control commands will be received in round-robin between them.
 
-To work around this you can explicitly set the hostname for every worker
+To work around this you can explicitly set the host name for every worker
 using the :option:`--hostname` argument to :mod:`~celery.bin.celeryd`::
 
     $ celeryd --hostname=$(hostname).1
     $ celeryd --hostname=$(hostname).2
 
-etc, etc.
+etc., etc...
 
 .. _faq-task-routing:
 
@@ -569,8 +584,9 @@ See :doc:`userguide/routing` for more information.
 Can I change the interval of a periodic task at runtime?
 --------------------------------------------------------
 
-**Answer**: Yes. You can override ``PeriodicTask.is_due`` or turn
-``PeriodicTask.run_every`` into a property:
+**Answer**: Yes. You can use the Django database scheduler, or you can
+override `PeriodicTask.is_due` or turn `PeriodicTask.run_every` into a
+property:
 
 .. code-block:: python
 
@@ -594,7 +610,7 @@ RabbitMQ doesn't implement them yet.
 The usual way to prioritize work in Celery, is to route high priority tasks
 to different servers. In the real world this may actually work better than per message
 priorities. You can use this in combination with rate limiting to achieve a
-highly performant system.
+highly responsive system.
 
 .. _faq-acks_late-vs-retry:
 
@@ -604,11 +620,11 @@ Should I use retry or acks_late?
 **Answer**: Depends. It's not necessarily one or the other, you may want
 to use both.
 
-``Task.retry`` is used to retry tasks, notably for expected errors that
-is catchable with the ``try:`` block. The AMQP transaction is not used
-for these errors: **if the task raises an exception it is still acked!**.
+`Task.retry` is used to retry tasks, notably for expected errors that
+are catchable with the `try:` block. The AMQP transaction is not used
+for these errors: **if the task raises an exception it is still acknowledged!**.
 
-The ``acks_late`` setting would be used when you need the task to be
+The `acks_late` setting would be used when you need the task to be
 executed again if the worker (for some reason) crashes mid-execution.
 It's important to note that the worker is not known to crash, and if
 it does it is usually an unrecoverable error that requires human
@@ -634,11 +650,11 @@ It's a good default, users who require it and know what they
 are doing can still enable acks_late (and in the future hopefully
 use manual acknowledgement)
 
-In addition ``Task.retry`` has features not available in AMQP
+In addition `Task.retry` has features not available in AMQP
 transactions: delay between retries, max retries, etc.
 
-So use retry for Python errors, and if your task is reentrant
-combine that with ``acks_late`` if that level of reliability
+So use retry for Python errors, and if your task is idempotent
+combine that with `acks_late` if that level of reliability
 is required.
 
 .. _faq-schedule-at-specific-time:
@@ -648,24 +664,24 @@ Can I schedule tasks to execute at a specific time?
 
 .. module:: celery.task.base
 
-**Answer**: Yes. You can use the ``eta`` argument of :meth:`Task.apply_async`.
+**Answer**: Yes. You can use the `eta` argument of :meth:`Task.apply_async`.
 
 Or to schedule a periodic task at a specific time, use the
-:class:`celery.task.schedules.crontab` schedule behavior:
+:class:`celery.schedules.crontab` schedule behavior:
 
 
 .. code-block:: python
 
     from celery.task.schedules import crontab
-    from celery.decorators import periodic_task
+    from celery.task import periodic_task
 
     @periodic_task(run_every=crontab(hours=7, minute=30, day_of_week="mon"))
     def every_monday_morning():
-        print("This is run every monday morning at 7:30")
+        print("This is run every Monday morning at 7:30")
 
 .. _faq-safe-worker-shutdown:
 
-How do I shut down ``celeryd`` safely?
+How do I shut down `celeryd` safely?
 --------------------------------------
 
 **Answer**: Use the :sig:`TERM` signal, and the worker will finish all currently
@@ -675,7 +691,7 @@ You should never stop :mod:`~celery.bin.celeryd` with the :sig:`KILL` signal
 (:option:`-9`), unless you've tried :sig:`TERM` a few times and waited a few
 minutes to let it get a chance to shut down.  As if you do tasks may be
 terminated mid-execution, and they will not be re-run unless you have the
-``acks_late`` option set (``Task.acks_late`` / :setting:`CELERY_ACKS_LATE`).
+`acks_late` option set (`Task.acks_late` / :setting:`CELERY_ACKS_LATE`).
 
 .. seealso::
 
@@ -708,14 +724,14 @@ See http://bit.ly/bo9RSw
 
 .. _faq-windows-worker-embedded-beat:
 
-The ``-B`` / ``--beat`` option to celeryd doesn't work?
+The `-B` / `--beat` option to celeryd doesn't work?
 ----------------------------------------------------------------
-**Answer**: That's right. Run ``celerybeat`` and ``celeryd`` as separate
+**Answer**: That's right. Run `celerybeat` and `celeryd` as separate
 services instead.
 
 .. _faq-windows-django-settings:
 
-``django-celery`` can’t find settings?
+`django-celery` can't find settings?
 --------------------------------------
 
 **Answer**: You need to specify the :option:`--settings` argument to

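The FAQ answers above on pickle security, result buildup and broker SSL each point
at a configuration setting; a minimal :file:`celeryconfig.py` sketch combining them
(the values shown are illustrative, not part of this commit) could look like::

    # Use JSON task messages instead of pickle (see the Security section).
    CELERY_TASK_SERIALIZER = "json"

    # Do not store task results unless they are actually consumed, so
    # unread results cannot pile up in RabbitMQ.
    CELERY_IGNORE_RESULT = True

    # Enable SSL towards the broker ("Can messages be encrypted?").
    BROKER_USE_SSL = True
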
+ 7 - 7
INSTALL

@@ -1,19 +1,19 @@
-Installing celery
+Installing Celery
 =================
 
-You can install ``celery`` either via the Python Package Index (PyPI)
+You can install Celery either via the Python Package Index (PyPI)
 or from source.
 
-To install using ``pip``,::
+To install using `pip`::
 
-    $ pip install celery
+    $ pip install Celery
 
-To install using ``easy_install``,::
+To install using `easy_install`::
 
-    $ easy_install celery
+    $ easy_install Celery
 
 If you have downloaded a source tarball you can install it
-by doing the following,::
+by doing the following::
 
     $ python setup.py build
     # python setup.py install # as root

+ 1 - 1
LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2009-2010, Ask Solem and contributors.
+Copyright (c) 2009-2011, Ask Solem and contributors.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

+ 59 - 41
README.rst

@@ -4,7 +4,7 @@
 
 .. image:: http://cloud.github.com/downloads/ask/celery/celery_favicon_128.png
 
-:Version: 2.1.1
+:Version: 2.3.0a1
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/ask/celery/
@@ -20,16 +20,18 @@ distributed message passing.  It is focused on real-time operation,
 but supports scheduling as well.
 
 The execution units, called tasks, are executed concurrently on one or
-more worker nodes.  Tasks can execute asynchronously (in the background) or
-synchronously (wait until ready).
+more worker nodes using multiprocessing, `Eventlet`_ or `gevent`_.  Tasks can
+execute asynchronously (in the background) or synchronously
+(wait until ready).
 
-Celery is already used in production to process millions of tasks a day.
+Celery is used in production systems to process millions of tasks a day.
 
 Celery is written in Python, but the protocol can be implemented in any
 language.  It can also `operate with other languages using webhooks`_.
 
-The recommended message broker is `RabbitMQ`_, but support for `Redis`_ and
-databases (`SQLAlchemy`_) is also available.
+The recommended message broker is `RabbitMQ`_, but limited support for
+`Redis`_, `Beanstalk`_, `MongoDB`_, `CouchDB`_ and
+databases (using `SQLAlchemy`_ or the `Django ORM`_) is also available.
 
 Celery is easy to integrate with `Django`_, `Pylons`_ and `Flask`_, using
 the `django-celery`_, `celery-pylons`_ and `Flask-Celery`_ add-on packages.
@@ -37,11 +39,17 @@ the `django-celery`_, `celery-pylons`_ and `Flask-Celery`_ add-on packages.
 .. _`RabbitMQ`: http://www.rabbitmq.com/
 .. _`Redis`: http://code.google.com/p/redis/
 .. _`SQLAlchemy`: http://www.sqlalchemy.org/
-.. _`Django`: http://djangoproject.org/
+.. _`Django`: http://djangoproject.com/
+.. _`Django ORM`: http://djangoproject.com/
+.. _`Eventlet`: http://eventlet.net/
+.. _`gevent`: http://gevent.org/
+.. _`Beanstalk`: http://kr.github.com/beanstalkd/
+.. _`MongoDB`: http://mongodb.org/
+.. _`CouchDB`: http://couchdb.apache.org/
 .. _`Pylons`: http://pylonshq.com/
 .. _`Flask`: http://flask.pocoo.org/
 .. _`django-celery`: http://pypi.python.org/pypi/django-celery
-.. _`celery-pylons`: http://bitbucket.org/ianschenck/celery-pylons
+.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
 .. _`Flask-Celery`: http://github.com/ask/flask-celery/
 .. _`operate with other languages using webhooks`:
     http://ask.github.com/celery/userguide/remote-tasks.html
@@ -58,8 +66,8 @@ This is a high level overview of the architecture.
 
 .. image:: http://cloud.github.com/downloads/ask/celery/Celery-Overview-v4.jpg
 
-The broker delivers tasks to the worker servers.
-A worker server is a networked machine running ``celeryd``.  This can be one or
+The broker delivers tasks to the worker nodes.
+A worker node is a networked machine running `celeryd`.  This can be one or
 more machines depending on the workload.
 
 The result of the task can be stored for later retrieval (called its
@@ -74,7 +82,7 @@ You probably want to see some code by now, so here's an example task
 adding two numbers:
 ::
 
-    from celery.decorators import task
+    from celery.task import task
 
     @task
     def add(x, y):
@@ -94,33 +102,37 @@ Features
 ========
 
     +-----------------+----------------------------------------------------+
-    | Messaging       | Supported brokers include `RabbitMQ`_, `Stomp`_,   |
-    |                 | `Redis`_, and most common SQL databases.           |
+    | Messaging       | Supported brokers include `RabbitMQ`_, `Redis`_,   |
+    |                 | `Beanstalk`_, `MongoDB`_, `CouchDB`_, and popular  |
+    |                 | SQL databases.                                     |
     +-----------------+----------------------------------------------------+
-    | Robust          | Using `RabbitMQ`, celery survives most error       |
+    | Fault-tolerant  | Excellent configurable error recovery when using   |
+    |                 | `RabbitMQ`_ to survive most error                  |
     |                 | scenarios, and your tasks will never be lost.      |
     +-----------------+----------------------------------------------------+
     | Distributed     | Runs on one or more machines. Supports             |
-    |                 | `clustering`_ when used in combination with        |
-    |                 | `RabbitMQ`_. You can set up new workers without    |
-    |                 | central configuration (e.g. use your dads laptop   |
-    |                 | while the queue is temporarily overloaded).        |
+    |                 | broker `clustering`_ and `HA`_ when used in        |
+    |                 | combination with `RabbitMQ`_.  You can set up new  |
+    |                 | workers without central configuration (e.g. use    |
+    |                 | your grandma's laptop to help if the queue is      |
+    |                 | temporarily congested).                            |
     +-----------------+----------------------------------------------------+
-    | Concurrency     | Tasks are executed in parallel using the           |
-    |                 | ``multiprocessing`` module.                        |
+    | Concurrency     | Concurrency is achieved by using multiprocessing,  |
+    |                 | `Eventlet`_, `gevent`_ or a mix of these.          |
     +-----------------+----------------------------------------------------+
     | Scheduling      | Supports recurring tasks like cron, or specifying  |
     |                 | an exact date or countdown for when after the task |
     |                 | should be executed.                                |
     +-----------------+----------------------------------------------------+
-    | Performance     | Able to execute tasks while the user waits.        |
+    | Latency         | Low latency means you are able to execute tasks    |
+    |                 | *while the user is waiting*.                       |
     +-----------------+----------------------------------------------------+
     | Return Values   | Task return values can be saved to the selected    |
     |                 | result store backend. You can wait for the result, |
     |                 | retrieve it later, or ignore it.                   |
     +-----------------+----------------------------------------------------+
     | Result Stores   | Database, `MongoDB`_, `Redis`_, `Tokyo Tyrant`,    |
-    |                 | `AMQP`_ (high performance).                        |
+    |                 | `Cassandra`, or `AMQP`_ (message notification).    |
     +-----------------+----------------------------------------------------+
     | Webhooks        | Your tasks can also be HTTP callbacks, enabling    |
     |                 | cross-language communication.                      |
@@ -130,11 +142,15 @@ Features
     |                 | Rate limits can be set for each task type, or      |
     |                 | globally for all.                                  |
     +-----------------+----------------------------------------------------+
-    | Routing         | Using AMQP you can route tasks arbitrarily to      |
-    |                 | different workers.                                 |
+    | Routing         | Using AMQP's flexible routing model you can route  |
+    |                 | tasks to different workers, or select different    |
+    |                 | message topologies, by configuration or even at    |
+    |                 | runtime.                                           |
     +-----------------+----------------------------------------------------+
-    | Remote-control  | You can rate limit and delete (revoke) tasks       |
-    |                 | remotely.                                          |
+    | Remote-control  | Worker nodes can be controlled from remote by      |
+    |                 | using broadcast messaging.  A range of built-in    |
+    |                 | commands exist in addition to the ability to       |
+    |                 | easily define your own. (AMQP/Redis only)          |
     +-----------------+----------------------------------------------------+
     | Monitoring      | You can capture everything happening with the      |
     |                 | workers in real-time by subscribing to events.     |
@@ -165,18 +181,15 @@ Features
     |                 | enabling the ability to poll task status using     |
     |                 | Ajax.                                              |
     +-----------------+----------------------------------------------------+
-    | Error e-mails   | Can be configured to send e-mails to the           |
+    | Error Emails    | Can be configured to send emails to the            |
     |                 | administrators when tasks fails.                   |
     +-----------------+----------------------------------------------------+
-    | Supervised      | Pool workers are supervised and automatically      |
-    |                 | replaced if they crash.                            |
-    +-----------------+----------------------------------------------------+
 
 
 .. _`clustering`: http://www.rabbitmq.com/clustering.html
+.. _`HA`: http://www.rabbitmq.com/pacemaker.html
 .. _`AMQP`: http://www.amqp.org/
 .. _`Stomp`: http://stomp.codehaus.org/
-.. _`MongoDB`: http://www.mongodb.org/
 .. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/
 
 .. _celery-documentation:
@@ -194,23 +207,23 @@ is hosted at Github.
 Installation
 ============
 
-You can install ``celery`` either via the Python Package Index (PyPI)
+You can install Celery either via the Python Package Index (PyPI)
 or from source.
 
-To install using ``pip``,::
+To install using `pip`::
 
-    $ pip install celery
+    $ pip install Celery
 
-To install using ``easy_install``,::
+To install using `easy_install`::
 
-    $ easy_install celery
+    $ easy_install Celery
 
 .. _celery-installing-from-source:
 
 Downloading and installing from source
 --------------------------------------
 
-Download the latest version of ``celery`` from
+Download the latest version of Celery from
 http://pypi.python.org/pypi/celery/
 
 You can install it by doing the following,::
@@ -270,23 +283,28 @@ Wiki
 
 http://wiki.github.com/ask/celery/
 
-.. _contributing:
+.. _contributing-short:
 
 Contributing
 ============
 
-Development of ``celery`` happens at Github: http://github.com/ask/celery
+Development of `celery` happens at Github: http://github.com/ask/celery
 
 You are highly encouraged to participate in the development
-of ``celery``. If you don't like Github (for some reason) you're welcome
+of `celery`. If you don't like Github (for some reason) you're welcome
 to send regular patches.
 
+Be sure to also read the `Contributing to Celery`_ section in the
+documentation.
+
+.. _`Contributing to Celery`: http://ask.github.com/celery/contributing.html
+
 .. _license:
 
 License
 =======
 
-This software is licensed under the ``New BSD License``. See the ``LICENSE``
+This software is licensed under the `New BSD License`. See the ``LICENSE``
 file in the top distribution directory for the full license text.
 
 .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround

+ 0 - 9
bin/camqadm

@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-import sys
-if '' not in sys.path:
-    sys.path.insert(0, '')
-from celery.bin import camqadm
-
-if __name__ == "__main__":
-    options, values = camqadm.parse_options(sys.argv[1:])
-    sys.exit(not camqadm.camqadm(*values, **vars(options)))

+ 0 - 8
bin/celerybeat

@@ -1,8 +0,0 @@
-#!/usr/bin/env python
-from celery.bin import celerybeat
-
-def main():
-    celerybeat.main()
-
-if __name__ == "__main__":
-    main()

+ 0 - 8
bin/celeryctl

@@ -1,8 +0,0 @@
-#!/usr/bin/env python
-import sys
-if '' not in sys.path:
-    sys.path.insert(0, '')
-from celery.bin import celeryctl
-
-if __name__ == "__main__":
-    celeryctl.main()

+ 0 - 31
bin/celeryd

@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-from celery.bin import celeryd
-
-WINDOWS_MESSAGE = """
-
-The celeryd command does not work on Windows.
-
-Instead, please use:
-
-    ..> python -m celery.bin.celeryd
-
-You can also supply arguments:
-
-    ..> python -m celery.bin.celeryd --concurrency=10 --loglevel=DEBUG
-
-
-"""
-
-def main():
-    if sys.platform == "win32":
-        sys.stderr.write(WINDOWS_MESSAGE)
-        sys.exit()
-
-    celeryd.main()
-
-if __name__ == "__main__":
-    import multiprocessing
-    multiprocessing.freeze_support()
-    main()

+ 0 - 5
bin/celeryd-multi

@@ -1,5 +0,0 @@
-#!/usr/bin/env python
-from celery.bin.celeryd_multi import main
-
-if __name__ == "__main__":
-    main()

+ 0 - 5
bin/celeryev

@@ -1,5 +0,0 @@
-#!/usr/bin/env python
-from celery.bin import celeryev
-
-if __name__ == "__main__":
-    celeryev.main()

+ 30 - 1
celery/__init__.py

@@ -1,9 +1,38 @@
 """Distributed Task Queue"""
+# :copyright: (c) 2009 - 2011 by Ask Solem.
+# :license:   BSD, see LICENSE for more details.
 
-VERSION = (2, 1, 1)
+import os
+import sys
+
+VERSION = (2, 3, 0, "a1")
 
 __version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
 __author__ = "Ask Solem"
 __contact__ = "ask@celeryproject.org"
 __homepage__ = "http://celeryproject.org"
 __docformat__ = "restructuredtext"
+
+if sys.version_info < (2, 5):
+    import warnings
+    warnings.warn(DeprecationWarning("""
+
+Python 2.4 support is deprecated and only versions 2.5, 2.6, 2.7+
+will be supported starting from Celery version 2.3.
+
+
+"""))
+
+
+def Celery(*args, **kwargs):
+    from celery.app import App
+    return App(*args, **kwargs)
+
+if not os.environ.get("CELERY_NO_EVAL", False):
+    from celery.local import LocalProxy
+
+    def _get_current_app():
+        from celery.app import current_app
+        return current_app()
+
+    current_app = LocalProxy(_get_current_app)

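The `Celery` factory and the lazy `current_app` proxy introduced above are typically
used together; a minimal sketch (the `add` task is a hypothetical example and not
part of this commit)::

    from celery import Celery, current_app

    app = Celery("proj")      # returns a celery.app.App instance

    @app.task                 # App.task decorator from celery/app/__init__.py
    def add(x, y):
        return x + y

    # current_app is a LocalProxy resolving to the most recently created
    # application (assuming set_as_current is left at its default).
    print(current_app)
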
+ 247 - 0
celery/app/__init__.py

@@ -0,0 +1,247 @@
+"""
+celery.app
+==========
+
+Celery Application.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
+import os
+import threading
+
+from inspect import getargspec
+
+from kombu.utils import cached_property
+
+from celery import registry
+from celery.app import base
+from celery.utils import instantiate
+from celery.utils.functional import wraps
+
+# Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
+# set this, so it will always contain the last instantiated app,
+# and is the default app returned by :func:`app_or_default`.
+_tls = threading.local()
+_tls.current_app = None
+
+
+class App(base.BaseApp):
+    """Celery Application.
+
+    :param main: Name of the main module if running as `__main__`.
+    :keyword loader: The loader class, or the name of the loader class to use.
+                     Default is :class:`celery.loaders.app.AppLoader`.
+    :keyword backend: The result store backend class, or the name of the
+                      backend class to use. Default is the value of the
+                      :setting:`CELERY_RESULT_BACKEND` setting.
+    :keyword amqp: AMQP object or class name.
+    :keyword events: Events object or class name.
+    :keyword log: Log object or class name.
+    :keyword control: Control object or class name.
+    :keyword set_as_current:  Make this the global current app.
+
+    """
+
+    def set_current(self):
+        _tls.current_app = self
+
+    def on_init(self):
+        if self.set_as_current:
+            self.set_current()
+
+    def create_task_cls(self):
+        """Creates a base task class using default configuration
+        taken from this app."""
+        conf = self.conf
+
+        from celery.task.base import BaseTask
+
+        class Task(BaseTask):
+            abstract = True
+            app = self
+            backend = self.backend
+            exchange_type = conf.CELERY_DEFAULT_EXCHANGE_TYPE
+            delivery_mode = conf.CELERY_DEFAULT_DELIVERY_MODE
+            send_error_emails = conf.CELERY_SEND_TASK_ERROR_EMAILS
+            error_whitelist = conf.CELERY_TASK_ERROR_WHITELIST
+            serializer = conf.CELERY_TASK_SERIALIZER
+            rate_limit = conf.CELERY_DEFAULT_RATE_LIMIT
+            track_started = conf.CELERY_TRACK_STARTED
+            acks_late = conf.CELERY_ACKS_LATE
+            ignore_result = conf.CELERY_IGNORE_RESULT
+            store_errors_even_if_ignored = \
+                conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED
+            accept_magic_kwargs = self.accept_magic_kwargs
+        Task.__doc__ = BaseTask.__doc__
+
+        return Task
+
+    def Worker(self, **kwargs):
+        """Create new :class:`~celery.apps.worker.Worker` instance."""
+        return instantiate("celery.apps.worker.Worker", app=self, **kwargs)
+
+    def Beat(self, **kwargs):
+        """Create new :class:`~celery.apps.beat.Beat` instance."""
+        return instantiate("celery.apps.beat.Beat", app=self, **kwargs)
+
+    def TaskSet(self, *args, **kwargs):
+        """Create new :class:`~celery.task.sets.TaskSet`."""
+        from celery.task.sets import TaskSet
+        kwargs["app"] = self
+        return TaskSet(*args, **kwargs)
+
+    def worker_main(self, argv=None):
+        """Run :program:`celeryd` using `argv`.  Uses :data:`sys.argv`
+        if `argv` is not specified."""
+        from celery.bin.celeryd import WorkerCommand
+        return WorkerCommand(app=self).execute_from_commandline(argv)
+
+    def task(self, *args, **options):
+        """Decorator to create a task class out of any callable.
+
+        .. admonition:: Examples
+
+            .. code-block:: python
+
+                @task()
+                def refresh_feed(url):
+                    return Feed.objects.get(url=url).refresh()
+
+            With setting extra options and using retry.
+
+            .. code-block:: python
+
+                @task(exchange="feeds")
+                def refresh_feed(url, **kwargs):
+                    try:
+                        return Feed.objects.get(url=url).refresh()
+                    except socket.error, exc:
+                        refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc)
+
+            Calling the resulting task:
+
+                >>> refresh_feed("http://example.com/rss") # Regular
+                <Feed: http://example.com/rss>
+                >>> refresh_feed.delay("http://example.com/rss") # Async
+                <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
+
+        """
+
+        def inner_create_task_cls(**options):
+
+            def _create_task_cls(fun):
+                options["app"] = self
+                options.setdefault("accept_magic_kwargs", False)
+                base = options.pop("base", None) or self.Task
+
+                @wraps(fun, assigned=("__module__", "__name__"))
+                def run(self, *args, **kwargs):
+                    return fun(*args, **kwargs)
+
+                # Save the argspec for this task so we can recognize
+                # which default task kwargs we're going to pass to it later.
+                # (this happens in celery.utils.fun_takes_kwargs)
+                run.argspec = getargspec(fun)
+
+                cls_dict = dict(options, run=run,
+                                __module__=fun.__module__,
+                                __doc__=fun.__doc__)
+                T = type(fun.__name__, (base, ), cls_dict)()
+                return registry.tasks[T.name]             # global instance.
+
+            return _create_task_cls
+
+        if len(args) == 1 and callable(args[0]):
+            return inner_create_task_cls(**options)(*args)
+        return inner_create_task_cls(**options)
+
+    @cached_property
+    def Task(self):
+        """Default Task base class for this application."""
+        return self.create_task_cls()
+
+    def __repr__(self):
+        return "<Celery: %s:0x%x>" % (self.main or "__main__", id(self), )
+
+    def __reduce__(self):
+        # Reduce only pickles the configuration changes,
+        # so the default configuration doesn't have to be passed
+        # between processes.
+        return (_unpickle_app, (self.__class__,
+                                self.main,
+                                self.conf.changes,
+                                self.loader_cls,
+                                self.backend_cls,
+                                self.amqp_cls,
+                                self.events_cls,
+                                self.log_cls,
+                                self.control_cls,
+                                self.accept_magic_kwargs))
+
+
+def _unpickle_app(cls, main, changes, loader, backend, amqp,
+        events, log, control, accept_magic_kwargs):
+    app = cls(main, loader=loader, backend=backend, amqp=amqp,
+                    events=events, log=log, control=control,
+                    set_as_current=False,
+                    accept_magic_kwargs=accept_magic_kwargs)
+    app.conf.update(changes)
+    return app
+
+
+#: The "default" loader is the default loader used by old applications.
+default_loader = os.environ.get("CELERY_LOADER") or "default"
+
+#: Global fallback app instance.
+default_app = App("default", loader=default_loader,
+                  set_as_current=False, accept_magic_kwargs=True)
+
+
+def current_app():
+    return getattr(_tls, "current_app", None) or default_app
+
+
+def _app_or_default(app=None):
+    """Returns the app provided or the default app if none.
+
+    The environment variable :envvar:`CELERY_TRACE_APP` is used to
+    trace app leaks.  When enabled an exception is raised if there
+    is no active app.
+
+    """
+    if app is None:
+        return getattr(_tls, "current_app", None) or default_app
+    return app
+
+
+def _app_or_default_trace(app=None):  # pragma: no cover
+    from traceback import print_stack
+    from multiprocessing import current_process
+    if app is None:
+        if getattr(_tls, "current_app", None):
+            print("-- RETURNING TO CURRENT APP --")
+            print_stack()
+            return _tls.current_app
+        if current_process()._name == "MainProcess":
+            raise Exception("DEFAULT APP")
+        print("-- RETURNING TO DEFAULT APP --")
+        print_stack()
+        return default_app
+    return app
+
+
+def enable_trace():
+    global app_or_default
+    app_or_default = _app_or_default_trace
+
+
+def disable_trace():
+    global app_or_default
+    app_or_default = _app_or_default
+
+
+app_or_default = _app_or_default
+if os.environ.get("CELERY_TRACE_APP"):  # pragma: no cover
+    enable_trace()

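`App.worker_main` above simply forwards `argv` to the celeryd command; a hypothetical
launcher script built on it might look like (module name and argv values are
assumptions)::

    # run_worker.py -- illustrative only.
    from celery import Celery

    app = Celery("proj")

    if __name__ == "__main__":
        # Roughly the same as running `celeryd --loglevel=INFO`
        # against this application instance.
        app.worker_main(["celeryd", "--loglevel=INFO"])
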
+ 372 - 0
celery/app/amqp.py

@@ -0,0 +1,372 @@
+# -*- coding: utf-8 -*-
+"""
+celery.app.amqp
+===============
+
+AMQ related functionality.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
+from datetime import datetime, timedelta
+
+from kombu import BrokerConnection, Exchange
+from kombu.connection import Resource
+from kombu import compat as messaging
+from kombu.utils import cached_property
+
+from celery import routes as _routes
+from celery import signals
+from celery.utils import gen_unique_id, textindent
+from celery.utils import promise, maybe_promise
+from celery.utils.compat import UserDict
+
+#: List of known options to a Kombu producers send method.
+#: Used to extract the message related options out of any `dict`.
+MSG_OPTIONS = ("mandatory", "priority", "immediate", "routing_key",
+                "serializer", "delivery_mode", "compression")
+
+#: Human readable queue declaration.
+QUEUE_FORMAT = """
+. %(name)s exchange:%(exchange)s (%(exchange_type)s) \
+binding:%(binding_key)s
+"""
+
+#: Set of exchange names that have already been declared.
+_exchanges_declared = set()
+
+#: Set of queue names that have already been declared.
+_queues_declared = set()
+
+
+def extract_msg_options(options, keep=MSG_OPTIONS):
+    """Extracts known options to `basic_publish` from a dict,
+    and returns a new dict."""
+    return dict((name, options.get(name)) for name in keep)
+
+
+class Queues(UserDict):
+    """Queue name⇒ declaration mapping.
+
+    Celery will consult this mapping to find the options
+    for any queue by name.
+
+    :param queues: Initial mapping.
+
+    """
+    #: If set, this is a subset of queues to consume from.
+    #: The rest of the queues are then used for routing only.
+    _consume_from = None
+
+    def __init__(self, queues):
+        self.data = {}
+        for queue_name, options in (queues or {}).items():
+            self.add(queue_name, **options)
+
+    def add(self, queue, exchange=None, routing_key=None,
+            exchange_type="direct", **options):
+        """Add new queue.
+
+        :param queue: Name of the queue.
+        :keyword exchange: Name of the exchange.
+        :keyword routing_key: Binding key.
+        :keyword exchange_type: Type of exchange.
+        :keyword \*\*options: Additional declaration options.
+
+        """
+        q = self[queue] = self.options(exchange, routing_key,
+                                       exchange_type, **options)
+        return q
+
+    def options(self, exchange, routing_key,
+            exchange_type="direct", **options):
+        """Creates new option mapping for queue, with required
+        keys present."""
+        return dict(options, routing_key=routing_key,
+                             binding_key=routing_key,
+                             exchange=exchange,
+                             exchange_type=exchange_type)
+
+    def format(self, indent=0, indent_first=True):
+        """Format routing table into string for log dumps."""
+        queues = self
+        if self._consume_from is not None:
+            queues = self._consume_from
+        info = [QUEUE_FORMAT.strip() % dict(
+                    name=(name + ":").ljust(12), **config)
+                        for name, config in sorted(queues.items())]
+        if indent_first:
+            return textindent("\n".join(info), indent)
+        return info[0] + "\n" + textindent("\n".join(info[1:]), indent)
+
+    def select_subset(self, wanted, create_missing=True):
+        """Select subset of the currently defined queues.
+
+        Does not return anything: queues not in `wanted` will
+        be discarded in-place.
+
+        :param wanted: List of wanted queue names.
+        :keyword create_missing: By default any unknown queues will be
+                                 added automatically, but if disabled
+                                 the occurrence of unknown queues
+                                 in `wanted` will raise :exc:`KeyError`.
+
+        """
+        acc = {}
+        for queue in wanted:
+            try:
+                options = self[queue]
+            except KeyError:
+                if not create_missing:
+                    raise
+                options = self.options(queue, queue)
+            acc[queue] = options
+        self._consume_from = acc
+        self.update(acc)
+
+    @classmethod
+    def with_defaults(cls, queues, default_exchange, default_exchange_type):
+        """Alternate constructor that adds default exchange and
+        exchange type information to queues that do not have any."""
+        for opts in queues.values():
+            opts.setdefault("exchange", default_exchange),
+            opts.setdefault("exchange_type", default_exchange_type)
+            opts.setdefault("binding_key", default_exchange)
+            opts.setdefault("routing_key", opts.get("binding_key"))
+        return cls(queues)
+
+    @property
+    def consume_from(self):
+        if self._consume_from is not None:
+            return self._consume_from
+        return self
+
+
+class TaskPublisher(messaging.Publisher):
+    auto_declare = True
+    retry = False
+    retry_policy = None
+
+    def __init__(self, *args, **kwargs):
+        self.app = kwargs.pop("app")
+        self.retry = kwargs.pop("retry", self.retry)
+        self.retry_policy = kwargs.pop("retry_policy",
+                                        self.retry_policy or {})
+        super(TaskPublisher, self).__init__(*args, **kwargs)
+
+    def declare(self):
+        if self.exchange.name and \
+                self.exchange.name not in _exchanges_declared:
+            super(TaskPublisher, self).declare()
+            _exchanges_declared.add(self.exchange.name)
+
+    def _declare_queue(self, name, retry=False, retry_policy={}):
+        options = self.app.queues[name]
+        queue = messaging.entry_to_queue(name, **options)(self.channel)
+        if retry:
+            self.connection.ensure(queue, queue.declare, **retry_policy)()
+        else:
+            queue.declare()
+        return queue
+
+    def _declare_exchange(self, name, type, retry=False, retry_policy={}):
+        ex = Exchange(name, type=type, durable=self.durable,
+                      auto_delete=self.auto_delete)(self.channel)
+        if retry:
+            return self.connection.ensure(ex, ex.declare, **retry_policy)
+        return ex.declare()
+
+    def delay_task(self, task_name, task_args=None, task_kwargs=None,
+            countdown=None, eta=None, task_id=None, taskset_id=None,
+            expires=None, exchange=None, exchange_type=None,
+            event_dispatcher=None, retry=None, retry_policy=None,
+            queue=None, now=None, retries=0, chord=None, **kwargs):
+        """Send task message."""
+
+        connection = self.connection
+        _retry_policy = self.retry_policy
+        if retry_policy:  # merge default and custom policy
+            _retry_policy = dict(_retry_policy, **retry_policy)
+
+        # declare entities
+        if queue and queue not in _queues_declared:
+            entity = self._declare_queue(queue, retry, _retry_policy)
+            _exchanges_declared.add(entity.exchange.name)
+            _queues_declared.add(entity.name)
+        if exchange and exchange not in _exchanges_declared:
+            self._declare_exchange(exchange,
+                    exchange_type or self.exchange_type, retry, _retry_policy)
+            _exchanges_declared.add(exchange)
+
+        task_id = task_id or gen_unique_id()
+        task_args = task_args or []
+        task_kwargs = task_kwargs or {}
+        if not isinstance(task_args, (list, tuple)):
+            raise ValueError("task args must be a list or tuple")
+        if not isinstance(task_kwargs, dict):
+            raise ValueError("task kwargs must be a dictionary")
+        if countdown:                           # Convert countdown to ETA.
+            now = now or datetime.now()
+            eta = now + timedelta(seconds=countdown)
+        if isinstance(expires, int):
+            now = now or datetime.now()
+            expires = now + timedelta(seconds=expires)
+        eta = eta and eta.isoformat()
+        expires = expires and expires.isoformat()
+
+        body = {"task": task_name,
+                "id": task_id,
+                "args": task_args or [],
+                "kwargs": task_kwargs or {},
+                "retries": retries or 0,
+                "eta": eta,
+                "expires": expires}
+
+        if taskset_id:
+            body["taskset"] = taskset_id
+        if chord:
+            body["chord"] = chord
+
+        send = self.send
+        if (retry is None and self.retry) or retry:
+            send = connection.ensure(self, self.send, **_retry_policy)
+        send(body, exchange=exchange, **extract_msg_options(kwargs))
+        signals.task_sent.send(sender=task_name, **body)
+        if event_dispatcher:
+            event_dispatcher.send("task-sent", uuid=task_id,
+                                               name=task_name,
+                                               args=repr(task_args),
+                                               kwargs=repr(task_kwargs),
+                                               retries=retries,
+                                               eta=eta,
+                                               expires=expires)
+        return task_id
+
+    def __exit__(self, *exc_info):
+        try:
+            self.release()
+        except AttributeError:
+            self.close()
+
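
A minimal usage sketch for `delay_task` (the task name, arguments and the `connection`/`app` objects are assumptions for illustration; application code would normally go through `app.send_task` instead):

    publisher = app.amqp.TaskPublisher(connection)
    try:
        task_id = publisher.delay_task("tasks.add", task_args=(2, 2),
                                       countdown=10)
    finally:
        publisher.close()
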
+
+class PublisherPool(Resource):
+
+    def __init__(self, app=None):
+        self.app = app
+        super(PublisherPool, self).__init__(limit=self.app.pool.limit)
+
+    def create_publisher(self):
+        conn = self.app.pool.acquire(block=True)
+        pub = self.app.amqp.TaskPublisher(conn, auto_declare=False)
+        conn._publisher_chan = pub.channel
+        return pub
+
+    def new(self):
+        return promise(self.create_publisher)
+
+    def setup(self):
+        if self.limit:
+            for _ in xrange(self.limit):
+                self._resource.put_nowait(self.new())
+
+    def prepare(self, publisher):
+        pub = maybe_promise(publisher)
+        if not pub.connection:
+            pub.connection = self.app.pool.acquire(block=True)
+            if not getattr(pub.connection, "_publisher_chan", None):
+                pub.connection._publisher_chan = pub.connection.channel()
+            pub.revive(pub.connection._publisher_chan)
+        return pub
+
+    def release(self, resource):
+        resource.connection.release()
+        resource.connection = None
+        super(PublisherPool, self).release(resource)
+
+
+class AMQP(object):
+    BrokerConnection = BrokerConnection
+    Publisher = messaging.Publisher
+    Consumer = messaging.Consumer
+    ConsumerSet = messaging.ConsumerSet
+
+    #: Cached and prepared routing table.
+    _rtable = None
+
+    def __init__(self, app):
+        self.app = app
+
+    def flush_routes(self):
+        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
+
+    def Queues(self, queues):
+        """Create new :class:`Queues` instance, using queue defaults
+        from the current configuration."""
+        conf = self.app.conf
+        if not queues:
+            queues = {conf.CELERY_DEFAULT_QUEUE: {
+                        "exchange": conf.CELERY_DEFAULT_EXCHANGE,
+                        "exchange_type": conf.CELERY_DEFAULT_EXCHANGE_TYPE,
+                        "binding_key": conf.CELERY_DEFAULT_ROUTING_KEY}}
+        return Queues.with_defaults(queues, conf.CELERY_DEFAULT_EXCHANGE,
+                                            conf.CELERY_DEFAULT_EXCHANGE_TYPE)
+
+    def Router(self, queues=None, create_missing=None):
+        """Returns the current task router."""
+        return _routes.Router(self.routes, queues or self.queues,
+                              self.app.either("CELERY_CREATE_MISSING_QUEUES",
+                                              create_missing), app=self.app)
+
+    def TaskConsumer(self, *args, **kwargs):
+        """Returns consumer for a single task queue."""
+        default_queue_name, default_queue = self.get_default_queue()
+        defaults = dict({"queue": default_queue_name}, **default_queue)
+        defaults["routing_key"] = defaults.pop("binding_key", None)
+        return self.Consumer(*args,
+                             **self.app.merge(defaults, kwargs))
+
+    def TaskPublisher(self, *args, **kwargs):
+        """Returns publisher used to send tasks.
+
+        You should use `app.send_task` instead.
+
+        """
+        conf = self.app.conf
+        _, default_queue = self.get_default_queue()
+        defaults = {"exchange": default_queue["exchange"],
+                    "exchange_type": default_queue["exchange_type"],
+                    "routing_key": conf.CELERY_DEFAULT_ROUTING_KEY,
+                    "serializer": conf.CELERY_TASK_SERIALIZER,
+                    "retry": conf.CELERY_TASK_PUBLISH_RETRY,
+                    "retry_policy": conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
+                    "app": self}
+        return TaskPublisher(*args, **self.app.merge(defaults, kwargs))
+
+    def get_task_consumer(self, connection, queues=None, **kwargs):
+        """Return consumer configured to consume from all known task
+        queues."""
+        return self.ConsumerSet(connection,
+                                from_dict=queues or self.queues.consume_from,
+                                **kwargs)
+
+    def get_default_queue(self):
+        """Returns `(queue_name, queue_options)` tuple for the queue
+        configured to be default (:setting:`CELERY_DEFAULT_QUEUE`)."""
+        q = self.app.conf.CELERY_DEFAULT_QUEUE
+        return q, self.queues[q]
+
+    @cached_property
+    def queues(self):
+        """Queue name⇒ declaration mapping."""
+        return self.Queues(self.app.conf.CELERY_QUEUES)
+
+    @property
+    def routes(self):
+        if self._rtable is None:
+            self.flush_routes()
+        return self._rtable
+
+    @cached_property
+    def publisher_pool(self):
+        return PublisherPool(app=self.app)
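
For reference, with an empty :setting:`CELERY_QUEUES` and the stock defaults, the `Queues` factory above ends up building a single-entry mapping roughly like this (a sketch, not output copied from the code):

    {"celery": {"exchange": "celery",
                "exchange_type": "direct",
                "binding_key": "celery",
                "routing_key": "celery"}}
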

+ 361 - 0
celery/app/base.py

@@ -0,0 +1,361 @@
+"""
+celery.app.base
+===============
+
+Application Base Class.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
+import platform as _platform
+
+from copy import deepcopy
+from threading import Lock
+
+from kombu.utils import cached_property
+
+from celery.app.defaults import DEFAULTS
+from celery.datastructures import ConfigurationView
+from celery.utils import instantiate, lpmerge
+from celery.utils.functional import wraps
+
+import kombu
+if kombu.VERSION < (1, 0, 8):
+    raise ImportError("Celery requires Kombu version 1.0.8 or higher.")
+
+
+class LamportClock(object):
+    """Lamports logical clock.
+
+    From Wikipedia:
+
+    "A Lamport logical clock is a monotonically incrementing software counter
+    maintained in each process.  It follows some simple rules:
+
+        * A process increments its counter before each event in that process;
+        * When a process sends a message, it includes its counter value with
+          the message;
+        * On receiving a message, the receiver process sets its counter to be
+          greater than the maximum of its own value and the received value
+          before it considers the message received.
+
+    Conceptually, this logical clock can be thought of as a clock that only
+    has meaning in relation to messages moving between processes.  When a
+    process receives a message, it resynchronizes its logical clock with
+    the sender.
+
+    .. seealso::
+
+        http://en.wikipedia.org/wiki/Lamport_timestamps
+        http://en.wikipedia.org/wiki/Lamport's_Distributed_Mutual_Exclusion_Algorithm
+
+    *Usage*
+
+    When sending a message use :meth:`forward` to increment the clock,
+    when receiving a message use :meth:`adjust` to sync with
+    the timestamp of the incoming message.
+
+    """
+    #: The clock's current value.
+    value = 0
+
+    def __init__(self, initial_value=0):
+        self.value = initial_value
+        self._mutex = Lock()
+
+    def adjust(self, other):
+        self._mutex.acquire()
+        try:
+            self.value = max(self.value, other) + 1
+        finally:
+            self._mutex.release()
+
+    def forward(self):
+        self._mutex.acquire()
+        try:
+            self.value += 1
+        finally:
+            self._mutex.release()
+        return self.value
+
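
A small usage sketch for the clock above (illustrative only): the sender forwards its clock before publishing, the receiver adjusts on delivery.

    clock = LamportClock()
    stamp = clock.forward()   # sender: increment and attach the value to the message
    # ... message travels to another process ...
    clock.adjust(stamp)       # receiver: resync with the incoming timestamp
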
+
+class BaseApp(object):
+    """Base class for apps."""
+    SYSTEM = _platform.system()
+    IS_OSX = SYSTEM == "Darwin"
+    IS_WINDOWS = SYSTEM == "Windows"
+
+    amqp_cls = "celery.app.amqp.AMQP"
+    backend_cls = None
+    events_cls = "celery.events.Events"
+    loader_cls = "app"
+    log_cls = "celery.log.Logging"
+    control_cls = "celery.task.control.Control"
+
+    _pool = None
+
+    def __init__(self, main=None, loader=None, backend=None,
+            amqp=None, events=None, log=None, control=None,
+            set_as_current=True, accept_magic_kwargs=False):
+        self.main = main
+        self.amqp_cls = amqp or self.amqp_cls
+        self.backend_cls = backend or self.backend_cls
+        self.events_cls = events or self.events_cls
+        self.loader_cls = loader or self.loader_cls
+        self.log_cls = log or self.log_cls
+        self.control_cls = control or self.control_cls
+        self.set_as_current = set_as_current
+        self.accept_magic_kwargs = accept_magic_kwargs
+        self.on_init()
+        self.clock = LamportClock()
+
+    def on_init(self):
+        """Called at the end of the constructor."""
+        pass
+
+    def config_from_object(self, obj, silent=False):
+        """Read configuration from object, where object is either
+        a real object, or the name of an object to import.
+
+            >>> celery.config_from_object("myapp.celeryconfig")
+
+            >>> from myapp import celeryconfig
+            >>> celery.config_from_object(celeryconfig)
+
+        """
+        del(self.conf)
+        return self.loader.config_from_object(obj, silent=silent)
+
+    def config_from_envvar(self, variable_name, silent=False):
+        """Read configuration from environment variable.
+
+        The value of the environment variable must be the name
+        of an object to import.
+
+            >>> os.environ["CELERY_CONFIG_MODULE"] = "myapp.celeryconfig"
+            >>> celery.config_from_envvar("CELERY_CONFIG_MODULE")
+
+        """
+        del(self.conf)
+        return self.loader.config_from_envvar(variable_name, silent=silent)
+
+    def config_from_cmdline(self, argv, namespace="celery"):
+        """Read configuration from argv.
+
+        The config
+
+        """
+        self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
+
+    def send_task(self, name, args=None, kwargs=None, countdown=None,
+            eta=None, task_id=None, publisher=None, connection=None,
+            connect_timeout=None, result_cls=None, expires=None,
+            queues=None, **options):
+        """Send task by name.
+
+        :param name: Name of task to execute (e.g. `"tasks.add"`).
+        :keyword result_cls: Specify custom result class. Default is
+            using :meth:`AsyncResult`.
+
+        Supports the same arguments as
+        :meth:`~celery.task.base.BaseTask.apply_async`.
+
+        """
+        router = self.amqp.Router(queues)
+        result_cls = result_cls or self.AsyncResult
+
+        options.setdefault("compression",
+                           self.conf.CELERY_MESSAGE_COMPRESSION)
+        options = router.route(options, name, args, kwargs)
+        exchange = options.get("exchange")
+        exchange_type = options.get("exchange_type")
+
+        def _do_publish(connection=None, **_):
+            publish = publisher or self.amqp.TaskPublisher(connection,
+                                            exchange=exchange,
+                                            exchange_type=exchange_type)
+            try:
+                new_id = publish.delay_task(name, args, kwargs,
+                                            task_id=task_id,
+                                            countdown=countdown, eta=eta,
+                                            expires=expires, **options)
+            finally:
+                publisher or publish.close()
+
+            return result_cls(new_id)
+
+        return self.with_default_connection(_do_publish)(
+                connection=connection, connect_timeout=connect_timeout)
+
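
A minimal call sketch for `send_task` (the task name and arguments are made up; `app` is assumed to be a configured application instance):

    result = app.send_task("tasks.add", args=(2, 2), kwargs={},
                           countdown=10)
    print(result.task_id)
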
+    def AsyncResult(self, task_id, backend=None, task_name=None):
+        """Create :class:`celery.result.BaseAsyncResult` instance."""
+        from celery.result import BaseAsyncResult
+        return BaseAsyncResult(task_id, app=self,
+                               task_name=task_name,
+                               backend=backend or self.backend)
+
+    def TaskSetResult(self, taskset_id, results, **kwargs):
+        """Create :class:`celery.result.TaskSetResult` instance."""
+        from celery.result import TaskSetResult
+        return TaskSetResult(taskset_id, results, app=self)
+
+    def broker_connection(self, hostname=None, userid=None,
+            password=None, virtual_host=None, port=None, ssl=None,
+            insist=None, connect_timeout=None, transport=None, **kwargs):
+        """Establish a connection to the message broker.
+
+        :keyword hostname: defaults to the :setting:`BROKER_HOST` setting.
+        :keyword userid: defaults to the :setting:`BROKER_USER` setting.
+        :keyword password: defaults to the :setting:`BROKER_PASSWORD` setting.
+        :keyword virtual_host: defaults to the :setting:`BROKER_VHOST` setting.
+        :keyword port: defaults to the :setting:`BROKER_PORT` setting.
+        :keyword ssl: defaults to the :setting:`BROKER_USE_SSL` setting.
+        :keyword insist: defaults to the :setting:`BROKER_INSIST` setting.
+        :keyword connect_timeout: defaults to the
+            :setting:`BROKER_CONNECTION_TIMEOUT` setting.
+        :keyword backend_cls: defaults to the :setting:`BROKER_BACKEND`
+            setting.
+
+        :returns :class:`kombu.connection.BrokerConnection`:
+
+        """
+        return self.amqp.BrokerConnection(
+                    hostname or self.conf.BROKER_HOST,
+                    userid or self.conf.BROKER_USER,
+                    password or self.conf.BROKER_PASSWORD,
+                    virtual_host or self.conf.BROKER_VHOST,
+                    port or self.conf.BROKER_PORT,
+                    transport=transport or self.conf.BROKER_BACKEND,
+                    insist=self.either("BROKER_INSIST", insist),
+                    ssl=self.either("BROKER_USE_SSL", ssl),
+                    connect_timeout=self.either(
+                                "BROKER_CONNECTION_TIMEOUT", connect_timeout),
+                    transport_options=self.conf.BROKER_TRANSPORT_OPTIONS)
+
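
Illustrative only: explicit arguments override the corresponding `BROKER_*` settings, anything left out falls back to configuration (the hostname here is an assumption):

    conn = app.broker_connection(hostname="amqp.example.com", port=5672)
    try:
        conn.connect()
    finally:
        conn.close()
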
+    def with_default_connection(self, fun):
+        """With any function accepting `connection` and `connect_timeout`
+        keyword arguments, establishes a default connection if one is
+        not already passed to it.
+
+        Any automatically established connection will be closed after
+        the function returns.
+
+        """
+
+        @wraps(fun)
+        def _inner(*args, **kwargs):
+            connection = kwargs.get("connection")
+            kwargs["connection"] = conn = connection or \
+                    self.pool.acquire(block=True)
+            close_connection = not connection and conn.release or None
+
+            try:
+                return fun(*args, **kwargs)
+            finally:
+                if close_connection:
+                    close_connection()
+        return _inner
+
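
A sketch of how the decorator above is meant to be used (the wrapped function is hypothetical):

    def publish_something(data, connection=None, connect_timeout=None):
        ...  # use the connection here

    publish = app.with_default_connection(publish_something)
    publish({"x": 1})  # acquires a pooled connection and releases it afterwards
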
+    def prepare_config(self, c):
+        """Prepare configuration before it is merged with the defaults."""
+        if not c.get("CELERY_RESULT_BACKEND"):
+            rbackend = c.get("CELERY_BACKEND")
+            if rbackend:
+                c["CELERY_RESULT_BACKEND"] = rbackend
+        if not c.get("BROKER_BACKEND"):
+            cbackend = c.get("BROKER_TRANSPORT") or c.get("CARROT_BACKEND")
+            if cbackend:
+                c["BROKER_BACKEND"] = cbackend
+        return c
+
+    def mail_admins(self, subject, body, fail_silently=False):
+        """Send an email to the admins in the :setting:`ADMINS` setting."""
+        if self.conf.ADMINS:
+            to = [admin_email for _, admin_email in self.conf.ADMINS]
+            return self.loader.mail_admins(subject, body, fail_silently, to=to,
+                                       sender=self.conf.SERVER_EMAIL,
+                                       host=self.conf.EMAIL_HOST,
+                                       port=self.conf.EMAIL_PORT,
+                                       user=self.conf.EMAIL_HOST_USER,
+                                       password=self.conf.EMAIL_HOST_PASSWORD,
+                                       timeout=self.conf.EMAIL_TIMEOUT)
+
+    def either(self, default_key, *values):
+        """Fallback to the value of a configuration key if none of the
+        `*values` are true."""
+        for value in values:
+            if value is not None:
+                return value
+        return self.conf.get(default_key)
+
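
For example, assuming the default configuration:

    app.either("BROKER_CONNECTION_TIMEOUT", None)   # -> 4, taken from the setting
    app.either("BROKER_CONNECTION_TIMEOUT", 10)     # -> 10, explicit value wins
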
+    def merge(self, l, r):
+        """Like `dict(a, **b)` except it will keep values from `a`
+        if the value in `b` is :const:`None`."""
+        return lpmerge(l, r)
+
+    def _get_backend(self):
+        from celery.backends import get_backend_cls
+        backend_cls = self.backend_cls or self.conf.CELERY_RESULT_BACKEND
+        backend_cls = get_backend_cls(backend_cls, loader=self.loader)
+        return backend_cls(app=self)
+
+    def _get_config(self):
+        return ConfigurationView({},
+                [self.prepare_config(self.loader.conf), deepcopy(DEFAULTS)])
+
+    def _after_fork(self, obj_):
+        if self._pool:
+            self._pool.force_close_all()
+            self._pool = None
+
+    @property
+    def pool(self):
+        if self._pool is None:
+            try:
+                from multiprocessing.util import register_after_fork
+                register_after_fork(self, self._after_fork)
+            except ImportError:
+                pass
+            self._pool = self.broker_connection().Pool(
+                            self.conf.BROKER_POOL_LIMIT)
+        return self._pool
+
+    @cached_property
+    def amqp(self):
+        """Sending/receiving messages.  See :class:`~celery.app.amqp.AMQP`."""
+        return instantiate(self.amqp_cls, app=self)
+
+    @cached_property
+    def backend(self):
+        """Storing/retreiving task state.  See
+        :class:`~celery.backend.base.BaseBackend`."""
+        return self._get_backend()
+
+    @cached_property
+    def conf(self):
+        """Current configuration (dict and attribute access)."""
+        return self._get_config()
+
+    @cached_property
+    def control(self):
+        """Controlling worker nodes.  See
+        :class:`~celery.task.control.Control`."""
+        return instantiate(self.control_cls, app=self)
+
+    @cached_property
+    def events(self):
+        """Sending/receiving events.  See :class:`~celery.events.Events`. """
+        return instantiate(self.events_cls, app=self)
+
+    @cached_property
+    def loader(self):
+        """Current loader."""
+        from celery.loaders import get_loader_cls
+        return get_loader_cls(self.loader_cls)(app=self)
+
+    @cached_property
+    def log(self):
+        """Logging utilities.  See :class:`~celery.log.Logging`."""
+        return instantiate(self.log_cls, app=self)

+ 163 - 0
celery/app/defaults.py

@@ -0,0 +1,163 @@
+import sys
+
+from datetime import timedelta
+
+is_jython = sys.platform.startswith("java")
+is_pypy = hasattr(sys, "pypy_version_info")
+
+DEFAULT_POOL = "processes"
+if is_jython:
+    DEFAULT_POOL = "threads"
+elif is_pypy:
+    DEFAULT_POOL = "solo"
+
+DEFAULT_PROCESS_LOG_FMT = """
+    [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
+""".strip()
+DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
+DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
+%(task_name)s[%(task_id)s]: %(message)s"""
+
+
+def str_to_bool(term, table={"false": False, "no": False, "0": False,
+                             "true":  True, "yes": True,  "1": True}):
+    try:
+        return table[term.lower()]
+    except KeyError:
+        raise TypeError("%r can not be converted to type bool" % (term, ))
+
+
+class Option(object):
+    typemap = dict(string=str, int=int, float=float, any=lambda v: v,
+                   bool=str_to_bool, dict=dict, tuple=tuple)
+
+    def __init__(self, default=None, *args, **kwargs):
+        self.default = default
+        self.type = kwargs.get("type") or "string"
+
+    def to_python(self, value):
+        return self.typemap[self.type](value)
+
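
A small sketch of how these Option descriptors coerce command-line values:

    Option(False, type="bool").to_python("yes")   # -> True
    Option(4, type="int").to_python("10")         # -> 10
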
+
+NAMESPACES = {
+    "BROKER": {
+        "HOST": Option("localhost"),
+        "PORT": Option(type="int"),
+        "USER": Option("guest"),
+        "PASSWORD": Option("guest"),
+        "VHOST": Option("/"),
+        "BACKEND": Option(),
+        "CONNECTION_TIMEOUT": Option(4, type="int"),
+        "CONNECTION_RETRY": Option(True, type="bool"),
+        "CONNECTION_MAX_RETRIES": Option(100, type="int"),
+        "POOL_LIMIT": Option(None, type="int"),
+        "INSIST": Option(False, type="bool"),
+        "USE_SSL": Option(False, type="bool"),
+        "TRANSPORT_OPTIONS": Option({}, type="dict")
+    },
+    "CELERY": {
+        "ACKS_LATE": Option(False, type="bool"),
+        "ALWAYS_EAGER": Option(False, type="bool"),
+        "AMQP_TASK_RESULT_EXPIRES": Option(type="int"),
+        "AMQP_TASK_RESULT_CONNECTION_MAX": Option(1, type="int"),
+        "BROADCAST_QUEUE": Option("celeryctl"),
+        "BROADCAST_EXCHANGE": Option("celeryctl"),
+        "BROADCAST_EXCHANGE_TYPE": Option("fanout"),
+        "CACHE_BACKEND": Option(),
+        "CACHE_BACKEND_OPTIONS": Option({}, type="dict"),
+        "CREATE_MISSING_QUEUES": Option(True, type="bool"),
+        "DEFAULT_RATE_LIMIT": Option(type="string"),
+        "DISABLE_RATE_LIMITS": Option(False, type="bool"),
+        "DEFAULT_ROUTING_KEY": Option("celery"),
+        "DEFAULT_QUEUE": Option("celery"),
+        "DEFAULT_EXCHANGE": Option("celery"),
+        "DEFAULT_EXCHANGE_TYPE": Option("direct"),
+        "DEFAULT_DELIVERY_MODE": Option(2, type="string"),
+        "EAGER_PROPAGATES_EXCEPTIONS": Option(False, type="bool"),
+        "EVENT_SERIALIZER": Option("json"),
+        "IMPORTS": Option((), type="tuple"),
+        "IGNORE_RESULT": Option(False, type="bool"),
+        "MAX_CACHED_RESULTS": Option(5000, type="int"),
+        "MESSAGE_COMPRESSION": Option(None, type="string"),
+        "RESULT_BACKEND": Option(None, type="string"),
+        "RESULT_DBURI": Option(),
+        "RESULT_ENGINE_OPTIONS": Option(None, type="dict"),
+        "RESULT_EXCHANGE": Option("celeryresults"),
+        "RESULT_EXCHANGE_TYPE": Option("direct"),
+        "RESULT_SERIALIZER": Option("pickle"),
+        "RESULT_PERSISTENT": Option(False, type="bool"),
+        "ROUTES": Option(None, type="any"),
+        "SEND_EVENTS": Option(False, type="bool"),
+        "SEND_TASK_ERROR_EMAILS": Option(False, type="bool"),
+        "SEND_TASK_SENT_EVENT": Option(False, type="bool"),
+        "STORE_ERRORS_EVEN_IF_IGNORED": Option(False, type="bool"),
+        "TASK_ERROR_WHITELIST": Option((), type="tuple"),
+        "TASK_PUBLISH_RETRY": Option(True, type="bool"),
+        "TASK_PUBLISH_RETRY_POLICY": Option({
+                "max_retries": 100,
+                "interval_start": 0,
+                "interval_max": 1,
+                "interval_step": 0.2}, type="dict"),
+        "TASK_RESULT_EXPIRES": Option(timedelta(days=1), type="int"),
+        "TASK_SERIALIZER": Option("pickle"),
+        "TRACK_STARTED": Option(False, type="bool"),
+        "REDIRECT_STDOUTS": Option(True, type="bool"),
+        "REDIRECT_STDOUTS_LEVEL": Option("WARNING"),
+        "QUEUES": Option(None, type="dict"),
+    },
+    "CELERYD": {
+        "AUTOSCALER": Option("celery.worker.autoscale.Autoscaler"),
+        "CONCURRENCY": Option(0, type="int"),
+        "ETA_SCHEDULER": Option(None, type="str"),
+        "ETA_SCHEDULER_PRECISION": Option(1.0, type="float"),
+        "HIJACK_ROOT_LOGGER": Option(True, type="bool"),
+        "CONSUMER": Option("celery.worker.consumer.Consumer"),
+        "LOG_FORMAT": Option(DEFAULT_PROCESS_LOG_FMT),
+        "LOG_COLOR": Option(type="bool"),
+        "LOG_LEVEL": Option("WARN"),
+        "LOG_FILE": Option(),
+        "MEDIATOR": Option("celery.worker.mediator.Mediator"),
+        "MAX_TASKS_PER_CHILD": Option(type="int"),
+        "POOL": Option(DEFAULT_POOL),
+        "POOL_PUTLOCKS": Option(True, type="bool"),
+        "PREFETCH_MULTIPLIER": Option(4, type="int"),
+        "STATE_DB": Option(),
+        "TASK_LOG_FORMAT": Option(DEFAULT_TASK_LOG_FMT),
+        "TASK_SOFT_TIME_LIMIT": Option(type="int"),
+        "TASK_TIME_LIMIT": Option(type="int"),
+    },
+    "CELERYBEAT": {
+        "SCHEDULE": Option({}, type="dict"),
+        "SCHEDULER": Option("celery.beat.PersistentScheduler"),
+        "SCHEDULE_FILENAME": Option("celerybeat-schedule"),
+        "MAX_LOOP_INTERVAL": Option(5 * 60, type="int"),
+        "LOG_LEVEL": Option("INFO"),
+        "LOG_FILE": Option(),
+    },
+    "CELERYMON": {
+        "LOG_LEVEL": Option("INFO"),
+        "LOG_FILE": Option(),
+        "LOG_FORMAT": Option(DEFAULT_LOG_FMT),
+    },
+    "EMAIL": {
+        "HOST": Option("localhost"),
+        "PORT": Option(25, type="int"),
+        "HOST_USER": Option(None),
+        "HOST_PASSWORD": Option(None),
+        "TIMEOUT": Option(2, type="int"),
+    },
+    "SERVER_EMAIL": Option("celery@localhost"),
+    "ADMINS": Option((), type="tuple"),
+}
+
+
+def _flatten(d, ns=""):
+    acc = []
+    for key, value in d.iteritems():
+        if isinstance(value, dict):
+            acc.extend(_flatten(value, ns=key + '_'))
+        else:
+            acc.append((ns + key, value.default))
+    return acc
+
+DEFAULTS = dict(_flatten(NAMESPACES))
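
After flattening, settings are addressed by their namespaced names, for example:

    DEFAULTS["BROKER_HOST"]                  # -> "localhost"
    DEFAULTS["CELERY_DEFAULT_QUEUE"]         # -> "celery"
    DEFAULTS["CELERYD_PREFETCH_MULTIPLIER"]  # -> 4
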

+ 37 - 41
celery/apps/beat.py

@@ -1,3 +1,4 @@
+import atexit
 import socket
 import sys
 import traceback
@@ -5,10 +6,9 @@ import traceback
 from celery import __version__
 from celery import beat
 from celery import platforms
-from celery.log import emergency_error
-from celery.utils import get_full_cls_name, info, LOG_LEVELS
-from celery.utils.info import humanize_seconds
-from celery.utils import term
+from celery.app import app_or_default
+from celery.utils import get_full_cls_name, LOG_LEVELS
+from celery.utils.timeutils import humanize_seconds
 
 STARTUP_INFO_FMT = """
 Configuration ->
@@ -25,25 +25,24 @@ class Beat(object):
     Service = beat.Service
 
     def __init__(self, loglevel=None, logfile=None, schedule=None,
-            max_interval=None, scheduler_cls=None, defaults=None,
+            max_interval=None, scheduler_cls=None, app=None,
             socket_timeout=30, redirect_stdouts=None,
-            redirect_stdouts_level=None, **kwargs):
+            redirect_stdouts_level=None, pidfile=None, **kwargs):
         """Starts the celerybeat task scheduler."""
+        self.app = app = app_or_default(app)
 
-        if defaults is None:
-            from celery import conf as defaults
-        self.defaults = defaults
-
-        self.loglevel = loglevel or defaults.CELERYBEAT_LOG_LEVEL
-        self.logfile = logfile or defaults.CELERYBEAT_LOG_FILE
-        self.schedule = schedule or defaults.CELERYBEAT_SCHEDULE_FILENAME
-        self.scheduler_cls = scheduler_cls or defaults.CELERYBEAT_SCHEDULER
+        self.loglevel = loglevel or app.conf.CELERYBEAT_LOG_LEVEL
+        self.logfile = logfile or app.conf.CELERYBEAT_LOG_FILE
+        self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME
+        self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER
         self.max_interval = max_interval
         self.socket_timeout = socket_timeout
-        self.colored = term.colored(enabled=defaults.CELERYD_LOG_COLOR)
-        self.redirect_stdouts = redirect_stdouts or defaults.REDIRECT_STDOUTS
+        self.colored = app.log.colored(self.logfile)
+        self.redirect_stdouts = (redirect_stdouts or
+                                 app.conf.CELERY_REDIRECT_STDOUTS)
         self.redirect_stdouts_level = (redirect_stdouts_level or
-                                       defaults.REDIRECT_STDOUTS_LEVEL)
+                                       app.conf.CELERY_REDIRECT_STDOUTS_LEVEL)
+        self.pidfile = pidfile
 
         if not isinstance(self.loglevel, int):
             self.loglevel = LOG_LEVELS[self.loglevel.upper()]
@@ -57,19 +56,21 @@ class Beat(object):
         self.start_scheduler(logger)
 
     def setup_logging(self):
-        from celery import log
-        handled = log.setup_logging_subsystem(loglevel=self.loglevel,
-                                              logfile=self.logfile)
-        if not handled:
-            logger = log.get_default_logger(name="celery.beat")
-            if self.redirect_stdouts:
-                log.redirect_stdouts_to_logger(logger,
-                        loglevel=self.redirect_stdouts_level)
+        handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel,
+                                                       logfile=self.logfile)
+        logger = self.app.log.get_default_logger(name="celery.beat")
+        if self.redirect_stdouts and not handled:
+            self.app.log.redirect_stdouts_to_logger(logger,
+                    loglevel=self.redirect_stdouts_level)
         return logger
 
     def start_scheduler(self, logger=None):
         c = self.colored
-        beat = self.Service(logger=logger,
+        if self.pidfile:
+            pidlock = platforms.create_pidlock(self.pidfile).acquire()
+            atexit.register(pidlock.release)
+        beat = self.Service(app=self.app,
+                            logger=logger,
                             max_interval=self.max_interval,
                             scheduler_cls=self.scheduler_cls,
                             schedule_filename=self.schedule)
@@ -86,25 +87,24 @@ class Beat(object):
             self.install_sync_handler(beat)
             beat.start()
         except Exception, exc:
-            emergency_error(self.logfile,
-                    "celerybeat raised exception %s: %s\n%s" % (
-                            exc.__class__, exc, traceback.format_exc()))
+            logger.critical("celerybeat raised exception %s: %r\n%s" % (
+                            exc.__class__, exc, traceback.format_exc()),
+                            exc_info=sys.exc_info())
 
     def init_loader(self):
         # Run the worker init handler.
         # (Usually imports task modules and such.)
-        from celery.loaders import current_loader
-        self.loader = current_loader()
-        self.loader.init_worker()
+        self.app.loader.init_worker()
 
     def startup_info(self, beat):
+        scheduler = beat.get_scheduler(lazy=True)
         return STARTUP_INFO_FMT % {
-            "conninfo": info.format_broker_info(),
+            "conninfo": self.app.broker_connection().as_uri(),
             "logfile": self.logfile or "[stderr]",
             "loglevel": LOG_LEVELS[self.loglevel],
-            "loader": get_full_cls_name(self.loader.__class__),
-            "scheduler": get_full_cls_name(beat.scheduler.__class__),
-            "scheduler_info": beat.scheduler.info,
+            "loader": get_full_cls_name(self.app.loader.__class__),
+            "scheduler": get_full_cls_name(scheduler.__class__),
+            "scheduler_info": scheduler.info,
             "hmax_interval": humanize_seconds(beat.max_interval),
             "max_interval": beat.max_interval,
         }
@@ -115,7 +115,7 @@ class Beat(object):
                                info=" ".join(sys.argv[arg_start:]))
 
     def install_sync_handler(self, beat):
-        """Install a ``SIGTERM`` + ``SIGINT`` handler that saves
+        """Install a `SIGTERM` + `SIGINT` handler that saves
         the celerybeat schedule."""
 
         def _sync(signum, frame):
@@ -124,7 +124,3 @@ class Beat(object):
 
         platforms.install_signal_handler("SIGTERM", _sync)
         platforms.install_signal_handler("SIGINT", _sync)
-
-
-def run_celerybeat(*args, **kwargs):
-    return Beat(*args, **kwargs).run()

+ 162 - 115
celery/apps/worker.py

@@ -1,41 +1,52 @@
 import atexit
 import logging
-import multiprocessing
-import platform as _platform
+try:
+    import multiprocessing
+except ImportError:
+    multiprocessing = None
 import os
 import socket
 import sys
 import warnings
 
+from kombu.utils import partition
+
 from celery import __version__
 from celery import platforms
 from celery import signals
-from celery.exceptions import ImproperlyConfigured
-from celery.routes import Router
-from celery.task import discard_all
-from celery.utils import get_full_cls_name, LOG_LEVELS
-from celery.utils import info
-from celery.utils import term
+from celery.app import app_or_default
+from celery.exceptions import ImproperlyConfigured, SystemTerminate
+from celery.utils import get_full_cls_name, LOG_LEVELS, cry
 from celery.worker import WorkController
 
-
-SYSTEM = _platform.system()
-IS_OSX = SYSTEM == "Darwin"
-
-STARTUP_INFO_FMT = """
-Configuration ->
-    . broker -> %(conninfo)s
-    . queues ->
-%(queues)s
-    . concurrency -> %(concurrency)s
-    . loader -> %(loader)s
-    . logfile -> %(logfile)s@%(loglevel)s
-    . events -> %(events)s
-    . beat -> %(celerybeat)s
+BANNER = """
+ -------------- celery@%(hostname)s v%(version)s
+---- **** -----
+--- * ***  * -- [Configuration]
+-- * - **** ---   . broker:      %(conninfo)s
+- ** ----------   . loader:      %(loader)s
+- ** ----------   . logfile:     %(logfile)s@%(loglevel)s
+- ** ----------   . concurrency: %(concurrency)s
+- ** ----------   . events:      %(events)s
+- *** --- * ---   . beat:        %(celerybeat)s
+-- ******* ----
+--- ***** ----- [Queues]
+ --------------   %(queues)s
+"""
+
+EXTRA_INFO_FMT = """
+[Tasks]
 %(tasks)s
-""".strip()
+"""
 
-TASK_LIST_FMT = """    . tasks ->\n%s"""
+
+def cpu_count():
+    if multiprocessing is not None:
+        try:
+            return multiprocessing.cpu_count()
+        except NotImplementedError:
+            pass
+    return 2
 
 
 class Worker(object):
@@ -45,41 +56,51 @@ class Worker(object):
             hostname=None, discard=False, run_clockservice=False,
             schedule=None, task_time_limit=None, task_soft_time_limit=None,
             max_tasks_per_child=None, queues=None, events=False, db=None,
-            include=None, defaults=None, pidfile=None,
-            redirect_stdouts=None, redirect_stdouts_level=None, **kwargs):
-        if defaults is None:
-            from celery import conf
-            defaults = conf
-        self.defaults = defaults
+            include=None, app=None, pidfile=None,
+            redirect_stdouts=None, redirect_stdouts_level=None,
+            autoscale=None, scheduler_cls=None, pool=None, **kwargs):
+        self.app = app = app_or_default(app)
         self.concurrency = (concurrency or
-                            defaults.CELERYD_CONCURRENCY or
-                            multiprocessing.cpu_count())
-        self.loglevel = loglevel or defaults.CELERYD_LOG_LEVEL
-        self.logfile = logfile or defaults.CELERYD_LOG_FILE
+                            app.conf.CELERYD_CONCURRENCY or
+                            cpu_count())
+        self.loglevel = loglevel or app.conf.CELERYD_LOG_LEVEL
+        self.logfile = logfile or app.conf.CELERYD_LOG_FILE
+
         self.hostname = hostname or socket.gethostname()
         self.discard = discard
         self.run_clockservice = run_clockservice
-        self.schedule = schedule or defaults.CELERYBEAT_SCHEDULE_FILENAME
+        if self.app.IS_WINDOWS and self.run_clockservice:
+            self.die("-B option does not work on Windows.  "
+                     "Please run celerybeat as a separate service.")
+        self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME
+        self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER
         self.events = events
         self.task_time_limit = (task_time_limit or
-                                defaults.CELERYD_TASK_TIME_LIMIT)
+                                app.conf.CELERYD_TASK_TIME_LIMIT)
         self.task_soft_time_limit = (task_soft_time_limit or
-                                     defaults.CELERYD_TASK_SOFT_TIME_LIMIT)
+                                     app.conf.CELERYD_TASK_SOFT_TIME_LIMIT)
         self.max_tasks_per_child = (max_tasks_per_child or
-                                    defaults.CELERYD_MAX_TASKS_PER_CHILD)
+                                    app.conf.CELERYD_MAX_TASKS_PER_CHILD)
         self.redirect_stdouts = (redirect_stdouts or
-                                 defaults.REDIRECT_STDOUTS)
+                                 app.conf.CELERY_REDIRECT_STDOUTS)
         self.redirect_stdouts_level = (redirect_stdouts_level or
-                                       defaults.REDIRECT_STDOUTS_LEVEL)
+                                       app.conf.CELERY_REDIRECT_STDOUTS_LEVEL)
+        self.pool = (pool or app.conf.CELERYD_POOL)
         self.db = db
-        self.queues = queues or []
+        self.use_queues = queues or []
+        self.queues = None
         self.include = include or []
         self.pidfile = pidfile
+        self.autoscale = None
+        if autoscale:
+            max_c, _, min_c = partition(autoscale, ",")
+            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
         self._isatty = sys.stdout.isatty()
-        self.colored = term.colored(enabled=defaults.CELERYD_LOG_COLOR)
 
-        if isinstance(self.queues, basestring):
-            self.queues = self.queues.split(",")
+        self.colored = app.log.colored(self.logfile)
+
+        if isinstance(self.use_queues, basestring):
+            self.use_queues = self.use_queues.split(",")
         if isinstance(self.include, basestring):
             self.include = self.include.split(",")
 
@@ -97,70 +118,59 @@ class Worker(object):
         self.init_queues()
         self.worker_init()
         self.redirect_stdouts_to_logger()
-        print(str(self.colored.cyan(
-                "celery@%s v%s is starting." % (self.hostname, __version__))))
 
         if getattr(os, "geteuid", None) and os.geteuid() == 0:
             warnings.warn(
                 "Running celeryd with superuser privileges is not encouraged!")
 
-        if getattr(self.settings, "DEBUG", False):
-            warnings.warn("Using settings.DEBUG leads to a memory leak, "
-                    "never use this setting in a production environment!")
-
         if self.discard:
             self.purge_messages()
 
         # Dump configuration to screen so we have some basic information
         # for when users sends bug reports.
-        print(str(self.colored.reset(" \n", self.startup_info())))
-        self.set_process_status("Running...")
+        print(str(self.colored.cyan(" \n", self.startup_info())) +
+              str(self.colored.reset(self.extra_info())))
+        self.set_process_status("-active-")
 
         self.run_worker()
 
-    def on_listener_ready(self, listener):
-        signals.worker_ready.send(sender=listener)
+    def on_consumer_ready(self, consumer):
+        signals.worker_ready.send(sender=consumer)
         print("celery@%s has started." % self.hostname)
 
     def init_queues(self):
-        conf = self.defaults
-        if self.queues:
-            conf.QUEUES = dict((queue, options)
-                                for queue, options in conf.QUEUES.items()
-                                    if queue in self.queues)
-            for queue in self.queues:
-                if queue not in conf.QUEUES:
-                    if conf.CREATE_MISSING_QUEUES:
-                        Router(queues=conf.QUEUES).add_queue(queue)
-                    else:
-                        raise ImproperlyConfigured(
-                            "Queue '%s' not defined in CELERY_QUEUES" % queue)
+        if self.use_queues:
+            create_missing = self.app.conf.CELERY_CREATE_MISSING_QUEUES
+            try:
+                self.app.amqp.queues.select_subset(self.use_queues,
+                                                   create_missing)
+            except KeyError, exc:
+                raise ImproperlyConfigured(
+                    "Trying to select queue subset of %r, but queue %s"
+                    "is not defined in CELERY_QUEUES. If you want to "
+                    "automatically declare unknown queues you have to "
+                    "enable CELERY_CREATE_MISSING_QUEUES" % (
+                        self.use_queues, exc))
 
     def init_loader(self):
-        from celery.loaders import current_loader, load_settings
-        self.loader = current_loader()
-        self.settings = load_settings()
-        if not self.loader.configured:
-            raise ImproperlyConfigured(
-                    "Celery needs to be configured to run celeryd.")
-        map(self.loader.import_module, self.include)
+        self.loader = self.app.loader
+        self.settings = self.app.conf
+        for module in self.include:
+            self.loader.import_module(module)
 
     def redirect_stdouts_to_logger(self):
-        from celery import log
-        handled = log.setup_logging_subsystem(loglevel=self.loglevel,
-                                              logfile=self.logfile)
-        # Redirect stdout/stderr to our logger.
+        handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel,
+                                                       logfile=self.logfile)
         if not handled:
-            logger = log.get_default_logger()
+            logger = self.app.log.get_default_logger()
             if self.redirect_stdouts:
-                log.redirect_stdouts_to_logger(logger,
-                        loglevel=self.redirect_stdouts_level)
+                self.app.log.redirect_stdouts_to_logger(logger,
+                                loglevel=self.redirect_stdouts_level)
 
     def purge_messages(self):
-        discarded_count = discard_all()
-        what = discarded_count > 1 and "messages" or "message"
-        print("discard: Erased %d %s from the queue.\n" % (
-            discarded_count, what))
+        count = self.app.control.discard_all()
+        what = (not count or count > 1) and "messages" or "message"
+        print("discard: Erased %d %s from the queue.\n" % (count, what))
 
     def worker_init(self):
         # Run the worker init handler.
@@ -173,51 +183,60 @@ class Worker(object):
         if not include_builtins:
             tasklist = filter(lambda s: not s.startswith("celery."),
                               tasklist)
-        return TASK_LIST_FMT % "\n".join("\t. %s" % task
-                                            for task in sorted(tasklist))
+        return "\n".join("  . %s" % task for task in sorted(tasklist))
 
-    def startup_info(self):
-        tasklist = ""
+    def extra_info(self):
         if self.loglevel <= logging.INFO:
             include_builtins = self.loglevel <= logging.DEBUG
             tasklist = self.tasklist(include_builtins=include_builtins)
+            return EXTRA_INFO_FMT % {"tasks": tasklist}
+        return ""
 
-        queues = self.defaults.get_queues()
-
-        return STARTUP_INFO_FMT % {
-            "conninfo": info.format_broker_info(),
-            "queues": info.format_queues(queues, indent=8),
-            "concurrency": self.concurrency,
+    def startup_info(self):
+        app = self.app
+        concurrency = self.concurrency
+        if self.autoscale:
+            cmax, cmin = self.autoscale
+            concurrency = "{min=%s, max=%s}" % (cmin, cmax)
+        return BANNER % {
+            "hostname": self.hostname,
+            "version": __version__,
+            "conninfo": self.app.broker_connection().as_uri(),
+            "concurrency": concurrency,
             "loglevel": LOG_LEVELS[self.loglevel],
             "logfile": self.logfile or "[stderr]",
             "celerybeat": self.run_clockservice and "ON" or "OFF",
             "events": self.events and "ON" or "OFF",
-            "tasks": tasklist,
             "loader": get_full_cls_name(self.loader.__class__),
+            "queues": app.amqp.queues.format(indent=18, indent_first=False),
         }
 
     def run_worker(self):
         if self.pidfile:
             pidlock = platforms.create_pidlock(self.pidfile).acquire()
             atexit.register(pidlock.release)
-        worker = self.WorkController(concurrency=self.concurrency,
+        worker = self.WorkController(app=self.app,
+                                concurrency=self.concurrency,
                                 loglevel=self.loglevel,
                                 logfile=self.logfile,
                                 hostname=self.hostname,
-                                ready_callback=self.on_listener_ready,
+                                ready_callback=self.on_consumer_ready,
                                 embed_clockservice=self.run_clockservice,
                                 schedule_filename=self.schedule,
+                                scheduler_cls=self.scheduler_cls,
                                 send_events=self.events,
                                 db=self.db,
                                 max_tasks_per_child=self.max_tasks_per_child,
                                 task_time_limit=self.task_time_limit,
-                                task_soft_time_limit=self.task_soft_time_limit)
+                                task_soft_time_limit=self.task_soft_time_limit,
+                                autoscale=self.autoscale,
+                                pool_cls=self.pool)
         self.install_platform_tweaks(worker)
         worker.start()
 
     def install_platform_tweaks(self, worker):
         """Install platform specific tweaks and workarounds."""
-        if IS_OSX:
+        if self.app.IS_OSX:
             self.osx_proxy_detection_workaround()
 
         # Install signal handler so SIGHUP restarts the worker.
@@ -225,7 +244,7 @@ class Worker(object):
             # only install HUP handler if detached from terminal,
             # so closing the terminal window doesn't restart celeryd
             # into the background.
-            if IS_OSX:
+            if self.app.IS_OSX:
                 # OS X can't exec from a process using threads.
                 # See http://github.com/ask/celery/issues#issue/152
                 install_HUP_not_supported_handler(worker)
@@ -233,6 +252,8 @@ class Worker(object):
                 install_worker_restart_handler(worker)
         install_worker_term_handler(worker)
         install_worker_int_handler(worker)
+        install_cry_handler(worker.logger)
+        install_rdb_handler()
         signals.worker_init.send(sender=worker)
 
     def osx_proxy_detection_workaround(self):
@@ -253,15 +274,17 @@ class Worker(object):
 def install_worker_int_handler(worker):
 
     def _stop(signum, frame):
-        process_name = multiprocessing.current_process().name
-        if process_name == "MainProcess":
+        process_name = None
+        if multiprocessing:
+            process_name = multiprocessing.current_process().name
+        if not process_name or process_name == "MainProcess":
             worker.logger.warn(
                 "celeryd: Hitting Ctrl+C again will terminate "
                 "all running tasks!")
             install_worker_int_again_handler(worker)
             worker.logger.warn("celeryd: Warm shutdown (%s)" % (
                 process_name))
-            worker.stop()
+            worker.stop(in_sighandler=True)
         raise SystemExit()
 
     platforms.install_signal_handler("SIGINT", _stop)
@@ -270,12 +293,14 @@ def install_worker_int_handler(worker):
 def install_worker_int_again_handler(worker):
 
     def _stop(signum, frame):
-        process_name = multiprocessing.current_process().name
-        if process_name == "MainProcess":
+        process_name = None
+        if multiprocessing:
+            process_name = multiprocessing.current_process().name
+        if not process_name or process_name == "MainProcess":
             worker.logger.warn("celeryd: Cold shutdown (%s)" % (
                 process_name))
-            worker.terminate()
-        raise SystemExit()
+            worker.terminate(in_sighandler=True)
+        raise SystemTerminate()
 
     platforms.install_signal_handler("SIGINT", _stop)
 
@@ -283,11 +308,13 @@ def install_worker_int_again_handler(worker):
 def install_worker_term_handler(worker):
 
     def _stop(signum, frame):
-        process_name = multiprocessing.current_process().name
-        if process_name == "MainProcess":
+        process_name = None
+        if multiprocessing:
+            process_name = multiprocessing.current_process().name
+        if not process_name or process_name == "MainProcess":
             worker.logger.warn("celeryd: Warm shutdown (%s)" % (
                 process_name))
-            worker.stop()
+            worker.stop(in_sighandler=True)
         raise SystemExit()
 
     platforms.install_signal_handler("SIGTERM", _stop)
@@ -299,12 +326,36 @@ def install_worker_restart_handler(worker):
         """Signal handler restarting the current python program."""
         worker.logger.warn("Restarting celeryd (%s)" % (
             " ".join(sys.argv)))
-        worker.stop()
+        worker.stop(in_sighandler=True)
         os.execv(sys.executable, [sys.executable] + sys.argv)
 
     platforms.install_signal_handler("SIGHUP", restart_worker_sig_handler)
 
 
+def install_cry_handler(logger):
+    # 2.4 does not have sys._current_frames
+    is_jython = sys.platform.startswith("java")
+    is_pypy = hasattr(sys, "pypy_version_info")
+    if not (is_jython or is_pypy) and sys.version_info > (2, 5):
+
+        def cry_handler(signum, frame):
+            """Signal handler logging the stacktrace of all active threads."""
+            logger.error("\n" + cry())
+
+        platforms.install_signal_handler("SIGUSR1", cry_handler)
+
+
+def install_rdb_handler():  # pragma: no cover
+
+    def rdb_handler(signum, frame):
+        """Signal handler setting a rdb breakpoint at the current frame."""
+        from celery.contrib import rdb
+        rdb.set_trace(frame)
+
+    if os.environ.get("CELERY_RDBSIG"):
+        platforms.install_signal_handler("SIGUSR2", rdb_handler)
+
+
 def install_HUP_not_supported_handler(worker):
 
     def warn_on_HUP_handler(signum, frame):
@@ -312,7 +363,3 @@ def install_HUP_not_supported_handler(worker):
             "Restarting with HUP is unstable on this platform!")
 
     platforms.install_signal_handler("SIGHUP", warn_on_HUP_handler)
-
-
-def run_worker(*args, **kwargs):
-    return Worker(*args, **kwargs).run()

+ 15 - 31
celery/backends/__init__.py

@@ -1,7 +1,6 @@
-from celery import conf
+from celery import current_app
+from celery.local import LocalProxy
 from celery.utils import get_cls_by_name
-from celery.utils.functional import curry
-from celery.loaders import current_loader
 
 BACKEND_ALIASES = {
     "amqp": "celery.backends.amqp.AMQPBackend",
@@ -11,41 +10,26 @@ BACKEND_ALIASES = {
     "tyrant": "celery.backends.tyrant.TyrantBackend",
     "database": "celery.backends.database.DatabaseBackend",
     "cassandra": "celery.backends.cassandra.CassandraBackend",
+    "disabled": "celery.backends.base.DisabledBackend",
 }
 
 _backend_cache = {}
 
 
-def get_backend_cls(backend):
+def get_backend_cls(backend=None, loader=None):
     """Get backend class by name/alias"""
+    backend = backend or "disabled"
+    loader = loader or current_app.loader
     if backend not in _backend_cache:
-        aliases = dict(BACKEND_ALIASES, **current_loader().override_backends)
-        _backend_cache[backend] = get_cls_by_name(backend, aliases)
+        aliases = dict(BACKEND_ALIASES, **loader.override_backends)
+        try:
+            _backend_cache[backend] = get_cls_by_name(backend, aliases)
+        except ValueError, exc:
+            raise ValueError("Unknown result backend: %r.  "
+                             "Did you spell it correctly?  (%s)" % (backend,
+                                                                    exc))
     return _backend_cache[backend]
 
 
-"""
-.. function:: get_default_backend_cls()
-
-    Get the backend class specified in the ``CELERY_RESULT_BACKEND`` setting.
-
-"""
-get_default_backend_cls = curry(get_backend_cls, conf.RESULT_BACKEND)
-
-
-"""
-.. class:: DefaultBackend
-
-    The default backend class used for storing task results and status,
-    specified in the ``CELERY_RESULT_BACKEND`` setting.
-
-"""
-DefaultBackend = get_default_backend_cls()
-
-"""
-.. data:: default_backend
-
-    An instance of :class:`DefaultBackend`.
-
-"""
-default_backend = DefaultBackend()
+# deprecate this.
+default_backend = LocalProxy(lambda: current_app.backend)
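
Illustrative lookups against the alias table above:

    get_backend_cls("amqp")   # -> celery.backends.amqp.AMQPBackend
    get_backend_cls()         # no backend given -> the DisabledBackend alias
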

+ 192 - 142
celery/backends/amqp.py

@@ -1,140 +1,143 @@
-"""celery.backends.amqp"""
+# -*- coding: utf-8 -*-
 import socket
+import threading
 import time
-import warnings
 
 from datetime import timedelta
+from itertools import count
 
-from carrot.messaging import Consumer, Publisher
+from kombu.entity import Exchange, Queue
+from kombu.messaging import Consumer, Producer
 
-from celery import conf
 from celery import states
 from celery.backends.base import BaseDictBackend
 from celery.exceptions import TimeoutError
-from celery.messaging import establish_connection
 from celery.utils import timeutils
 
 
-class AMQResultWarning(UserWarning):
-    pass
+class BacklogLimitExceeded(Exception):
+    """Too much state history to fast-forward."""
 
 
-class ResultPublisher(Publisher):
-    exchange = conf.RESULT_EXCHANGE
-    exchange_type = conf.RESULT_EXCHANGE_TYPE
-    delivery_mode = conf.RESULT_PERSISTENT and 2 or 1
-    serializer = conf.RESULT_SERIALIZER
-    durable = conf.RESULT_PERSISTENT
-    auto_delete = True
-
-    def __init__(self, connection, task_id, **kwargs):
-        super(ResultPublisher, self).__init__(connection,
-                        routing_key=task_id.replace("-", ""),
-                        **kwargs)
-
-
-class ResultConsumer(Consumer):
-    exchange = conf.RESULT_EXCHANGE
-    exchange_type = conf.RESULT_EXCHANGE_TYPE
-    durable = conf.RESULT_PERSISTENT
-    no_ack = True
-    auto_delete = True
-
-    def __init__(self, connection, task_id, **kwargs):
-        routing_key = task_id.replace("-", "")
-        super(ResultConsumer, self).__init__(connection,
-                queue=routing_key, routing_key=routing_key, **kwargs)
+def repair_uuid(s):
+    # Historically the dashes in UUIDs have been removed from AMQ entity
+    # names, but there is no known reason to do so.  Hopefully we'll be
+    # able to fix this in v3.0.
+    return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
 
 
 class AMQPBackend(BaseDictBackend):
-    """AMQP backend. Publish results by sending messages to the broker
-    using the task id as routing key.
+    """Publishes results by sending messages."""
+    Exchange = Exchange
+    Queue = Queue
+    Consumer = Consumer
+    Producer = Producer
 
-    **NOTE:** Results published using this backend is read-once only.
-    After the result has been read, the result is deleted. (however, it's
-    still cached locally by the backend instance).
-
-    """
-
-    _connection = None
+    BacklogLimitExceeded = BacklogLimitExceeded
 
     def __init__(self, connection=None, exchange=None, exchange_type=None,
-            persistent=None, serializer=None, auto_delete=None,
-            expires=None, **kwargs):
+            persistent=None, serializer=None, auto_delete=True,
+            expires=None, connection_max=None, **kwargs):
+        super(AMQPBackend, self).__init__(**kwargs)
+        conf = self.app.conf
         self._connection = connection
         self.queue_arguments = {}
-        self.exchange = exchange
-        self.exchange_type = exchange_type
+        if persistent is None:
+            persistent = conf.CELERY_RESULT_PERSISTENT
         self.persistent = persistent
-        self.serializer = serializer
+        delivery_mode = persistent and "persistent" or "transient"
+        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
+        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
+        self.exchange = self.Exchange(name=exchange,
+                                      type=exchange_type,
+                                      delivery_mode=delivery_mode,
+                                      durable=self.persistent,
+                                      auto_delete=auto_delete)
+        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
         self.auto_delete = auto_delete
         self.expires = expires
         if self.expires is None:
-            self.expires = conf.AMQP_TASK_RESULT_EXPIRES
+            self.expires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
         if isinstance(self.expires, timedelta):
             self.expires = timeutils.timedelta_seconds(self.expires)
         if self.expires is not None:
             self.expires = int(self.expires)
-            # WARNING: Requries RabbitMQ 2.1.0 or higher.
-            # x-expires must be a signed-int, or long describing the
-            # expiry time in milliseconds.
+            # requires RabbitMQ 2.1.0 or higher.
             self.queue_arguments["x-expires"] = int(self.expires * 1000.0)
-        super(AMQPBackend, self).__init__(**kwargs)
+        self.connection_max = (connection_max or
+                               conf.CELERY_AMQP_TASK_RESULT_CONNECTION_MAX)
+        self.mutex = threading.Lock()
+
+    def _create_binding(self, task_id):
+        name = task_id.replace("-", "")
+        return self.Queue(name=name,
+                          exchange=self.exchange,
+                          routing_key=name,
+                          durable=self.persistent,
+                          auto_delete=self.auto_delete,
+                          queue_arguments=self.queue_arguments)
+
+    def _create_producer(self, task_id, channel):
+        self._create_binding(task_id)(channel).declare()
+        return self.Producer(channel, exchange=self.exchange,
+                             routing_key=task_id.replace("-", ""),
+                             serializer=self.serializer)
+
+    def _create_consumer(self, bindings, channel):
+        return self.Consumer(channel, bindings, no_ack=True)
+
+    def _publish_result(self, connection, task_id, meta):
+        # cache single channel
+        if hasattr(connection, "_result_producer_chan") and \
+                connection._result_producer_chan is not None and \
+                connection._result_producer_chan.connection is not None:
+            channel = connection._result_producer_chan
+        else:
+            channel = connection._result_producer_chan = connection.channel()
 
-    def _create_publisher(self, task_id, connection):
-        delivery_mode = self.persistent and 2 or 1
-
-        # Declares the queue.
-        self._create_consumer(task_id, connection).close()
-
-        return ResultPublisher(connection, task_id,
-                               exchange=self.exchange,
-                               exchange_type=self.exchange_type,
-                               delivery_mode=delivery_mode,
-                               serializer=self.serializer,
-                               auto_delete=self.auto_delete)
-
-    def _create_consumer(self, task_id, connection):
-        return ResultConsumer(connection, task_id,
-                              exchange=self.exchange,
-                              exchange_type=self.exchange_type,
-                              durable=self.persistent,
-                              auto_delete=self.auto_delete,
-                              queue_arguments=self.queue_arguments)
-
-    def store_result(self, task_id, result, status, traceback=None,
-            max_retries=20, retry_delay=0.2):
-        """Send task return value and status."""
-        result = self.encode_result(result, status)
+        self._create_producer(task_id, channel).publish(meta)
 
-        meta = {"task_id": task_id,
-                "result": result,
-                "status": status,
-                "traceback": traceback}
+    def revive(self, channel):
+        pass
 
-        for i in range(max_retries + 1):
+    def _store_result(self, task_id, result, status, traceback=None,
+            max_retries=20, interval_start=0, interval_step=1,
+            interval_max=1):
+        """Send task return value and status."""
+        self.mutex.acquire()
+        try:
+            conn = self.app.pool.acquire(block=True)
             try:
-                publisher = self._create_publisher(task_id, self.connection)
-                publisher.send(meta)
-                publisher.close()
-            except Exception, exc:
-                if not max_retries:
-                    raise
-                self._connection = None
-                warnings.warn(AMQResultWarning(
-                    "Error sending result %s: %r" % (task_id, exc)))
-                time.sleep(retry_delay)
-            break
+
+                def errback(error, delay):
+                    conn._result_producer_chan = None
+                    print("Couldn't send result for %r: %r. Retry in %rs." % (
+                            task_id, error, delay))
+
+                send = conn.ensure(self, self._publish_result,
+                            max_retries=max_retries,
+                            errback=errback,
+                            interval_start=interval_start,
+                            interval_step=interval_step,
+                            interval_max=interval_max)
+                send(conn, task_id, {"task_id": task_id, "status": status,
+                                "result": self.encode_result(result, status),
+                                "traceback": traceback})
+            finally:
+                conn.release()
+        finally:
+            self.mutex.release()
 
         return result
 
     def get_task_meta(self, task_id, cache=True):
         return self.poll(task_id)
 
-    def wait_for(self, task_id, timeout=None, cache=True):
+    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
+            **kwargs):
         cached_meta = self._cache.get(task_id)
-
-        if cached_meta and cached_meta["status"] in states.READY_STATES:
+        if cache and cached_meta and \
+                cached_meta["status"] in states.READY_STATES:
             meta = cached_meta
         else:
             try:
@@ -142,67 +145,112 @@ class AMQPBackend(BaseDictBackend):
             except socket.timeout:
                 raise TimeoutError("The operation timed out.")
 
-        if meta["status"] == states.SUCCESS:
+        state = meta["status"]
+        if state == states.SUCCESS:
+            return meta["result"]
+        elif state in states.PROPAGATE_STATES:
+            if propagate:
+                raise self.exception_to_python(meta["result"])
             return meta["result"]
-        elif meta["status"] in states.PROPAGATE_STATES:
-            raise self.exception_to_python(meta["result"])
         else:
             return self.wait_for(task_id, timeout, cache)
 
-    def poll(self, task_id):
-        consumer = self._create_consumer(task_id, self.connection)
-        result = consumer.fetch()
+    def poll(self, task_id, backlog_limit=100):
+        conn = self.app.pool.acquire(block=True)
+        channel = conn.channel()
         try:
-            if result:
-                payload = self._cache[task_id] = result.payload
+            binding = self._create_binding(task_id)(channel)
+            binding.declare()
+            latest, acc = None, None
+            for i in count():  # fast-forward
+                latest, acc = acc, binding.get(no_ack=True)
+                if not acc:
+                    break
+                if i > backlog_limit:
+                    raise self.BacklogLimitExceeded(task_id)
+            if latest:
+                payload = self._cache[task_id] = latest.payload
                 return payload
-            else:
-
-                # Use previously received status if any.
-                if task_id in self._cache:
-                    return self._cache[task_id]
-
-                return {"status": states.PENDING, "result": None}
+            elif task_id in self._cache:  # use previously received state.
+                return self._cache[task_id]
+            return {"status": states.PENDING, "result": None}
         finally:
-            consumer.close()
+            channel.close()
+            conn.release()
 
-    def consume(self, task_id, timeout=None):
-        results = []
+    def drain_events(self, connection, consumer, timeout=None, now=time.time):
+        wait = connection.drain_events
+        results = {}
 
         def callback(meta, message):
             if meta["status"] in states.READY_STATES:
-                results.append(meta)
-
-        wait = self.connection.drain_events
-        consumer = self._create_consumer(task_id, self.connection)
+                uuid = repair_uuid(message.delivery_info["routing_key"])
+                results[uuid] = meta
         consumer.register_callback(callback)
 
-        consumer.consume()
+        time_start = now()
+        while 1:
+            # Total time spent may exceed a single call to wait()
+            if timeout and now() - time_start >= timeout:
+                raise socket.timeout()
+            wait(timeout=timeout)
+            if results:  # got event on the wanted channel.
+                break
+        self._cache.update(results)
+        return results
+
+    def consume(self, task_id, timeout=None):
+        conn = self.app.pool.acquire(block=True)
+        channel = conn.channel()
         try:
-            time_start = time.time()
-            while True:
-                # Total time spent may exceed a single call to wait()
-                if timeout and time.time() - time_start >= timeout:
-                    raise socket.timeout()
-                wait(timeout=timeout)
-                if results:
-                    # Got event on the wanted channel.
-                    break
+            binding = self._create_binding(task_id)
+            consumer = self._create_consumer(binding, channel)
+            consumer.consume()
+            try:
+                return self.drain_events(conn, consumer, timeout).values()[0]
+            finally:
+                consumer.cancel()
         finally:
-            consumer.close()
-
-        self._cache[task_id] = results[0]
-        return results[0]
+            channel.close()
+            conn.release()
 
-    def close(self):
-        if self._connection is not None:
-            self._connection.close()
-
-    @property
-    def connection(self):
-        if not self._connection:
-            self._connection = establish_connection()
-        return self._connection
+    def get_many(self, task_ids, timeout=None):
+        conn = self.app.pool.acquire(block=True)
+        channel = conn.channel()
+        try:
+            ids = set(task_ids)
+            cached_ids = set()
+            for task_id in ids:
+                try:
+                    cached = self._cache[task_id]
+                except KeyError:
+                    pass
+                else:
+                    if cached["status"] in states.READY_STATES:
+                        yield task_id, cached
+                        cached_ids.add(task_id)
+            ids ^= cached_ids
+
+            bindings = [self._create_binding(task_id) for task_id in task_ids]
+            consumer = self._create_consumer(bindings, channel)
+            consumer.consume()
+            try:
+                while ids:
+                    r = self.drain_events(conn, consumer, timeout)
+                    ids ^= set(r.keys())
+                    for ready_id, ready_meta in r.items():
+                        yield ready_id, ready_meta
+            except:   # ☹ Py2.4 — Cannot yield inside try: finally: block
+                consumer.cancel()
+                raise
+            consumer.cancel()
+
+        except:  # … ☹
+            channel.close()
+            conn.release()
+            raise
+        channel.close()
+        conn.release()
 
     def reload_task_result(self, task_id):
         raise NotImplementedError(
@@ -214,11 +262,13 @@ class AMQPBackend(BaseDictBackend):
                 "reload_taskset_result is not supported by this backend.")
 
     def save_taskset(self, taskset_id, result):
-        """Store the result and status of a task."""
         raise NotImplementedError(
                 "save_taskset is not supported by this backend.")
 
     def restore_taskset(self, taskset_id, cache=True):
-        """Get the result of a taskset."""
         raise NotImplementedError(
                 "restore_taskset is not supported by this backend.")
+
+    def delete_taskset(self, taskset_id):
+        raise NotImplementedError(
+                "delete_taskset is not supported by this backend.")

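The new backend names the per-task queue and routing key by stripping the dashes from the task id, and repair_uuid (above) puts them back when results are drained in get_many. A quick round-trip check of that convention, runnable as plain Python:

    import uuid

    def repair_uuid(s):
        # Re-insert the dashes stripped from the AMQ entity name (8-4-4-4-12).
        return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])

    task_id = str(uuid.uuid4())
    routing_key = task_id.replace("-", "")   # queue name / routing key on publish
    assert repair_uuid(routing_key) == task_id
    print("%s -> %s" % (routing_key, repair_uuid(routing_key)))
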
+ 60 - 18
celery/backends/base.py

@@ -1,17 +1,15 @@
 """celery.backends.base"""
 import time
 
-from celery import conf
 from celery import states
 from celery.exceptions import TimeoutError, TaskRevokedError
-from celery.serialization import pickle, get_pickled_exception
-from celery.serialization import get_pickleable_exception
+from celery.utils.serialization import pickle, get_pickled_exception
+from celery.utils.serialization import get_pickleable_exception
 from celery.datastructures import LocalCache
 
 
 class BaseBackend(object):
-    """The base backend class. All backends should inherit from this."""
-
+    """Base backend class."""
     READY_STATES = states.READY_STATES
     UNREADY_STATES = states.UNREADY_STATES
     EXCEPTION_STATES = states.EXCEPTION_STATES
@@ -19,7 +17,8 @@ class BaseBackend(object):
     TimeoutError = TimeoutError
 
     def __init__(self, *args, **kwargs):
-        pass
+        from celery.app import app_or_default
+        self.app = app_or_default(kwargs.get("app"))
 
     def encode_result(self, result, status):
         if status in self.EXCEPTION_STATES:
@@ -71,19 +70,18 @@ class BaseBackend(object):
         raise NotImplementedError("%s does not implement forget." % (
                     self.__class__))
 
-    def wait_for(self, task_id, timeout=None):
+    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
         """Wait for task and return its result.
 
         If the task raises an exception, this exception
         will be re-raised by :func:`wait_for`.
 
-        If ``timeout`` is not ``None``, this raises the
+        If `timeout` is not :const:`None`, this raises the
         :class:`celery.exceptions.TimeoutError` exception if the operation
-        takes longer than ``timeout`` seconds.
+        takes longer than `timeout` seconds.
 
         """
 
-        sleep_inbetween = 0.5
         time_elapsed = 0.0
 
         while True:
@@ -91,10 +89,13 @@ class BaseBackend(object):
             if status == states.SUCCESS:
                 return self.get_result(task_id)
             elif status in states.PROPAGATE_STATES:
-                raise self.get_result(task_id)
+                result = self.get_result(task_id)
+                if propagate:
+                    raise result
+                return result
             # avoid hammering the CPU checking status.
-            time.sleep(sleep_inbetween)
-            time_elapsed += sleep_inbetween
+            time.sleep(interval)
+            time_elapsed += interval
             if timeout and time_elapsed >= timeout:
                 raise TimeoutError("The operation timed out.")
 
@@ -132,6 +133,10 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "restore_taskset is not supported by this backend.")
 
+    def delete_taskset(self, taskset_id):
+        raise NotImplementedError(
+                "delete_taskset is not supported by this backend.")
+
     def reload_task_result(self, task_id):
         """Reload task result, even if it has been previously fetched."""
         raise NotImplementedError(
@@ -142,23 +147,38 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "reload_taskset_result is not supported by this backend.")
 
+    def on_chord_part_return(self, task):
+        pass
+
+    def on_chord_apply(self, setid, body, **kwargs):
+        from celery.registry import tasks
+        tasks["celery.chord_unlock"].apply_async((setid, body, ), kwargs,
+                                                 countdown=1)
+
+    def __reduce__(self):
+        return (self.__class__, ())
+
 
 class BaseDictBackend(BaseBackend):
 
     def __init__(self, *args, **kwargs):
         super(BaseDictBackend, self).__init__(*args, **kwargs)
         self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
-                                 conf.MAX_CACHED_RESULTS)
+                                 self.app.conf.CELERY_MAX_CACHED_RESULTS)
 
-    def store_result(self, task_id, result, status, traceback=None):
+    def store_result(self, task_id, result, status, traceback=None, **kwargs):
         """Store task result and status."""
         result = self.encode_result(result, status)
-        return self._store_result(task_id, result, status, traceback)
+        return self._store_result(task_id, result, status, traceback, **kwargs)
 
     def forget(self, task_id):
         self._cache.pop(task_id, None)
         self._forget(task_id)
 
+    def _forget(self, task_id):
+        raise NotImplementedError("%s does not implement forget." % (
+                    self.__class__))
+
     def get_status(self, task_id):
         """Get the status of a task."""
         return self.get_task_meta(task_id)["status"]
@@ -210,6 +230,10 @@ class BaseDictBackend(BaseBackend):
         """Store the result of an executed taskset."""
         return self._save_taskset(taskset_id, result)
 
+    def delete_taskset(self, taskset_id):
+        self._cache.pop(taskset_id, None)
+        return self._delete_taskset(taskset_id)
+
 
 class KeyValueStoreBackend(BaseDictBackend):
 
@@ -239,10 +263,13 @@ class KeyValueStoreBackend(BaseDictBackend):
         return result
 
     def _save_taskset(self, taskset_id, result):
-        meta = {"result": result}
-        self.set(self.get_key_for_taskset(taskset_id), pickle.dumps(meta))
+        self.set(self.get_key_for_taskset(taskset_id),
+                 pickle.dumps({"result": result}))
         return result
 
+    def _delete_taskset(self, taskset_id):
+        self.delete(self.get_key_for_taskset(taskset_id))
+
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
         meta = self.get(self.get_key_for_task(task_id))
@@ -256,3 +283,18 @@ class KeyValueStoreBackend(BaseDictBackend):
         if meta:
             meta = pickle.loads(str(meta))
             return meta
+
+
+class DisabledBackend(BaseBackend):
+
+    def store_result(self, *args, **kwargs):
+        pass
+
+    def _is_disabled(self, *args, **kwargs):
+        raise NotImplementedError("No result backend configured.  "
+                "Please see the documentation for more information.")
+
+    wait_for = _is_disabled
+    get_status = _is_disabled
+    get_result = _is_disabled
+    get_traceback = _is_disabled

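BaseBackend.wait_for above is a plain polling loop: check the stored state, sleep `interval` seconds, and give up with TimeoutError once `timeout` is exceeded. A self-contained sketch of the same control flow against an in-memory store (the store and state constants are stand-ins, not the real backend):

    import time

    SUCCESS, FAILURE, PENDING = "SUCCESS", "FAILURE", "PENDING"
    PROPAGATE_STATES = frozenset([FAILURE])

    class TimeoutError(Exception):
        pass

    def wait_for(store, task_id, timeout=None, propagate=True, interval=0.5):
        elapsed = 0.0
        while True:
            status, result = store.get(task_id, (PENDING, None))
            if status == SUCCESS:
                return result
            elif status in PROPAGATE_STATES:
                if propagate:
                    raise Exception(result)
                return result
            time.sleep(interval)           # avoid hammering the CPU
            elapsed += interval
            if timeout and elapsed >= timeout:
                raise TimeoutError("The operation timed out.")

    store = {"t1": (SUCCESS, 42)}
    print(wait_for(store, "t1"))           # -> 42
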
+ 39 - 29
celery/backends/cache.py

@@ -1,26 +1,35 @@
 from datetime import timedelta
 
-from carrot.utils import partition
+from kombu.utils import partition, cached_property
 
-from celery import conf
 from celery.backends.base import KeyValueStoreBackend
 from celery.exceptions import ImproperlyConfigured
 from celery.utils import timeutils
 from celery.datastructures import LocalCache
 
+_imp = [None]
 
-def get_best_memcache(*args, **kwargs):
-    behaviors = kwargs.pop("behaviors", None)
-    is_pylibmc = False
-    try:
-        import pylibmc as memcache
-        is_pylibmc = True
-    except ImportError:
+
+def import_best_memcache():
+    if _imp[0] is None:
+        is_pylibmc = False
         try:
-            import memcache
+            import pylibmc as memcache
+            is_pylibmc = True
         except ImportError:
-            raise ImproperlyConfigured("Memcached backend requires either "
-                                       "the 'memcache' or 'pylibmc' library")
+            try:
+                import memcache
+            except ImportError:
+                raise ImproperlyConfigured(
+                        "Memcached backend requires either the 'pylibmc' "
+                        "or 'memcache' library")
+        _imp[0] = is_pylibmc, memcache
+    return _imp[0]
+
+
+def get_best_memcache(*args, **kwargs):
+    behaviors = kwargs.pop("behaviors", None)
+    is_pylibmc, memcache = import_best_memcache()
     client = memcache.Client(*args, **kwargs)
     if is_pylibmc and behaviors is not None:
         client.behaviors = behaviors
@@ -42,26 +51,29 @@ class DummyClient(object):
         self.cache.pop(key, None)
 
 
-backends = {"memcache": get_best_memcache,
-            "memcached": get_best_memcache,
-            "pylibmc": get_best_memcache,
-            "memory": DummyClient}
+backends = {"memcache": lambda: get_best_memcache,
+            "memcached": lambda: get_best_memcache,
+            "pylibmc": lambda: get_best_memcache,
+            "memory": lambda: DummyClient}
 
 
 class CacheBackend(KeyValueStoreBackend):
-    _client = None
 
-    def __init__(self, expires=conf.TASK_RESULT_EXPIRES,
-            backend=conf.CACHE_BACKEND, options={}, **kwargs):
+    def __init__(self, expires=None, backend=None, options={}, **kwargs):
         super(CacheBackend, self).__init__(self, **kwargs)
-        if isinstance(expires, timedelta):
-            expires = timeutils.timedelta_seconds(expires)
-        self.expires = int(expires)
-        self.options = dict(conf.CACHE_BACKEND_OPTIONS, **options)
+
+        self.expires = expires or self.app.conf.CELERY_TASK_RESULT_EXPIRES
+        if isinstance(self.expires, timedelta):
+            self.expires = timeutils.timedelta_seconds(self.expires)
+        self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
+                            **options)
+
+        backend = backend or self.app.conf.CELERY_CACHE_BACKEND
+        self.expires = int(self.expires)
         self.backend, _, servers = partition(backend, "://")
-        self.servers = servers.split(";")
+        self.servers = servers.rstrip('/').split(";")
         try:
-            self.Client = backends[self.backend]
+            self.Client = backends[self.backend]()
         except KeyError:
             raise ImproperlyConfigured(
                     "Unknown cache backend: %s. Please use one of the "
@@ -77,8 +89,6 @@ class CacheBackend(KeyValueStoreBackend):
     def delete(self, key):
         return self.client.delete(key)
 
-    @property
+    @cached_property
     def client(self):
-        if self._client is None:
-            self._client = self.Client(self.servers, **self.options)
-        return self._client
+        return self.Client(self.servers, **self.options)

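import_best_memcache now memoizes the chosen client module in a one-element list, and the backends mapping stores callables so nothing is imported until a backend is actually selected. A stripped-down sketch of the same memoized-import pattern, using JSON modules purely for illustration:

    _imp = [None]

    def import_best_json():
        # Illustration only: prefer simplejson if installed and remember
        # the result so the import is attempted only once per process.
        if _imp[0] is None:
            try:
                import simplejson as json
            except ImportError:
                import json
            _imp[0] = json
        return _imp[0]

    # Mapping of names to callables: nothing is imported until a backend
    # is actually looked up, mirroring backends[self.backend]() above.
    backends = {"json": lambda: import_best_json()}

    module = backends["json"]()
    print(module.dumps({"ok": True}))
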
+ 39 - 28
celery/backends/cassandra.py

@@ -2,7 +2,7 @@
 try:
     import pycassa
     from thrift import Thrift
-    C = __import__('cassandra').ttypes          # FIXME Namespace kludge
+    C = pycassa.cassandra.ttypes
 except ImportError:
     pycassa = None
 
@@ -14,11 +14,9 @@ import time
 from datetime import datetime
 
 from celery.backends.base import BaseDictBackend
-from celery import conf
 from celery.exceptions import ImproperlyConfigured
-from celery.loaders import load_settings
-from celery.log import setup_logger
-from celery.serialization import pickle
+from celery.utils.serialization import pickle
+from celery.utils.timeutils import maybe_timedelta
 from celery import states
 
 
@@ -50,34 +48,49 @@ class CassandraBackend(BaseDictBackend):
         the :setting:`CASSANDRA_SERVERS` setting is not set.
 
         """
-        self.logger = setup_logger("celery.backends.cassandra")
+        super(CassandraBackend, self).__init__(**kwargs)
+        self.logger = self.app.log.setup_logger(
+                            name="celery.backends.cassandra")
 
         self.result_expires = kwargs.get("result_expires") or \
-                                conf.TASK_RESULT_EXPIRES
+                                maybe_timedelta(
+                                    self.app.conf.CELERY_TASK_RESULT_EXPIRES)
 
         if not pycassa:
             raise ImproperlyConfigured(
-                    "You need to install the pycassa library to use the "
-                    "Cassandra backend. See http://github.com/vomjom/pycassa")
-
-        settings = load_settings()
+                "You need to install the pycassa library to use the "
+                "Cassandra backend. See https://github.com/pycassa/pycassa")
 
         self.servers = servers or \
-                         getattr(settings, "CASSANDRA_SERVERS", self.servers)
+                        self.app.conf.get("CASSANDRA_SERVERS", self.servers)
         self.keyspace = keyspace or \
-                          getattr(settings, "CASSANDRA_KEYSPACE",
-                                  self.keyspace)
+                            self.app.conf.get("CASSANDRA_KEYSPACE",
+                                              self.keyspace)
         self.column_family = column_family or \
-                               getattr(settings, "CASSANDRA_COLUMN_FAMILY",
-                                       self.column_family)
+                                self.app.conf.get("CASSANDRA_COLUMN_FAMILY",
+                                                  self.column_family)
         self.cassandra_options = dict(cassandra_options or {},
-                                   **getattr(settings,
-                                             "CASSANDRA_OPTIONS", {}))
+                                   **self.app.conf.get("CASSANDRA_OPTIONS",
+                                                       {}))
+        read_cons = self.app.conf.get("CASSANDRA_READ_CONSISTENCY",
+                                      "LOCAL_QUORUM")
+        write_cons = self.app.conf.get("CASSANDRA_WRITE_CONSISTENCY",
+                                       "LOCAL_QUORUM")
+        try:
+            self.read_consistency = getattr(pycassa.ConsistencyLevel,
+                                            read_cons)
+        except AttributeError:
+            self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM
+        try:
+            self.write_consistency = getattr(pycassa.ConsistencyLevel,
+                                             write_cons)
+        except AttributeError:
+            self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM
+
         if not self.servers or not self.keyspace or not self.column_family:
             raise ImproperlyConfigured(
                     "Cassandra backend not configured.")
 
-        super(CassandraBackend, self).__init__()
         self._column_family = None
 
     def _retry_on_error(func):
@@ -88,7 +101,6 @@ class CassandraBackend(BaseDictBackend):
                 try:
                     return func(*args, **kwargs)
                 except (pycassa.InvalidRequestException,
-                        pycassa.NoServerAvailable,
                         pycassa.TimedOutException,
                         pycassa.UnavailableException,
                         socket.error,
@@ -102,13 +114,12 @@ class CassandraBackend(BaseDictBackend):
 
     def _get_column_family(self):
         if self._column_family is None:
-            conn = pycassa.connect(self.servers,
+            conn = pycassa.connect(self.keyspace, servers=self.servers,
                                    **self.cassandra_options)
             self._column_family = \
-              pycassa.ColumnFamily(conn, self.keyspace,
-                    self.column_family,
-                    read_consistency_level=pycassa.ConsistencyLevel.DCQUORUM,
-                    write_consistency_level=pycassa.ConsistencyLevel.DCQUORUM)
+              pycassa.ColumnFamily(conn, self.column_family,
+                    read_consistency_level=self.read_consistency,
+                    write_consistency_level=self.write_consistency)
         return self._column_family
 
     def process_cleanup(self):
@@ -157,11 +168,11 @@ class CassandraBackend(BaseDictBackend):
         cf = self._get_column_family()
         column_parent = C.ColumnParent(cf.column_family)
         slice_pred = C.SlicePredicate(
-                        slice_range=C.SliceRange('', end_column,
-                                                 count=2 ** 30))
+                            slice_range=C.SliceRange('', end_column,
+                                                     count=2 ** 30))
         columns = cf.client.multiget_slice(cf.keyspace, self._index_keys,
                                            column_parent, slice_pred,
-                                           pycassa.ConsistencyLevel.DCQUORUM)
+                                           self.read_consistency)
 
         index_cols = [c.column.name
                         for c in itertools.chain(*columns.values())]

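The Cassandra read/write consistency levels are now configurable by name and resolved with getattr, falling back to LOCAL_QUORUM when the configured name does not exist. A small sketch of that lookup pattern with a stand-in ConsistencyLevel class (pycassa is not required here):

    class ConsistencyLevel(object):
        # Stand-in for pycassa.ConsistencyLevel: just named constants.
        ONE = 1
        LOCAL_QUORUM = 6
        ALL = 8

    def resolve_consistency(name, default="LOCAL_QUORUM"):
        try:
            return getattr(ConsistencyLevel, name)
        except AttributeError:
            # Unknown name in the configuration: fall back to the safe default.
            return getattr(ConsistencyLevel, default)

    print(resolve_consistency("ALL"))       # -> 8
    print(resolve_consistency("BOGUS"))     # -> 6 (LOCAL_QUORUM)
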
+ 33 - 17
celery/backends/database.py

@@ -1,17 +1,22 @@
 from datetime import datetime
 
-from celery import conf
+from celery import states
 from celery.backends.base import BaseDictBackend
 from celery.db.models import Task, TaskSet
 from celery.db.session import ResultSession
 from celery.exceptions import ImproperlyConfigured
+from celery.utils.timeutils import maybe_timedelta
 
-try:
-    import sqlalchemy as _
-except ImportError:
-    raise ImproperlyConfigured(
-        "The database result backend requires SQLAlchemy to be installed."
-        "See http://pypi.python.org/pypi/SQLAlchemy")
+
+def _sqlalchemy_installed():
+    try:
+        import sqlalchemy
+    except ImportError:
+        raise ImproperlyConfigured(
+            "The database result backend requires SQLAlchemy to be installed."
+            "See http://pypi.python.org/pypi/SQLAlchemy")
+    return sqlalchemy
+_sqlalchemy_installed()
 
 
 class DatabaseBackend(BaseDictBackend):
@@ -19,17 +24,18 @@ class DatabaseBackend(BaseDictBackend):
 
     def __init__(self, dburi=None, result_expires=None,
             engine_options=None, **kwargs):
-        self.result_expires = result_expires or conf.TASK_RESULT_EXPIRES
-        self.dburi = dburi or conf.RESULT_DBURI
+        super(DatabaseBackend, self).__init__(**kwargs)
+        self.result_expires = result_expires or \
+                                maybe_timedelta(
+                                    self.app.conf.CELERY_TASK_RESULT_EXPIRES)
+        self.dburi = dburi or self.app.conf.CELERY_RESULT_DBURI
         self.engine_options = dict(engine_options or {},
-                                   **conf.RESULT_ENGINE_OPTIONS or {})
+                        **self.app.conf.CELERY_RESULT_ENGINE_OPTIONS or {})
         if not self.dburi:
             raise ImproperlyConfigured(
                     "Missing connection string! Do you have "
                     "CELERY_RESULT_DBURI set to a real value?")
 
-        super(DatabaseBackend, self).__init__(**kwargs)
-
     def ResultSession(self):
         return ResultSession(dburi=self.dburi, **self.engine_options)
 
@@ -55,11 +61,10 @@ class DatabaseBackend(BaseDictBackend):
         session = self.ResultSession()
         try:
             task = session.query(Task).filter(Task.task_id == task_id).first()
-            if not task:
+            if task is None:
                 task = Task(task_id)
-                session.add(task)
-                session.flush()
-                session.commit()
+                task.status = states.PENDING
+                task.result = None
             return task.to_dict()
         finally:
             session.close()
@@ -77,7 +82,7 @@ class DatabaseBackend(BaseDictBackend):
             session.close()
 
     def _restore_taskset(self, taskset_id):
-        """Get taskset metadata for a taskset by id."""
+        """Get metadata for taskset by id."""
         session = self.ResultSession()
         try:
             taskset = session.query(TaskSet).filter(
@@ -87,6 +92,17 @@ class DatabaseBackend(BaseDictBackend):
         finally:
             session.close()
 
+    def _delete_taskset(self, taskset_id):
+        """Delete metadata for taskset by id."""
+        session = self.ResultSession()
+        try:
+            session.query(TaskSet).filter(
+                    TaskSet.taskset_id == taskset_id).delete()
+            session.flush()
+            session.commit()
+        finally:
+            session.close()
+
     def _forget(self, task_id):
         """Forget about result."""
         session = self.ResultSession()

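The SQLAlchemy import is now wrapped in a helper so a missing dependency surfaces as ImproperlyConfigured rather than a bare ImportError when the module is loaded. The same guard pattern in isolation, a sketch with a generic module name:

    class ImproperlyConfigured(Exception):
        pass

    def _require(module_name, hint):
        # Turn a missing optional dependency into a configuration error
        # that tells the user what to install.
        try:
            return __import__(module_name)
        except ImportError:
            raise ImproperlyConfigured(
                "The %s result backend requires %r to be installed. %s" % (
                    module_name, module_name, hint))

    # e.g. sqlalchemy = _require("sqlalchemy",
    #                            "See http://pypi.python.org/pypi/SQLAlchemy")
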
+ 6 - 8
celery/backends/mongodb.py

@@ -6,12 +6,11 @@ try:
 except ImportError:
     pymongo = None
 
-from celery import conf
 from celery import states
-from celery.loaders import load_settings
 from celery.backends.base import BaseDictBackend
 from celery.exceptions import ImproperlyConfigured
-from celery.serialization import pickle
+from celery.utils.serialization import pickle
+from celery.utils.timeutils import maybe_timedelta
 
 
 class Bunch:
@@ -35,17 +34,17 @@ class MongoBackend(BaseDictBackend):
             module :mod:`pymongo` is not available.
 
         """
+        super(MongoBackend, self).__init__(*args, **kwargs)
         self.result_expires = kwargs.get("result_expires") or \
-                                conf.TASK_RESULT_EXPIRES
+                                maybe_timedelta(
+                                    self.app.conf.CELERY_TASK_RESULT_EXPIRES)
 
         if not pymongo:
             raise ImproperlyConfigured(
                 "You need to install the pymongo library to use the "
                 "MongoDB backend.")
 
-        settings = load_settings()
-
-        config = getattr(settings, "CELERY_MONGODB_BACKEND_SETTINGS", None)
+        config = self.app.conf.get("CELERY_MONGODB_BACKEND_SETTINGS", None)
         if config is not None:
             if not isinstance(config, dict):
                 raise ImproperlyConfigured(
@@ -61,7 +60,6 @@ class MongoBackend(BaseDictBackend):
             self.mongodb_taskmeta_collection = config.get(
                 "taskmeta_collection", self.mongodb_taskmeta_collection)
 
-        super(MongoBackend, self).__init__(*args, **kwargs)
         self._connection = None
         self._database = None
 

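CELERY_MONGODB_BACKEND_SETTINGS is now read from app.conf, must be a dict, and its keys override the backend defaults. A minimal sketch of that validate-and-override step with a plain dict standing in for app.conf (default values here are illustrative):

    class ImproperlyConfigured(Exception):
        pass

    DEFAULTS = {"host": "localhost", "port": 27017,
                "database": "celery", "taskmeta_collection": "celery_taskmeta"}

    def mongodb_settings(conf):
        config = conf.get("CELERY_MONGODB_BACKEND_SETTINGS")
        if config is not None and not isinstance(config, dict):
            raise ImproperlyConfigured(
                "MongoDB backend settings should be grouped in a dict")
        settings = dict(DEFAULTS)
        settings.update(config or {})
        return settings

    print(mongodb_settings({"CELERY_MONGODB_BACKEND_SETTINGS": {"port": 27018}}))
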
+ 62 - 82
celery/backends/pyredis.py

@@ -1,11 +1,11 @@
-import warnings
 from datetime import timedelta
 
+from kombu.utils import cached_property
 
-from celery import conf
-from celery.loaders import load_settings
 from celery.backends.base import KeyValueStoreBackend
 from celery.exceptions import ImproperlyConfigured
+from celery.result import TaskSetResult
+from celery.task.sets import subtask
 from celery.utils import timeutils
 
 try:
@@ -17,109 +17,89 @@ except ImportError:
 
 
 class RedisBackend(KeyValueStoreBackend):
-    """Redis based task backend store.
+    """Redis task result store."""
 
-    .. attribute:: redis_host
+    #: redis-py client module.
+    redis = redis
 
-        The hostname to the Redis server.
-
-    .. attribute:: redis_port
-
-        The port to the Redis server.
-
-        Raises :class:`celery.exceptions.ImproperlyConfigured` if
-        the :setting:`REDIS_HOST` or :setting:`REDIS_PORT` settings is not set.
-
-    """
+    #: default Redis server hostname (`localhost`).
     redis_host = "localhost"
+
+    #: default Redis server port (6379)
     redis_port = 6379
     redis_db = 0
-    redis_password = None
-    redis_timeout = None
-    redis_connect_retry = None
-    expires = None
 
-    deprecated_settings = frozenset(["REDIS_TIMEOUT",
-                                     "REDIS_CONNECT_RETRY"])
+    #: default Redis password (:const:`None`)
+    redis_password = None
 
     def __init__(self, redis_host=None, redis_port=None, redis_db=None,
-            redis_timeout=None,
             redis_password=None,
-            redis_connect_retry=None,
-            redis_connect_timeout=None,
-            expires=None):
-        if redis is None:
+            expires=None, **kwargs):
+        super(RedisBackend, self).__init__(**kwargs)
+        if self.redis is None:
             raise ImproperlyConfigured(
                     "You need to install the redis library in order to use "
                   + "Redis result store backend.")
 
-        settings = load_settings()
-        self.redis_host = redis_host or \
-                            getattr(settings, "REDIS_HOST", self.redis_host)
-        self.redis_port = redis_port or \
-                            getattr(settings, "REDIS_PORT", self.redis_port)
-        self.redis_db = redis_db or \
-                            getattr(settings, "REDIS_DB", self.redis_db)
-        self.redis_password = redis_password or \
-                            getattr(settings, "REDIS_PASSWORD",
-                                    self.redis_password)
+        self.redis_host = (redis_host or
+                           self.app.conf.get("REDIS_HOST") or
+                           self.redis_host)
+        self.redis_port = (redis_port or
+                           self.app.conf.get("REDIS_PORT") or
+                           self.redis_port)
+        self.redis_db = (redis_db or
+                         self.app.conf.get("REDIS_DB") or
+                         self.redis_db)
+        self.redis_password = (redis_password or
+                               self.app.conf.get("REDIS_PASSWORD") or
+                               self.redis_password)
         self.expires = expires
         if self.expires is None:
-            self.expires = conf.TASK_RESULT_EXPIRES
+            self.expires = self.app.conf.CELERY_TASK_RESULT_EXPIRES
         if isinstance(self.expires, timedelta):
             self.expires = timeutils.timedelta_seconds(self.expires)
         if self.expires is not None:
             self.expires = int(self.expires)
+        self.redis_port = int(self.redis_port)
 
-        for setting_name in self.deprecated_settings:
-            if getattr(settings, setting_name, None) is not None:
-                warnings.warn(
-                    "The setting '%s' is no longer supported by the "
-                    "python Redis client!" % setting_name.upper(),
-                    DeprecationWarning)
+    def get(self, key):
+        return self.client.get(key)
 
-        if self.redis_port:
-            self.redis_port = int(self.redis_port)
-        if not self.redis_host or not self.redis_port:
-            raise ImproperlyConfigured(
-                "In order to use the Redis result store backend, you have to "
-                "set the REDIS_HOST and REDIS_PORT settings")
-        super(RedisBackend, self).__init__()
-        self._connection = None
-
-    def open(self):
-        """Get :class:`redis.Redis`` instance with the current
-        server configuration.
-
-        The connection is then cached until you do an
-        explicit :meth:`close`.
-
-        """
-        # connection overrides bool()
-        if self._connection is None:
-            self._connection = redis.Redis(host=self.redis_host,
-                                    port=self.redis_port,
-                                    db=self.redis_db,
-                                    password=self.redis_password)
-        return self._connection
+    def set(self, key, value):
+        client = self.client
+        client.set(key, value)
+        if self.expires is not None:
+            client.expire(key, self.expires)
+
+    def delete(self, key):
+        self.client.delete(key)
 
     def close(self):
-        """Close the connection to redis."""
-        if self._connection is not None:
-            self._connection.connection.disconnect()
-            self._connection = None
+        """Closes the Redis connection."""
+        del(self.client)
 
     def process_cleanup(self):
         self.close()
 
-    def get(self, key):
-        return self.open().get(key)
-
-    def set(self, key, value):
-        r = self.open()
-        r.set(key, value)
-        if self.expires is not None:
-            r.expire(key, self.expires)
-
-    def delete(self, key):
-        self.open().delete(key)
+    def on_chord_apply(self, *args, **kwargs):
+        pass
+
+    def on_chord_part_return(self, task, keyprefix="chord-unlock-%s"):
+        setid = task.request.taskset
+        key = keyprefix % setid
+        deps = TaskSetResult.restore(setid, backend=task.backend)
+        if self.client.incr(key) >= deps.total:
+            subtask(task.request.chord).delay(deps.join())
+            deps.delete()
+        self.client.expire(key, 86400)
+
+    @cached_property
+    def client(self):
+        return self.redis.Redis(host=self.redis_host,
+                                port=self.redis_port,
+                                db=self.redis_db,
+                                password=self.redis_password)
+
+    @client.deleter
+    def client(self, client):
+        client.connection.disconnect()

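on_chord_part_return counts finished members of a taskset by incrementing a shared key and fires the chord callback once the counter reaches the size of the set. The counting logic, sketched with an in-memory counter instead of Redis and with the result join and callback stubbed out:

    counters = {}

    def incr(key):
        counters[key] = counters.get(key, 0) + 1
        return counters[key]

    def on_chord_part_return(setid, total, join, callback):
        # Each finished task bumps a shared counter; the task that makes the
        # counter reach `total` joins the results and fires the callback.
        if incr("chord-unlock-%s" % setid) >= total:
            callback(join())

    fired = []
    join = lambda: [1, 2]                    # stand-in for deps.join()
    on_chord_part_return("abc", 2, join, fired.append)   # first part: nothing
    on_chord_part_return("abc", 2, join, fired.append)   # second part: fires
    print(fired)    # -> [[1, 2]]
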
+ 9 - 9
celery/backends/tyrant.py

@@ -4,7 +4,6 @@ try:
 except ImportError:
     pytyrant = None
 
-from celery.loaders import load_settings
 from celery.backends.base import KeyValueStoreBackend
 from celery.exceptions import ImproperlyConfigured
 
@@ -24,34 +23,35 @@ class TyrantBackend(KeyValueStoreBackend):
     tyrant_host = None
     tyrant_port = None
 
-    def __init__(self, tyrant_host=None, tyrant_port=None):
+    def __init__(self, tyrant_host=None, tyrant_port=None, **kwargs):
         """Initialize Tokyo Tyrant backend instance.
 
         Raises :class:`celery.exceptions.ImproperlyConfigured` if
         :setting:`TT_HOST` or :setting:`TT_PORT` is not set.
 
         """
+        super(TyrantBackend, self).__init__(**kwargs)
 
         if not pytyrant:
             raise ImproperlyConfigured(
                     "You need to install the pytyrant library to use the "
                   + "Tokyo Tyrant backend.")
-        settings = load_settings()
-        self.tyrant_host = tyrant_host or \
-                            getattr(settings, "TT_HOST", self.tyrant_host)
-        self.tyrant_port = tyrant_port or \
-                            getattr(settings, "TT_PORT", self.tyrant_port)
+        self.tyrant_host = (tyrant_host or
+                            self.app.conf.get("TT_HOST") or
+                            self.tyrant_host)
+        self.tyrant_port = (tyrant_port or
+                            self.app.conf.get("TT_PORT") or
+                            self.tyrant_port)
         if self.tyrant_port:
             self.tyrant_port = int(self.tyrant_port)
         if not self.tyrant_host or not self.tyrant_port:
             raise ImproperlyConfigured(
                 "To use the Tokyo Tyrant backend, you have to "
                 "set the TT_HOST and TT_PORT settings in your settings.py")
-        super(TyrantBackend, self).__init__()
         self._connection = None
 
     def open(self):
-        """Get :class:`pytyrant.PyTyrant`` instance with the current
+        """Get :class:`pytyrant.PyTyrant` instance with the current
         server configuration.
 
         The connection is then cached until you do an

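Backend options now resolve through a fallback chain: an explicit constructor argument wins, then the application configuration, then the class default. The chain is just chained `or` expressions, as in this small sketch (the names mirror the Tokyo Tyrant settings, but nothing here talks to a server):

    class Backend(object):
        tyrant_host = None
        tyrant_port = None

        def __init__(self, conf, tyrant_host=None, tyrant_port=None):
            # argument -> configuration -> class default
            # (note: falsy configured values such as 0 or "" count as unset)
            self.tyrant_host = (tyrant_host or
                                conf.get("TT_HOST") or
                                self.tyrant_host)
            self.tyrant_port = (tyrant_port or
                                conf.get("TT_PORT") or
                                self.tyrant_port)
            if self.tyrant_port:
                self.tyrant_port = int(self.tyrant_port)

    b = Backend({"TT_HOST": "localhost", "TT_PORT": "1978"})
    print("%s:%s" % (b.tyrant_host, b.tyrant_port))
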
+ 176 - 121
celery/beat.py

@@ -1,25 +1,28 @@
-"""
-
-Periodic Task Scheduler
-
-"""
+import errno
+import os
 import time
 import shelve
+import sys
 import threading
 import traceback
-import multiprocessing
+try:
+    import multiprocessing
+except ImportError:
+    multiprocessing = None
+
 from datetime import datetime
-from UserDict import UserDict
 
-from celery import log
-from celery import conf
+from kombu.utils import cached_property
+
+from celery import __version__
 from celery import platforms
 from celery import registry
-from celery.execute import send_task
-from celery.schedules import maybe_schedule
-from celery.messaging import establish_connection, TaskPublisher
-from celery.utils import instantiate
-from celery.utils.info import humanize_seconds
+from celery import signals
+from celery.app import app_or_default
+from celery.log import SilenceRepeated
+from celery.schedules import maybe_schedule, crontab
+from celery.utils import instantiate, maybe_promise
+from celery.utils.timeutils import humanize_seconds
 
 
 class SchedulingError(Exception):
@@ -29,38 +32,37 @@ class SchedulingError(Exception):
 class ScheduleEntry(object):
     """An entry in the scheduler.
 
-    :param name: see :attr:`name`.
-    :param schedule: see :attr:`schedule`.
-    :param args: see :attr:`args`.
-    :param kwargs: see :attr:`kwargs`.
+    :keyword name: see :attr:`name`.
+    :keyword schedule: see :attr:`schedule`.
+    :keyword args: see :attr:`args`.
+    :keyword kwargs: see :attr:`kwargs`.
+    :keyword options: see :attr:`options`.
     :keyword last_run_at: see :attr:`last_run_at`.
     :keyword total_run_count: see :attr:`total_run_count`.
+    :keyword relative: Is the time relative to when the server starts?
 
-    .. attribute:: name
-
-        The task name.
-
-    .. attribute:: schedule
-
-        The schedule (run_every/crontab)
-
-    .. attribute:: args
-
-        Args to apply.
+    """
 
-    .. attribute:: kwargs
+    #: The task name
+    name = None
 
-        Keyword arguments to apply.
+    #: The schedule (run_every/crontab)
+    schedule = None
 
-    .. attribute:: last_run_at
+    #: Positional arguments to apply.
+    args = None
 
-        The time and date of when this task was last run.
+    #: Keyword arguments to apply.
+    kwargs = None
 
-    .. attribute:: total_run_count
+    #: Task execution options.
+    options = None
 
-        Total number of times this periodic task has been executed.
+    #: The time and date of when this task was last scheduled.
+    last_run_at = None
 
-    """
+    #: Total number of times this task has been scheduled.
+    total_run_count = 0
 
     def __init__(self, name=None, task=None, last_run_at=None,
             total_run_count=None, schedule=None, args=(), kwargs={},
@@ -104,45 +106,53 @@ class ScheduleEntry(object):
         return vars(self).iteritems()
 
     def __repr__(self):
-        return "<Entry: %s %s(*%s, **%s) {%s}>" % (self.name,
-                                                   self.task,
-                                                   self.args,
-                                                   self.kwargs,
-                                                   self.schedule)
+        return "<Entry: %s %s(*%s, **%s) {%s}>" % (
+                self.name, self.task, self.args, self.kwargs, self.schedule)
 
 
-class Scheduler(UserDict):
+class Scheduler(object):
     """Scheduler for periodic tasks.
 
     :keyword schedule: see :attr:`schedule`.
-    :keyword logger:  see :attr:`logger`.
+    :keyword logger: see :attr:`logger`.
     :keyword max_interval: see :attr:`max_interval`.
 
-    .. attribute:: schedule
-
-        The schedule dict/shelve.
-
-    .. attribute:: logger
+    """
 
-        The logger to use.
+    Entry = ScheduleEntry
 
-    .. attribute:: max_interval
+    #: The schedule dict/shelve.
+    schedule = None
 
-        Maximum time to sleep between re-checking the schedule.
+    #: Current logger.
+    logger = None
 
-    """
-    Entry = ScheduleEntry
-    Publisher = TaskPublisher
+    #: Maximum time to sleep between re-checking the schedule.
+    max_interval = 1
 
     def __init__(self, schedule=None, logger=None, max_interval=None,
-            **kwargs):
-        UserDict.__init__(self)
+            app=None, Publisher=None, lazy=False, **kwargs):
         if schedule is None:
             schedule = {}
-        self.data = schedule
-        self.logger = logger or log.get_default_logger(name="celery.beat")
+        self.app = app_or_default(app)
+        conf = self.app.conf
+        self.data = maybe_promise(schedule)
+        self.logger = logger or self.app.log.get_default_logger(
+                                                name="celery.beat")
         self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL
-        self.setup_schedule()
+        self.Publisher = Publisher or self.app.amqp.TaskPublisher
+        if not lazy:
+            self.setup_schedule()
+
+    def install_default_entries(self, data):
+        entries = {}
+        if self.app.conf.CELERY_TASK_RESULT_EXPIRES:
+            if "celery.backend_cleanup" not in data:
+                entries["celery.backend_cleanup"] = {
+                        "task": "celery.backend_cleanup",
+                        "schedule": crontab("0", "4", "*"),
+                        "options": {"expires": 12 * 3600}}
+        self.update_from_dict(entries)
 
     def maybe_due(self, entry, publisher=None):
         is_due, next_time_to_run = entry.is_due()
@@ -153,7 +163,7 @@ class Scheduler(UserDict):
                 result = self.apply_async(entry, publisher=publisher)
             except Exception, exc:
                 self.logger.error("Message Error: %s\n%s" % (exc,
-                    traceback.format_stack()))
+                    traceback.format_stack()), exc_info=sys.exc_info())
             else:
                 self.logger.debug("%s sent. id->%s" % (entry.task,
                                                        result.task_id))
@@ -166,19 +176,13 @@ class Scheduler(UserDict):
 
         """
         remaining_times = []
-        connection = establish_connection()
-        publisher = self.Publisher(connection=connection)
         try:
-            try:
-                for entry in self.schedule.itervalues():
-                    next_time_to_run = self.maybe_due(entry, publisher)
-                    if next_time_to_run:
-                        remaining_times.append(next_time_to_run)
-            except RuntimeError:
-                pass
-        finally:
-            publisher.close()
-            connection.close()
+            for entry in self.schedule.itervalues():
+                next_time_to_run = self.maybe_due(entry, self.publisher)
+                if next_time_to_run:
+                    remaining_times.append(next_time_to_run)
+        except RuntimeError:
+            pass
 
         return min(remaining_times + [self.max_interval])
 
@@ -212,10 +216,10 @@ class Scheduler(UserDict):
         return result
 
     def send_task(self, *args, **kwargs):               # pragma: no cover
-        return send_task(*args, **kwargs)
+        return self.app.send_task(*args, **kwargs)
 
     def setup_schedule(self):
-        pass
+        self.install_default_entries(self.data)
 
     def sync(self):
         pass
@@ -228,28 +232,45 @@ class Scheduler(UserDict):
         self.schedule[entry.name] = entry
         return entry
 
+    def _maybe_entry(self, name, entry):
+        if isinstance(entry, self.Entry):
+            return entry
+        return self.Entry(**dict(entry, name=name))
+
     def update_from_dict(self, dict_):
-        self.update(dict((name, self.Entry(name, **entry))
-                            for name, entry in dict_.items()))
+        self.schedule.update(dict((name, self._maybe_entry(name, entry))
+                                for name, entry in dict_.items()))
 
     def merge_inplace(self, b):
-        A, B = set(self.keys()), set(b.keys())
+        schedule = self.schedule
+        A, B = set(schedule.keys()), set(b.keys())
 
         # Remove items from disk not in the schedule anymore.
         for key in A ^ B:
-            self.pop(key, None)
+            schedule.pop(key, None)
 
         # Update and add new items in the schedule
         for key in B:
-            entry = self.Entry(**dict(b[key]))
-            if self.get(key):
-                self[key].update(entry)
+            entry = self.Entry(**dict(b[key], name=key))
+            if schedule.get(key):
+                schedule[key].update(entry)
             else:
-                self[key] = entry
+                schedule[key] = entry
 
     def get_schedule(self):
         return self.data
 
+    def set_schedule(self, schedule):
+        self.data = schedule
+
+    @cached_property
+    def connection(self):
+        return self.app.broker_connection()
+
+    @cached_property
+    def publisher(self):
+        return self.Publisher(connection=self.connection)
+
     @property
     def schedule(self):
         return self.get_schedule()
@@ -268,12 +289,39 @@ class PersistentScheduler(Scheduler):
         self.schedule_filename = kwargs.get("schedule_filename")
         Scheduler.__init__(self, *args, **kwargs)
 
+    def _remove_db(self):
+        for suffix in "", ".db", ".dat", ".bak", ".dir":
+            try:
+                os.remove(self.schedule_filename + suffix)
+            except OSError, exc:
+                if exc.errno != errno.ENOENT:
+                    raise
+
     def setup_schedule(self):
-        self._store = self.persistence.open(self.schedule_filename)
-        self.data = self._store
-        self.merge_inplace(conf.CELERYBEAT_SCHEDULE)
+        try:
+            self._store = self.persistence.open(self.schedule_filename,
+                                                writeback=True)
+            entries = self._store.setdefault("entries", {})
+        except Exception, exc:
+            self.logger.error("Removing corrupted schedule file %r: %r" % (
+                self.schedule_filename, exc))
+            self._remove_db()
+            self._store = self.persistence.open(self.schedule_filename,
+                                                writeback=True)
+        else:
+            if "__version__" not in self._store:
+                self._store.clear()   # remove schedule at 2.2.2 upgrade.
+        entries = self._store.setdefault("entries", {})
+        self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
+        self.install_default_entries(self.schedule)
+        self._store["__version__"] = __version__
         self.sync()
-        self.data = self._store
+        self.logger.debug("Current schedule:\n" +
+                          "\n".join(repr(entry)
+                                    for entry in entries.itervalues()))
+
+    def get_schedule(self):
+        return self._store["entries"]
 
     def sync(self):
         if self._store is not None:
@@ -292,23 +340,22 @@ class PersistentScheduler(Scheduler):
 class Service(object):
     scheduler_cls = PersistentScheduler
 
-    def __init__(self, logger=None,
-            max_interval=conf.CELERYBEAT_MAX_LOOP_INTERVAL,
-            schedule=conf.CELERYBEAT_SCHEDULE,
-            schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
-            scheduler_cls=None):
-        self.max_interval = max_interval
+    def __init__(self, logger=None, max_interval=None, schedule_filename=None,
+            scheduler_cls=None, app=None):
+        self.app = app_or_default(app)
+        self.max_interval = max_interval or \
+                            self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
         self.scheduler_cls = scheduler_cls or self.scheduler_cls
-        self.logger = logger or log.get_default_logger(name="celery.beat")
-        self.schedule = schedule
-        self.schedule_filename = schedule_filename
+        self.logger = logger or self.app.log.get_default_logger(
+                                                name="celery.beat")
+        self.schedule_filename = schedule_filename or \
+                                    self.app.conf.CELERYBEAT_SCHEDULE_FILENAME
 
-        self._scheduler = None
         self._shutdown = threading.Event()
         self._stopped = threading.Event()
         silence = self.max_interval < 60 and 10 or 1
-        self.debug = log.SilenceRepeated(self.logger.debug,
-                                         max_iterations=silence)
+        self.debug = SilenceRepeated(self.logger.debug,
+                                     max_iterations=silence)
 
     def start(self, embedded_process=False):
         self.logger.info("Celerybeat: Starting...")
@@ -316,7 +363,9 @@ class Service(object):
             "Ticking with max interval->%s" % (
                     humanize_seconds(self.scheduler.max_interval)))
 
+        signals.beat_init.send(sender=self)
         if embedded_process:
+            signals.beat_embedded_init.send(sender=self)
             platforms.set_process_title("celerybeat")
 
         try:
@@ -340,16 +389,19 @@ class Service(object):
         self._shutdown.set()
         wait and self._stopped.wait()           # block until shutdown done.
 
-    @property
+    def get_scheduler(self, lazy=False):
+        filename = self.schedule_filename
+        scheduler = instantiate(self.scheduler_cls,
+                                app=self.app,
+                                schedule_filename=filename,
+                                logger=self.logger,
+                                max_interval=self.max_interval,
+                                lazy=lazy)
+        return scheduler
+
+    @cached_property
     def scheduler(self):
-        if self._scheduler is None:
-            filename = self.schedule_filename
-            self._scheduler = instantiate(self.scheduler_cls,
-                                          schedule_filename=filename,
-                                          logger=self.logger,
-                                          max_interval=self.max_interval)
-            self._scheduler.update_from_dict(self.schedule)
-        return self._scheduler
+        return self.get_scheduler()
 
 
 class _Threaded(threading.Thread):
@@ -368,31 +420,34 @@ class _Threaded(threading.Thread):
         self.service.stop(wait=True)
 
 
-class _Process(multiprocessing.Process):
-    """Embedded task scheduler using multiprocessing."""
+if multiprocessing is not None:
+    class _Process(multiprocessing.Process):
+        """Embedded task scheduler using multiprocessing."""
 
-    def __init__(self, *args, **kwargs):
-        super(_Process, self).__init__()
-        self.service = Service(*args, **kwargs)
-        self.name = "Beat"
+        def __init__(self, *args, **kwargs):
+            super(_Process, self).__init__()
+            self.service = Service(*args, **kwargs)
+            self.name = "Beat"
 
-    def run(self):
-        platforms.reset_signal("SIGTERM")
-        self.service.start(embedded_process=True)
+        def run(self):
+            platforms.reset_signal("SIGTERM")
+            self.service.start(embedded_process=True)
 
-    def stop(self):
-        self.service.stop()
-        self.terminate()
+        def stop(self):
+            self.service.stop()
+            self.terminate()
+else:
+    _Process = None
 
 
 def EmbeddedService(*args, **kwargs):
     """Return embedded clock service.
 
     :keyword thread: Run threaded instead of as a separate process.
-        Default is ``False``.
+        Default is :const:`False`.
 
     """
-    if kwargs.pop("thread", False):
+    if kwargs.pop("thread", False) or _Process is None:
         # Need short max interval to be able to stop thread
         # in reasonable time.
         kwargs.setdefault("max_interval", 1)
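
For illustration, a minimal sketch of driving the refactored embedded beat
service from application code, using only names from the hunk above
(``EmbeddedService``, its ``thread`` keyword, and the ``start``/``stop``
methods of the returned object)::

    from celery.beat import EmbeddedService

    # thread=True runs the service in a background thread; the fallback
    # above forces this anyway when multiprocessing is unavailable.
    beat = EmbeddedService(thread=True)
    beat.start()        # runs Service.start() in the background
    # ... application code ...
    beat.stop()         # sets the shutdown event and waits for the service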

+ 159 - 20
celery/bin/base.py

@@ -3,25 +3,109 @@ import sys
 
 from optparse import OptionParser, make_option as Option
 
-from celery import __version__
+import celery
 
 
 class Command(object):
+    """Base class for command line applications.
+
+    :keyword app: The current app.
+    :keyword get_app: Callable returning the current app if no app provided.
+
+    """
+    #: Arg list used in help.
     args = ''
-    version = __version__
+
+    #: Application version.
+    version = celery.__version__
+
+    #: If false, positional arguments are rejected and the command
+    #: exits with an error message.
+    supports_args = True
+
+    #: List of options (without preload options).
     option_list = ()
 
+    #: List of options to parse before parsing other options.
+    preload_options = (
+            Option("--app",
+                    default=None, action="store", dest="app",
+                    help="Name of the app instance to use. "),
+            Option("--loader",
+                   default=None, action="store", dest="loader",
+                   help="Name of the loader class to use. "
+                        "Taken from the environment variable CELERY_LOADER, "
+                        "or 'default' if that is not set."),
+            Option("--config",
+                    default="celeryconfig", action="store",
+                    dest="config_module",
+                    help="Name of the module to read configuration from.")
+    )
+
+    #: Enable if the application should support config from the cmdline.
+    enable_config_from_cmdline = False
+
+    #: Default configuration namespace.
+    namespace = "celery"
+
     Parser = OptionParser
 
-    def __init__(self, defaults=None):
-        self.defaults = defaults
+    def __init__(self, app=None, get_app=None):
+        self.app = app
+        self.get_app = get_app or self._get_default_app
+
+    def run(self, *args, **options):
+        """This is the body of the command called by :meth:`handle_argv`."""
+        raise NotImplementedError("subclass responsibility")
+
+    def execute_from_commandline(self, argv=None):
+        """Execute application from command line.
+
+        :keyword argv: The list of command line arguments.
+                       Defaults to ``sys.argv``.
+
+        """
+        if argv is None:
+            argv = list(sys.argv)
+        argv = self.setup_app_from_commandline(argv)
+        prog_name = os.path.basename(argv[0])
+        return self.handle_argv(prog_name, argv[1:])
+
+    def usage(self):
+        """Returns the command-line usage string for this app."""
+        return "%%prog [options] %s" % (self.args, )
+
+    def get_options(self):
+        """Get supported command line options."""
+        return self.option_list
+
+    def handle_argv(self, prog_name, argv):
+        """Parses command line arguments from ``argv`` and dispatches
+        to :meth:`run`.
+
+        :param prog_name: The program name (``argv[0]``).
+        :param argv: Command arguments.
 
-        if self.defaults is None:
-            from celery import conf
-            self.defaults = conf
+        Exits with an error message if :attr:`supports_args` is disabled
+        and ``argv`` contains positional arguments.
+
+        """
+        options, args = self.parse_options(prog_name, argv)
+        if not self.supports_args and args:
+            sys.stderr.write(
+                "\nUnrecognized command line arguments: %r\n" % (
+                    ", ".join(args), ))
+            sys.stderr.write("\nTry --help?\n")
+            sys.exit(1)
+        return self.run(*args, **vars(options))
 
     def parse_options(self, prog_name, arguments):
         """Parse the available options."""
+        # Don't want to load configuration to just print the version,
+        # so we handle --version manually here.
+        if "--version" in arguments:
+            print(self.version)
+            sys.exit(0)
         parser = self.create_parser(prog_name)
         options, args = parser.parse_args(arguments)
         return options, args
@@ -30,20 +114,75 @@ class Command(object):
         return self.Parser(prog=prog_name,
                            usage=self.usage(),
                            version=self.version,
-                           option_list=self.get_options())
+                           option_list=(self.preload_options +
+                                        self.get_options()))
 
-    def execute_from_commandline(self, argv=None):
-        if argv is None:
-            argv = list(sys.argv)
-        prog_name = os.path.basename(argv[0])
-        options, args = self.parse_options(prog_name, argv[1:])
-        return self.run(*args, **vars(options))
+    def setup_app_from_commandline(self, argv):
+        preload_options = self.parse_preload_options(argv)
+        app = (preload_options.pop("app", None) or
+               os.environ.get("CELERY_APP") or
+               self.app)
+        loader = (preload_options.pop("loader", None) or
+                  os.environ.get("CELERY_LOADER") or
+                  "default")
+        config_module = preload_options.pop("config_module", None)
+        if config_module:
+            os.environ["CELERY_CONFIG_MODULE"] = config_module
+        if app:
+            self.app = self.get_cls_by_name(app)
+        else:
+            self.app = self.get_app(loader=loader)
+        if self.enable_config_from_cmdline:
+            argv = self.process_cmdline_config(argv)
+        return argv
 
-    def usage(self):
-        return "%%prog [options] %s" % (self.args, )
+    def get_cls_by_name(self, name):
+        from celery.utils import get_cls_by_name, import_from_cwd
+        return get_cls_by_name(name, imp=import_from_cwd)
 
-    def get_options(self):
-        return self.option_list
+    def process_cmdline_config(self, argv):
+        try:
+            cargs_start = argv.index('--')
+        except ValueError:
+            return argv
+        argv, cargs = argv[:cargs_start], argv[cargs_start + 1:]
+        self.app.config_from_cmdline(cargs, namespace=self.namespace)
+        return argv
 
-    def run(self, *args, **options):
-        raise NotImplementedError("subclass responsibility")
+    def parse_preload_options(self, args):
+        acc = {}
+        preload_options = dict((opt._long_opts[0], opt.dest)
+                                for opt in self.preload_options)
+        for arg in args:
+            if arg.startswith('--') and '=' in arg:
+                key, value = arg.split('=', 1)
+                dest = preload_options.get(key)
+                if dest:
+                    acc[dest] = value
+        return acc
+
+    def _get_default_app(self, *args, **kwargs):
+        return celery.Celery(*args, **kwargs)
+
+
+def daemon_options(default_pidfile, default_logfile=None):
+    return (
+        Option('-f', '--logfile', default=default_logfile,
+               action="store", dest="logfile",
+               help="Path to the logfile"),
+        Option('--pidfile', default=default_pidfile,
+               action="store", dest="pidfile",
+               help="Path to the pidfile."),
+        Option('--uid', default=None,
+               action="store", dest="uid",
+               help="Effective user id to run as when detached."),
+        Option('--gid', default=None,
+               action="store", dest="gid",
+               help="Effective group id to run as when detached."),
+        Option('--umask', default=0,
+               action="store", type="int", dest="umask",
+               help="Umask of the process when detached."),
+        Option('--workdir', default=None,
+               action="store", dest="working_directory",
+               help="Directory to change to when detached."),
+)

+ 33 - 30
celery/bin/camqadm.py

@@ -8,15 +8,15 @@ import cmd
 import sys
 import shlex
 import pprint
-import optparse
+
 from itertools import count
 
 from amqplib import client_0_8 as amqp
-from carrot.utils import partition
+from kombu.utils import partition
 
-from celery.utils import info
+from celery.app import app_or_default
+from celery.bin.base import Command
 from celery.utils import padlist
-from celery.messaging import establish_connection
 
 # Valid string -> bool coercions.
 BOOLS = {"1": True, "0": False,
@@ -27,8 +27,6 @@ BOOLS = {"1": True, "0": False,
 # Map to coerce strings to other types.
 COERCE = {bool: lambda value: BOOLS[value.lower()]}
 
-OPTION_LIST = ()
-
 HELP_HEADER = """
 Commands
 --------
@@ -56,12 +54,12 @@ class Spec(object):
     .. attribute args::
 
         List of arguments this command takes. Should
-        contain ``(argument_name, argument_type)`` tuples.
+        contain `(argument_name, argument_type)` tuples.
 
     .. attribute returns:
 
         Helpful human string representation of what this command returns.
-        May be ``None``, to signify the return type is unknown.
+        May be :const:`None`, to signify the return type is unknown.
 
     """
     def __init__(self, *args, **kwargs):
@@ -71,13 +69,14 @@ class Spec(object):
     def coerce(self, index, value):
         """Coerce value for argument at index.
 
-        E.g. if :attr:`args` is ``[("is_active", bool)]``:
+        E.g. if :attr:`args` is `[("is_active", bool)]`:
 
             >>> coerce(0, "False")
             False
 
         """
-        arg_name, arg_type = self.args[index]
+        arg_info = self.args[index]
+        arg_type = arg_info[1]
         # Might be a custom way to coerce the string value,
         # so look in the coercion map.
         return COERCE.get(arg_type, arg_type)(value)
@@ -133,8 +132,8 @@ class AMQShell(cmd.Cmd):
     :keyword connect: Function used to connect to the server, must return
         connection object.
 
-    :keyword silent: If ``True``, the commands won't have annoying output not
-        relevant when running in non-shell mode.
+    :keyword silent: If :const:`True`, the commands won't have annoying
+                     output not relevant when running in non-shell mode.
 
 
     .. attribute: builtins
@@ -200,7 +199,7 @@ class AMQShell(cmd.Cmd):
         self._reconnect()
 
     def say(self, m):
-        """Say something to the user. Disabled if :attr:`silent``."""
+        """Say something to the user. Disabled if :attr:`silent`."""
         if not self.silent:
             say(m)
 
@@ -209,7 +208,7 @@ class AMQShell(cmd.Cmd):
         to Python values and find the corresponding method on the AMQP channel
         object.
 
-        :returns: tuple of ``(method, processed_args)``.
+        :returns: tuple of `(method, processed_args)`.
 
         Example:
 
@@ -227,7 +226,7 @@ class AMQShell(cmd.Cmd):
         return getattr(self.chan, attr_name), args, spec.format_response
 
     def do_exit(self, *args):
-        """The ``"exit"`` command."""
+        """The `"exit"` command."""
         self.say("\n-> please, don't leave!")
         sys.exit(0)
 
@@ -251,7 +250,7 @@ class AMQShell(cmd.Cmd):
         return set(self.builtins.keys() + self.amqp.keys())
 
     def completenames(self, text, *ignored):
-        """Return all commands starting with ``text``, for tab-completion."""
+        """Return all commands starting with `text`, for tab-completion."""
         names = self.get_names()
         first = [cmd for cmd in names
                         if cmd.startswith(text.replace("_", "."))]
@@ -276,7 +275,7 @@ class AMQShell(cmd.Cmd):
         """Parse input line.
 
         :returns: tuple of three items:
-            ``(command_name, arglist, original_line)``
+            `(command_name, arglist, original_line)`
 
         E.g::
 
@@ -320,7 +319,7 @@ class AMQShell(cmd.Cmd):
     def _reconnect(self):
         """Re-establish connection to the AMQP server."""
         self.conn = self.connect(self.conn)
-        self.chan = self.conn.create_backend().channel
+        self.chan = self.conn.channel()
         self.needs_reconnect = False
 
     @property
@@ -329,9 +328,10 @@ class AMQShell(cmd.Cmd):
 
 
 class AMQPAdmin(object):
-    """The celery ``camqadm`` utility."""
+    """The celery :program:`camqadm` utility."""
 
     def __init__(self, *args, **kwargs):
+        self.app = app_or_default(kwargs.get("app"))
         self.silent = bool(args)
         if "silent" in kwargs:
             self.silent = kwargs["silent"]
@@ -340,8 +340,8 @@ class AMQPAdmin(object):
     def connect(self, conn=None):
         if conn:
             conn.close()
-        self.say("-> connecting to %s." % info.format_broker_info())
-        conn = establish_connection()
+        conn = self.app.broker_connection()
+        self.say("-> connecting to %s." % conn.as_uri())
         conn.connect()
         self.say("-> connected.")
         return conn
@@ -350,27 +350,30 @@ class AMQPAdmin(object):
         shell = AMQShell(connect=self.connect)
         if self.args:
             return shell.onecmd(" ".join(self.args))
-        return shell.cmdloop()
+        try:
+            return shell.cmdloop()
+        except KeyboardInterrupt:
+            self.say("(bibi)")
+            pass
 
     def say(self, m):
         if not self.silent:
             say(m)
 
 
-def parse_options(arguments):
-    """Parse the available options to ``celeryd``."""
-    parser = optparse.OptionParser(option_list=OPTION_LIST)
-    options, values = parser.parse_args(arguments)
-    return options, values
+class AMQPAdminCommand(Command):
+
+    def run(self, *args, **options):
+        options["app"] = self.app
+        return AMQPAdmin(*args, **options).run()
 
 
 def camqadm(*args, **options):
-    return AMQPAdmin(*args, **options).run()
+    AMQPAdmin(*args, **options).run()
 
 
 def main():
-    options, values = parse_options(sys.argv[1:])
-    return camqadm(*values, **vars(options))
+    AMQPAdminCommand().execute_from_commandline()
 
 if __name__ == "__main__":              # pragma: no cover
     main()
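
The rewritten utility can also be used programmatically; a small sketch,
assuming a broker is reachable through the default app configuration.
Positional arguments put it in non-interactive (and silent) mode::

    from celery.bin.camqadm import AMQPAdmin

    # Equivalent to: camqadm queue.declare testq
    AMQPAdmin("queue.declare", "testq").run()

    # With no arguments the interactive AMQP shell is started instead.
    AMQPAdmin().run()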

+ 37 - 15
celery/bin/celerybeat.py

@@ -5,7 +5,7 @@
 
 .. cmdoption:: -s, --schedule
 
-    Path to the schedule database. Defaults to ``celerybeat-schedule``.
+    Path to the schedule database. Defaults to `celerybeat-schedule`.
     The extension ".db" will be appended to the filename.
 
 .. cmdoption:: -S, --scheduler
@@ -14,28 +14,52 @@
 
 .. cmdoption:: -f, --logfile
 
-    Path to log file. If no logfile is specified, ``stderr`` is used.
+    Path to log file. If no logfile is specified, `stderr` is used.
 
 .. cmdoption:: -l, --loglevel
 
-    Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``,
-    ``ERROR``, ``CRITICAL``, or ``FATAL``.
+    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
+    `ERROR`, `CRITICAL`, or `FATAL`.
 
 """
-from celery.bin.base import Command, Option
+from celery.bin.base import Command, Option, daemon_options
+from celery.platforms import create_daemon_context
+from celery.utils.functional import partial
 
 
 class BeatCommand(Command):
-
-    def run(self, *args, **kwargs):
-        from celery.apps.beat import Beat
-        kwargs["defaults"] = self.defaults
-        return Beat(**kwargs).run()
+    supports_args = False
+
+    def run(self, detach=False, logfile=None, pidfile=None, uid=None,
+            gid=None, umask=None, working_directory=None, **kwargs):
+        kwargs.pop("app", None)
+
+        beat = partial(self.app.Beat, logfile=logfile, pidfile=pidfile,
+                       **kwargs)
+
+        if not detach:
+            return beat().run()
+
+        context, on_stop = create_daemon_context(
+                                logfile=logfile,
+                                pidfile=pidfile,
+                                uid=uid,
+                                gid=gid,
+                                umask=umask,
+                                working_directory=working_directory)
+        context.open()
+        try:
+            beat().run()
+        finally:
+            on_stop()
 
     def get_options(self):
-        conf = self.defaults
+        conf = self.app.conf
 
         return (
+            Option('--detach',
+                default=False, action="store_true", dest="detach",
+                help="Detach and run in the background."),
             Option('-s', '--schedule',
                 default=conf.CELERYBEAT_SCHEDULE_FILENAME,
                 action="store", dest="schedule",
@@ -50,14 +74,12 @@ class BeatCommand(Command):
                 action="store", dest="scheduler_cls",
                 help="Scheduler class. Default is "
                      "celery.beat.PersistentScheduler"),
-            Option('-f', '--logfile', default=conf.CELERYBEAT_LOG_FILE,
-                action="store", dest="logfile",
-                help="Path to log file."),
             Option('-l', '--loglevel',
                 default=conf.CELERYBEAT_LOG_LEVEL,
                 action="store", dest="loglevel",
                 help="Loglevel. One of DEBUG/INFO/WARNING/ERROR/CRITICAL."),
-        )
+        ) + daemon_options(default_pidfile="celerybeat.pid",
+                           default_logfile=conf.CELERYBEAT_LOG_FILE)
 
 
 def main():
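
For illustration, the new ``--detach`` path corresponds roughly to this
invocation (the pidfile/logfile paths are examples only)::

    from celery.bin.celerybeat import BeatCommand

    # Same as: celerybeat --detach --pidfile=... --logfile=...
    BeatCommand().execute_from_commandline(
        ["celerybeat", "--detach",
         "--pidfile=/var/run/celerybeat.pid",
         "--logfile=/var/log/celerybeat.log"])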

+ 57 - 50
celery/bin/celeryctl.py

@@ -1,4 +1,3 @@
-import os
 import sys
 
 from optparse import OptionParser, make_option as Option
@@ -8,6 +7,8 @@ from textwrap import wrap
 from anyjson import deserialize
 
 from celery import __version__
+from celery.app import app_or_default, current_app
+from celery.bin.base import Command as CeleryCommand
 from celery.utils import term
 
 
@@ -28,18 +29,15 @@ class Command(object):
     args = ""
     version = __version__
 
-    option_list = (
+    option_list = CeleryCommand.preload_options + (
         Option("--quiet", "-q", action="store_true", dest="quiet",
                 default=False),
-        Option("--conf", dest="conf",
-            help="Celery config module name (default: celeryconfig)"),
-        Option("--loader", dest="loader",
-            help="Celery loaders module name (default: default)"),
         Option("--no-color", "-C", dest="no_color", action="store_true",
             help="Don't colorize output."),
     )
 
-    def __init__(self, no_color=False):
+    def __init__(self, app=None, no_color=False):
+        self.app = app_or_default(app)
         self.colored = term.colored(enabled=not no_color)
 
     def __call__(self, *args, **kwargs):
@@ -63,16 +61,12 @@ class Command(object):
                             version=self.version,
                             option_list=self.option_list)
 
-    def run_from_argv(self, argv):
-        self.prog_name = os.path.basename(argv[0])
-        self.command = argv[1]
-        self.arglist = argv[2:]
+    def run_from_argv(self, prog_name, argv):
+        self.prog_name = prog_name
+        self.command = argv[0]
+        self.arglist = argv[1:]
         self.parser = self.create_parser(self.prog_name, self.command)
         options, args = self.parser.parse_args(self.arglist)
-        if options.loader:
-            os.environ["CELERY_LOADER"] = options.loader
-        if options.conf:
-            os.environ["CELERY_CONFIG_MODULE"] = options.conf
         self.colored = term.colored(enabled=not options.no_color)
         self(*args, **options.__dict__)
 
@@ -125,8 +119,6 @@ class apply(Command):
     )
 
     def run(self, name, *_, **kw):
-        from celery.execute import send_task
-
         # Positional args.
         args = kw.get("args") or ()
         if isinstance(args, basestring):
@@ -144,19 +136,40 @@ class apply(Command):
         except (TypeError, ValueError):
             pass
 
-        res = send_task(name, args=args, kwargs=kwargs,
-                        countdown=kw.get("countdown"),
-                        serializer=kw.get("serializer"),
-                        queue=kw.get("queue"),
-                        exchange=kw.get("exchange"),
-                        routing_key=kw.get("routing_key"),
-                        eta=kw.get("eta"),
-                        expires=expires)
-
+        res = self.app.send_task(name, args=args, kwargs=kwargs,
+                                 countdown=kw.get("countdown"),
+                                 serializer=kw.get("serializer"),
+                                 queue=kw.get("queue"),
+                                 exchange=kw.get("exchange"),
+                                 routing_key=kw.get("routing_key"),
+                                 eta=kw.get("eta"),
+                                 expires=expires)
         self.out(res.task_id)
 apply = command(apply)
 
 
+def pluralize(n, text, suffix='s'):
+    if n > 1:
+        return text + suffix
+    return text
+
+
+class purge(Command):
+
+    def run(self, *args, **kwargs):
+        app = current_app()
+        queues = len(app.amqp.queues.keys())
+        messages_removed = app.control.discard_all()
+        if messages_removed:
+            self.out("Purged %s %s from %s known task %s." % (
+                messages_removed, pluralize(messages_removed, "message"),
+                queues, pluralize(queues, "queue")))
+        else:
+            self.out("No messages purged from %s known %s" % (
+                queues, pluralize(queues, "queue")))
+purge = command(purge)
+
+
 class result(Command):
     args = "<task_id>"
     option_list = Command.option_list + (
@@ -165,8 +178,7 @@ class result(Command):
 
     def run(self, task_id, *args, **kwargs):
         from celery import registry
-        from celery.result import AsyncResult
-        result_cls = AsyncResult
+        result_cls = self.app.AsyncResult
         task = kwargs.get("task")
 
         if task:
@@ -178,6 +190,7 @@ result = command(result)
 
 class inspect(Command):
     choices = {"active": 1.0,
+               "active_queues": 1.0,
                "scheduled": 1.0,
                "reserved": 1.0,
                "stats": 1.0,
@@ -208,24 +221,23 @@ class inspect(Command):
             raise Error("Did you mean 'inspect --help'?")
         if command not in self.choices:
             raise Error("Unknown inspect command: %s" % command)
-        from celery.task.control import inspect
 
         destination = kwargs.get("destination")
         timeout = kwargs.get("timeout") or self.choices[command]
         if destination and isinstance(destination, basestring):
             destination = map(str.strip, destination.split(","))
 
-        def on_reply(message_data):
+        def on_reply(body):
             c = self.colored
-            node = message_data.keys()[0]
-            reply = message_data[node]
+            node = body.keys()[0]
+            reply = body[node]
             status, preply = self.prettify(reply)
             self.say("->", c.cyan(node, ": ") + status, indent(preply))
 
         self.say("<-", command)
-        i = inspect(destination=destination,
-                    timeout=timeout,
-                    callback=on_reply)
+        i = self.app.control.inspect(destination=destination,
+                                     timeout=timeout,
+                                     callback=on_reply)
         replies = getattr(i, command)(*args[1:])
         if not replies:
             raise Error("No nodes replied within time constraint.")
@@ -251,8 +263,9 @@ class status(Command):
     option_list = inspect.option_list
 
     def run(self, *args, **kwargs):
-        replies = inspect(no_color=kwargs.get("no_color", False)) \
-                            .run("ping", **dict(kwargs, quiet=True))
+        replies = inspect(app=self.app,
+                          no_color=kwargs.get("no_color", False)) \
+                    .run("ping", **dict(kwargs, quiet=True))
         if not replies:
             raise Error("No nodes replied within time constraint")
         nodecount = len(replies)
@@ -280,32 +293,26 @@ class help(Command):
 help = command(help)
 
 
-class celeryctl(object):
+class celeryctl(CeleryCommand):
     commands = commands
 
     def execute(self, command, argv=None):
-        if argv is None:
-            argv = sys.arg
-        argv = list(argv)
         try:
             cls = self.commands[command]
         except KeyError:
-            cls = self.commands["help"]
-            argv.insert(1, "help")
+            cls, argv = self.commands["help"], ["help"]
         cls = self.commands.get(command) or self.commands["help"]
         try:
-            cls().run_from_argv(argv)
+            cls(app=self.app).run_from_argv(self.prog_name, argv)
         except Error:
             return self.execute("help", argv)
 
-    def execute_from_commandline(self, argv=None):
-        if argv is None:
-            argv = sys.argv
+    def handle_argv(self, prog_name, argv):
+        self.prog_name = prog_name
         try:
-            command = argv[1]
+            command = argv[0]
         except IndexError:
-            command = "help"
-            argv.insert(1, "help")
+            command, argv = "help", ["help"]
         return self.execute(command, argv)
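
The small ``pluralize`` helper used by the new ``purge`` subcommand behaves
as follows::

    >>> pluralize(1, "message")
    'message'
    >>> pluralize(3, "queue")
    'queues'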
 
 

+ 65 - 17
celery/bin/celeryd.py

@@ -10,12 +10,12 @@
 
 .. cmdoption:: -f, --logfile
 
-    Path to log file. If no logfile is specified, ``stderr`` is used.
+    Path to log file. If no logfile is specified, `stderr` is used.
 
 .. cmdoption:: -l, --loglevel
 
-    Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``,
-    ``ERROR``, ``CRITICAL``, or ``FATAL``.
+    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
+    `ERROR`, `CRITICAL`, or `FATAL`.
 
 .. cmdoption:: -n, --hostname
 
@@ -23,14 +23,14 @@
 
 .. cmdoption:: -B, --beat
 
-    Also run the ``celerybeat`` periodic task scheduler. Please note that
+    Also run the `celerybeat` periodic task scheduler. Please note that
     there must only be one instance of this service.
 
 .. cmdoption:: -Q, --queues
 
     List of queues to enable for this worker, separated by comma.
     By default all configured queues are enabled.
-    Example: ``-Q video,image``
+    Example: `-Q video,image`
 
 .. cmdoption:: -I, --include
 
@@ -39,13 +39,17 @@
 
 .. cmdoption:: -s, --schedule
 
-    Path to the schedule database if running with the ``-B`` option.
-    Defaults to ``celerybeat-schedule``. The extension ".db" will be
+    Path to the schedule database if running with the `-B` option.
+    Defaults to `celerybeat-schedule`. The extension ".db" will be
     appended to the filename.
 
+.. cmdoption:: --scheduler
+
+    Scheduler class to use. Default is celery.beat.PersistentScheduler
+
 .. cmdoption:: -E, --events
 
-    Send events that can be captured by monitors like ``celerymon``.
+    Send events that can be captured by monitors like `celerymon`.
 
 .. cmdoption:: --purge, --discard
 
@@ -67,25 +71,42 @@
     terminated and replaced by a new worker.
 
 """
-import multiprocessing
+import sys
+
+try:
+    from multiprocessing import freeze_support
+except ImportError:
+    freeze_support = lambda: True
 
 from celery.bin.base import Command, Option
 
 
 class WorkerCommand(Command):
+    namespace = "celeryd"
+    enable_config_from_cmdline = True
+    supports_args = False
 
     def run(self, *args, **kwargs):
-        from celery.apps.worker import Worker
-        kwargs["defaults"] = self.defaults
-        return Worker(**kwargs).run()
+        kwargs.pop("app", None)
+        # Pools like eventlet/gevent needs to patch libs as early
+        # as possible.
+        from celery import concurrency
+        kwargs["pool"] = concurrency.get_implementation(
+                    kwargs.get("pool") or self.app.conf.CELERYD_POOL)
+        return self.app.Worker(**kwargs).run()
 
     def get_options(self):
-        conf = self.defaults
+        conf = self.app.conf
         return (
             Option('-c', '--concurrency',
                 default=conf.CELERYD_CONCURRENCY,
                 action="store", dest="concurrency", type="int",
-                help="Number of child processes processing the queue."),
+                help="Number of worker threads/processes"),
+            Option('-P', '--pool',
+                default=conf.CELERYD_POOL,
+                action="store", dest="pool", type="str",
+                help="Pool implementation: "
+                     "processes (default), eventlet or gevent."),
             Option('--purge', '--discard', default=False,
                 action="store_true", dest="discard",
                 help="Discard all waiting tasks before the server is"
@@ -112,13 +133,17 @@ class WorkerCommand(Command):
                      "option. The extension '.db' will be appended to the "
                     "filename. Default: %s" % (
                         conf.CELERYBEAT_SCHEDULE_FILENAME, )),
-
+            Option('--scheduler',
+                default=None,
+                action="store", dest="scheduler_cls",
+                help="Scheduler class. Default is "
+                     "celery.beat.PersistentScheduler"),
             Option('-S', '--statedb', default=conf.CELERYD_STATE_DB,
                 action="store", dest="db",
                 help="Path to the state database. The extension '.db' will "
                      "be appended to the filename. Default: %s" % (
                         conf.CELERYD_STATE_DB, )),
-            Option('-E', '--events', default=conf.SEND_EVENTS,
+            Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
                 action="store_true", dest="events",
                 help="Send events so the worker can be monitored by "
                      "celeryev, celerymon and other monitors.."),
@@ -148,13 +173,36 @@ class WorkerCommand(Command):
                 help="Optional file used to store the workers pid. "
                      "The worker will not start if this file already exists "
                      "and the pid is still alive."),
+            Option('--autoscale', default=None,
+                help="Enable autoscaling by providing "
+                     "max_concurrency,min_concurrency. Example: "
+                     "--autoscale=10,3 (always keep 3 processes, "
+                     "but grow to 10 if necessary)."),
         )
 
 
 def main():
-    multiprocessing.freeze_support()
+    freeze_support()
     worker = WorkerCommand()
     worker.execute_from_commandline()
 
+
+def windows_main():
+    sys.stderr.write("""
+
+The celeryd command does not work on Windows.
+
+Instead, please use:
+
+    ..> python -m celery.bin.celeryd
+
+You can also supply arguments:
+
+    ..> python -m celery.bin.celeryd --concurrency=10 --loglevel=DEBUG
+
+
+    """.strip())
+
+
 if __name__ == "__main__":          # pragma: no cover
     main()

+ 13 - 22
celery/bin/celeryd_detach.py

@@ -1,31 +1,13 @@
 import os
 import sys
 
-from optparse import OptionParser, BadOptionError, make_option as Option
+from optparse import OptionParser, BadOptionError
 
 from celery import __version__
+from celery.bin.base import daemon_options
 from celery.platforms import create_daemon_context
 
-OPTION_LIST = (
-        Option('-f', '--logfile', default=None,
-               action="store", dest="logfile",
-               help="Path to the logfile"),
-        Option('--pidfile', default="celeryd.pid",
-               action="store", dest="pidfile",
-               help="Path to the pidfile."),
-        Option('--uid', default=None,
-               action="store", dest="uid",
-               help="Effective user id to run as when detached."),
-        Option('--gid', default=None,
-               action="store", dest="gid",
-               help="Effective group id to run as when detached."),
-        Option('--umask', default=0,
-               action="store", type="int", dest="umask",
-               help="Umask of the process when detached."),
-        Option('--workdir', default=None,
-               action="store", dest="working_directory",
-               help="Directory to change to when detached."),
-)
+OPTION_LIST = daemon_options(default_pidfile="celeryd.pid")
 
 
 class detached(object):
@@ -51,7 +33,16 @@ class detached(object):
                                 working_directory=self.working_directory)
         context.open()
         try:
-            os.execv(self.path, [self.path] + self.argv)
+            try:
+                os.execv(self.path, [self.path] + self.argv)
+            except Exception:
+                import logging
+                from celery.log import setup_logger
+                logger = setup_logger(logfile=self.logfile,
+                                      loglevel=logging.ERROR)
+                logger.critical("Can't exec %r" % (
+                    " ".join([self.path] + self.argv), ),
+                    exc_info=sys.exc_info())
         finally:
             on_stop()
 

+ 32 - 14
celery/bin/celeryd_multi.py

@@ -5,17 +5,24 @@ Examples
 
 ::
 
-    # Advanced example starting 10 workers in the background:
-    #   * Three of the workers processes the images and video queue
-    #   * Two of the workers processes the data queue with loglevel DEBUG
-    #   * the rest processes the default' queue.
-    $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data
-        -Q default -L:4,5 DEBUG
+    # Single worker with explicit name and events enabled.
+    $ celeryd-multi start Leslie -E
 
-    # You can show the commands necessary to start the workers with
-    # the "show" command:
-    $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5:data
-        -Q default -L:4,5 DEBUG
+    # Pidfiles and logfiles are stored in the current directory
+    # by default.  Use the --pidfile and --logfile arguments to change
+    # this.  The abbreviation %n will be expanded to the current
+    # node name.
+    $ celeryd-multi start Leslie -E --pidfile=/var/run/celery/%n.pid
+                                    --logfile=/var/log/celery/%n.log
+
+
+    # You need to add the same arguments when you restart,
+    # as these are not persisted anywhere.
+    $ celeryd-multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
+                                      --logfile=/var/run/celery/%n.log
+
+    # To stop the node, you need to specify the same pidfile.
+    $ celeryd-multi stop Leslie --pidfile=/var/run/celery/%n.pid
 
     # 3 workers, with 3 processes each
     $ celeryd-multi start 3 -c 3
@@ -34,8 +41,20 @@ Examples
     celeryd -n celeryd1.worker.example.com -c 3
     celeryd -n celeryd2.worker.example.com -c 3
 
-    # Additionl options are added to each celeryd',
-    # but you can also modify the options for ranges of or single workers
+    # Advanced example starting 10 workers in the background:
+    #   * Three of the workers process the images and video queue
+    #   * Two of the workers process the data queue with loglevel DEBUG
+    #   * the rest process the default queue.
+    $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data
+        -Q default -L:4,5 DEBUG
+
+    # You can show the commands necessary to start the workers with
+    # the "show" command:
+    $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5:data
+        -Q default -L:4,5 DEBUG
+
+    # Additional options are added to each celeryd,
+    # but you can also modify the options for ranges of, or specific, workers.
 
     # 3 workers: Two with 3 processes, and one with 10 processes.
     $ celeryd-multi start 3 -c 3 -c:1 10
@@ -78,7 +97,7 @@ from time import sleep
 
 from celery import __version__
 from celery.utils import term
-from celery.utils.compat import defaultdict
+from celery.utils.compat import any, defaultdict
 
 SIGNAMES = set(sig for sig in dir(signal)
                         if sig.startswith("SIG") and "_" not in sig)
@@ -164,7 +183,6 @@ class MultiTool(object):
         wanted = argv[0]
         p = NamespacedOptionParser(argv[1:])
         for name, worker, _ in multi_args(p, cmd):
-            print("NAME: %s WANTED: %s" % (name, wanted))
             if name == wanted:
                 print(" ".join(worker))
                 return

+ 77 - 49
celery/bin/celeryev.py

@@ -1,66 +1,94 @@
-import logging
 import sys
 
-from optparse import OptionParser, make_option as Option
-
 from celery import platforms
-from celery.events.cursesmon import evtop
-from celery.events.dumper import evdump
-from celery.events.snapshot import evcam
+from celery.bin.base import Command, Option, daemon_options
+from celery.platforms import create_daemon_context
 
 
-OPTION_LIST = (
-    Option('-d', '--dump',
-        action="store_true", dest="dump",
-        help="Dump events to stdout."),
-    Option('-c', '--camera',
-        action="store", dest="camera",
-        help="Camera class to take event snapshots with."),
-    Option('-F', '--frequency', '--freq',
-        action="store", dest="frequency", type="float", default=1.0,
-        help="Recording: Snapshot frequency."),
-    Option('-r', '--maxrate',
-        action="store", dest="maxrate", default=None,
-        help="Recording: Shutter rate limit (e.g. 10/m)"),
-    Option('-l', '--loglevel',
-        action="store", dest="loglevel", default="INFO",
-        help="Loglevel. Default is WARNING."),
-    Option('-f', '--logfile',
-        action="store", dest="logfile", default=None,
-        help="Log file. Default is <stderr>"),
-)
+class EvCommand(Command):
+    supports_args = False
 
+    def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
+            loglevel="INFO", logfile=None, prog_name="celeryev",
+            pidfile=None, uid=None, gid=None, umask=None,
+            working_directory=None, detach=False, **kwargs):
+        self.prog_name = prog_name
 
-def set_process_status(prog, info=""):
-        info = "%s %s" % (info, platforms.strargv(sys.argv))
-        return platforms.set_process_title(prog,
-                                           info=info)
+        if dump:
+            return self.run_evdump()
+        if camera:
+            return self.run_evcam(camera, freq=frequency, maxrate=maxrate,
+                                  loglevel=loglevel, logfile=logfile,
+                                  pidfile=pidfile, uid=uid, gid=gid,
+                                  umask=umask,
+                                  working_directory=working_directory,
+                                  detach=detach)
+        return self.run_evtop()
 
+    def run_evdump(self):
+        from celery.events.dumper import evdump
+        self.set_process_status("dump")
+        return evdump(app=self.app)
 
-def run_celeryev(dump=False, camera=None, frequency=1.0, maxrate=None,
-        loglevel=logging.WARNING, logfile=None, prog_name="celeryev",
-        **kwargs):
-    if dump:
-        set_process_status("%s:dump" % prog_name)
-        return evdump()
-    if camera:
-        set_process_status("%s:cam" % prog_name)
-        return evcam(camera, frequency, maxrate,
-                     loglevel=loglevel, logfile=logfile)
-    set_process_status("%s:top" % prog_name)
-    return evtop()
+    def run_evtop(self):
+        from celery.events.cursesmon import evtop
+        self.set_process_status("top")
+        return evtop(app=self.app)
 
+    def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
+            gid=None, umask=None, working_directory=None,
+            detach=False, **kwargs):
+        from celery.events.snapshot import evcam
+        self.set_process_status("cam")
+        kwargs["app"] = self.app
+        if not detach:
+            return evcam(camera, logfile=logfile, pidfile=pidfile, **kwargs)
+        context, on_stop = create_daemon_context(
+                                logfile=logfile,
+                                pidfile=pidfile,
+                                uid=uid,
+                                gid=gid,
+                                umask=umask,
+                                working_directory=working_directory)
+        context.open()
+        try:
+            return evcam(camera, logfile=logfile, pidfile=pidfile, **kwargs)
+        finally:
+            on_stop()
+
+    def set_process_status(self, prog, info=""):
+        prog = "%s:%s" % (self.prog_name, prog)
+        info = "%s %s" % (info, platforms.strargv(sys.argv))
+        return platforms.set_process_title(prog, info=info)
 
-def parse_options(arguments):
-    """Parse the available options to ``celeryev``."""
-    parser = OptionParser(option_list=OPTION_LIST)
-    options, values = parser.parse_args(arguments)
-    return options
+    def get_options(self):
+        return (
+            Option('-d', '--dump',
+                   action="store_true", dest="dump",
+                   help="Dump events to stdout."),
+            Option('-c', '--camera',
+                   action="store", dest="camera",
+                   help="Camera class to take event snapshots with."),
+            Option('--detach',
+                default=False, action="store_true", dest="detach",
+                help="Recording: Detach and run in the background."),
+            Option('-F', '--frequency', '--freq',
+                   action="store", dest="frequency",
+                   type="float", default=1.0,
+                   help="Recording: Snapshot frequency."),
+            Option('-r', '--maxrate',
+                   action="store", dest="maxrate", default=None,
+                   help="Recording: Shutter rate limit (e.g. 10/m)"),
+            Option('-l', '--loglevel',
+                   action="store", dest="loglevel", default="INFO",
+                   help="Loglevel. Default is INFO."),
+        ) + daemon_options(default_pidfile="celeryev.pid",
+                           default_logfile=None)
 
 
 def main():
-    options = parse_options(sys.argv[1:])
-    return run_celeryev(**vars(options))
+    ev = EvCommand()
+    ev.execute_from_commandline()
 
 if __name__ == "__main__":              # pragma: no cover
     main()

+ 13 - 0
celery/concurrency/__init__.py

@@ -0,0 +1,13 @@
+from celery.utils import get_cls_by_name
+
+ALIASES = {
+    "processes": "celery.concurrency.processes.TaskPool",
+    "eventlet": "celery.concurrency.evlet.TaskPool",
+    "gevent": "celery.concurrency.evg.TaskPool",
+    "threads": "celery.concurrency.threads.TaskPool",
+    "solo": "celery.concurrency.solo.TaskPool",
+}
+
+
+def get_implementation(cls):
+    return get_cls_by_name(cls, ALIASES)
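
The alias registry is a thin wrapper around ``get_cls_by_name``, so both
lookup forms work; a short sketch::

    from celery.concurrency import get_implementation

    # Aliases resolve to the implementation class...
    ProcessPool = get_implementation("processes")

    # ...and fully qualified names are imported directly.
    SoloPool = get_implementation("celery.concurrency.solo.TaskPool")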

+ 129 - 0
celery/concurrency/base.py

@@ -0,0 +1,129 @@
+import os
+import sys
+import time
+import traceback
+
+from celery import log
+from celery.datastructures import ExceptionInfo
+from celery.utils.functional import partial
+from celery.utils import timer2
+
+
+def apply_target(target, args=(), kwargs={}, callback=None,
+        accept_callback=None, pid=None):
+    if accept_callback:
+        accept_callback(pid or os.getpid(), time.time())
+    callback(target(*args, **kwargs))
+
+
+class BasePool(object):
+    RUN = 0x1
+    CLOSE = 0x2
+    TERMINATE = 0x3
+
+    Timer = timer2.Timer
+
+    signal_safe = True
+    is_green = False
+
+    _state = None
+    _pool = None
+
+    def __init__(self, limit=None, putlocks=True, logger=None, **options):
+        self.limit = limit
+        self.putlocks = putlocks
+        self.logger = logger or log.get_default_logger()
+        self.options = options
+
+    def on_start(self):
+        pass
+
+    def on_stop(self):
+        pass
+
+    def on_apply(self, *args, **kwargs):
+        pass
+
+    def on_terminate(self):
+        pass
+
+    def terminate_job(self, pid):
+        raise NotImplementedError(
+                "%s does not implement terminate_job" % (self.__class__, ))
+
+    def stop(self):
+        self._state = self.CLOSE
+        self.on_stop()
+        self._state = self.TERMINATE
+
+    def terminate(self):
+        self._state = self.TERMINATE
+        self.on_terminate()
+
+    def start(self):
+        self.on_start()
+        self._state = self.RUN
+
+    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
+            errbacks=None, accept_callback=None, timeout_callback=None,
+            **compat):
+        """Equivalent of the :func:`apply` built-in function.
+
+        All `callbacks` and `errbacks` should complete immediately since
+        otherwise the thread which handles the result will get blocked.
+
+        """
+        args = args or []
+        kwargs = kwargs or {}
+        callbacks = callbacks or []
+        errbacks = errbacks or []
+
+        on_ready = partial(self.on_ready, callbacks, errbacks)
+        on_worker_error = partial(self.on_worker_error, errbacks)
+
+        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
+            target, args, kwargs))
+
+        return self.on_apply(target, args, kwargs,
+                             callback=on_ready,
+                             accept_callback=accept_callback,
+                             timeout_callback=timeout_callback,
+                             error_callback=on_worker_error,
+                             waitforslot=self.putlocks)
+
+    def on_ready(self, callbacks, errbacks, ret_value):
+        """What to do when a worker task is ready and its return value has
+        been collected."""
+
+        if isinstance(ret_value, ExceptionInfo):
+            if isinstance(ret_value.exception, (
+                    SystemExit, KeyboardInterrupt)):
+                raise ret_value.exception
+            [self.safe_apply_callback(errback, ret_value)
+                    for errback in errbacks]
+        else:
+            [self.safe_apply_callback(callback, ret_value)
+                    for callback in callbacks]
+
+    def on_worker_error(self, errbacks, exc):
+        einfo = ExceptionInfo((exc.__class__, exc, None))
+        [errback(einfo) for errback in errbacks]
+
+    def safe_apply_callback(self, fun, *args):
+        try:
+            fun(*args)
+        except:
+            self.logger.error("Pool callback raised exception: %s" % (
+                traceback.format_exc(), ),
+                exc_info=sys.exc_info())
+
+    def _get_info(self):
+        return {}
+
+    @property
+    def info(self):
+        return self._get_info()
+
+    @property
+    def active(self):
+        return self._state == self.RUN
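
A minimal sketch of the contract a concrete pool implements on top of
``BasePool``: override ``on_apply`` and let ``apply_async`` wire up the
callbacks.  The ``SyncPool`` name is hypothetical (the new solo pool
behaves essentially like this)::

    from celery.concurrency.base import BasePool, apply_target

    class SyncPool(BasePool):
        """Hypothetical pool executing tasks inline in the caller."""

        def on_apply(self, target, args=None, kwargs=None,
                     callback=None, accept_callback=None, **_):
            apply_target(target, args or (), kwargs or {},
                         callback, accept_callback)

    def on_result(value):
        print("result: %r" % (value, ))

    pool = SyncPool()
    pool.start()
    pool.apply_async(sum, args=([1, 2, 3], ), callbacks=[on_result])
    pool.stop()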

+ 106 - 0
celery/concurrency/evg.py

@@ -0,0 +1,106 @@
+import os
+import sys
+
+from time import time
+
+if not os.environ.get("GEVENT_NOPATCH"):
+    from gevent import monkey
+    monkey.patch_all()
+
+from celery.concurrency.base import apply_target, BasePool
+from celery.utils import timer2
+
+
+class Schedule(timer2.Schedule):
+
+    def __init__(self, *args, **kwargs):
+        from gevent.greenlet import Greenlet, GreenletExit
+
+        class _Greenlet(Greenlet):
+
+            def cancel(self):
+                self.kill()
+
+        self._Greenlet = _Greenlet
+        self._GreenletExit = GreenletExit
+        super(Schedule, self).__init__(*args, **kwargs)
+        self._queue = set()
+
+    def enter(self, entry, eta=None, priority=0):
+        try:
+            eta = timer2.to_timestamp(eta)
+        except OverflowError:
+            if not self.handle_error(sys.exc_info()):
+                raise
+
+        now = time()
+        if eta is None:
+            eta = now
+        secs = max(eta - now, 0)
+
+        g = self._Greenlet.spawn_later(secs, entry)
+        self._queue.add(g)
+        g.link(self._entry_exit)
+        g.entry = entry
+        g.eta = eta
+        g.priority = priority
+        g.cancelled = False
+
+        return g
+
+    def _entry_exit(self, g):
+        try:
+            g.kill()
+        finally:
+            self._queue.discard(g)
+
+    def clear(self):
+        queue = self._queue
+        while queue:
+            try:
+                queue.pop().kill()
+            except KeyError:
+                pass
+
+    @property
+    def queue(self):
+        return [(g.eta, g.priority, g.entry) for g in self._queue]
+
+
+class Timer(timer2.Timer):
+    Schedule = Schedule
+
+    def ensure_started(self):
+        pass
+
+    def stop(self):
+        self.schedule.clear()
+
+    def start(self):
+        pass
+
+
+class TaskPool(BasePool):
+    Timer = Timer
+
+    signal_safe = False
+    is_green = True
+
+    def __init__(self, *args, **kwargs):
+        from gevent import spawn_raw
+        from gevent.pool import Pool
+        self.Pool = Pool
+        self.spawn_n = spawn_raw
+        super(TaskPool, self).__init__(*args, **kwargs)
+
+    def on_start(self):
+        self._pool = self.Pool(self.limit)
+
+    def on_stop(self):
+        if self._pool is not None:
+            self._pool.join()
+
+    def on_apply(self, target, args=None, kwargs=None, callback=None,
+            accept_callback=None, **_):
+        return self._pool.spawn(apply_target, target, args, kwargs,
+                                callback, accept_callback)

+ 123 - 0
celery/concurrency/evlet.py

@@ -0,0 +1,123 @@
+import os
+import sys
+
+from time import time
+
+if not os.environ.get("EVENTLET_NOPATCH"):
+    import eventlet
+    import eventlet.debug
+    eventlet.monkey_patch()
+    eventlet.debug.hub_prevent_multiple_readers(False)
+
+from celery import signals
+from celery.concurrency import base
+from celery.utils import timer2
+
+
+def apply_target(target, args=(), kwargs={}, callback=None,
+                 accept_callback=None, getpid=None):
+    return base.apply_target(target, args, kwargs, callback, accept_callback,
+                             pid=getpid())
+
+
+class Schedule(timer2.Schedule):
+
+    def __init__(self, *args, **kwargs):
+        from eventlet.greenthread import spawn_after_local
+        from greenlet import GreenletExit
+        super(Schedule, self).__init__(*args, **kwargs)
+
+        self.GreenletExit = GreenletExit
+        self._spawn_after_local = spawn_after_local
+        self._queue = set()
+
+    def enter(self, entry, eta=None, priority=0):
+        try:
+            eta = timer2.to_timestamp(eta)
+        except OverflowError:
+            if not self.handle_error(sys.exc_info()):
+                raise
+
+        now = time()
+        if eta is None:
+            eta = now
+        secs = max(eta - now, 0)
+
+        g = self._spawn_after_local(secs, entry)
+        self._queue.add(g)
+        g.link(self._entry_exit, entry)
+        g.entry = entry
+        g.eta = eta
+        g.priority = priority
+        g.cancelled = False
+
+        return g
+
+    def _entry_exit(self, g, entry):
+        try:
+            try:
+                g.wait()
+            except self.GreenletExit:
+                entry.cancel()
+                g.cancelled = True
+        finally:
+            self._queue.discard(g)
+
+    def clear(self):
+        queue = self._queue
+        while queue:
+            try:
+                queue.pop().cancel()
+            except KeyError:
+                pass
+
+    @property
+    def queue(self):
+        return [(g.eta, g.priority, g.entry) for g in self._queue]
+
+
+class Timer(timer2.Timer):
+    Schedule = Schedule
+
+    def ensure_started(self):
+        pass
+
+    def stop(self):
+        self.schedule.clear()
+
+    def start(self):
+        pass
+
+
+class TaskPool(base.BasePool):
+    Timer = Timer
+
+    signal_safe = False
+    is_green = True
+
+    def __init__(self, *args, **kwargs):
+        from eventlet import greenthread
+        from eventlet.greenpool import GreenPool
+        self.Pool = GreenPool
+        self.getcurrent = greenthread.getcurrent
+        self.spawn_n = greenthread.spawn_n
+
+        super(TaskPool, self).__init__(*args, **kwargs)
+
+    def on_start(self):
+        self._pool = self.Pool(self.limit)
+        signals.eventlet_pool_started.send(sender=self)
+
+    def on_stop(self):
+        signals.eventlet_pool_preshutdown.send(sender=self)
+        if self._pool is not None:
+            self._pool.waitall()
+        signals.eventlet_pool_postshutdown.send(sender=self)
+
+    def on_apply(self, target, args=None, kwargs=None, callback=None,
+            accept_callback=None, **_):
+        signals.eventlet_pool_apply.send(sender=self,
+                target=target, args=args, kwargs=kwargs)
+        self._pool.spawn_n(apply_target, target, args, kwargs,
+                           callback, accept_callback,
+                           self.getcurrent)

+ 23 - 66
celery/concurrency/processes/__init__.py

@@ -3,25 +3,17 @@
 Process Pools.
 
 """
-import traceback
-
-from time import sleep, time
-
-from celery import log
-from celery.datastructures import ExceptionInfo
-from celery.utils.functional import curry
+import os
+import signal as _signal
 
+from celery.concurrency.base import BasePool
 from celery.concurrency.processes.pool import Pool, RUN
 
 
-def pingback(i):
-    return i
-
-
-class TaskPool(object):
+class TaskPool(BasePool):
     """Process Pool for processing tasks in parallel.
 
-    :param limit: see :attr:`limit`.
+    :param processes: see :attr:`processes`.
     :param logger: see :attr:`logger`.
 
 
@@ -36,71 +28,28 @@ class TaskPool(object):
     """
     Pool = Pool
 
-    def __init__(self, limit, logger=None, initializer=None,
-            maxtasksperchild=None, timeout=None, soft_timeout=None,
-            putlocks=True, initargs=()):
-        self.limit = limit
-        self.logger = logger or log.get_default_logger()
-        self.initializer = initializer
-        self.maxtasksperchild = maxtasksperchild
-        self.timeout = timeout
-        self.soft_timeout = soft_timeout
-        self.putlocks = putlocks
-        self.initargs = initargs
-        self._pool = None
-
-    def start(self):
+    def on_start(self):
         """Run the task pool.
 
         Will pre-fork all workers so they're ready to accept tasks.
 
         """
-        self._pool = self.Pool(processes=self.limit,
-                               initializer=self.initializer,
-                               initargs=self.initargs,
-                               timeout=self.timeout,
-                               soft_timeout=self.soft_timeout,
-                               maxtasksperchild=self.maxtasksperchild)
-
-    def stop(self):
+        self._pool = self.Pool(processes=self.limit, **self.options)
+        self.on_apply = self._pool.apply_async
+
+    def on_stop(self):
         """Gracefully stop the pool."""
         if self._pool is not None and self._pool._state == RUN:
             self._pool.close()
             self._pool.join()
             self._pool = None
 
-    def terminate(self):
+    def on_terminate(self):
         """Force terminate the pool."""
         if self._pool is not None:
             self._pool.terminate()
             self._pool = None
 
-    def diagnose(self, timeout=None):
-        pids = set(worker.pid for worker in self._pool._pool)
-        seen = set()
-        results = {}
-        time_start = time()
-
-        def callback(i):
-            for pid in results[i].worker_pids():
-                seen.add(pid)
-
-        i = 0
-        while pids ^ seen:
-            print("%r > %r" % (time() - time_start, timeout))
-            if timeout and time() - time_start > timeout:
-                print("TIMED OUT i==%r" % (i, ))
-                break
-            results[i] = self._pool.apply_async(pingback,
-                                                args=(i, ),
-                                                callback=callback)
-            sleep(0.1)
-            i += 1
-
-        return {"active": list(seen),
-                "waiting": list(pids ^ seen),
-                "iterations": i}
-
     def apply_async(self, target, args=None, kwargs=None, callbacks=None,
             errbacks=None, accept_callback=None, timeout_callback=None,
             soft_timeout=None, timeout=None, **compat):
@@ -155,10 +104,18 @@ class TaskPool(object):
             self.logger.error("Pool callback raised exception: %s" % (
                 traceback.format_exc(), ))
 
-    @property
-    def info(self):
+    def terminate_job(self, pid, signal=None):
+        os.kill(pid, signal or _signal.SIGTERM)
+
+    def grow(self, n=1):
+        return self._pool.grow(n)
+
+    def shrink(self, n=1):
+        return self._pool.shrink(n)
+
+    def _get_info(self):
         return {"max-concurrency": self.limit,
                 "processes": [p.pid for p in self._pool._pool],
-                "max-tasks-per-child": self.maxtasksperchild,
+                "max-tasks-per-child": self._pool._maxtasksperchild,
                 "put-guarded-by-semaphore": self.putlocks,
-                "timeouts": (self.soft_timeout, self.timeout)}
+                "timeouts": (self._pool.soft_timeout, self._pool.timeout)}

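The refactored process TaskPool exposes grow(), shrink() and terminate_job() on top of BasePool. A hypothetical sketch of how a control helper might use them, assuming `pool` is an already started TaskPool instance:

    import signal

    def resize(pool, n):
        # Grow or shrink a running pool by `n` processes.
        if n > 0:
            pool.grow(n)
        elif n < 0:
            pool.shrink(-n)    # raises ValueError when no idle process can be stopped

    def stop_job(pool, pid):
        # Ask the worker process currently running a job to terminate.
        pool.terminate_job(pid, signal.SIGTERM)
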
+ 184 - 65
celery/concurrency/processes/pool.py

@@ -13,6 +13,7 @@ __all__ = ['Pool']
 #
 
 import os
+import sys
 import errno
 import threading
 import Queue
@@ -22,11 +23,14 @@ import time
 import signal
 
 from multiprocessing import Process, cpu_count, TimeoutError
+from multiprocessing import util
 from multiprocessing.util import Finalize, debug
 
 from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded
 from celery.exceptions import WorkerLostError
 
+_Semaphore = threading._Semaphore
+
 #
 # Constants representing the state of a pool
 #
@@ -55,8 +59,33 @@ job_counter = itertools.count()
 def mapstar(args):
     return map(*args)
 
+
+def error(msg, *args, **kwargs):
+    if util._logger:
+        util._logger.error(msg, *args, **kwargs)
+
+
+class LaxBoundedSemaphore(threading._Semaphore):
+    """Semaphore that checks that # release is <= # acquires,
+    but ignores if # releases >= value."""
+
+    def __init__(self, value=1, verbose=None):
+        _Semaphore.__init__(self, value, verbose)
+        self._initial_value = value
+
+    def release(self):
+        if self._Semaphore__value < self._initial_value:
+            _Semaphore.release(self)
+        if __debug__:
+            self._note("%s.release: success, value=%s (unchanged)" % (
+                self, self._Semaphore__value))
+
+    def clear(self):
+        while self._Semaphore__value < self._initial_value:
+            _Semaphore.release(self)
+
 #
-# Code run by worker processes
+# Exceptions
 #
 
 
@@ -76,13 +105,21 @@ class MaybeEncodingError(Exception):
                     self.value, self.exc)
 
 
+class WorkersJoined(Exception):
+    """All workers have terminated."""
+
+
 def soft_timeout_sighandler(signum, frame):
     raise SoftTimeLimitExceeded()
 
+#
+# Code run by worker processes
+#
+
 
 def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
-    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
     pid = os.getpid()
+    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
     put = outqueue.put
     get = inqueue.get
 
@@ -137,7 +174,6 @@ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
         completed += 1
     debug('worker exiting after %d tasks' % completed)
 
-
 #
 # Class representing a process pool
 #
@@ -150,6 +186,14 @@ class PoolThread(threading.Thread):
         self._state = RUN
         self.daemon = True
 
+    def run(self):
+        try:
+            return self.body()
+        except Exception, exc:
+            error("Thread %r crashed: %r" % (self.__class__.__name__, exc, ),
+                  exc_info=sys.exc_info())
+            os._exit(1)
+
     def terminate(self):
         self._state = TERMINATE
 
@@ -163,11 +207,11 @@ class Supervisor(PoolThread):
         self.pool = pool
         super(Supervisor, self).__init__()
 
-    def run(self):
+    def body(self):
         debug('worker handler starting')
         while self._state == RUN and self.pool._state == RUN:
             self.pool._maintain_pool()
-            time.sleep(0.1)
+            time.sleep(0.8)
         debug('worker handler exiting')
 
 
@@ -180,7 +224,7 @@ class TaskHandler(PoolThread):
         self.pool = pool
         super(TaskHandler, self).__init__()
 
-    def run(self):
+    def body(self):
         taskqueue = self.taskqueue
         outqueue = self.outqueue
         put = self.put
@@ -223,16 +267,18 @@ class TaskHandler(PoolThread):
 
 class TimeoutHandler(PoolThread):
 
-    def __init__(self, processes, cache, t_soft, t_hard):
+    def __init__(self, processes, cache, t_soft, t_hard, putlock):
         self.processes = processes
         self.cache = cache
         self.t_soft = t_soft
         self.t_hard = t_hard
+        self.putlock = putlock
         super(TimeoutHandler, self).__init__()
 
-    def run(self):
+    def body(self):
         processes = self.processes
         cache = self.cache
+        putlock = self.putlock
         t_hard, t_soft = self.t_hard, self.t_soft
         dirty = set()
 
@@ -269,18 +315,20 @@ class TimeoutHandler(PoolThread):
             dirty.add(i)
 
         def _on_hard_timeout(job, i, hard_timeout):
+            if job.ready():
+                return
             debug('hard time limit exceeded for %i', i)
-            # Remove from _pool
-            process, _index = _process_by_pid(job._worker_pid)
             # Remove from cache and set return value to an exception
             job._set(i, (False, TimeLimitExceeded(hard_timeout)))
+
+            # Remove from _pool
+            process, _index = _process_by_pid(job._worker_pid)
+
             # Run timeout callback
             if job._timeout_callback is not None:
                 job._timeout_callback(soft=False, timeout=hard_timeout)
-            if not process:
-                return
-            # Terminate the process
-            process.terminate()
+            if process:
+                process.terminate()
 
         # Inner-loop
         while self._state == RUN:
@@ -319,7 +367,7 @@ class ResultHandler(PoolThread):
         self.putlock = putlock
         super(ResultHandler, self).__init__()
 
-    def run(self):
+    def body(self):
         get = self.get
         outqueue = self.outqueue
         cache = self.cache
@@ -335,13 +383,15 @@ class ResultHandler(PoolThread):
                 pass
 
         def on_ready(job, i, obj):
-            if putlock is not None:
-                try:
+            try:
+                item = cache[job]
+            except KeyError:
+                return
+            if not item.ready():
+                if putlock is not None:
                     putlock.release()
-                except ValueError:
-                    pass
             try:
-                cache[job]._set(i, obj)
+                item._set(i, obj)
             except KeyError:
                 pass
 
@@ -374,13 +424,7 @@ class ResultHandler(PoolThread):
 
                 on_state_change(task)
 
-        # Notify waiting threads
-        if putlock is not None:
-            try:
-                putlock.release()
-            except ValueError:
-                pass
-
+        time_terminate = None
         while cache and self._state != TERMINATE:
             try:
                 ready, task = poll(0.2)
@@ -394,7 +438,19 @@ class ResultHandler(PoolThread):
                     continue
 
                 on_state_change(task)
-            join_exited_workers()
+            try:
+                join_exited_workers(shutdown=True)
+            except WorkersJoined:
+                now = time.time()
+                if not time_terminate:
+                    time_terminate = now
+                else:
+                    if now - time_terminate > 5.0:
+                        debug('result handler exiting: timed out')
+                        break
+                    debug('result handler: all workers terminated, '
+                          'timeout in %ss' % (
+                              abs(min(now - time_terminate - 5.0, 0))))
 
         if hasattr(outqueue, '_reader'):
             debug('ensuring that outqueue is not full')
@@ -457,8 +513,7 @@ class Pool(object):
         self._worker_handler = self.Supervisor(self)
         self._worker_handler.start()
 
-        self._putlock = threading.BoundedSemaphore(self._processes)
-
+        self._putlock = LaxBoundedSemaphore(self._processes)
         self._task_handler = self.TaskHandler(self._taskqueue,
                                               self._quick_put,
                                               self._outqueue,
@@ -469,7 +524,7 @@ class Pool(object):
         if self.timeout is not None or self.soft_timeout is not None:
             self._timeout_handler = self.TimeoutHandler(
                     self._pool, self._cache,
-                    self.soft_timeout, self.timeout)
+                    self.soft_timeout, self.timeout, self._putlock)
             self._timeout_handler.start()
         else:
             self._timeout_handler = None
@@ -504,40 +559,87 @@ class Pool(object):
         w.start()
         return w
 
-    def _join_exited_workers(self):
+    def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0):
         """Cleanup after any worker processes which have exited due to
         reaching their specified lifetime. Returns True if any workers were
         cleaned up.
         """
+        now = None
+        # The worker may have published a result before being terminated,
+        # but we have no way to accurately tell if it did.  So we wait for
+        # 10 seconds before we mark the job with WorkerLostError.
+        for job in [job for job in self._cache.values()
+                if not job.ready() and job._worker_lost]:
+            now = now or time.time()
+            if now - job._worker_lost > lost_worker_timeout:
+                err = WorkerLostError("Worker exited prematurely.")
+                job._set(None, (False, err))
+
+        if shutdown and not len(self._pool):
+            raise WorkersJoined()
+
         cleaned = []
         for i in reversed(range(len(self._pool))):
             worker = self._pool[i]
             if worker.exitcode is not None:
                 # worker exited
-                debug('cleaning up worker %d' % i)
-                if self._putlock is not None:
-                    try:
-                        self._putlock.release()
-                    except ValueError:
-                        pass
+                debug('Supervisor: cleaning up worker %d' % i)
                 worker.join()
+                debug('Supervisor: worker %d joined' % i)
                 cleaned.append(worker.pid)
                 del self._pool[i]
         if cleaned:
             for job in self._cache.values():
                 for worker_pid in job.worker_pids():
-                    if worker_pid in cleaned:
-                        err = WorkerLostError("Worker exited prematurely.")
-                        job._set(None, (False, err))
+                    if worker_pid in cleaned and not job.ready():
+                        if self._putlock is not None:
+                            self._putlock.release()
+                        job._worker_lost = time.time()
                         continue
             return True
         return False
 
+    def shrink(self, n=1):
+        for i, worker in enumerate(self._iterinactive()):
+            self._processes -= 1
+            if self._putlock:
+                self._putlock._initial_value -= 1
+                self._putlock.acquire()
+            worker.terminate()
+            if i == n - 1:
+                return
+        raise ValueError("Can't shrink pool. All processes busy!")
+
+    def grow(self, n=1):
+        for i in xrange(n):
+            #assert len(self._pool) == self._processes
+            self._processes += 1
+            if self._putlock:
+                cond = self._putlock._Semaphore__cond
+                cond.acquire()
+                try:
+                    self._putlock._initial_value += 1
+                    self._putlock._Semaphore__value += 1
+                    cond.notify()
+                finally:
+                    cond.release()
+
+    def _iterinactive(self):
+        for worker in self._pool:
+            if not self._worker_active(worker):
+                yield worker
+        raise StopIteration()
+
+    def _worker_active(self, worker):
+        for job in self._cache.values():
+            if worker.pid in job.worker_pids():
+                return True
+        return False
+
     def _repopulate_pool(self):
         """Bring the number of pool processes up to the specified number,
         for use after reaping workers which have exited.
         """
-        debug('repopulating pool')
         for i in range(self._processes - len(self._pool)):
             if self._state != RUN:
                 return
@@ -547,8 +649,8 @@ class Pool(object):
     def _maintain_pool(self):
         """"Clean up any exited workers and start replacements for them.
         """
-        if self._join_exited_workers():
-            self._repopulate_pool()
+        self._join_exited_workers()
+        self._repopulate_pool()
 
     def _setup_queues(self):
         from multiprocessing.queues import SimpleQueue
@@ -637,8 +739,11 @@ class Pool(object):
         result = ApplyResult(self._cache, callback,
                              accept_callback, timeout_callback,
                              error_callback, soft_timeout, timeout)
-        if waitforslot:
+
+        if waitforslot and self._putlock is not None:
             self._putlock.acquire()
+            if self._state != RUN:
+                return
         self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
         return result
 
@@ -684,6 +789,8 @@ class Pool(object):
             self._worker_handler.close()
             self._worker_handler.join()
             self._taskqueue.put(None)
+            if self._putlock:
+                self._putlock.clear()
 
     def terminate(self):
         debug('terminating pool')
@@ -699,6 +806,7 @@ class Pool(object):
         self._task_handler.join()
         debug('joining result handler')
         self._result_handler.join()
+        debug('result handler joined')
         for i, p in enumerate(self._pool):
             debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, ))
             p.join()
@@ -728,8 +836,6 @@ class Pool(object):
         debug('helping task handler/workers to finish')
         cls._help_stuff_finish(inqueue, task_handler, len(pool))
 
-        assert result_handler.is_alive() or len(cache) == 0
-
         result_handler.terminate()
         outqueue.put(None)                  # sentinel
 
@@ -760,6 +866,7 @@ class Pool(object):
                     # worker has not yet exited
                     debug('cleaning up worker %d' % p.pid)
                     p.join()
+            debug('pool workers joined')
 DynamicPool = Pool
 
 #
@@ -768,10 +875,12 @@ DynamicPool = Pool
 
 
 class ApplyResult(object):
+    _worker_lost = None
 
     def __init__(self, cache, callback, accept_callback=None,
             timeout_callback=None, error_callback=None, soft_timeout=None,
             timeout=None):
+        self._mutex = threading.Lock()
         self._cond = threading.Condition(threading.Lock())
         self._job = job_counter.next()
         self._cache = cache
@@ -819,28 +928,38 @@ class ApplyResult(object):
             raise self._value
 
     def _set(self, i, obj):
-        self._success, self._value = obj
-        if self._callback and self._success:
-            self._callback(self._value)
-        if self._errback and not self._success:
-            self._errback(self._value)
-        self._cond.acquire()
+        self._mutex.acquire()
         try:
-            self._ready = True
-            self._cond.notify()
+            self._success, self._value = obj
+            self._cond.acquire()
+            try:
+                self._ready = True
+                self._cond.notify()
+            finally:
+                self._cond.release()
+            if self._accepted:
+                self._cache.pop(self._job, None)
+
+            # apply callbacks last
+            if self._callback and self._success:
+                self._callback(self._value)
+            if self._errback and not self._success:
+                self._errback(self._value)
         finally:
-            self._cond.release()
-        if self._accepted:
-            self._cache.pop(self._job, None)
+            self._mutex.release()
 
     def _ack(self, i, time_accepted, pid):
-        self._accepted = True
-        self._time_accepted = time_accepted
-        self._worker_pid = pid
-        if self._accept_callback:
-            self._accept_callback()
-        if self._ready:
-            self._cache.pop(self._job, None)
+        self._mutex.acquire()
+        try:
+            self._accepted = True
+            self._time_accepted = time_accepted
+            self._worker_pid = pid
+            if self._ready:
+                self._cache.pop(self._job, None)
+            if self._accept_callback:
+                self._accept_callback(pid, time_accepted)
+        finally:
+            self._mutex.release()
 
 #
 # Class whose instances are returned by `Pool.map_async()`

+ 22 - 0
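LaxBoundedSemaphore above differs from threading.BoundedSemaphore in that surplus release() calls are ignored instead of raising ValueError, and clear() restores the initial value. An illustrative sketch of that behaviour (not part of the commit):

    from celery.concurrency.processes.pool import LaxBoundedSemaphore

    sem = LaxBoundedSemaphore(2)
    sem.acquire()                  # value 2 -> 1
    sem.release()                  # value 1 -> 2
    sem.release()                  # already at the initial value: ignored
                                   # (a plain BoundedSemaphore would raise ValueError here)
    sem.acquire()                  # value 2 -> 1
    sem.acquire()                  # value 1 -> 0
    sem.clear()                    # release until the initial value (2) is restored
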
celery/concurrency/solo.py

@@ -0,0 +1,22 @@
+import os
+
+from celery.concurrency.base import BasePool, apply_target
+
+
+class TaskPool(BasePool):
+    """Solo task pool (blocking, inline)."""
+
+    def on_start(self):
+        self.pid = os.getpid()
+
+    def on_apply(self, target, args, kwargs, callback=None,
+            accept_callback=None, **_):
+        return apply_target(target, args, kwargs,
+                            callback, accept_callback, self.pid)
+
+    def _get_info(self):
+        return {"max-concurrency": 1,
+                "processes": [self.pid],
+                "max-tasks-per-child": None,
+                "put-guarded-by-semaphore": True,
+                "timeouts": ()}

+ 18 - 54
celery/concurrency/threads.py

@@ -1,52 +1,28 @@
+from celery.concurrency.base import apply_target, BasePool
 
-import threading
-from threadpool import ThreadPool, WorkRequest
 
-from celery import log
-from celery.utils.functional import curry
-from celery.datastructures import ExceptionInfo
+class TaskPool(BasePool):
 
+    def __init__(self, *args, **kwargs):
+        try:
+            import threadpool
+        except ImportError:
+            raise ImportError(
+                    "The threaded pool requires the threadpool module.")
+        self.WorkRequest = threadpool.WorkRequest
+        self.ThreadPool = threadpool.ThreadPool
+        super(TaskPool, self).__init__(*args, **kwargs)
 
-accept_lock = threading.Lock()
+    def on_start(self):
+        self._pool = self.ThreadPool(self.limit)
 
-
-def do_work(target, args=(), kwargs={}, callback=None,
-        accept_callback=None):
-    accept_lock.acquire()
-    try:
-        accept_callback()
-    finally:
-        accept_lock.release()
-    callback(target(*args, **kwargs))
-
-
-class TaskPool(object):
-
-    def __init__(self, limit, logger=None, **kwargs):
-        self.limit = limit
-        self.logger = logger or log.get_default_logger()
-        self._pool = None
-
-    def start(self):
-        self._pool = ThreadPool(self.limit)
-
-    def stop(self):
+    def on_stop(self):
         self._pool.dismissWorkers(self.limit, do_join=True)
 
-    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
-            errbacks=None, accept_callback=None, **compat):
-        args = args or []
-        kwargs = kwargs or {}
-        callbacks = callbacks or []
-        errbacks = errbacks or []
-
-        on_ready = curry(self.on_ready, callbacks, errbacks)
-
-        self.logger.debug("ThreadPool: Apply %s (args:%s kwargs:%s)" % (
-            target, args, kwargs))
-
-        req = WorkRequest(do_work, (target, args, kwargs, on_ready,
-                                    accept_callback))
+    def on_apply(self, target, args=None, kwargs=None, callback=None,
+            accept_callback=None, **_):
+        req = self.WorkRequest(apply_target, (target, args, kwargs, callback,
+                                              accept_callback))
         self._pool.putRequest(req)
         # threadpool also has callback support,
         # but for some reason the callback is not triggered
@@ -54,15 +30,3 @@ class TaskPool(object):
         # Clear the results (if any), so it doesn't grow too large.
         self._pool._results_queue.queue.clear()
         return req
-
-    def on_ready(self, callbacks, errbacks, ret_value):
-        """What to do when a worker task is ready and its return value has
-        been collected."""
-
-        if isinstance(ret_value, ExceptionInfo):
-            if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)):        # pragma: no cover
-                raise ret_value.exception
-            [errback(ret_value) for errback in errbacks]
-        else:
-            [callback(ret_value) for callback in callbacks]

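The rewritten threads pool defers to the third-party `threadpool` package. A rough sketch of the small subset of its API used above (ThreadPool, WorkRequest, putRequest), assuming the package is installed:

    import threadpool

    def add(x, y):
        return x + y

    pool = threadpool.ThreadPool(4)                  # what on_start() builds from self.limit
    req = threadpool.WorkRequest(add, args=(2, 3))   # what on_apply() enqueues
    pool.putRequest(req)
    pool.wait()                                      # block until all requests have run
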
+ 93 - 289
celery/conf.py

@@ -1,289 +1,93 @@
-import sys
-import warnings
-import platform as _platform
-
-from datetime import timedelta
-
-from celery import routes
-from celery.loaders import load_settings
-from celery.utils import LOG_LEVELS
-
-SYSTEM = _platform.system()
-IS_WINDOWS = SYSTEM == "Windows"
-
-DEFAULT_PROCESS_LOG_FMT = """
-    [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
-""".strip()
-DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
-DEFAULT_TASK_LOG_FMT = " ".join("""
-    [%(asctime)s: %(levelname)s/%(processName)s]
-    [%(task_name)s(%(task_id)s)] %(message)s
-""".strip().split())
-
-settings = load_settings()
-
-_DEFAULTS = {
-    "BROKER_CONNECTION_TIMEOUT": 4,
-    "BROKER_CONNECTION_RETRY": True,
-    "BROKER_CONNECTION_MAX_RETRIES": 100,
-    "BROKER_HOST": "localhost",
-    "BROKER_PORT": None,
-    "BROKER_USER": "guest",
-    "BROKER_PASSWORD": "guest",
-    "BROKER_VHOST": "/",
-    "CELERY_REDIRECT_STDOUTS": True,
-    "CELERY_REDIRECT_STDOUTS_LEVEL": "WARNING",
-    "CELERY_RESULT_BACKEND": "database",
-    "CELERY_ALWAYS_EAGER": False,
-    "CELERY_EAGER_PROPAGATES_EXCEPTIONS": False,
-    "CELERY_TASK_RESULT_EXPIRES": timedelta(days=1),
-    "CELERY_AMQP_TASK_RESULT_EXPIRES": None,
-    "CELERY_SEND_EVENTS": False,
-    "CELERY_IGNORE_RESULT": False,
-    "CELERY_STORE_ERRORS_EVEN_IF_IGNORED": False,
-    "CELERY_TASK_SERIALIZER": "pickle",
-    "CELERY_DISABLE_RATE_LIMITS": False,
-    "CELERYD_TASK_TIME_LIMIT": None,
-    "CELERYD_TASK_SOFT_TIME_LIMIT": None,
-    "CELERYD_MAX_TASKS_PER_CHILD": None,
-    "CELERY_ROUTES": None,
-    "CELERY_CREATE_MISSING_QUEUES": True,
-    "CELERY_DEFAULT_ROUTING_KEY": "celery",
-    "CELERY_DEFAULT_QUEUE": "celery",
-    "CELERY_DEFAULT_EXCHANGE": "celery",
-    "CELERY_DEFAULT_EXCHANGE_TYPE": "direct",
-    "CELERY_DEFAULT_DELIVERY_MODE": 2,              # persistent
-    "CELERY_ACKS_LATE": False,
-    "CELERYD_POOL_PUTLOCKS": True,
-    "CELERYD_POOL": "celery.concurrency.processes.TaskPool",
-    "CELERYD_MEDIATOR": "celery.worker.controllers.Mediator",
-    "CELERYD_ETA_SCHEDULER": "celery.utils.timer2.Timer",
-    "CELERYD_LISTENER": "celery.worker.listener.CarrotListener",
-    "CELERYD_CONCURRENCY": 0,                       # defaults to cpu count
-    "CELERYD_PREFETCH_MULTIPLIER": 4,
-    "CELERYD_LOG_FORMAT": DEFAULT_PROCESS_LOG_FMT,
-    "CELERYD_TASK_LOG_FORMAT": DEFAULT_TASK_LOG_FMT,
-    "CELERYD_LOG_COLOR": False,
-    "CELERYD_LOG_LEVEL": "WARN",
-    "CELERYD_LOG_FILE": None,                       # stderr
-    "CELERYBEAT_SCHEDULER": "celery.beat.PersistentScheduler",
-    "CELERYBEAT_SCHEDULE": {},
-    "CELERYD_STATE_DB": None,
-    "CELERYD_ETA_SCHEDULER_PRECISION": 1,
-    "CELERYBEAT_SCHEDULE_FILENAME": "celerybeat-schedule",
-    "CELERYBEAT_MAX_LOOP_INTERVAL": 5 * 60,         # five minutes.
-    "CELERYBEAT_LOG_LEVEL": "INFO",
-    "CELERYBEAT_LOG_FILE": None,                    # stderr
-    "CELERYMON_LOG_LEVEL": "INFO",
-    "CELERYMON_LOG_FILE": None,                     # stderr
-    "CELERYMON_LOG_FORMAT": DEFAULT_LOG_FMT,
-    "CELERY_BROADCAST_QUEUE": "celeryctl",
-    "CELERY_BROADCAST_EXCHANGE": "celeryctl",
-    "CELERY_BROADCAST_EXCHANGE_TYPE": "fanout",
-    "CELERY_EVENT_QUEUE": "celeryevent",
-    "CELERY_EVENT_EXCHANGE": "celeryevent",
-    "CELERY_EVENT_EXCHANGE_TYPE": "direct",
-    "CELERY_EVENT_ROUTING_KEY": "celeryevent",
-    "CELERY_EVENT_SERIALIZER": "json",
-    "CELERY_RESULT_EXCHANGE": "celeryresults",
-    "CELERY_RESULT_EXCHANGE_TYPE": "direct",
-    "CELERY_RESULT_SERIALIZER": "pickle",
-    "CELERY_RESULT_PERSISTENT": False,
-    "CELERY_MAX_CACHED_RESULTS": 5000,
-    "CELERY_TRACK_STARTED": False,
-
-    # Default e-mail settings.
-    "SERVER_EMAIL": "celery@localhost",
-    "EMAIL_HOST": "localhost",
-    "EMAIL_PORT": 25,
-    "ADMINS": (),
-}
-
-
-def isatty(fh):
-    # Fixes bug with mod_wsgi:
-    #   mod_wsgi.Log object has no attribute isatty.
-    return getattr(fh, "isatty", None) and fh.isatty()
-
-
-_DEPRECATION_FMT = """
-%s is deprecated in favor of %s and is scheduled for removal in celery v1.4.
-""".strip()
-
-
-def prepare(m, source=settings, defaults=_DEFAULTS):
-
-    def _get(name, default=None, compat=None):
-        compat = compat or []
-        if default is None:
-            default = defaults.get(name)
-        compat = [name] + compat
-        for i, alias in enumerate(compat):
-            try:
-                value = getattr(source, alias)
-                i > 0 and warnings.warn(
-                        DeprecationWarning(_DEPRECATION_FMT % (alias, name)))
-                return value
-            except AttributeError:
-                pass
-        return default
-
-    # <--- Task                                    <-   --   --- - ----- -- #
-    m.ALWAYS_EAGER = _get("CELERY_ALWAYS_EAGER")
-    m.EAGER_PROPAGATES_EXCEPTIONS = _get("CELERY_EAGER_PROPAGATES_EXCEPTIONS")
-    m.RESULT_BACKEND = _get("CELERY_RESULT_BACKEND", compat=["CELERY_BACKEND"])
-    m.CELERY_BACKEND = m.RESULT_BACKEND              # FIXME Remove in 1.4
-    m.CACHE_BACKEND = _get("CELERY_CACHE_BACKEND") or _get("CACHE_BACKEND")
-    m.CACHE_BACKEND_OPTIONS = _get("CELERY_CACHE_BACKEND_OPTIONS") or {}
-    m.TASK_SERIALIZER = _get("CELERY_TASK_SERIALIZER")
-    m.TASK_RESULT_EXPIRES = _get("CELERY_TASK_RESULT_EXPIRES")
-    m.AMQP_TASK_RESULT_EXPIRES = _get("CELERY_AMQP_TASK_RESULT_EXPIRES")
-    m.IGNORE_RESULT = _get("CELERY_IGNORE_RESULT")
-    m.TRACK_STARTED = _get("CELERY_TRACK_STARTED")
-    m.ACKS_LATE = _get("CELERY_ACKS_LATE")
-    m.REDIRECT_STDOUTS = _get("CELERY_REDIRECT_STDOUTS")
-    m.REDIRECT_STDOUTS_LEVEL = _get("CELERY_REDIRECT_STDOUTS_LEVEL")
-
-    # Make sure TASK_RESULT_EXPIRES is a timedelta.
-    if isinstance(m.TASK_RESULT_EXPIRES, int):
-        m.TASK_RESULT_EXPIRES = timedelta(seconds=m.TASK_RESULT_EXPIRES)
-
-    # <--- SQLAlchemy                              <-   --   --- - ----- -- #
-    m.RESULT_DBURI = _get("CELERY_RESULT_DBURI")
-    m.RESULT_ENGINE_OPTIONS = _get("CELERY_RESULT_ENGINE_OPTIONS")
-
-    # <--- Client                                  <-   --   --- - ----- -- #
-
-    m.MAX_CACHED_RESULTS = _get("CELERY_MAX_CACHED_RESULTS")
-
-    # <--- Worker                                  <-   --   --- - ----- -- #
-
-    m.SEND_EVENTS = _get("CELERY_SEND_EVENTS")
-    m.DEFAULT_RATE_LIMIT = _get("CELERY_DEFAULT_RATE_LIMIT")
-    m.DISABLE_RATE_LIMITS = _get("CELERY_DISABLE_RATE_LIMITS")
-    m.CELERYD_TASK_TIME_LIMIT = _get("CELERYD_TASK_TIME_LIMIT")
-    m.CELERYD_TASK_SOFT_TIME_LIMIT = _get("CELERYD_TASK_SOFT_TIME_LIMIT")
-    m.CELERYD_MAX_TASKS_PER_CHILD = _get("CELERYD_MAX_TASKS_PER_CHILD")
-    m.STORE_ERRORS_EVEN_IF_IGNORED = \
-            _get("CELERY_STORE_ERRORS_EVEN_IF_IGNORED")
-    m.CELERY_SEND_TASK_ERROR_EMAILS = _get("CELERY_SEND_TASK_ERROR_EMAILS",
-                                           False,
-                                    compat=["SEND_CELERY_TASK_ERROR_EMAILS"])
-    m.CELERY_TASK_ERROR_WHITELIST = _get("CELERY_TASK_ERROR_WHITELIST")
-    m.CELERYD_LOG_FORMAT = _get("CELERYD_LOG_FORMAT",
-                          compat=["CELERYD_DAEMON_LOG_FORMAT"])
-    m.CELERYD_TASK_LOG_FORMAT = _get("CELERYD_TASK_LOG_FORMAT")
-    m.CELERYD_LOG_FILE = _get("CELERYD_LOG_FILE")
-    m.CELERYD_LOG_COLOR = _get("CELERYD_LOG_COLOR",
-                       m.CELERYD_LOG_FILE is None and isatty(sys.stderr))
-    if IS_WINDOWS:          # windows console does not support ANSI colors.
-        m.CELERYD_LOG_COLOR = False
-
-    m.CELERYD_LOG_LEVEL = _get("CELERYD_LOG_LEVEL",
-                            compat=["CELERYD_DAEMON_LOG_LEVEL"])
-    if not isinstance(m.CELERYD_LOG_LEVEL, int):
-        m.CELERYD_LOG_LEVEL = LOG_LEVELS[m.CELERYD_LOG_LEVEL.upper()]
-    m.CELERYD_STATE_DB = _get("CELERYD_STATE_DB")
-    m.CELERYD_CONCURRENCY = _get("CELERYD_CONCURRENCY")
-    m.CELERYD_PREFETCH_MULTIPLIER = _get("CELERYD_PREFETCH_MULTIPLIER")
-    m.CELERYD_POOL_PUTLOCKS = _get("CELERYD_POOL_PUTLOCKS")
-
-    m.CELERYD_POOL = _get("CELERYD_POOL")
-    m.CELERYD_LISTENER = _get("CELERYD_LISTENER")
-    m.CELERYD_MEDIATOR = _get("CELERYD_MEDIATOR")
-    m.CELERYD_ETA_SCHEDULER = _get("CELERYD_ETA_SCHEDULER")
-    m.CELERYD_ETA_SCHEDULER_PRECISION = _get("CELERYD_ETA_SCHEDULER_PRECISION")
-
-    # :--- Email settings                           <-   --   --- - ----- -- #
-    m.ADMINS = _get("ADMINS")
-    m.SERVER_EMAIL = _get("SERVER_EMAIL")
-    m.EMAIL_HOST = _get("EMAIL_HOST")
-    m.EMAIL_HOST_USER = _get("EMAIL_HOST_USER")
-    m.EMAIL_HOST_PASSWORD = _get("EMAIL_HOST_PASSWORD")
-    m.EMAIL_PORT = _get("EMAIL_PORT")
-
-    # :--- Broker connections                       <-   --   --- - ----- -- #
-    m.BROKER_HOST = _get("BROKER_HOST")
-    m.BROKER_PORT = _get("BROKER_PORT")
-    m.BROKER_USER = _get("BROKER_USER")
-    m.BROKER_PASSWORD = _get("BROKER_PASSWORD")
-    m.BROKER_VHOST = _get("BROKER_VHOST")
-    m.BROKER_USE_SSL = _get("BROKER_USE_SSL")
-    m.BROKER_INSIST = _get("BROKER_INSIST")
-    m.BROKER_CONNECTION_TIMEOUT = _get("BROKER_CONNECTION_TIMEOUT",
-                                compat=["CELERY_BROKER_CONNECTION_TIMEOUT"])
-    m.BROKER_CONNECTION_RETRY = _get("BROKER_CONNECTION_RETRY",
-                                compat=["CELERY_BROKER_CONNECTION_RETRY"])
-    m.BROKER_CONNECTION_MAX_RETRIES = _get("BROKER_CONNECTION_MAX_RETRIES",
-                            compat=["CELERY_BROKER_CONNECTION_MAX_RETRIES"])
-    m.BROKER_BACKEND = _get("BROKER_TRANSPORT") or \
-                            _get("BROKER_BACKEND") or \
-                                _get("CARROT_BACKEND")
-
-    # <--- Message routing                         <-   --   --- - ----- -- #
-    m.DEFAULT_QUEUE = _get("CELERY_DEFAULT_QUEUE")
-    m.DEFAULT_ROUTING_KEY = _get("CELERY_DEFAULT_ROUTING_KEY")
-    m.DEFAULT_EXCHANGE = _get("CELERY_DEFAULT_EXCHANGE")
-    m.DEFAULT_EXCHANGE_TYPE = _get("CELERY_DEFAULT_EXCHANGE_TYPE")
-    m.DEFAULT_DELIVERY_MODE = _get("CELERY_DEFAULT_DELIVERY_MODE")
-    m.QUEUES = _get("CELERY_QUEUES") or {
-                    m.DEFAULT_QUEUE: {
-                        "exchange": m.DEFAULT_EXCHANGE,
-                        "exchange_type": m.DEFAULT_EXCHANGE_TYPE,
-                        "binding_key": m.DEFAULT_ROUTING_KEY,
-                    },
-    }
-    m.CREATE_MISSING_QUEUES = _get("CELERY_CREATE_MISSING_QUEUES")
-    m.ROUTES = routes.prepare(_get("CELERY_ROUTES") or [])
-    # :--- Broadcast queue settings                 <-   --   --- - ----- -- #
-
-    m.BROADCAST_QUEUE = _get("CELERY_BROADCAST_QUEUE")
-    m.BROADCAST_EXCHANGE = _get("CELERY_BROADCAST_EXCHANGE")
-    m.BROADCAST_EXCHANGE_TYPE = _get("CELERY_BROADCAST_EXCHANGE_TYPE")
-
-    # :--- Event queue settings                     <-   --   --- - ----- -- #
-
-    m.EVENT_QUEUE = _get("CELERY_EVENT_QUEUE")
-    m.EVENT_EXCHANGE = _get("CELERY_EVENT_EXCHANGE")
-    m.EVENT_EXCHANGE_TYPE = _get("CELERY_EVENT_EXCHANGE_TYPE")
-    m.EVENT_ROUTING_KEY = _get("CELERY_EVENT_ROUTING_KEY")
-    m.EVENT_SERIALIZER = _get("CELERY_EVENT_SERIALIZER")
-
-    # :--- AMQP Backend settings                    <-   --   --- - ----- -- #
-
-    m.RESULT_EXCHANGE = _get("CELERY_RESULT_EXCHANGE")
-    m.RESULT_EXCHANGE_TYPE = _get("CELERY_RESULT_EXCHANGE_TYPE")
-    m.RESULT_SERIALIZER = _get("CELERY_RESULT_SERIALIZER")
-    m.RESULT_PERSISTENT = _get("CELERY_RESULT_PERSISTENT")
-
-    # :--- Celery Beat                              <-   --   --- - ----- -- #
-    m.CELERYBEAT_LOG_LEVEL = _get("CELERYBEAT_LOG_LEVEL")
-    m.CELERYBEAT_LOG_FILE = _get("CELERYBEAT_LOG_FILE")
-    m.CELERYBEAT_SCHEDULE = _get("CELERYBEAT_SCHEDULE")
-    m.CELERYBEAT_SCHEDULER = _get("CELERYBEAT_SCHEDULER")
-    m.CELERYBEAT_SCHEDULE_FILENAME = _get("CELERYBEAT_SCHEDULE_FILENAME")
-    m.CELERYBEAT_MAX_LOOP_INTERVAL = _get("CELERYBEAT_MAX_LOOP_INTERVAL")
-
-    # :--- Celery Monitor                           <-   --   --- - ----- -- #
-    m.CELERYMON_LOG_LEVEL = _get("CELERYMON_LOG_LEVEL")
-    m.CELERYMON_LOG_FILE = _get("CELERYMON_LOG_FILE")
-
-prepare(sys.modules[__name__])
-
-
-def _init_queues(queues):
-    """Convert configuration mapping to a table of queues digestible
-    by a :class:`carrot.messaging.ConsumerSet`."""
-
-    def _defaults(opts):
-        opts.setdefault("exchange", DEFAULT_EXCHANGE),
-        opts.setdefault("exchange_type", DEFAULT_EXCHANGE_TYPE)
-        opts.setdefault("binding_key", DEFAULT_EXCHANGE)
-        opts.setdefault("routing_key", opts.get("binding_key"))
-        return opts
-
-    return dict((queue, _defaults(opts)) for queue, opts in queues.items())
-
-
-def get_queues():
-    return _init_queues(QUEUES)
+"""
+
+**DEPRECATED**
+
+Use :mod:`celery.app.defaults` instead.
+
+
+"""
+from celery import current_app
+from celery.app import defaults
+
+_DEFAULTS = defaults.DEFAULTS
+conf = current_app.conf
+
+ALWAYS_EAGER = conf.CELERY_ALWAYS_EAGER
+EAGER_PROPAGATES_EXCEPTIONS = conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS
+RESULT_BACKEND = conf.CELERY_RESULT_BACKEND
+CACHE_BACKEND = conf.CELERY_CACHE_BACKEND
+CACHE_BACKEND_OPTIONS = conf.CELERY_CACHE_BACKEND_OPTIONS
+TASK_SERIALIZER = conf.CELERY_TASK_SERIALIZER
+TASK_RESULT_EXPIRES = conf.CELERY_TASK_RESULT_EXPIRES
+IGNORE_RESULT = conf.CELERY_IGNORE_RESULT
+TRACK_STARTED = conf.CELERY_TRACK_STARTED
+ACKS_LATE = conf.CELERY_ACKS_LATE
+REDIRECT_STDOUTS = conf.CELERY_REDIRECT_STDOUTS
+REDIRECT_STDOUTS_LEVEL = conf.CELERY_REDIRECT_STDOUTS_LEVEL
+RESULT_DBURI = conf.CELERY_RESULT_DBURI
+RESULT_ENGINE_OPTIONS = conf.CELERY_RESULT_ENGINE_OPTIONS
+MAX_CACHED_RESULTS = conf.CELERY_MAX_CACHED_RESULTS
+SEND_EVENTS = conf.CELERY_SEND_EVENTS
+DEFAULT_RATE_LIMIT = conf.CELERY_DEFAULT_RATE_LIMIT
+DISABLE_RATE_LIMITS = conf.CELERY_DISABLE_RATE_LIMITS
+CELERYD_TASK_TIME_LIMIT = conf.CELERYD_TASK_TIME_LIMIT
+CELERYD_TASK_SOFT_TIME_LIMIT = conf.CELERYD_TASK_SOFT_TIME_LIMIT
+CELERYD_MAX_TASKS_PER_CHILD = conf.CELERYD_MAX_TASKS_PER_CHILD
+STORE_ERRORS_EVEN_IF_IGNORED = conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED
+CELERY_SEND_TASK_ERROR_EMAILS = conf.CELERY_SEND_TASK_ERROR_EMAILS
+CELERY_TASK_ERROR_WHITELIST = conf.CELERY_TASK_ERROR_WHITELIST
+CELERYD_LOG_FORMAT = conf.CELERYD_LOG_FORMAT
+CELERYD_TASK_LOG_FORMAT = conf.CELERYD_TASK_LOG_FORMAT
+CELERYD_LOG_FILE = conf.CELERYD_LOG_FILE
+CELERYD_LOG_COLOR = conf.CELERYD_LOG_COLOR
+CELERYD_LOG_LEVEL = conf.CELERYD_LOG_LEVEL
+CELERYD_STATE_DB = conf.CELERYD_STATE_DB
+CELERYD_CONCURRENCY = conf.CELERYD_CONCURRENCY
+CELERYD_PREFETCH_MULTIPLIER = conf.CELERYD_PREFETCH_MULTIPLIER
+CELERYD_POOL_PUTLOCKS = conf.CELERYD_POOL_PUTLOCKS
+CELERYD_POOL = conf.CELERYD_POOL
+CELERYD_LISTENER = conf.CELERYD_CONSUMER
+CELERYD_MEDIATOR = conf.CELERYD_MEDIATOR
+CELERYD_ETA_SCHEDULER = conf.CELERYD_ETA_SCHEDULER
+CELERYD_ETA_SCHEDULER_PRECISION = conf.CELERYD_ETA_SCHEDULER_PRECISION
+ADMINS = conf.ADMINS
+SERVER_EMAIL = conf.SERVER_EMAIL
+EMAIL_HOST = conf.EMAIL_HOST
+EMAIL_HOST_USER = conf.EMAIL_HOST_USER
+EMAIL_HOST_PASSWORD = conf.EMAIL_HOST_PASSWORD
+EMAIL_PORT = conf.EMAIL_PORT
+BROKER_HOST = conf.BROKER_HOST
+BROKER_PORT = conf.BROKER_PORT
+BROKER_USER = conf.BROKER_USER
+BROKER_PASSWORD = conf.BROKER_PASSWORD
+BROKER_VHOST = conf.BROKER_VHOST
+BROKER_USE_SSL = conf.BROKER_USE_SSL
+BROKER_INSIST = conf.BROKER_INSIST
+BROKER_CONNECTION_TIMEOUT = conf.BROKER_CONNECTION_TIMEOUT
+BROKER_CONNECTION_RETRY = conf.BROKER_CONNECTION_RETRY
+BROKER_CONNECTION_MAX_RETRIES = conf.BROKER_CONNECTION_MAX_RETRIES
+BROKER_BACKEND = conf.BROKER_BACKEND
+DEFAULT_QUEUE = conf.CELERY_DEFAULT_QUEUE
+DEFAULT_ROUTING_KEY = conf.CELERY_DEFAULT_ROUTING_KEY
+DEFAULT_EXCHANGE = conf.CELERY_DEFAULT_EXCHANGE
+DEFAULT_EXCHANGE_TYPE = conf.CELERY_DEFAULT_EXCHANGE_TYPE
+DEFAULT_DELIVERY_MODE = conf.CELERY_DEFAULT_DELIVERY_MODE
+QUEUES = conf.CELERY_QUEUES
+CREATE_MISSING_QUEUES = conf.CELERY_CREATE_MISSING_QUEUES
+ROUTES = conf.CELERY_ROUTES
+BROADCAST_QUEUE = conf.CELERY_BROADCAST_QUEUE
+BROADCAST_EXCHANGE = conf.CELERY_BROADCAST_EXCHANGE
+BROADCAST_EXCHANGE_TYPE = conf.CELERY_BROADCAST_EXCHANGE_TYPE
+EVENT_SERIALIZER = conf.CELERY_EVENT_SERIALIZER
+RESULT_EXCHANGE = conf.CELERY_RESULT_EXCHANGE
+RESULT_EXCHANGE_TYPE = conf.CELERY_RESULT_EXCHANGE_TYPE
+RESULT_SERIALIZER = conf.CELERY_RESULT_SERIALIZER
+RESULT_PERSISTENT = conf.CELERY_RESULT_PERSISTENT
+CELERYBEAT_LOG_LEVEL = conf.CELERYBEAT_LOG_LEVEL
+CELERYBEAT_LOG_FILE = conf.CELERYBEAT_LOG_FILE
+CELERYBEAT_SCHEDULER = conf.CELERYBEAT_SCHEDULER
+CELERYBEAT_SCHEDULE = conf.CELERYBEAT_SCHEDULE
+CELERYBEAT_SCHEDULE_FILENAME = conf.CELERYBEAT_SCHEDULE_FILENAME
+CELERYBEAT_MAX_LOOP_INTERVAL = conf.CELERYBEAT_MAX_LOOP_INTERVAL
+CELERYMON_LOG_LEVEL = conf.CELERYMON_LOG_LEVEL
+CELERYMON_LOG_FILE = conf.CELERYMON_LOG_FILE

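With this change the module-level constants are simply snapshots of the active app's configuration taken at import time. A small illustrative check (not part of the commit):

    from celery import conf, current_app

    # Both spellings read the same setting; new code should use the app config.
    assert conf.ALWAYS_EAGER == current_app.conf.CELERY_ALWAYS_EAGER
    assert conf.BROKER_HOST == current_app.conf.BROKER_HOST
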
+ 4 - 4
celery/contrib/abortable.py

@@ -65,9 +65,9 @@ In the producer:
 
        ...
 
-After the ``async_result.abort()`` call, the task execution is not
+After the `async_result.abort()` call, the task execution is not
 aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking the ``async_result`` status, or call ``async_result.wait()`` to
+checking the `async_result` status, or call `async_result.wait()` to
 have it block until the task is finished.
 
 .. note::
@@ -101,8 +101,8 @@ ABORTED = "ABORTED"
 class AbortableAsyncResult(AsyncResult):
     """Represents a abortable result.
 
-    Specifically, this gives the ``AsyncResult`` a :meth:`abort()` method,
-    which sets the state of the underlying Task to ``"ABORTED"``.
+    Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
+    which sets the state of the underlying Task to `"ABORTED"`.
 
     """
 

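For reference, a hypothetical producer-side helper built on the behaviour described above (abort() only requests abortion; the result still has to be polled or waited on):

    from celery.contrib.abortable import AbortableAsyncResult

    def cancel_and_wait(task_id, timeout=10):
        result = AbortableAsyncResult(task_id)
        result.abort()                  # sets the underlying task state to "ABORTED"
        result.wait(timeout=timeout)    # block until the task actually returns
        return result
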
+ 150 - 36
celery/contrib/batches.py

@@ -1,56 +1,170 @@
+"""
+celery.contrib.batches
+======================
+
+Collects messages and processes them as a list.
+
+**Example**
+
+A click counter that flushes the buffer every 100 messages, or every
+10 seconds.
+
+.. code-block:: python
+
+    from celery.task import task
+    from celery.contrib.batches import Batches
+
+    # Flush after 100 messages, or 10 seconds.
+    @task(base=Batches, flush_every=100, flush_interval=10)
+    def count_click(requests):
+        from collections import Counter
+        count = Counter(request.kwargs["url"] for request in requests)
+        for url, count in count.items():
+            print(">>> Clicks: %s -> %s" % (url, count))
+
+Registering the click is done as follows:
+
+    >>> count_click.delay(url="http://example.com")
+
+.. warning::
+
+    For this to work you have to set
+    :setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where
+    the final multiplied value is higher than ``flush_every``.
+
+    In the future we hope to add the ability to direct batching tasks
+    to a channel with different QoS requirements than the task channel.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
 from itertools import count
-from collections import deque, defaultdict
+from Queue import Queue
 
-from celery.task.base import Task
+from kombu.utils import cached_property
 
+from celery.datastructures import consume_queue
+from celery.task import Task
+from celery.utils import timer2
+from celery.worker import state
 
-class Batches(Task):
-    abstract = True
-    flush_every = 10
 
-    def __init__(self):
-        self._buffer = deque()
-        self._count = count().next
+def apply_batches_task(task, args, loglevel, logfile):
+    task.request.update({"loglevel": loglevel, "logfile": logfile})
+    try:
+        return task(*args)
+    finally:
+        task.request.clear()
 
-    def execute(self, wrapper, pool, loglevel, logfile):
-        self._buffer.append((wrapper, pool, loglevel, logfile))
 
-        if not self._count() % self.flush_every:
-            self.flush(self._buffer)
-            self._buffer.clear()
+class SimpleRequest(object):
+    """Pickleable request."""
+
+    #: task id
+    id = None
+
+    #: task name
+    name = None
+
+    #: positional arguments
+    args = ()
+
+    #: keyword arguments
+    kwargs = {}
+
+    #: message delivery information.
+    delivery_info = None
+
+    #: worker node name
+    hostname = None
 
-    def flush(self, tasks):
-        for wrapper, pool, loglevel, logfile in tasks:
-            wrapper.execute_using_pool(pool, loglevel, logfile)
+    def __init__(self, id, name, args, kwargs, delivery_info, hostname):
+        self.id = id
+        self.name = name
+        self.args = args
+        self.kwargs = kwargs
+        self.delivery_info = delivery_info
+        self.hostname = hostname
 
+    @classmethod
+    def from_request(cls, request):
+        return cls(request.task_id, request.task_name, request.args,
+                   request.kwargs, request.delivery_info, request.hostname)
 
-class Counter(Task):
+
+class Batches(Task):
     abstract = True
+
+    #: Maximum number of messages in the buffer.
     flush_every = 10
 
-    def __init__(self):
-        self._buffer = deque()
-        self._count = count().next
+    #: Timeout in seconds before buffer is flushed anyway.
+    flush_interval = 30
 
-    def execute(self, wrapper, pool, loglevel, logfile):
-        self._buffer.append((wrapper.args, wrapper.kwargs))
+    def __init__(self):
+        self._buffer = Queue()
+        self._count = count(1).next
+        self._tref = None
+        self._pool = None
+        self._logging = None
 
-        if not self._count() % self.flush_every:
-            self.flush(self._buffer)
-            self._buffer.clear()
+    def run(self, requests):
+        raise NotImplementedError("%r must implement run(requests)" % (self, ))
 
-    def flush(self, buffer):
-        raise NotImplementedError("Counters must implement 'flush'")
+    def flush(self, requests):
+        return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
+                                                for r in requests], ))
 
+    def execute(self, request, pool, loglevel, logfile):
+        if not self._pool:         # just take pool from first task.
+            self._pool = pool
+        if not self._logging:
+            self._logging = loglevel, logfile
 
-class ClickCounter(Task):
-    flush_every = 1000
+        state.task_ready(request)  # immediately remove from worker state.
+        self._buffer.put(request)
 
-    def flush(self, buffer):
-        urlcount = defaultdict(lambda: 0)
-        for args, kwargs in buffer:
-            urlcount[kwargs["url"]] += 1
+        if self._tref is None:     # first request starts flush timer.
+            self._tref = timer2.apply_interval(self.flush_interval * 1000,
+                                               self._do_flush)
 
-        for url, count in urlcount.items():
-            print(">>> Clicks: %s -> %s" % (url, count))
-            # increment_in_db(url, n=count)
+        if not self._count() % self.flush_every:
+            self._do_flush()
+
+    def _do_flush(self):
+        self.debug("Wake-up to flush buffer...")
+        requests = None
+        if self._buffer.qsize():
+            requests = list(consume_queue(self._buffer))
+            if requests:
+                self.debug("Buffer complete: %s" % (len(requests), ))
+                self.flush(requests)
+        if not requests:
+            self.debug("Cancelling timer: Nothing in buffer.")
+            self._tref.cancel()  # cancel timer.
+            self._tref = None
+
+    def apply_buffer(self, requests, args=(), kwargs={}):
+        acks_late = [], []
+        [acks_late[r.task.acks_late].append(r) for r in requests]
+        assert requests and (acks_late[True] or acks_late[False])
+
+        def on_accepted(pid, time_accepted):
+            [req.acknowledge() for req in acks_late[False]]
+
+        def on_return(result):
+            [req.acknowledge() for req in acks_late[True]]
+
+        loglevel, logfile = self._logging
+        return self._pool.apply_async(apply_batches_task,
+                    (self, args, loglevel, logfile),
+                    accept_callback=on_accepted,
+                    callbacks=acks_late[True] and [on_return] or [])
+
+    def debug(self, msg):
+        self.logger.debug("%s: %s" % (self.name, msg))
+
+    @cached_property
+    def logger(self):
+        return self.app.log.get_default_logger()

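A further hypothetical example of the new API: every element passed to run() is a SimpleRequest carrying the original task id, args and kwargs, so a batch callable can still act per request:

    from celery.task import task
    from celery.contrib.batches import Batches

    @task(base=Batches, flush_every=50, flush_interval=5)
    def log_batch(requests):
        for request in requests:
            print("request %s from %s: kwargs=%r" % (
                    request.id, request.hostname, request.kwargs))
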
+ 154 - 0
celery/contrib/rdb.py

@@ -0,0 +1,154 @@
+"""
+celery.contrib.rdb
+==================
+
+Remote debugger for Celery tasks running in multiprocessing pool workers.
+Inspired by http://snippets.dzone.com/posts/show/7248
+
+**Usage**
+
+.. code-block:: python
+
+    from celery.contrib import rdb
+    from celery.decorators import task
+
+    @task
+    def add(x, y):
+        result = x + y
+        rdb.set_trace()
+        return result
+
+
+**Environment Variables**
+
+.. envvar:: CELERY_RDB_HOST
+
+    Hostname to bind to.  Default is '127.0.0.1', which means the socket
+    will only be accessible from the local host.
+
+.. envvar:: CELERY_RDB_PORT
+
+    Base port to bind to.  Default is 6899.
+    The debugger will try to find an available port starting from the
+    base port.  The selected port will be logged by celeryd.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
+
+import errno
+import os
+import socket
+import sys
+
+from pdb import Pdb
+
+default_port = 6899
+
+CELERY_RDB_HOST = os.environ.get("CELERY_RDB_HOST") or "127.0.0.1"
+CELERY_RDB_PORT = int(os.environ.get("CELERY_RDB_PORT") or default_port)
+
+#: Holds the currently active debugger.
+_current = [None]
+
+_frame = getattr(sys, "_getframe")
+
+
+class Rdb(Pdb):
+    me = "Remote Debugger"
+    _prev_outs = None
+    _sock = None
+
+    def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
+            port_search_limit=100, port_skew=+0):
+        self.active = True
+
+        try:
+            from multiprocessing import current_process
+            _, port_skew = current_process().name.split('-')
+        except (ImportError, ValueError):
+            pass
+        port_skew = int(port_skew)
+
+        self._prev_handles = sys.stdin, sys.stdout
+        this_port = None
+        for i in xrange(port_search_limit):
+            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            this_port = port + port_skew + i
+            try:
+                self._sock.bind((host, this_port))
+            except socket.error, exc:
+                if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
+                    continue
+                raise
+            else:
+                break
+        else:
+            raise Exception(
+                "%s: Could not find available port. Please set using "
+                "environment variable CELERY_RDB_PORT" % (self.me, ))
+
+        self._sock.listen(1)
+        me = "%s:%s" % (self.me, this_port)
+        context = self.context = {"me": me, "host": host, "port": this_port}
+        print("%(me)s: Please telnet %(host)s %(port)s."
+              "  Type `exit` in session to continue." % context)
+        print("%(me)s: Waiting for client..." % context)
+
+        self._client, address = self._sock.accept()
+        context["remote_addr"] = ":".join(map(str, address))
+        print("%(me)s: In session with %(remote_addr)s" % context)
+        self._handle = sys.stdin = sys.stdout = self._client.makefile("rw")
+        Pdb.__init__(self, completekey="tab",
+                           stdin=self._handle, stdout=self._handle)
+
+    def _close_session(self):
+        self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
+        self._handle.close()
+        self._client.close()
+        self._sock.close()
+        self.active = False
+        print("%(me)s: Session %(remote_addr)s ended." % self.context)
+
+    def do_continue(self, arg):
+        self._close_session()
+        self.set_continue()
+        return 1
+    do_c = do_cont = do_continue
+
+    def do_quit(self, arg):
+        self._close_session()
+        self.set_quit()
+        return 1
+    do_q = do_exit = do_quit
+
+    def set_trace(self, frame=None):
+        if frame is None:
+            frame = _frame().f_back
+        try:
+            Pdb.set_trace(self, frame)
+        except socket.error, exc:
+            # connection reset by peer.
+            if exc.errno != errno.ECONNRESET:
+                raise
+
+    def set_quit(self):
+        # this raises a BdbQuit exception that we are unable to catch.
+        sys.settrace(None)
+
+
+def debugger():
+    """Returns the current debugger instance (if any),
+    or creates a new one."""
+    rdb = _current[0]
+    if rdb is None or not rdb.active:
+        rdb = _current[0] = Rdb()
+    return rdb
+
+
+def set_trace(frame=None):
+    """Set breakpoint at current location, or a specified frame"""
+    if frame is None:
+        frame = _frame().f_back
+    return debugger().set_trace(frame)

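Once set_trace() blocks, celeryd logs the selected port. A hypothetical client-side session using only the standard library (6900 is just an example of base port plus skew):

    import telnetlib

    # Connect from another shell on the same host (the CELERY_RDB_HOST default).
    session = telnetlib.Telnet("127.0.0.1", 6900)
    session.interact()    # drops into the Pdb prompt; type `exit` to resume the task
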
+ 162 - 123
celery/datastructures.py

@@ -1,18 +1,33 @@
+"""
+celery.datastructures
+=====================
+
+Custom data structures.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+"""
 from __future__ import generators
 
 import time
 import traceback
 
-from UserList import UserList
-from Queue import Queue, Empty as QueueEmpty
+from itertools import chain
+from Queue import Empty
 
 from celery.utils.compat import OrderedDict
 
 
-class AttributeDict(dict):
-    """Dict subclass with attribute access."""
+class AttributeDictMixin(object):
+    """Adds attribute access to mappings.
+
+    `d.key -> d[key]`
+
+    """
 
     def __getattr__(self, key):
+        """`d.key -> d[key]`"""
         try:
             return self[key]
         except KeyError:
@@ -20,68 +35,151 @@ class AttributeDict(dict):
                     self.__class__.__name__, key))
 
     def __setattr__(self, key, value):
+        """`d[key] = value -> d.key = value`"""
         self[key] = value
 
 
-class PositionQueue(UserList):
-    """A positional queue of a specific length, with slots that are either
-    filled or unfilled. When all of the positions are filled, the queue
-    is considered :meth:`full`.
+class AttributeDict(dict, AttributeDictMixin):
+    """Dict subclass with attribute access."""
+    pass
+
+
+class DictAttribute(object):
+    """Dict interface to attributes.
 
-    :param length: see :attr:`length`.
+    `obj[k] -> obj.k`
 
+    """
+
+    def __init__(self, obj):
+        self.obj = obj
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
 
-    .. attribute:: length
+    def setdefault(self, key, default):
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
 
-        The number of items required for the queue to be considered full.
+    def __getitem__(self, key):
+        try:
+            return getattr(self.obj, key)
+        except AttributeError:
+            raise KeyError(key)
+
+    def __setitem__(self, key, value):
+        setattr(self.obj, key, value)
+
+    def __contains__(self, key):
+        return hasattr(self.obj, key)
+
+    def iteritems(self):
+        return vars(self.obj).iteritems()
+
+
+class ConfigurationView(AttributeDictMixin):
+    """A view over an applications configuration dicts.
+
+    If the key does not exist in ``changes``, the ``defaults`` dict
+    is consulted.
+
+    :param changes:  Dict containing changes to the configuration.
+    :param defaults: Dict containing the default configuration.
 
     """
+    changes = None
+    defaults = None
+
+    def __init__(self, changes, defaults):
+        self.__dict__["changes"] = changes
+        self.__dict__["defaults"] = defaults
+        self.__dict__["_order"] = [changes] + defaults
+
+    def __getitem__(self, key):
+        for d in self.__dict__["_order"]:
+            try:
+                return d[key]
+            except KeyError:
+                pass
+        raise KeyError(key)
 
-    class UnfilledPosition(object):
-        """Describes an unfilled slot."""
+    def __setitem__(self, key, value):
+        self.__dict__["changes"][key] = value
 
-        def __init__(self, position):
-            # This is not used, but is an argument from xrange
-            # so why not.
-            self.position = position
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
 
-    def __init__(self, length):
-        self.length = length
-        self.data = map(self.UnfilledPosition, xrange(length))
+    def setdefault(self, key, default):
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
 
-    def full(self):
-        """Returns ``True`` if all of the slots has been filled."""
-        return len(self) >= self.length
+    def update(self, *args, **kwargs):
+        return self.__dict__["changes"].update(*args, **kwargs)
 
-    def __len__(self):
-        """``len(self)`` -> number of slots filled with real values."""
-        return len(self.filled)
+    def __contains__(self, key):
+        for d in self.__dict__["_order"]:
+            if key in d:
+                return True
+        return False
 
-    @property
-    def filled(self):
-        """Returns the filled slots as a list."""
-        return filter(lambda v: not isinstance(v, self.UnfilledPosition),
-                      self.data)
+    def __repr__(self):
+        return repr(dict(self.iteritems()))
 
+    def __iter__(self):
+        return self.iterkeys()
 
-class ExceptionInfo(object):
-    """Exception wrapping an exception and its traceback.
+    def _iter(self, op):
+        # defaults must be first in the stream, so values in
+        # changes takes precedence.
+        return chain(*[op(d) for d in reversed(self.__dict__["_order"])])
+
+    def iterkeys(self):
+        return self._iter(lambda d: d.iterkeys())
+
+    def iteritems(self):
+        return self._iter(lambda d: d.iteritems())
 
-    :param exc_info: The exception tuple info as returned by
-        :func:`traceback.format_exception`.
+    def itervalues(self):
+        return self._iter(lambda d: d.itervalues())
 
-    .. attribute:: exception
+    def keys(self):
+        return list(self.iterkeys())
 
-        The original exception.
+    def items(self):
+        return list(self.iteritems())
 
-    .. attribute:: traceback
+    def values(self):
+        return list(self.itervalues())
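
(Editorial note, not part of the patch: a minimal usage sketch of ConfigurationView. It assumes the class is importable from celery.datastructures and, judging from `__init__` above, that `defaults` is passed as a list of mappings; the setting names are only illustrative.)

    >>> from celery.datastructures import ConfigurationView
    >>> defaults = [{"CELERYD_CONCURRENCY": 0,
    ...              "CELERY_RESULT_BACKEND": "amqp"}]
    >>> changes = {"CELERYD_CONCURRENCY": 4}
    >>> conf = ConfigurationView(changes, defaults)
    >>> conf["CELERYD_CONCURRENCY"]          # changes shadow defaults
    4
    >>> conf["CELERY_RESULT_BACKEND"]        # falls back to defaults
    'amqp'
    >>> conf["CELERY_TIMEZONE"] = "UTC"      # writes always go to `changes`
    >>> "CELERY_TIMEZONE" in changes
    True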
 
-        A traceback from the point when :attr:`exception` was raised.
+
+class ExceptionInfo(object):
+    """Exception wrapping an exception and its traceback.
+
+    :param exc_info: The exception info tuple as returned by
+        :func:`sys.exc_info`.
 
     """
 
+    #: The original exception.
+    exception = None
+
+    #: A traceback from the point when :attr:`exception` was raised.
+    traceback = None
+
     def __init__(self, exc_info):
-        type_, exception, tb = exc_info
+        _, exception, _ = exc_info
         self.exception = exception
         self.traceback = ''.join(traceback.format_exception(*exc_info))
 
@@ -89,10 +187,7 @@ class ExceptionInfo(object):
         return self.traceback
 
     def __repr__(self):
-        return "<%s.%s: %s>" % (
-                self.__class__.__module__,
-                self.__class__.__name__,
-                str(self.exception))
+        return "<ExceptionInfo: %r>" % (self.exception, )
 
 
 def consume_queue(queue):
@@ -101,7 +196,7 @@ def consume_queue(queue):
 
     The iterator stops as soon as the queue raises :exc:`Queue.Empty`.
 
-    Example
+    *Examples*
 
         >>> q = Queue()
         >>> map(q.put, range(4))
@@ -114,81 +209,19 @@ def consume_queue(queue):
     while 1:
         try:
             yield queue.get_nowait()
-        except QueueEmpty:
+        except Empty:
             break
 
 
-class SharedCounter(object):
-    """Thread-safe counter.
-
-    Please note that the final value is not synchronized, this means
-    that you should not update the value by using a previous value, the only
-    reliable operations are increment and decrement.
-
-    Example
-
-        >>> max_clients = SharedCounter(initial_value=10)
-
-        # Thread one
-        >>> max_clients += 1 # OK (safe)
-
-        # Thread two
-        >>> max_clients -= 3 # OK (safe)
-
-        # Main thread
-        >>> if client >= int(max_clients): # Max clients now at 8
-        ...    wait()
-
-
-        >>> max_client = max_clients + 10 # NOT OK (unsafe)
-
-    """
-
-    def __init__(self, initial_value):
-        self._value = initial_value
-        self._modify_queue = Queue()
-
-    def increment(self, n=1):
-        """Increment value."""
-        self += n
-        return int(self)
-
-    def decrement(self, n=1):
-        """Decrement value."""
-        self -= n
-        return int(self)
-
-    def _update_value(self):
-        self._value += sum(consume_queue(self._modify_queue))
-        return self._value
-
-    def __iadd__(self, y):
-        """``self += y``"""
-        self._modify_queue.put(y * +1)
-        return self
-
-    def __isub__(self, y):
-        """``self -= y``"""
-        self._modify_queue.put(y * -1)
-        return self
-
-    def __int__(self):
-        """``int(self) -> int``"""
-        return self._update_value()
-
-    def __repr__(self):
-        return "<SharedCounter: int(%s)>" % str(int(self))
-
-
 class LimitedSet(object):
     """Kind-of Set with limitations.
 
-    Good for when you need to test for membership (``a in set``),
+    Good for when you need to test for membership (`a in set`),
     but the list might become too big, so you want to limit it so it doesn't
     consume too many resources.
 
     :keyword maxlen: Maximum number of members before we start
-        deleting expired members.
+                     evicting expired members.
     :keyword expires: Time in seconds, before a membership expires.
 
     """
@@ -219,7 +252,7 @@ class LimitedSet(object):
                 if not self.expires or time.time() > when + self.expires:
                     try:
                         self.pop_value(value)
-                    except TypeError:                   # pragma: no cover
+                    except TypeError:  # pragma: no cover
                         continue
             break
 
@@ -278,22 +311,20 @@ class TokenBucket(object):
     Most of this code was stolen from an entry in the ASPN Python Cookbook:
     http://code.activestate.com/recipes/511490/
 
-    :param fill_rate: see :attr:`fill_rate`.
-    :keyword capacity: see :attr:`capacity`.
-
-    .. attribute:: fill_rate
+    .. admonition:: Thread safety
 
-        The rate in tokens/second that the bucket will be refilled.
+        This implementation may not be thread safe.
 
-    .. attribute:: capacity
+    """
 
-        Maximum number of tokens in the bucket. Default is ``1``.
+    #: The rate in tokens/second at which the bucket will be refilled.
+    fill_rate = None
 
-    .. attribute:: timestamp
+    #: Maximum number of tokens in the bucket.
+    capacity = 1
 
-        Timestamp of the last time a token was taken out of the bucket.
-
-    """
+    #: Timestamp of the last time a token was taken out of the bucket.
+    timestamp = None
 
     def __init__(self, fill_rate, capacity=1):
         self.capacity = float(capacity)
@@ -302,6 +333,8 @@ class TokenBucket(object):
         self.timestamp = time.time()
 
     def can_consume(self, tokens=1):
+        """Returns :const:`True` if `tokens` number of tokens can be consumed
+        from the bucket."""
         if tokens <= self._get_tokens():
             self._tokens -= tokens
             return True
@@ -309,7 +342,13 @@ class TokenBucket(object):
 
     def expected_time(self, tokens=1):
         """Returns the expected time in seconds when a new token should be
-        available. *Note: consumes a token from the bucket*"""
+        available.
+
+        .. admonition:: Warning
+
+            This consumes a token from the bucket.
+
+        """
         _tokens = self._get_tokens()
         tokens = max(tokens, _tokens)
         return (tokens - _tokens) / self.fill_rate
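
(Editorial sketch, not part of the patch: what the token bucket semantics look like in practice. Timings in the comments are approximate, and expected_time() is avoided here since, as warned above, it consumes a token.)

    >>> import time
    >>> bucket = TokenBucket(fill_rate=1, capacity=2)   # ~1 token/sec, burst of 2
    >>> bucket.can_consume(1)
    True
    >>> bucket.can_consume(2)         # only about one token left right now
    False
    >>> time.sleep(2)                 # let the bucket refill
    >>> bucket.can_consume(2)
    True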

+ 4 - 4
celery/db/a805d4bd.py

@@ -2,14 +2,14 @@
 a805d4bd
 This module fixes a bug with pickling and relative imports in Python < 2.6.
 
-The problem is with pickling an e.g. ``exceptions.KeyError`` instance.
-As SQLAlchemy has its own ``exceptions`` module, pickle will try to
-lookup ``KeyError`` in the wrong module, resulting in this exception::
+The problem is with pickling e.g. an `exceptions.KeyError` instance.
+As SQLAlchemy has its own `exceptions` module, pickle will try to
+look up :exc:`KeyError` in the wrong module, resulting in this exception::
 
     cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
         attribute lookup exceptions.KeyError failed
 
-doing ``import exceptions`` just before the dump in ``sqlalchemy.types``
+Doing `import exceptions` just before the dump in `sqlalchemy.types`
 reveals the source of the bug::
 
     EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/

+ 6 - 4
celery/db/models.py

@@ -19,7 +19,7 @@ class Task(ResultModelBase):
     id = sa.Column(sa.Integer, sa.Sequence("task_id_sequence"),
                    primary_key=True,
                    autoincrement=True)
-    task_id = sa.Column(sa.String(255))
+    task_id = sa.Column(sa.String(255), unique=True)
     status = sa.Column(sa.String(50), default=states.PENDING)
     result = sa.Column(PickleType, nullable=True)
     date_done = sa.Column(sa.DateTime, default=datetime.now,
@@ -33,7 +33,8 @@ class Task(ResultModelBase):
         return {"task_id": self.task_id,
                 "status": self.status,
                 "result": self.result,
-                "traceback": self.traceback}
+                "traceback": self.traceback,
+                "date_done": self.date_done}
 
     def __repr__(self):
         return "<Task %s state: %s>" % (self.task_id, self.status)
@@ -46,7 +47,7 @@ class TaskSet(ResultModelBase):
 
     id = sa.Column(sa.Integer, sa.Sequence("taskset_id_sequence"),
                 autoincrement=True, primary_key=True)
-    taskset_id = sa.Column(sa.String(255))
+    taskset_id = sa.Column(sa.String(255), unique=True)
     result = sa.Column(sa.PickleType, nullable=True)
     date_done = sa.Column(sa.DateTime, default=datetime.now,
                        nullable=True)
@@ -57,7 +58,8 @@ class TaskSet(ResultModelBase):
 
     def to_dict(self):
         return {"taskset_id": self.taskset_id,
-                "result": self.result}
+                "result": self.result,
+                "date_done": self.date_done}
 
     def __repr__(self):
         return u"<TaskSet: %s>" % (self.taskset_id, )

+ 1 - 2
celery/db/session.py

@@ -2,7 +2,6 @@ from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy.ext.declarative import declarative_base
 
-from celery import conf
 from celery.utils.compat import defaultdict
 
 ResultModelBase = declarative_base()
@@ -28,7 +27,7 @@ def setup_results(engine):
         _SETUP["results"] = True
 
 
-def ResultSession(dburi=conf.RESULT_DBURI, **kwargs):
+def ResultSession(dburi, **kwargs):
     engine, session = create_session(dburi, **kwargs)
     setup_results(engine)
     return session()

+ 25 - 72
celery/decorators.py

@@ -1,87 +1,40 @@
+# -*- coding: utf-8 -*-
 """
+celery.decorators✞
+==================
 
-Decorators
+Deprecated decorators, use `celery.task.task`,
+and `celery.task.periodic_task` instead.
 
-"""
-from inspect import getargspec
-
-from celery import registry
-from celery.task.base import Task, PeriodicTask
-from celery.utils.functional import wraps
-
-
-def task(*args, **options):
-    """Decorator to create a task class out of any callable.
-
-    Examples:
-
-    .. code-block:: python
-
-        @task()
-        def refresh_feed(url):
-            return Feed.objects.get(url=url).refresh()
-
-    With setting extra options and using retry.
-
-    .. code-block:: python
+The new decorators do not support magic keyword arguments.
 
-        @task(exchange="feeds")
-        def refresh_feed(url, **kwargs):
-            try:
-                return Feed.objects.get(url=url).refresh()
-            except socket.error, exc:
-                refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc)
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
 
-    Calling the resulting task:
-
-        >>> refresh_feed("http://example.com/rss") # Regular
-        <Feed: http://example.com/rss>
-        >>> refresh_feed.delay("http://example.com/rss") # Async
-        <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
-
-
-    """
-
-    def inner_create_task_cls(**options):
-
-        def _create_task_cls(fun):
-            base = options.pop("base", Task)
-
-            @wraps(fun, assigned=("__module__", "__name__"))
-            def run(self, *args, **kwargs):
-                return fun(*args, **kwargs)
-
-            # Save the argspec for this task so we can recognize
-            # which default task kwargs we're going to pass to it later.
-            # (this happens in celery.utils.fun_takes_kwargs)
-            run.argspec = getargspec(fun)
+"""
+import warnings
 
-            cls_dict = dict(options, run=run,
-                            __module__=fun.__module__,
-                            __doc__=fun.__doc__)
-            T = type(fun.__name__, (base, ), cls_dict)()
-            return registry.tasks[T.name]               # global instance.
+from celery import task as _task
 
-        return _create_task_cls
 
-    if len(args) == 1 and callable(args[0]):
-        return inner_create_task_cls()(*args)
-    return inner_create_task_cls(**options)
+warnings.warn(PendingDeprecationWarning("""
+The `celery.decorators` module and the magic keyword arguments
+are pending deprecation and will be deprecated in 2.4, then removed
+in 3.0.
 
+`task.request` should be used instead of magic keyword arguments,
+and `celery.task.task` used instead of `celery.decorators.task`.
 
-def periodic_task(**options):
-    """Task decorator to create a periodic task.
+See the 2.2 Changelog for more information.
 
-    Example task, scheduling a task once every day:
+"""))
 
-    .. code-block:: python
 
-        from datetime import timedelta
+def task(*args, **kwargs):  # ✞
+    kwargs.setdefault("accept_magic_kwargs", True)
+    return _task.task(*args, **kwargs)
 
-        @periodic_task(run_every=timedelta(days=1))
-        def cronjob(**kwargs):
-            logger = cronjob.get_logger(**kwargs)
-            logger.warn("Task running...")
 
-    """
-    return task(**dict({"base": PeriodicTask}, **options))
+def periodic_task(*args, **kwargs):  # ✞
+    kwargs.setdefault("accept_magic_kwargs", True)
+    return _task.periodic_task(*args, **kwargs)
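
(Editorial sketch of the migration this shim points to: define tasks with celery.task.task and read execution context from task.request instead of magic keyword arguments. The `add` task below is purely illustrative.)

    from celery.task import task

    @task(ignore_result=True)
    def add(x, y):
        if add.request.retries:       # context now lives on `request`
            pass
        return x + y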

+ 138 - 40
celery/events/__init__.py

@@ -5,13 +5,18 @@ import threading
 from collections import deque
 from itertools import count
 
-from celery.messaging import EventPublisher, EventConsumer
+from kombu.entity import Exchange, Queue
+from kombu.messaging import Consumer, Producer
+
+from celery.app import app_or_default
+from celery.utils import gen_unique_id
+
+event_exchange = Exchange("celeryev", type="topic")
 
 
 def create_event(type, fields):
-    std = {"type": type,
-           "timestamp": fields.get("timestamp") or time.time()}
-    return dict(fields, **std)
+    return dict(fields, type=type,
+                        timestamp=fields.get("timestamp") or time.time())
 
 
 def Event(type, **fields):
@@ -26,37 +31,53 @@ def Event(type, **fields):
 class EventDispatcher(object):
     """Send events as messages.
 
-    :param connection: Carrot connection.
+    :param connection: Connection to the broker.
 
     :keyword hostname: Hostname to identify ourselves as,
         by default uses the hostname returned by :func:`socket.gethostname`.
 
-    :keyword enabled: Set to ``False`` to not actually publish any events,
+    :keyword enabled: Set to :const:`False` to not actually publish any events,
         making :meth:`send` a noop operation.
 
+    :keyword channel: Can be used instead of `connection` to specify
+        an exact channel to use when sending events.
+
+    :keyword buffer_while_offline: If enabled, events will be buffered
+       while the connection is down. :meth:`flush` must be called
+       as soon as the connection is re-established.
+
     You need to :meth:`close` this after use.
 
     """
 
-    def __init__(self, connection, hostname=None, enabled=True):
+    def __init__(self, connection=None, hostname=None, enabled=True,
+            channel=None, buffer_while_offline=True, app=None,
+            serializer=None):
+        self.app = app_or_default(app)
         self.connection = connection
+        self.channel = channel
         self.hostname = hostname or socket.gethostname()
         self.enabled = enabled
+        self.buffer_while_offline = buffer_while_offline
         self._lock = threading.Lock()
         self.publisher = None
         self._outbound_buffer = deque()
+        self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
 
         if self.enabled:
             self.enable()
 
     def enable(self):
         self.enabled = True
-        self.publisher = EventPublisher(self.connection)
+        channel = self.channel or self.connection.channel()
+        self.publisher = Producer(channel, exchange=event_exchange,
+                                  serializer=self.serializer)
 
     def disable(self):
         self.enabled = False
         if self.publisher is not None:
-            self.publisher.close()
+            if not self.channel:  # close auto channel.
+                self.publisher.channel.close()
             self.publisher = None
 
     def send(self, type, **fields):
@@ -70,43 +91,62 @@ class EventDispatcher(object):
             return
 
         self._lock.acquire()
-        event = Event(type, hostname=self.hostname, **fields)
+        event = Event(type, hostname=self.hostname,
+                            clock=self.app.clock.forward(), **fields)
         try:
             try:
-                self.publisher.send(event)
+                self.publisher.publish(event,
+                                       routing_key=type.replace("-", "."))
             except Exception, exc:
-                self._outbound_buffer.append((event, exc))
+                if not self.buffer_while_offline:
+                    raise
+                self._outbound_buffer.append((type, fields, exc))
         finally:
             self._lock.release()
 
     def flush(self):
         while self._outbound_buffer:
-            event, _ = self._outbound_buffer.popleft()
-            self.publisher.send(event)
+            try:
+                type, fields, _ = self._outbound_buffer.popleft()
+            except IndexError:
+                return
+            self.send(type, **fields)
+
+    def copy_buffer(self, other):
+        self._outbound_buffer = other._outbound_buffer
 
     def close(self):
         """Close the event dispatcher."""
         self._lock.locked() and self._lock.release()
-        self.publisher and self.publisher.close()
+        self.publisher and self.publisher.channel.close()
 
 
 class EventReceiver(object):
     """Capture events.
 
-    :param connection: Carrot connection.
+    :param connection: Connection to the broker.
     :keyword handlers: Event handlers.
 
-    :attr:`handlers`` is a dict of event types and their handlers,
-    the special handler ``"*`"`` captures all events that doesn't have a
+    :attr:`handlers` is a dict of event types and their handlers;
+    the special handler `"*"` captures all events that don't have a
     handler.
 
     """
     handlers = {}
 
-    def __init__(self, connection, handlers=None):
+    def __init__(self, connection, handlers=None, routing_key="#",
+            node_id=None, app=None):
+        self.app = app_or_default(app)
         self.connection = connection
         if handlers is not None:
             self.handlers = handlers
+        self.routing_key = routing_key
+        self.node_id = node_id or gen_unique_id()
+        self.queue = Queue("%s.%s" % ("celeryev", self.node_id),
+                           exchange=event_exchange,
+                           routing_key=self.routing_key,
+                           auto_delete=True,
+                           durable=False)
 
     def process(self, type, event):
         """Process the received event by dispatching it to the appropriate
@@ -115,33 +155,91 @@ class EventReceiver(object):
         handler and handler(event)
 
     def consumer(self):
-        consumer = EventConsumer(self.connection)
-        consumer.register_callback(self._receive)
-        return consumer
+        """Create event consumer.
 
-    def capture(self, limit=None, timeout=None):
-        """Open up a consumer capturing events.
+        .. warning::
 
-        This has to run in the main process, and it will never
-        stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
+            This creates a new channel that needs to be closed
+            by calling `consumer.channel.close()`.
 
         """
+        consumer = Consumer(self.connection.channel(),
+                            queues=[self.queue],
+                            no_ack=True)
+        consumer.register_callback(self._receive)
+        return consumer
+
+    def itercapture(self, limit=None, timeout=None, wakeup=True):
         consumer = self.consumer()
         consumer.consume()
+        if wakeup:
+            self.wakeup_workers(channel=consumer.channel)
+
+        yield consumer
+
         try:
-            for iteration in count(0):
-                if limit and iteration > limit:
-                    break
-                try:
-                    consumer.connection.drain_events(timeout=timeout)
-                except socket.timeout:
-                    if timeout:
-                        raise
-                except socket.error:
-                    pass
+            self.drain_events(limit=limit, timeout=timeout)
         finally:
-            consumer.close()
+            consumer.cancel()
+            consumer.channel.close()
+
+    def capture(self, limit=None, timeout=None, wakeup=True):
+        """Open up a consumer capturing events.
 
-    def _receive(self, message_data, message):
-        type = message_data.pop("type").lower()
-        self.process(type, create_event(type, message_data))
+        This has to run in the main process, and it will never
+        stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
+
+        """
+        list(self.itercapture(limit=limit,
+                              timeout=timeout,
+                              wakeup=wakeup))
+
+    def wakeup_workers(self, channel=None):
+        self.app.control.broadcast("heartbeat",
+                                   connection=self.connection,
+                                   channel=channel)
+
+    def drain_events(self, limit=None, timeout=None):
+        for iteration in count(0):
+            if limit and iteration >= limit:
+                break
+            try:
+                self.connection.drain_events(timeout=timeout)
+            except socket.timeout:
+                if timeout:
+                    raise
+            except socket.error:
+                pass
+
+    def _receive(self, body, message):
+        type = body.pop("type").lower()
+        clock = body.get("clock")
+        if clock:
+            self.app.clock.adjust(clock)
+        self.process(type, create_event(type, body))
+
+
+class Events(object):
+
+    def __init__(self, app=None):
+        self.app = app
+
+    def Receiver(self, connection, handlers=None, routing_key="#",
+            node_id=None):
+        return EventReceiver(connection,
+                             handlers=handlers,
+                             routing_key=routing_key,
+                             node_id=node_id,
+                             app=self.app)
+
+    def Dispatcher(self, connection=None, hostname=None, enabled=True,
+            channel=None, buffer_while_offline=True):
+        return EventDispatcher(connection,
+                               hostname=hostname,
+                               enabled=enabled,
+                               channel=channel,
+                               app=self.app)
+
+    def State(self):
+        from celery.events.state import State as _State
+        return _State()
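
(Editorial sketch of how these pieces are wired together; evdump/evtop below follow the same pattern. A running broker and a default app are assumed, and the handler body is illustrative.)

    from celery.app import app_or_default

    app = app_or_default()
    conn = app.broker_connection()

    def on_event(event):
        # every event dict carries at least "type", "hostname" and "timestamp"
        pass

    recv = app.events.Receiver(conn, handlers={"*": on_event})
    recv.capture(limit=10, timeout=5)     # raises socket.timeout if idle

    dispatcher = app.events.Dispatcher(conn)
    dispatcher.send("worker-heartbeat")   # routing key becomes "worker.heartbeat"
    dispatcher.close()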

+ 131 - 54
celery/events/cursesmon.py

@@ -7,24 +7,29 @@ import time
 from datetime import datetime
 from itertools import count
 from textwrap import wrap
+from math import ceil
 
 from celery import states
-from celery.events import EventReceiver
-from celery.events.state import State
-from celery.messaging import establish_connection
-from celery.task import control
+from celery.app import app_or_default
 from celery.utils import abbr, abbrtask
 
+BORDER_SPACING = 4
+LEFT_BORDER_OFFSET = 3
+UUID_WIDTH = 36
+STATE_WIDTH = 8
+TIMESTAMP_WIDTH = 8
+MIN_WORKER_WIDTH = 15
+MIN_TASK_WIDTH = 16
+
 
 class CursesMonitor(object):
     keymap = {}
     win = None
     screen_width = None
-    screen_delay = 0.1
+    screen_delay = 10
     selected_task = None
     selected_position = 0
     selected_str = "Selected: "
-    limit = 20
     foreground = curses.COLOR_BLACK
     background = curses.COLOR_WHITE
     online_str = "Workers online: "
@@ -33,7 +38,8 @@ class CursesMonitor(object):
     greet = "celeryev %s" % celery.__version__
     info_str = "Info: "
 
-    def __init__(self, state, keymap=None):
+    def __init__(self, state, keymap=None, app=None):
+        self.app = app_or_default(app)
         self.keymap = keymap or self.keymap
         self.state = state
         default_keymap = {"J": self.move_selection_down,
@@ -45,22 +51,57 @@ class CursesMonitor(object):
                           "L": self.selection_rate_limit}
         self.keymap = dict(default_keymap, **self.keymap)
 
-    def format_row(self, uuid, worker, task, timestamp, state):
-        my, mx = self.win.getmaxyx()
-        mx = mx - 3
-        uuid_max = 36
-        if mx < 88:
-            uuid_max = mx - 52 - 2
-        uuid = abbr(uuid, uuid_max).ljust(uuid_max)
-        worker = abbr(worker, 16).ljust(16)
-        task = abbrtask(task, 16).ljust(16)
-        state = abbr(state, 8).ljust(8)
-        timestamp = timestamp.ljust(8)
+    def format_row(self, uuid, task, worker, timestamp, state):
+        mx = self.display_width
+
+        # include spacing
+        detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
+        uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH
+
+        if uuid_space < UUID_WIDTH:
+            uuid_width = uuid_space
+        else:
+            uuid_width = UUID_WIDTH
+
+        detail_width = detail_width - uuid_width - 1
+        task_width = int(ceil(detail_width / 2.0))
+        worker_width = detail_width - task_width - 1
+
+        uuid = abbr(uuid, uuid_width).ljust(uuid_width)
+        worker = abbr(worker, worker_width).ljust(worker_width)
+        task = abbrtask(task, task_width).ljust(task_width)
+        state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
+        timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
+
         row = "%s %s %s %s %s " % (uuid, worker, task, timestamp, state)
         if self.screen_width is None:
             self.screen_width = len(row[:mx])
         return row[:mx]
 
+    @property
+    def screen_width(self):
+        _, mx = self.win.getmaxyx()
+        return mx
+
+    @property
+    def screen_height(self):
+        my, _ = self.win.getmaxyx()
+        return my
+
+    @property
+    def display_width(self):
+        _, mx = self.win.getmaxyx()
+        return mx - BORDER_SPACING
+
+    @property
+    def display_height(self):
+        my, _ = self.win.getmaxyx()
+        return my - 10
+
+    @property
+    def limit(self):
+        return self.display_height
+
     def find_position(self):
         if not self.tasks:
             return 0
@@ -129,7 +170,8 @@ class CursesMonitor(object):
         rlimit = self.readline(my - 2, 3 + len(r))
 
         if rlimit:
-            reply = control.rate_limit(task.name, rlimit.strip(), reply=True)
+            reply = self.app.control.rate_limit(task.name,
+                                                rlimit.strip(), reply=True)
             self.alert_remote_control_reply(reply)
 
     def alert_remote_control_reply(self, reply):
@@ -167,7 +209,7 @@ class CursesMonitor(object):
             while True:
                 ch = self.win.getch(x, y + i)
                 if ch != -1:
-                    if ch in (10, curses.KEY_ENTER):                # enter
+                    if ch in (10, curses.KEY_ENTER):            # enter
                         break
                     if ch in (27, ):
                         buffer = str()
@@ -181,7 +223,7 @@ class CursesMonitor(object):
     def revoke_selection(self):
         if not self.selected_task:
             return curses.beep()
-        reply = control.revoke(self.selected_task, reply=True)
+        reply = self.app.control.revoke(self.selected_task, reply=True)
         self.alert_remote_control_reply(reply)
 
     def selection_info(self):
@@ -189,6 +231,7 @@ class CursesMonitor(object):
             return
 
         def alert_callback(mx, my, xs):
+            my, mx = self.win.getmaxyx()
             y = count(xs).next
             task = self.state.tasks[self.selected_task]
             info = task.info(extra=["state"])
@@ -197,15 +240,24 @@ class CursesMonitor(object):
             for key, value in infoitems:
                 if key is None:
                     continue
+                value = str(value)
                 curline = y()
                 keys = key + ": "
                 self.win.addstr(curline, 3, keys, curses.A_BOLD)
-                wrapped = wrap(str(value), mx - 2)
+                wrapped = wrap(value, mx - 2)
                 if len(wrapped) == 1:
-                    self.win.addstr(curline, len(keys) + 3, wrapped[0])
+                    self.win.addstr(curline, len(keys) + 3,
+                            abbr(wrapped[0],
+                                 self.screen_width - (len(keys) + 3)))
                 else:
                     for subline in wrapped:
-                        self.win.addstr(y(), 3, " " * 4 + subline,
+                        nexty = y()
+                        if nexty >= my - 1:
+                            subline = " " * 4 + "[...]"
+                        elif nexty >= my:
+                            break
+                        self.win.addstr(nexty, 3,
+                                abbr(" " * 4 + subline, self.screen_width - 4),
                                 curses.A_NORMAL)
 
         return self.alert(alert_callback,
@@ -241,10 +293,28 @@ class CursesMonitor(object):
         return self.alert(alert_callback,
                 "Task Result for %s" % self.selected_task)
 
+    def display_task_row(self, lineno, task):
+        state_color = self.state_colors.get(task.state)
+        attr = curses.A_NORMAL
+        if task.uuid == self.selected_task:
+            attr = curses.A_STANDOUT
+        timestamp = datetime.fromtimestamp(
+                        task.timestamp or time.time())
+        timef = timestamp.strftime("%H:%M:%S")
+        line = self.format_row(task.uuid, task.name,
+                               task.worker.hostname,
+                               timef, task.state)
+        self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)
+
+        if state_color:
+            self.win.addstr(lineno,
+                            len(line) - STATE_WIDTH + BORDER_SPACING - 1,
+                            task.state, state_color | attr)
+
     def draw(self):
         win = self.win
         self.handle_keypress()
-        x = 3
+        x = LEFT_BORDER_OFFSET
         y = blank_line = count(2).next
         my, mx = win.getmaxyx()
         win.erase()
@@ -257,34 +327,23 @@ class CursesMonitor(object):
                 curses.A_BOLD | curses.A_UNDERLINE)
         tasks = self.tasks
         if tasks:
-            for uuid, task in tasks:
+            for row, (uuid, task) in enumerate(tasks):
+                if row > self.display_height:
+                    break
+
                 if task.uuid:
-                    state_color = self.state_colors.get(task.state)
-                    attr = curses.A_NORMAL
-                    if task.uuid == self.selected_task:
-                        attr = curses.A_STANDOUT
-                    timestamp = datetime.fromtimestamp(
-                                    task.timestamp or time.time())
-                    timef = timestamp.strftime("%H:%M:%S")
-                    line = self.format_row(uuid, task.name,
-                                           task.worker.hostname,
-                                           timef, task.state)
                     lineno = y()
-                    win.addstr(lineno, x, line, attr)
-                    if state_color:
-                        win.addstr(lineno, len(line) - len(task.state) + 1,
-                                task.state, state_color | attr)
-                    if task.ready:
-                        task.visited = time.time()
+                self.display_task_row(lineno, task)
 
         # -- Footer
         blank_line()
-        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width)
+        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)
 
         # Selected Task Info
         if self.selected_task:
             win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
             info = "Missing extended info"
+            detail = ""
             try:
                 selection = self.state.tasks[self.selected_task]
             except KeyError:
@@ -298,7 +357,16 @@ class CursesMonitor(object):
                     info["result"] = abbr(info["result"], 16)
                 info = " ".join("%s=%s" % (key, value)
                             for key, value in info.items())
-            win.addstr(my - 5, x + len(self.selected_str), info)
+                detail = "... -> key i"
+            infowin = abbr(info,
+                           self.screen_width - len(self.selected_str) - 2,
+                           detail)
+            win.addstr(my - 5, x + len(self.selected_str), infowin)
+            # Make ellipsis bold
+            if detail in infowin:
+                detailpos = len(infowin) - len(detail)
+                win.addstr(my - 5, x + len(self.selected_str) + detailpos,
+                        detail, curses.A_BOLD)
         else:
             win.addstr(my - 5, x, "No task selected", curses.A_NORMAL)
 
@@ -306,7 +374,7 @@ class CursesMonitor(object):
         if self.workers:
             win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
             win.addstr(my - 4, x + len(self.online_str),
-                    ", ".join(self.workers), curses.A_NORMAL)
+                    ", ".join(sorted(self.workers)), curses.A_NORMAL)
         else:
             win.addstr(my - 4, x, "No workers discovered.")
 
@@ -321,10 +389,16 @@ class CursesMonitor(object):
                 curses.A_DIM)
 
         # Help
-        win.addstr(my - 2, x, self.help_title, curses.A_BOLD)
-        win.addstr(my - 2, x + len(self.help_title), self.help, curses.A_DIM)
+        self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
+        self.safe_add_str(my - 2, x + len(self.help_title), self.help,
+                          curses.A_DIM)
         win.refresh()
 
+    def safe_add_str(self, y, x, string, *args, **kwargs):
+        if x + len(string) > self.screen_width:
+            string = string[:self.screen_width - x]
+        self.win.addstr(y, x, string, *args, **kwargs)
+
     def init_screen(self):
         self.win = curses.initscr()
         self.win.nodelay(True)
@@ -357,7 +431,7 @@ class CursesMonitor(object):
         curses.endwin()
 
     def nap(self):
-        curses.napms(int(self.screen_delay * 1000))
+        curses.napms(self.screen_delay)
 
     @property
     def tasks(self):
@@ -383,17 +457,20 @@ class DisplayThread(threading.Thread):
             self.display.nap()
 
 
-def evtop():
+def evtop(app=None):
     sys.stderr.write("-> evtop: starting capture...\n")
-    state = State()
-    display = CursesMonitor(state)
+    app = app_or_default(app)
+    state = app.events.State()
+    conn = app.broker_connection()
+    recv = app.events.Receiver(conn, handlers={"*": state.event})
+    capture = recv.itercapture()
+    consumer = capture.next()
+    display = CursesMonitor(state, app=app)
     display.init_screen()
     refresher = DisplayThread(display)
     refresher.start()
-    conn = establish_connection()
-    recv = EventReceiver(conn, handlers={"*": state.event})
     try:
-        recv.capture(limit=None)
+        capture.next()
     except Exception:
         refresher.shutdown = True
         refresher.join()

+ 5 - 5
celery/events/dumper.py

@@ -2,9 +2,8 @@ import sys
 
 from datetime import datetime
 
+from celery.app import app_or_default
 from celery.datastructures import LocalCache
-from celery.events import EventReceiver
-from celery.messaging import establish_connection
 
 
 TASK_NAMES = LocalCache(0xFFF)
@@ -52,11 +51,12 @@ class Dumper(object):
                                     humanize_type(type), sep, task, fields))
 
 
-def evdump():
+def evdump(app=None):
     sys.stderr.write("-> evdump: starting capture...\n")
+    app = app_or_default(app)
     dumper = Dumper()
-    conn = establish_connection()
-    recv = EventReceiver(conn, handlers={"*": dumper.on_event})
+    conn = app.broker_connection()
+    recv = app.events.Receiver(conn, handlers={"*": dumper.on_event})
     try:
         recv.capture()
     except (KeyboardInterrupt, SystemExit):

+ 36 - 30
celery/events/snapshot.py

@@ -1,36 +1,39 @@
-from celery.utils import timer2
+import atexit
 
-from celery import conf
-from celery import log
+from celery import platforms
+from celery.app import app_or_default
 from celery.datastructures import TokenBucket
-from celery.events import EventReceiver
-from celery.events.state import State
-from celery.messaging import establish_connection
-from celery.utils import instantiate
+from celery.utils import timer2
+from celery.utils import instantiate, LOG_LEVELS
 from celery.utils.dispatch import Signal
 from celery.utils.timeutils import rate
 
 
 class Polaroid(object):
+    timer = timer2
     shutter_signal = Signal(providing_args=("state", ))
     cleanup_signal = Signal()
     clear_after = False
 
     _tref = None
+    _ctref = None
 
     def __init__(self, state, freq=1.0, maxrate=None,
-            cleanup_freq=3600.0, logger=None):
+            cleanup_freq=3600.0, logger=None, timer=None, app=None):
+        self.app = app_or_default(app)
         self.state = state
         self.freq = freq
         self.cleanup_freq = cleanup_freq
-        self.logger = logger or log.get_default_logger(name="celery.cam")
+        self.timer = timer or self.timer
+        self.logger = logger or \
+                self.app.log.get_default_logger(name="celery.cam")
         self.maxrate = maxrate and TokenBucket(rate(maxrate))
 
     def install(self):
-        self._tref = timer2.apply_interval(self.freq * 1000.0,
-                                           self.capture)
-        self._ctref = timer2.apply_interval(self.cleanup_freq * 1000.0,
-                                            self.cleanup)
+        self._tref = self.timer.apply_interval(self.freq * 1000.0,
+                                               self.capture)
+        self._ctref = self.timer.apply_interval(self.cleanup_freq * 1000.0,
+                                                self.cleanup)
 
     def on_shutter(self, state):
         pass
@@ -39,17 +42,13 @@ class Polaroid(object):
         pass
 
     def cleanup(self):
-        self.debug("Cleanup: Running...")
+        self.logger.debug("Cleanup: Running...")
         self.cleanup_signal.send(None)
         self.on_cleanup()
 
-    def debug(self, msg):
-        if self.logger:
-            self.logger.debug(msg)
-
     def shutter(self):
         if self.maxrate is None or self.maxrate.can_consume():
-            self.debug("Shutter: %s" % (self.state, ))
+            self.logger.debug("Shutter: %s" % (self.state, ))
             self.shutter_signal.send(self.state)
             self.on_shutter(self.state)
 
@@ -58,7 +57,7 @@ class Polaroid(object):
 
     def cancel(self):
         if self._tref:
-            self._tref()
+            self._tref()  # flush all received events.
             self._tref.cancel()
         if self._ctref:
             self._ctref.cancel()
@@ -72,21 +71,28 @@ class Polaroid(object):
 
 
 def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
-        logfile=None):
+        logfile=None, pidfile=None, timer=None, app=None):
+    app = app_or_default(app)
+
+    if pidfile:
+        pidlock = platforms.create_pidlock(pidfile).acquire()
+        atexit.register(pidlock.release)
+
     if not isinstance(loglevel, int):
-        loglevel = conf.LOG_LEVELS[loglevel.upper()]
-    logger = log.setup_logger(loglevel=loglevel,
-                              logfile=logfile,
-                              name="celery.evcam")
+        loglevel = LOG_LEVELS[loglevel.upper()]
+    logger = app.log.setup_logger(loglevel=loglevel,
+                                  logfile=logfile,
+                                  name="celery.evcam")
     logger.info(
         "-> evcam: Taking snapshots with %s (every %s secs.)\n" % (
             camera, freq))
-    state = State()
-    cam = instantiate(camera, state,
-                      freq=freq, maxrate=maxrate, logger=logger)
+    state = app.events.State()
+    cam = instantiate(camera, state, app=app,
+                      freq=freq, maxrate=maxrate, logger=logger,
+                      timer=timer)
     cam.install()
-    conn = establish_connection()
-    recv = EventReceiver(conn, handlers={"*": state.event})
+    conn = app.broker_connection()
+    recv = app.events.Receiver(conn, handlers={"*": state.event})
     try:
         try:
             recv.capture(limit=None)
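
(Editorial sketch of a custom camera for evcam(): subclass Polaroid and override on_shutter(), which is called every `freq` seconds with the current event State. The module path and the handler body are illustrative.)

    from celery.events.snapshot import Polaroid

    class Camera(Polaroid):

        def on_shutter(self, state):
            # persist or log a snapshot of the current cluster state
            self.logger.info("Tasks seen so far: %s" % (len(state.tasks), ))

    # started e.g. with:  evcam("myapp.monitoring.Camera", freq=2.0)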

+ 68 - 49
celery/events/state.py

@@ -1,24 +1,22 @@
 import time
 import heapq
 
-from collections import deque
 from threading import Lock
 
-from carrot.utils import partition
+from kombu.utils import partition
 
 from celery import states
 from celery.datastructures import AttributeDict, LocalCache
 from celery.utils import kwdict
 
-HEARTBEAT_EXPIRE = 150                  # 2 minutes, 30 seconds
+#: Heartbeat expiry time in seconds.  The worker will be considered offline
+#: if no heartbeat is received within this time.
+#: Default is 2:30 minutes.
+HEARTBEAT_EXPIRE = 150
 
 
 class Element(AttributeDict):
-    """Base class for types."""
-    visited = False
-
-    def __init__(self, **fields):
-        dict.__init__(self, fields)
+    """Base class for worker state elements."""
 
 
 class Worker(Element):
@@ -30,19 +28,22 @@ class Worker(Element):
         self.heartbeats = []
 
     def on_online(self, timestamp=None, **kwargs):
+        """Callback for the `worker-online` event."""
         self._heartpush(timestamp)
 
     def on_offline(self, **kwargs):
+        """Callback for the `worker-offline` event."""
         self.heartbeats = []
 
     def on_heartbeat(self, timestamp=None, **kwargs):
+        """Callback for the `worker-heartbeat` event."""
         self._heartpush(timestamp)
 
     def _heartpush(self, timestamp):
         if timestamp:
             heapq.heappush(self.heartbeats, timestamp)
             if len(self.heartbeats) > self.heartbeat_max:
-                self.heartbeats = self.heartbeats[:self.heartbeat_max]
+                self.heartbeats = self.heartbeats[self.heartbeat_max:]
 
     def __repr__(self):
         return "<Worker: %s (%s)" % (self.hostname,
@@ -56,49 +57,47 @@ class Worker(Element):
 
 class Task(Element):
     """Task State."""
-    _info_fields = ("args", "kwargs", "retries",
-                    "result", "eta", "runtime", "expires",
-                    "exception")
-
-    _merge_rules = {states.RECEIVED: ("name", "args", "kwargs",
-                                      "retries", "eta", "expires")}
-
-    _defaults = dict(uuid=None,
-                     name=None,
-                     state=states.PENDING,
-                     received=False,
-                     started=False,
-                     succeeded=False,
-                     failed=False,
-                     retried=False,
-                     revoked=False,
-                     args=None,
-                     kwargs=None,
-                     eta=None,
-                     expires=None,
-                     retries=None,
-                     worker=None,
-                     result=None,
-                     exception=None,
-                     timestamp=None,
-                     runtime=None,
+
+    #: How to merge out of order events.
+    #: Disorder is detected by logical ordering (e.g. task-received must have
+    #: happened before a task-failed event).
+    #:
+    #: A merge rule consists of a state and a list of fields to keep from
+    #: that state. ``(RECEIVED, ("name", "args"))`` means the name and args
+    #: fields are always taken from the RECEIVED state, and any values for
+    #: these fields received before or after are simply ignored.
+    merge_rules = {states.RECEIVED: ("name", "args", "kwargs",
+                                     "retries", "eta", "expires")}
+
+    #: :meth:`info` displays these fields by default.
+    _info_fields = ("args", "kwargs", "retries", "result",
+                    "eta", "runtime", "expires", "exception")
+
+    #: Default values.
+    _defaults = dict(uuid=None, name=None, state=states.PENDING,
+                     received=False, sent=False, started=False,
+                     succeeded=False, failed=False, retried=False,
+                     revoked=False, args=None, kwargs=None, eta=None,
+                     expires=None, retries=None, worker=None, result=None,
+                     exception=None, timestamp=None, runtime=None,
                      traceback=None)
 
     def __init__(self, **fields):
         super(Task, self).__init__(**dict(self._defaults, **fields))
 
-    def info(self, fields=None, extra=[]):
-        if fields is None:
-            fields = self._info_fields
-        fields = list(fields) + list(extra)
-        return dict((key, getattr(self, key, None))
-                        for key in fields
-                            if getattr(self, key, None) is not None)
-
     def update(self, state, timestamp, fields):
+        """Update state from new event.
+
+        :param state: State from event.
+        :param timestamp: Timestamp from event.
+        :param fields: Event data.
+
+        """
         if self.worker:
             self.worker.on_heartbeat(timestamp=timestamp)
-        if states.state(state) < states.state(self.state):
+        if state != states.RETRY and self.state != states.RETRY and \
+                states.state(state) < states.state(self.state):
+            # this state logically happens-before the current state, so merge.
             self.merge(state, timestamp, fields)
         else:
             self.state = state
@@ -106,35 +105,55 @@ class Task(Element):
             super(Task, self).update(fields)
 
     def merge(self, state, timestamp, fields):
-        keep = self._merge_rules.get(state)
+        """Merge with out of order event."""
+        keep = self.merge_rules.get(state)
         if keep is not None:
-            fields = dict((key, fields[key]) for key in keep)
+            fields = dict((key, fields.get(key)) for key in keep)
             super(Task, self).update(fields)
 
+    def on_sent(self, timestamp=None, **fields):
+        """Callback for the ``task-sent`` event."""
+        self.sent = timestamp
+        self.update(states.PENDING, timestamp, fields)
+
     def on_received(self, timestamp=None, **fields):
+        """Callback for the ``task-received`` event."""
         self.received = timestamp
         self.update(states.RECEIVED, timestamp, fields)
 
     def on_started(self, timestamp=None, **fields):
+        """Callback for the ``task-started`` event."""
         self.started = timestamp
         self.update(states.STARTED, timestamp, fields)
 
     def on_failed(self, timestamp=None, **fields):
+        """Callback for the ``task-failed`` event."""
         self.failed = timestamp
         self.update(states.FAILURE, timestamp, fields)
 
     def on_retried(self, timestamp=None, **fields):
+        """Callback for the ``task-retried`` event."""
         self.retried = timestamp
         self.update(states.RETRY, timestamp, fields)
 
     def on_succeeded(self, timestamp=None, **fields):
+        """Callback for the ``task-succeeded`` event."""
         self.succeeded = timestamp
         self.update(states.SUCCESS, timestamp, fields)
 
     def on_revoked(self, timestamp=None, **fields):
+        """Callback for the ``task-revoked`` event."""
         self.revoked = timestamp
         self.update(states.REVOKED, timestamp, fields)
 
+    def info(self, fields=None, extra=[]):
+        """Information about this task suitable for on-screen display."""
+        if fields is None:
+            fields = self._info_fields
+        return dict((key, getattr(self, key, None))
+                        for key in list(fields) + list(extra)
+                            if getattr(self, key, None) is not None)
+
     def __repr__(self):
         return "<Task: %s(%s) %s>" % (self.name, self.uuid, self.state)
 
@@ -253,7 +272,7 @@ class State(object):
     def tasks_by_timestamp(self, limit=None):
         """Get tasks by timestamp.
 
-        Returns a list of ``(uuid, task)`` tuples.
+        Returns a list of `(uuid, task)` tuples.
 
         """
         return self._sort_tasks_by_time(self.tasks.items()[:limit])
@@ -266,7 +285,7 @@ class State(object):
     def tasks_by_type(self, name, limit=None):
         """Get all tasks by type.
 
-        Returns a list of ``(uuid, task)`` tuples.
+        Returns a list of `(uuid, task)` tuples.
 
         """
         return self._sort_tasks_by_time([(uuid, task)
@@ -276,7 +295,7 @@ class State(object):
     def tasks_by_worker(self, hostname, limit=None):
         """Get all tasks by worker.
 
-        Returns a list of ``(uuid, task)`` tuples.
+        Returns a list of `(uuid, task)` tuples.
 
         """
         return self._sort_tasks_by_time([(uuid, task)
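
(Editorial sketch of feeding captured events into a State object and querying it, mirroring the evtop/evcam wiring above. The hostname and limits are illustrative.)

    from celery.app import app_or_default

    app = app_or_default()
    state = app.events.State()
    conn = app.broker_connection()
    recv = app.events.Receiver(conn, handlers={"*": state.event})
    recv.capture(limit=50)                        # handle 50 events, then return

    latest = state.tasks_by_timestamp(limit=10)   # [(uuid, Task), ...]
    on_node = state.tasks_by_worker("worker1.example.com")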

+ 7 - 20
celery/exceptions.py

@@ -1,61 +1,50 @@
+UNREGISTERED_FMT = """\
+Task of kind %s is not registered, please make sure it's imported.\
 """
 
-Common Exceptions
 
-"""
-
-UNREGISTERED_FMT = """
-Task of kind %s is not registered, please make sure it's imported.
-""".strip()
+class SystemTerminate(SystemExit):
+    """Signals that the worker should terminate."""
 
 
 class QueueNotFound(KeyError):
     """Task routed to a queue not in CELERY_QUEUES."""
-    pass
 
 
 class TimeLimitExceeded(Exception):
     """The time limit has been exceeded and the job has been terminated."""
-    pass
 
 
 class SoftTimeLimitExceeded(Exception):
     """The soft time limit has been exceeded. This exception is raised
     to give the task a chance to clean up."""
-    pass
 
 
 class WorkerLostError(Exception):
     """The worker processing a job has exited prematurely."""
-    pass
 
 
 class ImproperlyConfigured(Exception):
     """Celery is somehow improperly configured."""
-    pass
 
 
 class NotRegistered(KeyError):
     """The task is not registered."""
 
-    def __init__(self, message, *args, **kwargs):
-        message = UNREGISTERED_FMT % str(message)
-        KeyError.__init__(self, message, *args, **kwargs)
+    def __repr__(self):
+        return UNREGISTERED_FMT % str(self)
 
 
 class AlreadyRegistered(Exception):
     """The task is already registered."""
-    pass
 
 
 class TimeoutError(Exception):
     """The operation timed out."""
-    pass
 
 
 class MaxRetriesExceededError(Exception):
     """The tasks max restart limit has been exceeded."""
-    pass
 
 
 class RetryTaskError(Exception):
@@ -63,13 +52,11 @@ class RetryTaskError(Exception):
 
     def __init__(self, message, exc, *args, **kwargs):
         self.exc = exc
-        Exception.__init__(self, message, exc, *args,
-                           **kwargs)
+        Exception.__init__(self, message, exc, *args, **kwargs)
 
 
 class TaskRevokedError(Exception):
     """The task has been revoked, so no result available."""
-    pass
 
 
 class NotConfigured(UserWarning):
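
(Editorial sketch of how SoftTimeLimitExceeded is meant to be used inside a task body, relevant to the per-task time limits this branch adds. The task and its helpers are hypothetical.)

    from celery.exceptions import SoftTimeLimitExceeded
    from celery.task import task

    @task()
    def process(items):
        try:
            for item in items:
                handle(item)              # hypothetical long-running work
        except SoftTimeLimitExceeded:
            cleanup(items)                # hypothetical: release locks, roll back, ...
            raise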

+ 16 - 195
celery/execute/__init__.py

@@ -1,202 +1,23 @@
-from celery import conf
-from celery.datastructures import ExceptionInfo
-from celery.execute.trace import TaskTrace
-from celery.messaging import with_connection
-from celery.messaging import TaskPublisher
-from celery.registry import tasks
-from celery.result import AsyncResult, EagerResult
-from celery.routes import Router
-from celery.utils import gen_unique_id, fun_takes_kwargs, mattrgetter
+from celery import current_app
+from celery.utils import deprecated
 
-extract_exec_options = mattrgetter("queue", "routing_key", "exchange",
-                                   "immediate", "mandatory",
-                                   "priority", "serializer",
-                                   "delivery_mode")
+send_task = current_app.send_task
 
 
-@with_connection
-def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
-        task_id=None, publisher=None, connection=None, connect_timeout=None,
-        router=None, expires=None, **options):
-    """Run a task asynchronously by the celery daemon(s).
+@deprecated(removal="2.3", alternative="Use task.apply_async() instead.")
+def apply_async(task, *args, **kwargs):
+    """*[Deprecated]* Use `task.apply_async()`"""
+    return task.apply_async(*args, **kwargs)
 
-    :param task: The :class:`~celery.task.base.Task` to run.
 
-    :keyword args: The positional arguments to pass on to the
-      task (a :class:`list` or :class:`tuple`).
+@deprecated(removal="2.3", alternative="Use task.apply() instead.")
+def apply(task, *args, **kwargs):
+    """*[Deprecated]* Use `task.apply()`"""
+    return task.apply(*args, **kwargs)
 
-    :keyword kwargs: The keyword arguments to pass on to the
-      task (a :class:`dict`)
 
-    :keyword countdown: Number of seconds into the future that the task should
-      execute. Defaults to immediate delivery (Do not confuse that with
-      the ``immediate`` setting, they are unrelated).
-
-    :keyword eta: A :class:`~datetime.datetime` object that describes the
-      absolute time and date of when the task should execute. May not be
-      specified if ``countdown`` is also supplied. (Do not confuse this
-      with the ``immediate`` setting, they are unrelated).
-
-    :keyword expires: Either a :class:`int`, describing the number of seconds,
-      or a :class:`~datetime.datetime` object that describes the absolute time
-      and date of when the task should expire.
-      The task will not be executed after the expiration time.
-
-    :keyword connection: Re-use existing broker connection instead
-      of establishing a new one. The ``connect_timeout`` argument is
-      not respected if this is set.
-
-    :keyword connect_timeout: The timeout in seconds, before we give up
-      on establishing a connection to the AMQP server.
-
-    :keyword routing_key: The routing key used to route the task to a worker
-      server. Defaults to the tasks :attr:`~celery.task.base.Task.exchange`
-      attribute.
-
-    :keyword exchange: The named exchange to send the task to. Defaults to
-      the tasks :attr:`~celery.task.base.Task.exchange` attribute.
-
-    :keyword exchange_type: The exchange type to initalize the exchange as
-      if not already declared. Defaults to the tasks
-      :attr:`~celery.task.base.Task.exchange_type` attribute.
-
-    :keyword immediate: Request immediate delivery. Will raise an exception
-      if the task cannot be routed to a worker immediately.
-      (Do not confuse this parameter with the ``countdown`` and ``eta``
-      settings, as they are unrelated). Defaults to the tasks
-      :attr:`~celery.task.base.Task.immediate` attribute.
-
-    :keyword mandatory: Mandatory routing. Raises an exception if there's
-      no running workers able to take on this task. Defaults to the tasks
-      :attr:`~celery.task.base.Task.mandatory` attribute.
-
-    :keyword priority: The task priority, a number between ``0`` and ``9``.
-      Defaults to the tasks :attr:`~celery.task.base.Task.priority` attribute.
-
-    :keyword serializer: A string identifying the default serialization
-      method to use. Defaults to the :setting:`CELERY_TASK_SERIALIZER` setting.
-      Can be ``pickle`` ``json``, ``yaml``, or any custom serialization
-      methods that have been registered with
-      :mod:`carrot.serialization.registry`. Defaults to the tasks
-      :attr:`~celery.task.base.Task.serializer` attribute.
-
-    **Note**: If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will be
-    replaced by a local :func:`apply` call instead.
-
-    """
-    router = router or Router(conf.ROUTES, conf.get_queues(),
-                              conf.CREATE_MISSING_QUEUES)
-
-    if conf.ALWAYS_EAGER:
-        return apply(task, args, kwargs, task_id=task_id)
-
-    task = tasks[task.name]                 # get instance from registry
-
-    options = dict(extract_exec_options(task), **options)
-    options = router.route(options, task.name, args, kwargs)
-    exchange = options.get("exchange")
-    exchange_type = options.get("exchange_type")
-    expires = expires or task.expires
-
-    publish = publisher or task.get_publisher(connection, exchange=exchange,
-                                              exchange_type=exchange_type)
-    try:
-        task_id = publish.delay_task(task.name, args, kwargs, task_id=task_id,
-                                     countdown=countdown, eta=eta,
-                                     expires=expires, **options)
-    finally:
-        publisher or publish.close()
-
-    return task.AsyncResult(task_id)
-
-
-@with_connection
-def send_task(name, args=None, kwargs=None, countdown=None, eta=None,
-        task_id=None, publisher=None, connection=None, connect_timeout=None,
-        result_cls=AsyncResult, expires=None, **options):
-    """Send task by name.
-
-    Useful if you don't have access to the :class:`~celery.task.base.Task`
-    class.
-
-    :param name: Name of task to execute.
-
-    Supports the same arguments as :func:`apply_async`.
-
-    """
-    exchange = options.get("exchange")
-    exchange_type = options.get("exchange_type")
-
-    publish = publisher or TaskPublisher(connection, exchange=exchange,
-                                         exchange_type=exchange_type)
-    try:
-        task_id = publish.delay_task(name, args, kwargs, task_id=task_id,
-                                     countdown=countdown, eta=eta,
-                                     expires=expires, **options)
-    finally:
-        publisher or publish.close()
-
-    return result_cls(task_id)
-
-
-def delay_task(task_name, *args, **kwargs):
-    """Delay a task for execution by the ``celery`` daemon.
-
-    :param task_name: the name of a task registered in the task registry.
-    :param \*args: positional arguments to pass on to the task.
-    :param \*\*kwargs: keyword arguments to pass on to the task.
-
-    :raises celery.exceptions.NotRegistered: if no such task
-        has been registered in the task registry.
-
-    :returns: :class:`celery.result.AsyncResult`.
-
-    Example::
-
-        >>> r = delay_task("update_record", name="George Costanza", age=32)
-        >>> r.ready()
-        True
-        >>> r.result
-        "Record was updated"
-
-    """
-    return apply_async(tasks[task_name], args, kwargs)
-
-
-def apply(task, args, kwargs, **options):
-    """Apply the task locally.
-
-    :keyword throw: Re-raise task exceptions. Defaults to
-        the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` setting.
-
-    Blocks until the task completes, and returns a
-    :class:`celery.result.EagerResult` instance.
-
-    """
-    args = args or []
-    kwargs = kwargs or {}
-    task_id = options.get("task_id") or gen_unique_id()
-    retries = options.get("retries", 0)
-    throw = options.pop("throw", conf.EAGER_PROPAGATES_EXCEPTIONS)
-
-    task = tasks[task.name]     # make sure we get the instance, not class.
-
-    default_kwargs = {"task_name": task.name,
-                      "task_id": task_id,
-                      "task_retries": retries,
-                      "task_is_eager": True,
-                      "logfile": options.get("logfile"),
-                      "delivery_info": {"is_eager": True},
-                      "loglevel": options.get("loglevel", 0)}
-    supported_keys = fun_takes_kwargs(task.run, default_kwargs)
-    extend_with = dict((key, val) for key, val in default_kwargs.items()
-                            if key in supported_keys)
-    kwargs.update(extend_with)
-
-    trace = TaskTrace(task.name, task_id, args, kwargs, task=task)
-    retval = trace.execute()
-    if isinstance(retval, ExceptionInfo):
-        if throw:
-            raise retval.exception
-        retval = retval.exception
-    return EagerResult(task_id, retval, trace.status, traceback=trace.strtb)
+@deprecated(removal="2.3",
+            alternative="Use registry.tasks[name].delay instead.")
+def delay_task(task, *args, **kwargs):
+    from celery.registry import tasks
+    return tasks[task].apply_async(args, kwargs)
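
The deprecated helper now simply defers to the task registry. A minimal sketch of the replacement call, assuming a task registered under the hypothetical name ``tasks.add``::

    from celery.registry import tasks

    # Before (removed in 2.3):
    #   result = delay_task("tasks.add", 2, 2)
    # After: look the task up by name and call delay()/apply_async().
    result = tasks["tasks.add"].delay(2, 2)
    print result.task_id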

+ 29 - 8
celery/execute/trace.py

@@ -9,6 +9,7 @@ from celery.datastructures import ExceptionInfo
 
 
 class TraceInfo(object):
+
     def __init__(self, status=states.PENDING, retval=None, exc_info=None):
         self.status = status
         self.retval = retval
@@ -22,9 +23,13 @@ class TraceInfo(object):
             self.strtb = "\n".join(traceback.format_exception(*exc_info))
 
     @classmethod
-    def trace(cls, fun, args, kwargs):
+    def trace(cls, fun, args, kwargs, propagate=False):
         """Trace the execution of a function, calling the appropiate callback
-        if the function raises retry, an failure or returned successfully."""
+        if the function raises retry, an failure or returned successfully.
+
+        :keyword propagate: If true, errors will propagate to the caller.
+
+        """
         try:
             return cls(states.SUCCESS, retval=fun(*args, **kwargs))
         except (SystemExit, KeyboardInterrupt):
@@ -32,22 +37,29 @@ class TraceInfo(object):
         except RetryTaskError, exc:
             return cls(states.RETRY, retval=exc, exc_info=sys.exc_info())
         except Exception, exc:
+            if propagate:
+                raise
             return cls(states.FAILURE, retval=exc, exc_info=sys.exc_info())
-        except:
+        except:  # pragma: no cover
             # For Python2.4 where raising strings are still allowed.
+            if propagate:
+                raise
             return cls(states.FAILURE, retval=None, exc_info=sys.exc_info())
 
 
 class TaskTrace(object):
 
-    def __init__(self, task_name, task_id, args, kwargs, task=None, **_):
+    def __init__(self, task_name, task_id, args, kwargs, task=None,
+            request=None, propagate=None, **_):
         self.task_id = task_id
         self.task_name = task_name
         self.args = args
         self.kwargs = kwargs
         self.task = task or tasks[self.task_name]
+        self.request = request or {}
         self.status = states.PENDING
         self.strtb = None
+        self.propagate = propagate
         self._trace_handlers = {states.FAILURE: self.handle_failure,
                                 states.RETRY: self.handle_retry,
                                 states.SUCCESS: self.handle_success}
@@ -56,6 +68,8 @@ class TaskTrace(object):
         return self.execute()
 
     def execute(self):
+        self.task.request.update(self.request, args=self.args,
+                                               kwargs=self.kwargs)
         signals.task_prerun.send(sender=self.task, task_id=self.task_id,
                                  task=self.task, args=self.args,
                                  kwargs=self.kwargs)
@@ -64,16 +78,19 @@ class TaskTrace(object):
         signals.task_postrun.send(sender=self.task, task_id=self.task_id,
                                   task=self.task, args=self.args,
                                   kwargs=self.kwargs, retval=retval)
+        self.task.request.clear()
         return retval
 
     def _trace(self):
-        trace = TraceInfo.trace(self.task, self.args, self.kwargs)
+        trace = TraceInfo.trace(self.task, self.args, self.kwargs,
+                                propagate=self.propagate)
         self.status = trace.status
         self.strtb = trace.strtb
+        handler = self._trace_handlers[trace.status]
+        r = handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
         self.handle_after_return(trace.status, trace.retval,
                                  trace.exc_type, trace.tb, trace.strtb)
-        handler = self._trace_handlers[trace.status]
-        return handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
+        return r
 
     def handle_after_return(self, status, retval, type_, tb, strtb):
         einfo = None
@@ -92,7 +109,7 @@ class TaskTrace(object):
 
         # Create a simpler version of the RetryTaskError that stringifies
         # the original exception instead of including the exception instance.
-        # This is for reporting the retry in logs, e-mail etc, while
+        # This is for reporting the retry in logs, email etc, while
         # guaranteeing pickleability.
         message, orig_exc = exc.args
         expanded_msg = "%s: %s" % (message, str(orig_exc))
@@ -108,4 +125,8 @@ class TaskTrace(object):
         einfo = ExceptionInfo((type_, exc, tb))
         self.task.on_failure(exc, self.task_id,
                              self.args, self.kwargs, einfo=einfo)
+        signals.task_failure.send(sender=self.task, task_id=self.task_id,
+                                  exception=exc, args=self.args,
+                                  kwargs=self.kwargs, traceback=tb,
+                                  einfo=einfo)
         return einfo
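
A rough sketch of how the new ``propagate`` flag and ``task_failure`` signal fit together; the task name, id and handler below are hypothetical, and the task must already be registered::

    from celery import signals
    from celery.execute.trace import TaskTrace

    def on_failure(sender=None, task_id=None, exception=None, **kwargs):
        # Hypothetical handler, called whenever a traced task fails.
        print "task %s[%s] failed: %r" % (sender.name, task_id, exception)

    signals.task_failure.connect(on_failure)

    # propagate=False returns an ExceptionInfo on failure;
    # propagate=True re-raises the original exception instead.
    trace = TaskTrace("tasks.add", "hypothetical-id", (2, 2), {},
                      propagate=False)
    result = trace.execute()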

+ 2 - 1
celery/loaders/__init__.py

@@ -2,7 +2,8 @@ import os
 
 from celery.utils import get_cls_by_name
 
-LOADER_ALIASES = {"default": "celery.loaders.default.Loader",
+LOADER_ALIASES = {"app": "celery.loaders.app.AppLoader",
+                  "default": "celery.loaders.default.Loader",
                   "django": "djcelery.loaders.DjangoLoader"}
 _loader = None
 _settings = None

+ 10 - 0
celery/loaders/app.py

@@ -0,0 +1,10 @@
+from celery.loaders.base import BaseLoader
+
+
+class AppLoader(BaseLoader):
+
+    def on_worker_init(self):
+        self.import_default_modules()
+
+    def read_configuration(self):
+        return {}
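
``AppLoader`` is deliberately minimal: it imports the default task modules and supplies no configuration of its own. For comparison, a hypothetical loader that bakes its settings in could look like this sketch (module and setting values are made up)::

    from celery.loaders.base import BaseLoader

    class InlineLoader(BaseLoader):
        """Hypothetical loader with a hard-coded configuration."""

        def read_configuration(self):
            return {"BROKER_HOST": "localhost",
                    "CELERY_IMPORTS": ("myapp.tasks", )}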

+ 119 - 27
celery/loaders/base.py

@@ -1,10 +1,24 @@
+import importlib
 import os
-import sys
+import re
+import warnings
 
-from importlib import import_module
+from anyjson import deserialize
+from kombu.utils import cached_property
+
+from celery.datastructures import DictAttribute
+from celery.exceptions import ImproperlyConfigured
+from celery.utils import get_cls_by_name
+from celery.utils import import_from_cwd as _import_from_cwd
 
 BUILTIN_MODULES = ["celery.task"]
 
+ERROR_ENVVAR_NOT_SET = (
+"""The environment variable %r is not set,
+and as such the configuration could not be loaded.
+Please set this variable and make it point to
+a configuration module.""")
+
 
 class BaseLoader(object):
     """The base class for loaders.
@@ -22,10 +36,14 @@ class BaseLoader(object):
         * What modules are imported to find tasks?
 
     """
-    _conf_cache = None
     worker_initialized = False
     override_backends = {}
     configured = False
+    _conf = None
+
+    def __init__(self, app=None, **kwargs):
+        from celery.app import app_or_default
+        self.app = app_or_default(app)
 
     def on_task_init(self, task_id, task):
         """This method is called before a task is executed."""
@@ -36,17 +54,23 @@ class BaseLoader(object):
         pass
 
     def on_worker_init(self):
-        """This method is called when the worker (``celeryd``) starts."""
+        """This method is called when the worker (:program:`celeryd`)
+        starts."""
         pass
 
     def import_task_module(self, module):
         return self.import_from_cwd(module)
 
     def import_module(self, module):
-        return import_module(module)
+        return importlib.import_module(module)
+
+    def import_from_cwd(self, module, imp=None):
+        if imp is None:
+            imp = self.import_module
+        return _import_from_cwd(module, imp)
 
     def import_default_modules(self):
-        imports = getattr(self.conf, "CELERY_IMPORTS", None) or []
+        imports = self.conf.get("CELERY_IMPORTS") or []
         imports = set(list(imports) + BUILTIN_MODULES)
         return map(self.import_task_module, imports)
 
@@ -55,30 +79,98 @@ class BaseLoader(object):
             self.worker_initialized = True
             self.on_worker_init()
 
-    def import_from_cwd(self, module, imp=None):
-        """Import module, but make sure it finds modules
-        located in the current directory.
+    def config_from_envvar(self, variable_name, silent=False):
+        module_name = os.environ.get(variable_name)
+        if not module_name:
+            if silent:
+                return False
+            raise ImproperlyConfigured(ERROR_ENVVAR_NOT_SET % (variable_name, ))
+        return self.config_from_object(module_name, silent=silent)
 
-        Modules located in the current directory has
-        precedence over modules located in ``sys.path``.
-        """
-        if imp is None:
-            imp = self.import_module
-        cwd = os.getcwd()
-        if cwd in sys.path:
-            return imp(module)
-        sys.path.insert(0, cwd)
-        try:
-            return imp(module)
-        finally:
+    def config_from_object(self, obj, silent=False):
+        if isinstance(obj, basestring):
             try:
-                sys.path.remove(cwd)
-            except ValueError:          # pragma: no cover
-                pass
+                obj = get_cls_by_name(obj, imp=self.import_from_cwd)
+            except (ImportError, AttributeError):
+                if silent:
+                    return False
+                raise
+        if not hasattr(obj, "__getitem__"):
+            obj = DictAttribute(obj)
+        self._conf = obj
+        return True
+
+    def cmdline_config_parser(self, args, namespace="celery",
+                re_type=re.compile(r"\((\w+)\)"),
+                extra_types={"json": deserialize},
+                override_types={"tuple": "json",
+                                "list": "json",
+                                "dict": "json"}):
+        from celery.app.defaults import Option, NAMESPACES
+        namespace = namespace.upper()
+        typemap = dict(Option.typemap, **extra_types)
+
+        def getarg(arg):
+            """Parse a single configuration definition from
+            the command line."""
+
+            ## find key/value
+            # ns.key=value|ns_key=value (case insensitive)
+            key, value = arg.split('=', 1)
+            key = key.upper().replace(".", "_")
+
+            ## find namespace.
+            # .key=value|_key=value expands to default namespace.
+            if key[0] == '_':
+                ns, key = namespace, key[1:]
+            else:
+                # find namespace part of key
+                ns, key = key.split('_', 1)
+
+            ns_key = (ns and ns + "_" or "") + key
+
+            # (type)value makes cast to custom type.
+            cast = re_type.match(value)
+            if cast:
+                type_ = cast.groups()[0]
+                type_ = override_types.get(type_, type_)
+                value = value[len(cast.group()):]
+                value = typemap[type_](value)
+            else:
+                try:
+                    value = NAMESPACES[ns][key].to_python(value)
+                except ValueError, exc:
+                    # display key name in error message.
+                    raise ValueError("%r: %s" % (ns_key, exc))
+            return ns_key, value
+
+        return dict(map(getarg, args))
+
+    def mail_admins(self, subject, body, fail_silently=False,
+            sender=None, to=None, host=None, port=None,
+            user=None, password=None, timeout=None):
+        try:
+            message = self.mail.Message(sender=sender, to=to,
+                                        subject=subject, body=body)
+            mailer = self.mail.Mailer(host=host, port=port,
+                                      user=user, password=password,
+                                      timeout=timeout)
+            mailer.send(message)
+        except Exception, exc:
+            if not fail_silently:
+                raise
+            warnings.warn(self.mail.SendmailWarning(
+                "Mail could not be sent: %r %r" % (
+                    exc, {"To": to, "Subject": subject})))
 
     @property
     def conf(self):
         """Loader configuration."""
-        if not self._conf_cache:
-            self._conf_cache = self.read_configuration()
-        return self._conf_cache
+        if self._conf is None:
+            self._conf = self.read_configuration()
+        return self._conf
+
+    @cached_property
+    def mail(self):
+        from celery.utils import mail
+        return mail
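
The new ``cmdline_config_parser`` turns ``namespace.key=value`` strings into a settings dictionary, with an optional ``(type)`` prefix casting the value. A small sketch; the argument strings are made up, and the uncast ``broker.host`` form assumes that key is declared in :mod:`celery.app.defaults`::

    from celery.loaders.app import AppLoader

    loader = AppLoader()
    settings = loader.cmdline_config_parser([
        "broker.host=amqp.example.com",    # -> BROKER_HOST
        "celeryd.concurrency=(json)16",    # (json) casts "16" to the int 16
        ".task_serializer=json",           # leading "." -> CELERY namespace
    ])
    print settings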

+ 6 - 5
celery/loaders/default.py

@@ -2,6 +2,7 @@ import os
 import warnings
 from importlib import import_module
 
+from celery.datastructures import AttributeDict
 from celery.loaders.base import BaseLoader
 from celery.datastructures import AttributeDict
 from celery.exceptions import NotConfigured
@@ -21,7 +22,7 @@ DEFAULT_UNCONFIGURED_SETTINGS = {
 
 
 def wanted_module_item(item):
-    return not item.startswith("_")
+    return item[0].isupper() and not item.startswith("_")
 
 
 class Loader(BaseLoader):
@@ -41,16 +42,16 @@ class Loader(BaseLoader):
         return settings
 
     def read_configuration(self):
-        """Read configuration from ``celeryconfig.py`` and configure
+        """Read configuration from :file:`celeryconfig.py` and configure
         celery and Django so it can be used by regular Python."""
         configname = os.environ.get("CELERY_CONFIG_MODULE",
                                     DEFAULT_CONFIG_MODULE)
         try:
             celeryconfig = self.import_from_cwd(configname)
         except ImportError:
-            warnings.warn("No celeryconfig.py module found! Please make "
-                          "sure it exists and is available to Python.",
-                          NotConfigured)
+            warnings.warn(NotConfigured(
+                "No %r module found! Please make sure it exists and "
+                "is available to Python." % (configname, )))
             return self.setup_settings(DEFAULT_UNCONFIGURED_SETTINGS)
         else:
             usercfg = dict((key, getattr(celeryconfig, key))
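
With the stricter ``wanted_module_item`` above, only uppercase names are collected from the configuration module. A hypothetical ``celeryconfig.py`` for illustration::

    # celeryconfig.py (hypothetical)
    BROKER_HOST = "localhost"            # collected: uppercase
    CELERY_IMPORTS = ("myapp.tasks", )   # collected: uppercase
    helper = lambda x: x * 2             # ignored: not uppercase
    _PRIVATE = {"secret": True}          # ignored: leading underscore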

+ 114 - 0
celery/local.py

@@ -0,0 +1,114 @@
+class LocalProxy(object):
+    """Code stolen from werkzeug.local.LocalProxy."""
+    __slots__ = ('__local', '__dict__', '__name__')
+
+    def __init__(self, local, name=None):
+        object.__setattr__(self, '_LocalProxy__local', local)
+        object.__setattr__(self, '__name__', name)
+
+    def _get_current_object(self):
+        """Return the current object.  This is useful if you want the real
+        object behind the proxy at a time for performance reasons or because
+        you want to pass the object into a different context.
+        """
+        if not hasattr(self.__local, '__release_local__'):
+            return self.__local()
+        try:
+            return getattr(self.__local, self.__name__)
+        except AttributeError:
+            raise RuntimeError('no object bound to %s' % self.__name__)
+
+    @property
+    def __dict__(self):
+        try:
+            return self._get_current_object().__dict__
+        except RuntimeError:
+            raise AttributeError('__dict__')
+
+    def __repr__(self):
+        try:
+            obj = self._get_current_object()
+        except RuntimeError:
+            return '<%s unbound>' % self.__class__.__name__
+        return repr(obj)
+
+    def __nonzero__(self):
+        try:
+            return bool(self._get_current_object())
+        except RuntimeError:
+            return False
+
+    def __unicode__(self):
+        try:
+            return unicode(self._get_current_object())
+        except RuntimeError:
+            return repr(self)
+
+    def __dir__(self):
+        try:
+            return dir(self._get_current_object())
+        except RuntimeError:
+            return []
+
+    def __getattr__(self, name):
+        if name == '__members__':
+            return dir(self._get_current_object())
+        return getattr(self._get_current_object(), name)
+
+    def __setitem__(self, key, value):
+        self._get_current_object()[key] = value
+
+    def __delitem__(self, key):
+        del self._get_current_object()[key]
+
+    def __setslice__(self, i, j, seq):
+        self._get_current_object()[i:j] = seq
+
+    def __delslice__(self, i, j):
+        del self._get_current_object()[i:j]
+
+    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
+    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
+    __str__ = lambda x: str(x._get_current_object())
+    __lt__ = lambda x, o: x._get_current_object() < o
+    __le__ = lambda x, o: x._get_current_object() <= o
+    __eq__ = lambda x, o: x._get_current_object() == o
+    __ne__ = lambda x, o: x._get_current_object() != o
+    __gt__ = lambda x, o: x._get_current_object() > o
+    __ge__ = lambda x, o: x._get_current_object() >= o
+    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
+    __hash__ = lambda x: hash(x._get_current_object())
+    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
+    __len__ = lambda x: len(x._get_current_object())
+    __getitem__ = lambda x, i: x._get_current_object()[i]
+    __iter__ = lambda x: iter(x._get_current_object())
+    __contains__ = lambda x, i: i in x._get_current_object()
+    __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
+    __add__ = lambda x, o: x._get_current_object() + o
+    __sub__ = lambda x, o: x._get_current_object() - o
+    __mul__ = lambda x, o: x._get_current_object() * o
+    __floordiv__ = lambda x, o: x._get_current_object() // o
+    __mod__ = lambda x, o: x._get_current_object() % o
+    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
+    __pow__ = lambda x, o: x._get_current_object() ** o
+    __lshift__ = lambda x, o: x._get_current_object() << o
+    __rshift__ = lambda x, o: x._get_current_object() >> o
+    __and__ = lambda x, o: x._get_current_object() & o
+    __xor__ = lambda x, o: x._get_current_object() ^ o
+    __or__ = lambda x, o: x._get_current_object() | o
+    __div__ = lambda x, o: x._get_current_object().__div__(o)
+    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
+    __neg__ = lambda x: -(x._get_current_object())
+    __pos__ = lambda x: +(x._get_current_object())
+    __abs__ = lambda x: abs(x._get_current_object())
+    __invert__ = lambda x: ~(x._get_current_object())
+    __complex__ = lambda x: complex(x._get_current_object())
+    __int__ = lambda x: int(x._get_current_object())
+    __long__ = lambda x: long(x._get_current_object())
+    __float__ = lambda x: float(x._get_current_object())
+    __oct__ = lambda x: oct(x._get_current_object())
+    __hex__ = lambda x: hex(x._get_current_object())
+    __index__ = lambda x: x._get_current_object().__index__()
+    __coerce__ = lambda x, o: x.__coerce__(x, o)
+    __enter__ = lambda x: x.__enter__()
+    __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
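
``LocalProxy`` forwards practically every special method to the object returned by the wrapped callable (or local). A minimal sketch::

    from celery.local import LocalProxy

    _state = {"debug": False}
    state = LocalProxy(lambda: _state)   # the callable is invoked on each access

    state["debug"] = True                # __setitem__ goes to the real dict
    assert _state["debug"] is True
    assert len(state) == 1               # __len__ is proxied too
    assert bool(state)                   # __nonzero__ -> truthiness of the dict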

+ 173 - 129
celery/log.py

@@ -1,70 +1,111 @@
 """celery.log"""
 import logging
 import threading
-import time
-import os
 import sys
 import traceback
 
-from multiprocessing import current_process
-from multiprocessing import util as mputil
+try:
+    from multiprocessing import current_process
+    from multiprocessing import util as mputil
+except ImportError:
+    current_process = mputil = None
 
-from celery import conf
 from celery import signals
-from celery.utils import noop, LOG_LEVELS
+from celery import current_app
+from celery.utils import LOG_LEVELS, isatty
 from celery.utils.compat import LoggerAdapter
+from celery.utils.compat import WatchedFileHandler
+from celery.utils.encoding import safe_str
 from celery.utils.patch import ensure_process_aware_logger
 from celery.utils.term import colored
 
-# The logging subsystem is only configured once per process.
-# setup_logging_subsystem sets this flag, and subsequent calls
-# will do nothing.
-_setup = False
-
-COLORS = {"DEBUG": "blue",
-          "WARNING": "yellow",
-          "ERROR": "red",
-          "CRITICAL": "magenta"}
-
 
 class ColorFormatter(logging.Formatter):
+    #: Loglevel -> Color mapping.
+    COLORS = colored().names
+    colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"],
+              "ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]}
 
     def __init__(self, msg, use_color=True):
         logging.Formatter.__init__(self, msg)
         self.use_color = use_color
 
+    def formatException(self, ei):
+        r = logging.Formatter.formatException(self, ei)
+        if isinstance(r, str):
+            return r.decode("utf-8", "replace")    # Convert to unicode
+        return r
+
     def format(self, record):
         levelname = record.levelname
+        color = self.colors.get(levelname)
 
-        if self.use_color and levelname in COLORS:
-            record.msg = str(colored().names[COLORS[levelname]](record.msg))
+        if self.use_color and color:
+            try:
+                record.msg = color(safe_str(record.msg))
+            except Exception:
+                record.msg = "<Unrepresentable %r: %r>" % (
+                        type(record.msg), traceback.format_stack())
 
         # Very ugly, but have to make sure processName is supported
         # by foreign logger instances.
         # (processName is always supported by Python 2.7)
         if "processName" not in record.__dict__:
-            record.__dict__["processName"] = current_process()._name
-        return logging.Formatter.format(self, record)
-
-
-def get_task_logger(loglevel=None, name=None):
-    logger = logging.getLogger(name or "celery.task.default")
-    if loglevel is not None:
-        logger.setLevel(loglevel)
-    return logger
-
-
-def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
-        format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
-        **kwargs):
-    global _setup
-    if not _setup:
-        try:
-            mputil._logger = None
-        except AttributeError:
-            pass
+            process_name = current_process and current_process()._name or ""
+            record.__dict__["processName"] = process_name
+        t = logging.Formatter.format(self, record)
+        if isinstance(t, unicode):
+            return t.encode("utf-8", "replace")
+        return t
+
+
+class Logging(object):
+    #: The logging subsystem is only configured once per process.
+    #: setup_logging_subsystem sets this flag, and subsequent calls
+    #: will do nothing.
+    _setup = False
+
+    def __init__(self, app):
+        self.app = app
+        self.loglevel = self.app.conf.CELERYD_LOG_LEVEL
+        self.format = self.app.conf.CELERYD_LOG_FORMAT
+        self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
+        self.colorize = self.app.conf.CELERYD_LOG_COLOR
+
+    def supports_color(self, logfile=None):
+        if self.app.IS_WINDOWS:
+            # Windows does not support ANSI color codes.
+            return False
+        if self.colorize is None:
+            # Only use color if there is no active log file
+            # and stderr is an actual terminal.
+            return logfile is None and isatty(sys.stderr)
+        return self.colorize
+
+    def colored(self, logfile=None):
+        return colored(enabled=self.supports_color(logfile))
+
+    def get_task_logger(self, loglevel=None, name=None):
+        logger = logging.getLogger(name or "celery.task.default")
+        if loglevel is not None:
+            logger.setLevel(loglevel)
+        return logger
+
+    def setup_logging_subsystem(self, loglevel=None, logfile=None,
+            format=None, colorize=None, **kwargs):
+        if Logging._setup:
+            return
+        loglevel = loglevel or self.loglevel
+        format = format or self.format
+        if colorize is None:
+            colorize = self.supports_color(logfile)
+
+        if mputil:
+            try:
+                mputil._logger = None
+            except AttributeError:
+                pass
         ensure_process_aware_logger()
-        logging.Logger.manager.loggerDict.clear()
         receivers = signals.setup_logging.send(sender=None,
                                                loglevel=loglevel,
                                                logfile=logfile,
@@ -72,112 +113,113 @@ def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
                                                colorize=colorize)
         if not receivers:
             root = logging.getLogger()
-            mp = mputil.get_logger()
+
+            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
+                root.handlers = []
+
+            mp = mputil and mputil.get_logger() or None
             for logger in (root, mp):
-                _setup_logger(logger, logfile, format, colorize, **kwargs)
-                logger.setLevel(loglevel)
-        _setup = True
+                if logger:
+                    self._setup_logger(logger, logfile, format,
+                                       colorize, **kwargs)
+                    logger.setLevel(loglevel)
+        Logging._setup = True
         return receivers
 
+    def _detect_handler(self, logfile=None):
+        """Create log handler with either a filename, an open stream
+        or :const:`None` (stderr)."""
+        if logfile is None:
+            logfile = sys.__stderr__
+        if hasattr(logfile, "write"):
+            return logging.StreamHandler(logfile)
+        return WatchedFileHandler(logfile)
 
-def _detect_handler(logfile=None):
-    """Create log handler with either a filename, an open stream
-    or ``None`` (stderr)."""
-    if not logfile or hasattr(logfile, "write"):
-        return logging.StreamHandler(logfile)
-    return logging.FileHandler(logfile)
+    def get_default_logger(self, loglevel=None, name="celery"):
+        """Get default logger instance.
 
+        :keyword loglevel: Initial log level.
 
-def get_default_logger(loglevel=None, name="celery"):
-    """Get default logger instance.
+        """
+        logger = logging.getLogger(name)
+        if loglevel is not None:
+            logger.setLevel(loglevel)
+        return logger
 
-    :keyword loglevel: Initial log level.
+    def setup_logger(self, loglevel=None, logfile=None,
+            format=None, colorize=None, name="celery", root=True,
+            app=None, **kwargs):
+        """Setup the :mod:`multiprocessing` logger.
 
-    """
-    logger = logging.getLogger(name)
-    if loglevel is not None:
-        logger.setLevel(loglevel)
-    return logger
+        If `logfile` is not specified, then `sys.stderr` is used.
 
+        Returns logger object.
 
-def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
-        format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
-        name="celery", root=True, **kwargs):
-    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
-    then ``stderr`` is used.
+        """
+        loglevel = loglevel or self.loglevel
+        format = format or self.format
+        if colorize is None:
+            colorize = self.supports_color(logfile)
 
-    Returns logger object.
+        if not root or self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
+            return self._setup_logger(self.get_default_logger(loglevel, name),
+                                      logfile, format, colorize, **kwargs)
+        self.setup_logging_subsystem(loglevel, logfile,
+                                     format, colorize, **kwargs)
+        return self.get_default_logger(name=name)
 
-    """
-    if not root:
-        return _setup_logger(get_default_logger(loglevel, name),
-                             logfile, format, colorize, **kwargs)
-    setup_logging_subsystem(loglevel, logfile, format, colorize, **kwargs)
-    return get_default_logger(name=name)
+    def setup_task_logger(self, loglevel=None, logfile=None, format=None,
+            colorize=None, task_name=None, task_id=None, propagate=False,
+            app=None, **kwargs):
+        """Setup the task logger.
 
+        If `logfile` is not specified, then `sys.stderr` is used.
 
-def setup_task_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
-        format=conf.CELERYD_TASK_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
-        task_kwargs=None, **kwargs):
-    """Setup the task logger. If ``logfile`` is not specified, then
-    ``stderr`` is used.
+        Returns logger object.
 
-    Returns logger object.
+        """
+        loglevel = loglevel or self.loglevel
+        format = format or self.task_format
+        if colorize is None:
+            colorize = self.supports_color(logfile)
 
-    """
-    if task_kwargs is None:
-        task_kwargs = {}
-    task_kwargs.setdefault("task_id", "-?-")
-    task_name = task_kwargs.get("task_name")
-    task_kwargs.setdefault("task_name", "-?-")
-    logger = _setup_logger(get_task_logger(loglevel, task_name),
-                            logfile, format, colorize, **kwargs)
-    return LoggerAdapter(logger, task_kwargs)
+        logger = self._setup_logger(self.get_task_logger(loglevel, task_name),
+                                    logfile, format, colorize, **kwargs)
+        logger.propagate = int(propagate)    # this is an int for some reason.
+                                             # better to not question why.
+        return LoggerAdapter(logger, {"task_id": task_id,
+                                      "task_name": task_name})
 
+    def redirect_stdouts_to_logger(self, logger, loglevel=None):
+        """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
+        logging instance.
 
-def _setup_logger(logger, logfile, format, colorize,
-        formatter=ColorFormatter, **kwargs):
+        :param logger: The :class:`logging.Logger` instance to redirect to.
+        :param loglevel: The loglevel redirected messages will be logged as.
 
-    if logger.handlers:                 # already configured
+        """
+        proxy = LoggingProxy(logger, loglevel)
+        sys.stdout = sys.stderr = proxy
+        return proxy
+
+    def _setup_logger(self, logger, logfile, format, colorize,
+            formatter=ColorFormatter, **kwargs):
+
+        if logger.handlers:  # Logger already configured
+            return logger
+
+        handler = self._detect_handler(logfile)
+        handler.setFormatter(formatter(format, use_color=colorize))
+        logger.addHandler(handler)
         return logger
 
-    handler = _detect_handler(logfile)
-    handler.setFormatter(formatter(format, use_color=colorize))
-    logger.addHandler(handler)
-    return logger
-
-
-def emergency_error(logfile, message):
-    """Emergency error logging, for when there's no standard file
-    descriptors open because the process has been daemonized or for
-    some other reason."""
-    closefh = noop
-    logfile = logfile or sys.__stderr__
-    if hasattr(logfile, "write"):
-        logfh = logfile
-    else:
-        logfh = open(logfile, "a")
-        closefh = logfh.close
-    try:
-        logfh.write("[%(asctime)s: CRITICAL/%(pid)d]: %(message)s\n" % {
-                        "asctime": time.asctime(),
-                        "pid": os.getpid(),
-                        "message": message})
-    finally:
-        closefh()
-
-
-def redirect_stdouts_to_logger(logger, loglevel=None):
-    """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
-    logging instance.
-
-    :param logger: The :class:`logging.Logger` instance to redirect to.
-    :param loglevel: The loglevel redirected messages will be logged as.
 
-    """
-    proxy = LoggingProxy(logger, loglevel)
-    sys.stdout = sys.stderr = proxy
-    return proxy
+setup_logging_subsystem = current_app.log.setup_logging_subsystem
+get_default_logger = current_app.log.get_default_logger
+setup_logger = current_app.log.setup_logger
+setup_task_logger = current_app.log.setup_task_logger
+get_task_logger = current_app.log.get_task_logger
+redirect_stdouts_to_logger = current_app.log.redirect_stdouts_to_logger
 
 
 class LoggingProxy(object):
@@ -202,7 +244,7 @@ class LoggingProxy(object):
 
     def _safewrap_handlers(self):
         """Make the logger handlers dump internal errors to
-        ``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
+        `sys.__stderr__` instead of `sys.stderr` to circumvent
         infinite loops."""
 
         def wrap_handler(handler):                  # pragma: no cover
@@ -228,7 +270,7 @@ class LoggingProxy(object):
 
     def write(self, data):
         if getattr(self._thread, "recurse_protection", False):
-            # logger is logging back to this file, so stop recursing.
+            # Logger is logging back to this file, so stop recursing.
             return
         """Write message to logging object."""
         data = data.strip()
@@ -240,7 +282,7 @@ class LoggingProxy(object):
                 self._thread.recurse_protection = False
 
     def writelines(self, sequence):
-        """``writelines(sequence_of_strings) -> None``.
+        """`writelines(sequence_of_strings) -> None`.
 
         Write the strings to the file.
 
@@ -248,7 +290,8 @@ class LoggingProxy(object):
         This is equivalent to calling :meth:`write` for each string.
 
         """
-        map(self.write, sequence)
+        for part in sequence:
+            self.write(part)
 
     def flush(self):
         """This object is not buffered so any :meth:`flush` requests
@@ -261,7 +304,7 @@ class LoggingProxy(object):
         self.closed = True
 
     def isatty(self):
-        """Always returns ``False``. Just here for file support."""
+        """Always returns :const:`False`. Just here for file support."""
         return False
 
     def fileno(self):
@@ -278,7 +321,8 @@ class SilenceRepeated(object):
 
     def __call__(self, *msgs):
         if self._iterations >= self.max_iterations:
-            map(self.action, msgs)
+            for msg in msgs:
+                self.action(msg)
             self._iterations = 0
         else:
             self._iterations += 1
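
The module-level helpers now delegate to the ``Logging`` instance held by the current app. A sketch of typical use, assuming a configured default app (the task name and id are placeholders)::

    import logging

    from celery import current_app

    log = current_app.log
    log.setup_logging_subsystem(loglevel=logging.INFO)

    logger = log.get_default_logger(name="celery")
    task_logger = log.setup_task_logger(task_name="tasks.add",
                                        task_id="hypothetical-id")
    task_logger.info("task started")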

+ 0 - 16
celery/management/commands/celeryd.py

@@ -1,16 +0,0 @@
-"""
-
-Start the celery daemon from the Django management command.
-
-"""
-from django.core.management.base import BaseCommand
-
-import celery.models            # <-- shows upgrade instructions at exit.
-
-
-class Command(BaseCommand):
-    """Run the celery daemon."""
-    help = 'Run the celery daemon'
-
-    def handle(self, *args, **options):
-        pass

+ 8 - 311
celery/messaging.py

@@ -1,311 +1,8 @@
-"""
-
-Sending and Receiving Messages
-
-"""
-import socket
-import warnings
-
-from datetime import datetime, timedelta
-from itertools import count
-
-from carrot.connection import BrokerConnection
-from carrot.messaging import Publisher, Consumer, ConsumerSet as _ConsumerSet
-
-from celery import conf
-from celery import signals
-from celery.utils import gen_unique_id, mitemgetter, noop
-from celery.utils.functional import wraps
-
-
-MSG_OPTIONS = ("mandatory", "priority", "immediate",
-               "routing_key", "serializer", "delivery_mode")
-
-get_msg_options = mitemgetter(*MSG_OPTIONS)
-extract_msg_options = lambda d: dict(zip(MSG_OPTIONS, get_msg_options(d)))
-default_queue = conf.get_queues()[conf.DEFAULT_QUEUE]
-
-_queues_declared = False
-_exchanges_declared = set()
-
-
-class TaskPublisher(Publisher):
-    """Publish tasks."""
-    exchange = default_queue["exchange"]
-    exchange_type = default_queue["exchange_type"]
-    routing_key = conf.DEFAULT_ROUTING_KEY
-    serializer = conf.TASK_SERIALIZER
-    auto_declare = False
-
-    def __init__(self, *args, **kwargs):
-        super(TaskPublisher, self).__init__(*args, **kwargs)
-
-        # Make sure all queues are declared.
-        global _queues_declared
-        if not _queues_declared:
-            consumers = get_consumer_set(self.connection)
-            consumers.close()
-            _queues_declared = True
-        self.declare()
-
-    def declare(self):
-        if self.exchange and self.exchange not in _exchanges_declared:
-            super(TaskPublisher, self).declare()
-            _exchanges_declared.add(self.exchange)
-
-    def delay_task(self, task_name, task_args=None, task_kwargs=None,
-            countdown=None, eta=None, task_id=None, taskset_id=None,
-            exchange=None, exchange_type=None, expires=None, **kwargs):
-        """Delay task for execution by the celery nodes."""
-
-        task_id = task_id or gen_unique_id()
-        task_args = task_args or []
-        task_kwargs = task_kwargs or {}
-        now = None
-        if countdown:                       # convert countdown to ETA.
-            now = datetime.now()
-            eta = now + timedelta(seconds=countdown)
-
-        if not isinstance(task_args, (list, tuple)):
-            raise ValueError("task args must be a list or tuple")
-        if not isinstance(task_kwargs, dict):
-            raise ValueError("task kwargs must be a dictionary")
-
-        if isinstance(expires, int):
-            now = now or datetime.now()
-            expires = now + timedelta(seconds=expires)
-
-        message_data = {
-            "task": task_name,
-            "id": task_id,
-            "args": task_args or [],
-            "kwargs": task_kwargs or {},
-            "retries": kwargs.get("retries", 0),
-            "eta": eta and eta.isoformat(),
-            "expires": expires and expires.isoformat(),
-        }
-
-        if taskset_id:
-            message_data["taskset"] = taskset_id
-
-        # custom exchange passed, need to declare it
-        if exchange and exchange not in _exchanges_declared:
-            exchange_type = exchange_type or self.exchange_type
-            self.backend.exchange_declare(exchange=exchange,
-                                          type=exchange_type,
-                                          durable=self.durable,
-                                          auto_delete=self.auto_delete)
-        self.send(message_data, exchange=exchange,
-                  **extract_msg_options(kwargs))
-
-        signals.task_sent.send(sender=task_name, **message_data)
-
-        return task_id
-
-
-class ConsumerSet(_ConsumerSet):
-    """ConsumerSet with an optional decode error callback.
-
-    For more information see :class:`carrot.messaging.ConsumerSet`.
-
-    .. attribute:: on_decode_error
-
-        Callback called if a message had decoding errors.
-        The callback is called with the signature::
-
-            callback(message, exception)
-
-    """
-    on_decode_error = None
-
-    def _receive_callback(self, raw_message):
-        message = self.backend.message_to_python(raw_message)
-        if self.auto_ack and not message.acknowledged:
-            message.ack()
-        try:
-            decoded = message.decode()
-        except Exception, exc:
-            if self.on_decode_error:
-                return self.on_decode_error(message, exc)
-            else:
-                raise
-        self.receive(decoded, message)
-
-
-class TaskConsumer(Consumer):
-    """Consume tasks"""
-    queue = conf.DEFAULT_QUEUE
-    exchange = default_queue["exchange"]
-    routing_key = default_queue["binding_key"]
-    exchange_type = default_queue["exchange_type"]
-
-
-class EventPublisher(Publisher):
-    """Publish events"""
-    exchange = conf.EVENT_EXCHANGE
-    exchange_type = conf.EVENT_EXCHANGE_TYPE
-    routing_key = conf.EVENT_ROUTING_KEY
-    serializer = conf.EVENT_SERIALIZER
-
-
-class EventConsumer(Consumer):
-    """Consume events"""
-    queue = conf.EVENT_QUEUE
-    exchange = conf.EVENT_EXCHANGE
-    exchange_type = conf.EVENT_EXCHANGE_TYPE
-    routing_key = conf.EVENT_ROUTING_KEY
-    no_ack = True
-
-
-class ControlReplyConsumer(Consumer):
-    exchange = "celerycrq"
-    exchange_type = "direct"
-    durable = False
-    exclusive = False
-    auto_delete = True
-    no_ack = True
-
-    def __init__(self, connection, ticket, **kwargs):
-        self.ticket = ticket
-        queue = "%s.%s" % (self.exchange, ticket)
-        super(ControlReplyConsumer, self).__init__(connection,
-                                                   queue=queue,
-                                                   routing_key=ticket,
-                                                   **kwargs)
-
-    def collect(self, limit=None, timeout=1, callback=None):
-        responses = []
-
-        def on_message(message_data, message):
-            if callback:
-                callback(message_data)
-            responses.append(message_data)
-
-        self.callbacks = [on_message]
-        self.consume()
-        for i in limit and range(limit) or count():
-            try:
-                self.connection.drain_events(timeout=timeout)
-            except socket.timeout:
-                break
-
-        return responses
-
-
-class ControlReplyPublisher(Publisher):
-    exchange = "celerycrq"
-    exchange_type = "direct"
-    delivery_mode = "non-persistent"
-    durable = False
-    auto_delete = True
-
-
-class BroadcastPublisher(Publisher):
-    """Publish broadcast commands"""
-
-    ReplyTo = ControlReplyConsumer
-
-    exchange = conf.BROADCAST_EXCHANGE
-    exchange_type = conf.BROADCAST_EXCHANGE_TYPE
-
-    def send(self, type, arguments, destination=None, reply_ticket=None):
-        """Send broadcast command."""
-        arguments["command"] = type
-        arguments["destination"] = destination
-        if reply_ticket:
-            arguments["reply_to"] = {"exchange": self.ReplyTo.exchange,
-                                     "routing_key": reply_ticket}
-        super(BroadcastPublisher, self).send({"control": arguments})
-
-
-class BroadcastConsumer(Consumer):
-    """Consume broadcast commands"""
-    queue = conf.BROADCAST_QUEUE
-    exchange = conf.BROADCAST_EXCHANGE
-    exchange_type = conf.BROADCAST_EXCHANGE_TYPE
-    no_ack = True
-
-    def __init__(self, *args, **kwargs):
-        self.hostname = kwargs.pop("hostname", None) or socket.gethostname()
-        self.queue = "%s_%s" % (self.queue, self.hostname)
-        super(BroadcastConsumer, self).__init__(*args, **kwargs)
-
-    def verify_exclusive(self):
-        # XXX Kombu material
-        channel = getattr(self.backend, "channel")
-        if channel and hasattr(channel, "queue_declare"):
-            try:
-                _, _, consumers = channel.queue_declare(self.queue,
-                                                        passive=True)
-            except ValueError:
-                pass
-            else:
-                if consumers:
-                    warnings.warn(UserWarning(
-                        "A node named %s is already using this process "
-                        "mailbox. Maybe you should specify a custom name "
-                        "for this node with the -n argument?" % self.hostname))
-
-    def consume(self, *args, **kwargs):
-        self.verify_exclusive()
-        return super(BroadcastConsumer, self).consume(*args, **kwargs)
-
-
-def establish_connection(hostname=None, userid=None, password=None,
-        virtual_host=None, port=None, ssl=None, insist=None,
-        connect_timeout=None, backend_cls=None, defaults=conf):
-    """Establish a connection to the message broker."""
-    if insist is None:
-        insist = defaults.BROKER_INSIST
-    if ssl is None:
-        ssl = defaults.BROKER_USE_SSL
-    if connect_timeout is None:
-        connect_timeout = defaults.BROKER_CONNECTION_TIMEOUT
-
-    return BrokerConnection(hostname or defaults.BROKER_HOST,
-                            userid or defaults.BROKER_USER,
-                            password or defaults.BROKER_PASSWORD,
-                            virtual_host or defaults.BROKER_VHOST,
-                            port or defaults.BROKER_PORT,
-                            backend_cls=backend_cls or defaults.BROKER_BACKEND,
-                            insist=insist, ssl=ssl,
-                            connect_timeout=connect_timeout)
-
-
-def with_connection(fun):
-    """Decorator for providing default message broker connection for functions
-    supporting the ``connection`` and ``connect_timeout`` keyword
-    arguments."""
-
-    @wraps(fun)
-    def _inner(*args, **kwargs):
-        connection = kwargs.get("connection")
-        timeout = kwargs.get("connect_timeout", conf.BROKER_CONNECTION_TIMEOUT)
-        kwargs["connection"] = conn = connection or \
-                establish_connection(connect_timeout=timeout)
-        close_connection = not connection and conn.close or noop
-
-        try:
-            return fun(*args, **kwargs)
-        finally:
-            close_connection()
-
-    return _inner
-
-
-def get_consumer_set(connection, queues=None, **options):
-    """Get the :class:`carrot.messaging.ConsumerSet`` for a queue
-    configuration.
-
-    Defaults to the queues in :const:`CELERY_QUEUES`.
-
-    """
-    queues = queues or conf.get_queues()
-    cset = ConsumerSet(connection)
-    for queue_name, queue_options in queues.items():
-        queue_options = dict(queue_options)
-        queue_options["routing_key"] = queue_options.pop("binding_key", None)
-        consumer = Consumer(connection, queue=queue_name,
-                            backend=cset.backend, **queue_options)
-        cset.consumers.append(consumer)
-    return cset
+from celery import current_app
+
+TaskPublisher = current_app.amqp.TaskPublisher
+ConsumerSet = current_app.amqp.ConsumerSet
+TaskConsumer = current_app.amqp.TaskConsumer
+establish_connection = current_app.broker_connection
+with_connection = current_app.with_default_connection
+get_consumer_set = current_app.amqp.get_task_consumer
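
The old module is reduced to compatibility aliases that resolve through the application instance. A small sketch; the connection object is built from the ``BROKER_*`` settings and nothing is actually sent::

    from celery import messaging

    # e.g. messaging.TaskPublisher is current_app.amqp.TaskPublisher.
    conn = messaging.establish_connection()   # == current_app.broker_connection()
    print conn.hostname, conn.virtual_host
    conn.close()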

+ 0 - 56
celery/models.py

@@ -1,56 +0,0 @@
-"""
-
-celery.models has been moved to djcelery.models.
-
-This file is deprecated and will be removed in Celery v2.1.0.
-
-"""
-import atexit
-
-from django.core.exceptions import ImproperlyConfigured
-
-
-@atexit.register
-def _display_help():
-    import sys
-
-    sys.stderr.write("""
-
-======================================================
-ERROR: celery can't be added to INSTALLED_APPS anymore
-======================================================
-
-Please install the django-celery package and add:
-
-    import djcelery
-    djcelery.setup_loader()
-    INSTALLED_APPS = ("djcelery", )
-
-to settings.py.
-
-To install django-celery you can do one of the following:
-
-* Download from PyPI:
-
-    http://pypi.python.org/pypi/django-celery
-
-* Install with pip:
-
-    pip install django-celery
-
-* Install with easy_install:
-
-    easy_install django-celery
-
-* Clone the development repository:
-
-    http://github.com/ask/django-celery
-
-
-If you weren't aware of this already you should read the
-Celery 2.0 Changelog as well:
-    http://celeryproject.org/docs/changelog.html
-
-""")
-
-raise ImproperlyConfigured("Please install django-celery")

+ 21 - 3
celery/platforms.py

@@ -217,7 +217,10 @@ def parse_uid(uid):
         return int(uid)
     except ValueError:
         if pwd:
-            return pwd.getpwnam(uid).pw_uid
+            try:
+                return pwd.getpwnam(uid).pw_uid
+            except KeyError:
+                raise KeyError("User does not exist: %r" % (uid, ))
         raise
 
 
@@ -232,7 +235,10 @@ def parse_gid(gid):
         return int(gid)
     except ValueError:
         if grp:
-            return grp.getgrnam(gid).gr_gid
+            try:
+                return grp.getgrnam(gid).gr_gid
+            except KeyError:
+                raise KeyError("Group does not exist: %r" % (gid, ))
         raise
 
 
@@ -274,6 +280,15 @@ def set_effective_user(uid=None, gid=None):
         gid and setegid(gid)
 
 
+def get_signal(signal_name):
+    """Get signal number from signal name."""
+    if not isinstance(signal_name, basestring) or not signal_name.isupper():
+        raise TypeError("signal name must be uppercase string.")
+    if not signal_name.startswith("SIG"):
+        signal_name = "SIG" + signal_name
+    return getattr(signal, signal_name)
+
+
 def reset_signal(signal_name):
     """Reset signal to the default signal handler.
 
@@ -342,7 +357,10 @@ def set_mp_process_title(progname, info=None, hostname=None):
     Only works if :mod:`setproctitle` is installed.
 
     """
-    from multiprocessing.process import current_process
+    try:
+        from multiprocessing.process import current_process
+    except ImportError:
+        return
     if hostname:
         progname = "%s@%s" % (progname, hostname.split(".")[0])
     return set_process_title("%s:%s" % (progname, current_process().name),
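
The new ``get_signal`` helper and the friendlier ``KeyError`` messages are easy to exercise; a short sketch (POSIX assumed for the ``pwd`` lookup)::

    import signal

    from celery import platforms

    # "TERM" and "SIGTERM" both resolve to the same signal number.
    assert platforms.get_signal("TERM") == signal.SIGTERM
    assert platforms.get_signal("SIGTERM") == signal.SIGTERM

    try:
        platforms.parse_uid("no-such-user")
    except KeyError, exc:
        print exc            # e.g. "User does not exist: 'no-such-user'"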

+ 13 - 18
celery/registry.py

@@ -1,12 +1,11 @@
 """celery.registry"""
 import inspect
-from UserDict import UserDict
 
 from celery.exceptions import NotRegistered
+from celery.utils.compat import UserDict
 
 
 class TaskRegistry(UserDict):
-    """Site registry for tasks."""
 
     NotRegistered = NotRegistered
 
@@ -29,14 +28,13 @@ class TaskRegistry(UserDict):
 
         """
         task = inspect.isclass(task) and task() or task
-        name = task.name
-        self.data[name] = task
+        self.data[task.name] = task
 
     def unregister(self, name):
         """Unregister task by name.
 
         :param name: name of the task to unregister, or a
-            :class:`celery.task.base.Task` with a valid ``name`` attribute.
+            :class:`celery.task.base.Task` with a valid `name` attribute.
 
         :raises celery.exceptions.NotRegistered: if the task has not
             been registered.
@@ -47,32 +45,29 @@ class TaskRegistry(UserDict):
             name = name.name
         except AttributeError:
             pass
-
         self.pop(name)
 
     def filter_types(self, type):
         """Return all tasks of a specific type."""
-        return dict((task_name, task)
-                        for task_name, task in self.data.items()
-                            if task.type == type)
+        return dict((task_name, task) for task_name, task in self.data.items()
+                                        if task.type == type)
 
     def __getitem__(self, key):
         try:
             return UserDict.__getitem__(self, key)
-        except KeyError, exc:
-            raise self.NotRegistered(str(exc))
+        except KeyError:
+            raise self.NotRegistered(key)
 
     def pop(self, key, *args):
         try:
             return UserDict.pop(self, key, *args)
-        except KeyError, exc:
-            raise self.NotRegistered(str(exc))
+        except KeyError:
+            raise self.NotRegistered(key)
 
 
-"""
-.. data:: tasks
+#: Global task registry.
+tasks = TaskRegistry()
 
-    The global task registry.
 
-"""
-tasks = TaskRegistry()
+def _unpickle_task(name):
+    return tasks[name]
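
Registry lookups now raise :exc:`~celery.exceptions.NotRegistered` carrying the plain task name instead of a stringified :exc:`KeyError`; for example::

    from celery.exceptions import NotRegistered
    from celery.registry import tasks

    try:
        tasks["examples.does_not_exist"]      # hypothetical, unregistered name
    except NotRegistered, exc:
        print "unknown task: %s" % (exc, )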

+ 296 - 170
celery/result.py

@@ -5,12 +5,16 @@ import time
 from copy import copy
 from itertools import imap
 
+from celery import current_app
 from celery import states
-from celery.backends import default_backend
-from celery.datastructures import PositionQueue
+from celery.app import app_or_default
 from celery.exceptions import TimeoutError
-from celery.messaging import with_connection
-from celery.utils import any, all
+from celery.registry import _unpickle_task
+from celery.utils.compat import any, all
+
+
+def _unpickle_result(task_id, task_name):
+    return _unpickle_task(task_name).AsyncResult(task_id)
 
 
 class BaseAsyncResult(object):
@@ -19,21 +23,22 @@ class BaseAsyncResult(object):
     :param task_id: see :attr:`task_id`.
     :param backend: see :attr:`backend`.
 
-    .. attribute:: task_id
-
-        The unique identifier for this task.
-
-    .. attribute:: backend
-
-        The task result backend used.
-
     """
 
+    #: Error raised for timeouts.
     TimeoutError = TimeoutError
 
-    def __init__(self, task_id, backend):
+    #: The task uuid.
+    task_id = None
+
+    #: The task result backend to use.
+    backend = None
+
+    def __init__(self, task_id, backend, task_name=None, app=None):
         self.task_id = task_id
         self.backend = backend
+        self.task_name = task_name
+        self.app = app_or_default(app)
 
     def forget(self):
         """Forget about (and possibly remove the result of) this task."""
@@ -42,36 +47,47 @@ class BaseAsyncResult(object):
     def revoke(self, connection=None, connect_timeout=None):
         """Send revoke signal to all workers.
 
-        The workers will ignore the task if received.
+        Any worker receiving the task, or having reserved the
+        task, *must* ignore it.
 
         """
-        from celery.task import control
-        control.revoke(self.task_id, connection=connection,
-                       connect_timeout=connect_timeout)
+        self.app.control.revoke(self.task_id, connection=connection,
+                                connect_timeout=connect_timeout)
 
-    def wait(self, timeout=None):
-        """Wait for task, and return the result when it arrives.
+    def get(self, timeout=None, propagate=True, interval=0.5):
+        """Wait until task is ready, and return its result.
 
-        :keyword timeout: How long to wait, in seconds, before the
-            operation times out.
+        .. warning::
+
+           Waiting for tasks within a task may lead to deadlocks.
+           Please read :ref:`task-synchronous-subtasks`.
 
-        :raises celery.exceptions.TimeoutError: if ``timeout`` is not
-            :const:`None` and the result does not arrive within ``timeout``
+        :keyword timeout: How long to wait, in seconds, before the
+                          operation times out.
+        :keyword propagate: Re-raise exception if the task failed.
+        :keyword interval: Time to wait (in seconds) before retrying to
+           retrieve the result.  Note that this does not have any effect
+           when using the AMQP result store backend, as it does not
+           use polling.
+
+        :raises celery.exceptions.TimeoutError: if `timeout` is not
+            :const:`None` and the result does not arrive within `timeout`
             seconds.
 
-        If the remote call raised an exception then that
-        exception will be re-raised.
+        If the remote call raised an exception then that exception will
+        be re-raised.
 
         """
-        return self.backend.wait_for(self.task_id, timeout=timeout)
+        return self.backend.wait_for(self.task_id, timeout=timeout,
+                                                   propagate=propagate,
+                                                   interval=interval)
 
-    def get(self, timeout=None):
-        """Alias to :meth:`wait`."""
-        return self.wait(timeout=timeout)
+    def wait(self, *args, **kwargs):
+        """Deprecated alias to :meth:`get`."""
+        return self.get(*args, **kwargs)
 
     def ready(self):
-        """Returns :const:`True` if the task executed successfully, or raised
-        an exception.
+        """Returns :const:`True` if the task has been executed.
 
         If the task is still running, pending, or is waiting
         for retry then :const:`False` is returned.
@@ -84,15 +100,15 @@ class BaseAsyncResult(object):
         return self.status == states.SUCCESS
 
     def failed(self):
-        """Returns :const:`True` if the task failed by exception."""
+        """Returns :const:`True` if the task failed."""
         return self.status == states.FAILURE
 
     def __str__(self):
-        """``str(self) -> self.task_id``"""
+        """`str(self) -> self.task_id`"""
         return self.task_id
 
     def __hash__(self):
-        """``hash(self) -> hash(self.task_id)``"""
+        """`hash(self) -> hash(self.task_id)`"""
         return hash(self.task_id)
 
     def __repr__(self):
@@ -106,22 +122,23 @@ class BaseAsyncResult(object):
     def __copy__(self):
         return self.__class__(self.task_id, backend=self.backend)
 
+    def __reduce__(self):
+        if self.task_name:
+            return (_unpickle_result, (self.task_id, self.task_name))
+        else:
+            return (self.__class__, (self.task_id, self.backend,
+                                     None, self.app))
+
     @property
     def result(self):
         """When the task has been executed, this contains the return value.
-
-        If the task raised an exception, this will be the exception instance.
-
-        """
+        If the task raised an exception, this will be the exception
+        instance."""
         return self.backend.get_result(self.task_id)
 
     @property
     def info(self):
-        """Get state metadata.
-
-        Alias to :meth:`result`.
-
-        """
+        """Get state metadata.  Alias to :meth:`result`."""
         return self.result
 
     @property
@@ -129,16 +146,11 @@ class BaseAsyncResult(object):
         """Get the traceback of a failed task."""
         return self.backend.get_traceback(self.task_id)
 
-    @property
-    def status(self):
-        """Deprecated alias of :attr:`state`."""
-        return self.state
-
     @property
     def state(self):
-        """The current status of the task.
+        """The tasks current state.
 
-        Can be one of the following:
+        Possible values include:
 
             *PENDING*
 
@@ -154,111 +166,132 @@ class BaseAsyncResult(object):
 
             *FAILURE*
 
-                The task raised an exception, or has been retried more times
-                than its limit. The :attr:`result` attribute contains the
-                exception raised.
+                The task raised an exception, or has exceeded the retry limit.
+                The :attr:`result` attribute then contains the
+                exception raised by the task.
 
             *SUCCESS*
 
                 The task executed successfully. The :attr:`result` attribute
-                contains the resulting value.
+                then contains the task's return value.
 
         """
         return self.backend.get_status(self.task_id)
 
+    @property
+    def status(self):
+        """Deprecated alias of :attr:`state`."""
+        return self.state
+
 
 class AsyncResult(BaseAsyncResult):
     """Pending task result using the default backend.
 
-    :param task_id: see :attr:`task_id`.
+    :param task_id: The task uuid.
 
+    """
 
-    .. attribute:: task_id
+    #: Task result store backend to use.
+    backend = None
 
-        The unique identifier for this task.
+    def __init__(self, task_id, backend=None, task_name=None, app=None):
+        app = app_or_default(app)
+        backend = backend or app.backend
+        super(AsyncResult, self).__init__(task_id, backend,
+                                          task_name=task_name, app=app)
 
-    .. attribute:: backend
 
-        Instance of :class:`celery.backends.DefaultBackend`.
+class ResultSet(object):
+    """Working with more than one result.
 
-    """
+    :param results: List of result instances.
 
-    def __init__(self, task_id, backend=None):
-        super(AsyncResult, self).__init__(task_id, backend or default_backend)
+    """
 
+    #: List of results in the set.
+    results = None
 
-class TaskSetResult(object):
-    """Working with :class:`~celery.task.TaskSet` results.
+    def __init__(self, results, app=None, **kwargs):
+        self.app = app_or_default(app)
+        self.results = results
 
-    An instance of this class is returned by
-    ``TaskSet``'s :meth:`~celery.task.TaskSet.apply_async()`. It enables
-    inspection of the subtasks status and return values as a single entity.
+    def add(self, result):
+        """Add :class:`AsyncResult` as a new member of the set.
 
-    :option taskset_id: see :attr:`taskset_id`.
-    :option subtasks: see :attr:`subtasks`.
+        Does nothing if the result is already a member.
 
-    .. attribute:: taskset_id
+        """
+        if result not in self.results:
+            self.results.append(result)
 
-        The UUID of the taskset itself.
+    def remove(self, result):
+        """Removes result from the set; it must be a member.
 
-    .. attribute:: subtasks
+        :raises KeyError: if the result is not a member.
 
-        A list of :class:`AsyncResult` instances for all of the subtasks.
+        """
+        if isinstance(result, basestring):
+            result = AsyncResult(result)
+        try:
+            self.results.remove(result)
+        except ValueError:
+            raise KeyError(result)
 
-    """
+    def discard(self, result):
+        """Remove result from the set if it is a member.
 
-    def __init__(self, taskset_id, subtasks):
-        self.taskset_id = taskset_id
-        self.subtasks = subtasks
+        If it is not a member, do nothing.
 
-    def itersubtasks(self):
-        """Taskset subtask iterator.
+        """
+        try:
+            self.remove(result)
+        except KeyError:
+            pass
 
-        :returns: an iterator for iterating over the tasksets
-            :class:`AsyncResult` objects.
+    def update(self, results):
+        """Update set with the union of itself and an iterable with
+        results."""
+        self.results.extend(r for r in results if r not in self.results)
 
-        """
-        return (subtask for subtask in self.subtasks)
+    def clear(self):
+        """Remove all results from this set."""
+        self.results[:] = []  # don't create new list.
 
     def successful(self):
-        """Was the taskset successful?
+        """Was all of the tasks successful?
 
-        :returns: :const:`True` if all of the tasks in the taskset finished
+        :returns: :const:`True` if all of the tasks finished
             successfully (i.e. did not raise an exception).
 
         """
-        return all(subtask.successful()
-                        for subtask in self.itersubtasks())
+        return all(result.successful() for result in self.results)
 
     def failed(self):
-        """Did the taskset fail?
+        """Did any of the tasks fail?
 
-        :returns: :const:`True` if any of the tasks in the taskset failed.
+        :returns: :const:`True` if any of the tasks failed.
             (i.e., raised an exception)
 
         """
-        return any(subtask.failed()
-                        for subtask in self.itersubtasks())
+        return any(result.failed() for result in self.results)
 
     def waiting(self):
-        """Is the taskset waiting?
+        """Are any of the tasks incomplete?
 
-        :returns: :const:`True` if any of the tasks in the taskset is still
+        :returns: :const:`True` if any of the tasks is still
             waiting for execution.
 
         """
-        return any(not subtask.ready()
-                        for subtask in self.itersubtasks())
+        return any(not result.ready() for result in self.results)
 
     def ready(self):
-        """Is the task ready?
+        """Did all of the tasks complete? (either by success of failure).
 
-        :returns: :const:`True` if all of the tasks in the taskset has been
+        :returns: :const:`True` if all of the tasks have been
             executed.
 
         """
-        return all(subtask.ready()
-                        for subtask in self.itersubtasks())
+        return all(result.ready() for result in self.results)
 
     def completed_count(self):
         """Task completion count.
@@ -266,26 +299,29 @@ class TaskSetResult(object):
         :returns: the number of tasks completed.
 
         """
-        return sum(imap(int, (subtask.successful()
-                                for subtask in self.itersubtasks())))
+        return sum(imap(int, (result.successful() for result in self.results)))
 
     def forget(self):
-        """Forget about (and possible remove the result of) all the tasks
-        in this taskset."""
-        for subtask in self.subtasks:
-            subtask.forget()
+        """Forget about (and possible remove the result of) all the tasks."""
+        for result in self.results:
+            result.forget()
 
-    @with_connection
     def revoke(self, connection=None, connect_timeout=None):
-        for subtask in self.subtasks:
-            subtask.revoke(connection=connection)
+        """Revoke all tasks in the set."""
+
+        def _do_revoke(connection=None, connect_timeout=None):
+            for result in self.results:
+                result.revoke(connection=connection)
+
+        return self.app.with_default_connection(_do_revoke)(
+                connection=connection, connect_timeout=connect_timeout)
 
     def __iter__(self):
-        """``iter(res)`` -> ``res.iterate()``."""
         return self.iterate()
 
     def __getitem__(self, index):
-        return self.subtasks[index]
+        """`res[i] -> res.results[i]`"""
+        return self.results[index]
 
     def iterate(self):
         """Iterate over the return values of the tasks as they finish
@@ -294,9 +330,9 @@ class TaskSetResult(object):
         :raises: The exception if any of the tasks raised an exception.
 
         """
-        pending = list(self.subtasks)
-        results = dict((subtask.task_id, copy(subtask))
-                            for subtask in self.subtasks)
+        pending = list(self.results)
+        results = dict((result.task_id, copy(result))
+                            for result in self.results)
         while pending:
             for task_id in pending:
                 result = results[task_id]
@@ -309,107 +345,195 @@ class TaskSetResult(object):
                 elif result.status in states.PROPAGATE_STATES:
                     raise result.result
 
-    def join(self, timeout=None, propagate=True):
-        """Gather the results of all tasks in the taskset,
-        and returns a list ordered by the order of the set.
+    def join(self, timeout=None, propagate=True, interval=0.5):
+        """Gathers the results of all tasks as a list in order.
+
+        .. note::
+
+            This can be an expensive operation for result store
+            backends that must resort to polling (e.g. database).
 
-        :keyword timeout: The number of seconds to wait for results
-            before the operation times out.
+            You should consider using :meth:`join_native` if your backend
+            supports it.
 
-        :keyword propagate: If any of the subtasks raises an exception, the
-            exception will be reraised.
+        .. warning::
 
-        :raises celery.exceptions.TimeoutError: if ``timeout`` is not
-            :const:`None` and the operation takes longer than ``timeout``
+            Waiting for tasks within a task may lead to deadlocks.
+            Please see :ref:`task-synchronous-subtasks`.
+
+        :keyword timeout: The number of seconds to wait for results before
+                          the operation times out.
+
+        :keyword propagate: If any of the tasks raises an exception, the
+                            exception will be re-raised.
+
+        :keyword interval: Time to wait (in seconds) before retrying to
+                           retrieve a result from the set.  Note that this
+                           does not have any effect when using the AMQP
+                           result store backend, as it does not use polling.
+
+        :raises celery.exceptions.TimeoutError: if `timeout` is not
+            :const:`None` and the operation takes longer than `timeout`
             seconds.
 
-        :returns: list of return values for all subtasks in order.
+        """
+        time_start = time.time()
+        remaining = None
+
+        results = []
+        for result in self.results:
+            remaining = None
+            if timeout:
+                remaining = timeout - (time.time() - time_start)
+                if remaining <= 0.0:
+                    raise TimeoutError("join operation timed out")
+            results.append(result.wait(timeout=remaining,
+                                       propagate=propagate,
+                                       interval=interval))
+        return results
+
+    def iter_native(self, timeout=None):
+        backend = self.results[0].backend
+        ids = [result.task_id for result in self.results]
+        return backend.get_many(ids, timeout=timeout)
+
+    def join_native(self, timeout=None, propagate=True):
+        """Backend optimized version of :meth:`join`.
+
+        .. versionadded:: 2.2
+
+        Note that this does not support collecting the results
+        for different task types using different backends.
+
+        This is currently only supported by the AMQP result backend.
 
         """
+        backend = self.results[0].backend
+        results = [None for _ in xrange(len(self.results))]
 
-        time_start = time.time()
+        ids = [result.task_id for result in self.results]
+        states = dict(backend.get_many(ids, timeout=timeout))
+
+        for task_id, meta in states.items():
+            index = self.results.index(task_id)
+            results[index] = meta["result"]
+
+        return list(results)
+
+    @property
+    def total(self):
+        """Total number of tasks in the set."""
+        return len(self.results)
+
+    @property
+    def subtasks(self):
+        """Deprecated alias to :attr:`results`."""
+        return self.results
+
+
+class TaskSetResult(ResultSet):
+    """An instance of this class is returned by
+    `TaskSet`'s :meth:`~celery.task.TaskSet.apply_async` method.
+
+    It enables inspection of the tasks' states and return values as
+    a single entity.
+
+    :param taskset_id: The id of the taskset.
+    :param results: List of result instances.
+
+    """
+
+    #: The UUID of the taskset.
+    taskset_id = None
+
+    #: List/iterator of results in the taskset
+    results = None
+
+    def __init__(self, taskset_id, results=None, **kwargs):
+        self.taskset_id = taskset_id
 
-        def on_timeout():
-            raise TimeoutError("The operation timed out.")
-
-        results = PositionQueue(length=self.total)
-
-        while True:
-            for position, pending_result in enumerate(self.subtasks):
-                state = pending_result.state
-                if state in states.READY_STATES:
-                    if propagate and state in states.PROPAGATE_STATES:
-                        raise pending_result.result
-                    results[position] = pending_result.result
-            if results.full():
-                # Make list copy, so the returned type is not a position
-                # queue.
-                return list(results)
-            else:
-                if (timeout is not None and
-                        time.time() >= time_start + timeout):
-                    on_timeout()
-
-    def save(self, backend=default_backend):
+        # XXX previously the "results" arg was named "subtasks".
+        if "subtasks" in kwargs:
+            results = kwargs["subtasks"]
+        super(TaskSetResult, self).__init__(results, **kwargs)
+
+    def save(self, backend=None):
         """Save taskset result for later retrieval using :meth:`restore`.
 
-        Example:
+        Example::
 
             >>> result.save()
-            >>> result = TaskSetResult.restore(task_id)
+            >>> result = TaskSetResult.restore(taskset_id)
 
         """
-        backend.save_taskset(self.taskset_id, self)
+        return (backend or self.app.backend).save_taskset(self.taskset_id,
+                                                          self)
+
+    def delete(self, backend=None):
+        """Remove this result if it was previously saved."""
+        (backend or self.app.backend).delete_taskset(self.taskset_id)
 
     @classmethod
-    def restore(self, taskset_id, backend=default_backend):
+    def restore(self, taskset_id, backend=None):
         """Restore previously saved taskset result."""
+        if backend is None:
+            backend = current_app.backend
         return backend.restore_taskset(taskset_id)
 
-    @property
-    def total(self):
-        """The total number of tasks in the :class:`~celery.task.TaskSet`."""
-        return len(self.subtasks)
+    def itersubtasks(self):
+        """Depreacted.   Use ``iter(self.results)`` instead."""
+        return iter(self.results)
+
+    def __reduce__(self):
+        return (self.__class__, (self.taskset_id, self.results))
 
 
 class EagerResult(BaseAsyncResult):
-    """Result that we know has already been executed.  """
+    """Result that we know has already been executed."""
     TimeoutError = TimeoutError
 
-    def __init__(self, task_id, ret_value, status, traceback=None):
+    def __init__(self, task_id, ret_value, state, traceback=None):
         self.task_id = task_id
         self._result = ret_value
-        self._status = status
+        self._state = state
         self._traceback = traceback
 
+    def __reduce__(self):
+        return (self.__class__, (self.task_id, self._result,
+                                 self._state, self._traceback))
+
+    def __copy__(self):
+        cls, args = self.__reduce__()
+        return cls(*args)
+
     def successful(self):
         """Returns :const:`True` if the task executed without failure."""
-        return self.status == states.SUCCESS
+        return self.state == states.SUCCESS
 
     def ready(self):
         """Returns :const:`True` if the task has been executed."""
         return True
 
-    def wait(self, timeout=None):
+    def get(self, timeout=None, propagate=True, **kwargs):
         """Wait until the task has been executed and return its result."""
-        if self.status == states.SUCCESS:
+        if self.state == states.SUCCESS:
+            return self.result
+        elif self.state in states.PROPAGATE_STATES:
+            if propagate:
+                raise self.result
             return self.result
-        elif self.status in states.PROPAGATE_STATES:
-            raise self.result
 
     def revoke(self):
-        self._status = states.REVOKED
+        self._state = states.REVOKED
+
+    def __repr__(self):
+        return "<EagerResult: %s>" % self.task_id
 
     @property
     def result(self):
         """The tasks return value"""
         return self._result
 
-    @property
-    def status(self):
-        """The tasks status (alias to :attr:`state`)."""
-        return self._status
-
     @property
     def state(self):
         """The tasks state."""
@@ -420,5 +544,7 @@ class EagerResult(BaseAsyncResult):
         """The traceback if the task failed."""
         return self._traceback
 
-    def __repr__(self):
-        return "<EagerResult: %s>" % self.task_id
+    @property
+    def status(self):
+        """The tasks status (alias to :attr:`state`)."""
+        return self._state
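
A minimal usage sketch of the result API in the hunks above, assuming `task_id` and `taskset_id` were obtained from earlier `apply_async()` and `save()` calls:

    from celery.result import AsyncResult, TaskSetResult

    res = AsyncResult(task_id)                     # wait() is now a deprecated alias of get()
    value = res.get(timeout=10, propagate=False)   # returns the exception instead of raising it

    tsr = TaskSetResult.restore(taskset_id)        # previously stored with tsr.save()
    values = tsr.join(timeout=30, interval=0.5)    # ordered list; polls non-AMQP backends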

+ 26 - 27
celery/routes.py

@@ -1,11 +1,11 @@
 from celery.exceptions import QueueNotFound
-from celery.utils import instantiate, firstmethod, mpromise
+from celery.utils import firstmethod, instantiate, lpmerge, mpromise
 
 _first_route = firstmethod("route_for_task")
 
 
 class MapRoute(object):
-    """Makes a router out of a :class:`dict`."""
+    """Creates a router out of a :class:`dict`."""
 
     def __init__(self, map):
         self.map = map
@@ -18,7 +18,10 @@ class MapRoute(object):
 
 class Router(object):
 
-    def __init__(self, routes=None, queues=None, create_missing=False):
+    def __init__(self, routes=None, queues=None, create_missing=False,
+            app=None):
+        from celery.app import app_or_default
+        self.app = app_or_default(app)
         if queues is None:
             queues = {}
         if routes is None:
@@ -27,45 +30,39 @@ class Router(object):
         self.routes = routes
         self.create_missing = create_missing
 
-    def add_queue(self, queue):
-        q = self.queues[queue] = {"binding_key": queue,
-                                  "routing_key": queue,
-                                  "exchange": queue,
-                                  "exchange_type": "direct"}
-        return q
-
     def route(self, options, task, args=(), kwargs={}):
-        # Expand "queue" keys in options.
-        options = self.expand_destination(options)
+        options = self.expand_destination(options)  # expands 'queue'
         if self.routes:
             route = self.lookup_route(task, args, kwargs)
-            if route:
-                # Also expand "queue" keys in route.
-                return dict(options, **self.expand_destination(route))
+            if route:  # expands 'queue' in route.
+                return lpmerge(self.expand_destination(route), options)
+        if "queue" not in options:
+            options = lpmerge(self.expand_destination(
+                                self.app.conf.CELERY_DEFAULT_QUEUE), options)
         return options
 
     def expand_destination(self, route):
-        # The route can simply be a queue name,
-        # this is convenient for direct exchanges.
+        # Route can be a queue name: convenient for direct exchanges.
         if isinstance(route, basestring):
             queue, route = route, {}
         else:
-            # For topic exchanges you can use the defaults from a queue
-            # definition, and override e.g. just the routing_key.
+            # can use defaults from configured queue, but override specific
+            # things (like the routing_key): great for topic exchanges.
             queue = route.pop("queue", None)
 
-        if queue:
+        if queue:  # expand config from configured queue.
             try:
                 dest = dict(self.queues[queue])
             except KeyError:
-                if self.create_missing:
-                    dest = self.add_queue(queue)
-                else:
+                if not self.create_missing:
                     raise QueueNotFound(
-                        "Queue '%s' is not defined in CELERY_QUEUES" % queue)
+                        "Queue %r is not defined in CELERY_QUEUES" % queue)
+                dest = dict(self.app.amqp.queues.add(queue, queue, queue))
+            # needs to be declared by publisher
+            dest["queue"] = queue
+            # routing_key and binding_key are synonyms.
             dest.setdefault("routing_key", dest.get("binding_key"))
-            return dict(dest, **route)
-
+            return lpmerge(dest, route)
         return route
 
     def lookup_route(self, task, args=None, kwargs=None):
@@ -73,7 +70,7 @@ class Router(object):
 
 
 def prepare(routes):
-    """Expand ROUTES setting."""
+    """Expands the :setting:`CELERY_ROUTES` setting."""
 
     def expand_route(route):
         if isinstance(route, dict):
@@ -82,6 +79,8 @@ def prepare(routes):
             return mpromise(instantiate, route)
         return route
 
+    if routes is None:
+        return ()
     if not isinstance(routes, (list, tuple)):
         routes = (routes, )
     return map(expand_route, routes)
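
A sketch of how routes declared in `CELERY_ROUTES` are consumed by `prepare()` and `Router` above; the task and queue names are illustrative:

    # A dict becomes a MapRoute, a string is instantiated lazily via mpromise,
    # and a bare queue name is expanded from CELERY_QUEUES by expand_destination().
    CELERY_ROUTES = (
        {"feeds.tasks.import_feed": {"queue": "feeds"}},    # MapRoute
        "myapp.routers.VideoRouter",                         # dotted path, lazy instantiation
    )

    # Shortcut for a direct exchange: route straight to a named queue.
    CELERY_ROUTES = {"feeds.tasks.import_feed": "feeds"}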

+ 21 - 14
celery/schedules.py

@@ -4,7 +4,8 @@ from pyparsing import (Word, Literal, ZeroOrMore, Optional,
                        Group, StringEnd, alphas)
 
 from celery.utils import is_iterable
-from celery.utils.timeutils import timedelta_seconds, weekday, remaining
+from celery.utils.timeutils import (timedelta_seconds, weekday,
+                                    remaining, humanize_seconds)
 
 
 class schedule(object):
@@ -19,15 +20,15 @@ class schedule(object):
         return remaining(last_run_at, self.run_every, relative=self.relative)
 
     def is_due(self, last_run_at):
-        """Returns tuple of two items ``(is_due, next_time_to_run)``,
+        """Returns tuple of two items `(is_due, next_time_to_run)`,
         where next time to run is in seconds.
 
         e.g.
 
-        * ``(True, 20)``, means the task should be run now, and the next
+        * `(True, 20)`, means the task should be run now, and the next
             time to run is in 20 seconds.
 
-        * ``(False, 12)``, means the task should be run in 12 seconds.
+        * `(False, 12)`, means the task should be run in 12 seconds.
 
         You can override this to decide the interval at runtime,
         but keep in mind the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`,
@@ -46,6 +47,10 @@ class schedule(object):
             return True, timedelta_seconds(self.run_every)
         return False, rem
 
+    def __repr__(self):
+        return "<freq: %s>" % humanize_seconds(
+                timedelta_seconds(self.run_every))
+
     def __eq__(self, other):
         if isinstance(other, schedule):
             return self.run_every == other.run_every
@@ -134,7 +139,7 @@ class crontab_parser(object):
 
     @staticmethod
     def _ignore_comma(toks):
-        return filter(lambda x: x != ',', toks)
+        return [x for x in toks if x != ',']
 
     @staticmethod
     def _join_to_set(toks):
@@ -145,7 +150,7 @@ class crontab_parser(object):
 
 
 class crontab(schedule):
-    """A crontab can be used as the ``run_every`` value of a
+    """A crontab can be used as the `run_every` value of a
     :class:`PeriodicTask` to add cron-like scheduling.
 
     Like a :manpage:`cron` job, you can specify units of time of when
@@ -251,6 +256,9 @@ class crontab(schedule):
     def remaining_estimate(self, last_run_at):
         """Returns when the periodic task should run next as a timedelta."""
         weekday = last_run_at.isoweekday()
+        if weekday == 7:    # Sunday is day 0, not day 7.
+            weekday = 0
+
         execute_this_hour = (weekday in self.day_of_week and
                                 last_run_at.hour in self.hour and
                                 last_run_at.minute < max(self.minute))
@@ -265,8 +273,7 @@ class crontab(schedule):
             next_minute = min(self.minute)
 
             execute_today = (weekday in self.day_of_week and
-                                (last_run_at.hour < max(self.hour) or
-                                    execute_this_hour))
+                                 last_run_at.hour < max(self.hour))
 
             if execute_today:
                 next_hour = min(hour for hour in self.hour
@@ -277,13 +284,13 @@ class crontab(schedule):
                                       microsecond=0)
             else:
                 next_hour = min(self.hour)
-                iso_next_day = min([day for day in self.day_of_week
-                                        if day > weekday] or
-                                   self.day_of_week)
-                add_week = iso_next_day == weekday
+                next_day = min([day for day in self.day_of_week
+                                    if day > weekday] or
+                               self.day_of_week)
+                add_week = next_day == weekday
 
                 delta = relativedelta(weeks=add_week and 1 or 0,
-                                      weekday=(iso_next_day - 1) % 7,
+                                      weekday=(next_day - 1) % 7,
                                       hour=next_hour,
                                       minute=next_minute,
                                       second=0,
@@ -292,7 +299,7 @@ class crontab(schedule):
         return remaining(last_run_at, delta, now=self.nowfun())
 
     def is_due(self, last_run_at):
-        """Returns tuple of two items ``(is_due, next_time_to_run)``,
+        """Returns tuple of two items `(is_due, next_time_to_run)`,
         where next time to run is in seconds.
 
         See :meth:`celery.schedules.schedule.is_due` for more information.
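
A sketch of the schedule classes above wired into `CELERYBEAT_SCHEDULE`; the task names are illustrative:

    from datetime import timedelta
    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        "add-every-30-seconds": {
            "task": "tasks.add",                    # hypothetical task
            "schedule": timedelta(seconds=30),      # wrapped in schedule() by celerybeat
            "args": (16, 16),
        },
        "monday-morning-report": {
            "task": "tasks.send_report",            # hypothetical task
            "schedule": crontab(hour=7, minute=30, day_of_week=1),
        },
    }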

+ 281 - 0
celery/signals.py

@@ -1,3 +1,272 @@
+"""
+==============
+celery.signals
+==============
+
+Signals allow decoupled applications to receive notifications when
+certain actions occur elsewhere in the application.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
+.. contents::
+    :local:
+
+.. _signal-basics:
+
+Basics
+======
+
+Several kinds of events trigger signals; you can connect to these signals
+to perform actions as they trigger.
+
+Example connecting to the :signal:`task_sent` signal:
+
+.. code-block:: python
+
+    from celery.signals import task_sent
+
+    def task_sent_handler(sender=None, task_id=None, task=None, args=None,
+                          kwargs=None, **kwds):
+        print("Got signal task_sent for task id %s" % (task_id, ))
+
+    task_sent.connect(task_sent_handler)
+
+
+Some signals also have a sender which you can filter by. For example the
+:signal:`task_sent` signal uses the task name as a sender, so you can
+connect your handler to be called only when tasks with name `"tasks.add"`
+have been sent by providing the `sender` argument to
+:class:`~celery.utils.dispatch.signal.Signal.connect`:
+
+.. code-block:: python
+
+    task_sent.connect(task_sent_handler, sender="tasks.add")
+
+.. _signal-ref:
+
+Signals
+=======
+
+Task Signals
+------------
+
+.. signal:: task_sent
+
+task_sent
+~~~~~~~~~
+
+Dispatched when a task has been sent to the broker.
+Note that this is executed in the client process, the one sending
+the task, not in the worker.
+
+Sender is the name of the task being sent.
+
+Provides arguments:
+
+* task_id
+    Id of the task to be executed.
+
+* task
+    The task being executed.
+
+* args
+    The task's positional arguments.
+
+* kwargs
+    The task's keyword arguments.
+
+* eta
+    The time to execute the task.
+
+* taskset
+    Id of the taskset this task is part of (if any).
+
+.. signal:: task_prerun
+
+task_prerun
+~~~~~~~~~~~
+
+Dispatched before a task is executed.
+
+Sender is the task class being executed.
+
+Provides arguments:
+
+* task_id
+    Id of the task to be executed.
+
+* task
+    The task being executed.
+
+* args
+    The task's positional arguments.
+
+* kwargs
+    The task's keyword arguments.
+
+.. signal:: task_postrun
+
+task_postrun
+~~~~~~~~~~~~
+
+Dispatched after a task has been executed.
+
+Sender is the task class executed.
+
+Provides arguments:
+
+* task_id
+    Id of the task to be executed.
+
+* task
+    The task being executed.
+
+* args
+    The task's positional arguments.
+
+* kwargs
+    The task's keyword arguments.
+
+* retval
+    The return value of the task.
+
+.. signal:: task_failure
+
+task_failure
+~~~~~~~~~~~~
+
+Dispatched when a task fails.
+
+Sender is the task class executed.
+
+Provides arguments:
+
+* task_id
+    Id of the task.
+
+* exception
+    Exception instance raised.
+
+* args
+    Positional arguments the task was called with.
+
+* kwargs
+    Keyword arguments the task was called with.
+
+* traceback
+    Stack trace object.
+
+* einfo
+    The :class:`celery.datastructures.ExceptionInfo` instance.
+
+Worker Signals
+--------------
+
+.. signal:: worker_init
+
+worker_init
+~~~~~~~~~~~
+
+Dispatched before the worker is started.
+
+.. signal:: worker_ready
+
+worker_ready
+~~~~~~~~~~~~
+
+Dispatched when the worker is ready to accept work.
+
+.. signal:: worker_process_init
+
+worker_process_init
+~~~~~~~~~~~~~~~~~~~
+
+Dispatched by each new pool worker process when it starts.
+
+.. signal:: worker_shutdown
+
+worker_shutdown
+~~~~~~~~~~~~~~~
+
+Dispatched when the worker is about to shut down.
+
+Celerybeat Signals
+------------------
+
+.. signal:: beat_init
+
+beat_init
+~~~~~~~~~
+
+Dispatched when celerybeat starts (either standalone or embedded).
+Sender is the :class:`celery.beat.Service` instance.
+
+.. signal:: beat_embedded_init
+
+beat_embedded_init
+~~~~~~~~~~~~~~~~~~
+
+Dispatched in addition to the :signal:`beat_init` signal when celerybeat is
+started as an embedded process.  Sender is the
+:class:`celery.beat.Service` instance.
+
+Eventlet Signals
+----------------
+
+.. signal:: eventlet_pool_started
+
+eventlet_pool_started
+~~~~~~~~~~~~~~~~~~~~~
+
+Sent when the eventlet pool has been started.
+
+Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
+
+.. signal:: eventlet_pool_preshutdown
+
+eventlet_pool_preshutdown
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sent when the worker shuts down, just before the eventlet pool
+is requested to wait for remaining workers.
+
+Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
+
+.. signal:: eventlet_pool_postshutdown
+
+eventlet_pool_postshutdown
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sent when the pool has been joined and the worker is ready to shutdown.
+
+Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
+
+.. signal:: eventlet_pool_apply
+
+eventlet_pool_apply
+~~~~~~~~~~~~~~~~~~~
+
+Sent whenever a task is applied to the pool.
+
+Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
+
+Provides arguments:
+
+* target
+
+    The target function.
+
+* args
+
+    Positional arguments.
+
+* kwargs
+
+    Keyword arguments.
+
+
+"""
 from celery.utils.dispatch import Signal
 
 task_sent = Signal(providing_args=["task_id", "task",
@@ -10,6 +279,10 @@ task_prerun = Signal(providing_args=["task_id", "task",
 task_postrun = Signal(providing_args=["task_id", "task",
                                       "args", "kwargs", "retval"])
 
+task_failure = Signal(providing_args=["task_id", "exception",
+                                      "args", "kwargs", "traceback",
+                                      "einfo"])
+
 worker_init = Signal(providing_args=[])
 worker_process_init = Signal(providing_args=[])
 worker_ready = Signal(providing_args=[])
@@ -17,3 +290,11 @@ worker_shutdown = Signal(providing_args=[])
 
 setup_logging = Signal(providing_args=["loglevel", "logfile",
                                        "format", "colorize"])
+
+beat_init = Signal(providing_args=[])
+beat_embedded_init = Signal(providing_args=[])
+
+eventlet_pool_started = Signal(providing_args=[])
+eventlet_pool_preshutdown = Signal(providing_args=[])
+eventlet_pool_postshutdown = Signal(providing_args=[])
+eventlet_pool_apply = Signal(providing_args=["target", "args", "kwargs"])
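
A receiver sketch for the newly added `task_failure` signal, mirroring the `task_sent` example in the module docstring above:

    from celery.signals import task_failure

    def task_failure_handler(sender=None, task_id=None, exception=None,
                             args=None, kwargs=None, traceback=None,
                             einfo=None, **kwds):
        print("Task %s raised: %r" % (task_id, exception))

    task_failure.connect(task_failure_handler)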

+ 16 - 3
celery/states.py

@@ -1,4 +1,12 @@
 """
+celery.states
+=============
+
+Built-in Task States.
+
+:copyright: (c) 2009 - 2011 by Ask Solem.
+:license: BSD, see LICENSE for more details.
+
 
 .. _states:
 
@@ -46,17 +54,22 @@ ALL_STATES
 
 Set of all possible states.
 
+
+Misc.
+-----
+
 """
 
-## State precedence.
-# None represents the precedence of an unknown state.
-# Lower index means higher precedence.
+#: State precedence.
+#: None represents the precedence of an unknown state.
+#: Lower index means higher precedence.
 PRECEDENCE = ["SUCCESS",
               "FAILURE",
               None,
               "REVOKED",
               "STARTED",
               "RECEIVED",
+              "RETRY",
               "PENDING"]
 
 

+ 72 - 54
celery/task/__init__.py

@@ -1,82 +1,100 @@
-"""
+# -*- coding: utf-8 -*-
+import warnings
 
-Working with tasks and task sets.
-
-"""
-
-from celery.execute import apply_async
-from celery.registry import tasks
-from celery.serialization import pickle
+from celery.app import app_or_default
 from celery.task.base import Task, PeriodicTask
-from celery.task.sets import TaskSet
-from celery.task.builtins import PingTask, ExecuteRemoteTask
-from celery.task.builtins import AsynchronousMapTask, _dmap
+from celery.task.sets import TaskSet, subtask
+from celery.task.chord import chord
 from celery.task.control import discard_all
-from celery.task.http import HttpDispatchTask
 
-__all__ = ["Task", "TaskSet", "PeriodicTask", "tasks", "discard_all",
-           "dmap", "dmap_async", "execute_remote", "ping", "HttpDispatchTask"]
+__all__ = ["Task", "TaskSet", "PeriodicTask", "subtask", "discard_all"]
 
 
-def dmap(fun, args, timeout=None):
-    """Distribute processing of the arguments and collect the results.
+def task(*args, **kwargs):
+    """Decorator to create a task class out of any callable.
 
-    Example
+    **Examples**
 
-        >>> from celery.task import dmap
-        >>> import operator
-        >>> dmap(operator.add, [[2, 2], [4, 4], [8, 8]])
-        [4, 8, 16]
-
-    """
-    return _dmap(fun, args, timeout)
+    .. code-block:: python
 
+        @task()
+        def refresh_feed(url):
+            return Feed.objects.get(url=url).refresh()
 
-def dmap_async(fun, args, timeout=None):
-    """Distribute processing of the arguments and collect the results
-    asynchronously.
+    Setting extra options and using retry:
 
-    :returns :class:`celery.result.AsyncResult`:
+    .. code-block:: python
 
-    Example
+        @task(max_retries=10)
+        def refresh_feed(url):
+            try:
+                return Feed.objects.get(url=url).refresh()
+            except socket.error, exc:
+                refresh_feed.retry(exc=exc)
 
-        >>> from celery.task import dmap_async
-        >>> import operator
-        >>> presult = dmap_async(operator.add, [[2, 2], [4, 4], [8, 8]])
-        >>> presult
-        <AsyncResult: 373550e8-b9a0-4666-bc61-ace01fa4f91d>
-        >>> presult.status
-        'SUCCESS'
-        >>> presult.result
-        [4, 8, 16]
+    Calling the resulting task:
 
+            >>> refresh_feed("http://example.com/rss") # Regular
+            <Feed: http://example.com/rss>
+            >>> refresh_feed.delay("http://example.com/rss") # Async
+            <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
     """
-    return AsynchronousMapTask.delay(pickle.dumps(fun), args, timeout=timeout)
+    kwargs.setdefault("accept_magic_kwargs", False)
+    return app_or_default().task(*args, **kwargs)
+
+
+def periodic_task(*args, **options):
+    """Decorator to create a task class out of any callable.
+
+        .. admonition:: Examples
 
+            .. code-block:: python
 
-def execute_remote(fun, *args, **kwargs):
-    """Execute arbitrary function/object remotely.
+                @task()
+                def refresh_feed(url):
+                    return Feed.objects.get(url=url).refresh()
 
-    :param fun: A callable function or object.
-    :param \*args: Positional arguments to apply to the function.
-    :param \*\*kwargs: Keyword arguments to apply to the function.
+            Setting extra options and using retry:
 
-    The object must be picklable, so you can't use lambdas or functions
-    defined in the REPL (the objects must have an associated module).
+            .. code-block:: python
 
-    :returns class:`celery.result.AsyncResult`:
+                @task(exchange="feeds")
+                def refresh_feed(url, **kwargs):
+                    try:
+                        return Feed.objects.get(url=url).refresh()
+                    except socket.error, exc:
+                        refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc)
+
+            Calling the resulting task:
+
+                >>> refresh_feed("http://example.com/rss") # Regular
+                <Feed: http://example.com/rss>
+                >>> refresh_feed.delay("http://example.com/rss") # Async
+                <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
 
     """
-    return ExecuteRemoteTask.delay(pickle.dumps(fun), args, kwargs)
+    return task(**dict({"base": PeriodicTask}, **options))
+
+
+@task(name="celery.backend_cleanup")
+def backend_cleanup():
+    backend_cleanup.backend.cleanup()
+
+
+class PingTask(Task):  # ✞
+    name = "celery.ping"
+
+    def run(self, **kwargs):
+        return "pong"
 
 
-def ping():
-    """Test if the server is alive.
+def ping():  # ✞
+    """Deprecated and scheduled for removal in Celery 2.3.
 
-    Example:
+    Please use :meth:`celery.task.control.ping` instead.
 
-        >>> from celery.task import ping
-        >>> ping()
-        'pong'
     """
+    warnings.warn(DeprecationWarning(
+        "The ping task has been deprecated and will be removed in Celery "
+        "v2.3.  Please use inspect.ping instead."))
     return PingTask.apply_async().get()
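
The examples in the `periodic_task` docstring above reuse the plain `@task` form; in practice the periodic variant is also given a `run_every` option. A minimal sketch with a placeholder task body:

    from datetime import timedelta
    from celery.task import periodic_task

    @periodic_task(run_every=timedelta(minutes=30))
    def heartbeat():
        # placeholder body; any callable works
        return "ok"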

File diff suppressed because it is too large
+ 481 - 342
celery/task/base.py


+ 0 - 68
celery/task/builtins.py

@@ -1,68 +0,0 @@
-from celery import conf
-from celery.schedules import crontab
-from celery.serialization import pickle
-from celery.task.base import Task
-from celery.task.sets import TaskSet
-
-
-class backend_cleanup(Task):
-    name = "celery.backend_cleanup"
-
-    def run(self):
-        self.backend.cleanup()
-
-if conf.TASK_RESULT_EXPIRES and \
-        backend_cleanup.name not in conf.CELERYBEAT_SCHEDULE:
-    conf.CELERYBEAT_SCHEDULE[backend_cleanup.name] = dict(
-            task=backend_cleanup.name,
-            schedule=crontab(minute="00", hour="04", day_of_week="*"))
-
-
-DeleteExpiredTaskMetaTask = backend_cleanup         # FIXME remove in 3.0
-
-
-class PingTask(Task):
-    """The task used by :func:`ping`."""
-    name = "celery.ping"
-
-    def run(self, **kwargs):
-        """:returns: the string ``"pong"``."""
-        return "pong"
-
-
-def _dmap(fun, args, timeout=None):
-    pickled = pickle.dumps(fun)
-    arguments = [((pickled, arg, {}), {}) for arg in args]
-    ts = TaskSet(ExecuteRemoteTask, arguments)
-    return ts.apply_async().join(timeout=timeout)
-
-
-class AsynchronousMapTask(Task):
-    """Task used internally by :func:`dmap_async` and
-    :meth:`TaskSet.map_async`.  """
-    name = "celery.map_async"
-
-    def run(self, serfun, args, timeout=None, **kwargs):
-        return _dmap(pickle.loads(serfun), args, timeout=timeout)
-
-
-class ExecuteRemoteTask(Task):
-    """Execute an arbitrary function or object.
-
-    *Note* You probably want :func:`execute_remote` instead, which this
-    is an internal component of.
-
-    The object must be pickleable, so you can't use lambdas or functions
-    defined in the REPL (that is the python shell, or ``ipython``).
-
-    """
-    name = "celery.execute_remote"
-
-    def run(self, ser_callable, fargs, fkwargs, **kwargs):
-        """
-        :param ser_callable: A pickled function or callable object.
-        :param fargs: Positional arguments to apply to the function.
-        :param fkwargs: Keyword arguments to apply to the function.
-
-        """
-        return pickle.loads(ser_callable)(*fargs, **fkwargs)

+ 46 - 0
celery/task/chord.py

@@ -0,0 +1,46 @@
+from kombu.utils import gen_unique_id
+
+from celery import current_app
+from celery.result import TaskSetResult
+from celery.task.sets import TaskSet, subtask
+
+
+@current_app.task(name="celery.chord_unlock", max_retries=None)
+def _unlock_chord(setid, callback, interval=1, max_retries=None):
+    result = TaskSetResult.restore(setid)
+    if result.ready():
+        subtask(callback).delay(result.join())
+        result.delete()
+    _unlock_chord.retry(countdown=interval, max_retries=max_retries)
+
+
+class Chord(current_app.Task):
+    accept_magic_kwargs = False
+    name = "celery.chord"
+
+    def run(self, set, body, interval=1, max_retries=None, **kwargs):
+        if not isinstance(set, TaskSet):
+            set = TaskSet(set)
+        r = []
+        setid = gen_unique_id()
+        for task in set.tasks:
+            uuid = gen_unique_id()
+            task.options.update(task_id=uuid, chord=body)
+            r.append(current_app.AsyncResult(uuid))
+        current_app.TaskSetResult(setid, r).save()
+        self.backend.on_chord_apply(setid, body, interval, max_retries)
+        return set.apply_async(taskset_id=setid)
+
+
+class chord(object):
+    Chord = Chord
+
+    def __init__(self, tasks, **options):
+        self.tasks = tasks
+        self.options = options
+
+    def __call__(self, body, **options):
+        uuid = body.options.setdefault("task_id", gen_unique_id())
+        self.Chord.apply_async((list(self.tasks), body), self.options,
+                                **options)
+        return body.type.app.AsyncResult(uuid)
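
A usage sketch of the new `chord` primitive, assuming `add` and `tsum` tasks are defined elsewhere:

    from celery.task import chord
    from myapp.tasks import add, tsum   # hypothetical tasks

    # Apply ten add() tasks in parallel; once all of them are done, the list
    # of results is passed to tsum(), whose AsyncResult is returned.
    result = chord(add.subtask((i, i)) for i in xrange(10))(tsum.subtask())
    print(result.get())   # -> 90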

+ 156 - 141
celery/task/control.py

@@ -1,91 +1,6 @@
-from celery import conf
-from celery.utils import gen_unique_id
-from celery.messaging import BroadcastPublisher, ControlReplyConsumer
-from celery.messaging import with_connection, get_consumer_set
-
-
-@with_connection
-def discard_all(connection=None,
-        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
-    """Discard all waiting tasks.
-
-    This will ignore all tasks waiting for execution, and they will
-    be deleted from the messaging server.
-
-    :returns: the number of tasks discarded.
-
-    """
-    consumers = get_consumer_set(connection=connection)
-    try:
-        return consumers.discard_all()
-    finally:
-        consumers.close()
-
-
-def revoke(task_id, destination=None, **kwargs):
-    """Revoke a task by id.
-
-    If a task is revoked, the workers will ignore the task and not execute
-    it after all.
-
-    :param task_id: Id of the task to revoke.
-    :keyword destination: If set, a list of the hosts to send the command to,
-        when empty broadcast to all workers.
-    :keyword connection: Custom broker connection to use, if not set,
-        a connection will be established automatically.
-    :keyword connect_timeout: Timeout for new connection if a custom
-        connection is not provided.
-    :keyword reply: Wait for and return the reply.
-    :keyword timeout: Timeout in seconds to wait for the reply.
-    :keyword limit: Limit number of replies.
-
-    """
-    return broadcast("revoke", destination=destination,
-                               arguments={"task_id": task_id}, **kwargs)
-
-
-def ping(destination=None, timeout=1, **kwargs):
-    """Ping workers.
-
-    Returns answer from alive workers.
-
-    :keyword destination: If set, a list of the hosts to send the command to,
-        when empty broadcast to all workers.
-    :keyword connection: Custom broker connection to use, if not set,
-        a connection will be established automatically.
-    :keyword connect_timeout: Timeout for new connection if a custom
-        connection is not provided.
-    :keyword reply: Wait for and return the reply.
-    :keyword timeout: Timeout in seconds to wait for the reply.
-    :keyword limit: Limit number of replies.
-
-    """
-    return broadcast("ping", reply=True, destination=destination,
-                     timeout=timeout, **kwargs)
-
-
-def rate_limit(task_name, rate_limit, destination=None, **kwargs):
-    """Set rate limit for task by type.
-
-    :param task_name: Type of task to change rate limit for.
-    :param rate_limit: The rate limit as tasks per second, or a rate limit
-      string (``"100/m"``, etc. see :attr:`celery.task.base.Task.rate_limit`
-      for more information).
-    :keyword destination: If set, a list of the hosts to send the command to,
-        when empty broadcast to all workers.
-    :keyword connection: Custom broker connection to use, if not set,
-        a connection will be established automatically.
-    :keyword connect_timeout: Timeout for new connection if a custom
-        connection is not provided.
-    :keyword reply: Wait for and return the reply.
-    :keyword timeout: Timeout in seconds to wait for the reply.
-    :keyword limit: Limit number of replies.
-
-    """
-    return broadcast("rate_limit", destination=destination,
-                                   arguments={"task_name": task_name,
-                                              "rate_limit": rate_limit},
-                                   **kwargs)
+from kombu.pidbox import Mailbox
+
+from celery.app import app_or_default
 
 
 def flatten_reply(reply):
@@ -95,12 +10,13 @@ def flatten_reply(reply):
     return nodes
 
 
-class inspect(object):
+class Inspect(object):
 
-    def __init__(self, destination=None, timeout=1, callback=None):
+    def __init__(self, control, destination=None, timeout=1, callback=None,):
         self.destination = destination
         self.timeout = timeout
         self.callback = callback
+        self.control = control
 
     def _prepare(self, reply):
         if not reply:
@@ -112,7 +28,8 @@ class inspect(object):
         return by_node
 
     def _request(self, command, **kwargs):
-        return self._prepare(broadcast(command, arguments=kwargs,
+        return self._prepare(self.control.broadcast(command,
+                                      arguments=kwargs,
                                       destination=self.destination,
                                       callback=self.callback,
                                       timeout=self.timeout, reply=True))
@@ -153,53 +70,151 @@ class inspect(object):
     def cancel_consumer(self, queue, **kwargs):
         return self._request("cancel_consumer", queue=queue, **kwargs)
 
-
-@with_connection
-def broadcast(command, arguments=None, destination=None, connection=None,
-        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT, reply=False,
-        timeout=1, limit=None, callback=None):
-    """Broadcast a control command to the celery workers.
-
-    :param command: Name of command to send.
-    :param arguments: Keyword arguments for the command.
-    :keyword destination: If set, a list of the hosts to send the command to,
-        when empty broadcast to all workers.
-    :keyword connection: Custom broker connection to use, if not set,
-        a connection will be established automatically.
-    :keyword connect_timeout: Timeout for new connection if a custom
-        connection is not provided.
-    :keyword reply: Wait for and return the reply.
-    :keyword timeout: Timeout in seconds to wait for the reply.
-    :keyword limit: Limit number of replies.
-    :keyword callback: Callback called immediately for each reply
-        received.
-
-    """
-    arguments = arguments or {}
-    reply_ticket = reply and gen_unique_id() or None
-
-    if destination is not None and not isinstance(destination, (list, tuple)):
-        raise ValueError("destination must be a list/tuple not %s" % (
-                type(destination)))
-
-    # Set reply limit to number of destinations (if specificed)
-    if limit is None and destination:
-        limit = destination and len(destination) or None
-
-    crq = None
-    if reply_ticket:
-        crq = ControlReplyConsumer(connection, reply_ticket)
-
-    broadcast = BroadcastPublisher(connection)
-    try:
-        broadcast.send(command, arguments, destination=destination,
-                       reply_ticket=reply_ticket)
-    finally:
-        broadcast.close()
-
-    if crq:
-        try:
-            return crq.collect(limit=limit, timeout=timeout,
-                               callback=callback)
-        finally:
-            crq.close()
+    def active_queues(self):
+        return self._request("active_queues")
+
+
+class Control(object):
+    Mailbox = Mailbox
+
+    def __init__(self, app):
+        self.app = app
+        self.mailbox = self.Mailbox("celeryd", type="fanout")
+
+    def inspect(self, destination=None, timeout=1, callback=None):
+        return Inspect(self, destination=destination, timeout=timeout,
+                             callback=callback)
+
+    def discard_all(self, connection=None, connect_timeout=None):
+        """Discard all waiting tasks.
+
+        This will ignore all tasks waiting for execution, and they will
+        be deleted from the messaging server.
+
+        :returns: the number of tasks discarded.
+
+        """
+
+        def _do_discard(connection=None, connect_timeout=None):
+            consumer = self.app.amqp.get_task_consumer(connection=connection)
+            try:
+                return consumer.discard_all()
+            finally:
+                consumer.close()
+
+        return self.app.with_default_connection(_do_discard)(
+                connection=connection, connect_timeout=connect_timeout)
+
+    def revoke(self, task_id, destination=None, terminate=False,
+            signal="SIGTERM", **kwargs):
+        """Revoke a task by id.
+
+        If a task is revoked, the workers will ignore the task and
+        not execute it after all.
+
+        :param task_id: Id of the task to revoke.
+        :keyword terminate: Also terminate the process currently working
+            on the task (if any).
+        :keyword signal: Name of signal to send to process if terminate.
+            Default is TERM.
+        :keyword destination: If set, a list of the hosts to send the
+            command to, when empty broadcast to all workers.
+        :keyword connection: Custom broker connection to use, if not set,
+            a connection will be established automatically.
+        :keyword connect_timeout: Timeout for new connection if a custom
+            connection is not provided.
+        :keyword reply: Wait for and return the reply.
+        :keyword timeout: Timeout in seconds to wait for the reply.
+        :keyword limit: Limit number of replies.
+
+        """
+        return self.broadcast("revoke", destination=destination,
+                              arguments={"task_id": task_id,
+                                         "terminate": terminate,
+                                         "signal": signal}, **kwargs)
+
+    def ping(self, destination=None, timeout=1, **kwargs):
+        """Ping workers.
+
+        Returns answer from alive workers.
+
+        :keyword destination: If set, a list of the hosts to send the
+            command to, when empty broadcast to all workers.
+        :keyword connection: Custom broker connection to use, if not set,
+            a connection will be established automatically.
+        :keyword connect_timeout: Timeout for new connection if a custom
+            connection is not provided.
+        :keyword reply: Wait for and return the reply.
+        :keyword timeout: Timeout in seconds to wait for the reply.
+        :keyword limit: Limit number of replies.
+
+        """
+        return self.broadcast("ping", reply=True, destination=destination,
+                              timeout=timeout, **kwargs)
+
+    def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
+        """Set rate limit for task by type.
+
+        :param task_name: Type of task to change rate limit for.
+        :param rate_limit: The rate limit as tasks per second, or a rate limit
+            string (`"100/m"`, etc.
+            see :attr:`celery.task.base.Task.rate_limit` for
+            more information).
+        :keyword destination: If set, a list of the hosts to send the
+            command to, when empty broadcast to all workers.
+        :keyword connection: Custom broker connection to use, if not set,
+            a connection will be established automatically.
+        :keyword connect_timeout: Timeout for new connection if a custom
+            connection is not provided.
+        :keyword reply: Wait for and return the reply.
+        :keyword timeout: Timeout in seconds to wait for the reply.
+        :keyword limit: Limit number of replies.
+
+        """
+        return self.broadcast("rate_limit", destination=destination,
+                              arguments={"task_name": task_name,
+                                         "rate_limit": rate_limit},
+                              **kwargs)
+
+    def broadcast(self, command, arguments=None, destination=None,
+            connection=None, connect_timeout=None, reply=False, timeout=1,
+            limit=None, callback=None, channel=None):
+        """Broadcast a control command to the celery workers.
+
+        :param command: Name of command to send.
+        :param arguments: Keyword arguments for the command.
+        :keyword destination: If set, a list of the hosts to send the
+            command to; when empty, broadcast to all workers.
+        :keyword connection: Custom broker connection to use, if not set,
+            a connection will be established automatically.
+        :keyword connect_timeout: Timeout for new connection if a custom
+            connection is not provided.
+        :keyword reply: Wait for and return the reply.
+        :keyword timeout: Timeout in seconds to wait for the reply.
+        :keyword limit: Limit number of replies.
+        :keyword callback: Callback called immediately for each reply
+            received.
+
+        """
+        def _do_broadcast(connection=None, connect_timeout=None,
+                          channel=None):
+            return self.mailbox(connection)._broadcast(command, arguments,
+                                                       destination, reply,
+                                                       timeout, limit,
+                                                       callback,
+                                                       channel=channel)
+
+        if channel:
+            return _do_broadcast(connection, connect_timeout, channel)
+        else:
+            return self.app.with_default_connection(_do_broadcast)(
+                    connection=connection, connect_timeout=connect_timeout)
+
+
+_default_control = Control(app_or_default())
+broadcast = _default_control.broadcast
+rate_limit = _default_control.rate_limit
+ping = _default_control.ping
+revoke = _default_control.revoke
+discard_all = _default_control.discard_all
+inspect = _default_control.inspect
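
The module-level shortcuts above expose the control commands without creating a `Control` instance by hand. A minimal usage sketch, assuming a reachable broker and at least one running worker; the task name `tasks.add` and the task id below are placeholders:

    from celery.task.control import (broadcast, discard_all, ping,
                                     rate_limit, revoke)

    # Ask all workers to reply to a ping within two seconds.
    replies = ping(timeout=2)

    # Change the rate limit for a task type on every worker.
    rate_limit("tasks.add", "100/m")

    # Revoke a task by id; terminate=True also kills the process
    # currently executing it (SIGTERM by default).
    revoke("d9078da5-9915-40a0-bfa1-392c7bde42ed", terminate=True)

    # Low-level broadcast: send any remote-control command and collect
    # at most two replies.
    broadcast("ping", reply=True, limit=2)

    # Remove every message waiting in the task queue; returns the count.
    discarded = discard_all()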

+ 5 - 13
celery/task/http.py

@@ -24,7 +24,7 @@ class UnknownStatusError(InvalidResponseError):
 
 
 def maybe_utf8(value):
-    """Encode utf-8 value, only if the value is actually utf-8."""
+    """Encode to utf-8, only if the value is Unicode."""
     if isinstance(value, unicode):
         return value.encode("utf-8")
     return value
@@ -77,7 +77,7 @@ class MutableURL(object):
     """
     def __init__(self, url):
         self.parts = urlparse(url)
-        self._query = dict(parse_qsl(self.parts[4]))
+        self.query = dict(parse_qsl(self.parts[4]))
 
     def __str__(self):
         scheme, netloc, path, params, query, fragment = self.parts
@@ -93,21 +93,13 @@ class MutableURL(object):
     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, str(self))
 
-    def _get_query(self):
-        return self._query
-
-    def _set_query(self, query):
-        self._query = query
-
-    query = property(_get_query, _set_query)
-
 
 class HttpDispatch(object):
     """Make task HTTP request and collect the task result.
 
     :param url: The URL to request.
-    :param method: HTTP method used. Currently supported methods are ``GET``
-        and ``POST``.
+    :param method: HTTP method used. Currently supported methods are `GET`
+        and `POST`.
     :param task_kwargs: Task keyword arguments.
     :param logger: Logger used for user/system feedback.
 
@@ -151,7 +143,7 @@ class HttpDispatchTask(BaseTask):
 
     :keyword url: The URL location of the HTTP callback task.
     :keyword method: Method to use when dispatching the callback. Usually
-        ``GET`` or ``POST``.
+        `GET` or `POST`.
     :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.
 
     .. attribute:: url
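
With this change the parsed query string is exposed directly as the plain dict `MutableURL.query`, and `str()` rebuilds the URL from `self.parts` together with whatever the dict currently contains (the remainder of `__str__` falls outside the hunk shown above). A rough sketch, using a made-up URL:

    from celery.task.http import MutableURL

    url = MutableURL("http://example.com/apply?x=10&y=20")
    url.query["z"] = "30"   # plain dict attribute, no property indirection
    text = str(url)         # re-serialized with the updated query string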

+ 4 - 0
celery/task/schedules.py

@@ -1 +1,5 @@
+import warnings
 from celery.schedules import schedule, crontab_parser, crontab
+
+warnings.warn(DeprecationWarning(
+    "celery.task.schedules is deprecated and renamed to celery.schedules"))

+ 65 - 84
celery/task/sets.py

@@ -1,14 +1,12 @@
 import warnings
 
-from UserList import UserList
+from kombu.utils import cached_property
 
-from celery import conf
 from celery import registry
+from celery.app import app_or_default
 from celery.datastructures import AttributeDict
-from celery.messaging import with_connection
-from celery.messaging import TaskPublisher
-from celery.result import TaskSetResult
 from celery.utils import gen_unique_id
+from celery.utils.compat import UserList
 
 TASKSET_DEPRECATION_TEXT = """\
 Using this invocation of TaskSet is deprecated and will be removed
@@ -22,7 +20,6 @@ this so the syntax has been changed to:
     ts = TaskSet(tasks=[
             %(cls)s.subtask(args1, kwargs1, options1),
             %(cls)s.subtask(args2, kwargs2, options2),
-            %(cls)s.subtask(args3, kwargs3, options3),
             ...
             %(cls)s.subtask(argsN, kwargsN, optionsN),
     ])
@@ -57,13 +54,11 @@ class subtask(AttributeDict):
 
     """
 
-    def __init__(self, task=None, args=None, kwargs=None, options=None,
-            **extra):
+    def __init__(self, task=None, args=None, kwargs=None, options=None, **ex):
         init = super(subtask, self).__init__
 
         if isinstance(task, dict):
-            # Use the values from a dict.
-            return init(task)
+            return init(task)  # works like dict(d)
 
         # Also supports using task class/instance instead of string name.
         try:
@@ -72,11 +67,11 @@ class subtask(AttributeDict):
             task_name = task
 
         init(task=task_name, args=tuple(args or ()),
-                             kwargs=dict(kwargs or {}, **extra),
+                             kwargs=dict(kwargs or {}, **ex),
                              options=options or {})
 
     def delay(self, *argmerge, **kwmerge):
-        """Shortcut to ``apply_async(argmerge, kwargs)``."""
+        """Shortcut to `apply_async(argmerge, kwargs)`."""
         return self.apply_async(args=argmerge, kwargs=kwmerge)
 
     def apply(self, args=(), kwargs={}, **options):
@@ -85,7 +80,7 @@ class subtask(AttributeDict):
         args = tuple(args) + tuple(self.args)
         kwargs = dict(self.kwargs, **kwargs)
         options = dict(self.options, **options)
-        return self.get_type().apply(args, kwargs, **options)
+        return self.type.apply(args, kwargs, **options)
 
     def apply_async(self, args=(), kwargs={}, **options):
         """Apply this task asynchronously."""
@@ -93,44 +88,48 @@ class subtask(AttributeDict):
         args = tuple(args) + tuple(self.args)
         kwargs = dict(self.kwargs, **kwargs)
         options = dict(self.options, **options)
-        return self.get_type().apply_async(args, kwargs, **options)
+        return self.type.apply_async(args, kwargs, **options)
 
     def get_type(self):
-        # For JSON serialization, the task class is lazily loaded,
+        return self.type
+
+    def __reduce__(self):
+        # for serialization, the task type is lazily loaded,
         # and not stored in the dict itself.
+        return (self.__class__, (dict(self), ), None)
+
+    def __repr__(self, kwformat=lambda i: "%s=%r" % i, sep=', '):
+        kw = self["kwargs"]
+        return "%s(%s%s%s)" % (self["task"], sep.join(map(repr, self["args"])),
+                kw and sep or "", sep.join(map(kwformat, kw.iteritems())))
+
+    @cached_property
+    def type(self):
         return registry.tasks[self.task]
 
 
 class TaskSet(UserList):
     """A task containing several subtasks, making it possible
-    to track how many, or when all of the tasks has been completed.
+    to track how many, or when all of the tasks have been completed.
 
     :param tasks: A list of :class:`subtask` instances.
 
-    .. attribute:: total
-
-        Total number of subtasks in this task set.
-
     Example::
 
-        >>> from djangofeeds.tasks import RefreshFeedTask
-        >>> from celery.task.sets import TaskSet, subtask
-        >>> urls = ("http://cnn.com/rss",
-        ...         "http://bbc.co.uk/rss",
-        ...         "http://xkcd.com/rss")
-        >>> subtasks = [RefreshFeedTask.subtask(kwargs={"feed_url": url})
-        ...                 for url in urls]
-        >>> taskset = TaskSet(tasks=subtasks)
+        >>> urls = ("http://cnn.com/rss", "http://bbc.co.uk/rss")
+        >>> taskset = TaskSet(refresh_feed.subtask((url, )) for url in urls)
         >>> taskset_result = taskset.apply_async()
-        >>> list_of_return_values = taskset_result.join()
+        >>> list_of_return_values = taskset_result.join()  # *expensive*
 
     """
-    Publisher = TaskPublisher
+    _task = None                # compat
+    _task_name = None           # compat
 
-    _task = None                                                # compat
-    _task_name = None                                           # compat
+    #: Total number of subtasks in this set.
+    total = None
 
-    def __init__(self, task=None, tasks=None):
+    def __init__(self, task=None, tasks=None, app=None, Publisher=None):
+        self.app = app_or_default(app)
         if task is not None:
             if hasattr(task, "__iter__"):
                 tasks = task
@@ -144,63 +143,45 @@ class TaskSet(UserList):
                 warnings.warn(TASKSET_DEPRECATION_TEXT % {
                                 "cls": task.__class__.__name__},
                               DeprecationWarning)
-
-        self.data = list(tasks)
+        self.data = list(tasks or [])
         self.total = len(self.tasks)
-
-    @with_connection
-    def apply_async(self, connection=None,
-            connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
-        """Run all tasks in the taskset.
-
-        Returns a :class:`celery.result.TaskSetResult` instance.
-
-        Example
-
-            >>> ts = TaskSet(tasks=(
-            ...         RefreshFeedTask.subtask(["http://foo.com/rss"]),
-            ...         RefreshFeedTask.subtask(["http://bar.com/rss"]),
-            ... ))
-            >>> result = ts.apply_async()
-            >>> result.taskset_id
-            "d2c9b261-8eff-4bfb-8459-1e1b72063514"
-            >>> result.subtask_ids
-            ["b4996460-d959-49c8-aeb9-39c530dcde25",
-            "598d2d18-ab86-45ca-8b4f-0779f5d6a3cb"]
-            >>> result.waiting()
-            True
-            >>> time.sleep(10)
-            >>> result.ready()
-            True
-            >>> result.successful()
-            True
-            >>> result.failed()
-            False
-            >>> result.join()
-            [True, True]
-
-        """
-        if conf.ALWAYS_EAGER:
-            return self.apply()
-
-        taskset_id = gen_unique_id()
-        publisher = self.Publisher(connection=connection)
+        self.Publisher = Publisher or self.app.amqp.TaskPublisher
+
+    def apply_async(self, connection=None, connect_timeout=None,
+            publisher=None, taskset_id=None):
+        """Apply taskset."""
+        return self.app.with_default_connection(self._apply_async)(
+                    connection=connection,
+                    connect_timeout=connect_timeout,
+                    publisher=publisher,
+                    taskset_id=taskset_id)
+
+    def _apply_async(self, connection=None, connect_timeout=None,
+            publisher=None, taskset_id=None):
+        if self.app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(taskset_id=taskset_id)
+
+        setid = taskset_id or gen_unique_id()
+        pub = publisher or self.Publisher(connection=connection)
         try:
-            results = [task.apply_async(taskset_id=taskset_id,
-                                        publisher=publisher)
-                            for task in self.tasks]
+            results = self._async_results(setid, pub)
         finally:
-            publisher.close()
+            if not publisher:  # created by us.
+                pub.close()
+
+        return self.app.TaskSetResult(setid, results)
 
-        return TaskSetResult(taskset_id, results)
+    def _async_results(self, taskset_id, publisher):
+        return [task.apply_async(taskset_id=taskset_id, publisher=publisher)
+                for task in self.tasks]
 
-    def apply(self):
-        """Applies the taskset locally."""
-        taskset_id = gen_unique_id()
+    def apply(self, taskset_id=None):
+        """Applies the taskset locally by blocking until all tasks return."""
+        setid = taskset_id or gen_unique_id()
+        return self.app.TaskSetResult(setid, self._sync_results(setid))
 
-        # This will be filled with EagerResults.
-        return TaskSetResult(taskset_id, [task.apply(taskset_id=taskset_id)
-                                            for task in self.tasks])
+    def _sync_results(self, taskset_id):
+        return [task.apply(taskset_id=taskset_id) for task in self.tasks]
 
     @property
     def tasks(self):
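
A rough usage sketch of the reworked `subtask`/`TaskSet` pair, assuming a registered task named `refresh_feed` that takes a feed URL (the task itself is not part of this diff):

    from celery.task.sets import TaskSet, subtask

    # `refresh_feed` is a placeholder task defined elsewhere.
    sig = subtask(refresh_feed, args=("http://xkcd.com/rss", ))
    sig.delay()                   # same as sig.apply_async()

    # TaskSet accepts any iterable of subtasks and is bound to an app.
    urls = ("http://cnn.com/rss", "http://bbc.co.uk/rss")
    ts = TaskSet(refresh_feed.subtask((url, )) for url in urls)
    result = ts.apply_async()     # returns a TaskSetResult
    result.join()                 # blocks until every subtask has finished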

+ 62 - 7
celery/tests/__init__.py

@@ -1,21 +1,76 @@
+import logging
 import os
 import sys
 
-config = os.environ.setdefault("CELERY_TEST_CONFIG_MODULE",
-                               "celery.tests.config")
+from importlib import import_module
 
-os.environ["CELERY_CONFIG_MODULE"] = config
+config_module = os.environ.setdefault("CELERY_TEST_CONFIG_MODULE",
+                                      "celery.tests.config")
+
+os.environ.setdefault("CELERY_CONFIG_MODULE", config_module)
 os.environ["CELERY_LOADER"] = "default"
+os.environ["EVENTLET_NOPATCH"] = "yes"
+os.environ["GEVENT_NOPATCH"] = "yes"
+
+try:
+    WindowsError = WindowsError
+except NameError:
+    class WindowsError(Exception):
+        pass
 
 
 def teardown():
-    import threading
+    # Don't want SUBDEBUG log messages at finalization.
+    try:
+        from multiprocessing.util import get_logger
+    except ImportError:
+        pass
+    else:
+        get_logger().setLevel(logging.WARNING)
+
+    # Make sure test database is removed.
     import os
     if os.path.exists("test.db"):
-        os.remove("test.db")
+        try:
+            os.remove("test.db")
+        except WindowsError:
+            pass
+
+    # Make sure there are no remaining threads at shutdown.
+    import threading
     remaining_threads = [thread for thread in threading.enumerate()
-                            if thread.name != "MainThread"]
+                            if thread.getName() != "MainThread"]
     if remaining_threads:
         sys.stderr.write(
-            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" % (
+            "\n\n**WARNING**: Remaining threads at teardown: %r...\n" % (
                 remaining_threads))
+
+
+def find_distribution_modules(name=__name__, file=__file__):
+    current_dist_depth = len(name.split(".")) - 1
+    current_dist = os.path.join(os.path.dirname(file),
+                                *([os.pardir] * current_dist_depth))
+    abs = os.path.abspath(current_dist)
+    dist_name = os.path.basename(abs)
+
+    for dirpath, dirnames, filenames in os.walk(abs):
+        package = (dist_name + dirpath[len(abs):]).replace("/", ".")
+        if "__init__.py" in filenames:
+            yield package
+            for filename in filenames:
+                if filename.endswith(".py") and filename != "__init__.py":
+                    yield ".".join([package, filename])[:-3]
+
+
+def import_all_modules(name=__name__, file=__file__,
+        skip=["celery.decorators", "celery.contrib.batches"]):
+    for module in find_distribution_modules(name, file):
+        if module not in skip:
+            try:
+                import_module(module)
+            except ImportError:
+                pass
+
+
+if os.environ.get("COVER_ALL_MODULES") or "--with-coverage3" in sys.argv:
+    import_all_modules()

+ 17 - 2
celery/tests/config.py

@@ -1,7 +1,12 @@
-CARROT_BACKEND = "memory"
+import os
 
+BROKER_BACKEND = "memory"
 
-CELERY_RESULT_BACKEND = "database"
+#: Don't want log output when running suite.
+CELERYD_HIJACK_ROOT_LOGGER = False
+
+CELERY_RESULT_BACKEND = "cache"
+CELERY_CACHE_BACKEND = "memory"
 CELERY_RESULT_DBURI = "sqlite:///test.db"
 CELERY_SEND_TASK_ERROR_EMAILS = False
 
@@ -11,3 +16,13 @@ CELERY_DEFAULT_ROUTING_KEY = "testcelery"
 CELERY_QUEUES = {"testcelery": {"binding_key": "testcelery"}}
 
 CELERYD_LOG_COLOR = False
+
+# Tyrant results tests (only executed if installed and running)
+TT_HOST = os.environ.get("TT_HOST") or "localhost"
+TT_PORT = int(os.environ.get("TT_PORT") or 1978)
+
+# Redis results tests (only executed if installed and running)
+REDIS_HOST = os.environ.get("REDIS_HOST") or "localhost"
+REDIS_PORT = int(os.environ.get("REDIS_PORT") or 6379)
+REDIS_DB = os.environ.get("REDIS_DB") or 0
+REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD")

+ 14 - 13
celery/tests/functional/case.py

@@ -5,14 +5,16 @@ import signal
 import socket
 import sys
 import traceback
-import unittest2 as unittest
 
 from itertools import count
+from time import time
 
 from celery.exceptions import TimeoutError
 from celery.task.control import ping, flatten_reply, inspect
 from celery.utils import get_full_cls_name
 
+from celery.tests.utils import unittest
+
 HOSTNAME = socket.gethostname()
 
 
@@ -21,8 +23,9 @@ def say(msg):
 
 
 def try_while(fun, reason="Timed out", timeout=10, interval=0.5):
+    time_start = time()
     for iterations in count(0):
-        if iterations * interval >= timeout:
+        if time() - time_start >= timeout:
             raise TimeoutError()
         ret = fun()
         if ret:
@@ -46,11 +49,9 @@ class Worker(object):
     def _fork_and_exec(self):
         pid = os.fork()
         if pid == 0:
-            os.execv(sys.executable,
-                    [sys.executable] + ["-m", "celery.bin.celeryd",
-                                        "-l", self.loglevel,
-                                        "-n", self.hostname])
-            os.exit()
+            from celery import current_app
+            current_app.worker_main(["celeryd", "--loglevel=DEBUG",
+                                                "-n", self.hostname])
         self.pid = pid
 
     def is_alive(self, timeout=1):
@@ -58,10 +59,10 @@ class Worker(object):
                  timeout=timeout)
         return self.hostname in flatten_reply(r)
 
-    def wait_until_started(self, timeout=10, interval=0.2):
+    def wait_until_started(self, timeout=10, interval=0.5):
         try_while(lambda: self.is_alive(interval),
                 "Worker won't start (after %s secs.)" % timeout,
-                interval=0.2, timeout=10)
+                interval=interval, timeout=timeout)
         say("--WORKER %s IS ONLINE--" % self.hostname)
 
     def ensure_shutdown(self, timeout=10, interval=0.5):
@@ -115,7 +116,7 @@ class WorkerCase(unittest.TestCase):
         self.assertTrue(self.worker.is_alive)
 
     def inspect(self, timeout=1):
-        return inspect(self.worker.hostname, timeout=timeout)
+        return inspect([self.worker.hostname], timeout=timeout)
 
     def my_response(self, response):
         return flatten_reply(response)[self.worker.hostname]
@@ -123,7 +124,7 @@ class WorkerCase(unittest.TestCase):
     def is_accepted(self, task_id, interval=0.5):
         active = self.inspect(timeout=interval).active()
         if active:
-            for task in active:
+            for task in active[self.worker.hostname]:
                 if task["id"] == task_id:
                     return True
         return False
@@ -131,7 +132,7 @@ class WorkerCase(unittest.TestCase):
     def is_reserved(self, task_id, interval=0.5):
         reserved = self.inspect(timeout=interval).reserved()
         if reserved:
-            for task in reserved:
+            for task in reserved[self.worker.hostname]:
                 if task["id"] == task_id:
                     return True
         return False
@@ -139,7 +140,7 @@ class WorkerCase(unittest.TestCase):
     def is_scheduled(self, task_id, interval=0.5):
         schedule = self.inspect(timeout=interval).scheduled()
         if schedule:
-            for item in schedule:
+            for item in schedule[self.worker.hostname]:
                 if item["request"]["id"] == task_id:
                     return True
         return False
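
In `_fork_and_exec` above, the child now starts the worker through the application's own entry point (`worker_main`) instead of exec'ing a new interpreter. A rough sketch of that entry point used on its own (the argv values are illustrative, and the call blocks until the worker shuts down):

    from celery import current_app

    current_app.worker_main(["celeryd", "--loglevel=INFO"])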

+ 1 - 1
celery/tests/functional/tasks.py

@@ -1,6 +1,6 @@
 import time
 
-from celery.decorators import task
+from celery.task import task
 from celery.task.sets import subtask
 
 

+ 0 - 0
celery/management/__init__.py → celery/tests/test_app/__init__.py


+ 218 - 0
celery/tests/test_app/test_app.py

@@ -0,0 +1,218 @@
+import os
+
+from celery import Celery
+from celery.app import defaults
+from celery.app.base import BaseApp
+from celery.loaders.base import BaseLoader
+from celery.utils.serialization import pickle
+
+from celery.tests import config
+from celery.tests.utils import unittest
+
+THIS_IS_A_KEY = "this is a value"
+
+
+class Object(object):
+
+    def __init__(self, **kwargs):
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+
+def _get_test_config():
+    return dict((key, getattr(config, key))
+                    for key in dir(config)
+                        if key.isupper() and not key.startswith("_"))
+
+test_config = _get_test_config()
+
+
+class test_App(unittest.TestCase):
+
+    def setUp(self):
+        self.app = Celery(set_as_current=False)
+        self.app.conf.update(test_config)
+
+    def test_task(self):
+        app = Celery("foozibari", set_as_current=False)
+
+        def fun():
+            pass
+
+        fun.__module__ = "__main__"
+        task = app.task(fun)
+        self.assertEqual(task.name, app.main + ".fun")
+
+    def test_TaskSet(self):
+        ts = self.app.TaskSet()
+        self.assertListEqual(ts.tasks, [])
+        self.assertIs(ts.app, self.app)
+
+    def test_pickle_app(self):
+        changes = dict(THE_FOO_BAR="bars",
+                       THE_MII_MAR="jars")
+        self.app.conf.update(changes)
+        saved = pickle.dumps(self.app)
+        self.assertLess(len(saved), 2048)
+        restored = pickle.loads(saved)
+        self.assertDictContainsSubset(changes, restored.conf)
+
+    def test_worker_main(self):
+        from celery.bin import celeryd
+
+        class WorkerCommand(celeryd.WorkerCommand):
+
+            def execute_from_commandline(self, argv):
+                return argv
+
+        prev, celeryd.WorkerCommand = celeryd.WorkerCommand, WorkerCommand
+        try:
+            ret = self.app.worker_main(argv=["--version"])
+            self.assertListEqual(ret, ["--version"])
+        finally:
+            celeryd.WorkerCommand = prev
+
+    def test_config_from_envvar(self):
+        os.environ["CELERYTEST_CONFIG_OBJECT"] = \
+                "celery.tests.test_app.test_app"
+        self.app.config_from_envvar("CELERYTEST_CONFIG_OBJECT")
+        self.assertEqual(self.app.conf.THIS_IS_A_KEY, "this is a value")
+
+    def test_config_from_object(self):
+
+        class Object(object):
+            LEAVE_FOR_WORK = True
+            MOMENT_TO_STOP = True
+            CALL_ME_BACK = 123456789
+            WANT_ME_TO = False
+            UNDERSTAND_ME = True
+
+        self.app.config_from_object(Object())
+
+        self.assertTrue(self.app.conf.LEAVE_FOR_WORK)
+        self.assertTrue(self.app.conf.MOMENT_TO_STOP)
+        self.assertEqual(self.app.conf.CALL_ME_BACK, 123456789)
+        self.assertFalse(self.app.conf.WANT_ME_TO)
+        self.assertTrue(self.app.conf.UNDERSTAND_ME)
+
+    def test_config_from_cmdline(self):
+        cmdline = [".always_eager=no",
+                   ".result_backend=/dev/null",
+                   '.task_error_whitelist=(list)["a", "b", "c"]',
+                   "celeryd.prefetch_multiplier=368",
+                   ".foobarstring=(string)300",
+                   ".foobarint=(int)300",
+                   '.result_engine_options=(dict){"foo": "bar"}']
+        self.app.config_from_cmdline(cmdline, namespace="celery")
+        self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER)
+        self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, "/dev/null")
+        self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368)
+        self.assertListEqual(self.app.conf.CELERY_TASK_ERROR_WHITELIST,
+                             ["a", "b", "c"])
+        self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, "300")
+        self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300)
+        self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS,
+                             {"foo": "bar"})
+
+    def test_compat_setting_CELERY_BACKEND(self):
+
+        self.app.config_from_object(Object(CELERY_BACKEND="set_by_us"))
+        self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, "set_by_us")
+
+    def test_setting_BROKER_TRANSPORT_OPTIONS(self):
+
+        _args = {'foo': 'bar', 'spam': 'baz'}
+
+        self.app.config_from_object(Object())
+        self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, {})
+
+        self.app.config_from_object(Object(BROKER_TRANSPORT_OPTIONS=_args))
+        self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, _args)
+
+    def test_Windows_log_color_disabled(self):
+        self.app.IS_WINDOWS = True
+        self.assertFalse(self.app.log.supports_color())
+
+    def test_compat_setting_CARROT_BACKEND(self):
+        self.app.config_from_object(Object(CARROT_BACKEND="set_by_us"))
+        self.assertEqual(self.app.conf.BROKER_BACKEND, "set_by_us")
+
+    def test_mail_admins(self):
+
+        class Loader(BaseLoader):
+
+            def mail_admins(*args, **kwargs):
+                return args, kwargs
+
+        self.app.loader = Loader()
+        self.app.conf.ADMINS = None
+        self.assertFalse(self.app.mail_admins("Subject", "Body"))
+        self.app.conf.ADMINS = [("George Costanza", "george@vandelay.com")]
+        self.assertTrue(self.app.mail_admins("Subject", "Body"))
+
+    def test_amqp_get_broker_info(self):
+        self.assertDictContainsSubset({"hostname": "localhost",
+                                       "userid": "guest",
+                                       "password": "guest",
+                                       "virtual_host": "/"},
+                                      self.app.broker_connection().info())
+        self.app.conf.BROKER_PORT = 1978
+        self.app.conf.BROKER_VHOST = "foo"
+        self.assertDictContainsSubset({"port": 1978,
+                                       "virtual_host": "foo"},
+                                      self.app.broker_connection().info())
+        conn = self.app.broker_connection(virtual_host="/value")
+        self.assertDictContainsSubset({"virtual_host": "/value"},
+                                      conn.info())
+
+    def test_send_task_sent_event(self):
+        from celery.app import amqp
+
+        class Dispatcher(object):
+            sent = []
+
+            def send(self, type, **fields):
+                self.sent.append((type, fields))
+
+        conn = self.app.broker_connection()
+        chan = conn.channel()
+        try:
+            for e in ("foo_exchange", "moo_exchange", "bar_exchange"):
+                chan.exchange_declare(e, "direct", durable=True)
+                chan.queue_declare(e, durable=True)
+                chan.queue_bind(e, e, e)
+        finally:
+            chan.close()
+        assert conn.transport_cls == "memory"
+
+        pub = self.app.amqp.TaskPublisher(conn, exchange="foo_exchange")
+        self.assertIn("foo_exchange", amqp._exchanges_declared)
+
+        dispatcher = Dispatcher()
+        self.assertTrue(pub.delay_task("footask", (), {},
+                                       exchange="moo_exchange",
+                                       routing_key="moo_exchange",
+                                       event_dispatcher=dispatcher))
+        self.assertTrue(dispatcher.sent)
+        self.assertEqual(dispatcher.sent[0][0], "task-sent")
+        self.assertTrue(pub.delay_task("footask", (), {},
+                                       event_dispatcher=dispatcher,
+                                       exchange="bar_exchange",
+                                       routing_key="bar_exchange"))
+        self.assertIn("bar_exchange", amqp._exchanges_declared)
+
+
+class test_BaseApp(unittest.TestCase):
+
+    def test_on_init(self):
+        BaseApp()
+
+
+class test_defaults(unittest.TestCase):
+
+    def test_str_to_bool(self):
+        for s in ("false", "no", "0"):
+            self.assertFalse(defaults.str_to_bool(s))
+        for s in ("true", "yes", "1"):
+            self.assertTrue(defaults.str_to_bool(s))
+        self.assertRaises(TypeError, defaults.str_to_bool, "unsure")

+ 2 - 2
celery/tests/test_messaging.py → celery/tests/test_app/test_app_amqp.py

@@ -1,6 +1,6 @@
-import unittest2 as unittest
+from celery.tests.utils import unittest
 
-from celery.messaging import MSG_OPTIONS, extract_msg_options
+from celery.app.amqp import MSG_OPTIONS, extract_msg_options
 
 
 class TestMsgOptions(unittest.TestCase):

+ 79 - 16
celery/tests/test_beat.py → celery/tests/test_app/test_beat.py

@@ -1,11 +1,15 @@
 import logging
-import unittest2 as unittest
+from celery.tests.utils import unittest
 
 from datetime import datetime, timedelta
 
+from nose import SkipTest
+
 from celery import beat
+from celery import registry
 from celery.result import AsyncResult
 from celery.schedules import schedule
+from celery.task.base import Task
 from celery.utils import gen_unique_id
 
 
@@ -147,6 +151,30 @@ always_pending = mocked_schedule(False, 1)
 
 class test_Scheduler(unittest.TestCase):
 
+    def test_custom_schedule_dict(self):
+        custom = {"foo": "bar"}
+        scheduler = mScheduler(schedule=custom, lazy=True)
+        self.assertIs(scheduler.data, custom)
+
+    def test_apply_async_uses_registered_task_instances(self):
+        through_task = [False]
+
+        class MockTask(Task):
+
+            @classmethod
+            def apply_async(cls, *args, **kwargs):
+                through_task[0] = True
+
+        assert MockTask.name in registry.tasks
+
+        scheduler = mScheduler()
+        scheduler.apply_async(scheduler.Entry(task=MockTask.name))
+        self.assertTrue(through_task[0])
+
+    def test_info(self):
+        scheduler = mScheduler()
+        self.assertIsInstance(scheduler.info, basestring)
+
     def test_due_tick(self):
         scheduler = mScheduler()
         scheduler.add(name="test_due_tick",
@@ -205,12 +233,6 @@ class test_Scheduler(unittest.TestCase):
         scheduler.setup_schedule()
         scheduler.close()
 
-    def test_set_schedule(self):
-        scheduler = mScheduler()
-        a, b = scheduler.schedule, {}
-        scheduler.schedule = b
-        self.assertIs(scheduler.schedule, b)
-
     def test_merge_inplace(self):
         a = mScheduler()
         b = mScheduler()
@@ -218,26 +240,41 @@ class test_Scheduler(unittest.TestCase):
                             "bar": {"schedule": mocked_schedule(True, 20)}})
         b.update_from_dict({"bar": {"schedule": mocked_schedule(True, 40)},
                             "baz": {"schedule": mocked_schedule(True, 10)}})
-        a.merge_inplace(b)
+        a.merge_inplace(b.schedule)
 
-        self.assertNotIn("foo", a)
-        self.assertIn("baz", a)
-        self.assertEqual(a["bar"].schedule._next_run_at, 40)
+        self.assertNotIn("foo", a.schedule)
+        self.assertIn("baz", a.schedule)
+        self.assertEqual(a.schedule["bar"].schedule._next_run_at, 40)
 
 
 class test_Service(unittest.TestCase):
 
-    def test_start(self):
+    def get_service(self):
         sh = MockShelve()
 
         class PersistentScheduler(beat.PersistentScheduler):
             persistence = Object()
             persistence.open = lambda *a, **kw: sh
+            tick_raises_exit = False
+            shutdown_service = None
+
+            def tick(self):
+                if self.tick_raises_exit:
+                    raise SystemExit()
+                if self.shutdown_service:
+                    self.shutdown_service._shutdown.set()
+                return 0.0
+
+        return beat.Service(scheduler_cls=PersistentScheduler), sh
 
-        s = beat.Service(scheduler_cls=PersistentScheduler)
-        self.assertIsInstance(s.schedule, dict)
+    def test_start(self):
+        s, sh = self.get_service()
+        schedule = s.scheduler.schedule
+        self.assertIsInstance(schedule, dict)
         self.assertIsInstance(s.scheduler, beat.Scheduler)
-        self.assertListEqual(s.schedule.keys(), sh.keys())
+        scheduled = schedule.keys()
+        for task_name in sh["entries"].keys():
+            self.assertIn(task_name, scheduled)
 
         s.sync()
         self.assertTrue(sh.closed)
@@ -256,12 +293,38 @@ class test_Service(unittest.TestCase):
         finally:
             s.scheduler._store = p
 
+    def test_start_embedded_process(self):
+        s, sh = self.get_service()
+        s._shutdown.set()
+        s.start(embedded_process=True)
+
+    def test_start_thread(self):
+        s, sh = self.get_service()
+        s._shutdown.set()
+        s.start(embedded_process=False)
+
+    def test_start_tick_raises_exit_error(self):
+        s, sh = self.get_service()
+        s.scheduler.tick_raises_exit = True
+        s.start()
+        self.assertTrue(s._shutdown.isSet())
+
+    def test_start_manages_one_tick_before_shutdown(self):
+        s, sh = self.get_service()
+        s.scheduler.shutdown_service = s
+        s.start()
+        self.assertTrue(s._shutdown.isSet())
+
 
 class test_EmbeddedService(unittest.TestCase):
 
     def test_start_stop_process(self):
+        try:
+            from multiprocessing import Process
+        except ImportError:
+            raise SkipTest("multiprocessing not available")
+
         s = beat.EmbeddedService()
-        from multiprocessing import Process
         self.assertIsInstance(s, Process)
         self.assertIsInstance(s.service, beat.Service)
         s.service = MockService()

+ 1 - 1
celery/tests/test_celery.py → celery/tests/test_app/test_celery.py

@@ -1,4 +1,4 @@
-import unittest2 as unittest
+from celery.tests.utils import unittest
 
 import celery
 

+ 257 - 0
celery/tests/test_app/test_loaders.py

@@ -0,0 +1,257 @@
+import os
+import sys
+
+from celery import task
+from celery import loaders
+from celery.app import app_or_default
+from celery.exceptions import ImproperlyConfigured
+from celery.loaders import base
+from celery.loaders import default
+from celery.loaders.app import AppLoader
+
+from celery.tests.compat import catch_warnings
+from celery.tests.utils import unittest
+from celery.tests.utils import with_environ, execute_context
+
+
+class ObjectConfig(object):
+    FOO = 1
+    BAR = 2
+
+object_config = ObjectConfig()
+dict_config = dict(FOO=10, BAR=20)
+
+
+class Object(object):
+
+    def __init__(self, **kwargs):
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+
+class MockMail(object):
+
+    class SendmailWarning(UserWarning):
+        pass
+
+    class Message(Object):
+        pass
+
+    class Mailer(Object):
+        sent = []
+        raise_on_send = False
+
+        def send(self, message):
+            if self.__class__.raise_on_send:
+                raise KeyError("foo")
+            self.sent.append(message)
+
+
+class DummyLoader(base.BaseLoader):
+
+    def read_configuration(self):
+        return {"foo": "bar", "CELERY_IMPORTS": ("os", "sys")}
+
+    @property
+    def mail(self):
+        return MockMail()
+
+
+class TestLoaders(unittest.TestCase):
+
+    def test_get_loader_cls(self):
+
+        self.assertEqual(loaders.get_loader_cls("default"),
+                          default.Loader)
+
+    def test_current_loader(self):
+        loader1 = loaders.current_loader()
+        loader2 = loaders.current_loader()
+        self.assertIs(loader1, loader2)
+        self.assertIs(loader2, loaders._loader)
+
+    def test_load_settings(self):
+        loader = loaders.current_loader()
+        loaders._settings = None
+        settings = loaders.load_settings()
+        self.assertTrue(loaders._settings)
+        settings = loaders.load_settings()
+        self.assertIs(settings, loaders._settings)
+        self.assertIs(settings, loader.conf)
+
+    @with_environ("CELERY_LOADER", "default")
+    def test_detect_loader_CELERY_LOADER(self):
+        self.assertIsInstance(loaders.setup_loader(), default.Loader)
+
+
+class TestLoaderBase(unittest.TestCase):
+    message_options = {"subject": "Subject",
+                       "body": "Body",
+                       "sender": "x@x.com",
+                       "to": "y@x.com"}
+    server_options = {"host": "smtp.x.com",
+                      "port": 1234,
+                      "user": "x",
+                      "password": "qwerty",
+                      "timeout": 3}
+
+    def setUp(self):
+        self.loader = DummyLoader()
+
+    def test_handlers_pass(self):
+        self.loader.on_task_init("foo.task", "feedface-cafebabe")
+        self.loader.on_worker_init()
+
+    def test_import_task_module(self):
+        self.assertEqual(sys, self.loader.import_task_module("sys"))
+
+    def test_conf_property(self):
+        self.assertEqual(self.loader.conf["foo"], "bar")
+        self.assertEqual(self.loader._conf["foo"], "bar")
+        self.assertEqual(self.loader.conf["foo"], "bar")
+
+    def test_import_default_modules(self):
+        self.assertItemsEqual(self.loader.import_default_modules(),
+                              [os, sys, task])
+
+    def test_import_from_cwd_custom_imp(self):
+
+        def imp(module):
+            imp.called = True
+        imp.called = False
+
+        self.loader.import_from_cwd("foo", imp=imp)
+        self.assertTrue(imp.called)
+
+    def test_mail_admins_errors(self):
+        MockMail.Mailer.raise_on_send = True
+        opts = dict(self.message_options, **self.server_options)
+
+        def with_catch_warnings(log):
+            self.loader.mail_admins(fail_silently=True, **opts)
+            return log[0].message
+
+        warning = execute_context(catch_warnings(record=True),
+                                  with_catch_warnings)
+        self.assertIsInstance(warning, MockMail.SendmailWarning)
+        self.assertIn("KeyError", warning.args[0])
+
+        self.assertRaises(KeyError, self.loader.mail_admins,
+                          fail_silently=False, **opts)
+
+    def test_mail_admins(self):
+        MockMail.Mailer.raise_on_send = False
+        opts = dict(self.message_options, **self.server_options)
+
+        self.loader.mail_admins(**opts)
+        message = MockMail.Mailer.sent.pop()
+        self.assertDictContainsSubset(vars(message), self.message_options)
+
+    def test_mail_attribute(self):
+        from celery.utils import mail
+        loader = base.BaseLoader()
+        self.assertIs(loader.mail, mail)
+
+    def test_cmdline_config_ValueError(self):
+        self.assertRaises(ValueError, self.loader.cmdline_config_parser,
+                         ["broker.port=foobar"])
+
+
+class TestDefaultLoader(unittest.TestCase):
+
+    def test_wanted_module_item(self):
+        self.assertTrue(default.wanted_module_item("FOO"))
+        self.assertTrue(default.wanted_module_item("Foo"))
+        self.assertFalse(default.wanted_module_item("_FOO"))
+        self.assertFalse(default.wanted_module_item("__FOO"))
+        self.assertFalse(default.wanted_module_item("foo"))
+
+    def test_read_configuration(self):
+        from types import ModuleType
+
+        class ConfigModule(ModuleType):
+            pass
+
+        celeryconfig = ConfigModule("celeryconfig")
+        celeryconfig.CELERY_IMPORTS = ("os", "sys")
+        configname = os.environ.get("CELERY_CONFIG_MODULE") or "celeryconfig"
+
+        prevconfig = sys.modules[configname]
+        sys.modules[configname] = celeryconfig
+        try:
+            l = default.Loader()
+            settings = l.read_configuration()
+            self.assertTupleEqual(settings.CELERY_IMPORTS, ("os", "sys"))
+            settings = l.read_configuration()
+            self.assertTupleEqual(settings.CELERY_IMPORTS, ("os", "sys"))
+            l.on_worker_init()
+        finally:
+            sys.modules[configname] = prevconfig
+
+    def test_import_from_cwd(self):
+        l = default.Loader()
+        old_path = list(sys.path)
+        try:
+            sys.path.remove(os.getcwd())
+        except ValueError:
+            pass
+        celery = sys.modules.pop("celery", None)
+        try:
+            self.assertTrue(l.import_from_cwd("celery"))
+            sys.modules.pop("celery", None)
+            sys.path.insert(0, os.getcwd())
+            self.assertTrue(l.import_from_cwd("celery"))
+        finally:
+            sys.path = old_path
+            sys.modules["celery"] = celery
+
+    def test_unconfigured_settings(self):
+        context_executed = [False]
+
+        class _Loader(default.Loader):
+
+            def import_from_cwd(self, name):
+                raise ImportError(name)
+
+        def with_catch_warnings(log):
+            l = _Loader()
+            self.assertEqual(l.conf.CELERY_RESULT_BACKEND, "amqp")
+            context_executed[0] = True
+
+        context = catch_warnings(record=True)
+        execute_context(context, with_catch_warnings)
+        self.assertTrue(context_executed[0])
+
+
+class test_AppLoader(unittest.TestCase):
+
+    def setUp(self):
+        self.app = app_or_default()
+        self.loader = AppLoader(app=self.app)
+
+    def test_config_from_envvar(self, key="CELERY_HARNESS_CFG1"):
+        self.assertFalse(self.loader.config_from_envvar("HDSAJIHWIQHEWQU",
+                                                        silent=True))
+        self.assertRaises(ImproperlyConfigured,
+                          self.loader.config_from_envvar, "HDSAJIHWIQHEWQU",
+                          silent=False)
+        os.environ[key] = __name__ + ".object_config"
+        self.assertTrue(self.loader.config_from_envvar(key))
+        self.assertEqual(self.loader.conf["FOO"], 1)
+        self.assertEqual(self.loader.conf["BAR"], 2)
+
+        os.environ[key] = "unknown_asdwqe.asdwqewqe"
+        self.assertRaises(ImportError,
+                          self.loader.config_from_envvar, key, silent=False)
+        self.assertFalse(self.loader.config_from_envvar(key, silent=True))
+
+        os.environ[key] = __name__ + ".dict_config"
+        self.assertTrue(self.loader.config_from_envvar(key))
+        self.assertEqual(self.loader.conf["FOO"], 10)
+        self.assertEqual(self.loader.conf["BAR"], 20)
+
+    def test_on_worker_init(self):
+        self.loader.conf["CELERY_IMPORTS"] = ("subprocess", )
+        sys.modules.pop("subprocess", None)
+        self.loader.on_worker_init()
+        self.assertIn("subprocess", sys.modules)

+ 45 - 12
celery/tests/test_routes.py → celery/tests/test_app/test_routes.py

@@ -1,8 +1,7 @@
-import unittest2 as unittest
+from celery.tests.utils import unittest
 
-
-from celery import conf
 from celery import routes
+from celery import current_app
 from celery.utils import maybe_promise
 from celery.utils.functional import wraps
 from celery.exceptions import QueueNotFound
@@ -19,12 +18,16 @@ def with_queues(**queues):
     def patch_fun(fun):
         @wraps(fun)
         def __inner(*args, **kwargs):
-            prev_queues = conf.QUEUES
-            conf.QUEUES = queues
+            app = current_app
+            prev_queues = app.conf.CELERY_QUEUES
+            prev_Queues = app.amqp.queues
+            app.conf.CELERY_QUEUES = queues
+            app.amqp.queues = app.amqp.Queues(queues)
             try:
                 return fun(*args, **kwargs)
             finally:
-                conf.QUEUES = prev_queues
+                app.conf.CELERY_QUEUES = prev_queues
+                app.amqp.queues = prev_Queues
         return __inner
     return patch_fun
 
@@ -35,13 +38,16 @@ a_queue = {"exchange": "fooexchange",
 b_queue = {"exchange": "barexchange",
            "exchange_type": "topic",
            "binding_key": "b.b.#"}
+d_queue = {"exchange": current_app.conf.CELERY_DEFAULT_EXCHANGE,
+           "exchange_type": current_app.conf.CELERY_DEFAULT_EXCHANGE_TYPE,
+           "routing_key": current_app.conf.CELERY_DEFAULT_ROUTING_KEY}
 
 
 class test_MapRoute(unittest.TestCase):
 
     @with_queues(foo=a_queue, bar=b_queue)
     def test_route_for_task_expanded_route(self):
-        expand = E(conf.QUEUES)
+        expand = E(current_app.conf.CELERY_QUEUES)
         route = routes.MapRoute({"celery.ping": {"queue": "foo"}})
         self.assertDictContainsSubset(a_queue,
                              expand(route.route_for_task("celery.ping")))
@@ -49,14 +55,14 @@ class test_MapRoute(unittest.TestCase):
 
     @with_queues(foo=a_queue, bar=b_queue)
     def test_route_for_task(self):
-        expand = E(conf.QUEUES)
+        expand = E(current_app.conf.CELERY_QUEUES)
         route = routes.MapRoute({"celery.ping": b_queue})
         self.assertDictContainsSubset(b_queue,
                              expand(route.route_for_task("celery.ping")))
         self.assertIsNone(route.route_for_task("celery.awesome"))
 
     def test_expand_route_not_found(self):
-        expand = E(conf.QUEUES)
+        expand = E(current_app.conf.CELERY_QUEUES)
         route = routes.MapRoute({"a": {"queue": "x"}})
         self.assertRaises(QueueNotFound, expand, route.route_for_task("a"))
 
@@ -71,20 +77,47 @@ class test_lookup_route(unittest.TestCase):
     def test_lookup_takes_first(self):
         R = routes.prepare(({"celery.ping": {"queue": "bar"}},
                             {"celery.ping": {"queue": "foo"}}))
-        router = routes.Router(R, conf.QUEUES)
+        router = routes.Router(R, current_app.conf.CELERY_QUEUES)
         self.assertDictContainsSubset(b_queue,
                 router.route({}, "celery.ping",
                     args=[1, 2], kwargs={}))
 
+    @with_queues()
+    def test_expands_queue_in_options(self):
+        R = routes.prepare(())
+        router = routes.Router(R, current_app.conf.CELERY_QUEUES,
+                               create_missing=True)
+        # apply_async forwards all arguments, even exchange=None etc,
+        # so need to make sure it's merged correctly.
+        route = router.route({"queue": "testq",
+                              "exchange": None,
+                              "routing_key": None,
+                              "immediate": False},
+                             "celery.ping",
+                             args=[1, 2], kwargs={})
+        self.assertDictContainsSubset({"exchange": "testq",
+                                       "routing_key": "testq",
+                                       "immediate": False},
+                                       route)
+        self.assertIn("queue", route)
+
     @with_queues(foo=a_queue, bar=b_queue)
+    def test_expand_destaintion_string(self):
+        x = routes.Router({}, current_app.conf.CELERY_QUEUES)
+        dest = x.expand_destination("foo")
+        self.assertEqual(dest["exchange"], "fooexchange")
+
+    @with_queues(foo=a_queue, bar=b_queue, **{
+        current_app.conf.CELERY_DEFAULT_QUEUE: d_queue})
     def test_lookup_paths_traversed(self):
         R = routes.prepare(({"celery.xaza": {"queue": "bar"}},
                             {"celery.ping": {"queue": "foo"}}))
-        router = routes.Router(R, conf.QUEUES)
+        router = routes.Router(R, current_app.amqp.queues)
         self.assertDictContainsSubset(a_queue,
                 router.route({}, "celery.ping",
                     args=[1, 2], kwargs={}))
-        self.assertEqual(router.route({}, "celery.poza"), {})
+        self.assertEqual(router.route({}, "celery.poza"),
+                dict(d_queue, queue=current_app.conf.CELERY_DEFAULT_QUEUE))
 
 
 class test_prepare(unittest.TestCase):
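
The routing tests rely on the new `prepare`/`Router` API; a minimal sketch of the same calls outside the test harness, assuming a queue named "foo" is declared in `CELERY_QUEUES` (otherwise expanding the route raises `QueueNotFound`):

    from celery import current_app, routes

    R = routes.prepare(({"celery.ping": {"queue": "foo"}}, ))
    router = routes.Router(R, current_app.conf.CELERY_QUEUES)

    # Returns the fully expanded options (exchange, routing key, ...)
    # for the queue the first matching route points at.
    options = router.route({}, "celery.ping", args=[1, 2], kwargs={})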

+ 3 - 3
celery/tests/test_backends/__init__.py

@@ -1,15 +1,15 @@
-import unittest2 as unittest
+from celery.tests.utils import unittest
 
 from celery import backends
 from celery.backends.amqp import AMQPBackend
-from celery.backends.database import DatabaseBackend
+from celery.backends.cache import CacheBackend
 
 
 class TestBackends(unittest.TestCase):
 
     def test_get_backend_aliases(self):
         expects = [("amqp", AMQPBackend),
-                   ("database", DatabaseBackend)]
+                   ("cache", CacheBackend)]
         for expect_name, expect_cls in expects:
             self.assertIsInstance(backends.get_backend_cls(expect_name)(),
                                   expect_cls)

+ 0 - 60
celery/tests/test_backends/disabled_amqp.py

@@ -1,60 +0,0 @@
-import sys
-import unittest2 as unittest
-
-from celery import states
-from celery.utils import gen_unique_id
-from celery.backends.amqp import AMQPBackend
-from celery.datastructures import ExceptionInfo
-
-
-class SomeClass(object):
-
-    def __init__(self, data):
-        self.data = data
-
-
-class test_AMQPBackend(unittest.TestCase):
-
-    def create_backend(self):
-        return AMQPBackend(serializer="pickle", persistent=False)
-
-    def test_mark_as_done(self):
-        tb1 = self.create_backend()
-        tb2 = self.create_backend()
-
-        tid = gen_unique_id()
-
-        tb1.mark_as_done(tid, 42)
-        self.assertEqual(tb2.get_status(tid), states.SUCCESS)
-        self.assertEqual(tb2.get_result(tid), 42)
-        self.assertTrue(tb2._cache.get(tid))
-        self.assertTrue(tb2.get_result(tid), 42)
-
-    def test_is_pickled(self):
-        tb1 = self.create_backend()
-        tb2 = self.create_backend()
-
-        tid2 = gen_unique_id()
-        result = {"foo": "baz", "bar": SomeClass(12345)}
-        tb1.mark_as_done(tid2, result)
-        # is serialized properly.
-        rindb = tb2.get_result(tid2)
-        self.assertEqual(rindb.get("foo"), "baz")
-        self.assertEqual(rindb.get("bar").data, 12345)
-
-    def test_mark_as_failure(self):
-        tb1 = self.create_backend()
-        tb2 = self.create_backend()
-
-        tid3 = gen_unique_id()
-        try:
-            raise KeyError("foo")
-        except KeyError, exception:
-            einfo = ExceptionInfo(sys.exc_info())
-        tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback)
-        self.assertEqual(tb2.get_status(tid3), states.FAILURE)
-        self.assertIsInstance(tb2.get_result(tid3), KeyError)
-        self.assertEqual(tb2.get_traceback(tid3), einfo.traceback)
-
-    def test_process_cleanup(self):
-        self.create_backend().process_cleanup()

Some files were not shown because too many files changed in this diff