
Merge branch 'master' into events

Conflicts:
	README.rst
	celery/execute.py
	celery/management/commands/celerystats.py
	celery/messaging.py
	celery/monitoring.py
	celery/worker/__init__.py
Ask Solem, 15 years ago
parent commit 200101a8f2
100 changed files with 3532 additions and 2171 deletions
  1. AUTHORS (+1 -0)
  2. Changelog (+224 -34)
  3. FAQ (+22 -22)
  4. Makefile (+9 -0)
  5. README.rst (+87 -98)
  6. bin/celerybeat (+7 -0)
  7. bin/celeryd (+2 -0)
  8. bin/celeryinit (+3 -1)
  9. celery/__init__.py (+1 -1)
  10. celery/backends/__init__.py (+31 -36)
  11. celery/backends/amqp.py (+7 -10)
  12. celery/backends/base.py (+23 -19)
  13. celery/backends/cache.py (+4 -3)
  14. celery/backends/database.py (+8 -29)
  15. celery/backends/mongodb.py (+12 -102)
  16. celery/backends/redis.py (+6 -5)
  17. celery/backends/tyrant.py (+5 -5)
  18. celery/beat.py (+189 -0)
  19. celery/bin/celerybeat.py (+170 -0)
  20. celery/bin/celeryd.py (+46 -42)
  21. celery/bin/celeryinit.py (+2 -1)
  22. celery/conf.py (+74 -14)
  23. celery/contrib/__init__.py (+0 -0)
  24. celery/contrib/test_runner.py (+19 -0)
  25. celery/datastructures.py (+2 -3)
  26. celery/decorators.py (+73 -0)
  27. celery/execute.py (+85 -157)
  28. celery/fields.py (+0 -153)
  29. celery/loaders/__init__.py (+11 -8)
  30. celery/loaders/default.py (+1 -0)
  31. celery/log.py (+10 -3)
  32. celery/management/commands/celerybeat.py (+18 -0)
  33. celery/management/commands/celeryd.py (+1 -0)
  34. celery/management/commands/celerystats.py (+1 -0)
  35. celery/managers.py (+5 -115)
  36. celery/messaging.py (+4 -6)
  37. celery/models.py (+11 -43)
  38. celery/patch.py (+29 -0)
  39. celery/platform.py (+5 -0)
  40. celery/pool.py (+0 -258)
  41. celery/registry.py (+8 -26)
  42. celery/result.py (+19 -22)
  43. celery/serialization.py (+0 -116)
  44. celery/supervisor.py (+0 -120)
  45. celery/task/__init__.py (+9 -9)
  46. celery/task/base.py (+83 -129)
  47. celery/task/builtins.py (+4 -6)
  48. celery/task/rest.py (+6 -6)
  49. celery/task/strategy.py (+1 -0)
  50. celery/tests/test_backends/test_base.py (+8 -6)
  51. celery/tests/test_backends/test_cache.py (+5 -9)
  52. celery/tests/test_backends/test_database.py (+7 -20)
  53. celery/tests/test_backends/test_tyrant.py (+5 -6)
  54. celery/tests/test_buckets.py (+214 -0)
  55. celery/tests/test_log.py (+0 -1)
  56. celery/tests/test_messaging.py (+1 -1)
  57. celery/tests/test_models.py (+6 -32)
  58. celery/tests/test_pickle.py (+2 -1)
  59. celery/tests/test_pool.py (+1 -2)
  60. celery/tests/test_registry.py (+0 -16)
  61. celery/tests/test_result.py (+7 -7)
  62. celery/tests/test_serialization.py (+3 -3)
  63. celery/tests/test_supervisor.py (+0 -66)
  64. celery/tests/test_task.py (+23 -34)
  65. celery/tests/test_task_builtins.py (+4 -2)
  66. celery/tests/test_utils.py (+0 -1)
  67. celery/tests/test_worker.py (+39 -35)
  68. celery/tests/test_worker_controllers.py (+6 -50)
  69. celery/tests/test_worker_job.py (+33 -42)
  70. celery/tests/utils.py (+37 -1)
  71. celery/urls.py (+1 -0)
  72. celery/utils.py (+10 -5)
  73. celery/utils/functional.py (+0 -16)
  74. celery/views.py (+7 -4)
  75. celery/worker/__init__.py (+72 -52)
  76. celery/worker/buckets.py (+299 -0)
  77. celery/worker/controllers.py (+30 -72)
  78. celery/worker/job.py (+111 -26)
  79. celery/worker/pool.py (+93 -0)
  80. celery/worker/scheduler.py (+50 -0)
  81. contrib/debian/init.d/celerybeat (+158 -0)
  82. contrib/debian/init.d/celeryd (+51 -39)
  83. contrib/doc4allmods (+6 -4)
  84. contrib/mac/watch-workers.applescript (+10 -0)
  85. contrib/sphinx-to-rst.py (+49 -0)
  86. contrib/test_runner.py (+0 -11)
  87. contrib/testconn.py (+4 -4)
  88. contrib/testdynpool.py (+1 -1)
  89. contrib/verify-reference-index.sh (+14 -0)
  90. docs/_theme/ADCThemePrint/README.rst (+35 -0)
  91. docs/_theme/ADCThemePrint/layout.html (+47 -0)
  92. docs/_theme/ADCThemePrint/static/adctheme.css (+745 -0)
  93. docs/_theme/ADCThemePrint/static/breadcrumb_background.png (BIN)
  94. docs/_theme/ADCThemePrint/static/documentation.png (BIN)
  95. docs/_theme/ADCThemePrint/static/header_sm_mid.png (BIN)
  96. docs/_theme/ADCThemePrint/static/scrn1.png (BIN)
  97. docs/_theme/ADCThemePrint/static/scrn2.png (BIN)
  98. docs/_theme/ADCThemePrint/static/searchfield_leftcap.png (BIN)
  99. docs/_theme/ADCThemePrint/static/searchfield_repeat.png (BIN)
  100. docs/_theme/ADCThemePrint/static/searchfield_rightcap.png (BIN)

+ 1 - 0
AUTHORS

@@ -12,3 +12,4 @@ Ordered by date of first contribution:
   Wes Winham <winhamwr@gmail.com>
   Timo Sugliani
   Michael Elsdoerfer <michael@elsdoerfer.com>
+  Jason Baker <amnorvend@gmail.com>

+ 224 - 34
Changelog

@@ -2,10 +2,196 @@
 Change history
 ==============
 
-0.8.0 [2009-09-22 03:06 P.M CET]
---------------------------------
+1.0.0 [xxxx-xx-xx xx:xx x.x xxx]
+================================
+
+BACKWARD INCOMPATIBLE CHANGES
+-----------------------------
+
+* The default celeryd loglevel is now ``WARN``; to enable the previous log
+  level, start celeryd with ``--loglevel=INFO``.
+
+* Tasks are automatically registered.
+
+  This means you no longer have to register your tasks manually.
+  You don't have to change your old code right away, as it doesn't matter if
+  a task is registered twice.
+
+  If you don't want your task to be automatically registered you can set
+  the ``abstract`` attribute:
+
+  .. code-block:: python
+
+		class MyTask(Task):
+			abstract = True
+
+  When ``abstract`` is set, the task itself is not registered; only tasks
+  subclassing it will be automatically registered (this works like the Django
+  ORM).
+
+  Incidentally, this change also fixes the problems with automatic name
+  assignment and relative imports. So you also don't have to specify a task name
+  anymore if you use relative imports.
+
+* You can no longer use regular functions as tasks. This change was made
+  because it makes the internals a lot cleaner and simpler. However, you can
+  now turn functions into tasks by using the ``@task`` decorator:
+
+  .. code-block:: python
+
+		from celery.decorators import task
+
+		@task()
+		def add(x, y):
+			return x + y
+
+  See the User Guide: :doc:`userguide/tasks` for more information.
+
+* The periodic task system has been rewritten to a centralized solution; this
+  means ``celeryd`` no longer schedules periodic tasks by default, and a new
+  daemon has been introduced: ``celerybeat``.
+
+  To launch the periodic task scheduler you have to run celerybeat::
+
+		$ celerybeat --detach
+
+  Make sure this is running on one server only; if you run it twice, all
+  periodic tasks will also be executed twice.
+
+  If you only have one worker server you can embed it into celeryd like this::
+
+		$ celeryd --detach --beat # Embed celerybeat in celeryd.
+
+* The supervisor has been removed; please use something like
+  http://supervisord.org instead. This means the ``-S`` and ``--supervised``
+  options to ``celeryd`` are no longer supported.
+
+* ``TaskSet.join`` has been removed, use ``TaskSetResult.join`` instead.
+
+* The task status ``"DONE"`` has been renamed to ``"SUCCESS"``.
+
+* ``AsyncResult.is_done`` has been removed, use ``AsyncResult.successful``
+  instead.
+
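As a quick orientation, here is a rough before/after mapping for these renames.
This is an illustrative sketch, not output from the test suite; in particular,
obtaining the ``TaskSetResult`` via ``TaskSet.run()`` is an assumption based on
the existing API.

.. code-block:: python

    # result is an AsyncResult you already hold, e.g. result = MyTask.delay()

    result.is_done()                  # old (removed)
    result.successful()               # new

    result.status == "DONE"           # old status name
    result.status == "SUCCESS"        # new status name

    # ts is a celery.task.TaskSet you have already built;
    # assumption: ts.run() returns a TaskSetResult.
    ts_result = ts.run()
    ts_result.join()                  # instead of the removed TaskSet.join()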
+NEWS
+----
+
+* Rate limiting support (per task type, or globally).
+
+* New periodic task system.
+
+* Automatic registration.
+
+* New cool task decorator syntax.
+
+
+CHANGES
+-------
+
+* New dependencies: billiard, python-dateutil, django-picklefield
+
+* ETA no longer sends datetime objects, but uses the ISO 8601 date format in
+  a string for better compatibility with other platforms (see the sketch
+  after this list).
+
+* Task can now override the backend used to store results.
+
+* Refactored the ExecuteWrapper; ``apply`` and ``CELERY_ALWAYS_EAGER`` now
+  also execute the task callbacks and signals.
+
+* Now using a proper scheduler for tasks with an ETA. This means waiting
+  ETA tasks are sorted by time, so we don't have to poll the whole list all
+  the time.
+
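To make the ETA change concrete, here is a minimal sketch of the conversion
involved. The surrounding message fields are omitted and the variable names are
made up; only the datetime-to-string round trip is the point, and
``python-dateutil`` is one of the new dependencies listed above:

.. code-block:: python

    from datetime import datetime, timedelta
    from dateutil.parser import parse

    eta = datetime.now() + timedelta(minutes=10)
    eta_as_sent = eta.isoformat()       # ISO 8601 string, e.g. "2009-11-20T14:31:05"
    eta_on_worker = parse(eta_as_sent)  # back to a datetime on the receiving side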
+DOCUMENTATION
+-------------
+
+* The reference is now split into two sections: API reference and internal
+  module reference.
+
 
-**BACKWARD INCOMPATIBLE CHANGES**
+0.8.1 [2009-11-16 05:21 P.M CEST]
+=================================
+
+VERY IMPORTANT NOTE
+-------------------
+
+This release (with carrot 0.8.0) enables AMQP QoS (quality of service), which
+means the workers will only receive as many messages as they can handle at a
+time. As with any release, you should test this version upgrade on your
+development servers before rolling it out to production!
+
+IMPORTANT CHANGES
+-----------------
+
+* If you're using Python < 2.6 and you use the multiprocessing backport, then
+  multiprocessing version 2.6.2.1 is required.
+
+* All AMQP_* settings have been renamed to BROKER_*, and in addition
+  AMQP_SERVER has been renamed to BROKER_HOST, so where you previously had::
+
+		AMQP_SERVER = "localhost"
+		AMQP_PORT = 5678
+		AMQP_USER = "myuser"
+		AMQP_PASSWORD = "mypassword"
+		AMQP_VHOST = "celery"
+
+  You need to change that to::
+
+		BROKER_HOST = "localhost"
+		BROKER_PORT = 5678
+		BROKER_USER = "myuser"
+		BROKER_PASSWORD = "mypassword"
+		BROKER_VHOST = "celery"
+
+* Custom carrot backends now need to include the backend class name, so where
+  you previously had::
+
+		CARROT_BACKEND = "mycustom.backend.module"
+
+  you need to change it to::
+
+		CARROT_BACKEND = "mycustom.backend.module.Backend"
+
+  where ``Backend`` is the class name. This is probably ``"Backend"``, as that
+  was the name implied previously.
+
+* New version requirement for carrot: 0.8.0
+
+CHANGES
+-------
+
+* Incorporated the multiprocessing backport patch that fixes the
+  ``processName`` error.
+
+* Ignore the result of PeriodicTasks by default.
+
+* Added a Redis result store backend.
+
+* Allow /etc/default/celeryd to define additional options for the celeryd init
+  script.
+
+* Fixed an issue with MongoDB periodic tasks when using a timezone other than
+  UTC.
+
+* Windows specific: Negate test for available os.fork (thanks miracle2k).
+
+* Now tries to handle broken PID files.
+
+* Added a Django test runner to contrib that sets CELERY_ALWAYS_EAGER = True
+  for testing with the database backend.
+
+* Added a CELERY_CACHE_BACKEND setting for using something other than the
+  django-global cache backend.
+
+* Use a custom implementation of functools.partial (curry) for Python 2.4
+  support (there are probably still problems running on 2.4, but it will
+  eventually be supported). A sketch of such a fallback follows this list.
+
+* Exceptions are now prepared for pickling when saving the RETRY status, for
+  all backends.
+
+* The SQLite "no concurrency" limit should only take effect if the database
+  backend is actually used.
+
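For illustration, here is a minimal sketch of what such a ``partial``/curry
fallback looks like in plain Python. This shows the generic pattern only, not
necessarily celery's exact implementation:

.. code-block:: python

    def curry(fun, *args, **kwargs):
        """Minimal stand-in for functools.partial on Python 2.4."""
        def _curried(*more_args, **more_kwargs):
            merged_kwargs = dict(kwargs, **more_kwargs)
            return fun(*(args + more_args), **merged_kwargs)
        return _curried

    add_five = curry(lambda x, y: x + y, 5)
    assert add_five(3) == 8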
+0.8.0 [2009-09-22 03:06 P.M CEST]
+=================================
+
+BACKWARD INCOMPATIBLE CHANGES
+-----------------------------
 
 * Add traceback to result value on failure.
 	**NOTE** If you use the database backend you have to re-create the
@@ -23,7 +209,8 @@ Change history
 
 * Now depends on python-daemon 1.4.8
 
-**IMPORTANT CHANGES**
+IMPORTANT CHANGES
+-----------------
 
 * Celery can now be used in pure Python (outside of a Django project).
 	This means celery is no longer Django specific.
@@ -85,7 +272,8 @@ Change history
     * AMQP_CONNECTION_MAX_RETRIES.
         Maximum number of restarts before we give up. Default: ``100``.
 
-**NEWS**
+NEWS
+----
 
 *  Fix an incompatibility between python-daemon and multiprocessing,
 	which resulted in the ``[Errno 10] No child processes`` problem when
@@ -134,9 +322,10 @@ Change history
 	Thanks mikedizon
 
 0.6.0 [2009-08-07 06:54 A.M CET]
---------------------------------
+================================
 
-**IMPORTANT CHANGES**
+IMPORTANT CHANGES
+-----------------
 
 * Fixed a bug where tasks raising unpickleable exceptions crashed pool
 	workers. So if you've had pool workers mysteriously dissapearing, or
@@ -154,7 +343,8 @@ Change history
 	we didn't do this before. Some documentation is updated to not manually
 	specify a task name.
 
-**NEWS**
+NEWS
+----
 
 * Tested with Django 1.1
 
@@ -206,13 +396,13 @@ Change history
 * Convert statistics data to unicode for use as kwargs. Thanks Lucy!
 
 0.4.1 [2009-07-02 01:42 P.M CET]
---------------------------------
+================================
 
 * Fixed a bug with parsing the message options (``mandatory``,
   ``routing_key``, ``priority``, ``immediate``)
 
-0.4.0 [2009-07-01 07:29 P.M CET] 
---------------------------------
+0.4.0 [2009-07-01 07:29 P.M CET]
+================================
 
 * Adds eager execution. ``celery.execute.apply``|``Task.apply`` executes the
   function blocking until the task is done, for API compatiblity it
@@ -224,8 +414,8 @@ Change history
 
 * 99% coverage using python ``coverage`` 3.0.
 
-0.3.20 [2009-06-25 08:42 P.M CET] 
----------------------------------
+0.3.20 [2009-06-25 08:42 P.M CET]
+=================================
 
 * New arguments to ``apply_async`` (the advanced version of
   ``delay_task``), ``countdown`` and ``eta``;
@@ -362,14 +552,14 @@ Change history
 * Tyrant Backend: Now re-establishes the connection for every task
   executed.
 
-0.3.3 [2009-06-08 01:07 P.M CET] 
---------------------------------
+0.3.3 [2009-06-08 01:07 P.M CET]
+================================
 
-	* The ``PeriodicWorkController`` now sleeps for 1 second between checking
-		for periodic tasks to execute.
+* The ``PeriodicWorkController`` now sleeps for 1 second between checking
+  for periodic tasks to execute.
 
 0.3.2 [2009-06-08 01:07 P.M CET]
---------------------------------
+================================
 
 * celeryd: Added option ``--discard``: Discard (delete!) all waiting
   messages in the queue.
@@ -377,7 +567,7 @@ Change history
 * celeryd: The ``--wakeup-after`` option was not handled as a float.
 
 0.3.1 [2009-06-08 01:07 P.M CET]
---------------------------------
+================================
 
 * The `PeriodicTask`` worker is now running in its own thread instead
   of blocking the ``TaskController`` loop.
@@ -385,7 +575,7 @@ Change history
 * Default ``QUEUE_WAKEUP_AFTER`` has been lowered to ``0.1`` (was ``0.3``)
 
 0.3.0 [2009-06-08 12:41 P.M CET]
---------------------------------
+================================
 
 **NOTE** This is a development version, for the stable release, please
 see versions 0.2.x.
@@ -459,7 +649,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   stability.
 
 0.2.0 [2009-05-20 05:14 P.M CET]
---------------------------------
+================================
 
 * Final release of 0.2.0
 
@@ -469,20 +659,20 @@ arguments, so be sure to flush your task queue before you upgrade.
   from the database backend.
 
 0.2.0-pre3 [2009-05-20 05:14 P.M CET]
--------------------------------------
+=====================================
 
 * *Internal release*. Improved handling of unpickled exceptions,
   ``get_result`` now tries to recreate something looking like the
   original exception.
 
 0.2.0-pre2 [2009-05-20 01:56 P.M CET]
--------------------------------------
+=====================================
 
 * Now handles unpickleable exceptions (like the dynimically generated
   subclasses of ``django.core.exception.MultipleObjectsReturned``).
 
 0.2.0-pre1 [2009-05-20 12:33 P.M CET]
--------------------------------------
+=====================================
 
 * It's getting quite stable, with a lot of new features, so bump
   version to 0.2. This is a pre-release.
@@ -492,20 +682,20 @@ arguments, so be sure to flush your task queue before you upgrade.
   and ``celery.backends.default_backend.mark_as_failure()`` instead.
 
 0.1.15 [2009-05-19 04:13 P.M CET]
----------------------------------
+=================================
 
 * The celery daemon was leaking AMQP connections, this should be fixed,
   if you have any problems with too many files open (like ``emfile``
   errors in ``rabbit.log``, please contact us!
 
 0.1.14 [2009-05-19 01:08 P.M CET]
----------------------------------
+=================================
 
 * Fixed a syntax error in the ``TaskSet`` class.  (No such variable
   ``TimeOutError``).
 
 0.1.13 [2009-05-19 12:36 P.M CET]
----------------------------------
+=================================
 
 * Forgot to add ``yadayada`` to install requirements.
 
@@ -526,7 +716,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   and the result will be in ``docs/.build/html``.
 
 0.1.12 [2009-05-18 04:38 P.M CET]
----------------------------------
+=================================
 
 * ``delay_task()`` etc. now returns ``celery.task.AsyncResult`` object,
   which lets you check the result and any failure that might have
@@ -564,13 +754,13 @@ arguments, so be sure to flush your task queue before you upgrade.
 		TT_PORT = 6657; # Port of the Tokyo Tyrant server.
 
 0.1.11 [2009-05-12 02:08 P.M CET]
----------------------------------
+=================================
 
 * The logging system was leaking file descriptors, resulting in
   servers stopping with the EMFILES (too many open files) error. (fixed)
 
 0.1.10 [2009-05-11 12:46 P.M CET]
----------------------------------
+=================================
 
 * Tasks now supports both positional arguments and keyword arguments.
 
@@ -579,7 +769,7 @@ arguments, so be sure to flush your task queue before you upgrade.
 * The daemon now tries to reconnect if the connection is lost.
 
 0.1.8 [2009-05-07 12:27 P.M CET]
---------------------------------
+================================
 
 * Better test coverage
 * More documentation
@@ -587,7 +777,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   ``settings.CELERYD_EMPTY_MSG_EMIT_EVERY`` is 0.
 
 0.1.7 [2009-04-30 1:50 P.M CET]
--------------------------------
+===============================
 
 * Added some unittests
 
@@ -602,7 +792,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   and ``settings.CELERY_AMQP_CONSUMER_QUEUE``.
 
 0.1.6 [2009-04-28 2:13 P.M CET]
--------------------------------
+===============================
 
 * Introducing ``TaskSet``. A set of subtasks is executed and you can
   find out how many, or if all them, are done (excellent for progress
@@ -645,6 +835,6 @@ arguments, so be sure to flush your task queue before you upgrade.
   the name change request is in ``docs/name_change_request.txt``.
 
 0.1.0 [2009-04-24 11:28 A.M CET]
---------------------------------
+================================
 
 * Initial release

+ 22 - 22
FAQ

@@ -49,7 +49,7 @@ points are not specific to celery; If using Redis/database as a queue worked
 fine for you before, it probably will now. And you can always upgrade later.
 
 Is celery multi-lingual?
------------------------
+------------------------
 
 **Answer:** Yes.
 
@@ -61,10 +61,10 @@ messages. There's no other communication involved.
 Also, there's another way to be language indepedent, and that is to use REST
 tasks, instead of your tasks being functions, they're URLs. With this
 information you can even create simple web servers that enable preloading of
-code. For more information about REST tasks see: `Cookbook: Remote Tasks`_.
+code. For more information about REST tasks see: `User Guide: Remote Tasks`_.
 
-.. _`Cookbook: Remote Tasks`:
-    http://ask.github.com/celery/cookbook/remote-tasks.html
+.. _`User Guide: Remote Tasks`:
+    http://ask.github.com/celery/userguide/remote-tasks.html
 
 
 Troubleshooting
@@ -255,8 +255,8 @@ Use the following specific settings in your ``settings.py``:
     CARROT_BACKEND = "stomp"
 
     # STOMP hostname and port settings.
-    AMQP_SERVER = "localhost"
-    AMQP_PORT = 61613
+    BROKER_HOST = "localhost"
+    BROKER_PORT = 61613
 
     # The queue name to use (both queue and exchange must be set to the
     # same queue name when using STOMP)
@@ -299,11 +299,11 @@ configuration:
 
     .. code-block:: python
 
-        AMQP_SERVER = "rabbit"
-        AMQP_PORT = 5678
-        AMQP_USER = "myapp"
-        AMQP_PASSWORD = "secret"
-        AMQP_VHOST = "myapp"
+        BROKER_HOST = "rabbit"
+        BROKER_PORT = 5678
+        BROKER_USER = "myapp"
+        BROKER_PASSWORD = "secret"
+        BROKER_VHOST = "myapp"
 
         CELERY_AMQP_CONSUMER_QUEUE = "regular_tasks"
         CELERY_AMQP_EXCHANGE = "tasks"
@@ -315,12 +315,12 @@ configuration:
 
     .. code-block:: python
 
-        AMQP_SERVER = "rabbit"
-        AMQP_PORT = 5678
-        AMQP_USER = "myapp"
-        AMQP_PASSWORD = "secret"
-        AMQP_VHOST = "myapp"
-        
+        BROKER_HOST = "rabbit"
+        BROKER_PORT = 5678
+        BROKER_USER = "myapp"
+        BROKER_PASSWORD = "secret"
+        BROKER_VHOST = "myapp"
+
         CELERY_AMQP_EXCHANGE = "tasks"
         CELERY_AMQP_PUBLISHER_ROUTING_KEY = "task.regular"
         CELERY_AMQP_EXCHANGE_TYPE = "topic"
@@ -392,11 +392,11 @@ configuration using the database backend with MySQL:
 .. code-block:: python
 
     # Broker configuration
-    AMQP_SERVER = "localhost"
-    AMQP_PORT = "5672"
-    AMQP_VHOST = "celery"
-    AMQP_USER = "celery"
-    AMQP_PASSWORD = "celerysecret"
+    BROKER_HOST = "localhost"
+    BROKER_PORT = "5672"
+    BROKER_VHOST = "celery"
+    BROKER_USER = "celery"
+    BROKER_PASSWORD = "celerysecret"
     CARROT_BACKEND="amqp"
 
     # Using the database backend.

+ 9 - 0
Makefile

@@ -13,6 +13,15 @@ ghdocs:
 autodoc:
 	contrib/doc4allmods celery
 
+flakes:
+	find . -name "*.py" | xargs pyflakes
+
+clean_readme:
+	rm -f README.rst
+
+readme: clean_readme
+	python contrib/sphinx-to-rst.py docs/introduction.rst > README.rst
+
 bump:
 	contrib/bump -c celery
 

+ 87 - 98
README.rst

@@ -2,7 +2,7 @@
  celery - Distributed Task Queue
 =================================
 
-:Version: 0.8.0
+:Version: 0.9.0
 
 Introduction
 ============
@@ -24,15 +24,6 @@ languages see `Executing tasks on a remote web server`_.
 It is used for executing tasks *asynchronously*, routed to one or more
 worker servers, running concurrently using multiprocessing.
 
-It is designed to solve certain problems related to running websites
-demanding high-availability and performance.
-
-It is perfect for filling caches, posting updates to twitter, mass
-downloading data like syndication feeds or web scraping. Use-cases are
-plentiful. Implementing these features asynchronously using ``celery`` is
-easy and fun, and the performance improvements can make it more than
-worthwhile.
-
 Overview
 ========
 
@@ -40,10 +31,9 @@ This is a high level overview of the architecture.
 
 .. image:: http://cloud.github.com/downloads/ask/celery/Celery-Overview-v4.jpg
 
-The broker is an AMQP server pushing tasks to the worker servers.
+The broker pushes tasks to the worker servers.
 A worker server is a networked machine running ``celeryd``. This can be one or
-more machines, depending on the workload. See `A look inside the worker`_ to
-see how the worker server works.
+more machines, depending on the workload.
 
 The result of the task can be stored for later retrieval (called its
 "tombstone").
@@ -51,9 +41,10 @@ The result of the task can be stored for later retrieval (called its
 Features
 ========
 
-    * Uses AMQP messaging (RabbitMQ, ZeroMQ, Qpid) to route tasks to the
+    * Uses messaging (AMQP: RabbitMQ, ZeroMQ, Qpid) to route tasks to the
       worker servers. Experimental support for STOMP (ActiveMQ) is also 
-      available.
+      available. For simple setups it's also possible to use Redis or an
+      SQL database as the message queue.
 
     * You can run as many worker servers as you want, and still
       be *guaranteed that the task is only executed once.*
@@ -70,8 +61,15 @@ Features
       `MongoDB`_, `Redis`_ or `Tokyo Tyrant`_ back-end. For high-performance
       you can also use AMQP messages to publish results.
 
+    * Supports calling tasks over HTTP to support multiple programming
+      languages and systems.
+
+    * Supports several serialization schemes, like pickle, json and yaml, and
+      supports registering custom encodings.
+
     * If the task raises an exception, the exception instance is stored,
-      instead of the return value.
+      instead of the return value, and it's possible to inspect the traceback
+      after the fact.
 
     * All tasks has a Universally Unique Identifier (UUID), which is the
       task id, used for querying task status and return values.
@@ -87,7 +85,8 @@ Features
       You can find out how many, or if all of the sub-tasks has been executed.
       Excellent for progress-bar like functionality.
 
-    * Has a ``map`` like function that uses tasks, called ``dmap``.
+    * Has a ``map`` like function that uses tasks,
+      called ``celery.task.dmap``.
 
     * However, you rarely want to wait for these results in a web-environment.
       You'd rather want to use Ajax to poll the task status, which is
@@ -176,9 +175,6 @@ allow that user access to that virtual host::
 
     $ rabbitmqctl add_vhost myvhost
 
-From RabbitMQ version 1.6.0 and onward you have to use the new ACL features
-to allow access::
-
     $ rabbitmqctl set_permissions -p myvhost myuser "" ".*" ".*"
 
 See the RabbitMQ `Admin Guide`_ for more information about `access control`_.
@@ -188,11 +184,6 @@ See the RabbitMQ `Admin Guide`_ for more information about `access control`_.
 .. _`access control`: http://www.rabbitmq.com/admin-guide.html#access-control
 
 
-If you are still using version 1.5.0 or below, please use ``map_user_vhost``::
-
-    $ rabbitmqctl map_user_vhost myuser myvhost
-
-
 Configuring your Django project to use Celery
 ---------------------------------------------
 
@@ -207,11 +198,11 @@ You only need three simple steps to use celery with your Django project.
     3. Configure celery to use the AMQP user and virtual host we created
         before, by adding the following to your ``settings.py``::
 
-            AMQP_SERVER = "localhost"
-            AMQP_PORT = 5672
-            AMQP_USER = "myuser"
-            AMQP_PASSWORD = "mypassword"
-            AMQP_VHOST = "myvhost"
+            BROKER_HOST = "localhost"
+            BROKER_PORT = 5672
+            BROKER_USER = "myuser"
+            BROKER_PASSWORD = "mypassword"
+            BROKER_VHOST = "myvhost"
 
 
 That's it.
@@ -252,61 +243,27 @@ Defining and executing tasks
 **Please note** All of these tasks has to be stored in a real module, they can't
 be defined in the python shell or ipython/bpython. This is because the celery
 worker server needs access to the task function to be able to run it.
-So while it looks like we use the python shell to define the tasks in these
-examples, you can't do it this way. Put them in the ``tasks`` module of your
+Put them in the ``tasks`` module of your
 Django application. The worker server will automatically load any ``tasks.py``
 file for all of the applications listed in ``settings.INSTALLED_APPS``.
 Executing tasks using ``delay`` and ``apply_async`` can be done from the
 python shell, but keep in mind that since arguments are pickled, you can't
 use custom classes defined in the shell session.
 
-While you can use regular functions, the recommended way is to define
-a task class. This way you can cleanly upgrade the task to use the more
-advanced features of celery later.
-
-This is a task that basically does nothing but take some arguments,
-and return a value:
-
-    >>> from celery.task import Task
-    >>> from celery.registry import tasks
-    >>> class MyTask(Task):
-    ...     def run(self, some_arg, **kwargs):
-    ...         logger = self.get_logger(**kwargs)
-    ...         logger.info("Did something: %s" % some_arg)
-    ...         return 42
-    >>> tasks.register(MyTask)
-
-As you can see the worker is sending some keyword arguments to this task,
-this is the default keyword arguments. A task can choose not to take these,
-or only list the ones it want (the worker will do the right thing).
-The current default keyword arguments are:
-
-    * logfile
-
-        The currently used log file, can be passed on to ``self.get_logger``
-        to gain access to the workers log file via a ``logger.Logging``
-        instance.
-
-    * loglevel
-
-        The current loglevel used.
-
-    * task_id
-
-        The unique id of the executing task.
-
-    * task_name
-
-        Name of the executing task.
+This is a task that adds two numbers:
+::
 
-    * task_retries
+    from celery.decorators import task
 
-        How many times the current task has been retried.
-        (an integer starting a ``0``).
+    @task()
+    def add(x, y):
+        return x + y
 
-Now if we want to execute this task, we can use the ``delay`` method of the
-task class (this is a handy shortcut to the ``apply_async`` method which gives
-you greater control of the task execution).
+Now if we want to execute this task, we can use the
+``delay`` method of the task class.
+This is a handy shortcut to the ``apply_async`` method which gives
+greater control of the task execution (see ``userguide/executing`` for more
+information).
 
     >>> from myapp.tasks import MyTask
     >>> MyTask.delay(some_arg="foo")
@@ -319,32 +276,42 @@ picked it up.
 that RabbitMQ is running, and that the user/password has access to the virtual
 host you configured earlier.
 
-Right now we have to check the celery worker logfiles to know what happened with
-the task. This is because we didn't keep the ``AsyncResult`` object returned
-by ``delay``.
+Right now we have to check the celery worker logfiles to know what happened
+with the task. This is because we didn't keep the ``AsyncResult`` object
+returned by ``delay``.
 
 The ``AsyncResult`` lets us find the state of the task, wait for the task to
 finish and get its return value (or exception if the task failed).
 
 So, let's execute the task again, but this time we'll keep track of the task:
 
-    >>> result = MyTask.delay("hello")
+    >>> result = add.delay(4, 4)
     >>> result.ready() # returns True if the task has finished processing.
     False
     >>> result.result # task is not ready, so no return value yet.
     None
-    >>> result.get()   # Waits until the task is done and return the retval.
-    42
-    >>> result.result
-    42
+    >>> result.get()   # Waits until the task is done and returns the retval.
+    8
+    >>> result.result # direct access to result, doesn't re-raise errors.
+    8
     >>> result.successful() # returns True if the task didn't end in failure.
     True
 
 If the task raises an exception, the return value of ``result.successful()``
 will be ``False``, and ``result.result`` will contain the exception instance
 raised by the task.
 
 Worker auto-discovery of tasks
 ------------------------------
 
@@ -358,27 +325,48 @@ Periodic Tasks
 
 Periodic tasks are tasks that are run every ``n`` seconds. 
 Here's an example of a periodic task:
+::
 
-    >>> from celery.task import PeriodicTask
-    >>> from celery.registry import tasks
-    >>> from datetime import timedelta
-    >>> class MyPeriodicTask(PeriodicTask):
-    ...     run_every = timedelta(seconds=30)
-    ...
-    ...     def run(self, **kwargs):
-    ...         logger = self.get_logger(**kwargs)
-    ...         logger.info("Running periodic task!")
-    ...
+    from celery.task import PeriodicTask
+    from celery.registry import tasks
+    from datetime import timedelta
+
+    class MyPeriodicTask(PeriodicTask):
+        run_every = timedelta(seconds=30)
+
+        def run(self, **kwargs):
+            logger = self.get_logger(**kwargs)
+            logger.info("Running periodic task!")
     >>> tasks.register(MyPeriodicTask)
 
-**Note:** Periodic tasks does not support arguments, as this doesn't
-really make sense.
 
+If you want to use periodic tasks you need to start the ``celerybeat``
+service. You have to make sure only one instance of this server is running at
+any time, or else you will end up with multiple executions of the same task.
+
+To start the ``celerybeat`` service::
+
+    $ celerybeat --detach
 
-A look inside the worker
-========================
+or if using Django::
 
-.. image:: http://cloud.github.com/downloads/ask/celery/InsideTheWorker-v2.jpg
+    $ python manage.py celerybeat
+
+
+You can also start ``celerybeat`` with ``celeryd`` by using the ``-B`` option,
+this is convenient if you only have one server::
+
+    $ celeryd --detach -B
+
+or if using Django::
+
+    $ python manage.py celeryd --detach -B
+
+
+A look inside the components
+============================
+
+.. image:: http://cloud.github.com/downloads/ask/celery/Celery1.0-inside-worker.jpg
 
 Getting Help
 ============
@@ -423,3 +411,4 @@ This software is licensed under the ``New BSD License``. See the ``LICENSE``
 file in the top distribution directory for the full license text.
 
 .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround
+

+ 7 - 0
bin/celerybeat

@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import sys
+from celery.bin.celerybeat import run_clockservice, parse_options
+
+if __name__ == "__main__":
+    options = parse_options(sys.argv[1:])
+    run_clockservice(**vars(options))

+ 2 - 0
bin/celeryd

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 import sys
+if not '' in sys.path:
+    sys.path.insert(0, '')
 from celery.bin.celeryd import run_worker, parse_options
 
 if __name__ == "__main__":

+ 3 - 1
bin/celeryinit

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
-
+import sys
+if not '' in sys.path:
+    sys.path.insert(0, '')
 from celery.bin.celeryinit import main
 
 if __name__ == "__main__":

+ 1 - 1
celery/__init__.py

@@ -1,6 +1,6 @@
 """Distributed Task Queue"""
 
-VERSION = (0, 8, 0)
+VERSION = (0, 9, 0)
 
 __version__ = ".".join(map(str, VERSION))
 __author__ = "Ask Solem"

+ 31 - 36
celery/backends/__init__.py

@@ -1,7 +1,32 @@
-"""celery.backends"""
+import sys
 from functools import partial
+
 from celery import conf
-import sys
+
+BACKEND_ALIASES = {
+    "amqp": "celery.backends.amqp.AMQPBackend",
+    "database": "celery.backends.database.DatabaseBackend",
+    "db": "celery.backends.database.DatabaseBackend",
+    "redis": "celery.backends.redis.RedisBackend",
+    "cache": "celery.backends.cache.CacheBackend",
+    "mongodb": "celery.backends.mongodb.MongoBackend",
+    "tyrant": "celery.backends.tyrant.TyrantBackend",
+}
+
+_backend_cache = {}
+
+
+def resolve_backend(backend):
+    backend = BACKEND_ALIASES.get(backend, backend)
+    backend_module_name, _, backend_cls_name = backend.rpartition(".")
+    return backend_module_name, backend_cls_name
+
+
+def _get_backend_cls(backend):
+    backend_module_name, backend_cls_name = resolve_backend(backend)
+    __import__(backend_module_name)
+    backend_module = sys.modules[backend_module_name]
+    return getattr(backend_module, backend_cls_name)
 
 
 def get_backend_cls(backend):
@@ -12,11 +37,10 @@ def get_backend_cls(backend):
     ``"database"`` becomes ``"celery.backends.database"``.
 
     """
-    if backend.find(".") == -1:
-        backend = "celery.backends.%s" % backend
-    __import__(backend)
-    backend_module = sys.modules[backend]
-    return getattr(backend_module, "Backend")
+    if backend not in _backend_cache:
+        _backend_cache[backend] = _get_backend_cls(backend)
+    return _backend_cache[backend]
+
 
 """
 .. function:: get_default_backend_cls()
@@ -27,17 +51,6 @@ def get_backend_cls(backend):
 get_default_backend_cls = partial(get_backend_cls, conf.CELERY_BACKEND)
 
 
-"""
-.. function:: get_default_periodicstatus_backend_cls()
-
-    Get the backend class specified in
-    :setting:`CELERY_PERIODIC_STATUS_BACKEND`.
-
-"""
-get_default_periodicstatus_backend_cls = partial(get_backend_cls,
-                                        conf.CELERY_PERIODIC_STATUS_BACKEND)
-
-
 """
 .. class:: DefaultBackend
 
@@ -47,16 +60,6 @@ get_default_periodicstatus_backend_cls = partial(get_backend_cls,
 """
 DefaultBackend = get_default_backend_cls()
 
-
-"""
-.. class:: DefaultPeriodicStatusBackend
-
-    The default backend for storing periodic task metadata, specified
-    in :setting:`CELERY_PERIODIC_STATUS_BACKEND`.
-
-"""
-DefaultPeriodicStatusBackend = get_default_periodicstatus_backend_cls()
-
 """
 .. data:: default_backend
 
@@ -64,11 +67,3 @@ DefaultPeriodicStatusBackend = get_default_periodicstatus_backend_cls()
 
 """
 default_backend = DefaultBackend()
-
-"""
-.. data:: default_periodic_status_backend
-
-    An instance of :class:`DefaultPeriodicStatusBackend`.
-
-"""
-default_periodic_status_backend = DefaultPeriodicStatusBackend()

+ 7 - 10
celery/backends/amqp.py

@@ -1,12 +1,13 @@
 """celery.backends.amqp"""
-from carrot.connection import DjangoBrokerConnection
 from carrot.messaging import Consumer, Publisher
+from carrot.connection import DjangoBrokerConnection
+
 from celery.backends.base import BaseBackend
 
 RESULTSTORE_EXCHANGE = "celeryresults"
 
 
-class Backend(BaseBackend):
+class AMQPBackend(BaseBackend):
     """AMQP backend. Publish results by sending messages to the broker
     using the task id as routing key.
 
@@ -19,7 +20,7 @@ class Backend(BaseBackend):
     capabilities = ["ResultStore"]
 
     def __init__(self, *args, **kwargs):
-        super(Backend, self).__init__(*args, **kwargs)
+        super(AMQPBackend, self).__init__(*args, **kwargs)
         self.connection = DjangoBrokerConnection()
         self._cache = {}
 
@@ -56,11 +57,7 @@ class Backend(BaseBackend):
 
     def store_result(self, task_id, result, status, traceback=None):
         """Send task return value and status."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status == "FAILURE":
-            result = self.prepare_exception(result)
-
+        result = self.encode_result(result, status)
 
         meta = {"task_id": task_id,
                 "result": result,
@@ -74,9 +71,9 @@ class Backend(BaseBackend):
 
         return result
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if task with ``task_id`` has been executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get the status of a task."""

+ 23 - 19
celery/backends/base.py

@@ -1,11 +1,14 @@
 """celery.backends.base"""
 import time
-import operator
-from celery.serialization import pickle
-from celery.serialization import get_pickled_exception
-from celery.serialization import get_pickleable_exception
+
+from billiard.serialization import pickle
+from billiard.serialization import get_pickled_exception
+from billiard.serialization import get_pickleable_exception
+
 from celery.exceptions import TimeoutError
 
+EXCEPTION_STATES = frozenset(["RETRY", "FAILURE"])
+
 
 class BaseBackend(object):
     """The base backend class. All backends should inherit from this."""
@@ -13,6 +16,12 @@ class BaseBackend(object):
     capabilities = []
     TimeoutError = TimeoutError
 
+    def encode_result(self, result, status):
+        if status == "SUCCESS":
+            return self.prepare_value(result)
+        elif status in EXCEPTION_STATES:
+            return self.prepare_exception(result)
+
     def store_result(self, task_id, result, status):
         """Store the result and status of a task."""
         raise NotImplementedError(
@@ -20,7 +29,7 @@ class BaseBackend(object):
 
     def mark_as_done(self, task_id, result):
         """Mark task as successfully executed."""
-        return self.store_result(task_id, result, status="DONE")
+        return self.store_result(task_id, result, status="SUCCESS")
 
     def mark_as_failure(self, task_id, exc, traceback=None):
         """Mark task as executed with failure. Stores the execption."""
@@ -46,10 +55,8 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "get_status is not supported by this backend.")
 
-    def prepare_result(self, result):
-        """Prepare result for storage."""
-        if result is None:
-            return True
+    def prepare_value(self, result):
+        """Prepare value for storage."""
         return result
 
     def get_result(self, task_id):
@@ -62,9 +69,9 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "get_traceback is not supported by this backend.")
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task was successfully executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def cleanup(self):
         """Backend cleanup. Is run by
@@ -88,7 +95,7 @@ class BaseBackend(object):
 
         while True:
             status = self.get_status(task_id)
-            if status == "DONE":
+            if status == "SUCCESS":
                 return self.get_result(task_id)
             elif status == "FAILURE":
                 raise self.get_result(task_id)
@@ -127,10 +134,7 @@ class KeyValueStoreBackend(BaseBackend):
 
     def store_result(self, task_id, result, status, traceback=None):
         """Store task result and status."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status == "FAILURE":
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
         meta = {"status": status, "result": result, "traceback": traceback}
         self.set(self.get_cache_key_for_task(task_id), pickle.dumps(meta))
         return result
@@ -152,9 +156,9 @@ class KeyValueStoreBackend(BaseBackend):
         meta = self._get_task_meta_for(task_id)
         return meta["traceback"]
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task executed successfully."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
@@ -164,6 +168,6 @@ class KeyValueStoreBackend(BaseBackend):
         if not meta:
             return {"status": "PENDING", "result": None}
         meta = pickle.loads(str(meta))
-        if meta.get("status") == "DONE":
+        if meta.get("status") == "SUCCESS":
             self._cache[task_id] = meta
         return meta
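To illustrate the ``KeyValueStoreBackend`` contract used above
(``store_result`` pickles a meta dict and hands it to ``set``, while
``_get_task_meta_for`` reads it back through ``get``), here is a minimal,
hypothetical in-memory subclass. It is not a backend shipped with celery, just
a sketch of the two methods a key/value backend has to provide:

.. code-block:: python

    from celery.backends.base import KeyValueStoreBackend


    class MemoryBackend(KeyValueStoreBackend):
        """Hypothetical in-memory result store, for illustration only."""

        def __init__(self, *args, **kwargs):
            super(MemoryBackend, self).__init__(*args, **kwargs)
            self._data = {}

        def get(self, key):
            return self._data.get(key)

        def set(self, key, value):
            self._data[key] = value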

+ 4 - 3
celery/backends/cache.py

@@ -1,9 +1,10 @@
 """celery.backends.cache"""
+from django.utils.encoding import smart_str
 from django.core.cache import cache, get_cache
 from django.core.cache.backends.base import InvalidCacheBackendError
-from django.utils.encoding import smart_str
-from celery.backends.base import KeyValueStoreBackend
+
 from celery import conf
+from celery.backends.base import KeyValueStoreBackend
 
 # CELERY_CACHE_BACKEND overrides the django-global(tm) backend settings.
 if conf.CELERY_CACHE_BACKEND:
@@ -39,7 +40,7 @@ except InvalidCacheBackendError:
     pass
 
 
-class Backend(KeyValueStoreBackend):
+class CacheBackend(KeyValueStoreBackend):
     """Backend using the Django cache framework to store task metadata."""
 
     def get(self, key):

+ 8 - 29
celery/backends/database.py

@@ -1,48 +1,27 @@
 """celery.backends.database"""
-from celery.models import TaskMeta, PeriodicTaskMeta
+from celery.models import TaskMeta
 from celery.backends.base import BaseBackend
 
 
-class Backend(BaseBackend):
+class DatabaseBackend(BaseBackend):
     """The database backends. Using Django models to store task metadata."""
 
-    capabilities = ["ResultStore", "PeriodicStatus"]
+    capabilities = ["ResultStore"]
 
     def __init__(self, *args, **kwargs):
-        super(Backend, self).__init__(*args, **kwargs)
+        super(DatabaseBackend, self).__init__(*args, **kwargs)
         self._cache = {}
 
-    def init_periodic_tasks(self):
-        """Create entries for all periodic tasks in the database."""
-        PeriodicTaskMeta.objects.init_entries()
-
-    def run_periodic_tasks(self):
-        """Run all waiting periodic tasks.
-
-        :returns: a list of ``(task, task_id)`` tuples containing
-            the task class and id for the resulting tasks applied.
-
-        """
-        waiting_tasks = PeriodicTaskMeta.objects.get_waiting_tasks()
-        task_id_tuples = []
-        for waiting_task in waiting_tasks:
-            task_id = waiting_task.delay()
-            task_id_tuples.append((waiting_task, task_id))
-        return task_id_tuples
-
     def store_result(self, task_id, result, status, traceback=None):
         """Store return value and status of an executed task."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status in ["FAILURE", "RETRY"]:
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
         TaskMeta.objects.store_result(task_id, result, status,
                                       traceback=traceback)
         return result
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if task with ``task_id`` has been executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get the status of a task."""
@@ -65,7 +44,7 @@ class Backend(BaseBackend):
         if task_id in self._cache:
             return self._cache[task_id]
         meta = TaskMeta.objects.get_task(task_id)
-        if meta.status == "DONE":
+        if meta.status == "SUCCESS":
             self._cache[task_id] = meta
         return meta
 

+ 12 - 102
celery/backends/mongodb.py

@@ -1,22 +1,16 @@
 """MongoDB backend for celery."""
-
-import random
-from datetime import datetime, timedelta
+from datetime import datetime
 
 from django.core.exceptions import ImproperlyConfigured
-from celery.serialization import pickle
-from celery.backends.base import BaseBackend
-from celery.loaders import settings
-from celery.conf import TASK_RESULT_EXPIRES
-from celery.registry import tasks
-
+from billiard.serialization import pickle
 try:
     import pymongo
 except ImportError:
     pymongo = None
 
-# taken from celery.managers.PeriodicTaskManager
-SERVER_DRIFT = timedelta(seconds=random.vonmisesvariate(1, 4))
+from celery.backends.base import BaseBackend
+from celery.loaders import settings
+from celery.conf import TASK_RESULT_EXPIRES
 
 
 class Bunch:
@@ -25,9 +19,9 @@ class Bunch:
         self.__dict__.update(kw)
 
 
-class Backend(BaseBackend):
+class MongoBackend(BaseBackend):
 
-    capabilities = ("ResultStore", "PeriodicStatus")
+    capabilities = ["ResultStore"]
 
     mongodb_host = 'localhost'
     mongodb_port = 27017
@@ -35,7 +29,6 @@ class Backend(BaseBackend):
     mongodb_password = None
     mongodb_database = 'celery'
     mongodb_taskmeta_collection = 'celery_taskmeta'
-    mongodb_periodictaskmeta_collection = 'celery_periodictaskmeta'
 
     def __init__(self, *args, **kwargs):
         """Initialize MongoDB backend instance.
@@ -65,11 +58,8 @@ class Backend(BaseBackend):
                     'database', self.mongodb_database)
             self.mongodb_taskmeta_collection = conf.get(
                 'taskmeta_collection', self.mongodb_taskmeta_collection)
-            self.mongodb_collection_periodictaskmeta = conf.get(
-                'periodictaskmeta_collection',
-                self.mongodb_periodictaskmeta_collection)
 
-        super(Backend, self).__init__(*args, **kwargs)
+        super(MongoBackend, self).__init__(*args, **kwargs)
         self._cache = {}
         self._connection = None
         self._database = None
@@ -104,91 +94,11 @@ class Backend(BaseBackend):
             # goes out of scope
             self._connection = None
 
-    def init_periodic_tasks(self):
-        """Create collection for periodic tasks in database."""
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-        collection.ensure_index("name", pymongo.ASCENDING, unique=True)
-
-        periodic_tasks = tasks.get_all_periodic()
-        for task_name in periodic_tasks.keys():
-            if not collection.find_one({"name": task_name}):
-                collection.save({"name": task_name,
-                                 "last_run_at": datetime.fromtimestamp(0),
-                                 "total_run_count": 0}, safe=True)
-
-    def run_periodic_tasks(self):
-        """Run all waiting periodic tasks.
-
-        :returns: a list of ``(task, task_id)`` tuples containing
-            the task class and id for the resulting tasks applied.
-        """
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-
-        waiting_tasks = self._get_waiting_tasks()
-        task_id_tuples = []
-        for waiting_task in waiting_tasks:
-            task = tasks[waiting_task['name']]
-            resp = task.delay()
-            collection.update({'_id': waiting_task['_id']},
-                              {"$inc": {"total_run_count": 1}})
-
-            task_meta = Bunch(name=waiting_task['name'],
-                              last_run_at=waiting_task['last_run_at'],
-                              total_run_count=waiting_task['total_run_count'])
-            task_id_tuples.append((task_meta, resp.task_id))
-
-        return task_id_tuples
-
-    def _is_time(self, last_run_at, run_every):
-        """Check if if it is time to run the periodic task.
-
-        :param last_run_at: Last time the periodic task was run.
-        :param run_every: How often to run the periodic task.
-
-        :rtype bool:
-
-        """
-        # code taken from celery.managers.PeriodicTaskManager
-        run_every_drifted = run_every + SERVER_DRIFT
-        run_at = last_run_at + run_every_drifted
-        if datetime.now() > run_at:
-            return True
-        return False
-
-    def _get_waiting_tasks(self):
-        """Get all waiting periodic tasks."""
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-
-        periodic_tasks = tasks.get_all_periodic()
-
-        # find all periodic tasks to be run
-        waiting = []
-        for task_meta in collection.find():
-            if task_meta['name'] in periodic_tasks:
-                task = periodic_tasks[task_meta['name']]
-                run_every = task.run_every
-                if self._is_time(task_meta['last_run_at'], run_every):
-                    collection.update(
-                        {"name": task_meta['name'],
-                         "last_run_at": task_meta['last_run_at']},
-                        {"$set": {"last_run_at": datetime.now()}})
-
-                    if db.last_status()['updatedExisting']:
-                        waiting.append(task_meta)
-
-        return waiting
-
     def store_result(self, task_id, result, status, traceback=None):
         """Store return value and status of an executed task."""
         from pymongo.binary import Binary
 
-        if status == 'DONE':
-            result = self.prepare_result(result)
-        elif status == 'FAILURE':
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
 
         meta = {"_id": task_id,
                 "status": status,
@@ -201,9 +111,9 @@ class Backend(BaseBackend):
 
         taskmeta_collection.save(meta, safe=True)
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task executed successfully."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get status of a task."""
@@ -240,7 +150,7 @@ class Backend(BaseBackend):
             "date_done": obj["date_done"],
             "traceback": pickle.loads(str(obj["traceback"])),
         }
-        if meta["status"] == "DONE":
+        if meta["status"] == "SUCCESS":
             self._cache[task_id] = meta
 
         return meta

+ 6 - 5
celery/backends/redis.py

@@ -1,15 +1,15 @@
 """celery.backends.tyrant"""
 from django.core.exceptions import ImproperlyConfigured
 from celery.backends.base import KeyValueStoreBackend
-from celery.loaders import settings
-
 try:
     import redis
 except ImportError:
     redis = None
 
+from celery.loaders import settings
+
 
-class Backend(KeyValueStoreBackend):
+class RedisBackend(KeyValueStoreBackend):
     """Redis based task backend store.
 
     .. attribute:: redis_host
@@ -30,7 +30,8 @@ class Backend(KeyValueStoreBackend):
     redis_timeout = None
     redis_connect_retry = None
 
-    def __init__(self, redis_host=None, redis_port=None, redis_db=None):
+    def __init__(self, redis_host=None, redis_port=None, redis_db=None,
+            redis_timeout=None, redis_connect_timeout=None):
         if not redis:
             raise ImproperlyConfigured(
                     "You need to install the redis library in order to use "
@@ -53,7 +54,7 @@ class Backend(KeyValueStoreBackend):
             raise ImproperlyConfigured(
                 "In order to use the Redis result store backend, you have to "
                 "set the REDIS_HOST and REDIS_PORT settings")
-        super(Backend, self).__init__()
+        super(RedisBackend, self).__init__()
         self._connection = None
 
     def open(self):

+ 5 - 5
celery/backends/tyrant.py

@@ -1,15 +1,15 @@
 """celery.backends.tyrant"""
 from django.core.exceptions import ImproperlyConfigured
-from celery.backends.base import KeyValueStoreBackend
-from celery.loaders import settings
-
 try:
     import pytyrant
 except ImportError:
     pytyrant = None
 
+from celery.backends.base import KeyValueStoreBackend
+from celery.loaders import settings
+
 
-class Backend(KeyValueStoreBackend):
+class TyrantBackend(KeyValueStoreBackend):
     """Tokyo Cabinet based task backend store.
 
     .. attribute:: tyrant_host
@@ -46,7 +46,7 @@ class Backend(KeyValueStoreBackend):
             raise ImproperlyConfigured(
                 "To use the Tokyo Tyrant backend, you have to "
                 "set the TT_HOST and TT_PORT settings in your settings.py")
-        super(Backend, self).__init__()
+        super(TyrantBackend, self).__init__()
         self._connection = None
 
     def open(self):

+ 189 - 0
celery/beat.py

@@ -0,0 +1,189 @@
+import time
+import shelve
+import atexit
+import threading
+from UserDict import UserDict
+from datetime import datetime
+
+from celery import conf
+from celery import registry
+from celery.log import setup_logger
+from celery.exceptions import NotRegistered
+
+
+class SchedulingError(Exception):
+    """An error occurred while scheduling a task."""
+
+
+class ScheduleEntry(object):
+    """An entry in the scheduler.
+
+    :param task: The task class.
+    :keyword last_run_at: The time and date when this task was last run.
+    :keyword total_run_count: Total number of times this periodic task has
+        been executed.
+
+    """
+
+    def __init__(self, name, last_run_at=None,
+            total_run_count=None):
+        self.name = name
+        self.last_run_at = last_run_at or datetime.now()
+        self.total_run_count = total_run_count or 0
+
+    def next(self):
+        return self.__class__(self.name, datetime.now(),
+                              self.total_run_count + 1)
+
+    def is_due(self, run_every):
+        return datetime.now() > (self.last_run_at + run_every)
+
+
+class Scheduler(UserDict):
+    """Scheduler for periodic tasks.
+
+    :keyword registry: The task registry to use.
+    :keyword schedule: The schedule dictionary. Default is the global
+        persistent schedule ``celery.beat.schedule``.
+
+    """
+    interval = 1
+
+    def __init__(self, **kwargs):
+
+        def _get_default_logger():
+            import multiprocessing
+            return multiprocessing.get_logger()
+
+        attr_defaults = {"registry": lambda: {},
+                         "schedule": lambda: {},
+                         "interval": lambda: self.interval,
+                         "logger": _get_default_logger}
+
+        for attr_name, attr_default_gen in attr_defaults.items():
+            if attr_name in kwargs:
+                attr_value = kwargs[attr_name]
+            else:
+                attr_value = attr_default_gen()
+            setattr(self, attr_name, attr_value)
+
+        self.cleanup()
+        self.schedule_registry()
+
+    def tick(self):
+        """Run a tick, that is one iteration of the scheduler.
+        Executes all due tasks."""
+        for entry in self.get_due_tasks():
+            self.logger.debug("Scheduler: Sending due task %s" % (
+                    entry.name))
+            result = self.apply_async(entry)
+            self.logger.debug("Scheduler: %s sent. id->%s" % (
+                    entry.name, result.task_id))
+
+    def get_due_tasks(self):
+        """Get all the schedule entries that are due to execution."""
+        return filter(self.is_due, self.schedule.values())
+
+    def get_task(self, name):
+        try:
+            return self.registry[name]
+        except KeyError:
+            raise NotRegistered(name)
+
+    def is_due(self, entry):
+        return entry.is_due(self.get_task(entry.name).run_every)
+
+    def apply_async(self, entry):
+
+        # Update timestamps and run counts before we actually execute,
+        # so these are recorded even if an exception is raised (otherwise
+        # the entry would be considered due again on every tick).
+        entry = self.schedule[entry.name] = entry.next()
+        task = self.get_task(entry.name)
+
+        try:
+            result = task.apply_async()
+        except Exception, exc:
+            raise SchedulingError(
+                    "Couldn't apply scheduled task %s: %s" % (
+                        task.name, exc))
+        return result
+
+    def schedule_registry(self):
+        """Add the current contents of the registry to the schedule."""
+        periodic_tasks = self.registry.get_all_periodic()
+        for name, task in periodic_tasks.items():
+            if name not in self.schedule:
+                self.logger.debug(
+                        "Scheduler: Adding periodic task %s to schedule" % (
+                            task.name))
+            self.schedule.setdefault(name, ScheduleEntry(task.name))
+
+    def cleanup(self):
+        for task_name, entry in self.schedule.items():
+            if task_name not in self.registry:
+                self.schedule.pop(task_name, None)
+
+    @property
+    def schedule(self):
+        return self.data
+
+
+class ClockService(object):
+    scheduler_cls = Scheduler
+    schedule_filename = conf.CELERYBEAT_SCHEDULE_FILENAME
+    registry = registry.tasks
+
+    def __init__(self, logger=None, is_detached=False):
+        self.logger = logger
+        self._shutdown = threading.Event()
+        self._stopped = threading.Event()
+
+    def start(self):
+        self.logger.info("ClockService: Starting...")
+        schedule = shelve.open(filename=self.schedule_filename)
+        #atexit.register(schedule.close)
+        scheduler = self.scheduler_cls(schedule=schedule,
+                                       registry=self.registry,
+                                       logger=self.logger)
+        self.logger.debug(
+                "ClockService: Ticking with interval->%d, schedule->%s" % (
+                    scheduler.interval, self.schedule_filename))
+
+        synced = [False]
+        def _stop():
+            if not synced[0]:
+                self.logger.debug("ClockService: Syncing schedule to disk...")
+                schedule.sync()
+                schedule.close()
+                synced[0] = True
+                self._stopped.set()
+
+        try:
+            while True:
+                if self._shutdown.isSet():
+                    break
+                scheduler.tick()
+                time.sleep(scheduler.interval)
+        except (KeyboardInterrupt, SystemExit):
+            _stop()
+        finally:
+            _stop()
+
+    def stop(self, wait=False):
+        self._shutdown.set()
+        wait and self._stopped.wait() # block until shutdown done.
+
+
+class ClockServiceThread(threading.Thread):
+
+    def __init__(self, *args, **kwargs):
+        self.clockservice = ClockService(*args, **kwargs)
+        threading.Thread.__init__(self)
+        self.setDaemon(True)
+
+    def run(self):
+        self.clockservice.start()
+
+    def stop(self):
+        self.clockservice.stop(wait=True)
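
For reference, a minimal sketch (with a hypothetical ``DummyTask`` and assuming a configured Django settings module) of how the scheduler above decides that an entry is due: an entry fires once ``datetime.now()`` has passed ``last_run_at + run_every``, and ``tick()`` sends every due entry.

    from datetime import datetime, timedelta

    from celery.beat import ScheduleEntry

    class DummyTask(object):
        # Hypothetical periodic task; only the attributes the scheduler
        # reads are defined here.
        name = "dummy"
        run_every = timedelta(minutes=5)

    entry = ScheduleEntry(DummyTask.name,
                          last_run_at=datetime.now() - timedelta(minutes=10))
    print(entry.is_due(DummyTask.run_every))  # True: last run was 10 minutes ago
    print(entry.next().total_run_count)       # 1: counter bumped for the next run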

+ 170 - 0
celery/bin/celerybeat.py

@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+"""celerybeat
+
+.. program:: celerybeat
+
+.. cmdoption:: -f, --logfile
+
+    Path to log file. If no logfile is specified, ``stderr`` is used.
+
+.. cmdoption:: -l, --loglevel
+
+    Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``,
+    ``ERROR``, ``CRITICAL``, or ``FATAL``.
+
+.. cmdoption:: -p, --pidfile
+
+    Path to pidfile.
+
+.. cmdoption:: -d, --detach, --daemon
+
+    Run in the background as a daemon.
+
+.. cmdoption:: -u, --uid
+
+    User-id to run ``celerybeat`` as when in daemon mode.
+
+.. cmdoption:: -g, --gid
+
+    Group-id to run ``celerybeat`` as when in daemon mode.
+
+.. cmdoption:: --umask
+
+    umask of the process when in daemon mode.
+
+.. cmdoption:: --workdir
+
+    Directory to change to when in daemon mode.
+
+.. cmdoption:: --chroot
+
+    Change root directory to this path when in daemon mode.
+
+"""
+import os
+import sys
+import traceback
+import optparse
+
+from celery import conf
+from celery import platform
+from celery import __version__
+from celery.log import emergency_error
+from celery.beat import ClockService
+from celery.loaders import current_loader, settings
+
+STARTUP_INFO_FMT = """
+Configuration ->
+    * Broker -> amqp://%(vhost)s@%(host)s:%(port)s
+    * Exchange -> %(exchange)s (%(exchange_type)s)
+    * Consumer -> Queue:%(consumer_queue)s Routing:%(consumer_rkey)s
+""".strip()
+
+OPTION_LIST = (
+    optparse.make_option('-f', '--logfile', default=conf.CELERYBEAT_LOG_FILE,
+            action="store", dest="logfile",
+            help="Path to log file."),
+    optparse.make_option('-l', '--loglevel',
+            default=conf.CELERYBEAT_LOG_LEVEL,
+            action="store", dest="loglevel",
+            help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."),
+    optparse.make_option('-p', '--pidfile',
+            default=conf.CELERYBEAT_PID_FILE,
+            action="store", dest="pidfile",
+            help="Path to pidfile."),
+    optparse.make_option('-d', '--detach', '--daemon', default=False,
+            action="store_true", dest="detach",
+            help="Run in the background as a daemon."),
+    optparse.make_option('-u', '--uid', default=None,
+            action="store", dest="uid",
+            help="User-id to run celerybeat as when in daemon mode."),
+    optparse.make_option('-g', '--gid', default=None,
+            action="store", dest="gid",
+            help="Group-id to run celerybeat as when in daemon mode."),
+    optparse.make_option('--umask', default=0,
+            action="store", type="int", dest="umask",
+            help="umask of the process when in daemon mode."),
+    optparse.make_option('--workdir', default=None,
+            action="store", dest="working_directory",
+            help="Directory to change to when in daemon mode."),
+    optparse.make_option('--chroot', default=None,
+            action="store", dest="chroot",
+            help="Change root directory to this path when in daemon mode."),
+    )
+
+
+def run_clockservice(detach=False, loglevel=conf.CELERYBEAT_LOG_LEVEL,
+        logfile=conf.CELERYBEAT_LOG_FILE, pidfile=conf.CELERYBEAT_PID_FILE,
+        umask=0, uid=None, gid=None, working_directory=None, chroot=None,
+        **kwargs):
+    """Starts the celerybeat clock server."""
+
+    print("Celery Beat %s is starting." % __version__)
+
+    # Setup logging
+    if not isinstance(loglevel, int):
+        loglevel = conf.LOG_LEVELS[loglevel.upper()]
+    if not detach:
+        logfile = None # log to stderr when not running in the background.
+
+    # Dump configuration to screen so we have some basic information
+    # when users send e-mails.
+    print(STARTUP_INFO_FMT % {
+            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
+            "host": getattr(settings, "AMQP_SERVER", "(default)"),
+            "port": getattr(settings, "AMQP_PORT", "(default)"),
+            "exchange": conf.AMQP_EXCHANGE,
+            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
+            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
+            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
+            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
+            "loglevel": loglevel,
+            "pidfile": pidfile,
+    })
+
+    print("Celery Beat has started.")
+    if detach:
+        from celery.log import setup_logger, redirect_stdouts_to_logger
+        context = platform.create_daemon_context(logfile, pidfile,
+                                        chroot_directory=chroot,
+                                        working_directory=working_directory,
+                                        umask=umask,
+                                        uid=uid,
+                                        gid=gid)
+        context.open()
+        logger = setup_logger(loglevel, logfile)
+        redirect_stdouts_to_logger(logger, loglevel)
+
+    # Run the worker init handler.
+    # (Usually imports task modules and such.)
+    current_loader.on_worker_init()
+
+    def _run_clock():
+        logger = setup_logger(loglevel, logfile)
+        clockservice = ClockService(logger=logger, is_detached=detach)
+
+        try:
+            clockservice.start()
+        except Exception, e:
+            emergency_error(logfile,
+                    "celerybeat raised exception %s: %s\n%s" % (
+                            e.__class__, e, traceback.format_exc()))
+
+    try:
+        _run_clock()
+    except:
+        if detach:
+            context.close()
+        raise
+
+
+def parse_options(arguments):
+    """Parse the available options to ``celeryd``."""
+    parser = optparse.OptionParser(option_list=OPTION_LIST)
+    options, values = parser.parse_args(arguments)
+    return options
+
+
+if __name__ == "__main__":
+    options = parse_options(sys.argv[1:])
+    run_clockservice(**vars(options))
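
As a hedged usage sketch, the same entry point the ``celerybeat`` script uses can be driven programmatically; the pidfile path below is illustrative, and calling ``run_clockservice`` actually starts (and blocks in) the clock service.

    from celery.bin.celerybeat import parse_options, run_clockservice

    options = parse_options(["--loglevel=DEBUG",
                             "--pidfile=/var/run/celerybeat.pid"])
    run_clockservice(**vars(options))  # blocks, ticking the scheduler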

+ 46 - 42
celery/bin/celeryd.py

@@ -21,6 +21,11 @@
 
     Path to pidfile.
 
+.. cmdoption:: -B, --beat
+
+    Also run the ``celerybeat`` periodic task scheduler. Please note that
+    there must only be one instance of this service.
+
 .. cmdoption:: -s, --statistics
 
     Turn on reporting of statistics (remember to flush the statistics message
@@ -30,10 +35,6 @@
 
     Run in the background as a daemon.
 
-.. cmdoption:: -S, --supervised
-
-    Restart the worker server if it dies.
-
 .. cmdoption:: --discard
 
     Discard all waiting tasks before the daemon is started.
@@ -63,21 +64,21 @@
 """
 import os
 import sys
-from celery.loaders import current_loader
-from celery.loaders import settings
+import multiprocessing
+import traceback
+import optparse
+
+from carrot.connection import DjangoBrokerConnection
+
+from celery import conf
+from celery import platform
 from celery import __version__
-from celery.supervisor import OFASupervisor
 from celery.log import emergency_error
-from celery.conf import LOG_LEVELS, DAEMON_LOG_FILE, DAEMON_LOG_LEVEL
-from celery.conf import DAEMON_CONCURRENCY, DAEMON_PID_FILE
-from celery import conf
-from celery import discovery
 from celery.task import discard_all
 from celery.worker import WorkController
-from celery import platform
-import multiprocessing
-import traceback
-import optparse
+from celery.loaders import current_loader, settings
 
 USE_STATISTICS = getattr(settings, "CELERY_STATISTICS", False)
 # Make sure the setting exists.
@@ -85,15 +86,17 @@ settings.CELERY_STATISTICS = USE_STATISTICS
 
 STARTUP_INFO_FMT = """
 Configuration ->
-    * Broker -> amqp://%(vhost)s@%(host)s:%(port)s
+    * Broker -> %(carrot_backend)s://%(vhost)s@%(host)s:%(port)s
     * Exchange -> %(exchange)s (%(exchange_type)s)
     * Consumer -> Queue:%(consumer_queue)s Routing:%(consumer_rkey)s
     * Concurrency -> %(concurrency)s
     * Statistics -> %(statistics)s
+    * Celerybeat -> %(celerybeat)s
 """.strip()
 
 OPTION_LIST = (
-    optparse.make_option('-c', '--concurrency', default=DAEMON_CONCURRENCY,
+    optparse.make_option('-c', '--concurrency',
+            default=conf.DAEMON_CONCURRENCY,
             action="store", dest="concurrency", type="int",
             help="Number of child processes processing the queue."),
     optparse.make_option('--discard', default=False,
@@ -104,21 +107,22 @@ OPTION_LIST = (
     optparse.make_option('-s', '--statistics', default=USE_STATISTICS,
             action="store_true", dest="statistics",
             help="Collect statistics."),
-    optparse.make_option('-f', '--logfile', default=DAEMON_LOG_FILE,
+    optparse.make_option('-f', '--logfile', default=conf.DAEMON_LOG_FILE,
             action="store", dest="logfile",
             help="Path to log file."),
-    optparse.make_option('-l', '--loglevel', default=DAEMON_LOG_LEVEL,
+    optparse.make_option('-l', '--loglevel', default=conf.DAEMON_LOG_LEVEL,
             action="store", dest="loglevel",
             help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."),
-    optparse.make_option('-p', '--pidfile', default=DAEMON_PID_FILE,
+    optparse.make_option('-p', '--pidfile', default=conf.DAEMON_PID_FILE,
             action="store", dest="pidfile",
             help="Path to pidfile."),
+    optparse.make_option('-B', '--beat', default=False,
+            action="store_true", dest="run_clockservice",
+            help="Also run the celerybeat periodic task scheduler. \
+                  Please note that only one instance must be running."),
     optparse.make_option('-d', '--detach', '--daemon', default=False,
             action="store_true", dest="detach",
             help="Run in the background as a daemon."),
-    optparse.make_option('-S', '--supervised', default=False,
-            action="store_true", dest="supervised",
-            help="Restart the worker server if it dies."),
     optparse.make_option('-u', '--uid', default=None,
             action="store", dest="uid",
             help="User-id to run celeryd as when in daemon mode."),
@@ -137,20 +141,15 @@ OPTION_LIST = (
     )
 
 
-def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
-        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
-        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
-        supervised=False, working_directory=None, chroot=None,
-        statistics=None, **kwargs):
+def run_worker(concurrency=conf.DAEMON_CONCURRENCY, detach=False,
+        loglevel=conf.DAEMON_LOG_LEVEL, logfile=conf.DAEMON_LOG_FILE,
+        discard=False, pidfile=conf.DAEMON_PID_FILE, umask=0,
+        uid=None, gid=None, working_directory=None,
+        chroot=None, statistics=None, run_clockservice=False, **kwargs):
     """Starts the celery worker server."""
 
     print("Celery %s is starting." % __version__)
 
-    # set SIGCLD back to the default SIG_DFL (before python-daemon overrode
-    # it) lets the parent wait() for the terminated child process and stops
-    # the 'OSError: [Errno 10] No child processes' problem.
-    platform.reset_signal("SIGCLD")
-
     if statistics is not None:
         settings.CELERY_STATISTICS = statistics
 
@@ -168,7 +167,7 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
 
     # Setup logging
     if not isinstance(loglevel, int):
-        loglevel = LOG_LEVELS[loglevel.upper()]
+        loglevel = conf.LOG_LEVELS[loglevel.upper()]
     if not detach:
         logfile = None # log to stderr when not running in the background.
 
@@ -180,10 +179,16 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
 
     # Dump configuration to screen so we have some basic information
     # when users send e-mails.
+    broker_connection = DjangoBrokerConnection()
+    carrot_backend = broker_connection.backend_cls
+    if carrot_backend and not isinstance(carrot_backend, str):
+        carrot_backend = carrot_backend.__name__
+
     print(STARTUP_INFO_FMT % {
-            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
-            "host": getattr(settings, "AMQP_SERVER", "(default)"),
-            "port": getattr(settings, "AMQP_PORT", "(default)"),
+            "carrot_backend": carrot_backend or "amqp",
+            "vhost": broker_connection.virtual_host or "(default)",
+            "host": broker_connection.hostname or "(default)",
+            "port": broker_connection.port or "(port)",
             "exchange": conf.AMQP_EXCHANGE,
             "exchange_type": conf.AMQP_EXCHANGE_TYPE,
             "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
@@ -193,7 +198,9 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
             "loglevel": loglevel,
             "pidfile": pidfile,
             "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
+            "celerybeat": run_clockservice and "ON" or "OFF",
     })
+    del(broker_connection)
 
     print("Celery has started.")
     if detach:
@@ -216,6 +223,7 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
         worker = WorkController(concurrency=concurrency,
                                 loglevel=loglevel,
                                 logfile=logfile,
+                                embed_clockservice=run_clockservice,
                                 is_detached=detach)
 
         # Install signal handler that restarts celeryd on SIGHUP,
@@ -229,10 +237,7 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
                             e.__class__, e, traceback.format_exc()))
 
     try:
-        if supervised:
-            OFASupervisor(target=run_worker).start()
-        else:
-            run_worker()
+        run_worker()
     except:
         if detach:
             context.close()
@@ -257,7 +262,6 @@ def install_worker_restart_handler(worker):
     platform.install_signal_handler("SIGHUP", restart_worker_sig_handler)
 
 
-
 def parse_options(arguments):
     """Parse the available options to ``celeryd``."""
     parser = optparse.OptionParser(option_list=OPTION_LIST)
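
Roughly, the new ``-B``/``--beat`` flag maps onto the ``run_clockservice`` argument of ``run_worker``; a hedged sketch of the programmatic equivalent (this starts a real worker, so it is only meant to show how the option is wired):

    from celery.bin.celeryd import run_worker

    # Equivalent in spirit to ``celeryd -B -l INFO -c 2`` on the command line.
    run_worker(concurrency=2, loglevel="INFO", run_clockservice=True)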

+ 2 - 1
celery/bin/celeryinit.py

@@ -1,3 +1,4 @@
+import sys
 
 
 def main():
@@ -5,7 +6,7 @@ def main():
     loader = Loader()
     conf = loader.read_configuration()
     from django.core.management import call_command, setup_environ
-    print("Creating database tables...")
+    sys.stderr.write("Creating database tables...\n")
     setup_environ(conf)
     call_command("syncdb")
 

+ 74 - 14
celery/conf.py

@@ -1,7 +1,8 @@
-"""celery.conf"""
-from celery.loaders import settings
-from datetime import timedelta
 import logging
+from datetime import timedelta
+
+from celery.registry import tasks
+from celery.loaders import settings
 
 DEFAULT_AMQP_EXCHANGE = "celery"
 DEFAULT_AMQP_PUBLISHER_ROUTING_KEY = "celery"
@@ -11,7 +12,7 @@ DEFAULT_AMQP_EXCHANGE_TYPE = "direct"
 DEFAULT_DAEMON_CONCURRENCY = 0 # defaults to cpu count
 DEFAULT_DAEMON_PID_FILE = "celeryd.pid"
 DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'
-DEFAULT_DAEMON_LOG_LEVEL = "INFO"
+DEFAULT_DAEMON_LOG_LEVEL = "WARN"
 DEFAULT_DAEMON_LOG_FILE = "celeryd.log"
 DEFAULT_AMQP_CONNECTION_TIMEOUT = 4
 DEFAULT_STATISTICS = False
@@ -21,7 +22,11 @@ DEFAULT_AMQP_CONNECTION_RETRY = True
 DEFAULT_AMQP_CONNECTION_MAX_RETRIES = 100
 DEFAULT_TASK_SERIALIZER = "pickle"
 DEFAULT_BACKEND = "database"
-DEFAULT_PERIODIC_STATUS_BACKEND = "database"
+DEFAULT_DISABLE_RATE_LIMITS = False
+DEFAULT_CELERYBEAT_PID_FILE = "celerybeat.pid"
+DEFAULT_CELERYBEAT_LOG_LEVEL = "INFO"
+DEFAULT_CELERYBEAT_LOG_FILE = "celerybeat.log"
+DEFAULT_CELERYBEAT_SCHEDULE_FILENAME = "celerybeat-schedule"
 
 
 """
@@ -245,22 +250,77 @@ CELERY_BACKEND = getattr(settings, "CELERY_BACKEND", DEFAULT_BACKEND)
 
 """
 
-.. data:: CELERY_PERIODIC_STATUS_BACKEND
+.. data:: CELERY_CACHE_BACKEND
+
+Use a custom cache backend for celery. If not set, the django-global
+cache backend in ``CACHE_BACKEND`` will be used.
+
+"""
+CELERY_CACHE_BACKEND = getattr(settings, "CELERY_CACHE_BACKEND", None)
+
+"""
+
+.. data:: CELERYBEAT_PID_FILE
 
-The backend used to store the status of periodic tasks.
+Name of celerybeat's pid file.
+Default is: ``celerybeat.pid``.
 
 """
-CELERY_PERIODIC_STATUS_BACKEND = getattr(settings,
-                                    "CELERY_PERIODIC_STATUS_BACKEND",
-                                    DEFAULT_PERIODIC_STATUS_BACKEND)
+CELERYBEAT_PID_FILE = getattr(settings, "CELERYBEAT_PID_FILE",
+                              DEFAULT_CELERYBEAT_PID_FILE)
 
 
 """
 
-.. data:: CELERY_CACHE_BACKEND
+.. data:: DEFAULT_RATE_LIMIT
 
-Use a custom cache backend for celery. If not set the django-global
-cache backend in ``CACHE_BACKEND`` will be used.
+The default rate limit applied to all tasks that don't have a custom
+rate limit defined. (Default: None)
 
 """
-CELERY_CACHE_BACKEND = getattr(settings, "CELERY_CACHE_BACKEND", None)
+DEFAULT_RATE_LIMIT = getattr(settings, "CELERY_DEFAULT_RATE_LIMIT", None)
+
+"""
+
+.. data:: DISABLE_RATE_LIMITS
+
+If ``True`` all rate limits will be disabled and all tasks will be executed
+as soon as possible.
+
+"""
+DISABLE_RATE_LIMITS = getattr(settings, "CELERY_DISABLE_RATE_LIMITS",
+                              DEFAULT_DISABLE_RATE_LIMITS)
+
+"""
+
+.. data:: CELERYBEAT_LOG_LEVEL
+
+Default log level for celerybeat.
+Default is: ``INFO``.
+
+"""
+CELERYBEAT_LOG_LEVEL = getattr(settings, "CELERYBEAT_LOG_LEVEL",
+                               DEFAULT_CELERYBEAT_LOG_LEVEL)
+
+"""
+
+.. data:: CELERYBEAT_LOG_FILE
+
+Default log file for celerybeat.
+Default is: ``celerybeat.log``.
+
+"""
+CELERYBEAT_LOG_FILE = getattr(settings, "CELERYBEAT_LOG_FILE",
+                              DEFAULT_CELERYBEAT_LOG_FILE)
+
+"""
+
+.. data:: CELERYBEAT_SCHEDULE_FILENAME
+
+Name of the persistent schedule database file.
+Default is: ``celerybeat-schedule``.
+
+"""
+CELERYBEAT_SCHEDULE_FILENAME = getattr(settings,
+                                       "CELERYBEAT_SCHEDULE_FILENAME",
+                                       DEFAULT_CELERYBEAT_SCHEDULE_FILENAME)
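
All of the new settings follow the same pattern: a ``getattr`` lookup on the Django settings object with a module-level default. A small illustrative sketch (the overrides shown are hypothetical values in a project's ``settings.py``):

    # In a project's settings.py (hypothetical overrides):
    #   CELERYBEAT_LOG_LEVEL = "DEBUG"
    #   CELERY_DISABLE_RATE_LIMITS = True

    from celery import conf
    print(conf.CELERYBEAT_LOG_LEVEL)  # "DEBUG" if overridden, "INFO" otherwise
    print(conf.DISABLE_RATE_LIMITS)   # True if overridden, False otherwise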

+ 0 - 0
celery/contrib/__init__.py


+ 19 - 0
celery/contrib/test_runner.py

@@ -0,0 +1,19 @@
+from django.conf import settings
+from django.test.simple import run_tests as run_tests_orig
+
+USAGE = """\
+Custom test runner to allow testing of celery delayed tasks.
+"""
+
+def run_tests(test_labels, *args, **kwargs):
+    """Django test runner allowing testing of celery delayed tasks.
+
+    All tasks are run locally, not in a worker.
+
+    To use this runner set ``settings.TEST_RUNNER``::
+
+        TEST_RUNNER = "celery.contrib.test_runner.run_tests"
+
+    """
+    settings.CELERY_ALWAYS_EAGER = True
+    return run_tests_orig(test_labels, *args, **kwargs)
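
A hedged sketch of a test that relies on this runner: with ``CELERY_ALWAYS_EAGER`` enabled, ``.delay()`` executes the task in-process, so its side effects are visible immediately. The ``record`` task below is hypothetical.

    from django.test import TestCase
    from celery.decorators import task

    executed = []

    @task()
    def record(x):
        executed.append(x)

    class EagerTaskTest(TestCase):

        def test_delay_runs_locally(self):
            record.delay(42)             # runs synchronously under this runner
            self.assertEqual(executed, [42])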

+ 2 - 3
celery/datastructures.py

@@ -3,10 +3,9 @@
 Custom Datastructures
 
 """
-from UserList import UserList
-from Queue import Queue
-from Queue import Empty as QueueEmpty
 import traceback
+from UserList import UserList
+from Queue import Queue, Empty as QueueEmpty
 
 
 class PositionQueue(UserList):

+ 73 - 0
celery/decorators.py

@@ -0,0 +1,73 @@
+from inspect import getargspec
+
+from celery.task.base import Task, PeriodicTask
+
+
+def task(**options):
+    """Make a task out of any callable.
+
+        Examples:
+
+            >>> @task()
+            ... def refresh_feed(url):
+            ...     return Feed.objects.get(url=url).refresh()
+
+
+            >>> refresh_feed("http://example.com/rss") # Regular
+            <Feed: http://example.com/rss>
+            >>> refresh_feed.delay("http://example.com/rss") # Async
+            <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
+
+            # Setting extra options and using retry:
+
+            >>> @task(exchange="feeds")
+            ... def refresh_feed(url, **kwargs):
+            ...     try:
+            ...         return Feed.objects.get(url=url).refresh()
+            ...     except socket.error, exc:
+            ...         refresh_feed.retry(args=[url], kwargs=kwargs,
+            ...                            exc=exc)
+
+
+    """
+
+    def _create_task_cls(fun):
+        base = options.pop("base", Task)
+
+        cls_name = fun.__name__
+
+        def run(self, *args, **kwargs):
+            return fun(*args, **kwargs)
+        run.__name__ = fun.__name__
+        run.argspec = getargspec(fun)
+
+        cls_dict = dict(options)
+        cls_dict["run"] = run
+        cls_dict["__module__"] = fun.__module__
+
+        task = type(cls_name, (base, ), cls_dict)()
+
+        return task
+
+    return _create_task_cls
+
+
+def periodic_task(**options):
+    """Task decorator to create a periodic task.
+
+    **Usage**
+
+    Run a task once every day:
+
+    .. code-block:: python
+
+        from datetime import timedelta
+
+        @periodic_task(run_every=timedelta(days=1))
+        def cronjob(**kwargs):
+            logger = cronjob.get_logger(**kwargs)
+            logger.warn("Task running...")
+
+    """
+    options["base"] = PeriodicTask
+    return task(**options)
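
For clarity, a small sketch of what the decorator produces: a ``Task`` subclass built with ``type()``, instantiated once, whose ``run()`` wraps the original function and whose extra options become class attributes (assuming a configured environment):

    from celery.decorators import task
    from celery.task.base import Task

    @task(ignore_result=True)
    def add(x, y):
        return x + y

    print(isinstance(add, Task))   # True: ``add`` is now a task instance
    print(add.__class__.__name__)  # "add": the class is named after the function
    print(add.run(2, 2))           # 4: the original function body
    print(add.ignore_result)       # True: decorator options became attributes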

+ 85 - 157
celery/execute.py

@@ -1,21 +1,19 @@
+import sys
+import inspect
+import traceback
+from datetime import datetime, timedelta
+
 from carrot.connection import DjangoBrokerConnection
+from billiard.utils.functional import curry
+
+from celery import signals
 from celery.conf import AMQP_CONNECTION_TIMEOUT
+from celery.utils import gen_unique_id, noop, fun_takes_kwargs
 from celery.result import AsyncResult, EagerResult
-from celery.messaging import TaskPublisher
 from celery.registry import tasks
-from celery.utils import gen_unique_id, noop, fun_takes_kwargs
-from celery.utils.functional import curry
-from datetime import datetime, timedelta
+from celery.messaging import TaskPublisher
 from celery.exceptions import RetryTaskError
 from celery.datastructures import ExceptionInfo
-from celery.backends import default_backend
-from celery.loaders import current_loader
-from celery.statistics import TaskTimerStats
-from celery import signals
-import sys
-import inspect
-import warnings
-import traceback
 
 
 def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
@@ -68,6 +66,9 @@ def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
         methods that have been registered with
         :mod:`carrot.serialization.registry`.
 
+    **Note**: If the ``CELERY_ALWAYS_EAGER`` setting is set, the task will
+    be executed locally by :func:`apply` instead of being sent to the queue.
+
     """
     args = args or []
     kwargs = kwargs or {}
@@ -159,8 +160,7 @@ def apply(task, args, kwargs, **options):
     task_id = gen_unique_id()
     retries = options.get("retries", 0)
 
-    # If it's a Task class we need to have to instance
-    # for it to be callable.
+    # If it's a Task class we need to instantiate it, so it's callable.
     task = inspect.isclass(task) and task() or task
 
     default_kwargs = {"task_name": task.name,
@@ -169,172 +169,100 @@ def apply(task, args, kwargs, **options):
                       "task_is_eager": True,
                       "logfile": None,
                       "loglevel": 0}
-    fun = getattr(task, "run", task)
-    supported_keys = fun_takes_kwargs(fun, default_kwargs)
+    supported_keys = fun_takes_kwargs(task.run, default_kwargs)
     extend_with = dict((key, val) for key, val in default_kwargs.items()
                             if key in supported_keys)
     kwargs.update(extend_with)
 
-    try:
-        ret_value = task(*args, **kwargs)
-        status = "DONE"
-        strtb = None
-    except Exception, exc:
-        type_, value_, tb = sys.exc_info()
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-        ret_value = exc
-        status = "FAILURE"
-
-    return EagerResult(task_id, ret_value, status, traceback=strtb)
-
-
-class ExecuteWrapper(object):
-    """Wraps the task in a jail, which catches all exceptions, and
-    saves the status and result of the task execution to the task
-    meta backend.
-
-    If the call was successful, it saves the result to the task result
-    backend, and sets the task status to ``"DONE"``.
-
-    If the call raises :exc:`celery.exceptions.RetryTaskError`, it extracts
-    the original exception, uses that as the result and sets the task status
-    to ``"RETRY"``.
-
-    If the call results in an exception, it saves the exception as the task
-    result, and sets the task status to ``"FAILURE"``.
-
-    :param fun: Callable object to execute.
-    :param task_id: The unique id of the task.
-    :param task_name: Name of the task.
-    :param args: List of positional args to pass on to the function.
-    :param kwargs: Keyword arguments mapping to pass on to the function.
-
-    :returns: the function return value on success, or
-        the exception instance on failure.
-
-    """
-
-    def __init__(self, fun, task_id, task_name, args=None, kwargs=None):
-        self.fun = fun
-        self.task_id = task_id
-        self.task_name = task_name
-        self.args = args or []
-        self.kwargs = kwargs or {}
-
-    def __call__(self, *args, **kwargs):
-        return self.execute_safe()
-
-    def execute_safe(self, *args, **kwargs):
+    trace = TaskTrace(task.name, task_id, args, kwargs, task=task)
+    retval = trace.execute()
+
+    return EagerResult(task_id, retval, trace.status,
+                       traceback=trace.strtb)
+
+
+class TraceInfo(object):
+    def __init__(self, status="PENDING", retval=None, exc_info=None):
+        self.status = status
+        self.retval = retval
+        self.exc_info = exc_info
+        self.exc_type = None
+        self.exc_value = None
+        self.tb = None
+        self.strtb = None
+        if self.exc_info:
+            self.exc_type, self.exc_value, self.tb = exc_info
+            self.strtb = "\n".join(traceback.format_exception(*exc_info))
+
+    @classmethod
+    def trace(cls, fun, args, kwargs):
+        """Trace the execution of a function, calling the appropiate callback
+        if the function raises retry, an failure or returned successfully."""
         try:
-            return self.execute(*args, **kwargs)
-        except Exception, exc:
-            type_, value_, tb = sys.exc_info()
-            exc = default_backend.prepare_exception(exc)
-            warnings.warn("Exception happend outside of task body: %s: %s" % (
-                str(exc.__class__), str(exc)))
-            return ExceptionInfo((type_, exc, tb))
-
-    def execute(self):
-        # Convenience variables
-        fun = self.fun
-        task_id = self.task_id
-        task_name = self.task_name
-        args = self.args
-        kwargs = self.kwargs
-
-        # Run task loader init handler.
-        current_loader.on_task_init(task_id, fun)
-
-        # Backend process cleanup
-        default_backend.process_cleanup()
-
-        # Send pre-run signal.
-        signals.task_prerun.send(sender=fun, task_id=task_id, task=fun,
-                                 args=args, kwargs=kwargs)
-
-        retval = None
-        timer_stat = TaskTimerStats.start(task_id, task_name, args, kwargs)
-        try:
-            result = fun(*args, **kwargs)
+            return cls("SUCCESS", retval=fun(*args, **kwargs))
         except (SystemExit, KeyboardInterrupt):
             raise
         except RetryTaskError, exc:
-            retval = self.handle_retry(exc, sys.exc_info())
+            return cls("RETRY", retval=exc, exc_info=sys.exc_info())
         except Exception, exc:
-            retval = self.handle_failure(exc, sys.exc_info())
-        else:
-            retval = self.handle_success(result)
-        finally:
-            timer_stat.stop()
+            return cls("FAILURE", retval=exc, exc_info=sys.exc_info())
 
-        # Send post-run signal.
-        signals.task_postrun.send(sender=fun, task_id=task_id, task=fun,
-                                  args=args, kwargs=kwargs, retval=retval)
-
-        return retval
 
-    def handle_success(self, retval):
-        """Handle successful execution.
+class TaskTrace(object):
 
-        Saves the result to the current result store (skipped if the callable
-            has a ``ignore_result`` attribute set to ``True``).
-
-        If the callable has a ``on_success`` function, it as called with
-        ``retval`` as argument.
-
-        :param retval: The return value.
+    def __init__(self, task_name, task_id, args, kwargs, task=None):
+        self.task_id = task_id
+        self.task_name = task_name
+        self.args = args
+        self.kwargs = kwargs
+        self.task = task or tasks[self.task_name]
+        self.status = "PENDING"
+        self.strtb = None
+        self._trace_handlers = {"FAILURE": self.handle_failure,
+                                "RETRY": self.handle_retry,
+                                "SUCCESS": self.handle_success}
+
+    def __call__(self):
+        return self.execute()
 
-        """
-        if not getattr(self.fun, "ignore_result", False):
-            default_backend.mark_as_done(self.task_id, retval)
+    def execute(self):
+        signals.task_prerun.send(sender=self.task, task_id=self.task_id,
+                                 task=self.task, args=self.args,
+                                 kwargs=self.kwargs)
+        retval = self._trace()
+
+        signals.task_postrun.send(sender=self.task, task_id=self.task_id,
+                                  task=self.task, args=self.args,
+                                  kwargs=self.kwargs, retval=retval)
+        return retval
 
-        # Run success handler last to be sure the status is saved.
-        success_handler = getattr(self.fun, "on_success", noop)
-        success_handler(retval, self.task_id, self.args, self.kwargs)
+    def _trace(self):
+        trace = TraceInfo.trace(self.task, self.args, self.kwargs)
+        self.status = trace.status
+        self.strtb = trace.strtb
+        handler = self._trace_handlers[trace.status]
+        return handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
 
+    def handle_success(self, retval, *args):
+        """Handle successful execution."""
+        self.task.on_success(retval, self.task_id, self.args, self.kwargs)
         return retval
 
-    def handle_retry(self, exc, exc_info):
+    def handle_retry(self, exc, type_, tb, strtb):
         """Handle retry exception."""
-        ### Task is to be retried.
-        type_, value_, tb = exc_info
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-
-        # RetryTaskError stores both a small message describing the retry
-        # and the original exception.
-        message, orig_exc = exc.args
-        default_backend.mark_as_retry(self.task_id, orig_exc, strtb)
+        self.task.on_retry(exc, self.task_id, self.args, self.kwargs)
 
         # Create a simpler version of the RetryTaskError that stringifies
         # the original exception instead of including the exception instance.
         # This is for reporting the retry in logs, e-mail etc, while
         # guaranteeing pickleability.
+        message, orig_exc = exc.args
         expanded_msg = "%s: %s" % (message, str(orig_exc))
-        retval = ExceptionInfo((type_,
-                                type_(expanded_msg, None),
-                                tb))
-
-        # Run retry handler last to be sure the status is saved.
-        retry_handler = getattr(self.fun, "on_retry", noop)
-        retry_handler(exc, self.task_id, self.args, self.kwargs)
+        return ExceptionInfo((type_,
+                              type_(expanded_msg, None),
+                              tb))
 
-        return retval
-
-    def handle_failure(self, exc, exc_info):
+    def handle_failure(self, exc, type_, tb, strtb):
         """Handle exception."""
-        ### Task ended in failure.
-        type_, value_, tb = exc_info
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-
-        # mark_as_failure returns an exception that is guaranteed to
-        # be pickleable.
-        stored_exc = default_backend.mark_as_failure(self.task_id, exc, strtb)
-
-        # wrap exception info + traceback and return it to caller.
-        retval = ExceptionInfo((type_, stored_exc, tb))
-
-        # Run error handler last to be sure the status is stored.
-        error_handler = getattr(self.fun, "on_failure", noop)
-        error_handler(stored_exc, self.task_id, self.args, self.kwargs)
-
-        return retval
+        self.task.on_failure(exc, self.task_id, self.args, self.kwargs)
+        return ExceptionInfo((type_, exc, tb))
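
To illustrate the new flow: ``TraceInfo.trace()`` classifies the outcome of the call, and ``TaskTrace`` dispatches to the matching handler. A hedged sketch with a hypothetical task object that only defines the hooks the handlers call:

    from celery.execute import TaskTrace
    from celery.utils import gen_unique_id

    class AddTask(object):
        # Hypothetical stand-in for a registered task.
        name = "demo.add"

        def __call__(self, x, y):
            return x + y

        def on_success(self, retval, task_id, args, kwargs):
            pass

        def on_retry(self, exc, task_id, args, kwargs):
            pass

        def on_failure(self, exc, task_id, args, kwargs):
            pass

    trace = TaskTrace(AddTask.name, gen_unique_id(), (2, 2), {}, task=AddTask())
    print(trace.execute())  # 4
    print(trace.status)     # "SUCCESS"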

+ 0 - 153
celery/fields.py

@@ -1,153 +0,0 @@
-"""
-
-Custom Django Model Fields.
-
-"""
-
-from copy import deepcopy
-from base64 import b64encode, b64decode
-from zlib import compress, decompress
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-from django.db import models
-from django.utils.encoding import force_unicode
-
-
-class PickledObject(str):
-    """A subclass of string so it can be told whether a string is a pickled
-    object or not (if the object is an instance of this class then it must
-    [well, should] be a pickled one).
-
-    Only really useful for passing pre-encoded values to ``default``
-    with ``dbsafe_encode``, not that doing so is necessary. If you
-    remove PickledObject and its references, you won't be able to pass
-    in pre-encoded values anymore, but you can always just pass in the
-    python objects themselves.
-
-    """
-    pass
-
-
-def dbsafe_encode(value, compress_object=False):
-    """We use deepcopy() here to avoid a problem with cPickle, where dumps
-    can generate different character streams for same lookup value if
-    they are referenced differently.
-
-    The reason this is important is because we do all of our lookups as
-    simple string matches, thus the character streams must be the same
-    for the lookups to work properly. See tests.py for more
-    information.
-
-    """
-    if not compress_object:
-        value = b64encode(pickle.dumps(deepcopy(value)))
-    else:
-        value = b64encode(compress(pickle.dumps(deepcopy(value))))
-    return PickledObject(value)
-
-
-def dbsafe_decode(value, compress_object=False):
-    if not compress_object:
-        value = pickle.loads(b64decode(value))
-    else:
-        value = pickle.loads(decompress(b64decode(value)))
-    return value
-
-
-class PickledObjectField(models.Field):
-    """A field that will accept *any* python object and store it in the
-    database. PickledObjectField will optionally compress it's values if
-    declared with the keyword argument ``compress=True``.
-
-    Does not actually encode and compress ``None`` objects (although you
-    can still do lookups using None). This way, it is still possible to
-    use the ``isnull`` lookup type correctly. Because of this, the field
-    defaults to ``null=True``, as otherwise it wouldn't be able to store
-    None values since they aren't pickled and encoded.
-
-    """
-    __metaclass__ = models.SubfieldBase
-
-    def __init__(self, *args, **kwargs):
-        self.compress = kwargs.pop('compress', False)
-        self.protocol = kwargs.pop('protocol', 2)
-        kwargs.setdefault('null', True)
-        kwargs.setdefault('editable', False)
-        super(PickledObjectField, self).__init__(*args, **kwargs)
-
-    def get_default(self):
-        """Returns the default value for this field.
-
-        The default implementation on models.Field calls force_unicode
-        on the default, which means you can't set arbitrary Python
-        objects as the default. To fix this, we just return the value
-        without calling force_unicode on it. Note that if you set a
-        callable as a default, the field will still call it. It will
-        *not* try to pickle and encode it.
-
-        """
-        if self.has_default():
-            if callable(self.default):
-                return self.default()
-            return self.default
-        # If the field doesn't have a default, then we punt to models.Field.
-        return super(PickledObjectField, self).get_default()
-
-    def to_python(self, value):
-        """B64decode and unpickle the object, optionally decompressing it.
-
-        If an error is raised in de-pickling and we're sure the value is
-        a definite pickle, the error is allowed to propogate. If we
-        aren't sure if the value is a pickle or not, then we catch the
-        error and return the original value instead.
-
-        """
-
-        if value is not None:
-            try:
-                value = dbsafe_decode(value, self.compress)
-            except:
-                # If the value is a definite pickle; and an error is raised in
-                # de-pickling it should be allowed to propogate.
-                if isinstance(value, PickledObject):
-                    raise
-        return value
-
-    def get_db_prep_value(self, value):
-        """Pickle and b64encode the object, optionally compressing it.
-
-        The pickling protocol is specified explicitly (by default 2),
-        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
-        protocol to change over time. If it did, ``exact`` and ``in``
-        lookups would likely fail, since pickle would now be generating
-        a different string.
-
-        """
-
-        if value is not None and not isinstance(value, PickledObject):
-            # We call force_unicode here explicitly, so that the encoded
-            # string isn't rejected by the postgresql_psycopg2 backend.
-            # Alternatively, we could have just registered PickledObject with
-            # the psycopg marshaller (telling it to store it like it would a
-            # string), but since both of these methods result in the same
-            # value being stored, doing things this way is much easier.
-            value = force_unicode(dbsafe_encode(value, self.compress))
-        return value
-
-    def value_to_string(self, obj):
-        value = self._get_val_from_obj(obj)
-        return self.get_db_prep_value(value)
-
-    def get_internal_type(self):
-        return 'TextField'
-
-    def get_db_prep_lookup(self, lookup_type, value):
-        if lookup_type not in ['exact', 'in', 'isnull']:
-            raise TypeError('Lookup type %s is not supported.' % lookup_type)
-        # The Field model already calls get_db_prep_value before doing the
-        # actual lookup, so all we need to do is limit the lookup types.
-        return super(PickledObjectField, self).get_db_prep_lookup(lookup_type,
-                                                                  value)

+ 11 - 8
celery/loaders/__init__.py

@@ -1,9 +1,11 @@
 import os
-from celery.loaders.djangoapp import Loader as DjangoLoader
-from celery.loaders.default import Loader as DefaultLoader
+
 from django.conf import settings
 from django.core.management import setup_environ
 
+from celery.loaders.default import Loader as DefaultLoader
+from celery.loaders.djangoapp import Loader as DjangoLoader
+
 """
 .. class:: Loader
 
@@ -15,17 +17,18 @@ if settings.configured:
     Loader = DjangoLoader
 else:
     try:
-        # A settings module may be defined, but Django didn't attempt to 
+        # A settings module may be defined, but Django didn't attempt to
         # load it yet. As an alternative to calling the private _setup(),
         # we could also check whether DJANGO_SETTINGS_MODULE is set.
-        settings._setup() 
+        settings._setup()
     except ImportError:
         if not callable(getattr(os, "fork", None)):
             # Platform doesn't support fork()
-            # XXX On systems without fork, multiprocessing seems to be launching
-            # the processes in some other way which does not copy the memory
-            # of the parent process. This means that any configured env might
-            # be lost. This is a hack to make it work on Windows.
+            # XXX On systems without fork, multiprocessing seems to be
+            # launching the processes in some other way which does
+            # not copy the memory of the parent process. This means that any
+            # configured env might be lost. This is a hack to make it work
+            # on Windows.
             # A better way might be to use os.environ to set the currently
             # used configuration method so as to propagate it to the "child"
             # processes. But this has to be experimented with.

+ 1 - 0
celery/loaders/default.py

@@ -1,4 +1,5 @@
 import os
+
 from celery.loaders.base import BaseLoader
 
 DEFAULT_CONFIG_MODULE = "celeryconfig"

+ 10 - 3
celery/log.py

@@ -4,7 +4,9 @@ import sys
 import time
 import logging
 import traceback
-from celery.conf import LOG_FORMAT, DAEMON_LOG_LEVEL
+
+from celery import conf
+from celery.patch import monkeypatch
 
 
 def get_default_logger(loglevel=None):
@@ -14,13 +16,18 @@ def get_default_logger(loglevel=None):
     return logger
 
 
-def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
-        **kwargs):
+_monkeypatched = [False]
+def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
+        format=conf.LOG_FORMAT, **kwargs):
     """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
     ``stderr`` is used.
 
     Returns logger object.
     """
+    if not _monkeypatched[0]:
+        monkeypatch()
+        _monkeypatched[0] = True
+
     logger = get_default_logger(loglevel=loglevel)
     if logger.handlers:
         # Logger already configured

+ 18 - 0
celery/management/commands/celerybeat.py

@@ -0,0 +1,18 @@
+"""
+
+Start the celery clock service from the Django management command.
+
+"""
+from django.core.management.base import BaseCommand
+
+from celery.bin.celerybeat import run_clockservice, OPTION_LIST
+
+
+class Command(BaseCommand):
+    """Run the celery daemon."""
+    option_list = BaseCommand.option_list + OPTION_LIST
+    help = 'Run the celery daemon'
+
+    def handle(self, *args, **options):
+        """Handle the management command."""
+        run_clockservice(**options)
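
A hypothetical usage sketch, equivalent to running ``python manage.py celerybeat -l INFO`` from the command line (note that this blocks, since it starts the clock service in-process):

    from django.core.management import call_command

    call_command("celerybeat", loglevel="INFO")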

+ 1 - 0
celery/management/commands/celeryd.py

@@ -4,6 +4,7 @@ Start the celery daemon from the Django management command.
 
 """
 from django.core.management.base import BaseCommand
+
 from celery.bin.celeryd import run_worker, OPTION_LIST
 
 

+ 1 - 0
celery/management/commands/celerystats.py

@@ -4,6 +4,7 @@ Start the celery daemon from the Django management command.
 
 """
 from django.core.management.base import BaseCommand
+
 from celery.statistics import StatsCollector
 
 

+ 5 - 115
celery/managers.py

@@ -1,63 +1,9 @@
 """celery.managers"""
+from datetime import datetime
 from django.db import models
-from django.db import connection, transaction
-from celery.registry import tasks
-from celery.conf import TASK_RESULT_EXPIRES
-from datetime import datetime, timedelta
-from django.conf import settings
-import random
-
-# server_drift can be negative, but timedelta supports addition on
-# negative seconds.
-SERVER_DRIFT = timedelta(seconds=random.vonmisesvariate(1, 4))
-
-
-class TableLock(object):
-    """Base class for database table locks. Also works as a NOOP lock."""
-
-    def __init__(self, table, type="read"):
-        self.table = table
-        self.type = type
-        self.cursor = None
-
-    def lock_table(self):
-        """Lock the table."""
-        pass
-
-    def unlock_table(self):
-        """Release previously locked tables."""
-        pass
+from django.db import transaction
 
-    @classmethod
-    def acquire(cls, table, type=None):
-        """Acquire table lock."""
-        lock = cls(table, type)
-        lock.lock_table()
-        return lock
-
-    def release(self):
-        """Release the lock."""
-        self.unlock_table()
-        if self.cursor:
-            self.cursor.close()
-            self.cursor = None
-
-
-class MySQLTableLock(TableLock):
-    """Table lock support for MySQL."""
-
-    def lock_table(self):
-        """Lock MySQL table."""
-        self.cursor = connection.cursor()
-        self.cursor.execute("LOCK TABLES %s %s" % (
-            self.table, self.type.upper()))
-
-    def unlock_table(self):
-        """Unlock MySQL table."""
-        self.cursor.execute("UNLOCK TABLES")
-
-TABLE_LOCK_FOR_ENGINE = {"mysql": MySQLTableLock}
-table_lock = TABLE_LOCK_FOR_ENGINE.get(settings.DATABASE_ENGINE, TableLock)
+from celery.conf import TASK_RESULT_EXPIRES
 
 
 class TaskManager(models.Manager):
@@ -68,9 +14,9 @@ class TaskManager(models.Manager):
         task, created = self.get_or_create(task_id=task_id)
         return task
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task was executed successfully."""
-        return self.get_task(task_id).status == "DONE"
+        return self.get_task(task_id).status == "SUCCESS"
 
     def get_all_expired(self):
         """Get all expired task results."""
@@ -119,59 +65,3 @@ class TaskManager(models.Manager):
                 self.store_result(task_id, result, status, traceback, False)
             else:
                 raise
-
-
-class PeriodicTaskManager(models.Manager):
-    """Manager for :class:`celery.models.PeriodicTask` models."""
-
-    def init_entries(self):
-        """Add entries for all registered periodic tasks.
-
-        Should be run at worker start.
-        """
-        periodic_tasks = tasks.get_all_periodic()
-        for task_name in periodic_tasks.keys():
-            task_meta, created = self.get_or_create(name=task_name)
-
-    def is_time(self, last_run_at, run_every):
-        """Check if if it is time to run the periodic task.
-
-        :param last_run_at: Last time the periodic task was run.
-        :param run_every: How often to run the periodic task.
-
-        :rtype bool:
-
-        """
-        run_every_drifted = run_every + SERVER_DRIFT
-        run_at = last_run_at + run_every_drifted
-        if datetime.now() > run_at:
-            return True
-        return False
-
-    def get_waiting_tasks(self):
-        """Get all waiting periodic tasks.
-
-        :returns: list of :class:`celery.models.PeriodicTaskMeta` objects.
-        """
-        periodic_tasks = tasks.get_all_periodic()
-        db_table = self.model._meta.db_table
-
-        # Find all periodic tasks to be run.
-        waiting = []
-        for task_meta in self.all():
-            if task_meta.name in periodic_tasks:
-                task = periodic_tasks[task_meta.name]
-                run_every = task.run_every
-                if self.is_time(task_meta.last_run_at, run_every):
-                    # Get the object again to be sure noone else
-                    # has already taken care of it.
-                    lock = table_lock.acquire(db_table, "write")
-                    try:
-                        secure = self.get(pk=task_meta.pk)
-                        if self.is_time(secure.last_run_at, run_every):
-                            secure.last_run_at = datetime.now()
-                            secure.save()
-                            waiting.append(secure)
-                    finally:
-                        lock.release()
-        return waiting

+ 4 - 6
celery/messaging.py

@@ -4,12 +4,11 @@ Sending and Receiving Messages
 
 """
 from carrot.messaging import Publisher, Consumer, ConsumerSet
+
 from celery import conf
 from celery import signals
 from celery.utils import gen_unique_id
 from celery.utils import mitemgetter
-from celery.serialization import pickle
-
 
 MSG_OPTIONS = ("mandatory", "priority",
                "immediate", "routing_key",
@@ -44,6 +43,8 @@ class TaskPublisher(Publisher):
         """INTERNAL"""
 
         task_id = task_id or gen_unique_id()
+        eta = kwargs.get("eta")
+        eta = eta and eta.isoformat()
 
         message_data = {
             "task": task_name,
@@ -51,7 +52,7 @@ class TaskPublisher(Publisher):
             "args": task_args or [],
             "kwargs": task_kwargs or {},
             "retries": kwargs.get("retries", 0),
-            "eta": kwargs.get("eta"),
+            "eta": eta,
         }
 
         if part_of_set:
@@ -101,6 +102,3 @@ class EventConsumer(Consumer):
     routing_key = "event"
     exchange_type = "direct"
     no_ack = True
-
-
-
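
A small illustration of the ``eta`` change above: the datetime is converted to an ISO-8601 string before it goes into the message body, and a missing eta passes through as ``None``:

    from datetime import datetime

    eta = datetime(2009, 11, 20, 18, 30, 0)
    print(eta and eta.isoformat())   # "2009-11-20T18:30:00"

    eta = None
    print(eta and eta.isoformat())   # None: no eta set for this task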

+ 11 - 43
celery/models.py

@@ -3,21 +3,23 @@
 Django Models.
 
 """
+from datetime import datetime
+
 import django
 from django.db import models
-from celery.registry import tasks
-from celery.managers import TaskManager, PeriodicTaskManager
-from celery.fields import PickledObjectField
-from celery import conf
 from django.utils.translation import ugettext_lazy as _
-from datetime import datetime
+from picklefield.fields import PickledObjectField
+
+from celery import conf
+from celery.registry import tasks
+from celery.managers import TaskManager
 
 TASK_STATUS_PENDING = "PENDING"
 TASK_STATUS_RETRY = "RETRY"
 TASK_STATUS_FAILURE = "FAILURE"
-TASK_STATUS_DONE = "DONE"
+TASK_STATUS_SUCCESS = "SUCCESS"
 TASK_STATUSES = (TASK_STATUS_PENDING, TASK_STATUS_RETRY,
-                 TASK_STATUS_FAILURE, TASK_STATUS_DONE)
+                 TASK_STATUS_FAILURE, TASK_STATUS_SUCCESS)
 TASK_STATUSES_CHOICES = zip(TASK_STATUSES, TASK_STATUSES)
 
 
@@ -40,42 +42,8 @@ class TaskMeta(models.Model):
     def __unicode__(self):
         return u"<Task: %s done:%s>" % (self.task_id, self.status)
 
-
-class PeriodicTaskMeta(models.Model):
-    """Information about a Periodic Task."""
-    name = models.CharField(_(u"name"), max_length=255, unique=True)
-    last_run_at = models.DateTimeField(_(u"last time run"),
-                                       blank=True,
-                                       default=datetime.fromtimestamp(0))
-    total_run_count = models.PositiveIntegerField(_(u"total run count"),
-                                                  default=0)
-
-    objects = PeriodicTaskManager()
-
-    class Meta:
-        """Model meta-data."""
-        verbose_name = _(u"periodic task")
-        verbose_name_plural = _(u"periodic tasks")
-
-    def __unicode__(self):
-        return u"<PeriodicTask: %s [last-run:%s, total-run:%d]>" % (
-                self.name, self.last_run_at, self.total_run_count)
-
-    def delay(self, *args, **kwargs):
-        """Apply the periodic task immediately."""
-        self.task.delay()
-        self.total_run_count = self.total_run_count + 1
-        self.save()
-
-    @property
-    def task(self):
-        """The entry registered in the task registry for this task."""
-        return tasks[self.name]
-
-
 if (django.VERSION[0], django.VERSION[1]) >= (1, 1):
-    # keep models away from syncdb/reset if database backend is not being used.
+    # keep models away from syncdb/reset if database backend is not
+    # being used.
     if conf.CELERY_BACKEND != 'database':
         TaskMeta._meta.managed = False
-    if conf.CELERY_PERIODIC_STATUS_BACKEND != 'database':
-        PeriodicTaskMeta._meta.managed = False

+ 29 - 0
celery/patch.py

@@ -0,0 +1,29 @@
+import logging
+import sys
+
+
+def _check_logger_class():
+    """Make sure process name is recorded when loggers are used."""
+
+    from multiprocessing.process import current_process
+    logging._acquireLock()
+    try:
+        OldLoggerClass = logging.getLoggerClass()
+        if not getattr(OldLoggerClass, '_process_aware', False):
+
+            class ProcessAwareLogger(OldLoggerClass):
+                _process_aware = True
+
+                def makeRecord(self, *args, **kwds):
+                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
+                    record.processName = current_process()._name
+                    return record
+            logging.setLoggerClass(ProcessAwareLogger)
+    finally:
+        logging._releaseLock()
+
+def monkeypatch():
+    major, minor = sys.version_info[:2]
+    if major == 2 and minor < 6: # python < 2.6
+        _check_logger_class()
+
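
A hedged sketch of what the monkeypatch provides on Python < 2.6: log records gain a ``processName`` attribute so the default log format can include it (on 2.6 and later ``monkeypatch()`` is a no-op, since logging sets ``processName`` itself):

    import logging

    from celery.patch import monkeypatch

    monkeypatch()  # only patches the logger class on Python < 2.6
    logging.basicConfig(
        format="[%(asctime)s: %(levelname)s/%(processName)s] %(message)s")
    logging.getLogger("demo").warning("record carries the process name")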

+ 5 - 0
celery/platform.py

@@ -53,6 +53,11 @@ def create_daemon_context(logfile=None, pidfile=None, **options):
 
     from daemon import DaemonContext
 
+    # set SIGCLD back to the default SIG_DFL (before python-daemon overrode
+    # it) lets the parent wait() for the terminated child process and stops
+    # the 'OSError: [Errno 10] No child processes' problem.
+    reset_signal("SIGCLD")
+
     # Since without stderr any errors will be silently suppressed,
     # we need to know that we have access to the logfile
     if logfile:

+ 0 - 258
celery/pool.py

@@ -1,258 +0,0 @@
-"""
-
-Process Pools.
-
-"""
-import os
-import time
-import errno
-import multiprocessing
-
-from multiprocessing.pool import Pool, worker
-from celery.datastructures import ExceptionInfo
-from celery.utils import noop
-from celery.utils.functional import curry
-from operator import isNumberType
-
-
-def pid_is_dead(pid):
-    """Check if a process is not running by PID.
-
-    :rtype bool:
-
-    """
-    try:
-        return os.kill(pid, 0)
-    except OSError, err:
-        if err.errno == errno.ESRCH:
-            return True # No such process.
-        elif err.errno == errno.EPERM:
-            return False # Operation not permitted.
-        else:
-            raise
-
-
-def reap_process(pid):
-    """Reap process if the process is a zombie.
-
-    :returns: ``True`` if process was reaped or is not running,
-        ``False`` otherwise.
-
-    """
-    if pid_is_dead(pid):
-        return True
-
-    try:
-        is_dead, _ = os.waitpid(pid, os.WNOHANG)
-    except OSError, err:
-        if err.errno == errno.ECHILD:
-            return False # No child processes.
-        raise
-    return is_dead
-
-
-def process_is_dead(process):
-    """Check if process is not running anymore.
-
-    First it finds out if the process is running by sending
-    signal 0. Then if the process is a child process, and is running
-    it finds out if it's a zombie process and reaps it.
-    If the process is running and is not a zombie it tries to send
-    a ping through the process pipe.
-
-    :param process: A :class:`multiprocessing.Process` instance.
-
-    :returns: ``True`` if the process is not running, ``False`` otherwise.
-
-    """
-
-    # Only do this if os.kill exists for this platform (e.g. Windows doesn't
-    # support it).
-    if callable(getattr(os, "kill", None)) and reap_process(process.pid):
-        return True
-
-    # Then try to ping the process using its pipe.
-    try:
-        proc_is_alive = process.is_alive()
-    except OSError:
-        return True
-    else:
-        return not proc_is_alive
-
-
-class DynamicPool(Pool):
-    """Version of :class:`multiprocessing.Pool` that can dynamically grow
-    in size."""
-
-    def __init__(self, processes=None, initializer=None, initargs=()):
-
-        if processes is None:
-            try:
-                processes = cpu_count()
-            except NotImplementedError:
-                processes = 1
-
-        super(DynamicPool, self).__init__(processes=processes,
-                                          initializer=initializer,
-                                          initargs=initargs)
-        self._initializer = initializer
-        self._initargs = initargs
-        self._size = processes
-        self.logger = multiprocessing.get_logger()
-
-    def _my_cleanup(self):
-        from multiprocessing.process import _current_process
-        for p in list(_current_process._children):
-            discard = False
-            try:
-                status = p._popen.poll()
-            except OSError:
-                discard = True
-            else:
-                if status is not None:
-                    discard = True
-            if discard:
-                _current_process._children.discard(p)
-
-    def add_worker(self):
-        """Add another worker to the pool."""
-        self._my_cleanup()
-        w = self.Process(target=worker,
-                         args=(self._inqueue, self._outqueue,
-                               self._initializer, self._initargs))
-        w.name = w.name.replace("Process", "PoolWorker")
-        w.daemon = True
-        w.start()
-        self._pool.append(w)
-        self.logger.debug(
-            "DynamicPool: Started pool worker %s (PID: %s, Poolsize: %d)" %(
-                w.name, w.pid, len(self._pool)))
-
-    def grow(self, size=1):
-        """Add workers to the pool.
-
-        :keyword size: Number of workers to add (default: 1)
-
-        """
-        [self.add_worker() for i in range(size)]
-
-    def _is_dead(self, process):
-        """Try to find out if the process is dead.
-
-        :rtype bool:
-
-        """
-        if process_is_dead(process):
-            self.logger.info("DynamicPool: Found dead process (PID: %s)" % (
-                process.pid))
-            return True
-        return False
-
-    def _bring_out_the_dead(self):
-        """Sort out dead process from pool.
-
-        :returns: Tuple of two lists, the first list with dead processes,
-            the second with active processes.
-
-        """
-        dead, alive = [], []
-        for process in self._pool:
-            if process and process.pid and isNumberType(process.pid):
-                dest = dead if self._is_dead(process) else alive
-                dest.append(process)
-        return dead, alive
-
-    def replace_dead_workers(self):
-        """Replace dead workers in the pool by spawning new ones.
-
-        :returns: number of dead processes replaced, or ``None`` if all
-            processes are alive and running.
-
-        """
-        dead, alive = self._bring_out_the_dead()
-        if dead:
-            dead_count = len(dead)
-            self._pool = alive
-            self.grow(self._size if dead_count > self._size else dead_count)
-            return dead_count
-
-
-class TaskPool(object):
-    """Process Pool for processing tasks in parallel.
-
-    :param limit: see :attr:`limit` attribute.
-    :param logger: see :attr:`logger` attribute.
-
-
-    .. attribute:: limit
-
-        The number of processes that can run simultaneously.
-
-    .. attribute:: logger
-
-        The logger used for debugging.
-
-    """
-
-    def __init__(self, limit, logger=None):
-        self.limit = limit
-        self.logger = logger or multiprocessing.get_logger()
-        self._pool = None
-
-    def start(self):
-        """Run the task pool.
-
-        Will pre-fork all workers so they're ready to accept tasks.
-
-        """
-        self._pool = DynamicPool(processes=self.limit)
-
-    def stop(self):
-        """Terminate the pool."""
-        self._pool.terminate()
-        self._pool = None
-
-    def replace_dead_workers(self):
-        self.logger.debug("TaskPool: Finding dead pool processes...")
-        dead_count = self._pool.replace_dead_workers()
-        if dead_count:
-            self.logger.info(
-                "TaskPool: Replaced %d dead pool workers..." % (
-                    dead_count))
-
-    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
-            errbacks=None, on_ack=noop):
-        """Equivalent of the :func:``apply`` built-in function.
-
-        All ``callbacks`` and ``errbacks`` should complete immediately since
-        otherwise the thread which handles the result will get blocked.
-
-        """
-        args = args or []
-        kwargs = kwargs or {}
-        callbacks = callbacks or []
-        errbacks = errbacks or []
-
-        on_ready = curry(self.on_ready, callbacks, errbacks, on_ack)
-
-        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
-            target, args, kwargs))
-
-        self.replace_dead_workers()
-
-        return self._pool.apply_async(target, args, kwargs,
-                                        callback=on_ready)
-
-    def on_ready(self, callbacks, errbacks, on_ack, ret_value):
-        """What to do when a worker task is ready and its return value has
-        been collected."""
-        # Acknowledge the task as being processed.
-        on_ack()
-
-        if isinstance(ret_value, ExceptionInfo):
-            if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)):
-                raise ret_value.exception
-            [errback(ret_value) for errback in errbacks]
-        else:
-            [callback(ret_value) for callback in callbacks]

+ 8 - 26
celery/registry.py

@@ -1,14 +1,14 @@
 """celery.registry"""
+import inspect
+from UserDict import UserDict
+
 from celery import discovery
-from celery.utils import get_full_cls_name
 from celery.exceptions import NotRegistered, AlreadyRegistered
-from UserDict import UserDict
 
 
 class TaskRegistry(UserDict):
     """Site registry for tasks."""
 
-    AlreadyRegistered = AlreadyRegistered
     NotRegistered = NotRegistered
 
     def __init__(self):
@@ -18,33 +18,15 @@ class TaskRegistry(UserDict):
         """Autodiscovers tasks using :func:`celery.discovery.autodiscover`."""
         discovery.autodiscover()
 
-    def register(self, task, name=None):
+    def register(self, task):
         """Register a task in the task registry.
 
-        Task can either be a regular function, or a class inheriting
-        from :class:`celery.task.Task`.
-
-        :keyword name: By default the :attr:`Task.name` attribute on the
-            task is used as the name of the task, but you can override it
-            using this option.
-
-        :raises AlreadyRegistered: if the task is already registered.
-
+        The task will be automatically instantiated if it's a class
+        rather than an instance.
         """
-        is_class = hasattr(task, "run")
-        if is_class:
-            task = task() # instantiate Task class
-        if not name:
-            name = getattr(task, "name")
-
-        if name in self.data:
-            raise self.AlreadyRegistered(
-                    "Task with name %s is already registered." % name)
-
-        if not is_class:
-            task.name = name
-            task.type = "regular"
 
+        task = task() if inspect.isclass(task) else task
+        name = task.name
         self.data[name] = task
 
     def unregister(self, name):

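After this change ``register()`` takes only the task itself: classes are
instantiated on the spot, the ``name`` keyword is gone, and re-registering
overwrites the entry instead of raising ``AlreadyRegistered``. A small sketch
of the new calling convention (``MyTask`` is a made-up example; with the
``TaskType`` metaclass added later in this commit the explicit call is no
longer even necessary):

    from celery.registry import tasks
    from celery.task.base import Task

    class MyTask(Task):
        name = "examples.mytask"

        def run(self, x, **kwargs):
            return x * 2

    tasks.register(MyTask)             # classes are instantiated automatically
    assert "examples.mytask" in tasks
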
+ 19 - 22
celery/result.py

@@ -3,11 +3,12 @@
 Asynchronous result types.
 
 """
+import time
+from itertools import imap
+
 from celery.backends import default_backend
-from celery.datastructures import PositionQueue
 from celery.exceptions import TimeoutError
-from itertools import imap
-import time
+from celery.datastructures import PositionQueue
 
 
 class BaseAsyncResult(object):
@@ -35,14 +36,6 @@ class BaseAsyncResult(object):
         self.task_id = task_id
         self.backend = backend
 
-    def is_done(self):
-        """Returns ``True`` if the task executed successfully.
-
-        :rtype: bool
-
-        """
-        return self.backend.is_done(self.task_id)
-
     def get(self):
         """Alias to :meth:`wait`."""
         return self.wait()
@@ -74,8 +67,12 @@ class BaseAsyncResult(object):
         return status not in ["PENDING", "RETRY"]
 
     def successful(self):
-        """Alias to :meth:`is_done`."""
-        return self.is_done()
+        """Returns ``True`` if the task executed successfully.
+
+        :rtype: bool
+
+        """
+        return self.backend.is_successful(self.task_id)
 
     def __str__(self):
         """``str(self)`` -> ``self.task_id``"""
@@ -91,7 +88,7 @@ class BaseAsyncResult(object):
         If the task raised an exception, this will be the exception instance.
 
         """
-        if self.status == "DONE" or self.status == "FAILURE":
+        if self.status == "SUCCESS" or self.status == "FAILURE":
             return self.backend.get_result(self.task_id)
         return None
 
@@ -120,7 +117,7 @@ class BaseAsyncResult(object):
                 than its limit. The :attr:`result` attribute contains the
                 exception raised.
 
-            *DONE*
+            *SUCCESS*
 
                 The task executed successfully. The :attr:`result` attribute
                 contains the resulting value.
@@ -247,7 +244,7 @@ class TaskSetResult(object):
                             for subtask in self.subtasks)
         while results:
             for task_id, pending_result in results.items():
-                if pending_result.status == "DONE":
+                if pending_result.status == "SUCCESS":
                     del(results[task_id])
                     yield pending_result.result
                 elif pending_result.status == "FAILURE":
@@ -280,7 +277,7 @@ class TaskSetResult(object):
 
         while True:
             for position, pending_result in enumerate(self.subtasks):
-                if pending_result.status == "DONE":
+                if pending_result.status == "SUCCESS":
                     results[position] = pending_result.result
                 elif pending_result.status == "FAILURE":
                     raise pending_result.result
@@ -309,20 +306,20 @@ class EagerResult(BaseAsyncResult):
         self._status = status
         self._traceback = traceback
 
-    def is_done(self):
+    def successful(self):
         """Returns ``True`` if the task executed without failure."""
-        return self.status == "DONE"
+        return self.status == "SUCCESS"
 
-    def is_ready(self):
+    def ready(self):
         """Returns ``True`` if the task has been executed."""
         return True
 
     def wait(self, timeout=None):
         """Wait until the task has been executed and return its result."""
-        if self.status == "DONE":
+        if self.status == "SUCCESS":
             return self.result
         elif self.status == "FAILURE":
-            raise self.result
+            raise self.result.exception
 
     @property
     def result(self):

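The result status vocabulary moves from "DONE" to "SUCCESS", and ``is_done()``
is folded into ``successful()``. A usage sketch (``add`` stands in for any
registered task):

    result = add.delay(2, 2)

    result.ready()        # True once the task has executed
    result.successful()   # replaces the removed is_done()
    result.status         # "SUCCESS" on success, no longer "DONE"
    result.result         # return value, or the exception instance on FAILURE
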
+ 0 - 116
celery/serialization.py

@@ -1,116 +0,0 @@
-from celery.utils.functional import curry
-import operator
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-
-def find_nearest_pickleable_exception(exc):
-    """With an exception instance, iterate over its super classes (by mro)
-    and find the first super exception that is pickleable. It does
-    not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
-    :class:`BaseException` and :class:`object`). If that happens
-    you should use :exc:`UnpickleableException` instead.
-
-    :param exc: An exception instance.
-
-    :returns: the nearest exception if it's not :exc:`Exception` or below,
-        if it is it returns ``None``.
-
-    :rtype: :exc:`Exception`
-
-    """
-
-    unwanted = (Exception, BaseException, object)
-    is_unwanted = lambda exc: any(map(curry(operator.is_, exc), unwanted))
-
-    mro_ = getattr(exc.__class__, "mro", lambda: [])
-    for supercls in mro_():
-        if is_unwanted(supercls):
-            # only BaseException and object, from here on down,
-            # we don't care about these.
-            return None
-        try:
-            exc_args = getattr(exc, "args", [])
-            superexc = supercls(*exc_args)
-            pickle.dumps(superexc)
-        except:
-            pass
-        else:
-            return superexc
-    return None
-
-
-def create_exception_cls(name, module, parent=None):
-    """Dynamically create an exception class."""
-    if not parent:
-        parent = Exception
-    return type(name, (parent, ), {"__module__": module})
-
-
-class UnpickleableExceptionWrapper(Exception):
-    """Wraps unpickleable exceptions.
-
-    :param exc_module: see :attr:`exc_module`.
-
-    :param exc_cls_name: see :attr:`exc_cls_name`.
-
-    :param exc_args: see :attr:`exc_args`
-
-    .. attribute:: exc_module
-
-        The module of the original exception.
-
-    .. attribute:: exc_cls_name
-
-        The name of the original exception class.
-
-    .. attribute:: exc_args
-
-        The arguments for the original exception.
-
-    Example
-
-        >>> try:
-        ...     something_raising_unpickleable_exc()
-        >>> except Exception, e:
-        ...     exc = UnpickleableException(e.__class__.__module__,
-        ...                                 e.__class__.__name__,
-        ...                                 e.args)
-        ...     pickle.dumps(exc) # Works fine.
-
-    """
-
-    def __init__(self, exc_module, exc_cls_name, exc_args):
-        self.exc_module = exc_module
-        self.exc_cls_name = exc_cls_name
-        self.exc_args = exc_args
-        super(Exception, self).__init__(exc_module, exc_cls_name, exc_args)
-
-
-def get_pickleable_exception(exc):
-    """Make sure exception is pickleable."""
-    nearest = find_nearest_pickleable_exception(exc)
-    if nearest:
-        return nearest
-
-    try:
-        pickle.dumps(exc)
-    except pickle.PickleError:
-        excwrapper = UnpickleableExceptionWrapper(
-                        exc.__class__.__module__,
-                        exc.__class__.__name__,
-                        getattr(exc, "args", []))
-        return excwrapper
-    return exc
-
-
-def get_pickled_exception(exc):
-    """Get original exception from exception pickled using
-    :meth:`get_pickleable_exception`."""
-    if isinstance(exc, UnpickleableExceptionWrapper):
-        exc_cls = create_exception_cls(exc.exc_cls_name,
-                                       exc.exc_module)
-        return exc_cls(*exc.exc_args)
-    return exc

+ 0 - 120
celery/supervisor.py

@@ -1,120 +0,0 @@
-import multiprocessing
-import time
-from multiprocessing import TimeoutError
-
-JOIN_TIMEOUT = 2
-CHECK_INTERVAL = 2
-MAX_RESTART_FREQ = 3
-MAX_RESTART_FREQ_TIME = 10
-
-
-class MaxRestartsExceededError(Exception):
-    """Restarts exceeded the maximum restart frequency."""
-
-
-class OFASupervisor(object):
-    """Process supervisor using the `one_for_all`_ strategy.
-
-    .. _`one_for_all`:
-        http://erlang.org/doc/design_principles/sup_princ.html#5.3.2
-
-    However, instead of registering a list of processes, you have one
-    process which runs a pool. Makes for an easy implementation.
-
-    :param target: see :attr:`target`.
-    :param args: see :attr:`args`.
-    :param kwargs: see :attr:`kwargs`.
-    :param max_restart_freq: see :attr:`max_restart_freq`.
-    :param max_restart_freq_time: see :attr:`max_restart_freq_time`.
-    :param check_interval: see :attr:`max_restart_freq_time`.
-
-    .. attribute:: target
-
-        The target callable to be launched in a new process.
-
-    .. attribute:: args
-
-        The positional arguments to apply to :attr:`target`.
-
-    .. attribute:: kwargs
-
-        The keyword arguments to apply to :attr:`target`.
-
-    .. attribute:: max_restart_freq
-
-        Limit the number of restarts which can occur in a given time interval.
-
-        The max restart frequency is the number of restarts that can occur
-        within the interval :attr:`max_restart_freq_time`.
-
-        The restart mechanism prevents situations where the process repeatedly
-        dies for the same reason. If this happens both the process and the
-        supervisor is terminated.
-
-    .. attribute:: max_restart_freq_time
-
-        See :attr:`max_restart_freq`.
-
-    .. attribute:: check_interval
-
-        The time in seconds, between process pings.
-
-    """
-    Process = multiprocessing.Process
-
-    def __init__(self, target, args=None, kwargs=None,
-            max_restart_freq=MAX_RESTART_FREQ,
-            join_timeout=JOIN_TIMEOUT,
-            max_restart_freq_time=MAX_RESTART_FREQ_TIME,
-            check_interval=CHECK_INTERVAL):
-        self.target = target
-        self.join_timeout = join_timeout
-        self.args = args or []
-        self.kwargs = kwargs or {}
-        self.check_interval = check_interval
-        self.max_restart_freq = max_restart_freq
-        self.max_restart_freq_time = max_restart_freq_time
-        self.restarts_in_frame = 0
-
-    def start(self):
-        """Launches the :attr:`target` in a seperate process and starts
-        supervising it."""
-        target = self.target
-
-        def _start_supervised_process():
-            """Start the :attr:`target` in a new process."""
-            process = self.Process(target=target,
-                                   args=self.args, kwargs=self.kwargs)
-            process.start()
-            return process
-
-        def _restart(process):
-            """Terminate the process and restart."""
-            process.join(timeout=self.join_timeout)
-            process.terminate()
-            self.restarts_in_frame += 1
-            process = _start_supervised_process()
-
-        process = _start_supervised_process()
-        try:
-            restart_frame = 0
-            while True:
-                if restart_frame > self.max_restart_freq_time:
-                    if self.restarts_in_frame >= self.max_restart_freq:
-                        raise MaxRestartsExceededError(
-                                "Supervised: Max restart frequency reached")
-                restart_frame = 0
-                self.restarts_in_frame = 0
-
-                try:
-                    proc_is_alive = process.is_alive()
-                except TimeoutError:
-                    proc_is_alive = False
-
-                if not proc_is_alive:
-                    _restart(process)
-
-                time.sleep(self.check_interval)
-                restart_frame += self.check_interval
-        finally:
-            process.join()

+ 9 - 9
celery/task/__init__.py

@@ -4,17 +4,17 @@ Working with tasks and task sets.
 
 """
 from carrot.connection import DjangoBrokerConnection
-from celery.messaging import TaskConsumer
+from billiard.serialization import pickle
+
 from celery.conf import AMQP_CONNECTION_TIMEOUT
+from celery.execute import apply_async
 from celery.registry import tasks
 from celery.backends import default_backend
+from celery.messaging import TaskConsumer
 from celery.task.base import Task, TaskSet, PeriodicTask
-from celery.task.base import ExecuteRemoteTask
-from celery.task.base import AsynchronousMapTask
-from celery.task.builtins import DeleteExpiredTaskMetaTask, PingTask
-from celery.execute import apply_async, delay_task
-from celery.serialization import pickle
+from celery.task.base import ExecuteRemoteTask, AsynchronousMapTask
 from celery.task.rest import RESTProxyTask
+from celery.task.builtins import DeleteExpiredTaskMetaTask, PingTask
 
 
 def discard_all(connect_timeout=AMQP_CONNECTION_TIMEOUT):
@@ -35,13 +35,13 @@ def discard_all(connect_timeout=AMQP_CONNECTION_TIMEOUT):
     return discarded_count
 
 
-def is_done(task_id):
+def is_successful(task_id):
     """Returns ``True`` if task with ``task_id`` has been executed.
 
     :rtype: bool
 
     """
-    return default_backend.is_done(task_id)
+    return default_backend.is_successful(task_id)
 
 
 def dmap(func, args, timeout=None):
@@ -72,7 +72,7 @@ def dmap_async(func, args, timeout=None):
         >>> presult
         <AsyncResult: 373550e8-b9a0-4666-bc61-ace01fa4f91d>
         >>> presult.status
-        'DONE'
+        'SUCCESS'
         >>> presult.result
         [4, 8, 16]
 

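The module-level helper follows the same rename; a one-line sketch (the task
id is a placeholder):

    from celery.task import is_successful

    is_successful("some-task-id")   # was is_done(); True only on SUCCESS
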
+ 83 - 129
celery/task/base.py

@@ -1,18 +1,61 @@
+import sys
+from datetime import timedelta
+from Queue import Queue
+
 from carrot.connection import DjangoBrokerConnection
+from billiard.serialization import pickle
+
 from celery import conf
-from celery.messaging import TaskPublisher, TaskConsumer
 from celery.log import setup_logger
-from celery.result import TaskSetResult, EagerResult
-from celery.execute import apply_async, delay_task, apply
 from celery.utils import gen_unique_id, get_full_cls_name
+from celery.result import TaskSetResult, EagerResult
+from celery.execute import apply_async, apply
 from celery.registry import tasks
-from celery.serialization import pickle
+from celery.backends import default_backend
+from celery.messaging import TaskPublisher, TaskConsumer
 from celery.exceptions import MaxRetriesExceededError, RetryTaskError
-from datetime import timedelta
+
+
+class TaskType(type):
+    """Metaclass for tasks.
+
+    Automatically registers the task in the task registry, except
+    if the ``abstract`` attribute is set.
+
+    If no ``name`` attribute is provided, the name is automatically
+    set to the name of the module it was defined in, and the class name.
+
+    """
+
+    def __new__(cls, name, bases, attrs):
+        super_new = super(TaskType, cls).__new__
+        task_module = attrs["__module__"]
+
+        # Abstract class, remove the abstract attribute so
+        # any class inheriting from this won't be abstract by default.
+        if attrs.pop("abstract", None):
+            return super_new(cls, name, bases, attrs)
+
+        # Automatically generate missing name.
+        if not attrs.get("name"):
+            task_module = sys.modules[task_module]
+            task_name = ".".join([task_module.__name__, name])
+            attrs["name"] = task_name
+
+        # Because imports happen recursively, this may or may not be the
+        # first time this task class tries to register with the framework.
+        # There should only be one class for each task name, so we always
+        # return the registered version.
+
+        task_name = attrs["name"]
+        if task_name not in tasks:
+            task_cls = super_new(cls, name, bases, attrs)
+            tasks.register(task_cls)
+        return tasks[task_name].__class__
 
 
 class Task(object):
-    """A task that can be delayed for execution by the ``celery`` daemon.
+    """A celery task.
 
     All subclasses of :class:`Task` must define the :meth:`run` method,
     which is the actual method the ``celery`` daemon executes.
@@ -20,13 +63,11 @@ class Task(object):
     The :meth:`run` method can take use of the default keyword arguments,
     as listed in the :meth:`run` documentation.
 
-    The :meth:`run` method supports both positional, and keyword arguments.
-
     .. attribute:: name
+        Name of the task.
 
-        *REQUIRED* All subclasses of :class:`Task` has to define the
-        :attr:`name` attribute. This is the name of the task, registered
-        in the task registry, and passed to :func:`delay_task`.
+    .. attribute:: abstract
+        If ``True`` the task is an abstract base class.
 
     .. attribute:: type
 
@@ -44,22 +85,17 @@ class Task(object):
 
     .. attribute:: mandatory
 
-        If set, the message has mandatory routing. By default the message
-        is silently dropped by the broker if it can't be routed to a queue.
-        However - If the message is mandatory, an exception will be raised
-        instead.
+        Mandatory message routing. An exception will be raised if the task
+        can't be routed to a queue.
 
     .. attribute:: immediate:
 
-        Request immediate delivery. If the message cannot be routed to a
-        task worker immediately, an exception will be raised. This is
-        instead of the default behaviour, where the broker will accept and
-        queue the message, but with no guarantee that the message will ever
-        be consumed.
+        Request immediate delivery. An exception will be raised if the task
+        can't be routed to a worker immediately.
 
     .. attribute:: priority:
-
-        The message priority. A number from ``0`` to ``9``.
+        The message priority. A number from ``0`` to ``9``, where ``0`` is the
+        highest. Note that RabbitMQ doesn't support priorities yet.
 
     .. attribute:: max_retries
 
@@ -67,16 +103,18 @@ class Task(object):
 
     .. attribute:: default_retry_delay
 
-        Defeault time in seconds before a retry of the task should be
+        Default time in seconds before a retry of the task should be
         executed. Default is a 1 minute delay.
 
+    .. attribute:: rate_limit
+
+        The rate limit for this task type. Examples: ``None`` (no rate
+        limit), ``"100/s"`` (a hundred tasks a second), ``"100/m"`` (a
+        hundred tasks a minute), ``"100/h"`` (a hundred tasks an hour).
+
     .. attribute:: ignore_result
 
-        Don't store the status and return value. This means you can't
-        use the :class:`celery.result.AsyncResult` to check if the task is
-        done, or get its return value. Only use if you need the performance
-        and is able live without these features. Any exceptions raised will
-        store the return value/status as usual.
+        Don't store the return value of this task.
 
     .. attribute:: disable_error_emails
 
@@ -85,48 +123,21 @@ class Task(object):
 
     .. attribute:: serializer
 
-        A string identifying the default serialization
-        method to use. Defaults to the ``CELERY_TASK_SERIALIZER`` setting.
-        Can be ``pickle`` ``json``, ``yaml``, or any custom serialization
-        methods that have been registered with
-        :mod:`carrot.serialization.registry`.
+        The name of a serializer that has been registered with
+        :mod:`carrot.serialization.registry`. Example: ``"json"``.
+
+    .. attribute:: backend
 
-    :raises NotImplementedError: if the :attr:`name` attribute is not set.
+        The result store backend used for this task.
 
     The resulting class is callable, which if called will apply the
     :meth:`run` method.
 
-    Examples
-
-    This is a simple task just logging a message,
-
-        >>> from celery.task import tasks, Task
-        >>> class MyTask(Task):
-        ...
-        ...     def run(self, some_arg=None, **kwargs):
-        ...         logger = self.get_logger(**kwargs)
-        ...         logger.info("Running MyTask with arg some_arg=%s" %
-        ...                     some_arg))
-        ...         return 42
-        ... tasks.register(MyTask)
-
-    You can delay the task using the classmethod :meth:`delay`...
-
-        >>> result = MyTask.delay(some_arg="foo")
-        >>> result.status # after some time
-        'DONE'
-        >>> result.result
-        42
-
-    ...or using the :func:`delay_task` function, by passing the name of
-    the task.
-
-        >>> from celery.task import delay_task
-        >>> result = delay_task(MyTask.name, some_arg="foo")
-
-
     """
+    __metaclass__ = TaskType
+
     name = None
+    abstract = True
     type = "regular"
     exchange = None
     routing_key = None
@@ -138,6 +149,9 @@ class Task(object):
     max_retries = 3
     default_retry_delay = 3 * 60
     serializer = conf.TASK_SERIALIZER
+    rate_limit = conf.DEFAULT_RATE_LIMIT
+    rate_limit_queue_type = Queue
+    backend = default_backend
 
     MaxRetriesExceededError = MaxRetriesExceededError
 
@@ -155,53 +169,16 @@ class Task(object):
         by the worker if the function/method supports them:
 
             * task_id
-
-                Unique id of the currently executing task.
-
             * task_name
-
-                Name of the currently executing task (same as :attr:`name`)
-
             * task_retries
-
-                How many times the current task has been retried
-                (an integer starting at ``0``).
-
             * logfile
-
-                Name of the worker log file.
-
             * loglevel
 
-                The current loglevel, an integer mapping to one of the
-                following values: ``logging.DEBUG``, ``logging.INFO``,
-                ``logging.ERROR``, ``logging.CRITICAL``, ``logging.WARNING``,
-                ``logging.FATAL``.
-
         Additional standard keyword arguments may be added in the future.
         To take these default arguments, the task can either list the ones
         it wants explicitly or just take an arbitrary list of keyword
         arguments (\*\*kwargs).
 
-        Example using an explicit list of default arguments to take:
-
-        .. code-block:: python
-
-            def run(self, x, y, logfile=None, loglevel=None):
-                self.get_logger(loglevel=loglevel, logfile=logfile)
-                return x * y
-
-
-        Example taking all default keyword arguments, and any extra arguments
-        passed on by the caller:
-
-        .. code-block:: python
-
-            def run(self, x, y, **kwargs): # CORRECT!
-                logger = self.get_logger(**kwargs)
-                adjust = kwargs.get("adjust", 0)
-                return x * y - adjust
-
         """
         raise NotImplementedError("Tasks must define a run method.")
 
@@ -253,7 +230,8 @@ class Task(object):
 
     @classmethod
     def delay(cls, *args, **kwargs):
-        """Delay this task for execution by the ``celery`` daemon(s).
+        """Shortcut to :meth:`apply_async` but with star arguments,
+        and doesn't support the extra options.
 
         :param \*args: positional arguments passed on to the task.
 
@@ -261,8 +239,6 @@ class Task(object):
 
         :rtype: :class:`celery.result.AsyncResult`
 
-        See :func:`celery.execute.delay_task`.
-
         """
         return apply_async(cls, args, kwargs)
 
@@ -429,7 +405,6 @@ class ExecuteRemoteTask(Task):
         """
         callable_ = pickle.loads(ser_callable)
         return callable_(*fargs, **fkwargs)
-tasks.register(ExecuteRemoteTask)
 
 
 class AsynchronousMapTask(Task):
@@ -441,7 +416,6 @@ class AsynchronousMapTask(Task):
         """The method run by ``celeryd``."""
         timeout = kwargs.get("timeout")
         return TaskSet.map(pickle.loads(serfunc), args, timeout=timeout)
-tasks.register(AsynchronousMapTask)
 
 
 class TaskSet(object):
@@ -477,7 +451,7 @@ class TaskSet(object):
         ... ])
 
         >>> taskset_result = taskset.run()
-        >>> list_of_return_values = taskset.join()
+        >>> list_of_return_values = taskset_result.join()
 
     """
 
@@ -542,25 +516,6 @@ class TaskSet(object):
         conn.close()
         return TaskSetResult(taskset_id, subtasks)
 
-    def join(self, timeout=None):
-        """Gather the results for all of the tasks in the taskset,
-        and return a list with them ordered by the order of which they
-        were called.
-
-        :keyword timeout: The time in seconds, how long
-            it will wait for results, before the operation times out.
-
-        :raises TimeoutError: if ``timeout`` is not ``None``
-            and the operation takes longer than ``timeout`` seconds.
-
-        If any of the tasks raises an exception, the exception
-        will be reraised by :meth:`join`.
-
-        :returns: list of return values for all tasks in the taskset.
-
-        """
-        return self.run().join(timeout=timeout)
-
     @classmethod
     def remote_execute(cls, func, args):
         """Apply ``args`` to function by distributing the args to the
@@ -573,7 +528,7 @@ class TaskSet(object):
     def map(cls, func, args, timeout=None):
         """Distribute processing of the arguments and collect the results."""
         remote_task = cls.remote_execute(func, args)
-        return remote_task.join(timeout=timeout)
+        return remote_task.run().join(timeout=timeout)
 
     @classmethod
     def map_async(cls, func, args, timeout=None):
@@ -599,8 +554,6 @@ class PeriodicTask(Task):
     :raises NotImplementedError: if the :attr:`run_every` attribute is
         not defined.
 
-    You have to register the periodic task in the task registry.
-
     Example
 
         >>> from celery.task import tasks, PeriodicTask
@@ -612,10 +565,11 @@ class PeriodicTask(Task):
         ...     def run(self, **kwargs):
         ...         logger = self.get_logger(**kwargs)
         ...         logger.info("Running MyPeriodicTask")
-        >>> tasks.register(MyPeriodicTask)
 
     """
+    abstract = True
     run_every = timedelta(days=1)
+    ignore_result = True
     type = "periodic"
 
     def __init__(self):

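Taken together, the ``TaskType`` metaclass, the generated task names and the
new ``rate_limit`` attribute remove most of the old boilerplate. A minimal
sketch under those assumptions (module and task names are invented):

    # myapp/tasks.py -- hypothetical module showing the new conventions.
    from celery.task.base import Task

    class AddTask(Task):
        # No explicit name: the metaclass derives "myapp.tasks.AddTask".
        rate_limit = "100/m"            # at most ~100 of these per minute

        def run(self, x, y, **kwargs):
            logger = self.get_logger(**kwargs)
            logger.info("Adding %s + %s" % (x, y))
            return x + y

    # No tasks.register(AddTask) needed; the metaclass already did it.
    result = AddTask.delay(2, 2)        # shortcut for apply_async(...)
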
+ 4 - 6
celery/task/builtins.py

@@ -1,8 +1,8 @@
-from celery.task.base import Task, TaskSet, PeriodicTask
-from celery.registry import tasks
-from celery.backends import default_backend
 from datetime import timedelta
-from celery.serialization import pickle
+
+from celery.task.base import Task, PeriodicTask
+from celery.backends import default_backend
+from celery.registry import tasks
 
 
 class DeleteExpiredTaskMetaTask(PeriodicTask):
@@ -20,7 +20,6 @@ class DeleteExpiredTaskMetaTask(PeriodicTask):
         logger = self.get_logger(**kwargs)
         logger.info("Deleting expired task meta objects...")
         default_backend.cleanup()
-tasks.register(DeleteExpiredTaskMetaTask)
 
 
 class PingTask(Task):
@@ -30,4 +29,3 @@ class PingTask(Task):
     def run(self, **kwargs):
         """:returns: the string ``"pong"``."""
         return "pong"
-tasks.register(PingTask)

+ 6 - 6
celery/task/rest.py

@@ -1,12 +1,13 @@
-from celery.task.base import Task as BaseTask
-from celery.registry import tasks
-from celery import __version__ as celery_version
+import urllib2
 from cgi import parse_qsl
 from urllib import urlencode
 from urlparse import urlparse
+
 from anyjson import serialize, deserialize
-import httplib
-import urllib2
+
+from celery import __version__ as celery_version
+from celery.registry import tasks
+from celery.task.base import Task as BaseTask
 
 
 class InvalidResponseError(Exception):
@@ -135,7 +136,6 @@ class RESTProxyTask(BaseTask):
         logger = self.get_logger(**kwargs)
         proxy = RESTProxy(url, kwargs, logger)
         return proxy.execute()
-tasks.register(RESTProxyTask)
 
 
 def task_response(fun, *args, **kwargs):

+ 1 - 0
celery/task/strategy.py

@@ -1,4 +1,5 @@
 from carrot.connection import DjangoBrokerConnection
+
 from celery.utils import chunks
 
 

+ 8 - 6
celery/tests/test_backends/test_base.py

@@ -1,10 +1,12 @@
 import unittest
 import types
-from celery.backends.base import BaseBackend, KeyValueStoreBackend
-from celery.serialization import find_nearest_pickleable_exception as fnpe
-from celery.serialization import UnpickleableExceptionWrapper
-from celery.serialization import get_pickleable_exception as gpe
+
 from django.db.models.base import subclass_exception
+from billiard.serialization import find_nearest_pickleable_exception as fnpe
+from billiard.serialization import UnpickleableExceptionWrapper
+from billiard.serialization import get_pickleable_exception as gpe
+
+from celery.backends.base import BaseBackend, KeyValueStoreBackend
 
 
 class wrapobject(object):
@@ -24,11 +26,11 @@ class TestBaseBackendInterface(unittest.TestCase):
 
     def test_get_status(self):
         self.assertRaises(NotImplementedError,
-                b.is_done, "SOMExx-N0Nex1stant-IDxx-")
+                b.is_successful, "SOMExx-N0Nex1stant-IDxx-")
 
     def test_store_result(self):
         self.assertRaises(NotImplementedError,
-                b.store_result, "SOMExx-N0nex1stant-IDxx-", 42, "DONE")
+                b.store_result, "SOMExx-N0nex1stant-IDxx-", 42, "SUCCESS")
 
     def test_get_result(self):
         self.assertRaises(NotImplementedError,

+ 5 - 9
celery/tests/test_backends/test_cache.py

@@ -1,10 +1,6 @@
-import sys
 import unittest
-import errno
-import socket
-from celery.backends.cache import Backend as CacheBackend
+from celery.backends.cache import CacheBackend
 from celery.utils import gen_unique_id
-from django.conf import settings
 
 
 class SomeClass(object):
@@ -20,13 +16,13 @@ class TestCacheBackend(unittest.TestCase):
 
         tid = gen_unique_id()
 
-        self.assertFalse(cb.is_done(tid))
+        self.assertFalse(cb.is_successful(tid))
         self.assertEquals(cb.get_status(tid), "PENDING")
         self.assertEquals(cb.get_result(tid), None)
 
         cb.mark_as_done(tid, 42)
-        self.assertTrue(cb.is_done(tid))
-        self.assertEquals(cb.get_status(tid), "DONE")
+        self.assertTrue(cb.is_successful(tid))
+        self.assertEquals(cb.get_status(tid), "SUCCESS")
         self.assertEquals(cb.get_result(tid), 42)
         self.assertTrue(cb._cache.get(tid))
         self.assertTrue(cb.get_result(tid), 42)
@@ -51,7 +47,7 @@ class TestCacheBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         cb.mark_as_failure(tid3, exception)
-        self.assertFalse(cb.is_done(tid3))
+        self.assertFalse(cb.is_successful(tid3))
         self.assertEquals(cb.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(cb.get_result(tid3), KeyError))
 

+ 7 - 20
celery/tests/test_backends/test_database.py

@@ -1,10 +1,9 @@
 import unittest
-from celery.backends.database import Backend
+from celery.backends.database import DatabaseBackend
 from celery.utils import gen_unique_id
 from celery.task import PeriodicTask
 from celery import registry
-from celery.models import PeriodicTaskMeta
-from datetime import datetime, timedelta
+from datetime import timedelta
 
 
 class SomeClass(object):
@@ -19,33 +18,21 @@ class MyPeriodicTask(PeriodicTask):
 
     def run(self, **kwargs):
         return 42
-registry.tasks.register(MyPeriodicTask)
 
 
 class TestDatabaseBackend(unittest.TestCase):
 
-    def test_run_periodic_tasks(self):
-        #obj, created = PeriodicTaskMeta.objects.get_or_create(
-        #                    name=MyPeriodicTask.name,
-        #                    defaults={"last_run_at": datetime.now() -
-        #                        timedelta(days=-4)})
-        #if not created:
-        #    obj.last_run_at = datetime.now() - timedelta(days=4)
-        #    obj.save()
-        b = Backend()
-        b.run_periodic_tasks()
-
     def test_backend(self):
-        b = Backend()
+        b = DatabaseBackend()
         tid = gen_unique_id()
 
-        self.assertFalse(b.is_done(tid))
+        self.assertFalse(b.is_successful(tid))
         self.assertEquals(b.get_status(tid), "PENDING")
         self.assertTrue(b.get_result(tid) is None)
 
         b.mark_as_done(tid, 42)
-        self.assertTrue(b.is_done(tid))
-        self.assertEquals(b.get_status(tid), "DONE")
+        self.assertTrue(b.is_successful(tid))
+        self.assertEquals(b.get_status(tid), "SUCCESS")
         self.assertEquals(b.get_result(tid), 42)
         self.assertTrue(b._cache.get(tid))
         self.assertTrue(b.get_result(tid), 42)
@@ -64,6 +51,6 @@ class TestDatabaseBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         b.mark_as_failure(tid3, exception)
-        self.assertFalse(b.is_done(tid3))
+        self.assertFalse(b.is_successful(tid3))
         self.assertEquals(b.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(b.get_result(tid3), KeyError))

+ 5 - 6
celery/tests/test_backends/test_tyrant.py

@@ -3,8 +3,7 @@ import unittest
 import errno
 import socket
 from celery.backends import tyrant
-from celery.backends.tyrant import Backend as TyrantBackend
-from django.conf import settings
+from celery.backends.tyrant import TyrantBackend
 from celery.utils import gen_unique_id
 from django.core.exceptions import ImproperlyConfigured
 
@@ -64,13 +63,13 @@ class TestTyrantBackend(unittest.TestCase):
 
         tid = gen_unique_id()
 
-        self.assertFalse(tb.is_done(tid))
+        self.assertFalse(tb.is_successful(tid))
         self.assertEquals(tb.get_status(tid), "PENDING")
         self.assertEquals(tb.get_result(tid), None)
 
         tb.mark_as_done(tid, 42)
-        self.assertTrue(tb.is_done(tid))
-        self.assertEquals(tb.get_status(tid), "DONE")
+        self.assertTrue(tb.is_successful(tid))
+        self.assertEquals(tb.get_status(tid), "SUCCESS")
         self.assertEquals(tb.get_result(tid), 42)
         self.assertTrue(tb._cache.get(tid))
         self.assertTrue(tb.get_result(tid), 42)
@@ -99,7 +98,7 @@ class TestTyrantBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         tb.mark_as_failure(tid3, exception)
-        self.assertFalse(tb.is_done(tid3))
+        self.assertFalse(tb.is_successful(tid3))
         self.assertEquals(tb.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(tb.get_result(tid3), KeyError))
 

+ 214 - 0
celery/tests/test_buckets.py

@@ -0,0 +1,214 @@
+import os
+import sys
+sys.path.insert(0, os.getcwd())
+import time
+import unittest
+from itertools import chain, izip
+
+from celery.worker import buckets
+from celery.utils import gen_unique_id
+from celery.registry import TaskRegistry
+from celery.task.base import Task
+
+
+class MockJob(object):
+
+    def __init__(self, task_id, task_name, args, kwargs):
+        self.task_id = task_id
+        self.task_name = task_name
+        self.args = args
+        self.kwargs = kwargs
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return bool(self.task_id == other.task_id \
+                    and self.task_name == other.task_name \
+                    and self.args == other.args \
+                    and self.kwargs == other.kwargs)
+        else:
+            return NotImplemented
+
+    def __repr__(self):
+        return "<MockJob: task:%s id:%s args:%s kwargs:%s" % (
+                self.task_name, self.task_id, self.args, self.kwargs)
+
+
+class TestTokenBucketQueue(unittest.TestCase):
+
+    def test_empty_queue_yields_QueueEmpty(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        self.assertRaises(buckets.QueueEmpty, x.get)
+
+    def test_bucket__put_get(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        x.put("The quick brown fox")
+        self.assertEquals(x.get(), "The quick brown fox")
+
+        x.put_nowait("The lazy dog")
+        time.sleep(0.2)
+        self.assertEquals(x.get_nowait(), "The lazy dog")
+
+    def test_fill_rate(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        # 20 items should take at least one second to complete
+        time_start = time.time()
+        [x.put(str(i)) for i in xrange(20)]
+        for i in xrange(20):
+            sys.stderr.write("x")
+            x.wait()
+        self.assertTrue(time.time() - time_start > 1.5)
+
+    def test_can_consume(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put("The quick brown fox")
+        self.assertEqual(x.get(), "The quick brown fox")
+        time.sleep(0.1)
+        # Not yet ready for another token
+        x.put("The lazy dog")
+        self.assertRaises(x.RateLimitExceeded, x.get)
+
+    def test_expected_time(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put_nowait("The quick brown fox")
+        self.assertEqual(x.get_nowait(), "The quick brown fox")
+        self.assertTrue(x.expected_time())
+
+    def test_qsize(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put("The quick brown fox")
+        self.assertEqual(x.qsize(), 1)
+        self.assertTrue(x.get_nowait(), "The quick brown fox")
+
+
+class TestRateLimitString(unittest.TestCase):
+
+    def test_conversion(self):
+        self.assertEquals(buckets.parse_ratelimit_string(999), 999)
+        self.assertEquals(buckets.parse_ratelimit_string("1456/s"), 1456)
+        self.assertEquals(buckets.parse_ratelimit_string("100/m"),
+                          100 / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("10/h"),
+                          10 / 60.0 / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("0xffec/s"), 0xffec)
+        self.assertEquals(buckets.parse_ratelimit_string("0xcda/m"),
+                          0xcda / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("0xF/h"),
+                          0xf / 60.0 / 60.0)
+
+        for zero in ("0x0", "0b0", "0o0", 0, None, "0/m", "0/h", "0/s"):
+            self.assertEquals(buckets.parse_ratelimit_string(zero), 0)
+
+
+class TaskA(Task):
+    rate_limit = 10
+
+
+class TaskB(Task):
+    rate_limit = None
+
+
+class TaskC(Task):
+    rate_limit = "1/s"
+
+
+class TaskD(Task):
+    rate_limit = "1000/m"
+
+
+class TestTaskBuckets(unittest.TestCase):
+
+    def setUp(self):
+        self.registry = TaskRegistry()
+        self.task_classes = (TaskA, TaskB, TaskC)
+        for task_cls in self.task_classes:
+            self.registry.register(task_cls)
+
+    def test_auto_add_on_missing(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        for task_cls in self.task_classes:
+            self.assertTrue(task_cls.name in b.buckets.keys())
+        self.registry.register(TaskD)
+        self.assertTrue(b.get_bucket_for_type(TaskD.name))
+        self.assertTrue(TaskD.name in b.buckets.keys())
+        self.registry.unregister(TaskD)
+
+    def test_has_rate_limits(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        self.assertEqual(b.buckets[TaskA.name].fill_rate, 10)
+        self.assertTrue(isinstance(b.buckets[TaskB.name], buckets.Queue))
+        self.assertEqual(b.buckets[TaskC.name].fill_rate, 1)
+        self.registry.register(TaskD)
+        b.init_with_registry()
+        try:
+            self.assertEqual(b.buckets[TaskD.name].fill_rate, 1000 / 60.0)
+        finally:
+            self.registry.unregister(TaskD)
+
+    def test_on_empty_buckets__get_raises_empty(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        self.assertRaises(buckets.QueueEmpty, b.get)
+        self.assertEqual(b.qsize(), 0)
+
+    def test_put__get(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        job = MockJob(gen_unique_id(), TaskA.name, ["theqbf"], {"foo": "bar"})
+        b.put(job)
+        self.assertEquals(b.get(), job)
+
+    def test_fill_rate(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+
+        cjob = lambda i: MockJob(gen_unique_id(), TaskA.name, [i], {})
+        jobs = [cjob(i) for i in xrange(20)]
+        [b.put(job) for job in jobs]
+
+        self.assertEqual(b.qsize(), 20)
+
+        # 20 items should take at least one second to complete
+        time_start = time.time()
+        for i, job in enumerate(jobs):
+            sys.stderr.write("i")
+            self.assertEqual(b.get(), job)
+        self.assertTrue(time.time() - time_start > 1.5)
+
+    def test__very_busy_queue_doesnt_block_others(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+
+        cjob = lambda i, t: MockJob(gen_unique_id(), t.name, [i], {})
+        ajobs = [cjob(i, TaskA) for i in xrange(10)]
+        bjobs = [cjob(i, TaskB) for i in xrange(20)]
+        jobs = list(chain(*izip(bjobs, ajobs)))
+        map(b.put, jobs)
+
+        got_ajobs = 0
+        for job in (b.get() for i in xrange(20)):
+            if job.task_name == TaskA.name:
+                got_ajobs += 1
+
+        self.assertTrue(got_ajobs > 2)
+
+    def test_thorough__multiple_types(self):
+        self.registry.register(TaskD)
+        try:
+            b = buckets.TaskBucket(task_registry=self.registry)
+
+            cjob = lambda i, t: MockJob(gen_unique_id(), t.name, [i], {})
+
+            ajobs = [cjob(i, TaskA) for i in xrange(10)]
+            bjobs = [cjob(i, TaskB) for i in xrange(10)]
+            cjobs = [cjob(i, TaskC) for i in xrange(10)]
+            djobs = [cjob(i, TaskD) for i in xrange(10)]
+
+            # Spread the jobs around.
+            jobs = list(chain(*izip(ajobs, bjobs, cjobs, djobs)))
+
+            [b.put(job) for job in jobs]
+            for i, job in enumerate(jobs):
+                sys.stderr.write("0")
+                self.assertTrue(b.get(), job)
+            self.assertEqual(i+1, len(jobs))
+        finally:
+            self.registry.unregister(TaskD)
+
+if __name__ == "__main__":
+    unittest.main()

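The tests above pin down the token-bucket behaviour; a condensed sketch of the
same API (rates and payloads are arbitrary):

    from celery.worker.buckets import TokenBucketQueue, parse_ratelimit_string

    parse_ratelimit_string("100/m")     # -> 100 / 60.0 tokens per second
    parse_ratelimit_string(None)        # -> 0, meaning no rate limit

    q = TokenBucketQueue(fill_rate=10)  # refills at 10 tokens per second
    q.put("job-1")
    q.get()                             # "job-1"; raises q.RateLimitExceeded
                                        # once the bucket runs out of tokens
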
+ 0 - 1
celery/tests/test_log.py

@@ -3,7 +3,6 @@ import os
 import sys
 import logging
 import unittest
-import multiprocessing
 from StringIO import StringIO
 from celery.log import setup_logger, emergency_error
 from celery.tests.utils import override_stdouts

+ 1 - 1
celery/tests/test_messaging.py

@@ -1,5 +1,5 @@
 import unittest
-from celery.messaging import MSG_OPTIONS, get_msg_options, extract_msg_options
+from celery.messaging import MSG_OPTIONS, extract_msg_options
 
 
 class TestMsgOptions(unittest.TestCase):

+ 6 - 32
celery/tests/test_models.py

@@ -1,16 +1,9 @@
 import unittest
 from datetime import datetime, timedelta
-from celery.models import TaskMeta, PeriodicTaskMeta
-from celery.task import PeriodicTask
-from celery.registry import tasks
+from celery.models import TaskMeta
 from celery.utils import gen_unique_id
 
 
-class TestPeriodicTask(PeriodicTask):
-    name = "celery.unittest.test_models.test_periodic_task"
-    run_every = timedelta(minutes=30)
-
-
 class TestModels(unittest.TestCase):
 
     def createTaskMeta(self):
@@ -18,11 +11,6 @@ class TestModels(unittest.TestCase):
         taskmeta, created = TaskMeta.objects.get_or_create(task_id=id)
         return taskmeta
 
-    def createPeriodicTaskMeta(self, name):
-        ptaskmeta, created = PeriodicTaskMeta.objects.get_or_create(name=name,
-                defaults={"last_run_at": datetime.now()})
-        return ptaskmeta
-
     def test_taskmeta(self):
         m1 = self.createTaskMeta()
         m2 = self.createTaskMeta()
@@ -33,11 +21,11 @@ class TestModels(unittest.TestCase):
 
         self.assertEquals(TaskMeta.objects.get_task(m1.task_id).task_id,
                 m1.task_id)
-        self.assertFalse(TaskMeta.objects.is_done(m1.task_id))
-        TaskMeta.objects.store_result(m1.task_id, True, status="DONE")
-        TaskMeta.objects.store_result(m2.task_id, True, status="DONE")
-        self.assertTrue(TaskMeta.objects.is_done(m1.task_id))
-        self.assertTrue(TaskMeta.objects.is_done(m2.task_id))
+        self.assertFalse(TaskMeta.objects.is_successful(m1.task_id))
+        TaskMeta.objects.store_result(m1.task_id, True, status="SUCCESS")
+        TaskMeta.objects.store_result(m2.task_id, True, status="SUCCESS")
+        self.assertTrue(TaskMeta.objects.is_successful(m1.task_id))
+        self.assertTrue(TaskMeta.objects.is_successful(m2.task_id))
 
         # Have to avoid save() because it applies the auto_now=True.
         TaskMeta.objects.filter(task_id=m1.task_id).update(
@@ -50,17 +38,3 @@ class TestModels(unittest.TestCase):
 
         TaskMeta.objects.delete_expired()
         self.assertFalse(m1 in TaskMeta.objects.all())
-
-    def test_periodic_taskmeta(self):
-        tasks.register(TestPeriodicTask)
-        p = self.createPeriodicTaskMeta(TestPeriodicTask.name)
-        # check that repr works.
-        self.assertTrue(unicode(p).startswith("<PeriodicTask:"))
-        self.assertFalse(p in PeriodicTaskMeta.objects.get_waiting_tasks())
-        p.last_run_at = datetime.now() - (TestPeriodicTask.run_every +
-                timedelta(seconds=10))
-        p.save()
-        self.assertTrue(p in PeriodicTaskMeta.objects.get_waiting_tasks())
-        self.assertTrue(isinstance(p.task, TestPeriodicTask))
-
-        p.delay()

+ 2 - 1
celery/tests/test_pickle.py

@@ -1,5 +1,6 @@
 import unittest
-from celery.serialization import pickle
+
+from billiard.serialization import pickle
 
 
 class RegularException(Exception):

+ 1 - 2
celery/tests/test_pool.py

@@ -2,7 +2,7 @@ import unittest
 import logging
 import itertools
 import time
-from celery.pool import TaskPool
+from celery.worker.pool import TaskPool
 from celery.datastructures import ExceptionInfo
 import sys
 
@@ -12,7 +12,6 @@ def do_something(i):
 
 
 def long_something():
-    import time
     time.sleep(1)
 
 

+ 0 - 16
celery/tests/test_registry.py

@@ -2,8 +2,6 @@ import unittest
 from celery import registry
 from celery.task import Task, PeriodicTask
 
-FUNC_TASK_NAME = "celery.unittest.func_task"
-
 
 class TestTask(Task):
     name = "celery.unittest.test_task"
@@ -20,23 +18,17 @@ class TestPeriodicTask(PeriodicTask):
         return True
 
 
-def func_task(**kwargs):
-    return True
-
-
 class TestTaskRegistry(unittest.TestCase):
 
     def assertRegisterUnregisterCls(self, r, task):
         self.assertRaises(r.NotRegistered, r.unregister, task)
         r.register(task)
         self.assertTrue(task.name in r)
-        self.assertRaises(r.AlreadyRegistered, r.register, task)
 
     def assertRegisterUnregisterFunc(self, r, task, task_name):
         self.assertRaises(r.NotRegistered, r.unregister, task_name)
         r.register(task, task_name)
         self.assertTrue(task_name in r)
-        self.assertRaises(r.AlreadyRegistered, r.register, task, task_name)
 
     def test_task_registry(self):
         r = registry.TaskRegistry()
@@ -44,37 +36,29 @@ class TestTaskRegistry(unittest.TestCase):
                 "TaskRegistry has composited dict")
 
         self.assertRegisterUnregisterCls(r, TestTask)
-        self.assertRegisterUnregisterFunc(r, func_task, FUNC_TASK_NAME)
         self.assertRegisterUnregisterCls(r, TestPeriodicTask)
 
         tasks = r.get_all()
         self.assertTrue(isinstance(tasks.get(TestTask.name), TestTask))
         self.assertTrue(isinstance(tasks.get(TestPeriodicTask.name),
                                    TestPeriodicTask))
-        self.assertEquals(tasks.get(FUNC_TASK_NAME), func_task)
 
         regular = r.get_all_regular()
         self.assertTrue(TestTask.name in regular)
         self.assertFalse(TestPeriodicTask.name in regular)
-        self.assertTrue(FUNC_TASK_NAME in regular)
 
         periodic = r.get_all_periodic()
         self.assertFalse(TestTask.name in periodic)
         self.assertTrue(TestPeriodicTask.name in periodic)
-        self.assertFalse(FUNC_TASK_NAME in periodic)
 
         self.assertTrue(isinstance(r.get_task(TestTask.name), TestTask))
         self.assertTrue(isinstance(r.get_task(TestPeriodicTask.name),
                                    TestPeriodicTask))
-        self.assertEquals(r.get_task(FUNC_TASK_NAME), func_task)
 
         r.unregister(TestTask)
         self.assertFalse(TestTask.name in r)
         r.unregister(TestPeriodicTask)
         self.assertFalse(TestPeriodicTask.name in r)
-        r.unregister(FUNC_TASK_NAME)
-        self.assertFalse(FUNC_TASK_NAME in r)
 
-        self.assertTrue(func_task())
         self.assertTrue(TestTask().run())
         self.assertTrue(TestPeriodicTask().run())
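
Plain functions are no longer registered directly; the new ``celery.decorators.task`` decorator wraps a function in a Task subclass instead. A minimal sketch of the pattern the updated tests rely on (the task definition is made up, and the automatic registration under the generated name is an assumption based on how the tests use the decorator)::

    from celery import registry
    from celery.decorators import task

    @task()
    def add(x, y, **kwargs):
        # The decorator builds and registers a Task subclass around this
        # function, so there is no separate registry.tasks.register() call.
        return x + y

    # The generated task is looked up by its name attribute.
    assert add.name in registry.tasks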

+ 7 - 7
celery/tests/test_result.py

@@ -11,14 +11,14 @@ def mock_task(name, status, result):
 
 
 def save_result(task):
-    if task["status"] == "DONE":
+    if task["status"] == "SUCCESS":
         default_backend.mark_as_done(task["id"], task["result"])
     else:
         default_backend.mark_as_failure(task["id"], task["result"])
 
 
 def make_mock_taskset(size=10):
-    tasks = [mock_task("ts%d" % i, "DONE", i) for i in xrange(size)]
+    tasks = [mock_task("ts%d" % i, "SUCCESS", i) for i in xrange(size)]
     [save_result(task) for task in tasks]
     return [AsyncResult(task["id"]) for task in tasks]
 
@@ -26,19 +26,19 @@ def make_mock_taskset(size=10):
 class TestAsyncResult(unittest.TestCase):
 
     def setUp(self):
-        self.task1 = mock_task("task1", "DONE", "the")
-        self.task2 = mock_task("task2", "DONE", "quick")
+        self.task1 = mock_task("task1", "SUCCESS", "the")
+        self.task2 = mock_task("task2", "SUCCESS", "quick")
         self.task3 = mock_task("task3", "FAILURE", KeyError("brown"))
 
         for task in (self.task1, self.task2, self.task3):
             save_result(task)
 
-    def test_is_done(self):
+    def test_successful(self):
         ok_res = AsyncResult(self.task1["id"])
         nok_res = AsyncResult(self.task3["id"])
 
-        self.assertTrue(ok_res.is_done())
-        self.assertFalse(nok_res.is_done())
+        self.assertTrue(ok_res.successful())
+        self.assertFalse(nok_res.successful())
 
     def test_sucessful(self):
         ok_res = AsyncResult(self.task1["id"])
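
The result API renames follow the status change: ``is_done()`` becomes ``successful()`` and the stored state string is now ``"SUCCESS"``. A rough sketch of the new calls (the task id is hypothetical and a configured result backend is assumed)::

    from celery.backends import default_backend
    from celery.result import AsyncResult

    task_id = "hypothetical-task-id"
    default_backend.mark_as_done(task_id, result=42)

    res = AsyncResult(task_id)
    assert res.ready()        # finished, successfully or not
    assert res.successful()   # finished and recorded as SUCCESS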

+ 3 - 3
celery/tests/test_serialization.py

@@ -7,9 +7,9 @@ class TestAAPickle(unittest.TestCase):
 
     def test_no_cpickle(self):
         from celery.tests.utils import mask_modules
-        prev = sys.modules.pop("celery.serialization")
+        prev = sys.modules.pop("billiard.serialization")
         with mask_modules("cPickle"):
-            from celery.serialization import pickle
+            from billiard.serialization import pickle
             import pickle as orig_pickle
             self.assertTrue(pickle.dumps is orig_pickle.dumps)
-        sys.modules["celery.serialization"] = prev
+        sys.modules["billiard.serialization"] = prev

+ 0 - 66
celery/tests/test_supervisor.py

@@ -1,66 +0,0 @@
-import unittest
-from celery.supervisor import OFASupervisor
-from celery.supervisor import TimeoutError, MaxRestartsExceededError
-
-
-def target_one(x, y, z):
-    return x * y * z
-
-
-class MockProcess(object):
-    _started = False
-    _stopped = False
-    _terminated = False
-    _joined = False
-    alive = True
-    timeout_on_is_alive = False
-
-    def __init__(self, target, args, kwargs):
-        self.target = target
-        self.args = args
-        self.kwargs = kwargs
-
-    def start(self):
-        self._stopped = False
-        self._started = True
-
-    def stop(self):
-        self._stopped = True
-        self._started = False
-
-    def terminate(self):
-        self._terminated = False
-
-    def is_alive(self):
-        if self._started and self.alive:
-            if self.timeout_on_is_alive:
-                raise TimeoutError("Supervised: timed out.")
-            return True
-        return False
-
-    def join(self, timeout=None):
-        self._joined = True
-
-
-class TestOFASupervisor(unittest.TestCase):
-
-    def test_init(self):
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={})
-        s.Process = MockProcess
-
-    def test_start(self):
-        MockProcess.alive = False
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={},
-                          max_restart_freq=0, max_restart_freq_time=0)
-        s.Process = MockProcess
-        self.assertRaises(MaxRestartsExceededError, s.start)
-        MockProcess.alive = True
-
-    def test_start_is_alive_timeout(self):
-        MockProcess.alive = True
-        MockProcess.timeout_on_is_alive = True
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={},
-                          max_restart_freq=0, max_restart_freq_time=0)
-        s.Process = MockProcess
-        self.assertRaises(MaxRestartsExceededError, s.start)
-        MockProcess.timeout_on_is_alive = False

+ 23 - 34
celery/tests/test_task.py

@@ -1,21 +1,21 @@
 import unittest
-import uuid
-import logging
 from StringIO import StringIO
 
 from celery import task
 from celery import registry
-from celery.log import setup_logger
 from celery import messaging
 from celery.result import EagerResult
 from celery.backends import default_backend
 from datetime import datetime, timedelta
+from celery.decorators import task as task_dec
+from celery.worker import parse_iso8601
 
-
-def return_True(self, **kwargs):
+def return_True(*args, **kwargs):
     # Task run functions can't be closures/lambdas, as they're pickled.
     return True
-registry.tasks.register(return_True, "cu.return-true")
+
+
+return_True_task = task_dec()(return_True)
 
 
 def raise_exception(self, **kwargs):
@@ -120,9 +120,10 @@ class TestTaskRetries(unittest.TestCase):
 class TestCeleryTasks(unittest.TestCase):
 
     def createTaskCls(self, cls_name, task_name=None):
-        attrs = {}
+        attrs = {"__module__": self.__module__}
         if task_name:
             attrs["name"] = task_name
+
         cls = type(cls_name, (task.Task, ), attrs)
         cls.run = return_True
         return cls
@@ -166,7 +167,9 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertEquals(task_data["task"], task_name)
         task_kwargs = task_data.get("kwargs", {})
         if test_eta:
-            self.assertTrue(isinstance(task_data.get("eta"), datetime))
+            self.assertTrue(isinstance(task_data.get("eta"), basestring))
+            to_datetime = parse_iso8601(task_data.get("eta"))
+            self.assertTrue(isinstance(to_datetime, datetime))
         for arg_name, arg_value in kwargs.items():
             self.assertEquals(task_kwargs.get(arg_name), arg_value)
 
@@ -190,7 +193,6 @@ class TestCeleryTasks(unittest.TestCase):
         T2 = self.createTaskCls("T2")
         self.assertEquals(T2().name, "celery.tests.test_task.T2")
 
-        registry.tasks.register(T1)
         t1 = T1()
         consumer = t1.get_consumer()
         self.assertRaises(NotImplementedError, consumer.receive, "foo", "foo")
@@ -202,7 +204,7 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertNextTaskDataEquals(consumer, presult, t1.name)
 
         # With arguments.
-        presult2 = task.delay_task(t1.name, name="George Constanza")
+        presult2 = t1.apply_async(kwargs=dict(name="George Constanza"))
         self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                 name="George Constanza")
 
@@ -218,20 +220,17 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                 name="George Constanza", test_eta=True)
 
-        self.assertRaises(registry.tasks.NotRegistered, task.delay_task,
-                "some.task.that.should.never.exist.X.X.X.X.X")
-
         # Discarding all tasks.
         task.discard_all()
-        tid3 = task.delay_task(t1.name)
+        tid3 = task.apply_async(t1)
         self.assertEquals(task.discard_all(), 1)
         self.assertTrue(consumer.fetch() is None)
 
-        self.assertFalse(task.is_done(presult.task_id))
-        self.assertFalse(presult.is_done())
+        self.assertFalse(task.is_successful(presult.task_id))
+        self.assertFalse(presult.successful())
         default_backend.mark_as_done(presult.task_id, result=None)
-        self.assertTrue(task.is_done(presult.task_id))
-        self.assertTrue(presult.is_done())
+        self.assertTrue(task.is_successful(presult.task_id))
+        self.assertTrue(presult.successful())
 
 
         publisher = t1.get_publisher()
@@ -250,7 +249,7 @@ class TestTaskSet(unittest.TestCase):
     def test_function_taskset(self):
         from celery import conf
         conf.ALWAYS_EAGER = True
-        ts = task.TaskSet("cu.return-true", [
+        ts = task.TaskSet(return_True_task.name, [
             [[1], {}], [[2], {}], [[3], {}], [[4], {}], [[5], {}]])
         res = ts.run()
         self.assertEquals(res.join(), [True, True, True, True, True])
@@ -280,7 +279,7 @@ class TestTaskSet(unittest.TestCase):
         subtasks = taskset_res.subtasks
         taskset_id = taskset_res.taskset_id
         for subtask in subtasks:
-            m = consumer.decoder(consumer.fetch().body)
+            m = consumer.fetch().payload
             self.assertEquals(m.get("taskset"), taskset_id)
             self.assertEquals(m.get("task"), IncrementCounterTask.name)
             self.assertEquals(m.get("id"), subtask.task_id)
@@ -304,22 +303,12 @@ class TestTaskApply(unittest.TestCase):
         e = IncrementCounterTask.apply(kwargs={"increment_by": 4})
         self.assertEquals(e.get(), 6)
 
-        self.assertTrue(e.is_done())
-        self.assertTrue(e.is_ready())
+        self.assertTrue(e.successful())
+        self.assertTrue(e.ready())
         self.assertTrue(repr(e).startswith("<EagerResult:"))
 
         f = RaisingTask.apply()
-        self.assertTrue(f.is_ready())
-        self.assertFalse(f.is_done())
+        self.assertTrue(f.ready())
+        self.assertFalse(f.successful())
         self.assertTrue(f.traceback)
         self.assertRaises(KeyError, f.get)
-
-
-class TestPeriodicTask(unittest.TestCase):
-
-    def test_interface(self):
-
-        class MyPeriodicTask(task.PeriodicTask):
-            run_every = None
-
-        self.assertRaises(NotImplementedError, MyPeriodicTask)
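
These tests also reflect that tasks are now sent through the class API rather than ``delay_task(name, ...)``, and that an ``eta`` travels as an ISO-8601 string in the message body, which the worker parses back to a datetime. A hedged sketch of the calling side (the task and its arguments are made up)::

    from datetime import datetime, timedelta

    from celery.decorators import task

    @task()
    def add(x, y, **kwargs):
        return x + y

    # apply_async() serializes the eta before publishing; the worker
    # turns the ISO-8601 string back into a datetime when scheduling.
    result = add.apply_async(args=[2, 2],
                             eta=datetime.now() + timedelta(minutes=5))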

+ 4 - 2
celery/tests/test_task_builtins.py

@@ -1,7 +1,9 @@
 import unittest
-from celery.task.builtins import PingTask, DeleteExpiredTaskMetaTask
+
+from billiard.serialization import pickle
+
 from celery.task.base import ExecuteRemoteTask
-from celery.serialization import pickle
+from celery.task.builtins import PingTask, DeleteExpiredTaskMetaTask
 
 
 def some_func(i):

+ 0 - 1
celery/tests/test_utils.py

@@ -1,4 +1,3 @@
-import sys
 import unittest
 from celery.utils import chunks
 

+ 39 - 35
celery/tests/test_worker.py

@@ -1,20 +1,23 @@
 import unittest
 from Queue import Queue, Empty
-from carrot.connection import BrokerConnection
-from celery.messaging import TaskConsumer
-from celery.worker.job import TaskWrapper
-from celery.worker import AMQPListener, WorkController
+from datetime import datetime, timedelta
 from multiprocessing import get_logger
+
+from carrot.connection import BrokerConnection
 from carrot.backends.base import BaseMessage
+from billiard.serialization import pickle
+
 from celery import registry
-from celery.serialization import pickle
 from celery.utils import gen_unique_id
-from datetime import datetime, timedelta
+from celery.worker import CarrotListener, WorkController
+from celery.worker.job import TaskWrapper
+from celery.worker.scheduler import Scheduler
+from celery.decorators import task as task_dec
 
 
+@task_dec()
 def foo_task(x, y, z, **kwargs):
     return x * y * z
-registry.tasks.register(foo_task, name="c.u.foo")
 
 
 class MockLogger(object):
@@ -80,16 +83,16 @@ def create_message(backend, **data):
                        content_encoding="binary")
 
 
-class TestAMQPListener(unittest.TestCase):
+class TestCarrotListener(unittest.TestCase):
 
     def setUp(self):
-        self.bucket_queue = Queue()
-        self.hold_queue = Queue()
+        self.ready_queue = Queue()
+        self.eta_scheduler = Scheduler(self.ready_queue)
         self.logger = get_logger()
         self.logger.setLevel(0)
 
     def test_connection(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_scheduler, self.logger)
 
         c = l.reset_connection()
         self.assertTrue(isinstance(l.amqp_connection, BrokerConnection))
@@ -106,44 +109,46 @@ class TestAMQPListener(unittest.TestCase):
         self.assertTrue(l.task_consumer is None)
 
     def test_receieve_message(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_scheduler, self.logger)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[2, 4, 8], kwargs={})
+        m = create_message(backend, task=foo_task.name,
+                           args=[2, 4, 8], kwargs={})
 
         l.receive_message(m.decode(), m)
 
-        in_bucket = self.bucket_queue.get_nowait()
+        in_bucket = self.ready_queue.get_nowait()
         self.assertTrue(isinstance(in_bucket, TaskWrapper))
-        self.assertEquals(in_bucket.task_name, "c.u.foo")
+        self.assertEquals(in_bucket.task_name, foo_task.name)
         self.assertEquals(in_bucket.execute(), 2 * 4 * 8)
-        self.assertRaises(Empty, self.hold_queue.get_nowait)
+        self.assertTrue(self.eta_scheduler.empty())
 
     def test_receieve_message_not_registered(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_scheduler, self.logger)
         backend = MockBackend()
         m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
 
         self.assertFalse(l.receive_message(m.decode(), m))
-        self.assertRaises(Empty, self.bucket_queue.get_nowait)
-        self.assertRaises(Empty, self.hold_queue.get_nowait)
+        self.assertRaises(Empty, self.ready_queue.get_nowait)
+        self.assertTrue(self.eta_scheduler.empty())
 
     def test_receieve_message_eta(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_scheduler, self.logger)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[2, 4, 8], kwargs={},
-                           eta=datetime.now() + timedelta(days=1))
+        m = create_message(backend, task=foo_task.name,
+                           args=[2, 4, 8], kwargs={},
+                           eta=(datetime.now() +
+                               timedelta(days=1)).isoformat())
 
         l.receive_message(m.decode(), m)
 
-        in_hold = self.hold_queue.get_nowait()
-        self.assertEquals(len(in_hold), 3)
-        task, eta, on_accept = in_hold
+        in_hold = self.eta_scheduler.queue[0]
+        self.assertEquals(len(in_hold), 4)
+        eta, priority, task, on_accept = in_hold
         self.assertTrue(isinstance(task, TaskWrapper))
-        self.assertTrue(isinstance(eta, datetime))
         self.assertTrue(callable(on_accept))
-        self.assertEquals(task.task_name, "c.u.foo")
+        self.assertEquals(task.task_name, foo_task.name)
         self.assertEquals(task.execute(), 2 * 4 * 8)
-        self.assertRaises(Empty, self.bucket_queue.get_nowait)
+        self.assertRaises(Empty, self.ready_queue.get_nowait)
 
 
 class TestWorkController(unittest.TestCase):
@@ -155,11 +160,10 @@ class TestWorkController(unittest.TestCase):
 
     def test_attrs(self):
         worker = self.worker
-        self.assertTrue(isinstance(worker.bucket_queue, Queue))
-        self.assertTrue(isinstance(worker.hold_queue, Queue))
-        self.assertTrue(worker.periodic_work_controller)
+        self.assertTrue(isinstance(worker.eta_scheduler, Scheduler))
+        self.assertTrue(worker.schedule_controller)
         self.assertTrue(worker.pool)
-        self.assertTrue(worker.amqp_listener)
+        self.assertTrue(worker.broker_listener)
         self.assertTrue(worker.mediator)
         self.assertTrue(worker.components)
 
@@ -167,7 +171,7 @@ class TestWorkController(unittest.TestCase):
         worker = self.worker
         worker.pool = MockPool()
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
         worker.safe_process_task(task)
@@ -177,7 +181,7 @@ class TestWorkController(unittest.TestCase):
         worker = self.worker
         worker.pool = MockPool(raise_base=True)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
         worker.safe_process_task(task)
@@ -187,7 +191,7 @@ class TestWorkController(unittest.TestCase):
         worker = self.worker
         worker.pool = MockPool(raise_regular=True)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
         worker.safe_process_task(task)
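
For reference, the listener wiring used throughout these tests: the ``CarrotListener`` now feeds a ready queue directly and hands eta tasks to a ``Scheduler`` instead of a separate hold queue. A sketch assuming a configured broker::

    from Queue import Queue
    from multiprocessing import get_logger

    from celery.worker import CarrotListener
    from celery.worker.scheduler import Scheduler

    ready_queue = Queue()
    eta_scheduler = Scheduler(ready_queue)

    listener = CarrotListener(ready_queue, eta_scheduler,
                              logger=get_logger(),
                              initial_prefetch_count=2)
    # listener.start() would connect to the broker and consume forever.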

+ 6 - 50
celery/tests/test_worker_controllers.py

@@ -1,10 +1,9 @@
 import unittest
 import time
-import multiprocessing
 from Queue import Queue, Empty
 from datetime import datetime, timedelta
 
-from celery.worker.controllers import Mediator, PeriodicWorkController
+from celery.worker.controllers import Mediator
 from celery.worker.controllers import BackgroundThread
 
 
@@ -19,7 +18,6 @@ class MockTask(object):
 class MyBackgroundThread(BackgroundThread):
 
     def on_iteration(self):
-        import time
         time.sleep(1)
 
 
@@ -48,8 +46,8 @@ class TestBackgroundThread(unittest.TestCase):
 class TestMediator(unittest.TestCase):
 
     def test_mediator_start__stop(self):
-        bucket_queue = Queue()
-        m = Mediator(bucket_queue, lambda t: t)
+        ready_queue = Queue()
+        m = Mediator(ready_queue, lambda t: t)
         m.start()
         self.assertFalse(m._shutdown.isSet())
         self.assertFalse(m._stopped.isSet())
@@ -59,57 +57,15 @@ class TestMediator(unittest.TestCase):
         self.assertTrue(m._stopped.isSet())
 
     def test_mediator_on_iteration(self):
-        bucket_queue = Queue()
+        ready_queue = Queue()
         got = {}
 
         def mycallback(value):
             got["value"] = value.value
 
-        m = Mediator(bucket_queue, mycallback)
-        bucket_queue.put(MockTask("George Constanza"))
+        m = Mediator(ready_queue, mycallback)
+        ready_queue.put(MockTask("George Constanza"))
 
         m.on_iteration()
 
         self.assertEquals(got["value"], "George Constanza")
-
-
-class TestPeriodicWorkController(unittest.TestCase):
-
-    def test_process_hold_queue(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.process_hold_queue()
-
-        scratchpad = {}
-
-        def on_accept():
-            scratchpad["accepted"] = True
-
-        hold_queue.put((MockTask("task1"),
-                        datetime.now() - timedelta(days=1),
-                        on_accept))
-
-        m.process_hold_queue()
-        self.assertRaises(Empty, hold_queue.get_nowait)
-        self.assertTrue(scratchpad.get("accepted"))
-        self.assertEquals(bucket_queue.get_nowait().value, "task1")
-        tomorrow = datetime.now() + timedelta(days=1)
-        hold_queue.put((MockTask("task2"), tomorrow, on_accept))
-        m.process_hold_queue()
-        self.assertRaises(Empty, bucket_queue.get_nowait)
-        value, eta, on_accept = hold_queue.get_nowait()
-        self.assertEquals(value.value, "task2")
-        self.assertEquals(eta, tomorrow)
-
-    def test_run_periodic_tasks(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.run_periodic_tasks()
-
-    def test_on_iteration(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.on_iteration()
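
The Mediator itself is unchanged apart from the queue rename; it still pops tasks off the queue and hands them to a callback. A small sketch with a stub task object (the attribute names mirror what the mediator logs)::

    from Queue import Queue

    from celery.worker.controllers import Mediator


    class StubTask(object):
        task_name = "hypothetical.task"
        task_id = "stub-id"

    ready_queue = Queue()
    processed = []

    m = Mediator(ready_queue, processed.append)
    ready_queue.put(StubTask())
    m.on_iteration()    # moves the task from ready_queue to the callback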

+ 33 - 42
celery/tests/test_worker_job.py

@@ -1,17 +1,17 @@
 # -*- coding: utf-8 -*-
 import sys
 import unittest
-from celery.execute import ExecuteWrapper
-from celery.worker.job import TaskWrapper
+from celery.worker.job import WorkerTaskTrace, TaskWrapper
 from celery.datastructures import ExceptionInfo
 from celery.models import TaskMeta
 from celery.registry import tasks, NotRegistered
-from celery.pool import TaskPool
+from celery.worker.pool import TaskPool
 from celery.utils import gen_unique_id
 from carrot.backends.base import BaseMessage
 from StringIO import StringIO
 from celery.log import setup_logger
 from django.core import cache
+from celery.decorators import task as task_dec
 import simplejson
 import logging
 
@@ -19,35 +19,36 @@ scratch = {"ACK": False}
 some_kwargs_scratchpad = {}
 
 
-def jail(task_id, task_name, fun, args, kwargs):
-    return ExecuteWrapper(fun, task_id, task_name, args, kwargs)()
+def jail(task_id, task_name, args, kwargs):
+    return WorkerTaskTrace(task_name, task_id, args, kwargs)()
 
 
 def on_ack():
     scratch["ACK"] = True
 
 
+@task_dec()
 def mytask(i, **kwargs):
     return i ** i
-tasks.register(mytask, name="cu.mytask")
 
 
+@task_dec()
 def mytask_no_kwargs(i):
     return i ** i
-tasks.register(mytask_no_kwargs, name="mytask_no_kwargs")
 
 
+@task_dec()
 def mytask_some_kwargs(i, logfile):
     some_kwargs_scratchpad["logfile"] = logfile
     return i ** i
-tasks.register(mytask_some_kwargs, name="mytask_some_kwargs")
 
 
+@task_dec()
 def mytask_raising(i, **kwargs):
     raise KeyError(i)
-tasks.register(mytask_raising, name="cu.mytask-raising")
 
 
+@task_dec()
 def get_db_connection(i, **kwargs):
     from django.db import connection
     return id(connection)
@@ -57,11 +58,12 @@ get_db_connection.ignore_result = True
 class TestJail(unittest.TestCase):
 
     def test_execute_jail_success(self):
-        ret = jail(gen_unique_id(), gen_unique_id(), mytask, [2], {})
+        ret = jail(gen_unique_id(), mytask.name, [2], {})
         self.assertEquals(ret, 4)
 
     def test_execute_jail_failure(self):
-        ret = jail(gen_unique_id(), gen_unique_id(), mytask_raising, [4], {})
+        ret = jail(gen_unique_id(), mytask_raising.name,
+                   [4], {})
         self.assertTrue(isinstance(ret, ExceptionInfo))
         self.assertEquals(ret.exception.args, (4, ))
 
@@ -76,8 +78,8 @@ class TestJail(unittest.TestCase):
 
         connection.close = monkeypatched_connection_close
 
-        ret = jail(gen_unique_id(), gen_unique_id(),
-                   get_db_connection, [2], {})
+        ret = jail(gen_unique_id(),
+                   get_db_connection.name, [2], {})
         self.assertTrue(connection._was_closed)
 
         connection.close = old_connection_close
@@ -96,7 +98,7 @@ class TestJail(unittest.TestCase):
 
         cache.cache.close = monkeypatched_cache_close
 
-        jail(gen_unique_id(), gen_unique_id(), mytask, [4], {})
+        jail(gen_unique_id(), mytask.name, [4], {})
         self.assertTrue(cache._was_closed)
         cache.cache.close = old_cache_close
         cache.settings.CACHE_BACKEND = old_backend
@@ -116,7 +118,7 @@ class TestJail(unittest.TestCase):
 
         cache.cache.close = monkeypatched_cache_close
 
-        jail(gen_unique_id(), gen_unique_id(), mytask, [4], {})
+        jail(gen_unique_id(), mytask.name, [4], {})
         self.assertTrue(cache._was_closed)
         cache.cache.close = old_cache_close
         cache.settings.CACHE_BACKEND = old_backend
@@ -128,19 +130,12 @@ class TestJail(unittest.TestCase):
 
 class TestTaskWrapper(unittest.TestCase):
 
-    def test_task_wrapper_attrs(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(),
-                         mytask, [1], {"f": "x"})
-        for attr in ("task_name", "task_id", "args", "kwargs", "logger"):
-            self.assertTrue(getattr(tw, attr, None))
-
     def test_task_wrapper_repr(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(),
-                         mytask, [1], {"f": "x"})
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [1], {"f": "x"})
         self.assertTrue(repr(tw))
 
     def test_task_wrapper_mail_attrs(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(), mytask, [], {})
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [], {})
         x = tw.success_msg % {"name": tw.task_name,
                               "id": tw.task_id,
                               "return_value": 10}
@@ -157,7 +152,7 @@ class TestTaskWrapper(unittest.TestCase):
         self.assertTrue(x)
 
     def test_from_message(self):
-        body = {"task": "cu.mytask", "id": gen_unique_id(),
+        body = {"task": mytask.name, "id": gen_unique_id(),
                 "args": [2], "kwargs": {u"æØåveéðƒeæ": "bar"}}
         m = BaseMessage(body=simplejson.dumps(body), backend="foo",
                         content_type="application/json",
@@ -170,7 +165,6 @@ class TestTaskWrapper(unittest.TestCase):
         self.assertEquals(tw.kwargs.keys()[0],
                           u"æØåveéðƒeæ".encode("utf-8"))
         self.assertFalse(isinstance(tw.kwargs.keys()[0], unicode))
-        self.assertEquals(id(mytask), id(tw.task_func))
         self.assertTrue(tw.logger)
 
     def test_from_message_nonexistant_task(self):
@@ -184,45 +178,42 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_execute(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_success_no_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask_no_kwargs", tid, mytask_no_kwargs,
-                         [4], {})
+        tw = TaskWrapper(mytask_no_kwargs.name, tid, [4], {})
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_success_some_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask_some_kwargs", tid, mytask_some_kwargs,
-                         [4], {})
+        tw = TaskWrapper(mytask_some_kwargs.name, tid, [4], {})
         self.assertEquals(tw.execute(logfile="foobaz.log"), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(some_kwargs_scratchpad.get("logfile"), "foobaz.log")
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_ack(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"},
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"},
                         on_ack=on_ack)
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertTrue(scratch["ACK"])
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_fail(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask-raising", tid, mytask_raising, [4],
-                         {"f": "x"})
+        tw = TaskWrapper(mytask_raising.name, tid, [4], {"f": "x"})
         self.assertTrue(isinstance(tw.execute(), ExceptionInfo))
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.status, "FAILURE")
@@ -230,7 +221,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_execute_using_pool(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         p = TaskPool(2)
         p.start()
         asyncres = tw.execute_using_pool(p)
@@ -239,7 +230,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_default_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         self.assertEquals(tw.extend_with_default_kwargs(10, "some_logfile"), {
             "f": "x",
             "logfile": "some_logfile",
@@ -250,7 +241,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_on_failure(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         try:
             raise Exception("Inside unit tests")
         except Exception:
@@ -265,7 +256,7 @@ class TestTaskWrapper(unittest.TestCase):
 
         tw.on_failure(exc_info)
         logvalue = logfh.getvalue()
-        self.assertTrue("cu.mytask" in logvalue)
+        self.assertTrue(mytask.name in logvalue)
         self.assertTrue(tid in logvalue)
         self.assertTrue("ERROR" in logvalue)
 

+ 37 - 1
celery/tests/utils.py

@@ -1,11 +1,47 @@
 from __future__ import with_statement
 from contextlib import contextmanager
 from StringIO import StringIO
-import os
+from functools import wraps
 import sys
 import __builtin__
 
 
+def _skip_test(reason, sign):
+
+    def _wrap_test(fun):
+
+        @wraps(fun)
+        def _skipped_test(*args, **kwargs):
+            sys.stderr.write("(%s: %s) " % (sign, reason))
+
+        return _skipped_test
+    return _wrap_test
+
+
+def todo(reason):
+    """TODO test decorator."""
+    return _skip_test(reason, "TODO")
+
+
+def skip(reason):
+    """Skip test decorator."""
+    return _skip_test(reason, "SKIP")
+
+
+def skip_if(predicate, reason):
+    """Skip test if predicate is ``True``."""
+
+    def _inner(fun):
+        return skip(reason)(fun) if predicate else fun
+
+    return _inner
+
+
+def skip_unless(predicate, reason):
+    """Skip test if predicate is ``False``."""
+    return skip_if(not predicate, reason)
+
+
 @contextmanager
 def mask_modules(*modnames):
     """Ban some modules from being importable inside the context

+ 1 - 0
celery/urls.py

@@ -4,6 +4,7 @@ URLs defined for celery.
 
 """
 from django.conf.urls.defaults import patterns, url
+
 from celery import views
 
 

+ 10 - 5
celery/utils/__init__.py → celery/utils.py

@@ -4,15 +4,16 @@ Utility functions
 
 """
 import time
-from itertools import repeat
-from inspect import getargspec
-from uuid import UUID, uuid4, _uuid_generate_random
-from celery.utils.functional import curry
 import operator
 try:
     import ctypes
 except ImportError:
     ctypes = None
+from uuid import UUID, uuid4, _uuid_generate_random
+from inspect import getargspec
+from itertools import repeat
+
+from billiard.utils.functional import curry
 
 noop = lambda *args, **kwargs: None
 
@@ -124,6 +125,9 @@ def fun_takes_kwargs(fun, kwlist=[]):
     """With a function, and a list of keyword arguments, returns arguments
     in the list which the function takes.
 
+    If the object has an ``argspec`` attribute, that is used instead
+    of introspecting the function with :meth:`inspect.getargspec`.
+
     :param fun: The function to inspect arguments of.
     :param kwlist: The list of keyword arguments.
 
@@ -139,7 +143,8 @@ def fun_takes_kwargs(fun, kwlist=[]):
         ["logfile", "loglevel", "task_id"]
 
     """
-    args, _varargs, keywords, _defaults = getargspec(fun)
+    argspec = getattr(fun, "argspec", getargspec(fun))
+    args, _varargs, keywords, _defaults = argspec
     if keywords != None:
         return kwlist
     return filter(curry(operator.contains, args), kwlist)
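
The ``argspec`` shortcut lets callers cache the introspection result on the function object itself. A hedged example of what that looks like (the function is made up)::

    from inspect import getargspec

    from celery.utils import fun_takes_kwargs


    def handler(loglevel=None, task_id=None):
        pass

    # Attach a pre-computed argspec so fun_takes_kwargs() does not have
    # to call getargspec() on every invocation.
    handler.argspec = getargspec(handler)

    accepted = fun_takes_kwargs(handler, ["logfile", "loglevel", "task_id"])
    assert accepted == ["loglevel", "task_id"]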

+ 0 - 16
celery/utils/functional.py

@@ -1,16 +0,0 @@
-"""Functional utilities for Python 2.4 compatability."""
-
-
-def _compat_curry(fun, *args, **kwargs):
-    """New function with partial application of the given arguments
-    and keywords."""
-
-    def _curried(*addargs, **addkwargs):
-        return fun(*(args+addargs), **dict(kwargs, **addkwargs))
-    return _curried
-
-
-try:
-    from functools import partial as curry
-except ImportError:
-    curry = _compat_curry

+ 7 - 4
celery/views.py

@@ -1,8 +1,9 @@
 """celery.views"""
+from anyjson import serialize as JSON_dump
 from django.http import HttpResponse, Http404
-from celery.task import tasks, is_done, apply_async
+
+from celery.task import tasks, is_successful, apply_async
 from celery.result import AsyncResult
-from anyjson import serialize as JSON_dump
 
 
 def apply(request, task_name, *args):
@@ -28,10 +29,12 @@ def apply(request, task_name, *args):
     return HttpResponse(JSON_dump(response_data), mimetype="application/json")
 
 
-def is_task_done(request, task_id):
+def is_task_successful(request, task_id):
     """Returns task execute status in JSON format."""
-    response_data = {"task": {"id": task_id, "executed": is_done(task_id)}}
+    response_data = {"task": {"id": task_id,
+                              "executed": is_successful(task_id)}}
     return HttpResponse(JSON_dump(response_data), mimetype="application/json")
+is_task_done = is_task_successful # Backward compatible
 
 
 def task_status(request, task_id):

+ 72 - 52
celery/worker/__init__.py

@@ -3,38 +3,45 @@
 The Multiprocessing Worker Server
 
 """
+import traceback
+import logging
+import socket
+from Queue import Queue
+from datetime import datetime
+
+from dateutil.parser import parse as parse_iso8601
 from carrot.connection import DjangoBrokerConnection, AMQPConnectionException
-from celery.worker.controllers import Mediator, PeriodicWorkController
-from celery.worker.job import TaskWrapper
-from celery.exceptions import NotRegistered
-from celery.messaging import get_consumer_set
-from celery.conf import DAEMON_CONCURRENCY, DAEMON_LOG_FILE
-from celery.conf import AMQP_CONNECTION_RETRY, AMQP_CONNECTION_MAX_RETRIES
+
+from celery import conf
+from celery import registry
 from celery.log import setup_logger
-from celery.pool import TaskPool
+from celery.beat import ClockServiceThread
 from celery.utils import retry_over_time
+from celery.worker.pool import TaskPool
+from celery.worker.job import TaskWrapper
+from celery.worker.scheduler import Scheduler
+from celery.worker.controllers import Mediator, ScheduleController
+from celery.worker.buckets import TaskBucket
+from celery.messaging import get_consumer_set
+from celery.exceptions import NotRegistered
 from celery.datastructures import SharedCounter
 from celery.events import EventDispatcher
-from Queue import Queue
-import traceback
-import logging
-import socket
 
 
-class AMQPListener(object):
+class CarrotListener(object):
     """Listen for messages received from the AMQP broker and
     move them to the bucket queue for task processing.
 
-    :param bucket_queue: See :attr:`bucket_queue`.
-    :param hold_queue: See :attr:`hold_queue`.
+    :param ready_queue: See :attr:`ready_queue`.
+    :param eta_scheduler: See :attr:`eta_scheduler`.
 
-    .. attribute:: bucket_queue
+    .. attribute:: ready_queue
 
         The queue that holds tasks ready for processing immediately.
 
-    .. attribute:: hold_queue
+    .. attribute:: eta_scheduler
 
-        The queue that holds paused tasks. Reasons for being paused include
+        Scheduler for paused tasks. Reasons for being paused include
         a countdown/eta or that it's waiting for retry.
 
     .. attribute:: logger
@@ -43,12 +50,12 @@ class AMQPListener(object):
 
     """
 
-    def __init__(self, bucket_queue, hold_queue, logger,
+    def __init__(self, ready_queue, eta_scheduler, logger,
             initial_prefetch_count=2):
         self.amqp_connection = None
         self.task_consumer = None
-        self.bucket_queue = bucket_queue
-        self.hold_queue = hold_queue
+        self.ready_queue = ready_queue
+        self.eta_scheduler = eta_scheduler
         self.logger = logger
         self.prefetch_count = SharedCounter(initial_prefetch_count)
         self.event_dispatcher = None
@@ -66,17 +73,17 @@ class AMQPListener(object):
             try:
                 self.consume_messages()
             except (socket.error, AMQPConnectionException, IOError):
-                self.logger.error("AMQPListener: Connection to broker lost. "
-                                + "Trying to re-establish connection...")
+                self.logger.error("CarrotListener: Connection to broker lost."
+                                + " Trying to re-establish connection...")
 
     def consume_messages(self):
         """Consume messages forever (or until an exception is raised)."""
         task_consumer = self.task_consumer
 
-        self.logger.debug("AMQPListener: Starting message consumer...")
+        self.logger.debug("CarrotListener: Starting message consumer...")
         it = task_consumer.iterconsume(limit=None)
 
-        self.logger.debug("AMQPListener: Ready to accept tasks!")
+        self.logger.debug("CarrotListener: Ready to accept tasks!")
 
         while True:
             self.task_consumer.qos(prefetch_count=int(self.prefetch_count))
@@ -107,14 +114,18 @@ class AMQPListener(object):
         self.event_dispatcher.send("task-received", **message_data)
 
         if eta:
+            if not isinstance(eta, datetime):
+                eta = parse_iso8601(eta)
             self.prefetch_count.increment()
             self.logger.info("Got task from broker: %s[%s] eta:[%s]" % (
                     task.task_name, task.task_id, eta))
-            self.hold_queue.put((task, eta, self.prefetch_count.decrement))
+            self.eta_scheduler.enter(task,
+                                     eta=eta,
+                                     callback=self.prefetch_count.decrement)
         else:
             self.logger.info("Got task from broker: %s[%s]" % (
                     task.task_name, task.task_id))
-            self.bucket_queue.put(task)
+            self.ready_queue.put(task)
 
     def close_connection(self):
         """Close the AMQP connection."""
@@ -123,7 +134,7 @@ class AMQPListener(object):
             self.task_consumer = None
         if self.amqp_connection:
             self.logger.debug(
-                    "AMQPListener: Closing connection to the broker...")
+                    "CarrotListener: Closing connection to the broker...")
             self.amqp_connection.close()
             self.amqp_connection = None
         self.event_dispatcher = None
@@ -136,7 +147,7 @@ class AMQPListener(object):
 
         """
         self.logger.debug(
-                "AMQPListener: Re-establishing connection to the broker...")
+                "CarrotListener: Re-establishing connection to the broker...")
         self.close_connection()
         self.amqp_connection = self._open_connection()
         self.task_consumer = get_consumer_set(connection=self.amqp_connection)
@@ -161,13 +172,13 @@ class AMQPListener(object):
             connected = conn.connection # Connection is established lazily.
             return conn
 
-        if not AMQP_CONNECTION_RETRY:
+        if not conf.AMQP_CONNECTION_RETRY:
             return _establish_connection()
 
         conn = retry_over_time(_establish_connection, (socket.error, IOError),
                                errback=_connection_error_handler,
-                               max_retries=AMQP_CONNECTION_MAX_RETRIES)
-        self.logger.debug("AMQPListener: Connection Established.")
+                               max_retries=conf.AMQP_CONNECTION_MAX_RETRIES)
+        self.logger.debug("CarrotListener: Connection Established.")
         return conn
 
 
@@ -205,7 +216,7 @@ class WorkController(object):
 
         The :class:`multiprocessing.Pool` instance used.
 
-    .. attribute:: bucket_queue
+    .. attribute:: ready_queue
 
         The :class:`Queue.Queue` that holds tasks ready for immediate
         processing.
@@ -216,26 +227,26 @@ class WorkController(object):
         back the task include waiting for ``eta`` to pass or the task is being
         retried.
 
-    .. attribute:: periodic_work_controller
+    .. attribute:: schedule_controller
 
-        Instance of :class:`celery.worker.controllers.PeriodicWorkController`.
+        Instance of :class:`celery.worker.controllers.ScheduleController`.
 
     .. attribute:: mediator
 
         Instance of :class:`celery.worker.controllers.Mediator`.
 
-    .. attribute:: amqp_listener
+    .. attribute:: broker_listener
 
-        Instance of :class:`AMQPListener`.
+        Instance of :class:`CarrotListener`.
 
     """
     loglevel = logging.ERROR
-    concurrency = DAEMON_CONCURRENCY
-    logfile = DAEMON_LOG_FILE
+    concurrency = conf.DAEMON_CONCURRENCY
+    logfile = conf.DAEMON_LOG_FILE
     _state = None
 
     def __init__(self, concurrency=None, logfile=None, loglevel=None,
-            is_detached=False):
+            is_detached=False, embed_clockservice=False):
 
         # Options
         self.loglevel = loglevel or self.loglevel
@@ -243,30 +254,39 @@ class WorkController(object):
         self.logfile = logfile or self.logfile
         self.is_detached = is_detached
         self.logger = setup_logger(loglevel, logfile)
+        self.embed_clockservice = embed_clockservice
 
         # Queues
-        self.bucket_queue = Queue()
-        self.hold_queue = Queue()
+        if conf.DISABLE_RATE_LIMITS:
+            self.ready_queue = Queue()
+        else:
+            self.ready_queue = TaskBucket(task_registry=registry.tasks)
+        self.eta_scheduler = Scheduler(self.ready_queue)
 
         self.logger.debug("Instantiating thread components...")
 
         # Threads+Pool
-        self.periodic_work_controller = PeriodicWorkController(
-                                                    self.bucket_queue,
-                                                    self.hold_queue)
+        self.schedule_controller = ScheduleController(self.eta_scheduler)
         self.pool = TaskPool(self.concurrency, logger=self.logger)
-        self.amqp_listener = AMQPListener(self.bucket_queue, self.hold_queue,
-                                          logger=self.logger,
-                                          initial_prefetch_count=concurrency)
-        self.mediator = Mediator(self.bucket_queue, self.safe_process_task)
+        self.broker_listener = CarrotListener(self.ready_queue,
+                                        self.eta_scheduler,
+                                        logger=self.logger,
+                                        initial_prefetch_count=concurrency)
+        self.mediator = Mediator(self.ready_queue, self.safe_process_task)
+
+        self.clockservice = None
+        if self.embed_clockservice:
+            self.clockservice = ClockServiceThread(logger=self.logger,
+                                                is_detached=self.is_detached)
 
         # The order is important here;
         #   the first in the list is the first to start,
         # and they must be stopped in reverse order.
-        self.components = [self.pool,
-                           self.mediator,
-                           self.periodic_work_controller,
-                           self.amqp_listener]
+        self.components = filter(None, (self.pool,
+                                        self.mediator,
+                                        self.schedule_controller,
+                                        self.clockservice,
+                                        self.broker_listener))
 
     def start(self):
         """Starts the workers main loop."""

+ 299 - 0
celery/worker/buckets.py

@@ -0,0 +1,299 @@
+import time
+from Queue import Queue, Empty as QueueEmpty
+
+RATE_MODIFIER_MAP = {"s": lambda n: n,
+                     "m": lambda n: n / 60.0,
+                     "h": lambda n: n / 60.0 / 60.0}
+
+BASE_IDENTIFIERS = {"0x": 16,
+                    "0o": 8,
+                    "0b": 2}
+
+
+class RateLimitExceeded(Exception):
+    """The token buckets rate limit has been exceeded."""
+
+
+def parse_ratelimit_string(rate_limit):
+    """Parse rate limit configurations such as ``"100/m"`` or ``"2/h"``
+        and convert them to operations per second.
+
+    Returns ``0`` for no rate limit.
+
+    """
+
+    if rate_limit:
+        if isinstance(rate_limit, basestring):
+            base = BASE_IDENTIFIERS.get(rate_limit[:2], 10)
+            try:
+                return int(rate_limit, base)
+            except ValueError:
+                ops, _, modifier = rate_limit.partition("/")
+                return RATE_MODIFIER_MAP[modifier](int(ops, base)) or 0
+        return rate_limit or 0
+    return 0
+
+
+class TaskBucket(object):
+    """This is a collection of token buckets, each task type having
+    its own token bucket. If the task type doesn't have a rate limit,
+    it will have a plain Queue object instead of a token bucket queue.
+
+    The :meth:`put` operation forwards the task to its appropriate bucket,
+    while the :meth:`get` operation iterates over the buckets and retrieves
+    the first available item.
+
+    Say we have three types of tasks in the registry: ``celery.ping``,
+    ``feed.refresh`` and ``video.compress``; the TaskBucket will then
+    consist of the following items::
+
+        {"celery.ping": TokenBucketQueue(fill_rate=300),
+         "feed.refresh": Queue(),
+         "video.compress": TokenBucketQueue(fill_rate=2)}
+
+    The get operation will iterate over these until one of the buckets
+    is able to return an item. The underlying datastructure is a ``dict``,
+    so the order is ignored here.
+
+    :param task_registry: The task registry used to get the task
+        type class for a given task name.
+
+
+    """
+    min_wait = 0.0
+
+    def __init__(self, task_registry):
+        self.task_registry = task_registry
+        self.buckets = {}
+        self.init_with_registry()
+        self.immediate = Queue()
+
+    def put(self, job):
+        """Put a task into the appropiate bucket."""
+        self.buckets[job.task_name].put_nowait(job)
+    put_nowait = put
+
+    def _get(self):
+        # If the first bucket is always returning items, we would never
+        # get to fetch items from the other buckets. So we always iterate over
+        # all the buckets and put any ready items into a queue called
+        # "immediate". This queue is always checked for cached items first.
+        if self.immediate:
+            try:
+                return 0, self.immediate.get_nowait()
+            except QueueEmpty:
+                pass
+
+        remaining_times = []
+        for bucket in self.buckets.values():
+            remaining = bucket.expected_time()
+            if not remaining:
+                try:
+                    # Just put any ready items into the immediate queue.
+                    self.immediate.put_nowait(bucket.get_nowait())
+                except QueueEmpty:
+                    pass
+                except RateLimitExceeded:
+                    remaining_times.append(bucket.expected_time())
+            else:
+                remaining_times.append(remaining)
+
+        # Try the immediate queue again.
+        try:
+            return 0, self.immediate.get_nowait()
+        except QueueEmpty:
+            if not remaining_times:
+                # No items in any of the buckets.
+                raise
+
+            # There's items, but have to wait before we can retrieve them,
+            # return the shortest remaining time.
+            return min(remaining_times), None
+
+    def get(self, block=True, timeout=None):
+        """Retrive the task from the first available bucket.
+
+        Available as in, there is an item in the queue and you can
+        consume tokens from it.
+
+        """
+        time_start = time.time()
+        did_timeout = lambda: timeout and time.time() - time_start > timeout
+
+        while True:
+            remaining_time, item = self._get()
+            if remaining_time:
+                if not block or did_timeout():
+                    raise QueueEmpty
+                time.sleep(remaining_time)
+            else:
+                return item
+
+    def get_nowait(self):
+        return self.get(block=False)
+
+    def init_with_registry(self):
+        """Initialize with buckets for all the task types in the registry."""
+        map(self.add_bucket_for_type, self.task_registry.keys())
+
+    def get_bucket_for_type(self, task_name):
+        """Get the bucket for a particular task type."""
+        if task_name not in self.buckets:
+            return self.add_bucket_for_type(task_name)
+        return self.buckets[task_name]
+
+    def add_bucket_for_type(self, task_name):
+        """Add a bucket for a task type.
+
+        Will read the tasks rate limit and create a :class:`TokenBucketQueue`
+        if it has one. If the task doesn't have a rate limit a regular Queue
+        will be used.
+
+        """
+        if task_name in self.buckets:
+            return
+        task_type = self.task_registry[task_name]
+        task_queue = task_type.rate_limit_queue_type()
+        rate_limit = getattr(task_type, "rate_limit", None)
+        rate_limit = parse_ratelimit_string(rate_limit)
+        if rate_limit:
+            task_queue = TokenBucketQueue(rate_limit, queue=task_queue)
+        else:
+            task_queue.expected_time = lambda: 0
+
+        self.buckets[task_name] = task_queue
+        return task_queue
+
+    def qsize(self):
+        """Get the total size of all the queues."""
+        return sum(bucket.qsize() for bucket in self.buckets.values())
+
+    def empty(self):
+        return all(bucket.empty() for bucket in self.buckets.values())
+
+
+class TokenBucketQueue(object):
+    """Queue with rate limited get operations.
+
+    This uses the token bucket algorithm to rate limit the queue on get
+    operations.
+    See http://en.wikipedia.org/wiki/Token_Bucket
+    Most of this code was stolen from an entry in the ASPN Python Cookbook:
+    http://code.activestate.com/recipes/511490/
+
+    :param fill_rate: see :attr:`fill_rate`.
+    :keyword capacity: see :attr:`capacity`.
+
+    .. attribute:: fill_rate
+
+        The rate in tokens/second that the bucket will be refilled.
+
+    .. attribute:: capacity
+
+        Maximum number of tokens in the bucket. Default is ``1``.
+
+    .. attribute:: timestamp
+
+        Timestamp of the last time a token was taken out of the bucket.
+
+    """
+    RateLimitExceeded = RateLimitExceeded
+
+    def __init__(self, fill_rate, queue=None, capacity=1):
+        self.capacity = float(capacity)
+        self._tokens = self.capacity
+        self.queue = queue
+        if not self.queue:
+            self.queue = Queue()
+        self.fill_rate = float(fill_rate)
+        self.timestamp = time.time()
+
+    def put(self, item, block=True):
+        """Put an item into the queue.
+
+        Also see :meth:`Queue.Queue.put`.
+
+        """
+        put = self.queue.put if block else self.queue.put_nowait
+        put(item)
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        :raises Queue.Full: If a free slot is not immediately available.
+
+        Also see :meth:`Queue.Queue.put_nowait`
+
+        """
+        return self.put(item, block=False)
+
+    def get(self, block=True):
+        """Remove and return an item from the queue.
+
+        :raises RateLimitExceeded: If a token could not be consumed from the
+            token bucket (consuming from the queue too fast).
+        :raises Queue.Empty: If an item is not immediately available.
+
+        Also see :meth:`Queue.Queue.get`.
+
+        """
+        get = self.queue.get if block else self.queue.get_nowait
+
+        if not self.can_consume(1):
+            raise RateLimitExceeded
+
+        return get()
+
+    def get_nowait(self):
+        """Remove and return an item from the queue without blocking.
+
+        :raises RateLimitExceeded: If a token could not be consumed from the
+            token bucket (consuming from the queue too fast).
+        :raises Queue.Empty: If an item is not immediately available.
+
+        Also see :meth:`Queue.Queue.get_nowait`.
+
+        """
+        return self.get(block=False)
+
+    def qsize(self):
+        """Returns the size of the queue.
+
+        See :meth:`Queue.Queue.qsize`.
+
+        """
+        return self.queue.qsize()
+
+    def empty(self):
+        return self.queue.empty()
+
+    def wait(self, block=False):
+        """Wait until a token can be retrieved from the bucket and return
+        the next item."""
+        while True:
+            remaining = self.expected_time()
+            if not remaining:
+                return self.get(block=block)
+            time.sleep(remaining)
+
+    def can_consume(self, tokens=1):
+        """Consume tokens from the bucket. Returns True if there were
+        sufficient tokens otherwise False."""
+        if tokens <= self._get_tokens():
+            self._tokens -= tokens
+            return True
+        return False
+
+    def expected_time(self, tokens=1):
+        """Returns the expected time in seconds when a new token should be
+        available."""
+        tokens = max(tokens, self._get_tokens())
+        return (tokens - self._get_tokens()) / self.fill_rate
+
+    def _get_tokens(self):
+        if self._tokens < self.capacity:
+            now = time.time()
+            delta = self.fill_rate * (now - self.timestamp)
+            self._tokens = min(self.capacity, self._tokens + delta)
+            self.timestamp = now
+        return self._tokens
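
To make the rate-limit mechanics concrete, a short sketch of the token bucket on its own (the job strings are placeholders)::

    from celery.worker.buckets import TokenBucketQueue, RateLimitExceeded
    from celery.worker.buckets import parse_ratelimit_string

    # "10/m" is ten operations per minute, i.e. a fill rate of roughly
    # one token every six seconds.
    fill_rate = parse_ratelimit_string("10/m")

    bucket = TokenBucketQueue(fill_rate)
    bucket.put("job-1")
    bucket.put("job-2")

    first = bucket.get_nowait()      # consumes the one available token
    try:
        bucket.get_nowait()          # no tokens left yet
    except RateLimitExceeded:
        pass                         # expected until the bucket refills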

+ 30 - 72
celery/worker/controllers.py

@@ -3,13 +3,12 @@
 Worker Controller Threads
 
 """
-from celery.backends import default_periodic_status_backend
+import time
+import threading
 from Queue import Empty as QueueEmpty
 from datetime import datetime
+
 from celery.log import get_default_logger
-import traceback
-import threading
-import time
 
 
 class BackgroundThread(threading.Thread):
@@ -67,100 +66,59 @@ class BackgroundThread(threading.Thread):
 class Mediator(BackgroundThread):
     """Thread continuously sending tasks in the queue to the pool.
 
-    .. attribute:: bucket_queue
+    .. attribute:: ready_queue
 
         The task queue, a :class:`Queue.Queue` instance.
 
     .. attribute:: callback
 
         The callback used to process tasks retrieved from the
-        :attr:`bucket_queue`.
+        :attr:`ready_queue`.
 
     """
 
-    def __init__(self, bucket_queue, callback):
+    def __init__(self, ready_queue, callback):
         super(Mediator, self).__init__()
-        self.bucket_queue = bucket_queue
+        self.ready_queue = ready_queue
         self.callback = callback
 
     def on_iteration(self):
         """Get tasks from bucket queue and apply the task callback."""
         logger = get_default_logger()
         try:
-            logger.debug("Mediator: Trying to get message from bucket_queue")
             # This blocks until there's a message in the queue.
-            task = self.bucket_queue.get(timeout=1)
+            task = self.ready_queue.get(timeout=1)
         except QueueEmpty:
-            logger.debug("Mediator: Bucket queue is empty.")
+            time.sleep(1)
         else:
             logger.debug("Mediator: Running callback for task: %s[%s]" % (
                 task.task_name, task.task_id))
             self.callback(task)
 
 
-class PeriodicWorkController(BackgroundThread):
-    """A thread that continuously checks if there are
-    :class:`celery.task.PeriodicTask` tasks waiting for execution,
-    and executes them. It also finds tasks in the hold queue that is
-    ready for execution and moves them to the bucket queue.
+class ScheduleController(BackgroundThread):
+    """Schedules tasks with an ETA by moving them to the bucket queue."""
 
-    (Tasks in the hold queue are tasks waiting for retry, or with an
-    ``eta``/``countdown``.)
-
-    """
-
-    def __init__(self, bucket_queue, hold_queue):
-        super(PeriodicWorkController, self).__init__()
-        self.hold_queue = hold_queue
-        self.bucket_queue = bucket_queue
-
-    def on_start(self):
-        """Do backend-specific periodic task initialization."""
-        default_periodic_status_backend.init_periodic_tasks()
+    def __init__(self, eta_schedule):
+        super(ScheduleController, self).__init__()
+        self._scheduler = iter(eta_schedule)
+        self.iterations = 0
 
     def on_iteration(self):
-        """Run periodic tasks and process the hold queue."""
+        """Wake-up scheduler"""
         logger = get_default_logger()
-        logger.debug("PeriodicWorkController: Running periodic tasks...")
-        try:
-            self.run_periodic_tasks()
-        except Exception, exc:
-            logger.error(
-                "PeriodicWorkController got exception: %s\n%s" % (
-                    exc, traceback.format_exc()))
-        logger.debug("PeriodicWorkController: Processing hold queue...")
-        self.process_hold_queue()
-        logger.debug("PeriodicWorkController: Going to sleep...")
-        time.sleep(1)
-
-    def run_periodic_tasks(self):
-        logger = get_default_logger()
-        applied = default_periodic_status_backend.run_periodic_tasks()
-        for task, task_id in applied:
-            logger.debug(
-                "PeriodicWorkController: Periodic task %s applied (%s)" % (
-                    task.name, task_id))
-
-    def process_hold_queue(self):
-        """Finds paused tasks that are ready for execution and move
-        them to the :attr:`bucket_queue`."""
-        logger = get_default_logger()
-        try:
-            logger.debug(
-                "PeriodicWorkController: Getting next task from hold queue..")
-            task, eta, on_accept = self.hold_queue.get_nowait()
-        except QueueEmpty:
-            logger.debug("PeriodicWorkController: Hold queue is empty")
-            return
-
-        if datetime.now() >= eta:
-            logger.debug(
-                "PeriodicWorkController: Time to run %s[%s] (%s)..." % (
-                    task.task_name, task.task_id, eta))
-            on_accept() # Run the accept task callback.
-            self.bucket_queue.put(task)
-        else:
+        delay = self._scheduler.next()
+        debug_log = True
+        if delay is None:
+            delay = 1
+            if self.iterations == 10:
+                self.iterations = 0
+            else:
+                debug_log = False
+                self.iterations += 1
+        if debug_log:
+            logger.debug("ScheduleController: Scheduler wake-up")
             logger.debug(
-                "PeriodicWorkController: ETA not ready for %s[%s] (%s)..." % (
-                    task.task_name, task.task_id, eta))
-            self.hold_queue.put((task, eta, on_accept))
+                "ScheduleController: Next wake-up eta %s seconds..." % (
+                    delay))
+        time.sleep(delay)

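The rewritten ``Mediator`` above is now just a consumer loop: block on ``ready_queue`` for up to a second, idle briefly if nothing arrived, and otherwise hand the task to the pool callback; ``ScheduleController`` likewise only sleeps for whatever delay the scheduler iterator yields. A minimal stand-alone sketch of the mediator loop, with made-up names and an illustrative shutdown flag that the real class does not take::

    import time
    import threading
    from Queue import Queue, Empty as QueueEmpty   # "queue" on Python 3

    def mediator_loop(ready_queue, callback, shutdown):
        """Pull tasks off ready_queue and hand each one to callback."""
        while not shutdown.is_set():
            try:
                task = ready_queue.get(timeout=1)
            except QueueEmpty:
                time.sleep(1)    # nothing ready yet; nap before retrying
            else:
                callback(task)

    # Usage sketch: push one item through the loop, then stop.
    queue, stop = Queue(), threading.Event()
    queue.put("hypothetical-task")

    def handle(task):
        print("processing %r" % (task,))
        stop.set()               # stop after the first task for this demo

    mediator_loop(queue, handle, stop)
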
+ 111 - 26
celery/worker/job.py

@@ -3,13 +3,20 @@
 Jobs Executable by the Worker Server.
 
 """
+import sys
+import socket
+import warnings
+
+from django.core.mail import mail_admins
+
+from celery.log import get_default_logger
+from celery.utils import noop, fun_takes_kwargs
+from celery.loaders import current_loader
+from celery.execute import TaskTrace
 from celery.registry import tasks
 from celery.exceptions import NotRegistered
-from celery.execute import ExecuteWrapper
-from celery.utils import noop, fun_takes_kwargs
-from celery.log import get_default_logger
-from django.core.mail import mail_admins
-import socket
+from celery.monitoring import TaskTimerStats
+from celery.datastructures import ExceptionInfo
 
 # pep8.py borks on an inline signature separator and
 # says "trailing whitespace" ;)
@@ -35,6 +42,91 @@ class AlreadyExecutedError(Exception):
     world-wide state."""
 
 
+class WorkerTaskTrace(TaskTrace):
+    """Wraps the task in a jail, catches all exceptions, and
+    saves the status and result of the task execution to the task
+    meta backend.
+
+    If the call was successful, it saves the result to the task result
+    backend, and sets the task status to ``"SUCCESS"``.
+
+    If the call raises :exc:`celery.exceptions.RetryTaskError`, it extracts
+    the original exception, uses that as the result and sets the task status
+    to ``"RETRY"``.
+
+    If the call results in an exception, it saves the exception as the task
+    result, and sets the task status to ``"FAILURE"``.
+
+    :param task_name: The name of the task to execute.
+    :param task_id: The unique id of the task.
+    :param args: List of positional args to pass on to the function.
+    :param kwargs: Keyword arguments mapping to pass on to the function.
+
+    :returns: the function return value on success, or
+        the exception instance on failure.
+
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.loader = kwargs.pop("loader", current_loader)
+        super(WorkerTaskTrace, self).__init__(*args, **kwargs)
+
+    def execute_safe(self, *args, **kwargs):
+        try:
+            return self.execute(*args, **kwargs)
+        except Exception, exc:
+            type_, value_, tb = sys.exc_info()
+            exc = self.task.backend.prepare_exception(exc)
+            warnings.warn("Exception happend outside of task body: %s: %s" % (
+                str(exc.__class__), str(exc)))
+            return ExceptionInfo((type_, exc, tb))
+
+    def execute(self):
+        # Run task loader init handler.
+        self.loader.on_task_init(self.task_id, self.task)
+
+        # Backend process cleanup
+        self.task.backend.process_cleanup()
+
+        timer_stat = TaskTimerStats.start(self.task_id, self.task_name,
+                                          self.args, self.kwargs)
+        try:
+            return self._trace()
+        finally:
+            timer_stat.stop()
+
+    def handle_success(self, retval, *args):
+        """Handle successful execution.
+
+        Saves the result to the current result store (skipped if the task's
+        ``ignore_result`` attribute is set to ``True``).
+
+        """
+        if not self.task.ignore_result:
+            self.task.backend.mark_as_done(self.task_id, retval)
+        return super(WorkerTaskTrace, self).handle_success(retval, *args)
+
+    def handle_retry(self, exc, type_, tb, strtb):
+        """Handle retry exception."""
+        message, orig_exc = exc.args
+        self.task.backend.mark_as_retry(self.task_id, orig_exc, strtb)
+        return super(WorkerTaskTrace, self).handle_retry(exc, type_,
+                                                         tb, strtb)
+
+    def handle_failure(self, exc, type_, tb, strtb):
+        """Handle exception."""
+        # mark_as_failure returns an exception that is guaranteed to
+        # be pickleable.
+        stored_exc = self.task.backend.mark_as_failure(self.task_id,
+                                                       exc, strtb)
+        return super(WorkerTaskTrace, self).handle_failure(
+                stored_exc, type_, tb, strtb)
+
+
+def execute_and_trace(*args, **kwargs):
+    return WorkerTaskTrace(*args, **kwargs).execute_safe()
+
+
 class TaskWrapper(object):
     """Class wrapping a task to be passed around and finally
     executed inside of the worker.
@@ -43,8 +135,6 @@ class TaskWrapper(object):
 
     :param task_id: see :attr:`task_id`.
 
-    :param task_func: see :attr:`task_func`
-
     :param args: see :attr:`args`
 
     :param kwargs: see :attr:`kwargs`.
@@ -57,10 +147,6 @@ class TaskWrapper(object):
 
         UUID of the task.
 
-    .. attribute:: task_func
-
-        The tasks callable object.
-
     .. attribute:: args
 
         List of positional arguments to apply to the task.
@@ -88,11 +174,10 @@ class TaskWrapper(object):
     """
     fail_email_body = TASK_FAIL_EMAIL_BODY
 
-    def __init__(self, task_name, task_id, task_func, args, kwargs,
+    def __init__(self, task_name, task_id, args, kwargs,
             on_ack=noop, retries=0, **opts):
         self.task_name = task_name
         self.task_id = task_id
-        self.task_func = task_func
         self.retries = retries
         self.args = args
         self.kwargs = kwargs
@@ -104,6 +189,9 @@ class TaskWrapper(object):
             setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
         if not self.logger:
             self.logger = get_default_logger()
+        if self.task_name not in tasks:
+            raise NotRegistered(self.task_name)
+        self.task = tasks[self.task_name]
 
     def __repr__(self):
         return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (
@@ -132,10 +220,7 @@ class TaskWrapper(object):
         kwargs = dict((key.encode("utf-8"), value)
                         for key, value in kwargs.items())
 
-        if task_name not in tasks:
-            raise NotRegistered(task_name)
-        task_func = tasks[task_name]
-        return cls(task_name, task_id, task_func, args, kwargs,
+        return cls(task_name, task_id, args, kwargs,
                     retries=retries, on_ack=message.ack, logger=logger)
 
     def extend_with_default_kwargs(self, loglevel, logfile):
@@ -153,18 +238,17 @@ class TaskWrapper(object):
                             "task_id": self.task_id,
                             "task_name": self.task_name,
                             "task_retries": self.retries}
-        fun = getattr(self.task_func, "run", self.task_func)
+        fun = self.task.run
         supported_keys = fun_takes_kwargs(fun, default_kwargs)
         extend_with = dict((key, val) for key, val in default_kwargs.items()
                                 if key in supported_keys)
         kwargs.update(extend_with)
         return kwargs
 
-    def _executeable(self, loglevel=None, logfile=None):
-        """Get the :class:`celery.execute.ExecuteWrapper` for this task."""
+    def _get_tracer_args(self, loglevel=None, logfile=None):
+        """Get the :class:`WorkerTaskTrace` tracer for this task."""
         task_func_kwargs = self.extend_with_default_kwargs(loglevel, logfile)
-        return ExecuteWrapper(self.task_func, self.task_id, self.task_name,
-                              self.args, task_func_kwargs)
+        return self.task_name, self.task_id, self.args, task_func_kwargs
 
     def _set_executed_bit(self):
         """Set task as executed to make sure it's not executed again."""
@@ -175,7 +259,7 @@ class TaskWrapper(object):
         self.executed = True
 
     def execute(self, loglevel=None, logfile=None):
-        """Execute the task in a :class:`celery.execute.ExecuteWrapper`.
+        """Execute the task in a :class:`WorkerTaskTrace`.
 
         :keyword loglevel: The loglevel used by the task.
 
@@ -188,7 +272,8 @@ class TaskWrapper(object):
         # acknowledge task as being processed.
         self.on_ack()
 
-        return self._executeable(loglevel, logfile)()
+        tracer = WorkerTaskTrace(*self._get_tracer_args(loglevel, logfile))
+        return tracer.execute()
 
     def execute_using_pool(self, pool, loglevel=None, logfile=None):
         """Like :meth:`execute`, but using the :mod:`multiprocessing` pool.
@@ -205,8 +290,8 @@ class TaskWrapper(object):
         # Make sure task has not already been executed.
         self._set_executed_bit()
 
-        wrapper = self._executeable(loglevel, logfile)
-        return pool.apply_async(wrapper,
+        args = self._get_tracer_args(loglevel, logfile)
+        return pool.apply_async(execute_and_trace, args=args,
                 callbacks=[self.on_success], errbacks=[self.on_failure],
                 on_ack=self.on_ack)
 

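A key point in ``extend_with_default_kwargs`` above is that the worker only forwards the magic keyword arguments (``logfile``, ``loglevel``, ``task_id``, ``task_name``, ``task_retries``) that the task's ``run()`` signature can actually accept, as decided by ``fun_takes_kwargs``. A rough, self-contained sketch of that filtering; this approximates ``celery.utils.fun_takes_kwargs`` rather than reproducing its real implementation, and the task below is made up::

    import inspect

    def fun_takes_kwargs(fun, kwlist):
        """Approximation: the keywords from kwlist that fun can accept."""
        spec = inspect.getargspec(fun)   # removed in Python 3.11; fine for this era
        if spec.keywords:                # fun declares **kwargs: accepts everything
            return list(kwlist)
        return [kw for kw in kwlist if kw in spec.args]

    def add(x, y, task_id=None):         # hypothetical task body
        return x + y

    default_kwargs = {"logfile": None, "loglevel": 20,
                      "task_id": "hypothetical-id",
                      "task_name": "tasks.add", "task_retries": 0}

    supported = fun_takes_kwargs(add, default_kwargs)
    extend_with = dict((k, v) for k, v in default_kwargs.items()
                       if k in supported)
    print(extend_with)   # only {'task_id': 'hypothetical-id'} survives the filter
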
+ 93 - 0
celery/worker/pool.py

@@ -0,0 +1,93 @@
+"""
+
+Process Pools.
+
+"""
+from multiprocessing.util import get_logger
+
+from billiard.pool import DynamicPool
+from billiard.utils.functional import curry
+
+from celery.utils import noop
+from celery.datastructures import ExceptionInfo
+
+
+class TaskPool(object):
+    """Process Pool for processing tasks in parallel.
+
+    :param limit: see :attr:`limit` attribute.
+    :param logger: see :attr:`logger` attribute.
+
+
+    .. attribute:: limit
+
+        The number of processes that can run simultaneously.
+
+    .. attribute:: logger
+
+        The logger used for debugging.
+
+    """
+
+    def __init__(self, limit, logger=None):
+        self.limit = limit
+        self.logger = logger or get_logger()
+        self._pool = None
+
+    def start(self):
+        """Run the task pool.
+
+        Will pre-fork all workers so they're ready to accept tasks.
+
+        """
+        self._pool = DynamicPool(processes=self.limit)
+
+    def stop(self):
+        """Terminate the pool."""
+        self._pool.terminate()
+        self._pool = None
+
+    def replace_dead_workers(self):
+        self.logger.debug("TaskPool: Finding dead pool processes...")
+        dead_count = self._pool.replace_dead_workers()
+        if dead_count:
+            self.logger.info(
+                "TaskPool: Replaced %d dead pool workers..." % (
+                    dead_count))
+
+    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
+            errbacks=None, on_ack=noop):
+        """Equivalent of the :func:``apply`` built-in function.
+
+        All ``callbacks`` and ``errbacks`` should complete immediately since
+        otherwise the thread which handles the result will get blocked.
+
+        """
+        args = args or []
+        kwargs = kwargs or {}
+        callbacks = callbacks or []
+        errbacks = errbacks or []
+
+        on_ready = curry(self.on_ready, callbacks, errbacks, on_ack)
+
+        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
+            target, args, kwargs))
+
+        self.replace_dead_workers()
+
+        return self._pool.apply_async(target, args, kwargs,
+                                        callback=on_ready)
+
+    def on_ready(self, callbacks, errbacks, on_ack, ret_value):
+        """What to do when a worker task is ready and its return value has
+        been collected."""
+        # Acknowledge the task as being processed.
+        on_ack()
+
+        if isinstance(ret_value, ExceptionInfo):
+            if isinstance(ret_value.exception, (
+                    SystemExit, KeyboardInterrupt)):
+                raise ret_value.exception
+            [errback(ret_value) for errback in errbacks]
+        else:
+            [callback(ret_value) for callback in callbacks]

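``apply_async`` above curries ``on_ready`` with the task's callbacks, errbacks and ack function, so the pool only has to invoke a single-argument result handler. A stand-alone sketch of the dispatch performed by ``on_ready``; the tiny ``ExceptionInfo`` stand-in is illustrative, the real class lives in ``celery.datastructures``::

    class ExceptionInfo(object):
        """Minimal stand-in for celery.datastructures.ExceptionInfo."""
        def __init__(self, exception):
            self.exception = exception

    def on_ready(callbacks, errbacks, on_ack, ret_value):
        """Ack the message, then route the result to callbacks or errbacks."""
        on_ack()
        if isinstance(ret_value, ExceptionInfo):
            if isinstance(ret_value.exception, (SystemExit, KeyboardInterrupt)):
                raise ret_value.exception   # let shutdown requests propagate
            for errback in errbacks:
                errback(ret_value)
        else:
            for callback in callbacks:
                callback(ret_value)

    def on_success(result):
        print("ok: %r" % (result,))

    def on_failure(exc_info):
        print("failed: %r" % (exc_info.exception,))

    # A plain return value goes to the callbacks...
    on_ready([on_success], [], lambda: None, 42)
    # ...while an ExceptionInfo goes to the errbacks instead.
    on_ready([], [on_failure], lambda: None, ExceptionInfo(ValueError("boom")))
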
+ 50 - 0
celery/worker/scheduler.py

@@ -0,0 +1,50 @@
+import time
+import heapq
+
+
+class Scheduler(object):
+
+    def __init__(self, ready_queue):
+        self.ready_queue = ready_queue
+        self._queue = []
+
+    def enter(self, item, eta=None, priority=0, callback=None):
+        eta = time.mktime(eta.timetuple()) if eta else time.time()
+        heapq.heappush(self._queue, (eta, priority, item, callback))
+
+    def __iter__(self):
+        """The iterator yields the time to sleep for between runs."""
+
+        # localize variable access
+        q = self._queue
+        nowfun = time.time
+        pop = heapq.heappop
+        ready_queue = self.ready_queue
+
+        while True:
+            if q:
+                eta, priority, item, callback = verify = q[0]
+                now = nowfun()
+
+                if now < eta:
+                    yield eta - now
+                else:
+                    event = pop(q)
+                    print("eta->%s priority->%s item->%s" % (
+                        eta, priority, item))
+
+                    if event is verify:
+                        ready_queue.put(item)
+                        callback and callback()
+                        yield 0
+                    else:
+                        heapq.heappush(q, event)
+            yield None
+
+    def empty(self):
+        return not self._queue
+
+    @property
+    def queue(self):
+        events = list(self._queue)
+        return map(heapq.heappop, [events]*len(events))

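``Scheduler`` keeps a heap of ``(eta, priority, item, callback)`` tuples; iterating it moves due items onto ``ready_queue`` (yielding ``0``), yields the seconds remaining until the next ETA, or yields ``None`` when there is nothing to report, and ``ScheduleController`` simply sleeps for whatever it gets back. A small usage sketch, assuming the module above is importable as ``celery.worker.scheduler`` and using made-up task names::

    from datetime import datetime, timedelta
    from Queue import Queue                   # "queue" on Python 3

    from celery.worker.scheduler import Scheduler

    ready_queue = Queue()
    schedule = Scheduler(ready_queue)

    schedule.enter("task-due-now")            # eta defaults to "right now"
    schedule.enter("task-due-later",
                   eta=datetime.now() + timedelta(seconds=2))

    waker = iter(schedule)
    print(next(waker))                 # 0: "task-due-now" moved to ready_queue
    print(ready_queue.get_nowait())    # -> task-due-now
    next(waker)                        # None: end of this sweep over the heap
    print(next(waker))                 # seconds left until "task-due-later" is due
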
+ 158 - 0
contrib/debian/init.d/celerybeat

@@ -0,0 +1,158 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:		celerybeat
+# Required-Start:	
+# Required-Stop:	
+# Default-Start:	2 3 4 5
+# Default-Stop:		1
+# Short-Description:	celery periodic task scheduler
+### END INIT INFO
+
+# To use this with Django set your DJANGO_PROJECT_DIR in /etc/default/celeryd:
+#
+#   echo "DJANGO_PROJECT_DIR=/opt/Myapp" > /etc/default/celeryd
+#
+# The django project dir is the directory that contains settings and
+# manage.py.
+
+# celerybeat uses the celeryd default file by default, but you can also
+# add a /etc/default/celerybeat file to set different settings for celerybeat.
+
+set -e
+
+DJANGO_SETTINGS_MODULE=settings
+CELERYBEAT_PID_FILE="/var/run/celerybeat.pid"
+CELERYBEAT_LOG_FILE="/var/log/celerybeat.log"
+CELERYBEAT_LOG_LEVEL="INFO"
+DEFAULT_CELERYBEAT="celerybeat"
+
+# /etc/init.d/celerybeat: start and stop the celery periodic task scheduler daemon.
+
+if test -f /etc/default/celeryd; then
+    . /etc/default/celeryd
+fi
+if test -f /etc/default/celerybeat; then
+    . /etc/default/celerybeat
+fi
+
+export DJANGO_SETTINGS_MODULE
+export DJANGO_PROJECT_DIR
+
+if [ -z "$CELERYBEAT" ]; then
+    if [ ! -z "$DJANGO_PROJECT_DIR" ]; then
+        CELERYBEAT="$DJANGO_PROJECT_DIR/manage.py"
+        CELERYBEAT_OPTS="celerybeat"
+    else
+        CELERYBEAT=$DEFAULT_CELERYBEAT
+    fi
+fi
+
+. /lib/lsb/init-functions
+
+cd $DJANGO_PROJECT_DIR
+
+CELERYBEAT_OPTS="$CELERYBEAT_OPTS -f $CELERYBEAT_LOG_FILE -l $CELERYBEAT_LOG_LEVEL -p \
+                    $CELERYBEAT_PID_FILE -d"
+
+if [ -n "$2" ]; then
+    CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2"
+fi
+
+# Are we running from init?
+run_by_init() {
+    ([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
+}
+
+
+check_dev_null() {
+    if [ ! -c /dev/null ]; then
+	if [ "$1" = log_end_msg ]; then
+	    log_end_msg 1 || true
+	fi
+	if ! run_by_init; then
+	    log_action_msg "/dev/null is not a character device!"
+	fi
+	exit 1
+    fi
+}
+
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
+if [ ! -z "$VIRTUALENV" ]; then
+    export PATH="$VIRTUALENV/bin:$PATH"
+    CELERYBEAT="$VIRTUALENV/bin/$CELERYBEAT"
+fi
+
+
+case "$1" in
+  start)
+    check_dev_null
+    log_daemon_msg "Starting celery periodic task scheduler" "celerybeat"
+    if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYBEAT_PID_FILE --exec $CELERYBEAT -- $CELERYBEAT_OPTS; then
+        log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
+  stop)
+    log_daemon_msg "Stopping celery periodic task scheduler" "celerybeat"
+    if start-stop-daemon --stop --quiet --oknodo --pidfile $CELERYBEAT_PID_FILE; then
+        log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
+
+  reload|force-reload)
+    echo "Use start+stop"
+    ;;
+
+  restart)
+    log_daemon_msg "Restarting celery periodic task scheduler" "celerybeat"
+    start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile $CELERYBEAT_PID_FILE
+    check_dev_null log_end_msg
+    if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYBEAT_PID_FILE --exec $CELERYBEAT -- $CELERYBEAT_OPTS; then
+        log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
+
+  try-restart)
+    log_daemon_msg "Restarting celery periodic task scheduler" "celerybeat"
+    set +e
+    start-stop-daemon --stop --quiet --retry 30 --pidfile $CELERYBEAT_PID_FILE
+    RET="$?"
+    set -e
+    case $RET in
+        0)
+		# old daemon stopped
+		check_dev_null log_end_msg
+		if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYBEAT_PID_FILE --exec $CELERYBEAT -- $CELERYBEAT_OPTS; then
+		    log_end_msg 0
+		else
+		    log_end_msg 1
+		fi
+		;;
+	    1)
+		# daemon not running
+		log_progress_msg "(not running)"
+		log_end_msg 0
+		;;
+	    *)
+		# failed to stop
+		log_progress_msg "(failed to stop)"
+		log_end_msg 1
+		;;
+	esac
+	;;
+
+  status)
+	status_of_proc -p $CELERYBEAT_PID_FILE $CELERYBEAT celerybeat && exit 0 || exit $?
+	;;
+
+  *)
+	log_action_msg "Usage: /etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status}"
+	exit 1
+esac
+
+exit 0

+ 51 - 39
contrib/debian/init.d/celeryd

@@ -1,4 +1,4 @@
-#! /bin/sh
+#!/bin/sh
 
 ### BEGIN INIT INFO
 # Provides:		celeryd
@@ -9,18 +9,20 @@
 # Short-Description:	celery task worker daemon
 ### END INIT INFO
 
+# To use this with Django set your DJANGO_PROJECT_DIR in /etc/default/celeryd:
+#
+#   echo "DJANGO_PROJECT_DIR=/opt/Myapp" > /etc/default/celeryd
+#
+# The django project dir is the directory that contains settings and
+# manage.py.
+
 set -e
 
-VIRTUALENV=/opt/Opal/current
-DJANGO_PROJECT_DIR=/opt/Opal/release/opal
 DJANGO_SETTINGS_MODULE=settings
 CELERYD_PID_FILE="/var/run/celeryd.pid"
 CELERYD_LOG_FILE="/var/log/celeryd.log"
 CELERYD_LOG_LEVEL="INFO"
-CELERYD="celeryd"
-
-export DJANGO_SETTINGS_MODULE
-export DJANGO_PROJECT_DIR
+DEFAULT_CELERYD="celeryd"
 
 # /etc/init.d/ssh: start and stop the celery task worker daemon.
 
@@ -28,6 +30,17 @@ if test -f /etc/default/celeryd; then
     . /etc/default/celeryd
 fi
 
+export DJANGO_SETTINGS_MODULE
+export DJANGO_PROJECT_DIR
+
+if [ -z "$CELERYD" ]; then
+    if [ ! -z "$DJANGO_PROJECT_DIR" ]; then
+        CELERYD="$DJANGO_PROJECT_DIR/manage.py"
+        CELERYD_OPTS="celeryd"
+    else
+        CELERYD=$DEFAULT_CELERYD
+    fi
+fi
 
 . /lib/lsb/init-functions
 
@@ -61,7 +74,6 @@ check_dev_null() {
 
 export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
 if [ ! -z "$VIRTUALENV" ]; then
-    . "$VIRTUALENV/bin/activate"
     export PATH="$VIRTUALENV/bin:$PATH"
     CELERYD="$VIRTUALENV/bin/$CELERYD"
 fi
@@ -69,45 +81,45 @@ fi
 
 case "$1" in
   start)
-	check_dev_null
-	log_daemon_msg "Starting celery task worker server" "celeryd"
-	if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYD_PID_FILE --exec $CELERYD -- $CELERYD_OPTS; then
-	    log_end_msg 0
-	else
-	    log_end_msg 1
-	fi
-	;;
+    check_dev_null
+    log_daemon_msg "Starting celery task worker server" "celeryd"
+    if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYD_PID_FILE --exec $CELERYD -- $CELERYD_OPTS; then
+        log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
   stop)
-	log_daemon_msg "Stopping celery task worker server" "celeryd"
-	if start-stop-daemon --stop --quiet --oknodo --pidfile $CELERYD_PID_FILE; then log_end_msg 0
-	else
-	    log_end_msg 1
-	fi
-	;;
+    log_daemon_msg "Stopping celery task worker server" "celeryd"
+    if start-stop-daemon --stop --quiet --oknodo --pidfile $CELERYD_PID_FILE; then log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
 
   reload|force-reload)
     echo "Use start+stop"
-	;;
+    ;;
 
   restart)
-	log_daemon_msg "Restarting celery task worker server" "celeryd"
-	start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile $CELERYD_PID_FILE
-	check_dev_null log_end_msg
-	if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYD_PID_FILE --exec $CELERYD -- $CELERYD_OPTS; then
-	    log_end_msg 0
-	else
-	    log_end_msg 1
-	fi
-	;;
+    log_daemon_msg "Restarting celery task worker server" "celeryd"
+    start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile $CELERYD_PID_FILE
+    check_dev_null log_end_msg
+    if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYD_PID_FILE --exec $CELERYD -- $CELERYD_OPTS; then
+        log_end_msg 0
+    else
+        log_end_msg 1
+    fi
+    ;;
 
   try-restart)
-	log_daemon_msg "Restarting celery task worker server" "celeryd"
-	set +e
-	start-stop-daemon --stop --quiet --retry 30 --pidfile $CELERYD_PID_FILE
-	RET="$?"
-	set -e
-	case $RET in
-	    0)
+    log_daemon_msg "Restarting celery task worker server" "celeryd"
+    set +e
+    start-stop-daemon --stop --quiet --retry 30 --pidfile $CELERYD_PID_FILE
+    RET="$?"
+    set -e
+    case $RET in
+        0)
 		# old daemon stopped
 		check_dev_null log_end_msg
 		if start-stop-daemon --start --quiet --oknodo --pidfile $CELERYD_PID_FILE --exec $CELERYD -- $CELERYD_OPTS; then

+ 6 - 4
contrib/doc4allmods

@@ -2,7 +2,7 @@
 
 PACKAGE="$1"
 SKIP_PACKAGES="$PACKAGE tests management urls"
-SKIP_FILES="celery.bin.rst celery.serialization.rst"
+SKIP_FILES="celery.bin.rst celery.contrib.rst"
 
 modules=$(find "$PACKAGE" -name "*.py")
 
@@ -19,11 +19,13 @@ for module in $modules; do
     for skip_file in $SKIP_FILES; do
         [ "$skip_file" == "$rst" ] && skip=1
     done
-    
+
     if [ $skip -eq 0 ]; then
         if [ ! -f "docs/reference/$rst" ]; then
-            echo $rst :: FAIL
-            failed=1
+            if [ ! -f "docs/internals/reference/$rst" ]; then
+                echo $rst :: FAIL
+                failed=1
+            fi
         fi
     fi
 done

+ 10 - 0
contrib/mac/watch-workers.applescript

@@ -1,5 +1,6 @@
 set broker to "h8.opera.com"
 set workers to {"h6.opera.com", "h8.opera.com", "h9.opera.com", "h10.opera.com"}
+set clock to "h6.opera.com"
 tell application "iTerm"
     activate
     set myterm to (make new terminal)
@@ -17,6 +18,15 @@ tell application "iTerm"
                 write text "ssh root@" & workerhost & " 'tail -f /var/log/celeryd.log'"
             end tell
         end repeat
+        set celerybeat to (make new session at the end of sessions)
+        tell celerybeat
+            set name to "celerybeat.log"
+            set foreground color to "white"
+            set background color to "black"
+            set transparency to 0.1
+            exec command "/bin/sh -i"
+            write text "ssh root@" & clock & " 'tail -f /var/log/celerybeat.log'"
+        end tell
         set rabbit to (make new session at the end of sessions)
         tell rabbit
             set name to "rabbit.log"

+ 49 - 0
contrib/sphinx-to-rst.py

@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+from __future__ import with_statement
+import re
+import sys
+
+RE_CODE_BLOCK = re.compile(r'.. code-block:: (.+?)\s*$')
+RE_REFERENCE = re.compile(r':(.+?):`(.+?)`')
+
+
+def replace_code_block(lines, pos):
+    lines[pos] = ""
+    curpos = pos - 1
+    # Find the first previous line with text to append "::" to it.
+    while True:
+        prev_line = lines[curpos]
+        if not prev_line.isspace():
+            prev_line_with_text = curpos
+            break
+        curpos -= 1
+
+    if lines[prev_line_with_text].endswith(":"):
+        lines[prev_line_with_text] += ":"
+    else:
+        lines[prev_line_with_text] += "::"
+
+TO_RST_MAP = {RE_CODE_BLOCK: replace_code_block,
+              RE_REFERENCE: r'``\2``'}
+
+
+def _process(lines):
+    lines = list(lines) # non-destructive
+    for i, line in enumerate(lines):
+        for regex, alt in TO_RST_MAP.items():
+            if callable(alt):
+                if regex.match(line):
+                    alt(lines, i)
+                    line = lines[i]
+            else:
+                lines[i] = regex.sub(alt, line)
+    return lines
+
+
+def sphinx_to_rst(fh):
+    return "".join(_process(fh))
+
+
+if __name__ == "__main__":
+    with open(sys.argv[1]) as fh:
+        print(sphinx_to_rst(fh))

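The converter above downgrades Sphinx-only markup for plain reStructuredText consumers: cross-references such as :meth:`TaskSet.run` are rewritten as inline literals, and ``.. code-block::`` directives are dropped, with ``::`` appended after the preceding line of text. The script is meant to be run as ``python contrib/sphinx-to-rst.py somefile.txt``; the sketch below just calls ``sphinx_to_rst`` directly on a made-up file-like object, assuming the functions above are in scope::

    from StringIO import StringIO     # use io.StringIO on Python 3

    sample = StringIO(
        "See :meth:`TaskSet.run` for details.\n"
        "\n"
        "Example:\n"
        "\n"
        ".. code-block:: python\n"
        "\n"
        "    >>> add.delay(2, 2)\n")

    print(sphinx_to_rst(sample))
    # The reference is rewritten to ``TaskSet.run`` and the code-block
    # directive disappears, leaving a bare "::" literal-block marker.
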
+ 0 - 11
contrib/test_runner.py

@@ -1,11 +0,0 @@
-from django.conf import settings
-from django.test.simple import run_tests as run_tests_orig
-
-USAGE = """\
-Custom test runner to allow testing of celery .delay() tasks.
-"""
-
-def run_tests(test_labels, *args, **kwargs):
-    settings.CELERY_ALWAYS_EAGER = True
-    return run_tests_orig(test_labels, *args, **kwargs)
-

+ 4 - 4
contrib/testconn.py

@@ -27,10 +27,10 @@ class MyMessager(Messaging):
 
 def _create_conn():
     from django.conf import settings
-    conn = amqp.Connection(host=settings.AMQP_SERVER,
-                           userid=settings.AMQP_USER,
-                           password=settings.AMQP_PASSWORD,
-                           virtual_host=settings.AMQP_VHOST,
+    conn = amqp.Connection(host=settings.BROKER_SERVER,
+                           userid=settings.BROKER_USER,
+                           password=settings.BROKER_PASSWORD,
+                           virtual_host=settings.BROKER_VHOST,
                            insist=False)
     return conn
 

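The only change to ``contrib/testconn.py`` is the rename from the old ``AMQP_*`` setting names to ``BROKER_*``. A Django settings module used with the script would now carry entries along these lines (all values are placeholders)::

    # settings.py -- hypothetical broker configuration read by contrib/testconn.py
    BROKER_SERVER = "localhost"
    BROKER_USER = "guest"
    BROKER_PASSWORD = "guest"
    BROKER_VHOST = "/"
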
+ 1 - 1
contrib/testdynpool.py

@@ -1,4 +1,4 @@
-from celery.pool import DynamicPool
+from billiard.pool import DynamicPool
 from multiprocessing import get_logger, log_to_stderr
 import logging
 

+ 14 - 0
contrib/verify-reference-index.sh

@@ -0,0 +1,14 @@
+#!/bin/bash
+modules=$(grep "celery." docs/reference/index.rst | \
+            perl -ple's/^\s*|\s*$//g;s{\.}{/}g;')
+retval=0
+for module in $modules; do
+    if [ ! -f "$module.py" ]; then
+        if [ ! -f "$module/__init__.py" ]; then
+            echo "Outdated reference: $module"
+            retval=1
+        fi
+    fi
+done
+
+exit $retval

+ 35 - 0
docs/_theme/ADCThemePrint/README.rst

@@ -0,0 +1,35 @@
+==============
+How To Install
+==============
+
+Install in Sphinx
+-----------------
+
+Copy this directory into the ``sphinx/templates`` directory where Sphinx is installed. For example, a standard install of Sphinx on Mac OS X is at ``/Library/Python/2.6/site-packages/Sphinx-0.6.3-py2.6.egg/``.
+
+Install Somewhere Else
+----------------------
+
+If you want to install this theme somewhere else, you will have to modify the ``conf.py`` file. ::
+
+    templates_path = ['/absolute/path/to/dir/','relative/path/']
+
+Making Sphinx Use the Theme
+---------------------------
+
+Edit the ``conf.py`` file and make the following setting: ::
+
+    html_theme = 'ADCtheme'
+
+Screen Shots
+------------
+
+.. image:: http://github.com/coordt/ADCtheme/raw/master/static/scrn1.png
+
+.. image:: http://github.com/coordt/ADCtheme/raw/master/static/scrn2.png
+
+To Do
+-----
+
+ * Gotta get the javascript working so the Table of Contents is hide-able.
+ * Probably lots of css cleanup.

+ 47 - 0
docs/_theme/ADCThemePrint/layout.html

@@ -0,0 +1,47 @@
+{% extends "basic/layout.html" %}
+{%- block doctype -%}
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+{%- endblock %}
+{%- set reldelim1 = reldelim1 is not defined and ' &raquo;' or reldelim1 %}
+{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
+{%- block linktags %}
+{%- endblock %}
+{%- block extrahead %} {% endblock %}
+{%- block header %}{% endblock %}
+{%- block relbar1 %}
+{% endblock %}
+
+{%- block sidebar1 %}
+{% endblock %}
+{%- block document %}
+      <div class="documentwrapper">
+      {%- if not embedded %}{% if not theme_nosidebar|tobool %}
+        <div class="bodywrapper">
+      {%- endif %}{% endif %}
+          <div class="body">
+            {% block body %} {% endblock %}
+          </div>
+      {%- if not embedded %}{% if not theme_nosidebar|tobool %} 
+        </div>
+      {%- endif %}{% endif %}
+      </div>
+    <div class="footer">
+    <p>{%- if hasdoc('copyright') %}
+      {% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
+    {%- else %}
+      {% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
+    {%- endif %}
+    {%- if last_updated %}
+      {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
+    {%- endif %}
+    {%- if show_sphinx %}
+      {% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
+    {%- endif %}
+	</p>
+    </div>
+{%- endblock %}
+{%- block sidebar2 %}{% endblock %}
+{%- block relbar2 %}{% endblock %}
+{%- block footer %}
+{%- endblock %}

+ 745 - 0
docs/_theme/ADCThemePrint/static/adctheme.css

@@ -0,0 +1,745 @@
+/**
+ * Sphinx stylesheet -- basic theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* -- main layout ----------------------------------------------------------- */
+
+div.clearer {
+    clear: both;
+}
+
+/* -- header ---------------------------------------------------------------- */
+
+#header #title {
+    background:#29334F url(title_background.png) repeat-x scroll 0 0;
+    /*border-bottom:1px solid #B6B6B6;
+    height:25px;*/
+    overflow:hidden;
+}
+#headerButtons {
+    position: absolute;
+    list-style: none outside;
+    top: 26px;
+    left: 0px;
+    right: 0px;
+    margin: 0px;
+    padding: 0px;
+    border-top: 1px solid #2B334F;
+    border-bottom: 1px solid #EDEDED;
+    height: 20px;
+    font-size: 8pt;
+    overflow: hidden;
+    background-color: #D8D8D8;
+}
+
+#headerButtons li {
+    background-repeat:no-repeat;
+    display:inline;
+    margin-top:0;
+    padding:0;
+}
+
+.headerButton {
+    display: inline;
+    height:20px;
+}
+
+.headerButton a {
+    text-decoration: none;
+    float: right;
+    height: 20px;
+    padding: 4px 15px;
+    border-left: 1px solid #ACACAC;
+    font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+    color: black;
+}
+.headerButton a:hover {
+    color: white;
+    background-color: #787878;
+    
+}
+
+li#toc_button {
+    text-align:left;
+}
+
+li#toc_button .headerButton a {
+    width:198px;
+    padding-top: 4px;
+    font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+    color: black;
+    float: left;
+    padding-left:15px;
+    border-right:1px solid #ACACAC;
+    background:transparent url(triangle_closed.png) no-repeat scroll 4px 6px;
+}
+
+
+
+li#page_buttons {
+position:absolute;
+right:0;
+}
+
+#breadcrumbs {
+    color: black;
+    background-image:url(breadcrumb_background.png);
+    border-top:1px solid #2B334F;
+    bottom:0;
+    font-size:10px;
+    height:15px;
+    left:0;
+    overflow:hidden;
+    padding:3px 10px 0;
+    position:absolute;
+    right:0;
+    white-space:nowrap;
+    z-index:901;
+}
+#breadcrumbs a {
+    color: black;
+    text-decoration: none;
+}
+#breadcrumbs a:hover {
+    text-decoration: underline;
+}
+
+/* -- sidebar --------------------------------------------------------------- */
+#sphinxsidebar {
+    position: absolute;
+    top: 84px;
+    bottom: 19px;
+    left: 0px;
+    width: 229px; 
+    background-color: #E4EBF7;
+    border-right: 1px solid #ACACAC;
+    border-top: 1px solid #2B334F;
+    overflow-x: hidden;
+    overflow-y: auto;
+    padding: 0px 0px 0px 0px;
+    font-size:11px;
+}
+
+div.sphinxsidebarwrapper {
+    padding: 10px 5px 0 10px;
+}
+
+#sphinxsidebar li {
+    margin: 0px;
+    padding: 0px;
+    font-weight: normal;
+    margin: 0px 0px 7px 0px;
+    overflow: hidden;
+    text-overflow: ellipsis;
+    font-size: 11px;
+}
+
+#sphinxsidebar ul {
+    list-style: none;
+    margin: 0px 0px 0px 0px;
+    padding: 0px 5px 0px 5px;
+}
+
+#sphinxsidebar ul ul,
+#sphinxsidebar ul.want-points {
+    list-style: square;
+}
+
+#sphinxsidebar ul ul {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+#sphinxsidebar form {
+    margin-top: 10px;
+}
+
+#sphinxsidebar input {
+    border: 1px solid #787878;
+    font-family: sans-serif;
+    font-size: 1em;
+}
+
+img {
+    border: 0;
+}
+
+#sphinxsidebar li.toctree-l1 a {
+    font-weight: bold;
+    color: #000;
+    text-decoration: none;
+}
+
+#sphinxsidebar li.toctree-l2 a {
+    font-weight: bold;
+    color: #4f4f4f;
+    text-decoration: none;
+}
+
+/* -- search page ----------------------------------------------------------- */
+
+ul.search {
+    margin: 10px 0 0 20px;
+    padding: 0;
+}
+
+ul.search li {
+    padding: 5px 0 5px 20px;
+    background-image: url(file.png);
+    background-repeat: no-repeat;
+    background-position: 0 7px;
+}
+
+ul.search li a {
+    font-weight: bold;
+}
+
+ul.search li div.context {
+    color: #888;
+    margin: 2px 0 0 30px;
+    text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+    font-weight: bold;
+}
+#sphinxsidebar input.prettysearch {border:none;}
+input.searchbutton {
+    float: right;
+}
+.search-wrapper {width: 100%; height: 25px;}
+.search-wrapper input.prettysearch { border: none; width:200px; height: 16px; background: url(searchfield_repeat.png) center top repeat-x; border: 0px; margin: 0; padding: 3px 0 0 0; font: 11px "Lucida Grande", "Lucida Sans Unicode", Arial, sans-serif; }
+.search-wrapper input.prettysearch { width: 184px; margin-left: 20px; *margin-top:-1px; *margin-right:-2px; *margin-left:10px; }
+.search-wrapper .search-left { display: block; position: absolute; width: 20px; height: 19px; background: url(searchfield_leftcap.png) left top no-repeat; }
+.search-wrapper .search-right { display: block; position: relative; left: 204px; top: -19px; width: 10px; height: 19px; background: url(searchfield_rightcap.png) right top no-repeat; }
+
+/* -- index page ------------------------------------------------------------ */
+
+table.contentstable {
+    width: 90%;
+}
+
+table.contentstable p.biglink {
+    line-height: 150%;
+}
+
+a.biglink {
+    font-size: 1.3em;
+}
+
+span.linkdescr {
+    font-style: italic;
+    padding-top: 5px;
+    font-size: 90%;
+}
+
+/* -- general index --------------------------------------------------------- */
+
+table.indextable td {
+    text-align: left;
+    vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+    height: 10px;
+}
+
+table.indextable tr.cap {
+    margin-top: 10px;
+    background-color: #f2f2f2;
+}
+
+img.toggler {
+    margin-right: 3px;
+    margin-top: 3px;
+    cursor: pointer;
+}
+
+/* -- general body styles --------------------------------------------------- */
+.document {
+    /*border-top:1px solid #2B334F;*/
+    overflow:auto;
+    /*padding-left:2em;
+    padding-right:2em;
+    position:absolute;
+    z-index:1;
+    top:84px;
+    bottom:19px;
+    right:0;
+    left:230px;*/
+    padding-left: 2em;
+    padding-right: 2em;
+}
+
+a.headerlink {
+    visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+    visibility: visible;
+}
+
+div.body p.caption {
+    text-align: inherit;
+}
+
+div.body td {
+    text-align: left;
+}
+
+.field-list ul {
+    padding-left: 1em;
+}
+
+.first {
+    margin-top: 0 !important;
+}
+
+p.rubric {
+    margin-top: 30px;
+    font-weight: bold;
+}
+
+/* -- sidebars -------------------------------------------------------------- */
+
+/*div.sidebar {
+    margin: 0 0 0.5em 1em;
+    border: 1px solid #ddb;
+    padding: 7px 7px 0 7px;
+    background-color: #ffe;
+    width: 40%;
+    float: right;
+}
+
+p.sidebar-title {
+    font-weight: bold;
+}
+*/
+/* -- topics ---------------------------------------------------------------- */
+
+div.topic {
+    border: 1px solid #ccc;
+    padding: 7px 7px 0 7px;
+    margin: 10px 0 10px 0;
+}
+
+p.topic-title {
+    font-size: 1.1em;
+    font-weight: bold;
+    margin-top: 10px;
+}
+
+/* -- admonitions ----------------------------------------------------------- */
+.admonition {
+    border: 1px solid #a1a5a9;
+    background-color: #f7f7f7;
+    margin: 20px;
+    padding: 0px 8px 7px 9px;
+    text-align: left;
+}
+.warning {
+    background-color:#E8E8E8;
+    border:1px solid #111111;
+    margin:30px;
+}
+.admonition p { 
+    font: 12px 'Lucida Grande', Geneva, Helvetica, Arial, sans-serif;
+    margin-top: 7px;
+    margin-bottom: 0px;
+}
+
+div.admonition dt {
+    font-weight: bold;
+}
+
+div.admonition dl {
+    margin-bottom: 0;
+}
+
+p.admonition-title {
+    margin: 0px 10px 5px 0px;
+    font-weight: bold;
+    padding-top: 3px;
+}
+
+div.body p.centered {
+    text-align: left;
+    margin-top: 25px;
+}
+
+/* -- tables ---------------------------------------------------------------- */
+
+table.docutils {
+    border-collapse: collapse;
+    border-top: 1px solid #919699;
+    border-left: 1px solid #919699;
+    border-right: 1px solid #919699;
+    font-size:12px;
+    padding:8px;
+    text-align:left;
+    vertical-align:top;
+}
+
+table.docutils td, table.docutils th {
+    padding: 8px;
+    font-size: 12px;
+    text-align: left;
+    vertical-align: top;
+    border-bottom: 1px solid #919699;
+}
+
+table.docutils th {
+    font-weight: bold;
+}
+/* This alternates colors in up to six table rows (light blue for odd, white for even)*/      
+.docutils tr {
+        background: #F0F5F9;
+}
+
+.docutils tr + tr {
+        background: #FFFFFF;
+}
+
+.docutils tr + tr + tr {
+        background: #F0F5F9;
+}
+
+.docutils tr + tr + tr + tr {
+        background: #FFFFFF;
+}
+
+.docutils tr + tr + tr +tr + tr {
+        background: #F0F5F9;
+}
+
+.docutils tr + tr + tr + tr + tr + tr {
+        background: #FFFFFF;
+}
+
+.docutils tr + tr + tr + tr + tr + tr + tr {
+        background: #F0F5F9;
+}
+
+table.footnote td, table.footnote th {
+    border: 0 !important;
+}
+
+th {
+    text-align: left;
+    padding-right: 5px;
+}
+
+/* -- other body styles ----------------------------------------------------- */
+
+dl {
+    margin-bottom: 15px;
+}
+
+dd p {
+    margin-top: 0px;
+}
+
+dd ul, dd table {
+    margin-bottom: 10px;
+}
+
+dd {
+    margin-top: 3px;
+    margin-bottom: 10px;
+    margin-left: 30px;
+}
+
+dt:target, .highlight {
+    background-color: #fbe54e;
+}
+
+dl.glossary dt {
+    font-weight: bold;
+    font-size: 1.1em;
+}
+
+.field-list ul {
+    vertical-align: top;
+    margin: 0;
+    padding-bottom: 0;
+    list-style: none inside;
+}
+
+.field-list ul li {
+    margin-top: 0;
+}
+
+.field-list p {
+    margin: 0;
+}
+
+.refcount {
+    color: #060;
+}
+
+.optional {
+    font-size: 1.3em;
+}
+
+.versionmodified {
+    font-style: italic;
+}
+
+.system-message {
+    background-color: #fda;
+    padding: 5px;
+    border: 3px solid red;
+}
+
+.footnote:target  {
+    background-color: #ffa
+}
+
+/* -- code displays --------------------------------------------------------- */
+
+pre {
+    overflow: auto;
+    background-color:#F1F5F9;
+    border:1px solid #C9D1D7;
+    border-spacing:0;
+    font-family:"Bitstream Vera Sans Mono",Monaco,"Lucida Console",Courier,Consolas,monospace;
+    font-size:11px;
+    padding: 10px;
+}
+
+td.linenos pre {
+    padding: 5px 0px;
+    border: 0;
+    background-color: transparent;
+    color: #aaa;
+}
+
+table.highlighttable {
+    margin-left: 0.5em;
+}
+
+table.highlighttable td {
+    padding: 0 0.5em 0 0.5em;
+}
+
+tt.descname {
+    background-color: transparent;
+    font-weight: bold;
+    font-size: 1.2em;
+}
+
+tt.descclassname {
+    background-color: transparent;
+}
+
+tt.xref, a tt {
+    background-color: transparent;
+    font-weight: bold;
+}
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+    background-color: transparent;
+}
+
+/* -- math display ---------------------------------------------------------- */
+
+img.math {
+    vertical-align: middle;
+}
+
+div.body div.math p {
+    text-align: center;
+}
+
+span.eqno {
+    float: right;
+}
+
+/* -- printout stylesheet --------------------------------------------------- */
+
+@media print {
+    div.document,
+    div.documentwrapper,
+    div.bodywrapper {
+        margin: 0;
+        width: 100%;
+    }
+
+    div.sphinxsidebar,
+    div.related,
+    div.footer,
+    #top-link {
+        display: none;
+    }
+}
+
+body {
+    font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+}
+
+dl.class dt {
+    padding: 3px;
+    border-top: 2px solid #999;
+}
+
+tt.descname {
+    font-size: 1em;
+}
+
+em.property {
+    font-style: normal;
+}
+
+dl.class dd p {
+}
+
+dl.class dd dl.exception dt {
+    padding: 3px;
+    background-color: #FFD6D6;
+    border-top: none;
+}
+
+dl.class dd dl.method dt {
+   padding: 3px;
+   background-color: #e9e9e9;
+   border-top: none;
+   
+}
+
+dl.function dt {
+    padding: 3px;
+    border-top: 2px solid #999;
+}
+
+ul {
+list-style-image:none;
+list-style-position:outside;
+list-style-type:square;
+margin:0 0 0 30px;
+padding:0 0 12px 6px;
+}
+#docstitle {
+    height: 36px; 
+    background-image: url(header_sm_mid.png);
+    left: 0;
+    top: 0;
+    position: absolute;
+    width: 100%;
+}
+#docstitle p {
+    padding:7px 0 0 45px;
+    margin: 0;
+    color: white;
+    text-shadow:0 1px 0 #787878;
+    background: transparent url(documentation.png) no-repeat scroll 10px 3px;
+    height: 36px;
+    font-size: 15px;
+}
+#header {
+height:45px;
+left:0;
+position:absolute;
+right:0;
+top:36px;
+z-index:900;
+}
+
+#header h1 {
+font-size:10pt;
+margin:0;
+padding:5px 0 0 10px;
+text-shadow:0 1px 0 #D5D5D5;
+white-space:nowrap;
+}
+
+h1 {
+-x-system-font:none;
+color:#000000;
+font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+font-size:30px;
+font-size-adjust:none;
+font-stretch:normal;
+font-style:normal;
+font-variant:normal;
+font-weight:bold;
+line-height:normal;
+margin-bottom:25px;
+margin-top:1em;
+}
+
+.footer {
+border-top:1px solid #DDDDDD;
+clear:both;
+padding-top:9px;
+width:100%;
+font-size:10px;
+}
+
+p {
+-x-system-font:none;
+font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+font-size:12px;
+font-size-adjust:none;
+font-stretch:normal;
+font-style:normal;
+font-variant:normal;
+font-weight:normal;
+line-height:normal;
+margin-bottom:10px;
+margin-top:0;
+}
+
+h2 {
+border-bottom:1px solid #919699;
+color:#000000;
+font-size:24px;
+margin-top:2.5em;
+padding-bottom:2px;
+}
+
+a:link:hover {
+color:#093D92;
+text-decoration:underline;
+}
+
+a:link {
+color:#093D92;
+text-decoration:none;
+}
+
+
+ol {
+list-style-position:outside;
+list-style-type:decimal;
+margin:0 0 0 30px;
+padding:0 0 12px 6px;
+}
+li {
+margin-top:7px;
+font-family:'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+font-size:12px;
+font-size-adjust:none;
+font-stretch:normal;
+font-style:normal;
+font-variant:normal;
+font-weight:normal;
+line-height:normal;
+}
+li > p {
+display:inline;
+}
+li p {
+margin-top:8px;
+}

BIN
docs/_theme/ADCThemePrint/static/breadcrumb_background.png


BIN
docs/_theme/ADCThemePrint/static/documentation.png


BIN
docs/_theme/ADCThemePrint/static/header_sm_mid.png


BIN
docs/_theme/ADCThemePrint/static/scrn1.png


BIN
docs/_theme/ADCThemePrint/static/scrn2.png


BIN
docs/_theme/ADCThemePrint/static/searchfield_leftcap.png


BIN
docs/_theme/ADCThemePrint/static/searchfield_repeat.png


BIN
docs/_theme/ADCThemePrint/static/searchfield_rightcap.png


Some files were not shown because too many files changed in this diff