
Merge branch 'master' into bradjasper/save_taskset

Conflicts:
	celery/backends/database.py
	celery/managers.py
	celery/models.py
	celery/task/base.py
	celery/tests/test_backends/test_base.py
	celery/tests/test_models.py
Ask Solem committed 15 years ago
parent commit 6517f5dd6a
100 changed files with 5848 additions and 3870 deletions
  1. .gitignore (+2, -0)
  2. AUTHORS (+6, -0)
  3. Changelog (+401, -34)
  4. FAQ (+185, -49)
  5. Makefile (+37, -7)
  6. README.rst (+123, -306)
  7. bin/celerybeat (+7, -0)
  8. bin/celeryd (+2, -0)
  9. bin/celeryinit (+3, -1)
  10. celery/__init__.py (+1, -1)
  11. celery/backends/__init__.py (+32, -42)
  12. celery/backends/amqp.py (+20, -17)
  13. celery/backends/base.py (+33, -26)
  14. celery/backends/cache.py (+19, -6)
  15. celery/backends/database.py (+8, -31)
  16. celery/backends/mongodb.py (+15, -103)
  17. celery/backends/pyredis.py (+95, -0)
  18. celery/backends/tyrant.py (+6, -5)
  19. celery/beat.py (+232, -0)
  20. celery/bin/celerybeat.py (+115, -0)
  21. celery/bin/celeryd.py (+104, -210)
  22. celery/bin/celeryinit.py (+2, -1)
  23. celery/conf.py (+202, -258)
  24. celery/contrib/__init__.py (+0, -0)
  25. celery/contrib/test_runner.py (+19, -0)
  26. celery/datastructures.py (+91, -16)
  27. celery/decorators.py (+76, -0)
  28. celery/discovery.py (+0, -26)
  29. celery/events.py (+105, -0)
  30. celery/exceptions.py (+9, -1)
  31. celery/execute.py (+0, -340)
  32. celery/execute/__init__.py (+152, -0)
  33. celery/execute/trace.py (+94, -0)
  34. celery/fields.py (+0, -153)
  35. celery/loaders/__init__.py (+108, -48)
  36. celery/loaders/base.py (+7, -0)
  37. celery/loaders/default.py (+18, -16)
  38. celery/loaders/djangoapp.py (+56, -14)
  39. celery/log.py (+82, -27)
  40. celery/management/commands/celerybeat.py (+18, -0)
  41. celery/management/commands/celeryd.py (+1, -0)
  42. celery/management/commands/celerymon.py (+37, -0)
  43. celery/management/commands/celerystats.py (+0, -22)
  44. celery/managers.py (+35, -123)
  45. celery/messaging.py (+118, -52)
  46. celery/models.py (+11, -48)
  47. celery/monitoring.py (+0, -236)
  48. celery/platform.py (+50, -0)
  49. celery/pool.py (+0, -258)
  50. celery/registry.py (+27, -47)
  51. celery/result.py (+42, -30)
  52. celery/serialization.py (+0, -116)
  53. celery/signals.py (+10, -79)
  54. celery/supervisor.py (+0, -120)
  55. celery/task/__init__.py (+17, -47)
  56. celery/task/base.py (+256, -209)
  57. celery/task/builtins.py (+4, -7)
  58. celery/task/control.py (+85, -0)
  59. celery/task/http.py (+212, -0)
  60. celery/task/rest.py (+14, -153)
  61. celery/task/strategy.py (+0, -52)
  62. celery/tests/test_backends/__init__.py (+27, -0)
  63. celery/tests/test_backends/test_amqp.py (+63, -0)
  64. celery/tests/test_backends/test_base.py (+11, -6)
  65. celery/tests/test_backends/test_cache.py (+58, -9)
  66. celery/tests/test_backends/test_database.py (+9, -22)
  67. celery/tests/test_backends/test_redis.py (+156, -0)
  68. celery/tests/test_backends/test_tyrant.py (+13, -14)
  69. celery/tests/test_beat.py (+213, -0)
  70. celery/tests/test_buckets.py (+233, -0)
  71. celery/tests/test_conf.py (+13, -24)
  72. celery/tests/test_datastructures.py (+80, -1)
  73. celery/tests/test_discovery.py (+2, -3)
  74. celery/tests/test_events.py (+75, -0)
  75. celery/tests/test_loaders.py (+127, -0)
  76. celery/tests/test_log.py (+54, -15)
  77. celery/tests/test_messaging.py (+1, -1)
  78. celery/tests/test_models.py (+8, -33)
  79. celery/tests/test_monitoring.py (+0, -96)
  80. celery/tests/test_pickle.py (+2, -1)
  81. celery/tests/test_pool.py (+1, -2)
  82. celery/tests/test_registry.py (+6, -21)
  83. celery/tests/test_result.py (+105, -18)
  84. celery/tests/test_serialization.py (+3, -3)
  85. celery/tests/test_supervisor.py (+0, -66)
  86. celery/tests/test_task.py (+123, -34)
  87. celery/tests/test_task_builtins.py (+4, -2)
  88. celery/tests/test_task_control.py (+63, -0)
  89. celery/tests/test_utils.py (+78, -4)
  90. celery/tests/test_utils_info.py (+62, -0)
  91. celery/tests/test_views.py (+129, -0)
  92. celery/tests/test_worker.py (+221, -53)
  93. celery/tests/test_worker_control.py (+58, -0)
  94. celery/tests/test_worker_controllers.py (+47, -45)
  95. celery/tests/test_worker_heartbeat.py (+51, -0)
  96. celery/tests/test_worker_job.py (+188, -56)
  97. celery/tests/test_worker_revoke.py (+12, -0)
  98. celery/tests/test_worker_scheduler.py (+47, -0)
  99. celery/tests/utils.py (+98, -2)
  100. celery/urls.py (+3, -2)

+ 2 - 0
.gitignore

@@ -9,3 +9,5 @@ build/
 .build/
 pip-log.txt
 .directory
+erl_crash.dump
+*.db

+ 6 - 0
AUTHORS

@@ -9,3 +9,9 @@ Ordered by date of first contribution:
   Jonatan Heyman <jonatan@heyman.info>
   Mark Hellewell <mark.hellewell@gmail.com>
   Jerzy Kozera <jerzy.kozera@gmail.com>
+  Wes Winham <winhamwr@gmail.com>
+  Timo Sugliani
+  Michael Elsdoerfer <michael@elsdoerfer.com>
+  Jason Baker <amnorvend@gmail.com>
+  Wes Turner <wes.turner@gmail.com>
+  Maxim Bodyansky <bodyansky@gmail.com>

+ 401 - 34
Changelog

@@ -2,10 +2,373 @@
 Change history
 ==============
 
-0.8.0 [2009-09-22 03:06 P.M CET]
---------------------------------
+1.0.0 [xxxx-xx-xx xx:xx x.x xxx]
+================================
+
+BACKWARD INCOMPATIBLE CHANGES
+-----------------------------
+
+* Celery does not support detaching anymore, so you have to use the tools
+  available on your platform, or something like supervisord to make
+  celeryd/celerybeat/celerymon into background processes.
+
+  We've had too many problems with celeryd daemonizing itself, so it was
+  decided it had to be removed. Example startup scripts have been added to
+  ``contrib/``:
+
+      * Debian, Ubuntu, (start-stop-daemon)
+
+           ``contrib/debian/init.d/celeryd``
+           ``contrib/debian/init.d/celerybeat``
+
+      * Mac OS X launchd
+
+            ``contrib/mac/org.celeryq.celeryd.plist``
+            ``contrib/mac/org.celeryq.celerybeat.plist``
+            ``contrib/mac/org.celeryq.celerymon.plist``
+
+      * Supervisord (http://supervisord.org)
+
+            ``contrib/supervisord/supervisord.conf``
+
+  In addition to ``--detach``, the following program arguments have been
+  removed: ``--uid``, ``--gid``, ``--workdir``, ``--chroot``, ``--pidfile``,
+  ``--umask``. All good daemonization tools should support equivalent
+  functionality, so don't worry.
+
+  Also the following configuration keys have been removed:
+  ``CELERYD_PID_FILE``, ``CELERYBEAT_PID_FILE``, ``CELERYMON_PID_FILE``.
+
+* The default celeryd loglevel is now ``WARN``; to enable the previous log
+  level, start celeryd with ``--loglevel=INFO``.
+
+* Tasks are automatically registered.
+
+  This means you no longer have to register your tasks manually.
+  You don't have to change your old code right away, as it doesn't matter if
+  a task is registered twice.
+
+  If you don't want your task to be automatically registered, you can set
+  the ``abstract`` attribute:
+
+  .. code-block:: python
+
+		class MyTask(Task):
+			abstract = True
+
+  With ``abstract`` set, only tasks subclassing this task will be
+  automatically registered (this works like the Django ORM).
+
+  If you don't want subclasses to be registered either, you can set the
+  ``autoregister`` attribute to ``False``.
+
+  Incidentally, this change also fixes the problems with automatic name
+  assignment and relative imports. So you also don't have to specify a task name
+  anymore if you use relative imports.
+
+* You can no longer use regular functions as tasks. This change was added
+  because it makes the internals a lot cleaner and simpler. However, you can
+  now turn functions into tasks by using the ``@task`` decorator:
+
+  .. code-block:: python
+
+		from celery.decorators import task
+
+		@task
+		def add(x, y):
+			return x + y
+
+  See the User Guide: :doc:`userguide/tasks` for more information.
+
+* The periodic task system has been rewritten to a centralized solution. This
+  means ``celeryd`` no longer schedules periodic tasks by default; instead, a
+  new daemon, ``celerybeat``, has been introduced.
+
+  To launch the periodic task scheduler you have to run celerybeat::
+
+		$ celerybeat
+
+  Make sure this is running on one server only; if you run it twice, all
+  periodic tasks will also be executed twice.
+
+  If you only have one worker server you can embed it into celeryd like this::
+
+		$ celeryd --beat # Embed celerybeat in celeryd.
+
+* The supervisor has been removed; please use something like
+  http://supervisord.org instead. This means the ``-S`` and ``--supervised``
+  options to ``celeryd`` are no longer supported.
+
+* ``TaskSet.join`` has been removed, use ``TaskSetResult.join`` instead.
+
+* The task status ``"DONE"`` has been renamed to ``"SUCCESS"``.
+
+* ``AsyncResult.is_done`` has been removed, use ``AsyncResult.successful``
+  instead.
+
+* The worker no longer stores errors if ``Task.ignore_result`` is set; to
+  revert to the previous behaviour, set
+  ``CELERY_STORE_ERRORS_EVEN_IF_IGNORED`` to ``True``.
+
+* The statistics functionality has been removed in favor of events,
+  so the ``-S`` and ``--statistics`` switches have been removed.
+
+* The module ``celery.task.strategy`` has been removed.
+
+* ``celery.discovery`` has been removed, and its ``autodiscover`` function is
+  now in ``celery.loaders.djangoapp``. Reason: Internal API.
+
+* ``CELERY_LOADER`` now needs the loader class name in addition to the module
+  name, e.g. where you previously had ``"celery.loaders.default"``, you now
+  need ``"celery.loaders.default.Loader"``. Using the previous syntax will
+  result in a ``DeprecationWarning``.
+
+* Detecting the loader is now lazy, and so is not done when importing
+  ``celery.loaders``. To make this happen ``celery.loaders.settings`` has
+  been renamed to ``load_settings`` and is now a function returning the
+  settings object. ``celery.loaders.current_loader`` is now also
+  a function, returning the current loader.
+
+  So::
+
+    	loader = current_loader
+
+  needs to be changed to::
+
+    	loader = current_loader()
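+
+  The settings accessor changed the same way; a minimal sketch using the new
+  functions (only the names introduced by this change are assumed)::
+
+      from celery.loaders import current_loader, load_settings
+
+      loader = current_loader()      # the active loader instance
+      settings = load_settings()     # the loaded settings object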
+
+DEPRECATIONS
+------------
+
+* The following configuration variables have been renamed and will be
+  deprecated in v1.2:
+
+  	* CELERYD_DAEMON_LOG_FORMAT -> CELERYD_LOG_FORMAT
+  	* CELERYD_DAEMON_LOG_LEVEL -> CELERYD_LOG_LEVEL
+  	* CELERY_AMQP_CONNECTION_TIMEOUT -> CELERY_BROKER_CONNECTION_TIMEOUT
+  	* CELERY_AMQP_CONNECTION_RETRY -> CELERY_BROKER_CONNECTION_RETRY
+  	* CELERY_AMQP_CONNECTION_MAX_RETRIES -> CELERY_BROKER_CONNECTION_MAX_RETRIES
+  	* SEND_CELERY_TASK_ERROR_EMAILS -> CELERY_SEND_TASK_ERROR_EMAILS
+
+* The public API names in ``celery.conf`` have also changed to a consistent
+  naming scheme.
+
+* We now support consuming from an arbitrary number of queues, but to do this
+  we had to change the configuration syntax. If you use any of the custom
+  AMQP routing options (queue/exchange/routing_key, etc.), you should read the
+  new FAQ entry: http://bit.ly/aiWoH. The previous syntax is deprecated and
+  scheduled for removal in v1.2.
+
+* ``TaskSet.run`` has been renamed to ``TaskSet.apply_async``.
+  ``run`` still works, but is deprecated and scheduled for removal in v1.2.
+
+
+NEWS
+----
+
+* Rate limiting support (per task type, or globally).
+
+* New periodic task system.
+
+* Automatic registration.
+
+* New cool task decorator syntax.
+
+* celeryd now sends events if enabled with the ``-E`` argument.
+  Excellent for monitoring tools; one is already in the making
+  (http://github.com/ask/celerymon).
+
+  Current events include: worker-heartbeat,
+  task-[received/succeeded/failed/retried],
+  worker-online, worker-offline.
+
+* You can now delete (revoke) tasks that have already been applied.
+
+* You can now set the hostname celeryd identifies as using the ``--hostname``
+  argument.
+
+* Cache backend now respects ``CELERY_TASK_RESULT_EXPIRES``.
+
+* Message format has been standardized and now uses ISO-8601 format
+  for dates instead of datetime objects.
+
+* ``celeryd`` now responds to the ``HUP`` signal by restarting itself.
+
+* Periodic tasks are now scheduled on the clock, i.e. ``timedelta(hours=1)``
+  means every hour at :00 minutes, not every hour from when the server started.
+  To revert to the previous behaviour you can set
+  ``PeriodicTask.relative = True``.
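+
+  A minimal sketch (the class name and interval here are only illustrative)::
+
+      from datetime import timedelta
+
+      from celery.task import PeriodicTask
+
+      class HourlySync(PeriodicTask):
+          run_every = timedelta(hours=1)
+          relative = True  # count from worker startup, as before this change
+
+          def run(self, **kwargs):
+              pass  # the periodic work goes here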
+
+* Now supports passing execute options to a TaskSet's list of args, e.g.:
+
+    >>> ts = TaskSet(add, [([2, 2], {}, {"countdown": 1}),
+    ...                   ([4, 4], {}, {"countdown": 2}),
+    ...                   ([8, 8], {}, {"countdown": 3})])
+    >>> ts.run()
 
-**BACKWARD INCOMPATIBLE CHANGES**
+* Got a 3x performance gain by setting the prefetch count to four times the
+  concurrency (from an average task round-trip of 0.1s to 0.03s!). A new
+  setting has been added: ``CELERYD_PREFETCH_MULTIPLIER``, which is set
+  to ``4`` by default.
+
+CHANGES
+-------
+
+* Now depends on carrot >= 0.8.1
+
+* New dependencies: billiard, python-dateutil, django-picklefield
+
+* No longer depends on python-daemon
+
+* The ``uuid`` distribution is added as a dependency when running Python 2.4.
+
+* Now remembers the previously detected loader by keeping it in
+  the ``CELERY_LOADER`` environment variable. This may help on Windows, where
+  fork emulation is used.
+
+* ETA no longer sends datetime objects, but uses ISO 8601 date format in a
+  string for better compatibility with other platforms.
+
+* No longer sends error mails for retried tasks.
+
+* Task can now override the backend used to store results.
+
+* Refactored the ExecuteWrapper; ``apply`` and ``CELERY_ALWAYS_EAGER`` now
+  also execute the task callbacks and signals.
+
+* Now using a proper scheduler for tasks with an ETA. This means waiting
+  ETA tasks are sorted by time, so we don't have to poll the whole list all
+  the time.
+
+* Now also imports modules listed in CELERY_IMPORTS when running
+  with Django (as documented).
+
+* Loglevel for stdout/stderr changed from INFO to ERROR
+
+* ImportErrors are now properly propagated when autodiscovering tasks.
+
+* You can now use ``celery.messaging.establish_connection`` to establish a
+  connection to the broker.
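+
+  For example (a minimal sketch; the connection parameters are taken from
+  your configuration)::
+
+      >>> from celery.messaging import establish_connection
+      >>> connection = establish_connection()
+      >>> # ... publish or consume messages using the connection ...
+      >>> connection.close()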
+
+* When running as a separate service, the periodic task scheduler does some
+  smart moves to avoid polling too regularly; if you need faster poll times you
+  can lower the value of ``CELERYBEAT_MAX_LOOP_INTERVAL``.
+
+* You can now change periodic task intervals at runtime, by making
+  ``run_every`` a property, or by overriding ``PeriodicTask.is_due``.
+
+* The worker now supports control commands enabled through the use of a
+  broadcast queue; you can remotely revoke tasks or set the rate limit for
+  a task type. See :mod:`celery.task.control`.
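+
+  For example (a sketch; the task id and task name here are only
+  illustrative)::
+
+      >>> from celery.task.control import revoke, rate_limit
+      >>> revoke("a4b7a4b8-73e2-4579-8754-9d9f23e879cf")
+      >>> rate_limit("myapp.tasks.add", "10/m")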
+
+* The services now set informative process names (as shown in ``ps``
+  listings) if the :mod:`setproctitle` module is installed.
+
+* :exc:`celery.exceptions.NotRegistered` now inherits from :exc:`KeyError`,
+  and ``TaskRegistry.__getitem__``+``pop`` raise ``NotRegistered`` instead.
+
+* You can set the loader via the ``CELERY_LOADER`` environment variable.
+
+* You can now set ``CELERY_IGNORE_RESULT`` to ignore task results by default
+  (if enabled, tasks don't save results or errors to the backend used).
+
+BUGS
+----
+
+* Fixed a race condition that could happen while storing task results in the
+  database.
+
+DOCUMENTATION
+-------------
+
+* Reference now split into two sections: API reference and internal module
+  reference.
+
+
+0.8.1 [2009-11-16 05:21 P.M CEST]
+=================================
+
+VERY IMPORTANT NOTE
+-------------------
+
+This release (with carrot 0.8.0) enables AMQP QoS (quality of service), which
+means the workers will only receive as many messages as they can handle at a
+time. As with any release, you should test this version upgrade on your
+development servers before rolling it out to production!
+
+IMPORTANT CHANGES
+-----------------
+
+* If you're using Python < 2.6 and you use the multiprocessing backport, then
+  multiprocessing version 2.6.2.1 is required.
+
+* All AMQP_* settings have been renamed to BROKER_*, and in addition
+  AMQP_SERVER has been renamed to BROKER_HOST, so before where you had::
+
+		AMQP_SERVER = "localhost"
+		AMQP_PORT = 5678
+		AMQP_USER = "myuser"
+		AMQP_PASSWORD = "mypassword"
+		AMQP_VHOST = "celery"
+
+  You need to change that to::
+
+		BROKER_HOST = "localhost"
+		BROKER_PORT = 5678
+		BROKER_USER = "myuser"
+		BROKER_PASSWORD = "mypassword"
+		BROKER_VHOST = "celery"
+
+* Custom carrot backends now need to include the backend class name, so before
+  where you had::
+
+		CARROT_BACKEND = "mycustom.backend.module"
+
+  you need to change it to::
+
+		CARROT_BACKEND = "mycustom.backend.module.Backend"
+
+  where ``Backend`` is the class name. This is probably ``"Backend"``, as
+  that was the previously implied name.
+
+* New version requirement for carrot: 0.8.0
+
+CHANGES
+-------
+
+* Incorporated the multiprocessing backport patch that fixes the
+  ``processName`` error.
+
+* Ignore the result of PeriodicTasks by default.
+
+* Added a Redis result store backend
+
+* Allow /etc/default/celeryd to define additional options for the celeryd init
+  script.
+
+* Fixed a MongoDB periodic tasks issue when using a timezone other than UTC.
+
+* Windows specific: Negate test for available os.fork (thanks miracle2k)
+
+* Now tries to handle broken PID files.
+
+* Added a Django test runner to contrib that sets ``CELERY_ALWAYS_EAGER = True``
+  for testing with the database backend.
+
+* Added a ``CELERY_CACHE_BACKEND`` setting for using something other than the
+  django-global cache backend.
+
+* Use custom implementation of functools.partial (curry) for Python 2.4 support
+  (there are probably still problems running on 2.4, but it will eventually
+  be supported).
+
+* Prepare exception to pickle when saving RETRY status for all backends.
+
+* The SQLite "no concurrency" limit should now only be in effect if the
+  database backend is actually used.
+
+0.8.0 [2009-09-22 03:06 P.M CEST]
+=================================
+
+BACKWARD INCOMPATIBLE CHANGES
+-----------------------------
 
 * Add traceback to result value on failure.
 	**NOTE** If you use the database backend you have to re-create the
@@ -23,7 +386,8 @@ Change history
 
 * Now depends on python-daemon 1.4.8
 
-**IMPORTANT CHANGES**
+IMPORTANT CHANGES
+-----------------
 
 * Celery can now be used in pure Python (outside of a Django project).
 	This means celery is no longer Django specific.
@@ -85,7 +449,8 @@ Change history
     * AMQP_CONNECTION_MAX_RETRIES.
         Maximum number of restarts before we give up. Default: ``100``.
 
-**NEWS**
+NEWS
+----
 
 *  Fix an incompatibility between python-daemon and multiprocessing,
 	which resulted in the ``[Errno 10] No child processes`` problem when
@@ -134,9 +499,10 @@ Change history
 	Thanks mikedizon
 
 0.6.0 [2009-08-07 06:54 A.M CET]
---------------------------------
+================================
 
-**IMPORTANT CHANGES**
+IMPORTANT CHANGES
+-----------------
 
 * Fixed a bug where tasks raising unpickleable exceptions crashed pool
 	workers. So if you've had pool workers mysteriously disappearing, or
@@ -154,7 +520,8 @@ Change history
 	we didn't do this before. Some documentation is updated to not manually
 	specify a task name.
 
-**NEWS**
+NEWS
+----
 
 * Tested with Django 1.1
 
@@ -206,13 +573,13 @@ Change history
 * Convert statistics data to unicode for use as kwargs. Thanks Lucy!
 
 0.4.1 [2009-07-02 01:42 P.M CET]
---------------------------------
+================================
 
 * Fixed a bug with parsing the message options (``mandatory``,
   ``routing_key``, ``priority``, ``immediate``)
 
-0.4.0 [2009-07-01 07:29 P.M CET] 
---------------------------------
+0.4.0 [2009-07-01 07:29 P.M CET]
+================================
 
 * Adds eager execution. ``celery.execute.apply``|``Task.apply`` executes the
   function blocking until the task is done, for API compatibility it
@@ -224,8 +591,8 @@ Change history
 
 * 99% coverage using python ``coverage`` 3.0.
 
-0.3.20 [2009-06-25 08:42 P.M CET] 
----------------------------------
+0.3.20 [2009-06-25 08:42 P.M CET]
+=================================
 
 * New arguments to ``apply_async`` (the advanced version of
   ``delay_task``), ``countdown`` and ``eta``;
@@ -362,14 +729,14 @@ Change history
 * Tyrant Backend: Now re-establishes the connection for every task
   executed.
 
-0.3.3 [2009-06-08 01:07 P.M CET] 
---------------------------------
+0.3.3 [2009-06-08 01:07 P.M CET]
+================================
 
-	* The ``PeriodicWorkController`` now sleeps for 1 second between checking
-		for periodic tasks to execute.
+* The ``PeriodicWorkController`` now sleeps for 1 second between checking
+  for periodic tasks to execute.
 
 0.3.2 [2009-06-08 01:07 P.M CET]
---------------------------------
+================================
 
 * celeryd: Added option ``--discard``: Discard (delete!) all waiting
   messages in the queue.
@@ -377,7 +744,7 @@ Change history
 * celeryd: The ``--wakeup-after`` option was not handled as a float.
 
 0.3.1 [2009-06-08 01:07 P.M CET]
---------------------------------
+================================
 
 * The ``PeriodicTask`` worker is now running in its own thread instead
   of blocking the ``TaskController`` loop.
@@ -385,7 +752,7 @@ Change history
 * Default ``QUEUE_WAKEUP_AFTER`` has been lowered to ``0.1`` (was ``0.3``)
 
 0.3.0 [2009-06-08 12:41 P.M CET]
---------------------------------
+================================
 
 **NOTE** This is a development version; for the stable release, please
 see versions 0.2.x.
@@ -459,7 +826,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   stability.
 
 0.2.0 [2009-05-20 05:14 P.M CET]
---------------------------------
+================================
 
 * Final release of 0.2.0
 
@@ -469,20 +836,20 @@ arguments, so be sure to flush your task queue before you upgrade.
   from the database backend.
 
 0.2.0-pre3 [2009-05-20 05:14 P.M CET]
--------------------------------------
+=====================================
 
 * *Internal release*. Improved handling of unpickled exceptions;
   ``get_result`` now tries to recreate something looking like the
   original exception.
 
 0.2.0-pre2 [2009-05-20 01:56 P.M CET]
--------------------------------------
+=====================================
 
 * Now handles unpickleable exceptions (like the dynamically generated
   subclasses of ``django.core.exceptions.MultipleObjectsReturned``).
 
 0.2.0-pre1 [2009-05-20 12:33 P.M CET]
--------------------------------------
+=====================================
 
 * It's getting quite stable, with a lot of new features, so bump
   version to 0.2. This is a pre-release.
@@ -492,20 +859,20 @@ arguments, so be sure to flush your task queue before you upgrade.
   and ``celery.backends.default_backend.mark_as_failure()`` instead.
 
 0.1.15 [2009-05-19 04:13 P.M CET]
----------------------------------
+=================================
 
 * The celery daemon was leaking AMQP connections; this should be fixed.
   If you have any problems with too many files open (like ``emfile``
   errors in ``rabbit.log``), please contact us!
 
 0.1.14 [2009-05-19 01:08 P.M CET]
----------------------------------
+=================================
 
 * Fixed a syntax error in the ``TaskSet`` class.  (No such variable
   ``TimeOutError``).
 
 0.1.13 [2009-05-19 12:36 P.M CET]
----------------------------------
+=================================
 
 * Forgot to add ``yadayada`` to install requirements.
 
@@ -526,7 +893,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   and the result will be in ``docs/.build/html``.
 
 0.1.12 [2009-05-18 04:38 P.M CET]
----------------------------------
+=================================
 
 * ``delay_task()`` etc. now returns a ``celery.task.AsyncResult`` object,
   which lets you check the result and any failure that might have
@@ -564,13 +931,13 @@ arguments, so be sure to flush your task queue before you upgrade.
 		TT_PORT = 6657; # Port of the Tokyo Tyrant server.
 
 0.1.11 [2009-05-12 02:08 P.M CET]
----------------------------------
+=================================
 
 * The logging system was leaking file descriptors, resulting in
   servers stopping with the EMFILES (too many open files) error. (fixed)
 
 0.1.10 [2009-05-11 12:46 P.M CET]
----------------------------------
+=================================
 
 * Tasks now supports both positional arguments and keyword arguments.
 
@@ -579,7 +946,7 @@ arguments, so be sure to flush your task queue before you upgrade.
 * The daemon now tries to reconnect if the connection is lost.
 
 0.1.8 [2009-05-07 12:27 P.M CET]
---------------------------------
+================================
 
 * Better test coverage
 * More documentation
@@ -587,7 +954,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   ``settings.CELERYD_EMPTY_MSG_EMIT_EVERY`` is 0.
 
 0.1.7 [2009-04-30 1:50 P.M CET]
--------------------------------
+===============================
 
 * Added some unittests
 
@@ -602,7 +969,7 @@ arguments, so be sure to flush your task queue before you upgrade.
   and ``settings.CELERY_AMQP_CONSUMER_QUEUE``.
 
 0.1.6 [2009-04-28 2:13 P.M CET]
--------------------------------
+===============================
 
 * Introducing ``TaskSet``. A set of subtasks is executed and you can
   find out how many, or if all of them, are done (excellent for progress
@@ -645,6 +1012,6 @@ arguments, so be sure to flush your task queue before you upgrade.
   the name change request is in ``docs/name_change_request.txt``.
 
 0.1.0 [2009-04-24 11:28 A.M CET]
---------------------------------
+================================
 
 * Initial release

+ 185 - 49
FAQ

@@ -2,6 +2,71 @@
  Frequently Asked Questions
 ============================
 
+Misconceptions
+==============
+
+Is celery dependent on pickle?
+------------------------------
+
+**Answer:** No.
+
+Celery can support any serialization scheme and has support for JSON/YAML and
+Pickle by default. You can even send one task using pickle, and another one
+with JSON seamlessly; this is because every task is associated with a
+content-type. The default serialization scheme is pickle because it's the most
+used, and it has support for sending complex objects as task arguments.
+
+You can set a global default serializer, the default serializer for a
+particular Task, and even what serializer to use when sending a single task
+instance.
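+
+For example (a sketch; the task class and arguments are only illustrative,
+while the setting, attribute and keyword names are the ones celery uses):
+
+.. code-block:: python
+
+    from celery.task import Task
+
+    # Global default (in settings.py):
+    CELERY_TASK_SERIALIZER = "json"
+
+    # Default for one task class:
+    class ProcessImageTask(Task):
+        serializer = "json"
+
+        def run(self, image_path, **kwargs):
+            pass  # process the image here
+
+    # For a single invocation:
+    ProcessImageTask.apply_async(args=["logo.png"], serializer="pickle")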
+
+Is celery for Django only?
+--------------------------
+
+**Answer:** No.
+
+While Django itself is a dependency, you can still use all of celery's
+features outside of a Django project.
+
+Do I have to use AMQP/RabbitMQ?
+-------------------------------
+
+**Answer**: No.
+
+You can also use Redis or an SQL database; for instructions see `Using other
+queues`_.
+
+.. _`Using other queues`:
+    http://ask.github.com/celery/tutorials/otherqueues.html
+
+Redis or a database won't meet the standards
+of an AMQP broker. If you have strict reliability requirements you are
+encouraged to use RabbitMQ or another AMQP broker. Redis/database also use
+polling, so they are likely to consume more resources. However, if you for
+some reason are not able to use AMQP, feel free to use these alternatives.
+They will probably work fine for most use cases, and note that the above
+points are not specific to celery; if using Redis/database as a queue worked
+fine for you before, it probably will now. And you can always upgrade later.
+
+Is celery multi-lingual?
+------------------------
+
+**Answer:** Yes.
+
+celeryd is an implementation of celery in Python. If your language has an
+AMQP client, there shouldn't be much work to create a worker in it.
+A celery worker is just a program connecting to the broker to consume
+messages. There's no other communication involved.
+
+Also, there's another way to be language independent, and that is to use REST
+tasks: instead of your tasks being functions, they're URLs. With this
+information you can even create simple web servers that enable preloading of
+code. For more information about REST tasks see: `User Guide: Remote Tasks`_.
+
+.. _`User Guide: Remote Tasks`:
+    http://ask.github.com/celery/userguide/remote-tasks.html
+
+
 Troubleshooting
 ===============
 
@@ -44,6 +109,10 @@ Why won't celeryd run on FreeBSD?
 implementation which isn't enabled in FreeBSD by default. You have to enable
 POSIX semaphores in the kernel and manually recompile multiprocessing.
 
+Luckily, Viktor Petersson has written a tutorial to get you started with
+Celery on FreeBSD here:
+http://www.playingwithwire.com/2009/10/how-to-get-celeryd-to-work-on-freebsd/
+
 I'm having ``IntegrityError: Duplicate Key`` errors. Why?
 ----------------------------------------------------------
 
@@ -89,14 +158,14 @@ Why won't my Task run?
 (or in some other module Django loads by default, like ``models.py``?).
 Also there might be syntax errors preventing the tasks module being imported.
 
-You can find out if the celery daemon is able to run the task by executing the
+You can find out if celery is able to run the task by executing the
 task manually:
 
     >>> from myapp.tasks import MyPeriodicTask
     >>> MyPeriodicTask.delay()
 
-Watch celery daemons logfile (or output if not running as a daemon), to see
-if it's able to find the task, or if some other error is happening.
+Watch celeryd's logfile to see if it's able to find the task, or if some
+other error is happening.
 
 Why won't my Periodic Task run?
 -------------------------------
@@ -186,14 +255,18 @@ Use the following specific settings in your ``settings.py``:
     CARROT_BACKEND = "stomp"
 
     # STOMP hostname and port settings.
-    AMQP_HOST = "localhost"
-    AMQP_PORT = 61613
+    BROKER_HOST = "localhost"
+    BROKER_PORT = 61613
 
     # The queue name to use (both queue and exchange must be set to the
     # same queue name when using STOMP)
-    CELERY_AMQP_CONSUMER_QUEUE = "/queue/celery"
-    CELERY_AMQP_EXCHANGE = "/queue/celery" 
-   
+    CELERY_DEFAULT_QUEUE = "/queue/celery"
+    CELERY_DEFAULT_EXCHANGE = "/queue/celery" 
+
+    CELERY_QUEUES = {
+        "/queue/celery": {"exchange": "/queue/celery"}
+    }
+
 Now you can go on reading the tutorial in the README, ignoring any AMQP
 specific options. 
 
@@ -217,47 +290,44 @@ Features
 Can I send some tasks to only some servers?
 --------------------------------------------
 
-**Answer:** As of now there is only one use-case that works like this,
-and that is tasks of type ``A`` can be sent to servers ``x`` and ``y``,
-while tasks of type ``B`` can be sent to server ``z``. One server can't
-handle more than one routing_key, but this is coming in a later release.
+**Answer:** Yes. You can route tasks to an arbitrary server using AMQP,
+and a worker can bind to as many queues as it wants.
 
 Say you have two servers, ``x`` and ``y``, that handle regular tasks,
 and one server ``z`` that only handles feed-related tasks. You can use this
 configuration:
 
-    * Servers ``x`` and ``y``: settings.py:
+* Servers ``x`` and ``y``: settings.py:
 
-    .. code-block:: python
+.. code-block:: python
 
-        AMQP_SERVER = "rabbit"
-        AMQP_PORT = 5678
-        AMQP_USER = "myapp"
-        AMQP_PASSWORD = "secret"
-        AMQP_VHOST = "myapp"
+    CELERY_DEFAULT_QUEUE = "regular_tasks"
+    CELERY_QUEUES = {
+        "regular_tasks": {
+            "binding_key": "task.#",
+        },
+    }
+    CELERY_DEFAULT_EXCHANGE = "tasks"
+    CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
+    CELERY_DEFAULT_ROUTING_KEY = "task.regular"
 
-        CELERY_AMQP_CONSUMER_QUEUE = "regular_tasks"
-        CELERY_AMQP_EXCHANGE = "tasks"
-        CELERY_AMQP_PUBLISHER_ROUTING_KEY = "task.regular"
-        CELERY_AMQP_CONSUMER_ROUTING_KEY = "task.#"
-        CELERY_AMQP_EXCHANGE_TYPE = "topic"
+* Server ``z``: settings.py:
 
-    * Server ``z``: settings.py:
+.. code-block:: python
 
-    .. code-block:: python
+    CELERY_DEFAULT_QUEUE = "feed_tasks"
+    CELERY_QUEUES = {
+        "feed_tasks": {
+            "binding_key": "feed.#",
+        },
+    }
+    CELERY_DEFAULT_EXCHANGE = "tasks"
+    CELERY_DEFAULT_ROUTING_KEY = "task.regular"
+    CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
 
-        AMQP_SERVER = "rabbit"
-        AMQP_PORT = 5678
-        AMQP_USER = "myapp"
-        AMQP_PASSWORD = "secret"
-        AMQP_VHOST = "myapp"
-        
-        CELERY_AMQP_EXCHANGE = "tasks"
-        CELERY_AMQP_PUBLISHER_ROUTING_KEY = "task.regular"
-        CELERY_AMQP_EXCHANGE_TYPE = "topic"
-        # This is the settings different for this server:
-        CELERY_AMQP_CONSUMER_QUEUE = "feed_tasks"
-        CELERY_AMQP_CONSUMER_ROUTING_KEY = "feed.#"
+``CELERY_QUEUES`` is a map of queue names and their exchange/type/binding_key;
+if you don't set exchange or exchange type, they will be taken from the
+``CELERY_DEFAULT_EXCHANGE``/``CELERY_DEFAULT_EXCHANGE_TYPE`` settings.
 
 Now to make a Task run on the ``z`` server you need to set its
 ``routing_key`` attribute so it starts with ``"feed."``:
@@ -265,24 +335,72 @@ Now to make a Task run on the ``z`` server you need to set its
 .. code-block:: python
 
     from feedaggregator.models import Feed
-    from celery.task import Task
+    from celery.decorators import task
+
+    @task(routing_key="feed.importer")
+    def import_feed(feed_url):
+        Feed.objects.import_feed(feed_url)
+
+or if subclassing the ``Task`` class directly:
+
+.. code-block:: python
 
     class FeedImportTask(Task):
         routing_key = "feed.importer"
 
         def run(self, feed_url):
-            # something importing the feed
             Feed.objects.import_feed(feed_url)
 
 
 You can also override this using the ``routing_key`` argument to
 :func:`celery.task.apply_async`:
 
-    >>> from celery.task import apply_async
     >>> from myapp.tasks import RefreshFeedTask
-    >>> apply_async(RefreshFeedTask, args=["http://cnn.com/rss"],
-    ...             routing_key="feed.importer")
+    >>> RefreshFeedTask.apply_async(args=["http://cnn.com/rss"],
+    ...                             routing_key="feed.importer")
+
+
+If you want, you can even have your feed processing worker handle regular
+tasks as well, maybe in times when there's a lot of work to do.
+Just add a new queue to server ``z``'s ``CELERY_QUEUES``:
+
+.. code-block:: python
+
+        CELERY_QUEUES = {
+            "feed_tasks": {
+                "binding_key": "feed.#",
+            },
+            "regular_tasks": {
+                "binding_key": "task.#",
+            },
+        }
+
+Since the default exchange is ``tasks``, they will both use the same
+exchange.
+
+If you want to add another queue that uses a different exchange,
+just specify a custom exchange and exchange type:
 
+.. code-block:: python
+
+    CELERY_QUEUES = {
+        "feed_tasks": {
+            "binding_key": "feed.#",
+        },
+        "regular_tasks": {
+            "binding_key": "task.#",
+        },
+        "image_tasks": {
+            "binding_key": "image.compress",
+            "exchange": "mediatasks",
+            "exchange_type": "direct",
+        },
+    }
+
+Easy? No? If you're confused about these terms, you should read up on
+AMQP and RabbitMQ. It might be hard to grok the concepts of
+queues, exchanges and routing/binding keys at first, but it's all very simple,
+I assure you.
 
 Can I use celery without Django?
 --------------------------------
@@ -323,11 +441,11 @@ configuration using the database backend with MySQL:
 .. code-block:: python
 
     # Broker configuration
-    AMQP_SERVER = "localhost"
-    AMQP_PORT = "5672"
-    AMQP_VHOST = "celery"
-    AMQP_USER = "celery"
-    AMQP_PASSWORD = "celerysecret"
+    BROKER_HOST = "localhost"
+    BROKER_PORT = "5672"
+    BROKER_VHOST = "celery"
+    BROKER_USER = "celery"
+    BROKER_PASSWORD = "celerysecret"
     CARROT_BACKEND="amqp"
 
     # Using the database backend.
@@ -370,7 +488,7 @@ The celery test-suite is failing
 tests are failing and celery's tests are failing in that context?
 If so, read on for a trick; if not, please report the test failure to our issue
 tracker at GitHub.
-    
+
     http://github.com/ask/celery/issues/
 
 That Django is running tests for all applications in ``INSTALLED_APPS``
@@ -403,3 +521,21 @@ If you just want to skip celery you could use:
     INSTALLED_APPS = (.....)
     TEST_RUNNER = "celery.tests.runners.run_tests"
     TEST_APPS = filter(lambda k: k != "celery", INSTALLED_APPS)
+
+
+Can I change the interval of a periodic task at runtime?
+--------------------------------------------------------
+
+**Answer**: Yes. You can override ``PeriodicTask.is_due`` or turn
+``PeriodicTask.run_every`` into a property:
+
+.. code-block:: python
+
+    class MyPeriodic(PeriodicTask):
+
+        def run(self):
+            # ...
+
+        @property
+        def run_every(self):
+            return get_interval_from_database(...)

+ 37 - 7
Makefile

@@ -8,32 +8,62 @@ cycomplex:
 	find celery -type f -name "*.py" | xargs pygenie.py complexity
 
 ghdocs:
+	rm -rf docs/.build
 	contrib/doc2ghpages
 
+upload_github_docs: ghdocs
+
+upload_pypi_docs:
+	python setup.py build_sphinx && python setup.py upload_sphinx
+
+upload_docs: upload_github_docs upload_pypi_docs
+
 autodoc:
 	contrib/doc4allmods celery
 
+verifyindex:
+	contrib/verify-reference-index.sh
+
+flakes:
+	find . -name "*.py" | xargs pyflakes
+
+clean_readme:
+	rm -f README.rst README
+
+readme: clean_readme
+	python contrib/sphinx-to-rst.py docs/templates/readme.txt > README.rst
+	ln -s README.rst README
+
 bump:
 	contrib/bump -c celery
 
-coverage2:
-	[ -d testproj/temp ] || mkdir -p testproj/temp
-	(cd testproj; python manage.py test --figleaf)
-
-coverage:
-	[ -d testproj/temp ] || mkdir -p testproj/temp
+cover:
 	(cd testproj; python manage.py test --coverage)
 
+coverage: cover
+
+quickcover:
+	(cd testproj; env QUICKTEST=1 SKIP_RLIMITS=1 python manage.py test --coverage)
+
 test:
 	(cd testproj; python manage.py test)
 
+quicktest:
+	(cd testproj; SKIP_RLIMITS=1 python manage.py test)
+
 testverbose:
 	(cd testproj; python manage.py test --verbosity=2)
 
-releaseok: pep8 autodoc test
+releaseok: pep8 autodoc verifyindex test gitclean
 
 removepyc:
 	find . -name "*.pyc" | xargs rm
 
 release: releaseok ghdocs removepyc
 
+gitclean: removepyc
+	git clean -xdn
+
+gitcleanforce: removepyc
+	git clean -xdf
+

+ 123 - 306
README.rst

@@ -2,36 +2,28 @@
  celery - Distributed Task Queue
 =================================
 
-:Version: 0.8.0
+.. image:: http://cloud.github.com/downloads/ask/celery/celery_favicon_128.png
 
-Introduction
-============
-
-Celery is a distributed task queue.
+:Version: 1.0.0-pre2
+:Keywords: task queue, job queue, asynchronous, rabbitmq, amqp, redis,
+  django, python, webhooks, queue, distributed
 
-It was first created for Django, but is now usable from Python.
-It can also operate with other languages via HTTP+JSON.
+--
 
-This introduction is written for someone who wants to use
-Celery from within a Django project. For information about using it from
-pure Python see `Can I use Celery without Django?`_, for calling out to other
-languages see `Executing tasks on a remote web server`_.
 
-.. _`Can I use Celery without Django?`: http://bit.ly/WPa6n
+Celery is a task queue/job queue based on distributed message passing.
+It is focused on real-time operation, but has support for scheduling as well.
 
-.. _`Executing tasks on a remote web server`: http://bit.ly/CgXSc
+The execution units, called tasks, are executed concurrently on one or more
+worker servers, asynchronously (in the background) or synchronously
+(wait until ready).
 
-It is used for executing tasks *asynchronously*, routed to one or more
-worker servers, running concurrently using multiprocessing.
+Celery is already used in production to process millions of tasks a day.
 
-It is designed to solve certain problems related to running websites
-demanding high-availability and performance.
+It was first created for Django, but is now usable from Python as well.
+It can also `operate with other languages via HTTP+JSON`_.
 
-It is perfect for filling caches, posting updates to twitter, mass
-downloading data like syndication feeds or web scraping. Use-cases are
-plentiful. Implementing these features asynchronously using ``celery`` is
-easy and fun, and the performance improvements can make it more than
-worthwhile.
+.. _`operate with other languages via HTTP+JSON`: http://bit.ly/CgXSc
 
 Overview
 ========
@@ -40,81 +32,132 @@ This is a high level overview of the architecture.
 
 .. image:: http://cloud.github.com/downloads/ask/celery/Celery-Overview-v4.jpg
 
-The broker is an AMQP server pushing tasks to the worker servers.
+The broker pushes tasks to the worker servers.
 A worker server is a networked machine running ``celeryd``. This can be one or
-more machines, depending on the workload. See `A look inside the worker`_ to
-see how the worker server works.
+more machines, depending on the workload.
 
 The result of the task can be stored for later retrieval (called its
 "tombstone").
 
-Features
-========
-
-    * Uses AMQP messaging (RabbitMQ, ZeroMQ, Qpid) to route tasks to the
-      worker servers. Experimental support for STOMP (ActiveMQ) is also 
-      available.
-
-    * You can run as many worker servers as you want, and still
-      be *guaranteed that the task is only executed once.*
-
-    * Tasks are executed *concurrently* using the Python 2.6
-      ``multiprocessing`` module (also available as a back-port
-      to older python versions)
-
-    * Supports *periodic tasks*, which makes it a (better) replacement
-      for cronjobs.
-
-    * When a task has been executed, the return value can be stored using
-      either a MySQL/Oracle/PostgreSQL/SQLite database, Memcached,
-      `MongoDB`_ or `Tokyo Tyrant`_ back-end. For high-performance you can
-      also use AMQP messages to publish results.
-
-    * If the task raises an exception, the exception instance is stored,
-      instead of the return value.
-
-    * All tasks has a Universally Unique Identifier (UUID), which is the
-      task id, used for querying task status and return values.
-
-    * Tasks can be retried if they fail, with a configurable maximum number
-      of retries.
+Example
+=======
 
-    * Tasks can be configured to run at a specific time and date in the
-      future (ETA) or you can set a countdown in seconds for when the
-      task should be executed.
+You probably want to see some code by now, so I'll give you an example task
+adding two numbers:
+::
 
-    * Supports *task-sets*, which is a task consisting of several sub-tasks.
-      You can find out how many, or if all of the sub-tasks has been executed.
-      Excellent for progress-bar like functionality.
+    from celery.decorators import task
 
-    * Has a ``map`` like function that uses tasks, called ``dmap``.
+    @task
+    def add(x, y):
+        return x + y
 
-    * However, you rarely want to wait for these results in a web-environment.
-      You'd rather want to use Ajax to poll the task status, which is
-      available from a URL like ``celery/<task_id>/status/``. This view
-      returns a JSON-serialized data structure containing the task status,
-      and the return value if completed, or exception on failure.
+You can execute the task in the background, or wait for it to finish::
 
-    * The worker can collect statistics, like, how many tasks has been
-      executed by type, and the time it took to process them. Very useful
-      for monitoring and profiling.
+    >>> result = add.delay(4, 4)
+    >>> result.wait() # wait for and return the result
+    8
 
-    * Pool workers are supervised, so if for some reason a worker crashes
-        it is automatically replaced by a new worker.
+Simple!
 
-    * Can be configured to send e-mails to the administrators when a task
-      fails.
+Features
+========
 
+    +-----------------+----------------------------------------------------+
+    | Messaging       | Supported brokers include `RabbitMQ`_, `Stomp`_,   |
+    |                 | `Redis`_, and the most common SQL databases.       |
+    +-----------------+----------------------------------------------------+
+    | Robust          | Using `RabbitMQ`_, celery survives most error      |
+    |                 | scenarios, and your tasks will never be lost.      |
+    +-----------------+----------------------------------------------------+
+    | Distributed     | Runs on one or more machines. Supports             |
+    |                 | `clustering`_ when used in combination with        |
+    |                 | `RabbitMQ`_. You can set up new workers without    |
+    |                 | central configuration (e.g. use your dad's laptop  |
+    |                 | while the queue is temporarily overloaded).        |
+    +-----------------+----------------------------------------------------+
+    | Concurrency     | Tasks are executed in parallel using the           |
+    |                 | ``multiprocessing`` module.                        |
+    +-----------------+----------------------------------------------------+
+    | Scheduling      | Supports recurring tasks like cron, or specifying  |
+    |                 | an exact date or countdown for when the task       |
+    |                 | should be executed.                                |
+    +-----------------+----------------------------------------------------+
+    | Performance     | Able to execute tasks while the user waits.        |
+    +-----------------+----------------------------------------------------+
+    | Return Values   | Task return values can be saved to the selected    |
+    |                 | result store backend. You can wait for the result, |
+    |                 | retrieve it later, or ignore it.                   |
+    +-----------------+----------------------------------------------------+
+    | Result Stores   | Database, `MongoDB`_, `Redis`_, `Tokyo Tyrant`_,   |
+    |                 | `AMQP`_ (high performance).                        |
+    +-----------------+----------------------------------------------------+
+    | Webhooks        | Your tasks can also be HTTP callbacks, enabling    |
+    |                 | cross-language communication.                      |
+    +-----------------+----------------------------------------------------+
+    | Rate limiting   | Supports rate limiting by using the token bucket   |
+    |                 | algorithm, which accounts for bursts of traffic.   |
+    |                 | Rate limits can be set for each task type, or      |
+    |                 | globally for all.                                  |
+    +-----------------+----------------------------------------------------+
+    | Routing         | Using AMQP you can route tasks arbitrarily to      |
+    |                 | different workers.                                 |
+    +-----------------+----------------------------------------------------+
+    | Remote-control  | You can rate limit and delete (revoke) tasks       |
+    |                 | remotely.                                          |
+    +-----------------+----------------------------------------------------+
+    | Monitoring      | You can capture everything happening with the      |
+    |                 | workers in real-time by subscribing to events.     |
+    |                 | A real-time web monitor is in development.         |
+    +-----------------+----------------------------------------------------+
+    | Serialization   | Supports Pickle, JSON, YAML, or easily defined     |
+    |                 | custom schemes. One task invocation can have a     |
+    |                 | different scheme than another.                     |
+    +-----------------+----------------------------------------------------+
+    | Tracebacks      | Errors and tracebacks are stored and can be        |
+    |                 | investigated after the fact.                       |
+    +-----------------+----------------------------------------------------+
+    | UUID            | Every task has a UUID (Universally Unique          |
+    |                 | Identifier), which is the task id used to query    |
+    |                 | task status and return value.                      |
+    +-----------------+----------------------------------------------------+
+    | Retries         | Tasks can be retried if they fail, with            |
+    |                 | configurable maximum number of retries, and delays |
+    |                 | between each retry.                                |
+    +-----------------+----------------------------------------------------+
+    | Task Sets       | A Task set is a task consisting of several         |
+    |                 | sub-tasks. You can find out how many, or if all    |
+    |                 | of the sub-tasks have been executed, and even      |
+    |                 | retrieve the results in order. Progress bars,      |
+    |                 | anyone?                                            |
+    +-----------------+----------------------------------------------------+
+    | Made for Web    | You can query status and results via URLs,         |
+    |                 | enabling the ability to poll task status using     |
+    |                 | Ajax.                                              |
+    +-----------------+----------------------------------------------------+
+    | Error e-mails   | Can be configured to send e-mails to the           |
+    |                 | administrators when tasks fail.                    |
+    +-----------------+----------------------------------------------------+
+    | Supervised      | Pool workers are supervised and automatically      |
+    |                 | replaced if they crash.                            |
+    +-----------------+----------------------------------------------------+
+
+
+.. _`RabbitMQ`: http://www.rabbitmq.com/
+.. _`clustering`: http://www.rabbitmq.com/clustering.html
+.. _`AMQP`: http://www.amqp.org/
+.. _`Stomp`: http://stomp.codehaus.org/
 .. _`MongoDB`: http://www.mongodb.org/
+.. _`Redis`: http://code.google.com/p/redis/
 .. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/
 
-API Reference Documentation
-===========================
+Documentation
+=============
 
-The `API Reference`_ is hosted at Github
-(http://ask.github.com/celery)
+The `latest documentation`_ with user guides, tutorials and API reference
+is hosted at Github.
 
-.. _`API Reference`: http://ask.github.com/celery/
+.. _`latest documentation`: http://ask.github.com/celery/
 
 Installation
 =============
@@ -151,233 +194,6 @@ You can clone the repository by doing the following::
     $ git clone git://github.com/ask/celery.git
 
 
-Usage
-=====
-
-Installing RabbitMQ
--------------------
-
-See `Installing RabbitMQ`_ over at RabbitMQ's website. For Mac OS X
-see `Installing RabbitMQ on OS X`_.
-
-.. _`Installing RabbitMQ`: http://www.rabbitmq.com/install.html
-.. _`Installing RabbitMQ on OS X`:
-    http://playtype.net/past/2008/10/9/installing_rabbitmq_on_osx/
-
-
-Setting up RabbitMQ
--------------------
-
-To use celery we need to create a RabbitMQ user, a virtual host and
-allow that user access to that virtual host::
-
-    $ rabbitmqctl add_user myuser mypassword
-
-    $ rabbitmqctl add_vhost myvhost
-
-From RabbitMQ version 1.6.0 and onward you have to use the new ACL features
-to allow access::
-
-    $ rabbitmqctl set_permissions -p myvhost myuser "" ".*" ".*"
-
-See the RabbitMQ `Admin Guide`_ for more information about `access control`_.
-
-.. _`Admin Guide`: http://www.rabbitmq.com/admin-guide.html
-
-.. _`access control`: http://www.rabbitmq.com/admin-guide.html#access-control
-
-
-If you are still using version 1.5.0 or below, please use ``map_user_vhost``::
-
-    $ rabbitmqctl map_user_vhost myuser myvhost
-
-
-Configuring your Django project to use Celery
----------------------------------------------
-
-You only need three simple steps to use celery with your Django project.
-
-    1. Add ``celery`` to ``INSTALLED_APPS``.
-
-    2. Create the celery database tables::
-
-            $ python manage.py syncdb
-
-    3. Configure celery to use the AMQP user and virtual host we created
-        before, by adding the following to your ``settings.py``::
-
-            AMQP_SERVER = "localhost"
-            AMQP_PORT = 5672
-            AMQP_USER = "myuser"
-            AMQP_PASSWORD = "mypassword"
-            AMQP_VHOST = "myvhost"
-
-
-That's it.
-
-There are more options available, like how many processes you want to process
-work in parallel (the ``CELERY_CONCURRENCY`` setting), and the backend used
-for storing task statuses. But for now, this should do. For all of the options
-available, please consult the `API Reference`_
-
-**Note**: If you're using SQLite as the Django database back-end,
-``celeryd`` will only be able to process one task at a time, this is
-because SQLite doesn't allow concurrent writes.
-
-Running the celery worker server
---------------------------------
-
-To test this we'll be running the worker server in the foreground, so we can
-see what's going on without consulting the logfile::
-
-    $ python manage.py celeryd
-
-
-However, in production you probably want to run the worker in the
-background, as a daemon:: 
-
-    $ python manage.py celeryd --detach
-
-
-For a complete listing of the command line arguments available, with a short
-description, you can use the help command::
-
-    $ python manage.py help celeryd
-
-
-Defining and executing tasks
-----------------------------
-
-**Please note** All of these tasks has to be stored in a real module, they can't
-be defined in the python shell or ipython/bpython. This is because the celery
-worker server needs access to the task function to be able to run it.
-So while it looks like we use the python shell to define the tasks in these
-examples, you can't do it this way. Put them in the ``tasks`` module of your
-Django application. The worker server will automatically load any ``tasks.py``
-file for all of the applications listed in ``settings.INSTALLED_APPS``.
-Executing tasks using ``delay`` and ``apply_async`` can be done from the
-python shell, but keep in mind that since arguments are pickled, you can't
-use custom classes defined in the shell session.
-
-While you can use regular functions, the recommended way is to define
-a task class. This way you can cleanly upgrade the task to use the more
-advanced features of celery later.
-
-This is a task that basically does nothing but take some arguments,
-and return a value:
-
-    >>> from celery.task import Task
-    >>> from celery.registry import tasks
-    >>> class MyTask(Task):
-    ...     def run(self, some_arg, **kwargs):
-    ...         logger = self.get_logger(**kwargs)
-    ...         logger.info("Did something: %s" % some_arg)
-    ...         return 42
-    >>> tasks.register(MyTask)
-
-As you can see the worker is sending some keyword arguments to this task,
-this is the default keyword arguments. A task can choose not to take these,
-or only list the ones it want (the worker will do the right thing).
-The current default keyword arguments are:
-
-    * logfile
-
-        The currently used log file, can be passed on to ``self.get_logger``
-        to gain access to the workers log file via a ``logger.Logging``
-        instance.
-
-    * loglevel
-
-        The current loglevel used.
-
-    * task_id
-
-        The unique id of the executing task.
-
-    * task_name
-
-        Name of the executing task.
-
-    * task_retries
-
-        How many times the current task has been retried.
-        (an integer starting a ``0``).
-
-Now if we want to execute this task, we can use the ``delay`` method of the
-task class (this is a handy shortcut to the ``apply_async`` method which gives
-you greater control of the task execution).
-
-    >>> from myapp.tasks import MyTask
-    >>> MyTask.delay(some_arg="foo")
-
-At this point, the task has been sent to the message broker. The message
-broker will hold on to the task until a celery worker server has successfully
-picked it up.
-
-*Note* If everything is just hanging when you execute ``delay``, please check
-that RabbitMQ is running, and that the user/password has access to the virtual
-host you configured earlier.
-
-Right now we have to check the celery worker logfiles to know what happened with
-the task. This is because we didn't keep the ``AsyncResult`` object returned
-by ``delay``.
-
-The ``AsyncResult`` lets us find the state of the task, wait for the task to
-finish and get its return value (or exception if the task failed).
-
-So, let's execute the task again, but this time we'll keep track of the task:
-
-    >>> result = MyTask.delay(some_arg="foo bar baz")
-    >>> result.ready() # returns True if the task has finished processing.
-    False
-    >>> result.result # task is not ready, so no return value yet.
-    None
-    >>> result.get()   # Waits until the task is done and returns the retval.
-    42
-    >>> result.result
-    42
-    >>> result.successful() # returns True if the task didn't end in failure.
-    True
-
-
-If the task raises an exception, ``result.successful()`` will be ``False``,
-and ``result.result`` will contain the exception instance raised.
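
As a minimal sketch of the failure case (``FailingTask`` is a hypothetical
task whose ``run`` method raises ``ValueError("something went wrong")``):

    >>> result = FailingTask.delay()
    >>> result.get()        # re-raises the exception in the caller
    Traceback (most recent call last):
        ...
    ValueError: something went wrong
    >>> result.successful()
    False
    >>> result.result       # the exception instance that was raised
    ValueError('something went wrong',)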
-
-Auto-discovery of tasks
------------------------
-
-``celery`` has an auto-discovery feature like the Django admin, which
-automatically loads any ``tasks.py`` module in the applications listed
-in ``settings.INSTALLED_APPS``. This autodiscovery is used by the celery
-worker to find registered tasks for your Django project.
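
As a minimal sketch of the layout this relies on (the project and application
names are made up)::

    myproject/
        settings.py         # "myapp" is listed in INSTALLED_APPS
        myapp/
            __init__.py
            tasks.py        # tasks defined here are registered automatically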
-
-Periodic Tasks
----------------
-
-Periodic tasks are tasks that are run every ``n`` seconds. 
-Here's an example of a periodic task:
-
-    >>> from celery.task import PeriodicTask
-    >>> from celery.registry import tasks
-    >>> from datetime import timedelta
-    >>> class MyPeriodicTask(PeriodicTask):
-    ...     run_every = timedelta(seconds=30)
-    ...
-    ...     def run(self, **kwargs):
-    ...         logger = self.get_logger(**kwargs)
-    ...         logger.info("Running periodic task!")
-    ...
-    >>> tasks.register(MyPeriodicTask)
-
-**Note:** Periodic tasks do not support arguments, as this doesn't
-really make sense.
-
-
-A look inside the worker
-========================
-
-.. image:: http://cloud.github.com/downloads/ask/celery/InsideTheWorker-v2.jpg
-
 Getting Help
 ============
 
@@ -421,3 +237,4 @@ This software is licensed under the ``New BSD License``. See the ``LICENSE``
 file in the top distribution directory for the full license text.
 
 .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround
+

+ 7 - 0
bin/celerybeat

@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import sys
+from celery.bin.celerybeat import run_clockservice, parse_options
+
+if __name__ == "__main__":
+    options = parse_options(sys.argv[1:])
+    run_clockservice(**vars(options))

+ 2 - 0
bin/celeryd

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 import sys
+if not '' in sys.path:
+    sys.path.insert(0, '')
 from celery.bin.celeryd import run_worker, parse_options
 
 if __name__ == "__main__":

+ 3 - 1
bin/celeryinit

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
-
+import sys
+if not '' in sys.path:
+    sys.path.insert(0, '')
 from celery.bin.celeryinit import main
 
 if __name__ == "__main__":

+ 1 - 1
celery/__init__.py

@@ -1,6 +1,6 @@
 """Distributed Task Queue"""
 
-VERSION = (0, 8, 0)
+VERSION = (0, 9, 6)
 
 __version__ = ".".join(map(str, VERSION))
 __author__ = "Ask Solem"

+ 32 - 42
celery/backends/__init__.py

@@ -1,41 +1,49 @@
-"""celery.backends"""
-from functools import partial
+import importlib
+
+from billiard.utils.functional import curry
+from carrot.utils import rpartition
+
 from celery import conf
-import sys
 
+BACKEND_ALIASES = {
+    "amqp": "celery.backends.amqp.AMQPBackend",
+    "database": "celery.backends.database.DatabaseBackend",
+    "db": "celery.backends.database.DatabaseBackend",
+    "redis": "celery.backends.pyredis.RedisBackend",
+    "cache": "celery.backends.cache.CacheBackend",
+    "mongodb": "celery.backends.mongodb.MongoBackend",
+    "tyrant": "celery.backends.tyrant.TyrantBackend",
+}
 
-def get_backend_cls(backend):
-    """Get backend class by name.
+_backend_cache = {}
 
-    If the name does not include "``.``" (is not fully qualified),
-    ``"celery.backends."`` will be prepended to the name. e.g.
-    ``"database"`` becomes ``"celery.backends.database"``.
 
-    """
-    if backend.find(".") == -1:
-        backend = "celery.backends.%s" % backend
-    __import__(backend)
-    backend_module = sys.modules[backend]
-    return getattr(backend_module, "Backend")
+def resolve_backend(backend):
+    backend = BACKEND_ALIASES.get(backend, backend)
+    backend_module_name, _, backend_cls_name = rpartition(backend, ".")
+    return backend_module_name, backend_cls_name
 
-"""
-.. function:: get_default_backend_cls()
 
-    Get the backend class specified in :setting:`CELERY_BACKEND`.
+def _get_backend_cls(backend):
+    backend_module_name, backend_cls_name = resolve_backend(backend)
+    backend_module = importlib.import_module(backend_module_name)
+    return getattr(backend_module, backend_cls_name)
 
-"""
-get_default_backend_cls = partial(get_backend_cls, conf.CELERY_BACKEND)
+
+def get_backend_cls(backend):
+    """Get backend class by name/alias"""
+    if backend not in _backend_cache:
+        _backend_cache[backend] = _get_backend_cls(backend)
+    return _backend_cache[backend]
 
 
 """
-.. function:: get_default_periodicstatus_backend_cls()
+.. function:: get_default_backend_cls()
 
-    Get the backend class specified in
-    :setting:`CELERY_PERIODIC_STATUS_BACKEND`.
+    Get the backend class specified in :setting:`CELERY_BACKEND`.
 
 """
-get_default_periodicstatus_backend_cls = partial(get_backend_cls,
-                                        conf.CELERY_PERIODIC_STATUS_BACKEND)
+get_default_backend_cls = curry(get_backend_cls, conf.CELERY_BACKEND)
 
 
 """
@@ -47,16 +55,6 @@ get_default_periodicstatus_backend_cls = partial(get_backend_cls,
 """
 DefaultBackend = get_default_backend_cls()
 
-
-"""
-.. class:: DefaultPeriodicStatusBackend
-
-    The default backend for storing periodic task metadata, specified
-    in :setting:`CELERY_PERIODIC_STATUS_BACKEND`.
-
-"""
-DefaultPeriodicStatusBackend = get_default_periodicstatus_backend_cls()
-
 """
 .. data:: default_backend
 
@@ -64,11 +62,3 @@ DefaultPeriodicStatusBackend = get_default_periodicstatus_backend_cls()
 
 """
 default_backend = DefaultBackend()
-
-"""
-.. data:: default_periodic_status_backend
-
-    An instance of :class:`DefaultPeriodicStatusBackend`.
-
-"""
-default_periodic_status_backend = DefaultPeriodicStatusBackend()
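
A hedged sketch of how the new alias resolution above is meant to be used
(the values shown are illustrative):

    >>> from celery.backends import resolve_backend, get_backend_cls
    >>> resolve_backend("db")    # alias expands, then splits on the last "."
    ('celery.backends.database', 'DatabaseBackend')
    >>> get_backend_cls("database").__name__  # later lookups hit _backend_cache
    'DatabaseBackend'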

+ 20 - 17
celery/backends/amqp.py

@@ -1,12 +1,12 @@
 """celery.backends.amqp"""
-from carrot.connection import DjangoBrokerConnection
 from carrot.messaging import Consumer, Publisher
-from celery.backends.base import BaseBackend
 
-RESULTSTORE_EXCHANGE = "celeryresults"
+from celery import conf
+from celery.messaging import establish_connection
+from celery.backends.base import BaseBackend
 
 
-class Backend(BaseBackend):
+class AMQPBackend(BaseBackend):
     """AMQP backend. Publish results by sending messages to the broker
     using the task id as routing key.
 
@@ -16,30 +16,37 @@ class Backend(BaseBackend):
 
     """
 
+    exchange = conf.RESULT_EXCHANGE
     capabilities = ["ResultStore"]
+    _connection = None
 
     def __init__(self, *args, **kwargs):
-        super(Backend, self).__init__(*args, **kwargs)
-        self.connection = DjangoBrokerConnection()
+        super(AMQPBackend, self).__init__(*args, **kwargs)
         self._cache = {}
 
+    @property
+    def connection(self):
+        if not self._connection:
+            self._connection = establish_connection()
+        return self._connection
+
     def _declare_queue(self, task_id, connection):
         routing_key = task_id.replace("-", "")
         backend = connection.create_backend()
         backend.queue_declare(queue=routing_key, durable=True,
                                 exclusive=False, auto_delete=True)
-        backend.exchange_declare(exchange=RESULTSTORE_EXCHANGE,
+        backend.exchange_declare(exchange=self.exchange,
                                  type="direct",
                                  durable=True,
                                  auto_delete=False)
-        backend.queue_bind(queue=routing_key, exchange=RESULTSTORE_EXCHANGE,
+        backend.queue_bind(queue=routing_key, exchange=self.exchange,
                            routing_key=routing_key)
         backend.close()
 
     def _publisher_for_task_id(self, task_id, connection):
         routing_key = task_id.replace("-", "")
         self._declare_queue(task_id, connection)
-        p = Publisher(connection, exchange=RESULTSTORE_EXCHANGE,
+        p = Publisher(connection, exchange=self.exchange,
                       exchange_type="direct",
                       routing_key=routing_key)
         return p
@@ -48,7 +55,7 @@ class Backend(BaseBackend):
         routing_key = task_id.replace("-", "")
         self._declare_queue(task_id, connection)
         return Consumer(connection, queue=routing_key,
-                        exchange=RESULTSTORE_EXCHANGE,
+                        exchange=self.exchange,
                         exchange_type="direct",
                         no_ack=False, auto_ack=False,
                         auto_delete=True,
@@ -56,11 +63,7 @@ class Backend(BaseBackend):
 
     def store_result(self, task_id, result, status, traceback=None):
         """Send task return value and status."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status == "FAILURE":
-            result = self.prepare_exception(result)
-
+        result = self.encode_result(result, status)
 
         meta = {"task_id": task_id,
                 "result": result,
@@ -74,9 +77,9 @@ class Backend(BaseBackend):
 
         return result
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if task with ``task_id`` has been executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get the status of a task."""

+ 33 - 26
celery/backends/base.py

@@ -1,18 +1,34 @@
 """celery.backends.base"""
 import time
-import operator
-from celery.serialization import pickle
-from celery.serialization import get_pickled_exception
-from celery.serialization import get_pickleable_exception
+
+from billiard.serialization import pickle
+from billiard.serialization import get_pickled_exception
+from billiard.serialization import get_pickleable_exception
+
 from celery.exceptions import TimeoutError
 
+READY_STATES = frozenset(["SUCCESS", "FAILURE"])
+UNREADY_STATES = frozenset(["PENDING", "RETRY"])
+EXCEPTION_STATES = frozenset(["RETRY", "FAILURE"])
+
 
 class BaseBackend(object):
     """The base backend class. All backends should inherit from this."""
 
-    capabilities = []
+    READY_STATES = READY_STATES
+    UNREADY_STATES = UNREADY_STATES
+    EXCEPTION_STATES = EXCEPTION_STATES
+
     TimeoutError = TimeoutError
 
+    capabilities = []
+
+    def encode_result(self, result, status):
+        if status == "SUCCESS":
+            return self.prepare_value(result)
+        elif status in self.EXCEPTION_STATES:
+            return self.prepare_exception(result)
+
     def store_result(self, task_id, result, status):
         """Store the result and status of a task."""
         raise NotImplementedError(
@@ -20,7 +36,7 @@ class BaseBackend(object):
 
     def mark_as_done(self, task_id, result):
         """Mark task as successfully executed."""
-        return self.store_result(task_id, result, status="DONE")
+        return self.store_result(task_id, result, status="SUCCESS")
 
     def mark_as_failure(self, task_id, exc, traceback=None):
         """Mark task as executed with failure. Stores the execption."""
@@ -46,10 +62,8 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "get_status is not supported by this backend.")
 
-    def prepare_result(self, result):
-        """Prepare result for storage."""
-        if result is None:
-            return True
+    def prepare_value(self, result):
+        """Prepare value for storage."""
         return result
 
     def get_result(self, task_id):
@@ -62,9 +76,9 @@ class BaseBackend(object):
         raise NotImplementedError(
                 "get_traceback is not supported by this backend.")
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task was successfully executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def cleanup(self):
         """Backend cleanup. Is run by
@@ -88,7 +102,7 @@ class BaseBackend(object):
 
         while True:
             status = self.get_status(task_id)
-            if status == "DONE":
+            if status == "SUCCESS":
                 return self.get_result(task_id)
             elif status == "FAILURE":
                 raise self.get_result(task_id)
@@ -99,11 +113,7 @@ class BaseBackend(object):
                 raise TimeoutError("The operation timed out.")
 
     def process_cleanup(self):
-        """Cleanup actions to do at the end of a task worker process.
-
-        See :func:`celery.worker.jail`.
-
-        """
+        """Cleanup actions to do at the end of a task worker process."""
         pass
 
     def store_taskset(self, taskset_id, result):
@@ -140,10 +150,7 @@ class KeyValueStoreBackend(BaseBackend):
 
     def store_result(self, task_id, result, status, traceback=None):
         """Store task result and status."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status == "FAILURE":
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
         meta = {"status": status, "result": result, "traceback": traceback}
         self.set(self.get_cache_key_for_task(task_id), pickle.dumps(meta))
         return result
@@ -155,7 +162,7 @@ class KeyValueStoreBackend(BaseBackend):
     def get_result(self, task_id):
         """Get the result of a task."""
         meta = self._get_task_meta_for(task_id)
-        if meta["status"] == "FAILURE":
+        if meta["status"] in self.EXCEPTION_STATES:
             return self.exception_to_python(meta["result"])
         else:
             return meta["result"]
@@ -165,9 +172,9 @@ class KeyValueStoreBackend(BaseBackend):
         meta = self._get_task_meta_for(task_id)
         return meta["traceback"]
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task executed successfully."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
@@ -177,6 +184,6 @@ class KeyValueStoreBackend(BaseBackend):
         if not meta:
             return {"status": "PENDING", "result": None}
         meta = pickle.loads(str(meta))
-        if meta.get("status") == "DONE":
+        if meta.get("status") == "SUCCESS":
             self._cache[task_id] = meta
         return meta

+ 19 - 6
celery/backends/cache.py

@@ -1,9 +1,13 @@
 """celery.backends.cache"""
+from datetime import timedelta
+
+from django.utils.encoding import smart_str
 from django.core.cache import cache, get_cache
 from django.core.cache.backends.base import InvalidCacheBackendError
-from django.utils.encoding import smart_str
-from celery.backends.base import KeyValueStoreBackend
+
 from celery import conf
+from celery.utils import timedelta_seconds
+from celery.backends.base import KeyValueStoreBackend
 
 # CELERY_CACHE_BACKEND overrides the django-global(tm) backend settings.
 if conf.CELERY_CACHE_BACKEND:
@@ -31,19 +35,28 @@ class DjangoMemcacheWrapper(object):
 # Check if django is using memcache as the cache backend. If so, wrap the
 # cache object in a DjangoMemcacheWrapper that fixes a bug with retrieving
 # pickled data
+from django.core.cache.backends.base import InvalidCacheBackendError
 try:
     from django.core.cache.backends.memcached import CacheClass
-    if isinstance(cache, CacheClass):
-        cache = DjangoMemcacheWrapper(cache)
 except InvalidCacheBackendError:
     pass
+else:
+    if isinstance(cache, CacheClass):
+        cache = DjangoMemcacheWrapper(cache)
 
 
-class Backend(KeyValueStoreBackend):
+class CacheBackend(KeyValueStoreBackend):
     """Backend using the Django cache framework to store task metadata."""
 
+    def __init__(self, *args, **kwargs):
+        super(CacheBackend, self).__init__(*args, **kwargs)
+        expires = conf.TASK_RESULT_EXPIRES
+        if isinstance(expires, timedelta):
+            expires = timedelta_seconds(conf.TASK_RESULT_EXPIRES)
+        self.expires = expires
+
     def get(self, key):
         return cache.get(key)
 
     def set(self, key, value):
-        cache.set(key, value)
+        cache.set(key, value, self.expires)

+ 8 - 31
celery/backends/database.py

@@ -1,41 +1,19 @@
-"""celery.backends.database"""
-from celery.models import TaskMeta, TaskSetMeta, PeriodicTaskMeta
+from celery.models import TaskMeta, TaskSetMeta
 from celery.backends.base import BaseBackend
 
 
-class Backend(BaseBackend):
+class DatabaseBackend(BaseBackend):
     """The database backends. Using Django models to store task metadata."""
 
-    capabilities = ["ResultStore", "PeriodicStatus"]
+    capabilities = ["ResultStore"]
 
     def __init__(self, *args, **kwargs):
-        super(Backend, self).__init__(*args, **kwargs)
+        super(DatabaseBackend, self).__init__(*args, **kwargs)
         self._cache = {}
 
-    def init_periodic_tasks(self):
-        """Create entries for all periodic tasks in the database."""
-        PeriodicTaskMeta.objects.init_entries()
-
-    def run_periodic_tasks(self):
-        """Run all waiting periodic tasks.
-
-        :returns: a list of ``(task, task_id)`` tuples containing
-            the task class and id for the resulting tasks applied.
-
-        """
-        waiting_tasks = PeriodicTaskMeta.objects.get_waiting_tasks()
-        task_id_tuples = []
-        for waiting_task in waiting_tasks:
-            task_id = waiting_task.delay()
-            task_id_tuples.append((waiting_task, task_id))
-        return task_id_tuples
-
     def store_result(self, task_id, result, status, traceback=None):
         """Store return value and status of an executed task."""
-        if status == "DONE":
-            result = self.prepare_result(result)
-        elif status in ["FAILURE", "RETRY"]:
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
         TaskMeta.objects.store_result(task_id, result, status,
                                       traceback=traceback)
         return result
@@ -45,9 +23,9 @@ class Backend(BaseBackend):
         TaskSetMeta.objects.store_result(taskset_id, result)
         return result
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if task with ``task_id`` has been executed."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get the status of a task."""
@@ -70,7 +48,7 @@ class Backend(BaseBackend):
         if task_id in self._cache:
             return self._cache[task_id]
         meta = TaskMeta.objects.get_task(task_id)
-        if meta.status == "DONE":
+        if meta.status == "SUCCESS":
             self._cache[task_id] = meta
         return meta
 
@@ -93,4 +71,3 @@ class Backend(BaseBackend):
         """Delete expired metadata."""
         TaskMeta.objects.delete_expired()
         TaskSetMeta.objects.delete_expired()
-

+ 15 - 103
celery/backends/mongodb.py

@@ -1,22 +1,16 @@
 """MongoDB backend for celery."""
-
-import random
-from datetime import datetime, timedelta
+from datetime import datetime
 
 from django.core.exceptions import ImproperlyConfigured
-from celery.serialization import pickle
-from celery.backends.base import BaseBackend
-from celery.loaders import settings
-from celery.conf import TASK_RESULT_EXPIRES
-from celery.registry import tasks
-
+from billiard.serialization import pickle
 try:
     import pymongo
 except ImportError:
     pymongo = None
 
-# taken from celery.managers.PeriodicTaskManager
-SERVER_DRIFT = timedelta(seconds=random.vonmisesvariate(1, 4))
+from celery.backends.base import BaseBackend
+from celery.loaders import load_settings
+from celery.conf import TASK_RESULT_EXPIRES
 
 
 class Bunch:
@@ -25,9 +19,9 @@ class Bunch:
         self.__dict__.update(kw)
 
 
-class Backend(BaseBackend):
+class MongoBackend(BaseBackend):
 
-    capabilities = ("ResultStore", "PeriodicStatus")
+    capabilities = ["ResultStore"]
 
     mongodb_host = 'localhost'
     mongodb_port = 27017
@@ -35,7 +29,6 @@ class Backend(BaseBackend):
     mongodb_password = None
     mongodb_database = 'celery'
     mongodb_taskmeta_collection = 'celery_taskmeta'
-    mongodb_periodictaskmeta_collection = 'celery_periodictaskmeta'
 
     def __init__(self, *args, **kwargs):
         """Initialize MongoDB backend instance.
@@ -50,6 +43,8 @@ class Backend(BaseBackend):
                 "You need to install the pymongo library to use the "
                 "MongoDB backend.")
 
+        settings = load_settings()
+
         conf = getattr(settings, "CELERY_MONGODB_BACKEND_SETTINGS", None)
         if conf is not None:
             if not isinstance(conf, dict):
@@ -65,11 +60,8 @@ class Backend(BaseBackend):
                     'database', self.mongodb_database)
             self.mongodb_taskmeta_collection = conf.get(
                 'taskmeta_collection', self.mongodb_taskmeta_collection)
-            self.mongodb_collection_periodictaskmeta = conf.get(
-                'periodictaskmeta_collection',
-                self.mongodb_periodictaskmeta_collection)
 
-        super(Backend, self).__init__(*args, **kwargs)
+        super(MongoBackend, self).__init__(*args, **kwargs)
         self._cache = {}
         self._connection = None
         self._database = None
@@ -104,96 +96,16 @@ class Backend(BaseBackend):
             # goes out of scope
             self._connection = None
 
-    def init_periodic_tasks(self):
-        """Create collection for periodic tasks in database."""
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-        collection.ensure_index("name", pymongo.ASCENDING, unique=True)
-
-        periodic_tasks = tasks.get_all_periodic()
-        for task_name in periodic_tasks.keys():
-            if not collection.find_one({"name": task_name}):
-                collection.save({"name": task_name,
-                                 "last_run_at": datetime.fromtimestamp(0),
-                                 "total_run_count": 0}, safe=True)
-
-    def run_periodic_tasks(self):
-        """Run all waiting periodic tasks.
-
-        :returns: a list of ``(task, task_id)`` tuples containing
-            the task class and id for the resulting tasks applied.
-        """
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-
-        waiting_tasks = self._get_waiting_tasks()
-        task_id_tuples = []
-        for waiting_task in waiting_tasks:
-            task = tasks[waiting_task['name']]
-            resp = task.delay()
-            collection.update({'_id': waiting_task['_id']},
-                              {"$inc": {"total_run_count": 1}})
-
-            task_meta = Bunch(name=waiting_task['name'],
-                              last_run_at=waiting_task['last_run_at'],
-                              total_run_count=waiting_task['total_run_count'])
-            task_id_tuples.append((task_meta, resp.task_id))
-
-        return task_id_tuples
-
-    def _is_time(self, last_run_at, run_every):
-        """Check if if it is time to run the periodic task.
-
-        :param last_run_at: Last time the periodic task was run.
-        :param run_every: How often to run the periodic task.
-
-        :rtype bool:
-
-        """
-        # code taken from celery.managers.PeriodicTaskManager
-        run_every_drifted = run_every + SERVER_DRIFT
-        run_at = last_run_at + run_every_drifted
-        if datetime.now() > run_at:
-            return True
-        return False
-
-    def _get_waiting_tasks(self):
-        """Get all waiting periodic tasks."""
-        db = self._get_database()
-        collection = db[self.mongodb_periodictaskmeta_collection]
-
-        periodic_tasks = tasks.get_all_periodic()
-
-        # find all periodic tasks to be run
-        waiting = []
-        for task_meta in collection.find():
-            if task_meta['name'] in periodic_tasks:
-                task = periodic_tasks[task_meta['name']]
-                run_every = task.run_every
-                if self._is_time(task_meta['last_run_at'], run_every):
-                    collection.update(
-                        {"name": task_meta['name'],
-                         "last_run_at": task_meta['last_run_at']},
-                        {"$set": {"last_run_at": datetime.utcnow()}})
-
-                    if db.last_status()['updatedExisting']:
-                        waiting.append(task_meta)
-
-        return waiting
-
     def store_result(self, task_id, result, status, traceback=None):
         """Store return value and status of an executed task."""
         from pymongo.binary import Binary
 
-        if status == 'DONE':
-            result = self.prepare_result(result)
-        elif status == 'FAILURE':
-            result = self.prepare_exception(result)
+        result = self.encode_result(result, status)
 
         meta = {"_id": task_id,
                 "status": status,
                 "result": Binary(pickle.dumps(result)),
-                "date_done": datetime.utcnow(),
+                "date_done": datetime.now(),
                 "traceback": Binary(pickle.dumps(traceback))}
 
         db = self._get_database()
@@ -201,9 +113,9 @@ class Backend(BaseBackend):
 
         taskmeta_collection.save(meta, safe=True)
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task executed successfully."""
-        return self.get_status(task_id) == "DONE"
+        return self.get_status(task_id) == "SUCCESS"
 
     def get_status(self, task_id):
         """Get status of a task."""
@@ -240,7 +152,7 @@ class Backend(BaseBackend):
             "date_done": obj["date_done"],
             "traceback": pickle.loads(str(obj["traceback"])),
         }
-        if meta["status"] == "DONE":
+        if meta["status"] == "SUCCESS":
             self._cache[task_id] = meta
 
         return meta

+ 95 - 0
celery/backends/pyredis.py

@@ -0,0 +1,95 @@
+from django.core.exceptions import ImproperlyConfigured
+
+from celery.backends.base import KeyValueStoreBackend
+from celery.loaders import load_settings
+
+try:
+    import redis
+except ImportError:
+    redis = None
+
+
+class RedisBackend(KeyValueStoreBackend):
+    """Redis based task backend store.
+
+    .. attribute:: redis_host
+
+        The hostname to the Redis server.
+
+    .. attribute:: redis_port
+
+        The port to the Redis server.
+
+        Raises :class:`django.core.exceptions.ImproperlyConfigured` if
+        :setting:`REDIS_HOST` or :setting:`REDIS_PORT` is not set.
+
+    """
+    redis_host = "localhost"
+    redis_port = 6379
+    redis_db = "celery_results"
+    redis_timeout = None
+    redis_connect_retry = None
+
+    def __init__(self, redis_host=None, redis_port=None, redis_db=None,
+            redis_timeout=None,
+            redis_connect_retry=None,
+            redis_connect_timeout=None):
+        if redis is None:
+            raise ImproperlyConfigured(
+                    "You need to install the redis library in order to use "
+                  + "the Redis result store backend.")
+
+        settings = load_settings()
+        self.redis_host = redis_host or \
+                            getattr(settings, "REDIS_HOST", self.redis_host)
+        self.redis_port = redis_port or \
+                            getattr(settings, "REDIS_PORT", self.redis_port)
+        self.redis_db = redis_db or \
+                            getattr(settings, "REDIS_DB", self.redis_db)
+        self.redis_timeout = redis_timeout or \
+                            getattr(settings, "REDIS_TIMEOUT",
+                                    self.redis_timeout)
+        self.redis_connect_retry = redis_connect_retry or \
+                            getattr(settings, "REDIS_CONNECT_RETRY",
+                                    self.redis_connect_retry)
+        if self.redis_port:
+            self.redis_port = int(self.redis_port)
+        if not self.redis_host or not self.redis_port:
+            raise ImproperlyConfigured(
+                "In order to use the Redis result store backend, you have to "
+                "set the REDIS_HOST and REDIS_PORT settings")
+        super(RedisBackend, self).__init__()
+        self._connection = None
+
+    def open(self):
+        """Get :class:`redis.Redis`` instance with the current
+        server configuration.
+
+        The connection is then cached until you do an
+        explicit :meth:`close`.
+
+        """
+        # connection overrides bool()
+        if self._connection is None:
+            self._connection = redis.Redis(host=self.redis_host,
+                                    port=self.redis_port,
+                                    db=self.redis_db,
+                                    timeout=self.redis_timeout,
+                                    retry_connection=self.redis_connect_retry)
+            self._connection.connect()
+        return self._connection
+
+    def close(self):
+        """Close the connection to redis."""
+        if self._connection is not None:
+            self._connection.disconnect()
+            self._connection = None
+
+    def process_cleanup(self):
+        self.close()
+
+    def get(self, key):
+        return self.open().get(key)
+
+    def set(self, key, value):
+        self.open().set(key, value)
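
A minimal sketch of the Django settings the new ``RedisBackend`` reads, with
example values only (``REDIS_TIMEOUT`` and ``REDIS_CONNECT_RETRY`` may also be
set)::

    # settings.py (illustrative values)
    CELERY_BACKEND = "redis"        # resolved via BACKEND_ALIASES
    REDIS_HOST = "localhost"
    REDIS_PORT = 6379
    REDIS_DB = "celery_results"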

+ 6 - 5
celery/backends/tyrant.py

@@ -1,15 +1,15 @@
 """celery.backends.tyrant"""
 from django.core.exceptions import ImproperlyConfigured
-from celery.backends.base import KeyValueStoreBackend
-from celery.loaders import settings
-
 try:
     import pytyrant
 except ImportError:
     pytyrant = None
 
+from celery.backends.base import KeyValueStoreBackend
+from celery.loaders import load_settings
+
 
-class Backend(KeyValueStoreBackend):
+class TyrantBackend(KeyValueStoreBackend):
     """Tokyo Cabinet based task backend store.
 
     .. attribute:: tyrant_host
@@ -36,6 +36,7 @@ class Backend(KeyValueStoreBackend):
             raise ImproperlyConfigured(
                     "You need to install the pytyrant library to use the "
                   + "Tokyo Tyrant backend.")
+        settings = load_settings()
         self.tyrant_host = tyrant_host or \
                             getattr(settings, "TT_HOST", self.tyrant_host)
         self.tyrant_port = tyrant_port or \
@@ -46,7 +47,7 @@ class Backend(KeyValueStoreBackend):
             raise ImproperlyConfigured(
                 "To use the Tokyo Tyrant backend, you have to "
                 "set the TT_HOST and TT_PORT settings in your settings.py")
-        super(Backend, self).__init__()
+        super(TyrantBackend, self).__init__()
         self._connection = None
 
     def open(self):

+ 232 - 0
celery/beat.py

@@ -0,0 +1,232 @@
+import time
+import shelve
+import threading
+from datetime import datetime
+from UserDict import UserDict
+
+from celery import log
+from celery import conf
+from celery import registry as _registry
+from celery.utils.info import humanize_seconds
+
+
+class SchedulingError(Exception):
+    """An error occured while scheduling a task."""
+
+
+class ScheduleEntry(object):
+    """An entry in the scheduler.
+
+    :param name: see :attr:`name`.
+    :keyword last_run_at: see :attr:`last_run_at`.
+    :keyword total_run_count: see :attr:`total_run_count`.
+
+    .. attribute:: name
+
+        The name of the periodic task.
+
+    .. attribute:: last_run_at
+
+        The time and date of when this task was last run.
+
+    .. attribute:: total_run_count
+
+        Total number of times this periodic task has been executed.
+
+    """
+
+    def __init__(self, name, last_run_at=None, total_run_count=None):
+        self.name = name
+        self.last_run_at = last_run_at or datetime.now()
+        self.total_run_count = total_run_count or 0
+
+    def next(self):
+        """Returns a new instance of the same class, but with
+        its date and count fields updated."""
+        return self.__class__(self.name,
+                              datetime.now(),
+                              self.total_run_count + 1)
+
+    def is_due(self, task):
+        """See :meth:`celery.task.base.PeriodicTask.is_due`."""
+        return task.is_due(self.last_run_at)
+
+
+class Scheduler(UserDict):
+    """Scheduler for periodic tasks.
+
+    :keyword registry: see :attr:`registry`.
+    :keyword schedule: see :attr:`schedule`.
+    :keyword logger:  see :attr:`logger`.
+    :keyword max_interval: see :attr:`max_interval`.
+
+    .. attribute:: registry
+
+        The task registry to use.
+
+    .. attribute:: schedule
+
+        The schedule dict/shelve.
+
+    .. attribute:: logger
+
+        The logger to use.
+
+    .. attribute:: max_interval
+
+        Maximum time to sleep between re-checking the schedule.
+
+    """
+
+    def __init__(self, registry=None, schedule=None, logger=None,
+            max_interval=None):
+        self.registry = registry or _registry.TaskRegistry()
+        self.data = schedule or {}
+        self.logger = logger or log.get_default_logger()
+        self.max_interval = max_interval or conf.CELERYBEAT_MAX_LOOP_INTERVAL
+
+        self.cleanup()
+        self.schedule_registry()
+
+    def tick(self):
+        """Run a tick, that is one iteration of the scheduler.
+        Executes all due tasks."""
+        debug = self.logger.debug
+        error = self.logger.error
+
+        remaining_times = []
+        for entry in self.schedule.values():
+            is_due, next_time_to_run = self.is_due(entry)
+            if is_due:
+                debug("Scheduler: Sending due task %s" % entry.name)
+                try:
+                    result = self.apply_async(entry)
+                except SchedulingError, exc:
+                    error("Scheduler: %s" % exc)
+                else:
+                    debug("%s sent. id->%s" % (entry.name, result.task_id))
+            if next_time_to_run:
+                remaining_times.append(next_time_to_run)
+
+        return min(remaining_times + [self.max_interval])
+
+    def get_task(self, name):
+        return self.registry[name]
+
+    def is_due(self, entry):
+        return entry.is_due(self.get_task(entry.name))
+
+    def apply_async(self, entry):
+
+        # Update timestamps and run counts before we actually execute,
+        # so we have that done if an exception is raised (doesn't schedule
+        # forever.)
+        entry = self.schedule[entry.name] = entry.next()
+        task = self.get_task(entry.name)
+
+        try:
+            result = task.apply_async()
+        except Exception, exc:
+            raise SchedulingError("Couldn't apply scheduled task %s: %s" % (
+                    task.name, exc))
+        return result
+
+    def schedule_registry(self):
+        """Add the current contents of the registry to the schedule."""
+        for name, task in self.registry.periodic().items():
+            if name not in self.schedule:
+                self.logger.debug("Scheduler: "
+                    "Added periodic task %s to schedule" % name)
+            self.schedule.setdefault(name, ScheduleEntry(task.name))
+
+    def cleanup(self):
+        for task_name, entry in self.schedule.items():
+            if task_name not in self.registry:
+                self.schedule.pop(task_name, None)
+
+    @property
+    def schedule(self):
+        return self.data
+
+
+class ClockService(object):
+    scheduler_cls = Scheduler
+    registry = _registry.tasks
+    open_schedule = lambda self, filename: shelve.open(filename)
+
+    def __init__(self, logger=None,
+            max_interval=conf.CELERYBEAT_MAX_LOOP_INTERVAL,
+            schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME):
+        self.logger = logger or log.get_default_logger()
+        self.max_interval = max_interval
+        self.schedule_filename = schedule_filename
+        self._shutdown = threading.Event()
+        self._stopped = threading.Event()
+        self._schedule = None
+        self._scheduler = None
+        self._in_sync = False
+        silence = self.max_interval < 60 and 10 or 1
+        self.debug = log.SilenceRepeated(self.logger.debug,
+                                         max_iterations=silence)
+
+    def start(self):
+        self.logger.info("ClockService: Starting...")
+        self.logger.debug("ClockService: "
+            "Ticking with max interval->%s, schedule->%s" % (
+                    humanize_seconds(self.max_interval),
+                    self.schedule_filename))
+
+        try:
+            while True:
+                if self._shutdown.isSet():
+                    break
+                interval = self.scheduler.tick()
+                self.debug("ClockService: Waking up %s." % (
+                        humanize_seconds(interval, prefix="in ")))
+                time.sleep(interval)
+        except (KeyboardInterrupt, SystemExit):
+            self.sync()
+        finally:
+            self.sync()
+
+    def sync(self):
+        if self._schedule is not None and not self._in_sync:
+            self.logger.debug("ClockService: Syncing schedule to disk...")
+            self._schedule.sync()
+            self._schedule.close()
+            self._in_sync = True
+            self._stopped.set()
+
+    def stop(self, wait=False):
+        self._shutdown.set()
+        wait and self._stopped.wait() # block until shutdown done.
+
+    @property
+    def schedule(self):
+        if self._schedule is None:
+            filename = self.schedule_filename
+            self._schedule = self.open_schedule(filename=filename)
+        return self._schedule
+
+    @property
+    def scheduler(self):
+        if self._scheduler is None:
+            self._scheduler = self.scheduler_cls(schedule=self.schedule,
+                                            registry=self.registry,
+                                            logger=self.logger,
+                                            max_interval=self.max_interval)
+        return self._scheduler
+
+
+class ClockServiceThread(threading.Thread):
+
+    def __init__(self, *args, **kwargs):
+        self.clockservice = ClockService(*args, **kwargs)
+        threading.Thread.__init__(self)
+        self.setDaemon(True)
+
+    def run(self):
+        self.clockservice.start()
+
+    def stop(self):
+        self.clockservice.stop(wait=True)
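
A minimal sketch of embedding the new clock service in another process; this
mirrors what ``celeryd -B`` does, and "celerybeat-schedule" is the documented
default schedule filename::

    from celery.beat import ClockServiceThread

    # Run the periodic task scheduler in a daemon thread.
    beat = ClockServiceThread(schedule_filename="celerybeat-schedule")
    beat.start()
    # ... later, on shutdown (blocks until the schedule is synced to disk):
    beat.stop()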

+ 115 - 0
celery/bin/celerybeat.py

@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+"""celerybeat
+
+.. program:: celerybeat
+
+.. cmdoption:: -s, --schedule
+
+    Path to the schedule database. Defaults to celerybeat-schedule.
+    The extension ".db" will be appended to the filename.
+
+.. cmdoption:: -f, --logfile
+
+    Path to log file. If no logfile is specified, ``stderr`` is used.
+
+.. cmdoption:: -l, --loglevel
+
+    Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``,
+    ``ERROR``, ``CRITICAL``, or ``FATAL``.
+
+"""
+import sys
+import optparse
+import traceback
+
+import celery
+from celery import conf
+from celery import platform
+from celery.log import emergency_error
+from celery.beat import ClockService
+from celery.utils import info
+
+STARTUP_INFO_FMT = """
+Configuration ->
+    . broker -> %(conninfo)s
+    . schedule -> %(schedule)s
+    . logfile -> %(logfile)s@%(loglevel)s
+""".strip()
+
+OPTION_LIST = (
+    optparse.make_option('-s', '--schedule',
+            default=conf.CELERYBEAT_SCHEDULE_FILENAME,
+            action="store", dest="schedule",
+            help="Path to the schedule database. The extension \
+                    '.db' will be appended to the filename. Default: %s" % (
+                    conf.CELERYBEAT_SCHEDULE_FILENAME)),
+    optparse.make_option('-f', '--logfile', default=conf.CELERYBEAT_LOG_FILE,
+            action="store", dest="logfile",
+            help="Path to log file."),
+    optparse.make_option('-l', '--loglevel',
+            default=conf.CELERYBEAT_LOG_LEVEL,
+            action="store", dest="loglevel",
+            help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."),
+)
+
+
+def run_clockservice(loglevel=conf.CELERYBEAT_LOG_LEVEL,
+        logfile=conf.CELERYBEAT_LOG_FILE,
+        schedule=conf.CELERYBEAT_SCHEDULE_FILENAME, **kwargs):
+    """Starts the celerybeat clock server."""
+
+    print("celerybeat %s is starting." % celery.__version__)
+
+    # Setup logging
+    if not isinstance(loglevel, int):
+        loglevel = conf.LOG_LEVELS[loglevel.upper()]
+
+    # Run the worker init handler.
+    # (Usually imports task modules and such.)
+    from celery.loaders import current_loader
+    current_loader().on_worker_init()
+
+
+    # Dump configuration to screen so we have some basic information
+    # when users sends e-mails.
+
+    print(STARTUP_INFO_FMT % {
+            "conninfo": info.format_broker_info(),
+            "logfile": logfile or "@stderr",
+            "loglevel": conf.LOG_LEVELS[loglevel],
+            "schedule": schedule,
+    })
+
+    print("celerybeat has started.")
+    arg_start = "manage" in sys.argv[0] and 2 or 1
+    platform.set_process_title("celerybeat",
+                               info=" ".join(sys.argv[arg_start:]))
+
+    def _run_clock():
+        from celery.log import setup_logger
+        logger = setup_logger(loglevel, logfile)
+        clockservice = ClockService(logger=logger, schedule_filename=schedule)
+
+        try:
+            clockservice.start()
+        except Exception, e:
+            emergency_error(logfile,
+                    "celerybeat raised exception %s: %s\n%s" % (
+                            e.__class__, e, traceback.format_exc()))
+
+    _run_clock()
+
+
+def parse_options(arguments):
+    """Parse the available options to ``celeryd``."""
+    parser = optparse.OptionParser(option_list=OPTION_LIST)
+    options, values = parser.parse_args(arguments)
+    return options
+
+
+def main():
+    options = parse_options(sys.argv[1:])
+    run_clockservice(**vars(options))
+
+if __name__ == "__main__":
+    main()

+ 104 - 210
celery/bin/celeryd.py

@@ -17,22 +17,18 @@
     Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``,
     ``ERROR``, ``CRITICAL``, or ``FATAL``.
 
-.. cmdoption:: -p, --pidfile
+.. cmdoption:: -n, --hostname
 
-    Path to pidfile.
+    Set custom hostname.
 
-.. cmdoption:: -s, --statistics
+.. cmdoption:: -B, --beat
 
-    Turn on reporting of statistics (remember to flush the statistics message
-    queue from time to time).
+    Also run the ``celerybeat`` periodic task scheduler. Please note that
+    there must only be one instance of this service.
 
-.. cmdoption:: -d, --detach, --daemon
+.. cmdoption:: -E, --events
 
-    Run in the background as a daemon.
-
-.. cmdoption:: -S, --supervised
-
-    Restart the worker server if it dies.
+    Send events that can be captured by monitors like ``celerymon``.
 
 .. cmdoption:: --discard
 
@@ -40,67 +36,41 @@
     **WARNING**: This is unrecoverable, and the tasks will be
     deleted from the messaging server.
 
-.. cmdoption:: -u, --uid
-
-    User-id to run ``celeryd`` as when in daemon mode.
-
-.. cmdoption:: -g, --gid
-
-    Group-id to run ``celeryd`` as when in daemon mode.
-
-.. cmdoption:: --umask
-
-    umask of the process when in daemon mode.
-
-.. cmdoption:: --workdir
-
-    Directory to change to when in daemon mode.
-
-.. cmdoption:: --chroot
-
-    Change root directory to this path when in daemon mode.
-
 """
 import os
 import sys
-CAN_DETACH = True
-try:
-    import resource
-except ImportError:
-    CAN_DETACH = False
-
-from celery.loaders import current_loader
-from celery.loaders import settings
-from celery import __version__
-from celery.supervisor import OFASupervisor
-from celery.log import emergency_error
-from celery.conf import LOG_LEVELS, DAEMON_LOG_FILE, DAEMON_LOG_LEVEL
-from celery.conf import DAEMON_CONCURRENCY, DAEMON_PID_FILE
+import socket
+import logging
+import optparse
+import traceback
+import multiprocessing
+
+import celery
 from celery import conf
-from celery import discovery
+from celery import platform
+from celery.log import emergency_error
 from celery.task import discard_all
+from celery.utils import info
 from celery.worker import WorkController
-import signal
-import multiprocessing
-import traceback
-import optparse
-import atexit
-
-USE_STATISTICS = getattr(settings, "CELERY_STATISTICS", False)
-# Make sure the setting exists.
-settings.CELERY_STATISTICS = USE_STATISTICS
 
 STARTUP_INFO_FMT = """
 Configuration ->
-    * Broker -> amqp://%(vhost)s@%(host)s:%(port)s
-    * Exchange -> %(exchange)s (%(exchange_type)s)
-    * Consumer -> Queue:%(consumer_queue)s Routing:%(consumer_rkey)s
-    * Concurrency -> %(concurrency)s
-    * Statistics -> %(statistics)s
+    . broker -> %(conninfo)s
+    . queues ->
+%(queues)s
+    . concurrency -> %(concurrency)s
+    . loader -> %(loader)s
+    . logfile -> %(logfile)s@%(loglevel)s
+    . events -> %(events)s
+    . beat -> %(celerybeat)s
+%(tasks)s
 """.strip()
 
+TASK_LIST_FMT = """    . tasks ->\n%s"""
+
 OPTION_LIST = (
-    optparse.make_option('-c', '--concurrency', default=DAEMON_CONCURRENCY,
+    optparse.make_option('-c', '--concurrency',
+            default=conf.CELERYD_CONCURRENCY,
             action="store", dest="concurrency", type="int",
             help="Number of child processes processing the queue."),
     optparse.make_option('--discard', default=False,
@@ -108,92 +78,38 @@ OPTION_LIST = (
             help="Discard all waiting tasks before the server is started. "
                  "WARNING: This is unrecoverable, and the tasks will be "
                  "deleted from the messaging server."),
-    optparse.make_option('-s', '--statistics', default=USE_STATISTICS,
-            action="store_true", dest="statistics",
-            help="Collect statistics."),
-    optparse.make_option('-f', '--logfile', default=DAEMON_LOG_FILE,
+    optparse.make_option('-f', '--logfile', default=conf.CELERYD_LOG_FILE,
             action="store", dest="logfile",
             help="Path to log file."),
-    optparse.make_option('-l', '--loglevel', default=DAEMON_LOG_LEVEL,
+    optparse.make_option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL,
             action="store", dest="loglevel",
             help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."),
-    optparse.make_option('-p', '--pidfile', default=DAEMON_PID_FILE,
-            action="store", dest="pidfile",
-            help="Path to pidfile."),
-    optparse.make_option('-d', '--detach', '--daemon', default=False,
-            action="store_true", dest="detach",
-            help="Run in the background as a daemon."),
-    optparse.make_option('-S', '--supervised', default=False,
-            action="store_true", dest="supervised",
-            help="Restart the worker server if it dies."),
-    optparse.make_option('-u', '--uid', default=None,
-            action="store", dest="uid",
-            help="User-id to run celeryd as when in daemon mode."),
-    optparse.make_option('-g', '--gid', default=None,
-            action="store", dest="gid",
-            help="Group-id to run celeryd as when in daemon mode."),
-    optparse.make_option('--umask', default=0,
-            action="store", type="int", dest="umask",
-            help="umask of the process when in daemon mode."),
-    optparse.make_option('--workdir', default=None,
-            action="store", dest="working_directory",
-            help="Directory to change to when in daemon mode."),
-    optparse.make_option('--chroot', default=None,
-            action="store", dest="chroot",
-            help="Change root directory to this path when in daemon mode."),
-    )
-
-
-def acquire_pidlock(pidfile):
-    """Get the :class:`daemon.pidlockfile.PIDLockFile` handler for
-    ``pidfile``.
-
-    If the ``pidfile`` already exists, but the process is not running the
-    ``pidfile`` will be removed, a ``"stale pidfile"`` message is emitted
-    and execution continues as normally. However, if the process is still
-    running the program will exit complaining that the program is already
-    running in the background somewhere.
-
-    """
-    from daemon.pidlockfile import PIDLockFile
-    import errno
-    pidlock = PIDLockFile(pidfile)
-    if not pidlock.is_locked():
-        return pidlock
-    pid = pidlock.read_pid()
-    try:
-        os.kill(pid, 0)
-    except os.error, exc:
-        if exc.errno == errno.ESRCH:
-            sys.stderr.write("Stale pidfile exists. Removing it.\n")
-            os.unlink(pidfile)
-            return PIDLockFile(pidfile)
-    else:
-        raise SystemExit(
-                "ERROR: Pidfile (%s) already exists.\n"
-                "Seems celeryd is already running? (PID: %d)" % (
-                    pidfile, pid))
-    return pidlock
-
-
-def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
-        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
-        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
-        supervised=False, working_directory=None, chroot=None,
-        statistics=None, **kwargs):
+    optparse.make_option('-n', '--hostname', default=None,
+            action="store", dest="hostname",
+            help="Set custom host name. E.g. 'foo.example.com'."),
+    optparse.make_option('-B', '--beat', default=False,
+            action="store_true", dest="run_clockservice",
+            help="Also run the celerybeat periodic task scheduler. \
+                  Please note that only one instance must be running."),
+    optparse.make_option('-E', '--events', default=conf.SEND_EVENTS,
+            action="store_true", dest="events",
+            help="Send events so celery can be monitored by e.g. celerymon."),
+)
+
+
+def run_worker(concurrency=conf.CELERYD_CONCURRENCY,
+        loglevel=conf.CELERYD_LOG_LEVEL, logfile=conf.CELERYD_LOG_FILE,
+        hostname=None,
+        discard=False, run_clockservice=False, events=False, **kwargs):
     """Starts the celery worker server."""
 
-    # set SIGCLD back to the default SIG_DFL (before python-daemon overrode
-    # it) lets the parent wait() for the terminated child process and stops
-    # the 'OSError: [Errno 10] No child processes' problem.
-
-    if hasattr(signal, "SIGCLD"): # Make sure the platform supports signals.
-        signal.signal(signal.SIGCLD, signal.SIG_DFL)
+    hostname = hostname or socket.gethostname()
 
-    print("Celery %s is starting." % __version__)
+    print("celery@%s v%s is starting." % (hostname, celery.__version__))
 
-    if statistics is not None:
-        settings.CELERY_STATISTICS = statistics
+    from celery.loaders import current_loader, load_settings
+    loader = current_loader()
+    settings = load_settings()
 
     if not concurrency:
         concurrency = multiprocessing.cpu_count()
@@ -209,9 +125,7 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
 
     # Setup logging
     if not isinstance(loglevel, int):
-        loglevel = LOG_LEVELS[loglevel.upper()]
-    if not detach:
-        logfile = None # log to stderr when not running in the background.
+        loglevel = conf.LOG_LEVELS[loglevel.upper()]
 
     if discard:
         discarded_count = discard_all()
@@ -219,65 +133,50 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
         print("discard: Erased %d %s from the queue.\n" % (
                 discarded_count, what))
 
+    # Run the worker init handler.
+    # (Usually imports task modules and such.)
+    loader.on_worker_init()
+
     # Dump configuration to screen so we have some basic information
     # when users sends e-mails.
+
+    tasklist = ""
+    if loglevel <= logging.INFO:
+        from celery.registry import tasks
+        tasklist = tasks.keys()
+        if not loglevel <= logging.DEBUG:
+            tasklist = filter(lambda s: not s.startswith("celery."), tasklist)
+        tasklist = TASK_LIST_FMT % "\n".join("        . %s" % task
+                                                for task in sorted(tasklist))
+
     print(STARTUP_INFO_FMT % {
-            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
-            "host": getattr(settings, "AMQP_SERVER", "(default)"),
-            "port": getattr(settings, "AMQP_PORT", "(default)"),
-            "exchange": conf.AMQP_EXCHANGE,
-            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
-            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
-            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
-            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
+            "conninfo": info.format_broker_info(),
+            "queues": info.format_routing_table(indent=8),
             "concurrency": concurrency,
-            "loglevel": loglevel,
-            "pidfile": pidfile,
-            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
+            "loglevel": conf.LOG_LEVELS[loglevel],
+            "logfile": logfile or "[stderr]",
+            "celerybeat": run_clockservice and "ON" or "OFF",
+            "events": events and "ON" or "OFF",
+            "tasks": tasklist,
+            "loader": loader.__class__.__module__,
     })
 
     print("Celery has started.")
-    if detach:
-        if not CAN_DETACH:
-            raise RuntimeError(
-                    "This operating system doesn't support detach. ")
-        from daemon import DaemonContext
-        from celery.log import setup_logger, redirect_stdouts_to_logger
-
-        # Since without stderr any errors will be silently suppressed,
-        # we need to know that we have access to the logfile
-        if logfile:
-            open(logfile, "a").close()
-
-        pidlock = acquire_pidlock(pidfile)
-        if not umask:
-            umask = 0
-        uid = uid and int(uid) or os.geteuid()
-        gid = gid and int(gid) or os.getegid()
-        working_directory = working_directory or os.getcwd()
-        context = DaemonContext(chroot_directory=chroot,
-                                working_directory=working_directory,
-                                umask=umask,
-                                pidfile=pidlock,
-                                uid=uid,
-                                gid=gid)
-        context.open()
-        logger = setup_logger(loglevel, logfile)
-        redirect_stdouts_to_logger(logger, loglevel)
-
-    # Run the worker init handler.
-    # (Usually imports task modules and such.)
-    current_loader.on_worker_init()
+    set_process_status("Running...")
 
     def run_worker():
         worker = WorkController(concurrency=concurrency,
                                 loglevel=loglevel,
                                 logfile=logfile,
-                                is_detached=detach)
+                                hostname=hostname,
+                                embed_clockservice=run_clockservice,
+                                send_events=events)
 
-        # Install signal handler that restarts celeryd on SIGHUP,
-        # (only on POSIX systems)
-        install_restart_signal_handler(worker)
+        # Install signal handler so SIGHUP restarts the worker.
+        install_worker_restart_handler(worker)
+
+        from celery import signals
+        signals.worker_init.send(sender=worker)
 
         try:
             worker.start()
@@ -286,37 +185,22 @@ def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
                             e.__class__, e, traceback.format_exc()))
 
     try:
-        if supervised:
-            OFASupervisor(target=run_worker).start()
-        else:
-            run_worker()
+        run_worker()
     except:
-        if detach:
-            context.close()
+        set_process_status("Exiting...")
         raise
 
 
-def install_restart_signal_handler(worker):
-    """Installs a signal handler that restarts the current program
-    when it receives the ``SIGHUP`` signal.
-    """
-    if not hasattr(signal, "SIGHUP"):
-        return  # platform is not POSIX
+def install_worker_restart_handler(worker):
 
-    def restart_self(signum, frame):
+    def restart_worker_sig_handler(signum, frame):
         """Signal handler restarting the current python program."""
-        worker.logger.info("Restarting celeryd (%s)" % (
+        worker.logger.warn("Restarting celeryd (%s)" % (
             " ".join(sys.argv)))
-        if worker.is_detached:
-            pid = os.fork()
-            if pid:
-                worker.stop()
-                sys.exit(0)
-        else:
-            worker.stop()
+        worker.stop()
         os.execv(sys.executable, [sys.executable] + sys.argv)
 
-    signal.signal(signal.SIGHUP, restart_self)
+    platform.install_signal_handler("SIGHUP", restart_worker_sig_handler)
 
 
 def parse_options(arguments):
@@ -326,6 +210,16 @@ def parse_options(arguments):
     return options
 
 
-if __name__ == "__main__":
+def set_process_status(info):
+    arg_start = "manage" in sys.argv[0] and 2 or 1
+    if sys.argv[arg_start:]:
+        info = "%s (%s)" % (info, " ".join(sys.argv[arg_start:]))
+    platform.set_mp_process_title("celeryd", info=info)
+
+
+def main():
     options = parse_options(sys.argv[1:])
     run_worker(**vars(options))
+
+if __name__ == "__main__":
+    main()
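
Note on the restart handler above: it relies on stopping the worker cleanly and then
re-executing the current process image. A minimal standalone sketch of the same
SIGHUP/``os.execv`` pattern, independent of celery (``Worker`` here is a hypothetical
stand-in for ``WorkController``):

    import os
    import signal
    import sys

    class Worker(object):
        """Hypothetical stand-in for the real WorkController."""

        def stop(self):
            print("shutting down cleanly...")

    def install_restart_handler(worker):
        def restart(signum, frame):
            worker.stop()          # finish in-flight work before restarting
            # Replace the running process with a fresh copy of itself.
            os.execv(sys.executable, [sys.executable] + sys.argv)
        if hasattr(signal, "SIGHUP"):   # SIGHUP only exists on POSIX systems
            signal.signal(signal.SIGHUP, restart)

    install_restart_handler(Worker())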

+ 2 - 1
celery/bin/celeryinit.py

@@ -1,3 +1,4 @@
+import sys
 
 
 def main():
@@ -5,7 +6,7 @@ def main():
     loader = Loader()
     conf = loader.read_configuration()
     from django.core.management import call_command, setup_environ
-    print("Creating database tables...")
+    sys.stderr.write("Creating database tables...\n")
     setup_environ(conf)
     call_command("syncdb")
 

+ 202 - 258
celery/conf.py

@@ -1,266 +1,210 @@
-"""celery.conf"""
-from celery.loaders import settings
-from datetime import timedelta
 import logging
+import warnings
+from datetime import timedelta
 
-DEFAULT_AMQP_EXCHANGE = "celery"
-DEFAULT_AMQP_PUBLISHER_ROUTING_KEY = "celery"
-DEFAULT_AMQP_CONSUMER_ROUTING_KEY = "celery"
-DEFAULT_AMQP_CONSUMER_QUEUE = "celery"
-DEFAULT_AMQP_EXCHANGE_TYPE = "direct"
-DEFAULT_DAEMON_CONCURRENCY = 0 # defaults to cpu count
-DEFAULT_DAEMON_PID_FILE = "celeryd.pid"
-DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'
-DEFAULT_DAEMON_LOG_LEVEL = "INFO"
-DEFAULT_DAEMON_LOG_FILE = "celeryd.log"
-DEFAULT_AMQP_CONNECTION_TIMEOUT = 4
-DEFAULT_STATISTICS = False
-DEFAULT_ALWAYS_EAGER = False
-DEFAULT_TASK_RESULT_EXPIRES = timedelta(days=5)
-DEFAULT_AMQP_CONNECTION_RETRY = True
-DEFAULT_AMQP_CONNECTION_MAX_RETRIES = 100
-DEFAULT_TASK_SERIALIZER = "pickle"
-DEFAULT_BACKEND = "database"
-DEFAULT_PERIODIC_STATUS_BACKEND = "database"
-
-
-"""
-.. data:: LOG_LEVELS
-
-    Mapping of log level names to :mod:`logging` module constants.
-
-"""
-LOG_LEVELS = {
-    "DEBUG": logging.DEBUG,
-    "INFO": logging.INFO,
-    "WARNING": logging.WARNING,
-    "WARN": logging.WARNING,
-    "ERROR": logging.ERROR,
-    "CRITICAL": logging.CRITICAL,
-    "FATAL": logging.FATAL,
-}
-
-"""
-.. data:: LOG_FORMAT
-
-    The format to use for log messages.
-
-"""
-LOG_FORMAT = getattr(settings, "CELERYD_DAEMON_LOG_FORMAT",
-                     DEFAULT_LOG_FMT)
-
-"""
-.. data:: DAEMON_LOG_FILE
-
-    Filename of the daemon log file.
-
-"""
-DAEMON_LOG_FILE = getattr(settings, "CELERYD_LOG_FILE",
-                          DEFAULT_DAEMON_LOG_FILE)
-
-"""
-.. data:: DAEMON_LOG_LEVEL
-
-
-"""
-DAEMON_LOG_LEVEL = LOG_LEVELS[getattr(settings, "CELERYD_DAEMON_LOG_LEVEL",
-                                      DEFAULT_DAEMON_LOG_LEVEL).upper()]
-
-"""
-.. data:: DAEMON_PID_FILE
-
-    Full path to the daemon pidfile.
-
-"""
-DAEMON_PID_FILE = getattr(settings, "CELERYD_PID_FILE",
-                          DEFAULT_DAEMON_PID_FILE)
-
-"""
-.. data:: DAEMON_CONCURRENCY
-
-    The number of concurrent worker processes.
-
-"""
-DAEMON_CONCURRENCY = getattr(settings, "CELERYD_CONCURRENCY",
-                             DEFAULT_DAEMON_CONCURRENCY)
-
-"""
-.. data:: AMQP_EXCHANGE
-
-    Name of the AMQP exchange.
-
-"""
-AMQP_EXCHANGE = getattr(settings, "CELERY_AMQP_EXCHANGE",
-                        DEFAULT_AMQP_EXCHANGE)
-
-
-"""
-.. data:: AMQP_EXCHANGE_TYPE
-
-The exchange type.
-
-"""
-AMQP_EXCHANGE_TYPE = getattr(settings, "CELERY_AMQP_EXCHANGE_TYPE",
-                        DEFAULT_AMQP_EXCHANGE_TYPE)
-
-"""
-.. data:: AMQP_PUBLISHER_ROUTING_KEY
-
-    The default AMQP routing key used when publishing tasks.
-
-"""
-AMQP_PUBLISHER_ROUTING_KEY = getattr(settings,
-                                "CELERY_AMQP_PUBLISHER_ROUTING_KEY",
-                                DEFAULT_AMQP_PUBLISHER_ROUTING_KEY)
-
-"""
-.. data:: AMQP_CONSUMER_ROUTING_KEY
-
-    The AMQP routing key used when consuming tasks.
-
-"""
-AMQP_CONSUMER_ROUTING_KEY = getattr(settings,
-                                "CELERY_AMQP_CONSUMER_ROUTING_KEY",
-                                DEFAULT_AMQP_CONSUMER_ROUTING_KEY)
-
-"""
-.. data:: AMQP_CONSUMER_QUEUE
-
-    The name of the AMQP queue.
-
-"""
-AMQP_CONSUMER_QUEUE = getattr(settings, "CELERY_AMQP_CONSUMER_QUEUE",
-                              DEFAULT_AMQP_CONSUMER_QUEUE)
-
-
-"""
-.. data:: AMQP_CONSUMER_QUEUES
-
-    Dictionary defining multiple AMQP queues.
-
-"""
-DEFAULT_AMQP_CONSUMER_QUEUES = {
-        AMQP_CONSUMER_QUEUE: {
-            "exchange": AMQP_EXCHANGE,
-            "routing_key": AMQP_CONSUMER_ROUTING_KEY,
-            "exchange_type": AMQP_EXCHANGE_TYPE,
-        }
+from celery.loaders import load_settings
+
+DEFAULT_P_LOG_FMT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'
+DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
+
+LOG_LEVELS = dict(logging._levelNames)
+LOG_LEVELS["FATAL"] = logging.FATAL
+LOG_LEVELS[logging.FATAL] = "FATAL"
+
+settings = load_settings()
+
+_DEFAULTS = {
+    "CELERY_BACKEND": "database",
+    "CELERY_ALWAYS_EAGER": False,
+    "CELERY_TASK_RESULT_EXPIRES": timedelta(days=5),
+    "CELERY_SEND_EVENTS": False,
+    "CELERY_IGNORE_RESULT": False,
+    "CELERY_STORE_ERRORS_EVEN_IF_IGNORED": False,
+    "CELERY_TASK_SERIALIZER": "pickle",
+    "CELERY_DISABLE_RATE_LIMITS": False,
+    "CELERY_DEFAULT_ROUTING_KEY": "celery",
+    "CELERY_DEFAULT_QUEUE": "celery",
+    "CELERY_DEFAULT_EXCHANGE": "celery",
+    "CELERY_DEFAULT_EXCHANGE_TYPE": "direct",
+    "CELERY_BROKER_CONNECTION_TIMEOUT": 4,
+    "CELERY_BROKER_CONNECTION_RETRY": True,
+    "CELERY_BROKER_CONNECTION_MAX_RETRIES": 100,
+    "CELERYD_CONCURRENCY": 0, # defaults to cpu count
+    "CELERYD_PREFETCH_MULTIPLIER": 4,
+    "CELERYD_LOG_FORMAT": DEFAULT_P_LOG_FMT,
+    "CELERYD_LOG_LEVEL": "WARN",
+    "CELERYD_LOG_FILE": None, # stderr
+    "CELERYBEAT_SCHEDULE_FILENAME": "celerybeat-schedule",
+    "CELERYBEAT_MAX_LOOP_INTERVAL": 5 * 60, # five minutes.
+    "CELERYBEAT_LOG_LEVEL": "INFO",
+    "CELERYBEAT_LOG_FILE": None, # stderr
+    "CELERYMON_LOG_LEVEL": "INFO",
+    "CELERYMON_LOG_FILE": None, # stderr
+    "CELERYMON_LOG_FORMAT": DEFAULT_LOG_FMT,
+    "CELERY_BROADCAST_QUEUE": "celeryctl",
+    "CELERY_BROADCAST_EXCHANGE": "celeryctl",
+    "CELERY_BROADCAST_EXCHANGE_TYPE": "fanout",
+    "CELERY_EVENT_QUEUE": "celeryevent",
+    "CELERY_EVENT_EXCHANGE": "celeryevent",
+    "CELERY_EVENT_EXCHANGE_TYPE": "direct",
+    "CELERY_EVENT_ROUTING_KEY": "celeryevent",
+    "CELERY_RESULT_EXCHANGE": "celeryresults",
 }
 
-AMQP_CONSUMER_QUEUES = getattr(settings, "CELERY_AMQP_CONSUMER_QUEUES",
-                              DEFAULT_AMQP_CONSUMER_QUEUES)
-
-"""
-.. data:: AMQP_CONNECTION_TIMEOUT
-
-    The timeout in seconds before we give up establishing a connection
-    to the AMQP server.
-
-"""
-AMQP_CONNECTION_TIMEOUT = getattr(settings, "CELERY_AMQP_CONNECTION_TIMEOUT",
-                                  DEFAULT_AMQP_CONNECTION_TIMEOUT)
-
-"""
-.. data:: SEND_CELERY_TASK_ERROR_EMAILS
-
-    If set to ``True``, errors in tasks will be sent to admins by e-mail.
-    If unset, it will send the e-mails if ``settings.DEBUG`` is False.
-
-"""
-SEND_CELERY_TASK_ERROR_EMAILS = getattr(settings,
-                                        "SEND_CELERY_TASK_ERROR_EMAILS",
-                                        not settings.DEBUG)
-
-"""
-.. data:: ALWAYS_EAGER
-
-    Always execute tasks locally, don't send to the queue.
-
-"""
-ALWAYS_EAGER = getattr(settings, "CELERY_ALWAYS_EAGER",
-                       DEFAULT_ALWAYS_EAGER)
-
-"""
-.. data: TASK_RESULT_EXPIRES
-
-    Task tombstone expire time in seconds.
-
-"""
-TASK_RESULT_EXPIRES = getattr(settings, "CELERY_TASK_RESULT_EXPIRES",
-                              DEFAULT_TASK_RESULT_EXPIRES)
-
+_DEPRECATION_FMT = """
+%s is deprecated in favor of %s and is scheduled for removal in celery v1.2.
+""".strip()
+
+def _get(name, default=None, compat=None):
+    compat = compat or []
+    if default is None:
+        default = _DEFAULTS.get(name)
+    compat = [name] + compat
+    for i, alias in enumerate(compat):
+        try:
+            value = getattr(settings, alias)
+            i > 0 and warnings.warn(DeprecationWarning(_DEPRECATION_FMT % (
+                                                        alias, name)))
+            return value
+        except AttributeError:
+            pass
+    return default
+
+# <--- Task options                                <-   --   --- - ----- -- #
+ALWAYS_EAGER = _get("CELERY_ALWAYS_EAGER")
+CELERY_BACKEND = _get("CELERY_BACKEND")
+CELERY_CACHE_BACKEND = _get("CELERY_CACHE_BACKEND")
+TASK_SERIALIZER = _get("CELERY_TASK_SERIALIZER")
+TASK_RESULT_EXPIRES = _get("CELERY_TASK_RESULT_EXPIRES")
+IGNORE_RESULT = _get("CELERY_IGNORE_RESULT")
 # Make sure TASK_RESULT_EXPIRES is a timedelta.
 if isinstance(TASK_RESULT_EXPIRES, int):
     TASK_RESULT_EXPIRES = timedelta(seconds=TASK_RESULT_EXPIRES)
 
-"""
-.. data:: AMQP_CONNECTION_RETRY
-
-Automatically try to re-establish the connection to the AMQP broker if
-it's lost.
-
-"""
-AMQP_CONNECTION_RETRY = getattr(settings, "CELERY_AMQP_CONNECTION_RETRY",
-                                DEFAULT_AMQP_CONNECTION_RETRY)
-
-"""
-.. data:: AMQP_CONNECTION_MAX_RETRIES
-
-Maximum number of retries before we give up re-establishing a connection
-to the AMQP broker.
-
-If this is set to ``0`` or ``None``, we will retry forever.
-
-Default is ``100`` retries.
-
-"""
-AMQP_CONNECTION_MAX_RETRIES = getattr(settings,
-                                      "CELERY_AMQP_CONNECTION_MAX_RETRIES",
-                                      DEFAULT_AMQP_CONNECTION_MAX_RETRIES)
-
-"""
-.. data:: TASK_SERIALIZER
-
-A string identifying the default serialization
-method to use. Can be ``pickle`` (default),
-``json``, ``yaml``, or any custom serialization methods that have
-been registered with :mod:`carrot.serialization.registry`.
-
-Default is ``pickle``.
-
-"""
-TASK_SERIALIZER = getattr(settings, "CELERY_TASK_SERIALIZER",
-                          DEFAULT_TASK_SERIALIZER)
-
-
-"""
-
-.. data:: CELERY_BACKEND
-
-The backend used to store task results (tombstones).
-
-"""
-CELERY_BACKEND = getattr(settings, "CELERY_BACKEND", DEFAULT_BACKEND)
-
-
-"""
-
-.. data:: CELERY_PERIODIC_STATUS_BACKEND
-
-The backend used to store the status of periodic tasks.
-
-"""
-CELERY_PERIODIC_STATUS_BACKEND = getattr(settings,
-                                    "CELERY_PERIODIC_STATUS_BACKEND",
-                                    DEFAULT_PERIODIC_STATUS_BACKEND)
-
-
-"""
-
-.. data:: CELERY_CACHE_BACKEND
-
-Use a custom cache backend for celery. If not set the django-global
-cache backend in ``CACHE_BACKEND`` will be used.
-
-"""
-CELERY_CACHE_BACKEND = getattr(settings, "CELERY_CACHE_BACKEND", None)
+# <--- Worker                                      <-   --   --- - ----- -- #
+
+SEND_EVENTS = _get("CELERY_SEND_EVENTS")
+DEFAULT_RATE_LIMIT = _get("CELERY_DEFAULT_RATE_LIMIT")
+DISABLE_RATE_LIMITS = _get("CELERY_DISABLE_RATE_LIMITS")
+STORE_ERRORS_EVEN_IF_IGNORED = _get("CELERY_STORE_ERRORS_EVEN_IF_IGNORED")
+CELERY_SEND_TASK_ERROR_EMAILS = _get("CELERY_SEND_TASK_ERROR_EMAILS",
+                                     not settings.DEBUG,
+                                     compat=["SEND_CELERY_TASK_ERROR_EMAILS"])
+CELERYD_LOG_FORMAT = _get("CELERYD_LOG_FORMAT",
+                          compat=["CELERYD_DAEMON_LOG_FORMAT"])
+CELERYD_LOG_FILE = _get("CELERYD_LOG_FILE")
+CELERYD_LOG_LEVEL = _get("CELERYD_LOG_LEVEL",
+                        compat=["CELERYD_DAEMON_LOG_LEVEL"])
+CELERYD_LOG_LEVEL = LOG_LEVELS[CELERYD_LOG_LEVEL.upper()]
+CELERYD_CONCURRENCY = _get("CELERYD_CONCURRENCY")
+CELERYD_PREFETCH_MULTIPLIER = _get("CELERYD_PREFETCH_MULTIPLIER")
+
+# <--- Message routing                             <-   --   --- - ----- -- #
+QUEUES = _get("CELERY_QUEUES")
+DEFAULT_QUEUE = _get("CELERY_DEFAULT_QUEUE")
+DEFAULT_ROUTING_KEY = _get("CELERY_DEFAULT_ROUTING_KEY")
+DEFAULT_EXCHANGE = _get("CELERY_DEFAULT_EXCHANGE")
+DEFAULT_EXCHANGE_TYPE = _get("CELERY_DEFAULT_EXCHANGE_TYPE")
+
+_DEPRECATIONS = {"CELERY_AMQP_CONSUMER_QUEUES": "CELERY_QUEUES",
+                 "CELERY_AMQP_CONSUMER_QUEUE": "CELERY_QUEUES",
+                 "CELERY_AMQP_EXCHANGE": "CELERY_DEFAULT_EXCHANGE",
+                 "CELERY_AMQP_EXCHANGE_TYPE": "CELERY_DEFAULT_EXCHANGE_TYPE",
+                 "CELERY_AMQP_CONSUMER_ROUTING_KEY": "CELERY_QUEUES",
+                 "CELERY_AMQP_PUBLISHER_ROUTING_KEY":
+                 "CELERY_DEFAULT_ROUTING_KEY"}
+
+
+_DEPRECATED_QUEUE_SETTING_FMT = """
+%s is deprecated in favor of %s and scheduled for removal in celery v1.0.
+Please visit http://bit.ly/5DsSuX for more information.
+
+We're sorry for the inconvenience.
+""".strip()
+
+
+def _find_deprecated_queue_settings():
+    global DEFAULT_QUEUE, DEFAULT_ROUTING_KEY
+    global DEFAULT_EXCHANGE, DEFAULT_EXCHANGE_TYPE
+    binding_key = None
+
+    multi = _get("CELERY_AMQP_CONSUMER_QUEUES")
+    if multi:
+        return multi
+
+    single = _get("CELERY_AMQP_CONSUMER_QUEUE")
+    if single:
+        DEFAULT_QUEUE = single
+        DEFAULT_EXCHANGE = _get("CELERY_AMQP_EXCHANGE", DEFAULT_EXCHANGE)
+        DEFAULT_EXCHANGE_TYPE = _get("CELERY_AMQP_EXCHANGE_TYPE",
+                                     DEFAULT_EXCHANGE_TYPE)
+        binding_key = _get("CELERY_AMQP_CONSUMER_ROUTING_KEY",
+                            DEFAULT_ROUTING_KEY)
+        DEFAULT_ROUTING_KEY = _get("CELERY_AMQP_PUBLISHER_ROUTING_KEY",
+                                   DEFAULT_ROUTING_KEY)
+    binding_key = binding_key or DEFAULT_ROUTING_KEY
+    return {DEFAULT_QUEUE: {"exchange": DEFAULT_EXCHANGE,
+                            "exchange_type": DEFAULT_EXCHANGE_TYPE,
+                            "binding_key": binding_key}}
+
+
+def _warn_if_deprecated_queue_settings():
+    for setting, new_setting in _DEPRECATIONS.items():
+        if _get(setting):
+            warnings.warn(DeprecationWarning(_DEPRECATED_QUEUE_SETTING_FMT % (
+                setting, new_setting)))
+            break
+
+_warn_if_deprecated_queue_settings()
+if not QUEUES:
+    QUEUES = _find_deprecated_queue_settings()
+
+# :--- Broadcast queue settings                     <-   --   --- - ----- -- #
+
+BROADCAST_QUEUE = _get("CELERY_BROADCAST_QUEUE")
+BROADCAST_EXCHANGE = _get("CELERY_BROADCAST_EXCHANGE")
+BROADCAST_EXCHANGE_TYPE = _get("CELERY_BROADCAST_EXCHANGE_TYPE")
+
+# :--- Event queue settings                         <-   --   --- - ----- -- #
+
+EVENT_QUEUE = _get("CELERY_EVENT_QUEUE")
+EVENT_EXCHANGE = _get("CELERY_EVENT_EXCHANGE")
+EVENT_EXCHANGE_TYPE = _get("CELERY_EVENT_EXCHANGE_TYPE")
+EVENT_ROUTING_KEY = _get("CELERY_EVENT_ROUTING_KEY")
+
+# :--- Broker connections                           <-   --   --- - ----- -- #
+BROKER_CONNECTION_TIMEOUT = _get("CELERY_BROKER_CONNECTION_TIMEOUT",
+                                compat=["CELERY_AMQP_CONNECTION_TIMEOUT"])
+BROKER_CONNECTION_RETRY = _get("CELERY_BROKER_CONNECTION_RETRY",
+                                compat=["CELERY_AMQP_CONNECTION_RETRY"])
+BROKER_CONNECTION_MAX_RETRIES = _get("CELERY_BROKER_CONNECTION_MAX_RETRIES",
+                                compat=["CELERY_AMQP_CONNECTION_MAX_RETRIES"])
+
+# :--- Backend settings                             <-   --   --- - ----- -- #
+
+RESULT_EXCHANGE = _get("CELERY_RESULT_EXCHANGE")
+
+# :--- Celery Beat                                  <-   --   --- - ----- -- #
+CELERYBEAT_LOG_LEVEL = _get("CELERYBEAT_LOG_LEVEL")
+CELERYBEAT_LOG_FILE = _get("CELERYBEAT_LOG_FILE")
+CELERYBEAT_SCHEDULE_FILENAME = _get("CELERYBEAT_SCHEDULE_FILENAME")
+CELERYBEAT_MAX_LOOP_INTERVAL = _get("CELERYBEAT_MAX_LOOP_INTERVAL")
+
+# :--- Celery Monitor                               <-   --   --- - ----- -- #
+CELERYMON_LOG_LEVEL = _get("CELERYMON_LOG_LEVEL")
+CELERYMON_LOG_FILE = _get("CELERYMON_LOG_FILE")
+
+
+def _init_routing_table():
+    """Convert configuration mapping to a table of queues digestible
+    by a :class:`carrot.messaging.ConsumerSet`."""
+
+    def _defaults(opts):
+        opts.setdefault("exchange", DEFAULT_EXCHANGE),
+        opts.setdefault("exchange_type", DEFAULT_EXCHANGE_TYPE)
+        opts.setdefault("binding_key", "")
+        return opts
+
+    return dict((queue, _defaults(opts)) for queue, opts in QUEUES.items())
+
+routing_table = _init_routing_table()
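
The ``_get`` helper above resolves a setting by its new name first and then by any
deprecated aliases, emitting a ``DeprecationWarning`` when an alias is used. A minimal
sketch of that lookup pattern against a plain settings object (the setting names below
are illustrative, not celery's):

    import warnings

    DEFAULTS = {"BROKER_TIMEOUT": 4}

    class Settings(object):
        """Stand-in for a Django settings module."""
        AMQP_TIMEOUT = 10      # only the deprecated alias is defined

    settings = Settings()

    def get_setting(name, aliases=()):
        for i, candidate in enumerate([name] + list(aliases)):
            try:
                value = getattr(settings, candidate)
            except AttributeError:
                continue
            if i > 0:          # value came from a deprecated alias
                warnings.warn(DeprecationWarning(
                    "%s is deprecated, use %s instead" % (candidate, name)))
            return value
        return DEFAULTS.get(name)

    print(get_setting("BROKER_TIMEOUT", aliases=["AMQP_TIMEOUT"]))  # -> 10, with a warning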

+ 0 - 0
celery/contrib/__init__.py


+ 19 - 0
celery/contrib/test_runner.py

@@ -0,0 +1,19 @@
+from django.conf import settings
+from django.test.simple import run_tests as run_tests_orig
+
+USAGE = """\
+Custom test runner to allow testing of celery delayed tasks.
+"""
+
+def run_tests(test_labels, *args, **kwargs):
+    """Django test runner allowing testing of celery delayed tasks.
+
+    All tasks are run locally, not in a worker.
+
+    To use this runner set ``settings.TEST_RUNNER``::
+
+        TEST_RUNNER = "celery.contrib.test_runner.run_tests"
+
+    """
+    settings.CELERY_ALWAYS_EAGER = True
+    return run_tests_orig(test_labels, *args, **kwargs)
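
With ``CELERY_ALWAYS_EAGER`` enabled by this runner, ``.delay()`` executes the task
in-process and returns an ``EagerResult`` immediately, so tests can assert on the
result synchronously. A hedged sketch (the ``add`` task and its module are hypothetical):

    from django.test import TestCase

    from myapp.tasks import add           # hypothetical task returning x + y

    class AddTaskTest(TestCase):

        def test_add_runs_eagerly(self):
            result = add.delay(2, 2)      # runs locally, never touches a worker
            self.assertEqual(result.result, 4)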

+ 91 - 16
celery/datastructures.py

@@ -3,10 +3,10 @@
 Custom Datastructures
 
 """
-from UserList import UserList
-from Queue import Queue
-from Queue import Empty as QueueEmpty
+import time
 import traceback
+from UserList import UserList
+from Queue import Queue, Empty as QueueEmpty
 
 
 class PositionQueue(UserList):
@@ -54,7 +54,6 @@ class ExceptionInfo(object):
     :param exc_info: The exception tuple info as returned by
        :func:`sys.exc_info`.
 
-
     .. attribute:: exception
 
         The original exception.
@@ -68,28 +67,20 @@ class ExceptionInfo(object):
     def __init__(self, exc_info):
         type_, exception, tb = exc_info
         self.exception = exception
-        self.traceback = '\n'.join(traceback.format_exception(*exc_info))
+        self.traceback = ''.join(traceback.format_exception(*exc_info))
 
     def __str__(self):
-        return str(self.exception)
+        return self.traceback
 
     def __repr__(self):
-        return "<%s.%s: %s" % (
+        return "<%s.%s: %s>" % (
                 self.__class__.__module__,
                 self.__class__.__name__,
                 str(self.exception))
 
 
-def consume_queue(queue):
-    while True:
-        try:
-            yield queue.get_nowait()
-        except QueueEmpty:
-            break
-
-
 class SharedCounter(object):
-    """An integer that can be updated by several threads at once.
+    """Thread-safe counter.
 
     Please note that the final value is not synchronized, this means
     that you should not update the value by using a previous value, the only
@@ -146,3 +137,87 @@ class SharedCounter(object):
 
     def __repr__(self):
         return "<SharedCounter: int(%s)>" % str(int(self))
+
+
+class LimitedSet(object):
+    """Kind-of Set with limitations.
+
+    Good for when you need to test for membership (``a in set``),
+    but the set might become too big, so you want to limit it so it doesn't
+    consume too many resources.
+
+    :keyword maxlen: Maximum number of members before we start
+        deleting expired members.
+    :keyword expires: Time in seconds, before a membership expires.
+
+    """
+
+    def __init__(self, maxlen=None, expires=None):
+        self.maxlen = maxlen
+        self.expires = expires
+        self._data = {}
+
+    def add(self, value):
+        """Add a new member."""
+        self._expire_item()
+        self._data[value] = time.time()
+
+    def pop_value(self, value):
+        """Remove membership by finding value."""
+        self._data.pop(value, None)
+
+    def _expire_item(self):
+        """Hunt down and remove an expired item."""
+        while 1:
+            if self.maxlen and len(self) >= self.maxlen:
+                value, when = self.first
+                if not self.expires or time.time() > when + self.expires:
+                    try:
+                        self.pop_value(value)
+                    except TypeError: # pragma: no cover
+                        continue
+            break
+
+    def __contains__(self, value):
+        return value in self._data
+
+    def __iter__(self):
+        return iter(self._data.keys())
+
+    def __len__(self):
+        return len(self._data.keys())
+
+    def __repr__(self):
+        return "LimitedSet([%s])" % (repr(self._data.keys()))
+
+    @property
+    def chronologically(self):
+        return sorted(self._data.items(), key=lambda (value, when): when)
+
+    @property
+    def first(self):
+        """Get the oldest member."""
+        return self.chronologically[0]
+
+
+def consume_queue(queue):
+    """Iterator yielding all immediately available items in a
+    :class:`Queue.Queue`.
+
+    The iterator stops as soon as the queue raises :exc:`Queue.Empty`.
+
+    Example
+
+        >>> q = Queue()
+        >>> map(q.put, range(4))
+        >>> list(consume_queue(q))
+        [0, 1, 2, 3]
+        >>> list(consume_queue(q))
+        []
+
+    """
+    while 1:
+        try:
+            yield queue.get_nowait()
+        except QueueEmpty:
+            break
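
A short usage sketch of the two additions above: ``LimitedSet`` for bounded membership
testing and ``consume_queue`` for draining a queue without blocking (the values are
illustrative):

    from Queue import Queue

    from celery.datastructures import LimitedSet, consume_queue

    seen = LimitedSet(maxlen=3, expires=60)   # keep at most 3 members, expire after 60s
    for task_id in ("a", "b", "c"):
        seen.add(task_id)
    print("a" in seen)                        # True while the member is still kept

    q = Queue()
    for i in range(3):
        q.put(i)
    print(list(consume_queue(q)))             # [0, 1, 2]; a second call yields []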

+ 76 - 0
celery/decorators.py

@@ -0,0 +1,76 @@
+from inspect import getargspec
+
+from billiard.utils.functional import wraps
+
+from celery.task.base import Task, PeriodicTask
+
+
+def task(*args, **options):
+    """Decorator to create a task class out of any callable.
+
+    Examples:
+
+    .. code-block:: python
+
+        @task()
+        def refresh_feed(url):
+            return Feed.objects.get(url=url).refresh()
+
+    With setting extra options and using retry.
+
+    .. code-block:: python
+
+        @task(exchange="feeds")
+        def refresh_feed(url, **kwargs):
+            try:
+                return Feed.objects.get(url=url).refresh()
+            except socket.error, exc:
+                refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc)
+
+    Calling the resulting task:
+
+        >>> refresh_feed("http://example.com/rss") # Regular
+        <Feed: http://example.com/rss>
+        >>> refresh_feed.delay("http://example.com/rss") # Async
+        <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
+
+
+    """
+
+    def inner_create_task_cls(**options):
+
+        def _create_task_cls(fun):
+            base = options.pop("base", Task)
+
+            @wraps(fun)
+            def run(self, *args, **kwargs):
+                return fun(*args, **kwargs)
+            run.argspec = getargspec(fun)
+
+            cls_dict = dict(options, run=run, __module__=fun.__module__)
+            return type(fun.__name__, (base, ), cls_dict)()
+
+        return _create_task_cls
+
+    if len(args) == 1 and callable(args[0]):
+        return inner_create_task_cls()(*args)
+    return inner_create_task_cls(**options)
+
+
+def periodic_task(**options):
+    """Task decorator to create a periodic task.
+
+    Run a task once every day:
+
+    .. code-block:: python
+
+        from datetime import timedelta
+
+        @periodic_task(run_every=timedelta(days=1))
+        def cronjob(**kwargs):
+            logger = cronjob.get_logger(**kwargs)
+            logger.warn("Task running...")
+
+    """
+    options.setdefault("base", PeriodicTask)
+    return task(**options)

+ 0 - 26
celery/discovery.py

@@ -1,26 +0,0 @@
-"""celery.discovery"""
-from django.conf import settings
-
-
-def autodiscover():
-    """Include tasks for all applications in :setting:`INSTALLED_APPS`."""
-    return filter(None, [find_related_module(app, "tasks")
-                            for app in settings.INSTALLED_APPS])
-
-
-def find_related_module(app, related_name):
-    """Given an application name and a module name, tries to find that
-    module in the application, and running handler' if it finds it.
-    """
-
-    try:
-        module = __import__(app, {}, {}, [related_name])
-    except ImportError:
-        return None
-
-    try:
-        related_module = getattr(module, related_name)
-    except AttributeError:
-        return None
-
-    return related_module

+ 105 - 0
celery/events.py

@@ -0,0 +1,105 @@
+import time
+import socket
+import threading
+
+from celery.messaging import EventPublisher, EventConsumer
+
+
+def create_event(type, fields):
+    std = {"type": type,
+           "timestamp": fields.get("timestamp") or time.time()}
+    return dict(fields, **std)
+
+
+def Event(type, **fields):
+    """Create an event.
+
+    An event is a dictionary; the only required field is the type.
+
+    """
+    return create_event(type, fields)
+
+
+class EventDispatcher(object):
+    """Send events as messages.
+
+    :param connection: Carrot connection.
+
+    :keyword hostname: Hostname to identify ourselves as,
+        by default uses the hostname returned by :func:`socket.gethostname`.
+
+    :keyword enabled: Set to ``False`` to not actually publish any events,
+        making :meth:`send` a noop operation.
+
+    You need to :meth:`close` this after use.
+
+    """
+
+    def __init__(self, connection, hostname=None, enabled=True,
+            publisher=None):
+        self.connection = connection
+        self.publisher = publisher or EventPublisher(self.connection)
+        self.hostname = hostname or socket.gethostname()
+        self.enabled = enabled
+        self._lock = threading.Lock()
+
+    def send(self, type, **fields):
+        """Send event.
+
+        :param type: Kind of event.
+        :keyword \*\*fields: Event arguments.
+
+        """
+        if not self.enabled:
+            return
+        self._lock.acquire()
+        try:
+            fields["timestamp"] = time.time()
+            fields["hostname"] = self.hostname
+            self.publisher.send(Event(type, **fields))
+        finally:
+            self._lock.release()
+
+    def close(self):
+        """Close the event dispatcher."""
+        self._lock.locked() and self._lock.release()
+        self.publisher and self.publisher.close()
+
+
+class EventReceiver(object):
+    """Capture events.
+
+    :param connection: Carrot connection.
+    :keyword handlers: Event handlers.
+
+    :attr:`handlers` is a dict mapping event types to their handlers;
+    the special handler ``"*"`` captures all events that don't have a
+    dedicated handler.
+
+    """
+    handlers = {}
+
+    def __init__(self, connection, handlers=None):
+        self.connection = connection
+        if handlers is not None:
+            self.handlers = handlers
+
+    def process(self, type, event):
+        """Process the received event by dispatching it to the appropriate
+        handler."""
+        print("Received event: %s" % event)
+        handler = self.handlers.get(type) or self.handlers.get("*")
+        handler and handler(event)
+
+    def capture(self, limit=None):
+        """Open up a consumer capturing events. This has to be running
+        in the main process, and it will never stop unless forced"""
+        consumer = EventConsumer(self.connection)
+        consumer.register_callback(self._receive)
+        it = consumer.iterconsume(limit=limit)
+        while True:
+            it.next()
+
+    def _receive(self, message_data, message):
+        type = message_data.pop("type").lower()
+        self.process(type, create_event(type, message_data))
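
A hedged usage sketch of the two classes above: a worker-side dispatcher publishing
events and a monitor-side receiver dispatching them by type (the connection class is
carrot's, as used elsewhere in this changeset; the event fields and handler are
illustrative):

    from carrot.connection import DjangoBrokerConnection

    from celery.events import EventDispatcher, EventReceiver

    connection = DjangoBrokerConnection()

    # Publishing side: send a custom event type with extra fields.
    dispatcher = EventDispatcher(connection, enabled=True)
    dispatcher.send("task-started", uuid="8998d0f4", name="refresh_feed")
    dispatcher.close()

    # Consuming side: "*" catches every type without a dedicated handler.
    def on_task_started(event):
        print("started: %s" % event)

    receiver = EventReceiver(connection, handlers={"task-started": on_task_started,
                                                   "*": lambda event: None})
    receiver.capture()      # blocks, dispatching events as they arrive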

+ 9 - 1
celery/exceptions.py

@@ -1,5 +1,9 @@
 """celery.exceptions"""
 
+UNREGISTERED_FMT = """
+Task of kind %s is not registered, please make sure it's imported.
+""".strip()
+
 
 class MaxRetriesExceededError(Exception):
     """The tasks max restart limit has been exceeded."""
@@ -13,9 +17,13 @@ class RetryTaskError(Exception):
         super(RetryTaskError, self).__init__(message, exc, *args, **kwargs)
 
 
-class NotRegistered(Exception):
+class NotRegistered(KeyError):
     """The task is not registered."""
 
+    def __init__(self, message, *args, **kwargs):
+        message = UNREGISTERED_FMT % str(message)
+        super(NotRegistered, self).__init__(message, *args, **kwargs)
+
 
 class AlreadyRegistered(Exception):
     """The task is already registered."""

+ 0 - 340
celery/execute.py

@@ -1,340 +0,0 @@
-from carrot.connection import DjangoBrokerConnection
-from celery.conf import AMQP_CONNECTION_TIMEOUT
-from celery.result import AsyncResult, EagerResult
-from celery.messaging import TaskPublisher
-from celery.registry import tasks
-from celery.utils import gen_unique_id, noop, fun_takes_kwargs
-from celery.utils.functional import curry
-from datetime import datetime, timedelta
-from celery.exceptions import RetryTaskError
-from celery.datastructures import ExceptionInfo
-from celery.backends import default_backend
-from celery.loaders import current_loader
-from celery.monitoring import TaskTimerStats
-from celery import signals
-import sys
-import inspect
-import warnings
-import traceback
-
-
-def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
-        routing_key=None, exchange=None, task_id=None,
-        immediate=None, mandatory=None, priority=None, connection=None,
-        connect_timeout=AMQP_CONNECTION_TIMEOUT, serializer=None, **opts):
-    """Run a task asynchronously by the celery daemon(s).
-
-    :param task: The task to run (a callable object, or a :class:`Task`
-        instance
-
-    :param args: The positional arguments to pass on to the task (a ``list``).
-
-    :param kwargs: The keyword arguments to pass on to the task (a ``dict``)
-
-    :param countdown: Number of seconds into the future that the task should
-        execute. Defaults to immediate delivery (Do not confuse that with
-        the ``immediate`` setting, they are unrelated).
-
-    :param eta: A :class:`datetime.datetime` object that describes the
-        absolute time when the task should execute. May not be specified
-        if ``countdown`` is also supplied. (Do not confuse this with the
-        ``immediate`` setting, they are unrelated).
-
-    :keyword routing_key: The routing key used to route the task to a worker
-        server.
-
-    :keyword exchange: The named exchange to send the task to. Defaults to
-        :attr:`celery.task.base.Task.exchange`.
-
-    :keyword immediate: Request immediate delivery. Will raise an exception
-        if the task cannot be routed to a worker immediately.
-        (Do not confuse this parameter with the ``countdown`` and ``eta``
-        settings, as they are unrelated).
-
-    :keyword mandatory: Mandatory routing. Raises an exception if there's
-        no running workers able to take on this task.
-
-    :keyword connection: Re-use existing AMQP connection.
-        The ``connect_timeout`` argument is not respected if this is set.
-
-    :keyword connect_timeout: The timeout in seconds, before we give up
-        on establishing a connection to the AMQP server.
-
-    :keyword priority: The task priority, a number between ``0`` and ``9``.
-
-    :keyword serializer: A string identifying the default serialization
-        method to use. Defaults to the ``CELERY_TASK_SERIALIZER`` setting.
-        Can be ``pickle`` ``json``, ``yaml``, or any custom serialization
-        methods that have been registered with
-        :mod:`carrot.serialization.registry`.
-
-    """
-    args = args or []
-    kwargs = kwargs or {}
-    routing_key = routing_key or getattr(task, "routing_key", None)
-    exchange = exchange or getattr(task, "exchange", None)
-    if immediate is None:
-        immediate = getattr(task, "immediate", None)
-    if mandatory is None:
-        mandatory = getattr(task, "mandatory", None)
-    if priority is None:
-        priority = getattr(task, "priority", None)
-    serializer = serializer or getattr(task, "serializer", None)
-    taskset_id = opts.get("taskset_id")
-    publisher = opts.get("publisher")
-    retries = opts.get("retries", 0)
-    if countdown:
-        eta = datetime.now() + timedelta(seconds=countdown)
-
-    from celery.conf import ALWAYS_EAGER
-    if ALWAYS_EAGER:
-        return apply(task, args, kwargs)
-
-    need_to_close_connection = False
-    if not publisher:
-        if not connection:
-            connection = DjangoBrokerConnection(
-                            connect_timeout=connect_timeout)
-            need_to_close_connection = True
-        publisher = TaskPublisher(connection=connection)
-
-    delay_task = publisher.delay_task
-    if taskset_id:
-        delay_task = curry(publisher.delay_task_in_set, taskset_id)
-
-    task_id = delay_task(task.name, args, kwargs,
-                         task_id=task_id, retries=retries,
-                         routing_key=routing_key, exchange=exchange,
-                         mandatory=mandatory, immediate=immediate,
-                         serializer=serializer, priority=priority,
-                         eta=eta)
-
-    if need_to_close_connection:
-        publisher.close()
-        connection.close()
-
-    return AsyncResult(task_id)
-
-
-def delay_task(task_name, *args, **kwargs):
-    """Delay a task for execution by the ``celery`` daemon.
-
-    :param task_name: the name of a task registered in the task registry.
-
-    :param \*args: positional arguments to pass on to the task.
-
-    :param \*\*kwargs: keyword arguments to pass on to the task.
-
-    :raises celery.exceptions.NotRegistered: exception if no such task
-        has been registered in the task registry.
-
-    :rtype: :class:`celery.result.AsyncResult`.
-
-    Example
-
-        >>> r = delay_task("update_record", name="George Constanza", age=32)
-        >>> r.ready()
-        True
-        >>> r.result
-        "Record was updated"
-
-    """
-    if task_name not in tasks:
-        raise tasks.NotRegistered(
-                "Task with name %s not registered in the task registry." % (
-                    task_name))
-    task = tasks[task_name]
-    return apply_async(task, args, kwargs)
-
-
-def apply(task, args, kwargs, **options):
-    """Apply the task locally.
-
-    This will block until the task completes, and returns a
-    :class:`celery.result.EagerResult` instance.
-
-    """
-    args = args or []
-    kwargs = kwargs or {}
-    task_id = gen_unique_id()
-    retries = options.get("retries", 0)
-
-    # If it's a Task class we need to have to instance
-    # for it to be callable.
-    task = inspect.isclass(task) and task() or task
-
-    default_kwargs = {"task_name": task.name,
-                      "task_id": task_id,
-                      "task_retries": retries,
-                      "task_is_eager": True,
-                      "logfile": None,
-                      "loglevel": 0}
-    fun = getattr(task, "run", task)
-    supported_keys = fun_takes_kwargs(fun, default_kwargs)
-    extend_with = dict((key, val) for key, val in default_kwargs.items()
-                            if key in supported_keys)
-    kwargs.update(extend_with)
-
-    try:
-        ret_value = task(*args, **kwargs)
-        status = "DONE"
-        strtb = None
-    except Exception, exc:
-        type_, value_, tb = sys.exc_info()
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-        ret_value = exc
-        status = "FAILURE"
-
-    return EagerResult(task_id, ret_value, status, traceback=strtb)
-
-
-class ExecuteWrapper(object):
-    """Wraps the task in a jail, which catches all exceptions, and
-    saves the status and result of the task execution to the task
-    meta backend.
-
-    If the call was successful, it saves the result to the task result
-    backend, and sets the task status to ``"DONE"``.
-
-    If the call raises :exc:`celery.exceptions.RetryTaskError`, it extracts
-    the original exception, uses that as the result and sets the task status
-    to ``"RETRY"``.
-
-    If the call results in an exception, it saves the exception as the task
-    result, and sets the task status to ``"FAILURE"``.
-
-    :param fun: Callable object to execute.
-    :param task_id: The unique id of the task.
-    :param task_name: Name of the task.
-    :param args: List of positional args to pass on to the function.
-    :param kwargs: Keyword arguments mapping to pass on to the function.
-
-    :returns: the function return value on success, or
-        the exception instance on failure.
-
-    """
-
-    def __init__(self, fun, task_id, task_name, args=None, kwargs=None):
-        self.fun = fun
-        self.task_id = task_id
-        self.task_name = task_name
-        self.args = args or []
-        self.kwargs = kwargs or {}
-
-    def __call__(self, *args, **kwargs):
-        return self.execute_safe()
-
-    def execute_safe(self, *args, **kwargs):
-        try:
-            return self.execute(*args, **kwargs)
-        except Exception, exc:
-            type_, value_, tb = sys.exc_info()
-            exc = default_backend.prepare_exception(exc)
-            warnings.warn("Exception happend outside of task body: %s: %s" % (
-                str(exc.__class__), str(exc)))
-            return ExceptionInfo((type_, exc, tb))
-
-    def execute(self):
-        # Convenience variables
-        fun = self.fun
-        task_id = self.task_id
-        task_name = self.task_name
-        args = self.args
-        kwargs = self.kwargs
-
-        # Run task loader init handler.
-        current_loader.on_task_init(task_id, fun)
-
-        # Backend process cleanup
-        default_backend.process_cleanup()
-
-        # Send pre-run signal.
-        signals.task_prerun.send(sender=fun, task_id=task_id, task=fun,
-                                 args=args, kwargs=kwargs)
-
-        retval = None
-        timer_stat = TaskTimerStats.start(task_id, task_name, args, kwargs)
-        try:
-            result = fun(*args, **kwargs)
-        except (SystemExit, KeyboardInterrupt):
-            raise
-        except RetryTaskError, exc:
-            retval = self.handle_retry(exc, sys.exc_info())
-        except Exception, exc:
-            retval = self.handle_failure(exc, sys.exc_info())
-        else:
-            retval = self.handle_success(result)
-        finally:
-            timer_stat.stop()
-
-        # Send post-run signal.
-        signals.task_postrun.send(sender=fun, task_id=task_id, task=fun,
-                                  args=args, kwargs=kwargs, retval=retval)
-
-        return retval
-
-    def handle_success(self, retval):
-        """Handle successful execution.
-
-        Saves the result to the current result store (skipped if the callable
-            has a ``ignore_result`` attribute set to ``True``).
-
-        If the callable has a ``on_success`` function, it as called with
-        ``retval`` as argument.
-
-        :param retval: The return value.
-
-        """
-        if not getattr(self.fun, "ignore_result", False):
-            default_backend.mark_as_done(self.task_id, retval)
-
-        # Run success handler last to be sure the status is saved.
-        success_handler = getattr(self.fun, "on_success", noop)
-        success_handler(retval, self.task_id, self.args, self.kwargs)
-
-        return retval
-
-    def handle_retry(self, exc, exc_info):
-        """Handle retry exception."""
-        ### Task is to be retried.
-        type_, value_, tb = exc_info
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-
-        # RetryTaskError stores both a small message describing the retry
-        # and the original exception.
-        message, orig_exc = exc.args
-        default_backend.mark_as_retry(self.task_id, orig_exc, strtb)
-
-        # Create a simpler version of the RetryTaskError that stringifies
-        # the original exception instead of including the exception instance.
-        # This is for reporting the retry in logs, e-mail etc, while
-        # guaranteeing pickleability.
-        expanded_msg = "%s: %s" % (message, str(orig_exc))
-        retval = ExceptionInfo((type_,
-                                type_(expanded_msg, None),
-                                tb))
-
-        # Run retry handler last to be sure the status is saved.
-        retry_handler = getattr(self.fun, "on_retry", noop)
-        retry_handler(exc, self.task_id, self.args, self.kwargs)
-
-        return retval
-
-    def handle_failure(self, exc, exc_info):
-        """Handle exception."""
-        ### Task ended in failure.
-        type_, value_, tb = exc_info
-        strtb = "\n".join(traceback.format_exception(type_, value_, tb))
-
-        # mark_as_failure returns an exception that is guaranteed to
-        # be pickleable.
-        stored_exc = default_backend.mark_as_failure(self.task_id, exc, strtb)
-
-        # wrap exception info + traceback and return it to caller.
-        retval = ExceptionInfo((type_, stored_exc, tb))
-
-        # Run error handler last to be sure the status is stored.
-        error_handler = getattr(self.fun, "on_failure", noop)
-        error_handler(stored_exc, self.task_id, self.args, self.kwargs)
-
-        return retval

+ 152 - 0
celery/execute/__init__.py

@@ -0,0 +1,152 @@
+from datetime import datetime, timedelta
+
+from celery import conf
+from celery.utils import gen_unique_id, fun_takes_kwargs, mattrgetter
+from celery.result import EagerResult
+from celery.execute.trace import TaskTrace
+from celery.registry import tasks
+from celery.messaging import with_connection
+
+extract_exec_options = mattrgetter("routing_key", "exchange",
+                                   "immediate", "mandatory",
+                                   "priority", "serializer")
+
+
+def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
+        task_id=None, publisher=None, connection=None, connect_timeout=None,
+        **options):
+    """Run a task asynchronously by the celery daemon(s).
+
+    :param task: The task to run (a callable object, or a :class:`Task`
+        instance).
+
+    :keyword args: The positional arguments to pass on to the
+        task (a ``list``).
+
+    :keyword kwargs: The keyword arguments to pass on to the task (a ``dict``)
+
+    :keyword countdown: Number of seconds into the future that the task should
+        execute. Defaults to immediate delivery (Do not confuse that with
+        the ``immediate`` setting, they are unrelated).
+
+    :keyword eta: A :class:`datetime.datetime` object that describes the
+        absolute time when the task should execute. May not be specified
+        if ``countdown`` is also supplied. (Do not confuse this with the
+        ``immediate`` setting, they are unrelated).
+
+    :keyword routing_key: The routing key used to route the task to a worker
+        server.
+
+    :keyword exchange: The named exchange to send the task to. Defaults to
+        :attr:`celery.task.base.Task.exchange`.
+
+    :keyword immediate: Request immediate delivery. Will raise an exception
+        if the task cannot be routed to a worker immediately.
+        (Do not confuse this parameter with the ``countdown`` and ``eta``
+        settings, as they are unrelated).
+
+    :keyword mandatory: Mandatory routing. Raises an exception if there's
+        no running workers able to take on this task.
+
+    :keyword connection: Re-use existing AMQP connection.
+        The ``connect_timeout`` argument is not respected if this is set.
+
+    :keyword connect_timeout: The timeout in seconds, before we give up
+        on establishing a connection to the AMQP server.
+
+    :keyword priority: The task priority, a number between ``0`` and ``9``.
+
+    :keyword serializer: A string identifying the default serialization
+        method to use. Defaults to the ``CELERY_TASK_SERIALIZER`` setting.
+        Can be ``pickle``, ``json``, ``yaml``, or any custom serialization
+        methods that have been registered with
+        :mod:`carrot.serialization.registry`.
+
+    **Note**: If the ``CELERY_ALWAYS_EAGER`` setting is set, it will be
+    replaced by a local :func:`apply` call instead.
+
+    """
+    if conf.ALWAYS_EAGER:
+        return apply(task, args, kwargs)
+    return _apply_async(task, args=args, kwargs=kwargs, countdown=countdown,
+                        eta=eta, task_id=task_id, publisher=publisher,
+                        connection=connection,
+                        connect_timeout=connect_timeout, **options)
+
+
+@with_connection
+def _apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
+        task_id=None, publisher=None, connection=None, connect_timeout=None,
+        **options):
+
+    task = tasks[task.name] # Get instance.
+    exchange = options.get("exchange")
+    options = dict(extract_exec_options(task), **options)
+
+    if countdown: # Convert countdown to ETA.
+        eta = datetime.now() + timedelta(seconds=countdown)
+
+    publish = publisher or task.get_publisher(connection, exchange=exchange)
+    try:
+        task_id = publish.delay_task(task.name, args or [], kwargs or {},
+                                     task_id=task_id,
+                                     eta=eta,
+                                     **options)
+    finally:
+        publisher or publish.close()
+
+    return task.AsyncResult(task_id)
+
+
+def delay_task(task_name, *args, **kwargs):
+    """Delay a task for execution by the ``celery`` daemon.
+
+    :param task_name: the name of a task registered in the task registry.
+    :param \*args: positional arguments to pass on to the task.
+    :param \*\*kwargs: keyword arguments to pass on to the task.
+
+    :raises celery.exceptions.NotRegistered: exception if no such task
+        has been registered in the task registry.
+
+    :returns: :class:`celery.result.AsyncResult`.
+
+    Example
+
+        >>> r = delay_task("update_record", name="George Constanza", age=32)
+        >>> r.ready()
+        True
+        >>> r.result
+        "Record was updated"
+
+    """
+    return apply_async(tasks[task_name], args, kwargs)
+
+
+def apply(task, args, kwargs, **options):
+    """Apply the task locally.
+
+    This will block until the task completes, and returns a
+    :class:`celery.result.EagerResult` instance.
+
+    """
+    args = args or []
+    kwargs = kwargs or {}
+    task_id = gen_unique_id()
+    retries = options.get("retries", 0)
+
+    task = tasks[task.name] # Make sure we get the instance, not class.
+
+    default_kwargs = {"task_name": task.name,
+                      "task_id": task_id,
+                      "task_retries": retries,
+                      "task_is_eager": True,
+                      "logfile": None,
+                      "loglevel": 0}
+    supported_keys = fun_takes_kwargs(task.run, default_kwargs)
+    extend_with = dict((key, val) for key, val in default_kwargs.items()
+                            if key in supported_keys)
+    kwargs.update(extend_with)
+
+    trace = TaskTrace(task.name, task_id, args, kwargs, task=task)
+    retval = trace.execute()
+    return EagerResult(task_id, retval, trace.status, traceback=trace.strtb)

+ 94 - 0
celery/execute/trace.py

@@ -0,0 +1,94 @@
+import sys
+import traceback
+
+from celery import signals
+from celery.registry import tasks
+from celery.exceptions import RetryTaskError
+from celery.datastructures import ExceptionInfo
+
+
+class TraceInfo(object):
+    def __init__(self, status="PENDING", retval=None, exc_info=None):
+        self.status = status
+        self.retval = retval
+        self.exc_info = exc_info
+        self.exc_type = None
+        self.exc_value = None
+        self.tb = None
+        self.strtb = None
+        if self.exc_info:
+            self.exc_type, self.exc_value, self.tb = exc_info
+            self.strtb = "\n".join(traceback.format_exception(*exc_info))
+
+    @classmethod
+    def trace(cls, fun, args, kwargs):
+        """Trace the execution of a function, calling the appropiate callback
+        if the function raises retry, an failure or returned successfully."""
+        try:
+            return cls("SUCCESS", retval=fun(*args, **kwargs))
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except RetryTaskError, exc:
+            return cls("RETRY", retval=exc, exc_info=sys.exc_info())
+        except Exception, exc:
+            return cls("FAILURE", retval=exc, exc_info=sys.exc_info())
+
+
+class TaskTrace(object):
+
+    def __init__(self, task_name, task_id, args, kwargs, task=None):
+        self.task_id = task_id
+        self.task_name = task_name
+        self.args = args
+        self.kwargs = kwargs
+        self.task = task or tasks[self.task_name]
+        self.status = "PENDING"
+        self.strtb = None
+        self._trace_handlers = {"FAILURE": self.handle_failure,
+                                "RETRY": self.handle_retry,
+                                "SUCCESS": self.handle_success}
+
+    def __call__(self):
+        return self.execute()
+
+    def execute(self):
+        signals.task_prerun.send(sender=self.task, task_id=self.task_id,
+                                 task=self.task, args=self.args,
+                                 kwargs=self.kwargs)
+        retval = self._trace()
+
+        signals.task_postrun.send(sender=self.task, task_id=self.task_id,
+                                  task=self.task, args=self.args,
+                                  kwargs=self.kwargs, retval=retval)
+        return retval
+
+    def _trace(self):
+        trace = TraceInfo.trace(self.task, self.args, self.kwargs)
+        self.status = trace.status
+        self.strtb = trace.strtb
+        handler = self._trace_handlers[trace.status]
+        return handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
+
+    def handle_success(self, retval, *args):
+        """Handle successful execution."""
+        self.task.on_success(retval, self.task_id, self.args, self.kwargs)
+        return retval
+
+    def handle_retry(self, exc, type_, tb, strtb):
+        """Handle retry exception."""
+        self.task.on_retry(exc, self.task_id, self.args, self.kwargs)
+
+        # Create a simpler version of the RetryTaskError that stringifies
+        # the original exception instead of including the exception instance.
+        # This is for reporting the retry in logs, e-mail etc, while
+        # guaranteeing pickleability.
+        message, orig_exc = exc.args
+        expanded_msg = "%s: %s" % (message, str(orig_exc))
+        return ExceptionInfo((type_,
+                              type_(expanded_msg, None),
+                              tb))
+
+    def handle_failure(self, exc, type_, tb, strtb):
+        """Handle exception."""
+        self.task.on_failure(exc, self.task_id, self.args, self.kwargs)
+        return ExceptionInfo((type_, exc, tb))
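
A hedged sketch of driving ``TaskTrace`` directly, which is what both the eager
``apply()`` above and the worker job do; the task name is hypothetical and must already
be registered for the default ``tasks[...]`` lookup to succeed:

    from celery.execute.trace import TaskTrace
    from celery.utils import gen_unique_id

    task_id = gen_unique_id()
    trace = TaskTrace("myapp.tasks.refresh_feed",      # hypothetical registered task
                      task_id,
                      ["http://example.com/rss"],      # positional arguments
                      {})                              # keyword arguments
    retval = trace.execute()     # fires task_prerun/task_postrun signals around the run
    print(trace.status)          # "SUCCESS", "RETRY" or "FAILURE"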

+ 0 - 153
celery/fields.py

@@ -1,153 +0,0 @@
-"""
-
-Custom Django Model Fields.
-
-"""
-
-from copy import deepcopy
-from base64 import b64encode, b64decode
-from zlib import compress, decompress
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-from django.db import models
-from django.utils.encoding import force_unicode
-
-
-class PickledObject(str):
-    """A subclass of string so it can be told whether a string is a pickled
-    object or not (if the object is an instance of this class then it must
-    [well, should] be a pickled one).
-
-    Only really useful for passing pre-encoded values to ``default``
-    with ``dbsafe_encode``, not that doing so is necessary. If you
-    remove PickledObject and its references, you won't be able to pass
-    in pre-encoded values anymore, but you can always just pass in the
-    python objects themselves.
-
-    """
-    pass
-
-
-def dbsafe_encode(value, compress_object=False):
-    """We use deepcopy() here to avoid a problem with cPickle, where dumps
-    can generate different character streams for same lookup value if
-    they are referenced differently.
-
-    The reason this is important is because we do all of our lookups as
-    simple string matches, thus the character streams must be the same
-    for the lookups to work properly. See tests.py for more
-    information.
-
-    """
-    if not compress_object:
-        value = b64encode(pickle.dumps(deepcopy(value)))
-    else:
-        value = b64encode(compress(pickle.dumps(deepcopy(value))))
-    return PickledObject(value)
-
-
-def dbsafe_decode(value, compress_object=False):
-    if not compress_object:
-        value = pickle.loads(b64decode(value))
-    else:
-        value = pickle.loads(decompress(b64decode(value)))
-    return value
-
-
-class PickledObjectField(models.Field):
-    """A field that will accept *any* python object and store it in the
-    database. PickledObjectField will optionally compress it's values if
-    declared with the keyword argument ``compress=True``.
-
-    Does not actually encode and compress ``None`` objects (although you
-    can still do lookups using None). This way, it is still possible to
-    use the ``isnull`` lookup type correctly. Because of this, the field
-    defaults to ``null=True``, as otherwise it wouldn't be able to store
-    None values since they aren't pickled and encoded.
-
-    """
-    __metaclass__ = models.SubfieldBase
-
-    def __init__(self, *args, **kwargs):
-        self.compress = kwargs.pop('compress', False)
-        self.protocol = kwargs.pop('protocol', 2)
-        kwargs.setdefault('null', True)
-        kwargs.setdefault('editable', False)
-        super(PickledObjectField, self).__init__(*args, **kwargs)
-
-    def get_default(self):
-        """Returns the default value for this field.
-
-        The default implementation on models.Field calls force_unicode
-        on the default, which means you can't set arbitrary Python
-        objects as the default. To fix this, we just return the value
-        without calling force_unicode on it. Note that if you set a
-        callable as a default, the field will still call it. It will
-        *not* try to pickle and encode it.
-
-        """
-        if self.has_default():
-            if callable(self.default):
-                return self.default()
-            return self.default
-        # If the field doesn't have a default, then we punt to models.Field.
-        return super(PickledObjectField, self).get_default()
-
-    def to_python(self, value):
-        """B64decode and unpickle the object, optionally decompressing it.
-
-        If an error is raised in de-pickling and we're sure the value is
-        a definite pickle, the error is allowed to propogate. If we
-        aren't sure if the value is a pickle or not, then we catch the
-        error and return the original value instead.
-
-        """
-
-        if value is not None:
-            try:
-                value = dbsafe_decode(value, self.compress)
-            except:
-                # If the value is a definite pickle; and an error is raised in
-                # de-pickling it should be allowed to propogate.
-                if isinstance(value, PickledObject):
-                    raise
-        return value
-
-    def get_db_prep_value(self, value):
-        """Pickle and b64encode the object, optionally compressing it.
-
-        The pickling protocol is specified explicitly (by default 2),
-        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
-        protocol to change over time. If it did, ``exact`` and ``in``
-        lookups would likely fail, since pickle would now be generating
-        a different string.
-
-        """
-
-        if value is not None and not isinstance(value, PickledObject):
-            # We call force_unicode here explicitly, so that the encoded
-            # string isn't rejected by the postgresql_psycopg2 backend.
-            # Alternatively, we could have just registered PickledObject with
-            # the psycopg marshaller (telling it to store it like it would a
-            # string), but since both of these methods result in the same
-            # value being stored, doing things this way is much easier.
-            value = force_unicode(dbsafe_encode(value, self.compress))
-        return value
-
-    def value_to_string(self, obj):
-        value = self._get_val_from_obj(obj)
-        return self.get_db_prep_value(value)
-
-    def get_internal_type(self):
-        return 'TextField'
-
-    def get_db_prep_lookup(self, lookup_type, value):
-        if lookup_type not in ['exact', 'in', 'isnull']:
-            raise TypeError('Lookup type %s is not supported.' % lookup_type)
-        # The Field model already calls get_db_prep_value before doing the
-        # actual lookup, so all we need to do is limit the lookup types.
-        return super(PickledObjectField, self).get_db_prep_lookup(lookup_type,
-                                                                  value)
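
For context, the ``dbsafe_encode``/``dbsafe_decode`` helpers called above were defined earlier in the removed ``celery/fields.py`` and are not shown in this hunk; they presumably pickle the value, optionally zlib-compress it, and base64-encode the result so it survives a text column. A minimal sketch under that assumption:

    import zlib
    from base64 import b64encode, b64decode

    try:
        import cPickle as pickle    # Python 2, matching the code base
    except ImportError:
        import pickle


    def dbsafe_encode(value, compress=False, protocol=2):
        # Pickle first, optionally compress, then base64 so the result is
        # safe to store and compare as text (needed for exact/in lookups).
        pickled = pickle.dumps(value, protocol)
        if compress:
            pickled = zlib.compress(pickled)
        return b64encode(pickled)


    def dbsafe_decode(value, compress=False):
        pickled = b64decode(value)
        if compress:
            pickled = zlib.decompress(pickled)
        return pickle.loads(pickled)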

+ 108 - 48
celery/loaders/__init__.py

@@ -1,50 +1,110 @@
 import os
-from celery.loaders.djangoapp import Loader as DjangoLoader
+import string
+import warnings
+import importlib
+
+from carrot.utils import rpartition
+
+from celery.utils import get_full_cls_name
 from celery.loaders.default import Loader as DefaultLoader
-from django.conf import settings
-from django.core.management import setup_environ
-
-"""
-.. class:: Loader
-
-The current loader class.
-
-"""
-Loader = DefaultLoader
-if settings.configured:
-    Loader = DjangoLoader
-else:
-    if callable(getattr(os, "fork", None)): # Platform doesn't support fork()
-        # XXX On systems without fork, multiprocessing seems to be launching
-        # the processes in some other way which does not copy the memory
-        # of the parent process. This means that any configured env might
-        # be lost. This is a hack to make it work on Windows.
-        # A better way might be to use os.environ to set the currently
-        # used configuration method so as to propagate it to the "child"
-        # processes. But this has to be experimented with.
-        # [asksol/heyman]
-        try:
-            settings_mod = os.environ.get("DJANGO_SETTINGS_MODULE",
-                                          "settings")
-            project_settings = __import__(settings_mod, {}, {}, [''])
-            setup_environ(project_settings)
-            Loader = DjangoLoader
-        except ImportError:
-            pass
-
-"""
-.. data:: current_loader
-
-The current loader instance.
-
-"""
-current_loader = Loader()
-
-
-"""
-.. data:: settings
-
-The global settings object.
-
-"""
-settings = current_loader.conf
+from celery.loaders.djangoapp import Loader as DjangoLoader
+
+_DEFAULT_LOADER_CLASS_NAME = "Loader"
+LOADER_ALIASES = {"django": "celery.loaders.djangoapp.Loader",
+                  "default": "celery.loaders.default.Loader"}
+_loader_cache = {}
+_loader = None
+_settings = None
+
+
+def first_letter(s):
+    for char in s:
+        if char in string.letters:
+            return char
+
+
+def resolve_loader(loader):
+    loader = LOADER_ALIASES.get(loader, loader)
+    loader_module_name, _, loader_cls_name = rpartition(loader, ".")
+    if first_letter(loader_cls_name) not in string.uppercase:
+        warnings.warn(DeprecationWarning(
+            "CELERY_LOADER now needs loader class name, e.g. %s.%s" % (
+                loader, _DEFAULT_LOADER_CLASS_NAME)))
+        return loader, _DEFAULT_LOADER_CLASS_NAME
+    return loader_module_name, loader_cls_name
+
+
+def _get_loader_cls(loader):
+    loader_module_name, loader_cls_name = resolve_loader(loader)
+    loader_module = importlib.import_module(loader_module_name)
+    return getattr(loader_module, loader_cls_name)
+
+
+def get_loader_cls(loader):
+    """Get loader class by name/alias"""
+    if loader not in _loader_cache:
+        _loader_cache[loader] = _get_loader_cls(loader)
+    return _loader_cache[loader]
+
+
+def detect_loader():
+    loader = os.environ.get("CELERY_LOADER")
+    if loader:
+        return get_loader_cls(loader)
+
+    loader = _detect_loader()
+    os.environ["CELERY_LOADER"] = get_full_cls_name(loader)
+
+    return loader
+
+
+def _detect_loader(): # pragma: no cover
+    from django.conf import settings
+    if settings.configured:
+        return DjangoLoader
+    try:
+        # A settings module may be defined, but Django didn't attempt to
+        # load it yet. As an alternative to calling the private _setup(),
+        # we could also check whether DJANGO_SETTINGS_MODULE is set.
+        settings._setup()
+    except ImportError:
+        if not callable(getattr(os, "fork", None)):
+            # Platform doesn't support fork()
+            # XXX On systems without fork, multiprocessing seems to be
+            # launching the processes in some other way which does
+            # not copy the memory of the parent process. This means
+            # any configured env might be lost. This is a hack to make
+            # it work on Windows.
+            # A better way might be to use os.environ to set the currently
+            # used configuration method so as to propagate it to the "child"
+            # processes. But this has to be experimented with.
+            # [asksol/heyman]
+            from django.core.management import setup_environ
+            try:
+                settings_mod = os.environ.get("DJANGO_SETTINGS_MODULE",
+                                                "settings")
+                project_settings = __import__(settings_mod, {}, {}, [''])
+                setup_environ(project_settings)
+                return DjangoLoader
+            except ImportError:
+                pass
+    else:
+        return DjangoLoader
+
+    return DefaultLoader
+
+
+def current_loader():
+    """Detect and return the current loader."""
+    global _loader
+    if _loader is None:
+        _loader = detect_loader()()
+    return _loader
+
+
+def load_settings():
+    """Load the global settings object."""
+    global _settings
+    if _settings is None:
+        _settings = current_loader().conf
+    return _settings
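
A short usage sketch of the loader machinery above: an alias or dotted path resolves to a loader class (a path without a class name falls back to ``Loader`` with a deprecation warning), the class is cached, and ``current_loader()``/``load_settings()`` memoize the instance and its configuration. This assumes celery is importable; the default loader additionally needs a ``celeryconfig`` module on the path.

    import os

    from celery.loaders import get_loader_cls, current_loader, load_settings

    os.environ["CELERY_LOADER"] = "default"       # pick the loader by alias

    print(get_loader_cls("default"))              # celery.loaders.default.Loader
    print(get_loader_cls("celery.loaders.default.Loader"))   # same, by dotted path

    loader = current_loader()      # instantiated once, then reused
    settings = load_settings()     # the loader's configuration object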

+ 7 - 0
celery/loaders/base.py

@@ -26,6 +26,13 @@ class BaseLoader(object):
         """This method is called when the worker (``celeryd``) starts."""
         pass
 
+    def import_task_module(self, module):
+        return __import__(module, [], [], [''])
+
+    def import_default_modules(self):
+        imports = getattr(self.conf, "CELERY_IMPORTS", None) or []
+        return map(self.import_task_module, imports)
+
     @property
     def conf(self):
         """Loader configuration."""

+ 18 - 16
celery/loaders/default.py

@@ -1,4 +1,5 @@
 import os
+
 from celery.loaders.base import BaseLoader
 
 DEFAULT_CONFIG_MODULE = "celeryconfig"
@@ -12,8 +13,7 @@ DEFAULT_SETTINGS = {
 
 
 def wanted_module_item(item):
-    is_private = item.startswith("_")
-    return not is_private
+    return not item.startswith("_")
 
 
 class Loader(BaseLoader):
@@ -23,26 +23,30 @@ class Loader(BaseLoader):
 
     """
 
+    def setup_django_env(self, settingsdict):
+        config = dict(DEFAULT_SETTINGS, **settingsdict)
+
+        from django.conf import settings
+        if not settings.configured:
+            settings.configure()
+        for config_key, config_value in config.items():
+            setattr(settings, config_key, config_value)
+        installed_apps = set(list(DEFAULT_SETTINGS["INSTALLED_APPS"]) + \
+                             list(settings.INSTALLED_APPS))
+        settings.INSTALLED_APPS = tuple(installed_apps)
+
+        return settings
+
     def read_configuration(self):
         """Read configuration from ``celeryconfig.py`` and configure
         celery and Django so it can be used by regular Python."""
-        config = dict(DEFAULT_SETTINGS)
         configname = os.environ.get("CELERY_CONFIG_MODULE",
                                     DEFAULT_CONFIG_MODULE)
         celeryconfig = __import__(configname, {}, {}, [''])
         usercfg = dict((key, getattr(celeryconfig, key))
                             for key in dir(celeryconfig)
                                 if wanted_module_item(key))
-        config.update(usercfg)
-        from django.conf import settings
-        if not settings.configured:
-            settings.configure()
-        for config_key, config_value in usercfg.items():
-            setattr(settings, config_key, config_value)
-        installed_apps = set(DEFAULT_SETTINGS["INSTALLED_APPS"] + \
-                             settings.INSTALLED_APPS)
-        settings.INSTALLED_APPS = tuple(installed_apps)
-        return settings
+        return self.setup_django_env(usercfg)
 
     def on_worker_init(self):
         """Imports modules at worker init so tasks can be registered
@@ -52,6 +56,4 @@ class Loader(BaseLoader):
         setting in ``celeryconf.py``.
 
         """
-        imports = getattr(self.conf, "CELERY_IMPORTS", [])
-        for module in imports:
-            __import__(module, [], [], [''])
+        self.import_default_modules()
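
The default loader reads the module named by ``CELERY_CONFIG_MODULE`` (``celeryconfig`` by default), keeps every non-underscore attribute, and layers the result onto a minimal Django settings object. A hypothetical ``celeryconfig.py`` for a plain-Python project might look like this (all values are illustrative):

    # celeryconfig.py -- found on sys.path by celery.loaders.default.Loader
    DATABASE_ENGINE = "sqlite3"
    DATABASE_NAME = "celery.db"

    BROKER_HOST = "localhost"
    BROKER_USER = "guest"
    BROKER_PASSWORD = "guest"
    BROKER_VHOST = "/"

    CELERY_BACKEND = "database"

    # Modules the worker imports at start-up so their tasks get registered.
    CELERY_IMPORTS = ("myapp.tasks", )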

+ 56 - 14
celery/loaders/djangoapp.py

@@ -1,5 +1,10 @@
+import imp
+import importlib
+
 from celery.loaders.base import BaseLoader
 
+_RACE_PROTECTION = False
+
 
 class Loader(BaseLoader):
     """The Django loader."""
@@ -22,28 +27,65 @@ class Loader(BaseLoader):
         from django.db import connection
         connection.close()
 
-        # Reset cache connection only if using memcached/libmemcached
+        # ## Reset cache connection only if using memcached/libmemcached
         from django.core import cache
-        # XXX At Opera we use a custom memcached backend that uses libmemcached
-        # instead of libmemcache (cmemcache). Should find a better solution for
-        # this, but for now "memcached" should probably be unique enough of a
-        # string to not make problems.
+        # XXX At Opera we use a custom memcached backend that uses
+        # libmemcached instead of libmemcache (cmemcache). Should find a
+        # better solution for this, but for now "memcached" should probably
+        # be unique enough of a string to not make problems.
         cache_backend = cache.settings.CACHE_BACKEND
-        if hasattr(cache, "parse_backend_uri"):
-            cache_scheme = cache.parse_backend_uri(cache_backend)[0]
-        else:
-            # Django <= 1.0.2
-            cache_scheme = cache_backend.split(":", 1)[0]
+        try:
+            parse_backend = cache.parse_backend_uri
+        except AttributeError:
+            parse_backend = lambda backend: backend.split(":", 1)
+        cache_scheme = parse_backend(cache_backend)[0]
+
         if "memcached" in cache_scheme:
             cache.cache.close()
 
     def on_worker_init(self):
         """Called when the worker starts.
 
-        Uses :func:`celery.discovery.autodiscover` to automatically discover
-        any ``tasks.py`` files in the applications listed in
-        ``INSTALLED_APPS``.
+        Automatically discovers any ``tasks.py`` files in the applications
+        listed in ``INSTALLED_APPS``.
 
         """
-        from celery.discovery import autodiscover
+        self.import_default_modules()
         autodiscover()
+
+
+def autodiscover():
+    """Include tasks for all applications in :setting:`INSTALLED_APPS`."""
+    from django.conf import settings
+    global _RACE_PROTECTION
+
+    if _RACE_PROTECTION:
+        return
+    _RACE_PROTECTION = True
+    try:
+        return filter(None, [find_related_module(app, "tasks")
+                                for app in settings.INSTALLED_APPS])
+    finally:
+        _RACE_PROTECTION = False
+
+
+def find_related_module(app, related_name):
+    """Given an application name and a module name, tries to find that
+    module in the application."""
+
+    try:
+        app_path = importlib.import_module(app).__path__
+    except AttributeError:
+        return
+
+    try:
+        imp.find_module(related_name, app_path)
+    except ImportError:
+        return
+
+    module = importlib.import_module("%s.%s" % (app, related_name))
+
+    try:
+        return getattr(module, related_name)
+    except AttributeError:
+        return
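
In practice the auto-discovery above means a Django app only has to ship a ``tasks.py``; the worker imports it at start-up because the app is listed in ``INSTALLED_APPS``. A sketch of such a module using the new ``celery.decorators.task`` decorator added in this change (the app and task names are examples):

    # myapp/tasks.py -- picked up by autodiscover() because "myapp" is installed
    from celery.decorators import task


    @task()
    def add(x, y):
        return x + y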

+ 82 - 27
celery/log.py

@@ -4,38 +4,78 @@ import sys
 import time
 import logging
 import traceback
-from celery.conf import LOG_FORMAT, DAEMON_LOG_LEVEL
+
+from celery import conf
+from celery.utils import noop
+
+_hijacked = False
+_monkeypatched = False
+
+def _ensure_process_aware_logger():
+    global _monkeypatched
+
+    if not _monkeypatched:
+        from celery.utils.patch import monkeypatch
+        monkeypatch()
+        _monkeypatched = True
+
+
+def _hijack_multiprocessing_logger():
+    from multiprocessing import util as mputil
+    global _hijacked
+
+    if _hijacked:
+        return mputil.get_logger()
+
+    _ensure_process_aware_logger()
+
+    logging.Logger.manager.loggerDict.clear()
+
+    try:
+        if mputil._logger is not None:
+            mputil._logger = None
+    except AttributeError:
+        pass
+
+    _hijacked = True
+    return mputil.get_logger()
 
 
 def get_default_logger(loglevel=None):
-    import multiprocessing
-    logger = multiprocessing.get_logger()
-    loglevel is not None and logger.setLevel(loglevel)
+    """Get default logger instance.
+
+    :keyword loglevel: Initial log level.
+
+    """
+    logger = _hijack_multiprocessing_logger()
+    if loglevel is not None:
+        logger.setLevel(loglevel)
     return logger
 
 
-def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
-        **kwargs):
+def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
+        format=conf.CELERYD_LOG_FORMAT, **kwargs):
     """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
     ``stderr`` is used.
 
     Returns logger object.
     """
+
     logger = get_default_logger(loglevel=loglevel)
     if logger.handlers:
         # Logger already configured
         return logger
     if logfile:
+        handler = logging.FileHandler
         if hasattr(logfile, "write"):
-            log_file_handler = logging.StreamHandler(logfile)
-        else:
-            log_file_handler = logging.FileHandler(logfile)
+            handler = logging.StreamHandler
+        loghandler = handler(logfile)
         formatter = logging.Formatter(format)
-        log_file_handler.setFormatter(formatter)
-        logger.addHandler(log_file_handler)
+        loghandler.setFormatter(formatter)
+        logger.addHandler(loghandler)
     else:
-        import multiprocessing
-        multiprocessing.log_to_stderr()
+        from multiprocessing.util import log_to_stderr
+        log_to_stderr()
     return logger
 
 
@@ -43,20 +83,20 @@ def emergency_error(logfile, message):
     """Emergency error logging, for when there's no standard file
     descriptors open because the process has been daemonized or for
     some other reason."""
-    logfh_needs_to_close = False
-    if not logfile:
-        logfile = sys.__stderr__
+    closefh = noop
+    logfile = logfile or sys.__stderr__
     if hasattr(logfile, "write"):
         logfh = logfile
     else:
         logfh = open(logfile, "a")
-        logfh_needs_to_close = True
-    logfh.write("[%(asctime)s: FATAL/%(pid)d]: %(message)s\n" % {
-                    "asctime": time.asctime(),
-                    "pid": os.getpid(),
-                    "message": message})
-    if logfh_needs_to_close:
-        logfh.close()
+        closefh = logfh.close
+    try:
+        logfh.write("[%(asctime)s: CRITICAL/%(pid)d]: %(message)s\n" % {
+                        "asctime": time.asctime(),
+                        "pid": os.getpid(),
+                        "message": message})
+    finally:
+        closefh()
 
 
 def redirect_stdouts_to_logger(logger, loglevel=None):
@@ -68,8 +108,7 @@ def redirect_stdouts_to_logger(logger, loglevel=None):
 
     """
     proxy = LoggingProxy(logger, loglevel)
-    sys.stdout = proxy
-    sys.stderr = proxy
+    sys.stdout = sys.stderr = proxy
     return proxy
 
 
@@ -83,7 +122,7 @@ class LoggingProxy(object):
     mode = "w"
     name = None
     closed = False
-    loglevel = logging.INFO
+    loglevel = logging.ERROR
 
     def __init__(self, logger, loglevel=None):
         self.logger = logger
@@ -95,7 +134,7 @@ class LoggingProxy(object):
         ``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
         infinite loops."""
 
-        def wrap_handler(handler):
+        def wrap_handler(handler): # pragma: no cover
 
             class WithSafeHandleError(logging.Handler):
 
@@ -146,3 +185,19 @@ class LoggingProxy(object):
 
     def fileno(self):
         return None
+
+
+class SilenceRepeated(object):
+    """Only log action every n iterations."""
+
+    def __init__(self, action, max_iterations=10):
+        self.action = action
+        self.max_iterations = max_iterations
+        self._iterations = 0
+
+    def __call__(self, *msgs):
+        if self._iterations >= self.max_iterations:
+            map(self.action, msgs)
+            self._iterations = 0
+        else:
+            self._iterations += 1
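
``SilenceRepeated`` is a small noise filter: it swallows a message until it has been asked ``max_iterations`` times, then lets one through and starts counting again. A usage sketch (the logger setup is illustrative):

    import logging

    from celery.log import SilenceRepeated

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("celery.sketch")

    log_sometimes = SilenceRepeated(logger.debug, max_iterations=10)

    for _ in range(50):
        # Only about one in every max_iterations calls actually reaches
        # logger.debug(); the rest are silently dropped.
        log_sometimes("Waking up, nothing to do...")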

+ 18 - 0
celery/management/commands/celerybeat.py

@@ -0,0 +1,18 @@
+"""
+
+Start the celery clock service from the Django management command.
+
+"""
+from django.core.management.base import BaseCommand
+
+from celery.bin.celerybeat import run_clockservice, OPTION_LIST
+
+
+class Command(BaseCommand):
+    """Run the celery periodic task scheduler."""
+    option_list = BaseCommand.option_list + OPTION_LIST
+    help = 'Run the celery periodic task scheduler'
+
+    def handle(self, *args, **options):
+        """Handle the management command."""
+        run_clockservice(**options)

+ 1 - 0
celery/management/commands/celeryd.py

@@ -4,6 +4,7 @@ Start the celery daemon from the Django management command.
 
 """
 from django.core.management.base import BaseCommand
+
 from celery.bin.celeryd import run_worker, OPTION_LIST
 
 

+ 37 - 0
celery/management/commands/celerymon.py

@@ -0,0 +1,37 @@
+"""
+
+Start the celery monitor from the Django management command.
+
+"""
+import sys
+from django.core.management.base import BaseCommand
+
+try:
+    from celerymonitor.bin.celerymond import run_monitor, OPTION_LIST
+except ImportError:
+    OPTION_LIST = ()
+    run_monitor = None
+
+MISSING = """
+You don't have celerymon installed, please install it by running the following
+command:
+
+    $ easy_install celerymon
+
+or if you're using pip (like you should be):
+
+    $ pip install celerymon
+"""
+
+
+class Command(BaseCommand):
+    """Run the celery monitor."""
+    option_list = BaseCommand.option_list + OPTION_LIST
+    help = 'Run the celery monitor'
+
+    def handle(self, *args, **options):
+        """Handle the management command."""
+        if run_monitor is None:
+            sys.stderr.write(MISSING)
+        else:
+            run_monitor(**options)

+ 0 - 22
celery/management/commands/celerystats.py

@@ -1,22 +0,0 @@
-"""
-
-Start the celery daemon from the Django management command.
-
-"""
-from django.core.management.base import BaseCommand
-from celery.monitoring import StatsCollector
-
-
-class Command(BaseCommand):
-    """Collect/flush and dump a report from the currently available
-    statistics."""
-    option_list = BaseCommand.option_list
-    help = "Collect/flush and dump a report from the currently available " + \
-            "statistics"
-
-    def handle(self, *args, **options):
-        """Handle the management command."""
-        stats = StatsCollector()
-        print("* Gathering statistics...")
-        stats.collect()
-        stats.report()

+ 35 - 123
celery/managers.py

@@ -1,75 +1,38 @@
 """celery.managers"""
+from datetime import datetime
 from django.db import models
-from django.db import connection, transaction
-from celery.registry import tasks
-from celery.conf import TASK_RESULT_EXPIRES
-from datetime import datetime, timedelta
-from django.conf import settings
-import random
-
-# server_drift can be negative, but timedelta supports addition on
-# negative seconds.
-SERVER_DRIFT = timedelta(seconds=random.vonmisesvariate(1, 4))
-
-
-class TableLock(object):
-    """Base class for database table locks. Also works as a NOOP lock."""
-
-    def __init__(self, table, type="read"):
-        self.table = table
-        self.type = type
-        self.cursor = None
-
-    def lock_table(self):
-        """Lock the table."""
-        pass
-
-    def unlock_table(self):
-        """Release previously locked tables."""
-        pass
-
-    @classmethod
-    def acquire(cls, table, type=None):
-        """Acquire table lock."""
-        lock = cls(table, type)
-        lock.lock_table()
-        return lock
-
-    def release(self):
-        """Release the lock."""
-        self.unlock_table()
-        if self.cursor:
-            self.cursor.close()
-            self.cursor = None
-
+from django.db import transaction
 
-class MySQLTableLock(TableLock):
-    """Table lock support for MySQL."""
-
-    def lock_table(self):
-        """Lock MySQL table."""
-        self.cursor = connection.cursor()
-        self.cursor.execute("LOCK TABLES %s %s" % (
-            self.table, self.type.upper()))
-
-    def unlock_table(self):
-        """Unlock MySQL table."""
-        self.cursor.execute("UNLOCK TABLES")
+from celery.conf import TASK_RESULT_EXPIRES
 
-TABLE_LOCK_FOR_ENGINE = {"mysql": MySQLTableLock}
-table_lock = TABLE_LOCK_FOR_ENGINE.get(settings.DATABASE_ENGINE, TableLock)
 
 class TaskManager(models.Manager):
     """Manager for :class:`celery.models.Task` models."""
 
-    def get_task(self, task_id):
-        """Get task meta for task by ``task_id``."""
-        task, created = self.get_or_create(task_id=task_id)
+    def get_task(self, task_id, exception_retry_count=1):
+        """Get task meta for task by ``task_id``.
+
+        :keyword exception_retry_count: How many times to retry by
+            transaction rollback on exception. This could theoretically
+            happen in a race condition if another worker is trying to
+            create the same task. The default is to retry once.
+
+        """
+        try:
+            task, created = self.get_or_create(task_id=task_id)
+        except Exception: # pragma: no cover
+            # We don't have a map of the different exceptions backends can
+            # throw, so we have to catch everything.
+            if exception_retry_count > 0:
+                transaction.rollback_unless_managed()
+                return self.get_task(task_id, exception_retry_count - 1)
+            else:
+                raise
         return task
 
-    def is_done(self, task_id):
+    def is_successful(self, task_id):
         """Returns ``True`` if the task was executed successfully."""
-        return self.get_task(task_id).status == "DONE"
+        return self.get_task(task_id).status == "SUCCESS"
 
     def get_all_expired(self):
         """Get all expired task results."""
@@ -80,7 +43,7 @@ class TaskManager(models.Manager):
         self.get_all_expired().delete()
 
     def store_result(self, task_id, result, status, traceback=None,
-            exception_retry=True):
+            exception_retry_count=2):
         """Store the result and status of a task.
 
         :param task_id: task id
@@ -95,8 +58,11 @@ class TaskManager(models.Manager):
         :keyword traceback: The traceback at the point of exception (if the
             task failed).
 
-        :keyword exception_retry: If True, we try a single retry with
-            transaction rollback on exception
+        :keyword exception_retry_count: How many times to retry by
+            transaction rollback on exception. This could theoretically
+            happen in a race condition if another worker is trying to
+            create the same task. The default is to retry twice.
+
         """
         try:
             task, created = self.get_or_create(task_id=task_id, defaults={
@@ -108,17 +74,19 @@ class TaskManager(models.Manager):
                 task.result = result
                 task.traceback = traceback
                 task.save()
-        except Exception, exc:
+        except Exception: # pragma: no cover
             # depending on the database backend we can get various exceptions.
             # for example, psycopg2 raises an exception if some operation
             # breaks the transaction, and saving the task result won't be
             # possible until we roll back the transaction.
-            if exception_retry:
+            if exception_retry_count > 0:
                 transaction.rollback_unless_managed()
-                self.store_result(task_id, result, status, traceback, False)
+                self.store_result(task_id, result, status, traceback,
+                                  exception_retry_count - 1)
             else:
                 raise
 
+
 class TaskSetManager(models.Manager):
     """Manager for :class:`celery.models.TaskSet` models."""
 
@@ -161,59 +129,3 @@ class TaskSetManager(models.Manager):
                 self.store_result(taskset_id, result, False)
             else:
                 raise
-
-
-class PeriodicTaskManager(models.Manager):
-    """Manager for :class:`celery.models.PeriodicTask` models."""
-
-    def init_entries(self):
-        """Add entries for all registered periodic tasks.
-
-        Should be run at worker start.
-        """
-        periodic_tasks = tasks.get_all_periodic()
-        for task_name in periodic_tasks.keys():
-            task_meta, created = self.get_or_create(name=task_name)
-
-    def is_time(self, last_run_at, run_every):
-        """Check if it is time to run the periodic task.
-
-        :param last_run_at: Last time the periodic task was run.
-        :param run_every: How often to run the periodic task.
-
-        :rtype bool:
-
-        """
-        run_every_drifted = run_every + SERVER_DRIFT
-        run_at = last_run_at + run_every_drifted
-        if datetime.now() > run_at:
-            return True
-        return False
-
-    def get_waiting_tasks(self):
-        """Get all waiting periodic tasks.
-
-        :returns: list of :class:`celery.models.PeriodicTaskMeta` objects.
-        """
-        periodic_tasks = tasks.get_all_periodic()
-        db_table = self.model._meta.db_table
-
-        # Find all periodic tasks to be run.
-        waiting = []
-        for task_meta in self.all():
-            if task_meta.name in periodic_tasks:
-                task = periodic_tasks[task_meta.name]
-                run_every = task.run_every
-                if self.is_time(task_meta.last_run_at, run_every):
-                    # Get the object again to be sure no one else
-                    # has already taken care of it.
-                    lock = table_lock.acquire(db_table, "write")
-                    try:
-                        secure = self.get(pk=task_meta.pk)
-                        if self.is_time(secure.last_run_at, run_every):
-                            secure.last_run_at = datetime.now()
-                            secure.save()
-                            waiting.append(secure)
-                    finally:
-                        lock.release()
-        return waiting
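
The ``exception_retry_count`` pattern used by both managers generalises: if the backend raises because a concurrent worker broke the transaction, roll back and retry a bounded number of times. A backend-agnostic sketch of the same idea, assuming a configured Django database connection (``do_write`` is a placeholder for any write such as ``get_or_create``):

    from django.db import transaction


    def retrying(do_write, retries=2):
        """Call do_write(), rolling back and retrying on any exception."""
        for attempt in range(retries + 1):
            try:
                return do_write()
            except Exception:
                # Backends raise different exceptions here, so catch everything;
                # roll the broken transaction back and retry while we still can.
                if attempt == retries:
                    raise
                transaction.rollback_unless_managed()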

+ 118 - 52
celery/messaging.py

@@ -3,12 +3,15 @@
 Sending and Receiving Messages
 
 """
+import socket
+
+from carrot.connection import DjangoBrokerConnection, AMQPConnectionException
 from carrot.messaging import Publisher, Consumer, ConsumerSet
+from billiard.utils.functional import wraps
+
 from celery import conf
 from celery import signals
-from celery.utils import gen_unique_id
-from celery.utils import mitemgetter
-from celery.serialization import pickle
+from celery.utils import gen_unique_id, mitemgetter, noop
 
 
 MSG_OPTIONS = ("mandatory", "priority",
@@ -16,35 +19,36 @@ MSG_OPTIONS = ("mandatory", "priority",
                "serializer")
 
 get_msg_options = mitemgetter(*MSG_OPTIONS)
-
 extract_msg_options = lambda d: dict(zip(MSG_OPTIONS, get_msg_options(d)))
+default_queue = conf.routing_table[conf.DEFAULT_QUEUE]
+
+_queues_declared = False
 
 
 class TaskPublisher(Publisher):
-    """The AMQP Task Publisher class."""
-    exchange = conf.AMQP_EXCHANGE
-    exchange_type = conf.AMQP_EXCHANGE_TYPE
-    routing_key = conf.AMQP_PUBLISHER_ROUTING_KEY
+    """Publish tasks."""
+    exchange = default_queue["exchange"]
+    exchange_type = default_queue["exchange_type"]
+    routing_key = conf.DEFAULT_ROUTING_KEY
     serializer = conf.TASK_SERIALIZER
-    encoder = pickle.dumps
 
-    def delay_task(self, task_name, task_args, task_kwargs, **kwargs):
-        """Delay task for execution by the celery nodes."""
-        return self._delay_task(task_name=task_name, task_args=task_args,
-                                task_kwargs=task_kwargs, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(TaskPublisher, self).__init__(*args, **kwargs)
 
-    def delay_task_in_set(self, taskset_id, task_name, task_args, task_kwargs,
-            **kwargs):
-        """Delay a task which is part of a task set."""
-        return self._delay_task(task_name=task_name, part_of_set=taskset_id,
-                                task_args=task_args, task_kwargs=task_kwargs,
-                                **kwargs)
+        # Make sure all queues are declared.
+        global _queues_declared
+        if not _queues_declared:
+            consumers = get_consumer_set(self.connection)
+            consumers.close()
+            _queues_declared = True
 
-    def _delay_task(self, task_name, task_id=None, part_of_set=None,
-            task_args=None, task_kwargs=None, **kwargs):
-        """INTERNAL"""
+    def delay_task(self, task_name, task_args=None, task_kwargs=None,
+            task_id=None, taskset_id=None, **kwargs):
+        """Delay task for execution by the celery nodes."""
 
         task_id = task_id or gen_unique_id()
+        eta = kwargs.get("eta")
+        eta = eta and eta.isoformat()
 
         message_data = {
             "task": task_name,
@@ -52,11 +56,11 @@ class TaskPublisher(Publisher):
             "args": task_args or [],
             "kwargs": task_kwargs or {},
             "retries": kwargs.get("retries", 0),
-            "eta": kwargs.get("eta"),
+            "eta": eta,
         }
 
-        if part_of_set:
-            message_data["taskset"] = part_of_set
+        if taskset_id:
+            message_data["taskset"] = taskset_id
 
         self.send(message_data, **extract_msg_options(kwargs))
         signals.task_sent.send(sender=task_name, **message_data)
@@ -64,31 +68,93 @@ class TaskPublisher(Publisher):
         return task_id
 
 
-def get_consumer_set(connection, queues=conf.AMQP_CONSUMER_QUEUES, **options):
-    return ConsumerSet(connection, from_dict=queues, **options)
-
-
 class TaskConsumer(Consumer):
-    """The AMQP Task Consumer class."""
-    queue = conf.AMQP_CONSUMER_QUEUE
-    exchange = conf.AMQP_EXCHANGE
-    routing_key = conf.AMQP_CONSUMER_ROUTING_KEY
-    exchange_type = conf.AMQP_EXCHANGE_TYPE
-    decoder = pickle.loads
-    auto_ack = False
-    no_ack = False
-
-
-class StatsPublisher(Publisher):
-    exchange = "celerygraph"
-    routing_key = "stats"
-    encoder = pickle.dumps
-
-
-class StatsConsumer(Consumer):
-    queue = "celerygraph"
-    exchange = "celerygraph"
-    routing_key = "stats"
-    exchange_type = "direct"
-    decoder = pickle.loads
-    no_ack=True
+    """Consume tasks"""
+    queue = conf.DEFAULT_QUEUE
+    exchange = default_queue["exchange"]
+    routing_key = default_queue["binding_key"]
+    exchange_type = default_queue["exchange_type"]
+
+
+class EventPublisher(Publisher):
+    """Publish events"""
+    exchange = conf.EVENT_EXCHANGE
+    exchange_type = conf.EVENT_EXCHANGE_TYPE
+    routing_key = conf.EVENT_ROUTING_KEY
+
+
+class EventConsumer(Consumer):
+    """Consume events"""
+    queue = conf.EVENT_QUEUE
+    exchange = conf.EVENT_EXCHANGE
+    exchange_type = conf.EVENT_EXCHANGE_TYPE
+    routing_key = conf.EVENT_ROUTING_KEY
+    no_ack = True
+
+
+class BroadcastPublisher(Publisher):
+    """Publish broadcast commands"""
+    exchange = conf.BROADCAST_EXCHANGE
+    exchange_type = conf.BROADCAST_EXCHANGE_TYPE
+
+    def send(self, type, arguments, destination=None):
+        """Send broadcast command."""
+        arguments["command"] = type
+        arguments["destination"] = destination
+        super(BroadcastPublisher, self).send({"control": arguments})
+
+
+class BroadcastConsumer(Consumer):
+    """Consume broadcast commands"""
+    queue = conf.BROADCAST_QUEUE
+    exchange = conf.BROADCAST_EXCHANGE
+    exchange_type = conf.BROADCAST_EXCHANGE_TYPE
+    no_ack = True
+
+    def __init__(self, *args, **kwargs):
+        hostname = kwargs.pop("hostname", None) or socket.gethostname()
+        self.queue = "%s_%s" % (self.queue, hostname)
+        super(BroadcastConsumer, self).__init__(*args, **kwargs)
+
+
+def establish_connection(connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+    """Establish a connection to the message broker."""
+    return DjangoBrokerConnection(connect_timeout=connect_timeout)
+
+
+def with_connection(fun):
+    """Decorator for providing default message broker connection for functions
+    supporting the ``connection`` and ``connect_timeout`` keyword
+    arguments."""
+
+    @wraps(fun)
+    def _inner(*args, **kwargs):
+        connection = kwargs.get("connection")
+        timeout = kwargs.get("connect_timeout", conf.BROKER_CONNECTION_TIMEOUT)
+        kwargs["connection"] = conn = connection or \
+                establish_connection(connect_timeout=timeout)
+        close_connection = not connection and conn.close or noop
+
+        try:
+            return fun(*args, **kwargs)
+        finally:
+            close_connection()
+
+    return _inner
+
+
+def get_consumer_set(connection, queues=None, **options):
+    """Get the :class:`carrot.messaging.ConsumerSet`` for a queue
+    configuration.
+
+    Defaults to the queues in ``CELERY_QUEUES``.
+
+    """
+    queues = queues or conf.routing_table
+    cset = ConsumerSet(connection)
+    for queue_name, queue_options in queues.items():
+        queue_options["routing_key"] = queue_options.pop("binding_key", None)
+        consumer = Consumer(connection, queue=queue_name,
+                            backend=cset.backend, **queue_options)
+        cset.consumers.append(consumer)
+    return cset
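
``with_connection`` lets a caller either pass an existing broker connection or have one opened (and closed) on its behalf. A sketch of a function written against it, assuming a reachable broker and carrot's Django settings; the task name is an example:

    from celery.messaging import TaskPublisher, with_connection


    @with_connection
    def send_add_task(x, y, connection=None, connect_timeout=None):
        # The decorator fills in ``connection`` if the caller did not supply
        # one, and closes it afterwards in that case.
        publisher = TaskPublisher(connection=connection)
        try:
            return publisher.delay_task("myapp.add", task_args=[x, y])
        finally:
            publisher.close()


    send_add_task(2, 2)      # uses (and cleans up) a temporary connection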

+ 11 - 48
celery/models.py

@@ -1,23 +1,18 @@
-"""
-
-Django Models.
-
-"""
 import django
 from django.db import models
-from celery.registry import tasks
-from celery.managers import TaskManager, TaskSetManager, PeriodicTaskManager
-from celery.fields import PickledObjectField
-from celery import conf
 from django.utils.translation import ugettext_lazy as _
-from datetime import datetime
+
+from picklefield.fields import PickledObjectField
+
+from celery import conf
+from celery.managers import TaskManager, TaskSetManager
 
 TASK_STATUS_PENDING = "PENDING"
 TASK_STATUS_RETRY = "RETRY"
 TASK_STATUS_FAILURE = "FAILURE"
-TASK_STATUS_DONE = "DONE"
+TASK_STATUS_SUCCESS = "SUCCESS"
 TASK_STATUSES = (TASK_STATUS_PENDING, TASK_STATUS_RETRY,
-                 TASK_STATUS_FAILURE, TASK_STATUS_DONE)
+                 TASK_STATUS_FAILURE, TASK_STATUS_SUCCESS)
 TASK_STATUSES_CHOICES = zip(TASK_STATUSES, TASK_STATUSES)
 
 
@@ -38,7 +33,8 @@ class TaskMeta(models.Model):
         verbose_name_plural = _(u"task meta")
 
     def __unicode__(self):
-        return u"<Task: %s done:%s>" % (self.task_id, self.status)
+        return u"<Task: %s successful: %s>" % (self.task_id, self.status)
+
 
 class TaskSetMeta(models.Model):
     """TaskSet result"""
@@ -56,42 +52,9 @@ class TaskSetMeta(models.Model):
     def __unicode__(self):
         return u"<TaskSet: %s>" % (self.taskset_id)
 
-class PeriodicTaskMeta(models.Model):
-    """Information about a Periodic Task."""
-    name = models.CharField(_(u"name"), max_length=255, unique=True)
-    last_run_at = models.DateTimeField(_(u"last time run"),
-                                       blank=True,
-                                       default=datetime.fromtimestamp(0))
-    total_run_count = models.PositiveIntegerField(_(u"total run count"),
-                                                  default=0)
-
-    objects = PeriodicTaskManager()
-
-    class Meta:
-        """Model meta-data."""
-        verbose_name = _(u"periodic task")
-        verbose_name_plural = _(u"periodic tasks")
-
-    def __unicode__(self):
-        return u"<PeriodicTask: %s [last-run:%s, total-run:%d]>" % (
-                self.name, self.last_run_at, self.total_run_count)
-
-    def delay(self, *args, **kwargs):
-        """Apply the periodic task immediately."""
-        self.task.delay()
-        self.total_run_count = self.total_run_count + 1
-        self.save()
-
-    @property
-    def task(self):
-        """The entry registered in the task registry for this task."""
-        return tasks[self.name]
-
-
 if (django.VERSION[0], django.VERSION[1]) >= (1, 1):
-    # keep models away from syncdb/reset if database backend is not being used.
+    # keep models away from syncdb/reset if database backend is not
+    # being used.
     if conf.CELERY_BACKEND != 'database':
         TaskMeta._meta.managed = False
         TaskSetMeta._meta.managed = False
-    if conf.CELERY_PERIODIC_STATUS_BACKEND != 'database':
-        PeriodicTaskMeta._meta.managed = False
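
With ``DONE`` renamed to ``SUCCESS``, any code that inspects the result table directly should compare against the new constant. A minimal sketch, assuming the database result backend is in use:

    from celery.models import TaskMeta, TASK_STATUS_SUCCESS


    def task_succeeded(task_id):
        try:
            task = TaskMeta.objects.get(task_id=task_id)
        except TaskMeta.DoesNotExist:
            return False
        return task.status == TASK_STATUS_SUCCESS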

+ 0 - 236
celery/monitoring.py

@@ -1,236 +0,0 @@
-"""
-
-    Publishing Statistics and Monitoring Celery.
-
-"""
-from carrot.connection import DjangoBrokerConnection
-from celery.messaging import StatsPublisher, StatsConsumer
-from celery.loaders import settings
-from django.core.cache import cache
-import time
-
-DEFAULT_CACHE_KEY_PREFIX = "celery-statistics"
-
-
-class Statistics(object):
-    """Base class for classes publishing celery statistics.
-
-    .. attribute:: type
-
-        **REQUIRED** The type of statistics this class handles.
-
-    **Required handlers**
-
-        * on_start()
-
-        * on_stop()
-
-    """
-    type = None
-
-    def __init__(self, **kwargs):
-        self.enabled = getattr(settings, "CELERY_STATISTICS", False)
-        if not self.type:
-            raise NotImplementedError(
-                "Statistic classes must define their type.")
-
-    def publish(self, **data):
-        """Publish statistics to be collected later by
-        :class:`StatsCollector`.
-
-        :param data: An arbitrary Python object containing the statistics
-            to be published.
-
-        """
-        if not self.enabled:
-            return
-        connection = DjangoBrokerConnection()
-        publisher = StatsPublisher(connection=connection)
-        publisher.send({"type": self.type, "data": data})
-        publisher.close()
-        connection.close()
-
-    @classmethod
-    def start(cls, *args, **kwargs):
-        """Convenience method instantiating and running :meth:`run` in
-        one swoop."""
-        stat = cls()
-        stat.run(*args, **kwargs)
-        return stat
-
-    def run(self, *args, **kwargs):
-        """Start producing statistics."""
-        if self.enabled:
-            return self.on_start(*args, **kwargs)
-
-    def stop(self, *args, **kwargs):
-        """Stop producing and publish statistics."""
-        if self.enabled:
-            return self.on_finish(*args, **kwargs)
-
-    def on_start(self, *args, **kwargs):
-        """What to do when the :meth:`run` method is called."""
-        raise NotImplementedError(
-                "Statistics classes must define a on_start handler.")
-
-    def on_stop(self, *args, **kwargs):
-        """What to do when the :meth:`stop` method is called."""
-        raise NotImplementedError(
-                "Statistics classes must define a on_stop handler.")
-
-
-class TimerStats(Statistics):
-    """A generic timer producing ``celery`` statistics.
-
-    .. attribute:: time_start
-
-        The time when this class was instantiated (in :func:`time.time`
-        format.)
-
-    """
-    time_start = None
-
-    def on_start(self, task_id, task_name, args, kwargs):
-        """What to do when the timers :meth:`run` method is called."""
-        self.task_id = task_id
-        self.task_name = task_name
-        self.args = args
-        self.kwargs = kwargs
-        self.time_start = time.time()
-
-    def on_finish(self):
-        """What to do when the timers :meth:`stop` method is called.
-
-        :returns: the time in seconds it took between calling :meth:`start` on
-            this class and :meth:`stop`.
-        """
-        nsecs = time.time() - self.time_start
-        self.publish(task_id=self.task_id,
-                     task_name=self.task_name,
-                     args=self.args,
-                     kwargs=self.kwargs,
-                     nsecs=str(nsecs))
-        return nsecs
-
-
-class TaskTimerStats(TimerStats):
-    """Time a running :class:`celery.task.Task`."""
-    type = "task_time_running"
-
-
-class StatsCollector(object):
-    """Collect and report Celery statistics.
-
-    **NOTE**: Please run only one collector at any time, or your stats
-        will be skewed.
-
-    .. attribute:: total_tasks_processed
-
-        The number of tasks executed in total since the first time
-        :meth:`collect` was executed on this class instance.
-
-    .. attribute:: total_tasks_processed_by_type
-
-        A dictionary of task names and how many times they have been
-        executed in total since the first time :meth:`collect` was executed
-        on this class instance.
-
-    .. attribute:: total_task_time_running
-
-        The total time, in seconds, it took to process all the tasks executed
-        since the first time :meth:`collect` was executed on this class
-        instance.
-
-    .. attribute:: total_task_time_running_by_type
-
-        A dictionary of task names and their total running time in seconds,
-        counting all the tasks that has been run since the first time
-        :meth:`collect` was executed on this class instance.
-
-    **NOTE**: You have to run :meth:`collect` for these attributes
-        to be filled.
-
-
-    """
-
-    allowed_types = ["task_time_running"]
-
-    def __init__(self):
-        self.total_tasks_processed = 0
-        self.total_tasks_processed_by_type = {}
-        self.total_task_time_running = 0.0
-        self.total_task_time_running_by_type = {}
-
-    def collect(self):
-        """Collect any new statistics available since the last time
-        :meth:`collect` was executed."""
-        connection = DjangoBrokerConnection()
-        consumer = StatsConsumer(connection=connection)
-        it = consumer.iterqueue(infinite=False)
-        for message in it:
-            stats_entry = message.decode()
-            stat_type = stats_entry["type"]
-            if stat_type in self.allowed_types:
-                # Decode keys to unicode for use as kwargs.
-                data = dict((key.encode("utf-8"), value)
-                                for key, value in stats_entry["data"].items())
-                handler = getattr(self, stat_type)
-                handler(**data)
-
-    def dump_to_cache(self, cache_key_prefix=DEFAULT_CACHE_KEY_PREFIX):
-        """Store collected statistics in the cache."""
-        cache.set("%s-total_tasks_processed" % cache_key_prefix,
-                self.total_tasks_processed)
-        cache.set("%s-total_tasks_processed_by_type" % cache_key_prefix,
-                    self.total_tasks_processed_by_type)
-        cache.set("%s-total_task_time_running" % cache_key_prefix,
-                    self.total_task_time_running)
-        cache.set("%s-total_task_time_running_by_type" % cache_key_prefix,
-                    self.total_task_time_running_by_type)
-
-    def task_time_running(self, task_id, task_name, args, kwargs, nsecs):
-        """Process statistics regarding how long a task has been running
-        (the :class:TaskTimerStats` class is responsible for sending these).
-
-        :param task_id: The UUID of the task.
-        :param task_name: The name of task.
-        :param args: The tasks positional arguments.
-        :param kwargs: The tasks keyword arguments.
-        :param nsecs: The number of seconds (in :func:`time.time` format)
-            it took to execute the task.
-
-        """
-        nsecs = float(nsecs)
-        self.total_tasks_processed += 1
-        self.total_task_time_running += nsecs
-        if task_name not in self.total_task_time_running_by_type:
-            self.total_task_time_running_by_type[task_name] = nsecs
-        else:
-            self.total_task_time_running_by_type[task_name] += nsecs
-        if task_name not in self.total_tasks_processed_by_type:
-            self.total_tasks_processed_by_type[task_name] = 1
-        else:
-            self.total_tasks_processed_by_type[task_name] += 1
-
-    def report(self):
-        """Dump a nice statistics report from the data collected since
-        the first time :meth:`collect` was executed on this instance.
-
-        It outputs the following information:
-
-            * Total processing time by task type and how many times each
-                task has been executed.
-
-            * Total task processing time.
-
-            * Total number of tasks executed
-
-        """
-        print("Total processing time by task type:")
-        for task_name, nsecs in self.total_task_time_running_by_type.items():
-            print("\t%s: %s secs. (for a total of %d executed.)" % (
-                    task_name, nsecs,
-                    self.total_tasks_processed_by_type.get(task_name)))
-        print("Total task processing time: %s secs." % (
-            self.total_task_time_running))
-        print("Total tasks processed: %d" % self.total_tasks_processed)

+ 50 - 0
celery/platform.py

@@ -0,0 +1,50 @@
+import signal
+try:
+    from setproctitle import setproctitle as _setproctitle
+except ImportError:
+    _setproctitle = None
+
+
+def reset_signal(signal_name):
+    """Reset signal to the default signal handler.
+
+    Does nothing if the platform doesn't support signals,
+    or the specified signal in particular.
+
+    """
+    if hasattr(signal, signal_name):
+        signal.signal(getattr(signal, signal_name), signal.SIG_DFL)
+
+
+def install_signal_handler(signal_name, handler):
+    """Install a handler.
+
+    Does nothing if the current platform doesn't support signals,
+    or the specified signal in particular.
+
+    """
+    if not hasattr(signal, signal_name):
+        return
+
+    signum = getattr(signal, signal_name)
+    signal.signal(signum, handler)
+
+
+def set_process_title(progname, info=None):
+    """Set the ps name for the currently running process
+    if :mod:`setproctitle` is installed."""
+    if _setproctitle:
+        proctitle = "[%s]" % progname
+        proctitle = info and "%s %s" % (proctitle, info) or proctitle
+        _setproctitle(proctitle)
+
+
+def set_mp_process_title(progname, info=None):
+    """Set the ps name using the multiprocessing process name.
+
+    Only works if :mod:`setproctitle` is installed.
+
+    """
+    from multiprocessing.process import current_process
+    return set_process_title("%s.%s" % (progname, current_process().name),
+                             info=info)
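
A short sketch of how these helpers are intended to be used: install a handler by signal name (silently skipping platforms that lack it) and label the process so it is recognisable in ``ps``. The handler body and titles are illustrative:

    from celery import platform


    def handle_term(signum, frame):
        print("Caught SIGTERM, shutting down...")


    # Both calls are no-ops on platforms missing the signal, instead of raising.
    platform.install_signal_handler("SIGTERM", handle_term)
    platform.reset_signal("SIGHUP")

    # Shows up as e.g. "[celeryd] Running..." in ps, if setproctitle is installed.
    platform.set_process_title("celeryd", info="Running...")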

+ 0 - 258
celery/pool.py

@@ -1,258 +0,0 @@
-"""
-
-Process Pools.
-
-"""
-import os
-import time
-import errno
-import multiprocessing
-
-from multiprocessing.pool import Pool, worker
-from celery.datastructures import ExceptionInfo
-from celery.utils import noop
-from celery.utils.functional import curry
-from operator import isNumberType
-
-
-def pid_is_dead(pid):
-    """Check if a process is not running by PID.
-
-    :rtype bool:
-
-    """
-    try:
-        return os.kill(pid, 0)
-    except OSError, err:
-        if err.errno == errno.ESRCH:
-            return True # No such process.
-        elif err.errno == errno.EPERM:
-            return False # Operation not permitted.
-        else:
-            raise
-
-
-def reap_process(pid):
-    """Reap process if the process is a zombie.
-
-    :returns: ``True`` if process was reaped or is not running,
-        ``False`` otherwise.
-
-    """
-    if pid_is_dead(pid):
-        return True
-
-    try:
-        is_dead, _ = os.waitpid(pid, os.WNOHANG)
-    except OSError, err:
-        if err.errno == errno.ECHILD:
-            return False # No child processes.
-        raise
-    return is_dead
-
-
-def process_is_dead(process):
-    """Check if process is not running anymore.
-
-    First it finds out if the process is running by sending
-    signal 0. Then if the process is a child process, and is running
-    it finds out if it's a zombie process and reaps it.
-    If the process is running and is not a zombie it tries to send
-    a ping through the process pipe.
-
-    :param process: A :class:`multiprocessing.Process` instance.
-
-    :returns: ``True`` if the process is not running, ``False`` otherwise.
-
-    """
-
-    # Only do this if os.kill exists for this platform (e.g. Windows doesn't
-    # support it).
-    if callable(getattr(os, "kill", None)) and reap_process(process.pid):
-        return True
-
-    # Then try to ping the process using its pipe.
-    try:
-        proc_is_alive = process.is_alive()
-    except OSError:
-        return True
-    else:
-        return not proc_is_alive
-
-
-class DynamicPool(Pool):
-    """Version of :class:`multiprocessing.Pool` that can dynamically grow
-    in size."""
-
-    def __init__(self, processes=None, initializer=None, initargs=()):
-
-        if processes is None:
-            try:
-                processes = cpu_count()
-            except NotImplementedError:
-                processes = 1
-
-        super(DynamicPool, self).__init__(processes=processes,
-                                          initializer=initializer,
-                                          initargs=initargs)
-        self._initializer = initializer
-        self._initargs = initargs
-        self._size = processes
-        self.logger = multiprocessing.get_logger()
-
-    def _my_cleanup(self):
-        from multiprocessing.process import _current_process
-        for p in list(_current_process._children):
-            discard = False
-            try:
-                status = p._popen.poll()
-            except OSError:
-                discard = True
-            else:
-                if status is not None:
-                    discard = True
-            if discard:
-                _current_process._children.discard(p)
-
-    def add_worker(self):
-        """Add another worker to the pool."""
-        self._my_cleanup()
-        w = self.Process(target=worker,
-                         args=(self._inqueue, self._outqueue,
-                               self._initializer, self._initargs))
-        w.name = w.name.replace("Process", "PoolWorker")
-        w.daemon = True
-        w.start()
-        self._pool.append(w)
-        self.logger.debug(
-            "DynamicPool: Started pool worker %s (PID: %s, Poolsize: %d)" %(
-                w.name, w.pid, len(self._pool)))
-
-    def grow(self, size=1):
-        """Add workers to the pool.
-
-        :keyword size: Number of workers to add (default: 1)
-
-        """
-        [self.add_worker() for i in range(size)]
-
-    def _is_dead(self, process):
-        """Try to find out if the process is dead.
-
-        :rtype bool:
-
-        """
-        if process_is_dead(process):
-            self.logger.info("DynamicPool: Found dead process (PID: %s)" % (
-                process.pid))
-            return True
-        return False
-
-    def _bring_out_the_dead(self):
-        """Sort out dead process from pool.
-
-        :returns: Tuple of two lists, the first list with dead processes,
-            the second with active processes.
-
-        """
-        dead, alive = [], []
-        for process in self._pool:
-            if process and process.pid and isNumberType(process.pid):
-                dest = dead if self._is_dead(process) else alive
-                dest.append(process)
-        return dead, alive
-
-    def replace_dead_workers(self):
-        """Replace dead workers in the pool by spawning new ones.
-
-        :returns: number of dead processes replaced, or ``None`` if all
-            processes are alive and running.
-
-        """
-        dead, alive = self._bring_out_the_dead()
-        if dead:
-            dead_count = len(dead)
-            self._pool = alive
-            self.grow(self._size if dead_count > self._size else dead_count)
-            return dead_count
-
-
-class TaskPool(object):
-    """Process Pool for processing tasks in parallel.
-
-    :param limit: see :attr:`limit` attribute.
-    :param logger: see :attr:`logger` attribute.
-
-
-    .. attribute:: limit
-
-        The number of processes that can run simultaneously.
-
-    .. attribute:: logger
-
-        The logger used for debugging.
-
-    """
-
-    def __init__(self, limit, logger=None):
-        self.limit = limit
-        self.logger = logger or multiprocessing.get_logger()
-        self._pool = None
-
-    def start(self):
-        """Run the task pool.
-
-        Will pre-fork all workers so they're ready to accept tasks.
-
-        """
-        self._pool = DynamicPool(processes=self.limit)
-
-    def stop(self):
-        """Terminate the pool."""
-        self._pool.terminate()
-        self._pool = None
-
-    def replace_dead_workers(self):
-        self.logger.debug("TaskPool: Finding dead pool processes...")
-        dead_count = self._pool.replace_dead_workers()
-        if dead_count:
-            self.logger.info(
-                "TaskPool: Replaced %d dead pool workers..." % (
-                    dead_count))
-
-    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
-            errbacks=None, on_ack=noop):
-        """Equivalent of the :func:``apply`` built-in function.
-
-        All ``callbacks`` and ``errbacks`` should complete immediately since
-        otherwise the thread which handles the result will get blocked.
-
-        """
-        args = args or []
-        kwargs = kwargs or {}
-        callbacks = callbacks or []
-        errbacks = errbacks or []
-
-        on_ready = curry(self.on_ready, callbacks, errbacks, on_ack)
-
-        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
-            target, args, kwargs))
-
-        self.replace_dead_workers()
-
-        return self._pool.apply_async(target, args, kwargs,
-                                        callback=on_ready)
-
-    def on_ready(self, callbacks, errbacks, on_ack, ret_value):
-        """What to do when a worker task is ready and its return value has
-        been collected."""
-        # Acknowledge the task as being processed.
-        on_ack()
-
-        if isinstance(ret_value, ExceptionInfo):
-            if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)):
-                raise ret_value.exception
-            [errback(ret_value) for errback in errbacks]
-        else:
-            [callback(ret_value) for callback in callbacks]
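
The liveness checks the removed pool relied on are a general UNIX idiom: sending signal 0 probes a PID without delivering anything, and ``waitpid`` with ``WNOHANG`` reaps a zombie child without blocking. A stand-alone sketch of the probing half:

    import os
    import errno


    def pid_exists(pid):
        """Probe a PID with signal 0; nothing is actually delivered."""
        try:
            os.kill(pid, 0)
        except OSError as exc:
            if exc.errno == errno.ESRCH:     # No such process.
                return False
            if exc.errno == errno.EPERM:     # Exists, but owned by another user.
                return True
            raise
        return True


    print(pid_exists(os.getpid()))           # True -- we are certainly running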

+ 27 - 47
celery/registry.py

@@ -1,57 +1,43 @@
 """celery.registry"""
-from celery import discovery
-from celery.utils import get_full_cls_name
-from celery.exceptions import NotRegistered, AlreadyRegistered
+import inspect
 from UserDict import UserDict
 
+from celery.exceptions import NotRegistered
+
 
 class TaskRegistry(UserDict):
     """Site registry for tasks."""
 
-    AlreadyRegistered = AlreadyRegistered
     NotRegistered = NotRegistered
 
     def __init__(self):
         self.data = {}
 
-    def autodiscover(self):
-        """Autodiscovers tasks using :func:`celery.discovery.autodiscover`."""
-        discovery.autodiscover()
-
-    def register(self, task, name=None):
-        """Register a task in the task registry.
+    def regular(self):
+        """Get all regular task types."""
+        return self.filter_types("regular")
 
-        Task can either be a regular function, or a class inheriting
-        from :class:`celery.task.Task`.
+    def periodic(self):
+        """Get all periodic task types."""
+        return self.filter_types("periodic")
 
-        :keyword name: By default the :attr:`Task.name` attribute on the
-            task is used as the name of the task, but you can override it
-            using this option.
+    def register(self, task):
+        """Register a task in the task registry.
 
-        :raises AlreadyRegistered: if the task is already registered.
+        The task will be automatically instantiated if not already an
+        instance.
 
         """
-        is_class = hasattr(task, "run")
-        if is_class:
-            task = task() # instantiate Task class
-        if not name:
-            name = getattr(task, "name")
-
-        if name in self.data:
-            raise self.AlreadyRegistered(
-                    "Task with name %s is already registered." % name)
-
-        if not is_class:
-            task.name = name
-            task.type = "regular"
 
+        task = inspect.isclass(task) and task() or task
+        name = task.name
         self.data[name] = task
 
     def unregister(self, name):
         """Unregister task by name.
 
         :param name: name of the task to unregister, or a
-            :class:`celery.task.Task` class with a valid ``name`` attribute.
+            :class:`celery.task.base.Task` with a valid ``name`` attribute.
 
         :raises celery.exceptions.NotRegistered: if the task has not
             been registered.
@@ -59,14 +45,7 @@ class TaskRegistry(UserDict):
         """
         if hasattr(name, "run"):
             name = name.name
-        if name not in self.data:
-            raise self.NotRegistered(
-                    "Task with name %s is not registered." % name)
-        del self.data[name]
-
-    def get_all(self):
-        """Get all task types."""
-        return self.data
+        self.pop(name)
 
     def filter_types(self, type):
         """Return all tasks of a specific type."""
@@ -74,17 +53,18 @@ class TaskRegistry(UserDict):
                         for task_name, task in self.data.items()
                             if task.type == type)
 
-    def get_all_regular(self):
-        """Get all regular task types."""
-        return self.filter_types(type="regular")
+    def __getitem__(self, key):
+        try:
+            return UserDict.__getitem__(self, key)
+        except KeyError, exc:
+            raise self.NotRegistered(exc)
 
-    def get_all_periodic(self):
-        """Get all periodic task types."""
-        return self.filter_types(type="periodic")
+    def pop(self, key, *args):
+        try:
+            return UserDict.pop(self, key, *args)
+        except KeyError, exc:
+            raise self.NotRegistered(exc)
 
-    def get_task(self, name):
-        """Get task by name."""
-        return self.data[name]
 
 """
 .. data:: tasks

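A minimal sketch of using the reworked registry above, assuming a hypothetical ``examples.add`` task inside a configured celery project; classes are instantiated on registration and unknown names raise ``NotRegistered`` instead of ``KeyError``:

    from celery.registry import tasks
    from celery.task.base import Task
    from celery.exceptions import NotRegistered

    class AddTask(Task):                      # hypothetical example task
        name = "examples.add"

        def run(self, x, y, **kwargs):
            return x + y

    tasks.register(AddTask)                   # the class is instantiated for us
    add = tasks["examples.add"]               # lookup by name

    try:
        tasks["examples.does_not_exist"]      # missing names raise NotRegistered
    except NotRegistered:
        pass
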
+ 42 - 30
celery/result.py

@@ -3,22 +3,22 @@
 Asynchronous result types.
 
 """
+import time
+from itertools import imap
+
+from celery.utils import any, all
 from celery.backends import default_backend
-from celery.datastructures import PositionQueue
+from celery.messaging import with_connection
 from celery.exceptions import TimeoutError
-from itertools import imap
-import time
+from celery.datastructures import PositionQueue
 
 
 class BaseAsyncResult(object):
-    """Base class for pending result, supports custom
-    task meta :attr:`backend`
+    """Base class for pending result, supports custom task result backend.
 
     :param task_id: see :attr:`task_id`.
-
     :param backend: see :attr:`backend`.
 
-
     .. attribute:: task_id
 
         The unique identifier for this task.
@@ -35,22 +35,24 @@ class BaseAsyncResult(object):
         self.task_id = task_id
         self.backend = backend
 
-    def is_done(self):
-        """Returns ``True`` if the task executed successfully.
+    @with_connection
+    def revoke(self, connection=None, connect_timeout=None):
+        """Send revoke signal to all workers.
 
-        :rtype: bool
+        The workers will ignore the task if received.
 
         """
-        return self.backend.is_done(self.task_id)
+        from celery.task import control
+        control.revoke(self.task_id)
 
-    def get(self):
+    def get(self, timeout=None):
         """Alias to :meth:`wait`."""
-        return self.wait()
+        return self.wait(timeout=timeout)
 
     def wait(self, timeout=None):
         """Wait for task, and return the result when it arrives.
 
-        :keyword timeout: How long to wait in seconds, before the
+        :keyword timeout: How long to wait, in seconds, before the
             operation times out.
 
         :raises celery.exceptions.TimeoutError: if ``timeout`` is not ``None``
@@ -71,11 +73,15 @@ class BaseAsyncResult(object):
 
         """
         status = self.backend.get_status(self.task_id)
-        return status not in ["PENDING", "RETRY"]
+        return status not in self.backend.UNREADY_STATES
 
     def successful(self):
-        """Alias to :meth:`is_done`."""
-        return self.is_done()
+        """Returns ``True`` if the task executed successfully.
+
+        :rtype: bool
+
+        """
+        return self.backend.is_successful(self.task_id)
 
     def __str__(self):
         """``str(self)`` -> ``self.task_id``"""
@@ -91,9 +97,7 @@ class BaseAsyncResult(object):
         If the task raised an exception, this will be the exception instance.
 
         """
-        if self.status == "DONE" or self.status == "FAILURE":
-            return self.backend.get_result(self.task_id)
-        return None
+        return self.backend.get_result(self.task_id)
 
     @property
     def traceback(self):
@@ -120,7 +124,7 @@ class BaseAsyncResult(object):
                 than its limit. The :attr:`result` attribute contains the
                 exception raised.
 
-            *DONE*
+            *SUCCESS*
 
                 The task executed successfully. The :attr:`result` attribute
                 contains the resulting value.
@@ -232,6 +236,11 @@ class TaskSetResult(object):
         return sum(imap(int, (subtask.successful()
                                 for subtask in self.itersubtasks())))
 
+    @with_connection
+    def revoke(self, connection=None, connect_timeout=None):
+        for subtask in self.subtasks:
+            subtask.revoke(connection=connection)
+
     def __iter__(self):
         """``iter(res)`` -> ``res.iterate()``."""
         return self.iterate()
@@ -243,12 +252,12 @@ class TaskSetResult(object):
         :raises: The exception if any of the tasks raised an exception.
 
         """
-        results = dict((subtask.task_id, AsyncResult(subtask.task_id))
+        results = dict((subtask.task_id, subtask.__class__(subtask.task_id))
                             for subtask in self.subtasks)
         while results:
             for task_id, pending_result in results.items():
-                if pending_result.status == "DONE":
-                    del(results[task_id])
+                if pending_result.status == "SUCCESS":
+                    results.pop(task_id, None)
                     yield pending_result.result
                 elif pending_result.status == "FAILURE":
                     raise pending_result.result
@@ -280,7 +289,7 @@ class TaskSetResult(object):
 
         while True:
             for position, pending_result in enumerate(self.subtasks):
-                if pending_result.status == "DONE":
+                if pending_result.status == "SUCCESS":
                     results[position] = pending_result.result
                 elif pending_result.status == "FAILURE":
                     raise pending_result.result
@@ -309,20 +318,23 @@ class EagerResult(BaseAsyncResult):
         self._status = status
         self._traceback = traceback
 
-    def is_done(self):
+    def successful(self):
         """Returns ``True`` if the task executed without failure."""
-        return self.status == "DONE"
+        return self.status == "SUCCESS"
 
-    def is_ready(self):
+    def ready(self):
         """Returns ``True`` if the task has been executed."""
         return True
 
     def wait(self, timeout=None):
         """Wait until the task has been executed and return its result."""
-        if self.status == "DONE":
+        if self.status == "SUCCESS":
             return self.result
         elif self.status == "FAILURE":
-            raise self.result
+            raise self.result.exception
+
+    def revoke(self):
+        pass
 
     @property
     def result(self):

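A hedged sketch of the updated result API; the task id below is a placeholder and a result backend is assumed to be configured. The ``DONE`` state is now called ``SUCCESS``, ``get()`` accepts a ``timeout``, and a pending task can be revoked:

    from celery.result import AsyncResult

    result = AsyncResult("373550e8-b9a0-4666-bc61-ace01fa4f91d")
    if result.ready():                 # i.e. not in one of the UNREADY_STATES
        print result.status            # "SUCCESS" or "FAILURE", for example
        print result.successful()      # backend.is_successful(task_id)
    value = result.get(timeout=10)     # alias of wait(); may raise TimeoutError
    result.revoke()                    # ask the workers to ignore the task
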
+ 0 - 116
celery/serialization.py

@@ -1,116 +0,0 @@
-from celery.utils.functional import curry
-import operator
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-
-def find_nearest_pickleable_exception(exc):
-    """With an exception instance, iterate over its super classes (by mro)
-    and find the first super exception that is pickleable. It does
-    not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
-    :class:`BaseException` and :class:`object`). If that happens
-    you should use :exc:`UnpickleableException` instead.
-
-    :param exc: An exception instance.
-
-    :returns: the nearest exception if it's not :exc:`Exception` or below,
-        if it is it returns ``None``.
-
-    :rtype: :exc:`Exception`
-
-    """
-
-    unwanted = (Exception, BaseException, object)
-    is_unwanted = lambda exc: any(map(curry(operator.is_, exc), unwanted))
-
-    mro_ = getattr(exc.__class__, "mro", lambda: [])
-    for supercls in mro_():
-        if is_unwanted(supercls):
-            # only BaseException and object, from here on down,
-            # we don't care about these.
-            return None
-        try:
-            exc_args = getattr(exc, "args", [])
-            superexc = supercls(*exc_args)
-            pickle.dumps(superexc)
-        except:
-            pass
-        else:
-            return superexc
-    return None
-
-
-def create_exception_cls(name, module, parent=None):
-    """Dynamically create an exception class."""
-    if not parent:
-        parent = Exception
-    return type(name, (parent, ), {"__module__": module})
-
-
-class UnpickleableExceptionWrapper(Exception):
-    """Wraps unpickleable exceptions.
-
-    :param exc_module: see :attr:`exc_module`.
-
-    :param exc_cls_name: see :attr:`exc_cls_name`.
-
-    :param exc_args: see :attr:`exc_args`
-
-    .. attribute:: exc_module
-
-        The module of the original exception.
-
-    .. attribute:: exc_cls_name
-
-        The name of the original exception class.
-
-    .. attribute:: exc_args
-
-        The arguments for the original exception.
-
-    Example
-
-        >>> try:
-        ...     something_raising_unpickleable_exc()
-        >>> except Exception, e:
-        ...     exc = UnpickleableException(e.__class__.__module__,
-        ...                                 e.__class__.__name__,
-        ...                                 e.args)
-        ...     pickle.dumps(exc) # Works fine.
-
-    """
-
-    def __init__(self, exc_module, exc_cls_name, exc_args):
-        self.exc_module = exc_module
-        self.exc_cls_name = exc_cls_name
-        self.exc_args = exc_args
-        super(Exception, self).__init__(exc_module, exc_cls_name, exc_args)
-
-
-def get_pickleable_exception(exc):
-    """Make sure exception is pickleable."""
-    nearest = find_nearest_pickleable_exception(exc)
-    if nearest:
-        return nearest
-
-    try:
-        pickle.dumps(exc)
-    except pickle.PickleError:
-        excwrapper = UnpickleableExceptionWrapper(
-                        exc.__class__.__module__,
-                        exc.__class__.__name__,
-                        getattr(exc, "args", []))
-        return excwrapper
-    return exc
-
-
-def get_pickled_exception(exc):
-    """Get original exception from exception pickled using
-    :meth:`get_pickleable_exception`."""
-    if isinstance(exc, UnpickleableExceptionWrapper):
-        exc_cls = create_exception_cls(exc.exc_cls_name,
-                                       exc.exc_module)
-        return exc_cls(*exc.exc_args)
-    return exc

+ 10 - 79
celery/signals.py

@@ -1,84 +1,15 @@
 from django.dispatch import Signal
 
-"""
+task_sent = Signal(providing_args=["task_id", "task",
+                                   "args", "kwargs",
+                                   "eta", "taskset"])
 
-.. DATA: task_sent
+task_prerun = Signal(providing_args=["task_id", "task",
+                                     "args", "kwargs"])
 
-Triggered when a task has been sent to the broker.
+task_postrun = Signal(providing_args=["task_id", "task",
+                                      "args", "kwargs", "retval"])
 
-Provides arguments:
-
-* task_id
-    Id of the task to be executed.
-
-* task
-    The task being executed.
-
-* args
-    the tasks positional arguments.
-
-* kwargs
-    The tasks keyword arguments.
-
-* eta
-    The time to execute the task.
-
-* taskset
-    Id of the taskset this task is part of (if any).
-
-
-"""
-task_sent = Signal(providing_args=[
-                        "task_id", "task", "args", "kwargs", "eta",
-                        "taskset"])
-
-"""
-.. DATA: task_prerun
-
-Triggered before a task is executed.
-
-Provides arguments:
-
-* task_id
-    Id of the task to be executed.
-
-* task
-    The task being executed.
-
-* args
-    the tasks positional arguments.
-
-* kwargs
-    The tasks keyword arguments.
-
-"""
-task_prerun = Signal(providing_args=[
-                        "task_id", "task", "args", "kwargs"])
-
-"""
-
-.. DATA: task_postrun
-
-Triggered after a task has been executed.
-
-Provides arguments:
-
-* task_id
-    Id of the task to be executed.
-
-* task
-    The task being executed.
-
-* args
-    the tasks positional arguments.
-
-* kwargs
-    The tasks keyword arguments.
-
-* retval
-
-    The return value of the task.
-
-"""
-task_postrun = Signal(providing_args=[
-                        "task_id", "task", "args", "kwargs", "retval"])
+worker_init = Signal(providing_args=[])
+worker_ready = Signal(providing_args=[])
+worker_shutdown = Signal(providing_args=[])

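A sketch of connecting a receiver to one of the signals declared above; the receiver name is made up for illustration:

    from celery.signals import task_prerun

    def log_task_start(sender=None, task_id=None, task=None,
            args=None, kwargs=None, **extra):
        # Called by the worker just before each task is executed.
        print "About to run %s[%s]" % (task, task_id)

    task_prerun.connect(log_task_start)
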
+ 0 - 120
celery/supervisor.py

@@ -1,120 +0,0 @@
-import multiprocessing
-import time
-from multiprocessing import TimeoutError
-
-JOIN_TIMEOUT = 2
-CHECK_INTERVAL = 2
-MAX_RESTART_FREQ = 3
-MAX_RESTART_FREQ_TIME = 10
-
-
-class MaxRestartsExceededError(Exception):
-    """Restarts exceeded the maximum restart frequency."""
-
-
-class OFASupervisor(object):
-    """Process supervisor using the `one_for_all`_ strategy.
-
-    .. _`one_for_all`:
-        http://erlang.org/doc/design_principles/sup_princ.html#5.3.2
-
-    However, instead of registering a list of processes, you have one
-    process which runs a pool. Makes for an easy implementation.
-
-    :param target: see :attr:`target`.
-    :param args: see :attr:`args`.
-    :param kwargs: see :attr:`kwargs`.
-    :param max_restart_freq: see :attr:`max_restart_freq`.
-    :param max_restart_freq_time: see :attr:`max_restart_freq_time`.
-    :param check_interval: see :attr:`max_restart_freq_time`.
-
-    .. attribute:: target
-
-        The target callable to be launched in a new process.
-
-    .. attribute:: args
-
-        The positional arguments to apply to :attr:`target`.
-
-    .. attribute:: kwargs
-
-        The keyword arguments to apply to :attr:`target`.
-
-    .. attribute:: max_restart_freq
-
-        Limit the number of restarts which can occur in a given time interval.
-
-        The max restart frequency is the number of restarts that can occur
-        within the interval :attr:`max_restart_freq_time`.
-
-        The restart mechanism prevents situations where the process repeatedly
-        dies for the same reason. If this happens both the process and the
-        supervisor is terminated.
-
-    .. attribute:: max_restart_freq_time
-
-        See :attr:`max_restart_freq`.
-
-    .. attribute:: check_interval
-
-        The time in seconds, between process pings.
-
-    """
-    Process = multiprocessing.Process
-
-    def __init__(self, target, args=None, kwargs=None,
-            max_restart_freq=MAX_RESTART_FREQ,
-            join_timeout=JOIN_TIMEOUT,
-            max_restart_freq_time=MAX_RESTART_FREQ_TIME,
-            check_interval=CHECK_INTERVAL):
-        self.target = target
-        self.join_timeout = join_timeout
-        self.args = args or []
-        self.kwargs = kwargs or {}
-        self.check_interval = check_interval
-        self.max_restart_freq = max_restart_freq
-        self.max_restart_freq_time = max_restart_freq_time
-        self.restarts_in_frame = 0
-
-    def start(self):
-        """Launches the :attr:`target` in a seperate process and starts
-        supervising it."""
-        target = self.target
-
-        def _start_supervised_process():
-            """Start the :attr:`target` in a new process."""
-            process = self.Process(target=target,
-                                   args=self.args, kwargs=self.kwargs)
-            process.start()
-            return process
-
-        def _restart(process):
-            """Terminate the process and restart."""
-            process.join(timeout=self.join_timeout)
-            process.terminate()
-            self.restarts_in_frame += 1
-            process = _start_supervised_process()
-
-        process = _start_supervised_process()
-        try:
-            restart_frame = 0
-            while True:
-                if restart_frame > self.max_restart_freq_time:
-                    if self.restarts_in_frame >= self.max_restart_freq:
-                        raise MaxRestartsExceededError(
-                                "Supervised: Max restart frequency reached")
-                restart_frame = 0
-                self.restarts_in_frame = 0
-
-                try:
-                    proc_is_alive = process.is_alive()
-                except TimeoutError:
-                    proc_is_alive = False
-
-                if not proc_is_alive:
-                    _restart(process)
-
-                time.sleep(self.check_interval)
-                restart_frame += self.check_interval
-        finally:
-            process.join()

+ 17 - 47
celery/task/__init__.py

@@ -3,48 +3,20 @@
 Working with tasks and task sets.
 
 """
-from carrot.connection import DjangoBrokerConnection
-from celery.messaging import TaskConsumer
-from celery.conf import AMQP_CONNECTION_TIMEOUT
-from celery.registry import tasks
-from celery.backends import default_backend
-from celery.task.base import Task, TaskSet, PeriodicTask
-from celery.task.base import ExecuteRemoteTask
-from celery.task.base import AsynchronousMapTask
-from celery.task.builtins import DeleteExpiredTaskMetaTask, PingTask
-from celery.execute import apply_async, delay_task
-from celery.serialization import pickle
-from celery.task.rest import RESTProxyTask
-
-
-def discard_all(connect_timeout=AMQP_CONNECTION_TIMEOUT):
-    """Discard all waiting tasks.
-
-    This will ignore all tasks waiting for execution, and they will
-    be deleted from the messaging server.
-
-    :returns: the number of tasks discarded.
-
-    :rtype: int
-
-    """
-    amqp_connection = DjangoBrokerConnection(connect_timeout=connect_timeout)
-    consumer = TaskConsumer(connection=amqp_connection)
-    discarded_count = consumer.discard_all()
-    amqp_connection.close()
-    return discarded_count
-
+from billiard.serialization import pickle
 
-def is_done(task_id):
-    """Returns ``True`` if task with ``task_id`` has been executed.
+from celery.execute import apply_async
+from celery.registry import tasks
+from celery.task.base import Task, TaskSet, PeriodicTask, ExecuteRemoteTask
+from celery.task.control import discard_all
+from celery.task.builtins import PingTask
+from celery.task.http import HttpDispatchTask
 
-    :rtype: bool
+__all__ = ["Task", "TaskSet", "PeriodicTask", "tasks", "discard_all",
+           "dmap", "dmap_async", "execute_remote", "ping", "HttpDispatchTask"]
 
-    """
-    return default_backend.is_done(task_id)
 
-
-def dmap(func, args, timeout=None):
+def dmap(fun, args, timeout=None):
     """Distribute processing of the arguments and collect the results.
 
     Example
@@ -55,10 +27,10 @@ def dmap(func, args, timeout=None):
         [4, 8, 16]
 
     """
-    return TaskSet.map(func, args, timeout=timeout)
+    return TaskSet.map(fun, args, timeout=timeout)
 
 
-def dmap_async(func, args, timeout=None):
+def dmap_async(fun, args, timeout=None):
     """Distribute processing of the arguments and collect the results
     asynchronously.
 
@@ -72,21 +44,19 @@ def dmap_async(func, args, timeout=None):
         >>> presult
         <AsyncResult: 373550e8-b9a0-4666-bc61-ace01fa4f91d>
         >>> presult.status
-        'DONE'
+        'SUCCESS'
         >>> presult.result
         [4, 8, 16]
 
     """
-    return TaskSet.map_async(func, args, timeout=timeout)
+    return TaskSet.map_async(fun, args, timeout=timeout)
 
 
-def execute_remote(func, *args, **kwargs):
+def execute_remote(fun, *args, **kwargs):
     """Execute arbitrary function/object remotely.
 
-    :param func: A callable function or object.
-
+    :param fun: A callable function or object.
     :param \*args: Positional arguments to apply to the function.
-
     :param \*\*kwargs: Keyword arguments to apply to the function.
 
     The object must be picklable, so you can't use lambdas or functions
@@ -95,7 +65,7 @@ def execute_remote(func, *args, **kwargs):
     :returns: class:`celery.result.AsyncResult`.
 
     """
-    return ExecuteRemoteTask.delay(pickle.dumps(func), args, kwargs)
+    return ExecuteRemoteTask.delay(pickle.dumps(fun), args, kwargs)
 
 
 def ping():

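A short sketch of the helpers above; ``reverse`` is a made-up module-level function (lambdas won't pickle), and both calls assume a broker and at least one worker are running:

    from celery.task import execute_remote, ping

    def reverse(text):                 # must be importable by the workers
        return text[::-1]

    result = execute_remote(reverse, "hello")
    print result.get()                 # -> "olleh", once a worker has run it
    print ping()                       # -> "pong" if a worker is alive
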
+ 256 - 209
celery/task/base.py

@@ -1,19 +1,62 @@
-from carrot.connection import DjangoBrokerConnection
+import sys
+import warnings
+from datetime import datetime, timedelta
+from Queue import Queue
+
+from billiard.serialization import pickle
+
 from celery import conf
-from celery.messaging import TaskPublisher, TaskConsumer
 from celery.log import setup_logger
-from celery.result import TaskSetResult, EagerResult
-from celery.execute import apply_async, delay_task, apply
-from celery.utils import gen_unique_id, get_full_cls_name
+from celery.utils import gen_unique_id, mexpand, timedelta_seconds
+from celery.result import BaseAsyncResult, TaskSetResult, EagerResult
+from celery.execute import apply_async, apply
 from celery.registry import tasks
-from celery.serialization import pickle
-from celery.exceptions import MaxRetriesExceededError, RetryTaskError
 from celery.backends import default_backend
-from datetime import timedelta
+from celery.messaging import TaskPublisher, TaskConsumer
+from celery.messaging import establish_connection as _establish_connection
+from celery.exceptions import MaxRetriesExceededError, RetryTaskError
+
+
+class TaskType(type):
+    """Metaclass for tasks.
+
+    Automatically registers the task in the task registry, except
+    if the ``abstract`` attribute is set.
+
+    If no ``name`` attribute is provided, the name is automatically
+    set to the name of the module it was defined in, and the class name.
+
+    """
+
+    def __new__(cls, name, bases, attrs):
+        super_new = super(TaskType, cls).__new__
+        task_module = attrs["__module__"]
+
+        # Abstract class, remove the abstract attribute so
+        # any class inheriting from this won't be abstract by default.
+        if attrs.pop("abstract", None) or not attrs.get("autoregister", True):
+            return super_new(cls, name, bases, attrs)
+
+        # Automatically generate missing name.
+        if not attrs.get("name"):
+            task_module = sys.modules[task_module]
+            task_name = ".".join([task_module.__name__, name])
+            attrs["name"] = task_name
+
+        # Because of the way import happens (recursively), this may not be
+        # the first time the task tries to register
+        # with the framework. There should only be one class for each task
+        # name, so we always return the registered version.
+
+        task_name = attrs["name"]
+        if task_name not in tasks:
+            task_cls = super_new(cls, name, bases, attrs)
+            tasks.register(task_cls)
+        return tasks[task_name].__class__
 
 
 class Task(object):
-    """A task that can be delayed for execution by the ``celery`` daemon.
+    """A celery task.
 
     All subclasses of :class:`Task` must define the :meth:`run` method,
     which is the actual method the ``celery`` daemon executes.
@@ -21,13 +64,11 @@ class Task(object):
     The :meth:`run` method can take use of the default keyword arguments,
     as listed in the :meth:`run` documentation.
 
-    The :meth:`run` method supports both positional, and keyword arguments.
-
     .. attribute:: name
+        Name of the task.
 
-        *REQUIRED* All subclasses of :class:`Task` has to define the
-        :attr:`name` attribute. This is the name of the task, registered
-        in the task registry, and passed to :func:`delay_task`.
+    .. attribute:: abstract
+        If ``True`` the task is an abstract base class.
 
     .. attribute:: type
 
@@ -45,22 +86,17 @@ class Task(object):
 
     .. attribute:: mandatory
 
-        If set, the message has mandatory routing. By default the message
-        is silently dropped by the broker if it can't be routed to a queue.
-        However - If the message is mandatory, an exception will be raised
-        instead.
+        Mandatory message routing. An exception will be raised if the task
+        can't be routed to a queue.
 
     .. attribute:: immediate:
 
-        Request immediate delivery. If the message cannot be routed to a
-        task worker immediately, an exception will be raised. This is
-        instead of the default behaviour, where the broker will accept and
-        queue the message, but with no guarantee that the message will ever
-        be consumed.
+        Request immediate delivery. An exception will be raised if the task
+        can't be routed to a worker immediately.
 
     .. attribute:: priority:
-
-        The message priority. A number from ``0`` to ``9``.
+        The message priority. A number from ``0`` to ``9``, where ``0`` is the
+        highest. Note that RabbitMQ doesn't support priorities yet.
 
     .. attribute:: max_retries
 
@@ -68,16 +104,24 @@ class Task(object):
 
     .. attribute:: default_retry_delay
 
-        Defeault time in seconds before a retry of the task should be
+        Default time in seconds before a retry of the task should be
         executed. Default is a 1 minute delay.
 
+    .. attribute:: rate_limit
+
+        Set the rate limit for this task type. Examples: ``None`` (no rate
+        limit), ``"100/s"`` (hundred tasks a second), ``"100/m"`` (hundred
+        tasks a minute), ``"100/h"`` (hundred tasks an hour).
+
+    .. attribute:: rate_limit_queue_type
+
+        Type of queue used by the rate limiter for this kind of task.
+        Default is a :class:`Queue.Queue`, but you can change this to
+        a :class:`Queue.LifoQueue` or an invention of your own.
+
     .. attribute:: ignore_result
 
-        Don't store the status and return value. This means you can't
-        use the :class:`celery.result.AsyncResult` to check if the task is
-        done, or get its return value. Only use if you need the performance
-        and is able live without these features. Any exceptions raised will
-        store the return value/status as usual.
+        Don't store the return value of this task.
 
     .. attribute:: disable_error_emails
 
@@ -86,66 +130,44 @@ class Task(object):
 
     .. attribute:: serializer
 
-        A string identifying the default serialization
-        method to use. Defaults to the ``CELERY_TASK_SERIALIZER`` setting.
-        Can be ``pickle`` ``json``, ``yaml``, or any custom serialization
-        methods that have been registered with
-        :mod:`carrot.serialization.registry`.
-
-    :raises NotImplementedError: if the :attr:`name` attribute is not set.
-
-    The resulting class is callable, which if called will apply the
-    :meth:`run` method.
-
-    Examples
-
-    This is a simple task just logging a message,
+        The name of a serializer that has been registered with
+        :mod:`carrot.serialization.registry`. Example: ``"json"``.
 
-        >>> from celery.task import tasks, Task
-        >>> class MyTask(Task):
-        ...
-        ...     def run(self, some_arg=None, **kwargs):
-        ...         logger = self.get_logger(**kwargs)
-        ...         logger.info("Running MyTask with arg some_arg=%s" %
-        ...                     some_arg))
-        ...         return 42
-        ... tasks.register(MyTask)
+    .. attribute:: backend
 
-    You can delay the task using the classmethod :meth:`delay`...
+        The result store backend used for this task.
 
-        >>> result = MyTask.delay(some_arg="foo")
-        >>> result.status # after some time
-        'DONE'
-        >>> result.result
-        42
+    .. attribute:: autoregister
+        If ``True`` the task is automatically registered in the task
+        registry, which is the default behaviour.
 
-    ...or using the :func:`delay_task` function, by passing the name of
-    the task.
-
-        >>> from celery.task import delay_task
-        >>> result = delay_task(MyTask.name, some_arg="foo")
 
+    The resulting class is callable, which if called will apply the
+    :meth:`run` method.
 
     """
+    __metaclass__ = TaskType
+
     name = None
+    abstract = True
+    autoregister = True
     type = "regular"
     exchange = None
     routing_key = None
     immediate = False
     mandatory = False
     priority = None
-    ignore_result = False
+    ignore_result = conf.IGNORE_RESULT
     disable_error_emails = False
     max_retries = 3
     default_retry_delay = 3 * 60
     serializer = conf.TASK_SERIALIZER
+    rate_limit = conf.DEFAULT_RATE_LIMIT
+    rate_limit_queue_type = Queue
+    backend = default_backend
 
     MaxRetriesExceededError = MaxRetriesExceededError
 
-    def __init__(self):
-        if not self.__class__.name:
-            self.__class__.name = get_full_cls_name(self.__class__)
-
     def __call__(self, *args, **kwargs):
         return self.run(*args, **kwargs)
 
@@ -156,67 +178,37 @@ class Task(object):
         by the worker if the function/method supports them:
 
             * task_id
-
-                Unique id of the currently executing task.
-
             * task_name
-
-                Name of the currently executing task (same as :attr:`name`)
-
             * task_retries
-
-                How many times the current task has been retried
-                (an integer starting at ``0``).
-
             * logfile
-
-                Name of the worker log file.
-
             * loglevel
 
-                The current loglevel, an integer mapping to one of the
-                following values: ``logging.DEBUG``, ``logging.INFO``,
-                ``logging.ERROR``, ``logging.CRITICAL``, ``logging.WARNING``,
-                ``logging.FATAL``.
-
         Additional standard keyword arguments may be added in the future.
         To take these default arguments, the task can either list the ones
         it wants explicitly or just take an arbitrary list of keyword
         arguments (\*\*kwargs).
 
-        Example using an explicit list of default arguments to take:
-
-        .. code-block:: python
-
-            def run(self, x, y, logfile=None, loglevel=None):
-                self.get_logger(loglevel=loglevel, logfile=logfile)
-                return x * y
-
-
-        Example taking all default keyword arguments, and any extra arguments
-        passed on by the caller:
-
-        .. code-block:: python
-
-            def run(self, x, y, **kwargs): # CORRECT!
-                logger = self.get_logger(**kwargs)
-                adjust = kwargs.get("adjust", 0)
-                return x * y - adjust
-
         """
-        raise NotImplementedError("Tasks must define a run method.")
+        raise NotImplementedError("Tasks must define the run method.")
 
-    def get_logger(self, **kwargs):
+    @classmethod
+    def get_logger(self, loglevel=None, logfile=None, **kwargs):
         """Get process-aware logger object.
 
         See :func:`celery.log.setup_logger`.
 
         """
-        logfile = kwargs.get("logfile")
-        loglevel = kwargs.get("loglevel")
         return setup_logger(loglevel=loglevel, logfile=logfile)
 
-    def get_publisher(self, connect_timeout=conf.AMQP_CONNECTION_TIMEOUT):
+    @classmethod
+    def establish_connection(self,
+            connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+        """Establish a connection to the message broker."""
+        return _establish_connection(connect_timeout)
+
+    @classmethod
+    def get_publisher(self, connection=None, exchange=None,
+            connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
         """Get a celery task message publisher.
 
         :rtype: :class:`celery.messaging.TaskPublisher`.
@@ -229,13 +221,16 @@ class Task(object):
             >>> publisher.connection.close()
 
         """
-
-        connection = DjangoBrokerConnection(connect_timeout=connect_timeout)
+        if exchange is None:
+            exchange = self.exchange
+        connection = connection or self.establish_connection(connect_timeout)
         return TaskPublisher(connection=connection,
-                             exchange=self.exchange,
+                             exchange=exchange,
                              routing_key=self.routing_key)
 
-    def get_consumer(self, connect_timeout=conf.AMQP_CONNECTION_TIMEOUT):
+    @classmethod
+    def get_consumer(self, connection=None,
+            connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
         """Get a celery task message consumer.
 
         :rtype: :class:`celery.messaging.TaskConsumer`.
@@ -248,27 +243,25 @@ class Task(object):
             >>> consumer.connection.close()
 
         """
-        connection = DjangoBrokerConnection(connect_timeout=connect_timeout)
+        connection = connection or self.establish_connection(connect_timeout)
         return TaskConsumer(connection=connection, exchange=self.exchange,
                             routing_key=self.routing_key)
 
     @classmethod
-    def delay(cls, *args, **kwargs):
-        """Delay this task for execution by the ``celery`` daemon(s).
+    def delay(self, *args, **kwargs):
+        """Shortcut to :meth:`apply_async`, with star arguments,
+        but doesn't support the extra options.
 
         :param \*args: positional arguments passed on to the task.
-
         :param \*\*kwargs: keyword arguments passed on to the task.
 
-        :rtype: :class:`celery.result.AsyncResult`
-
-        See :func:`celery.execute.delay_task`.
+        :returns: :class:`celery.result.AsyncResult`
 
         """
-        return apply_async(cls, args, kwargs)
+        return self.apply_async(args, kwargs)
 
     @classmethod
-    def apply_async(cls, args=None, kwargs=None, **options):
+    def apply_async(self, args=None, kwargs=None, **options):
         """Delay this task for execution by the ``celery`` daemon(s).
 
         :param args: positional arguments passed on to the task.
@@ -282,8 +275,9 @@ class Task(object):
 
 
         """
-        return apply_async(cls, args, kwargs, **options)
+        return apply_async(self, args, kwargs, **options)
 
+    @classmethod
     def retry(self, args, kwargs, exc=None, throw=True, **options):
         """Retry the task.
 
@@ -292,12 +286,12 @@ class Task(object):
         :keyword exc: Optional exception to raise instead of
             :exc:`MaxRestartsExceededError` when the max restart limit has
             been exceeded.
-        :keyword throw: Do not raise the
-            :exc:`celery.exceptions.RetryTaskError` exception,
-            that tells the worker that the task is to be retried.
         :keyword countdown: Time in seconds to delay the retry for.
         :keyword eta: Explicit time and date to run the retry at (must be a
             :class:`datetime.datetime` instance).
+        :keyword throw: If this is ``False``, do not raise the
+            :exc:`celery.exceptions.RetryTaskError` exception,
+            that tells the worker that the task is to be retried.
         :keyword \*\*options: Any extra options to pass on to
             meth:`apply_async`. See :func:`celery.execute.apply_async`.
 
@@ -335,8 +329,7 @@ class Task(object):
         if kwargs.get("task_is_eager", False):
             result = self.apply(args=args, kwargs=kwargs, **options)
             if isinstance(result, EagerResult):
-                # get() propogates any exceptions.
-                return result.get()
+                return result.get() # propagates exceptions.
             return result
 
         self.apply_async(args=args, kwargs=kwargs, **options)
@@ -345,6 +338,24 @@ class Task(object):
             message = "Retry in %d seconds." % options["countdown"]
             raise RetryTaskError(message, exc)
 
+    @classmethod
+    def apply(self, args=None, kwargs=None, **options):
+        """Execute this task at once, by blocking until the task
+        has finished executing.
+
+        :param args: positional arguments passed on to the task.
+        :param kwargs: keyword arguments passed on to the task.
+        :rtype: :class:`celery.result.EagerResult`
+
+        See :func:`celery.execute.apply`.
+
+        """
+        return apply(self, args, kwargs, **options)
+
+    @classmethod
+    def AsyncResult(self, task_id):
+        return BaseAsyncResult(task_id, backend=self.backend)
+
     def on_retry(self, exc, task_id, args, kwargs):
         """Retry handler.
 
@@ -378,7 +389,7 @@ class Task(object):
     def on_success(self, retval, task_id, args, kwargs):
         """Success handler.
 
-        This is run by the worker when the task executed successfully.
+        Run by the worker if the task executes successfully.
 
         :param retval: The return value of the task.
         :param task_id: Unique id of the executed task.
@@ -390,22 +401,6 @@ class Task(object):
         """
         pass
 
-    @classmethod
-    def apply(cls, args=None, kwargs=None, **options):
-        """Execute this task at once, by blocking until the task
-        has finished executing.
-
-        :param args: positional arguments passed on to the task.
-
-        :param kwargs: keyword arguments passed on to the task.
-
-        :rtype: :class:`celery.result.EagerResult`
-
-        See :func:`celery.execute.apply`.
-
-        """
-        return apply(cls, args, kwargs, **options)
-
 
 class ExecuteRemoteTask(Task):
     """Execute an arbitrary function or object.
@@ -422,15 +417,11 @@ class ExecuteRemoteTask(Task):
     def run(self, ser_callable, fargs, fkwargs, **kwargs):
         """
         :param ser_callable: A pickled function or callable object.
-
         :param fargs: Positional arguments to apply to the function.
-
         :param fkwargs: Keyword arguments to apply to the function.
 
         """
-        callable_ = pickle.loads(ser_callable)
-        return callable_(*fargs, **fkwargs)
-tasks.register(ExecuteRemoteTask)
+        return pickle.loads(ser_callable)(*fargs, **fkwargs)
 
 
 class AsynchronousMapTask(Task):
@@ -438,11 +429,9 @@ class AsynchronousMapTask(Task):
     :meth:`TaskSet.map_async`.  """
     name = "celery.map_async"
 
-    def run(self, serfunc, args, **kwargs):
-        """The method run by ``celeryd``."""
-        timeout = kwargs.get("timeout")
-        return TaskSet.map(pickle.loads(serfunc), args, timeout=timeout)
-tasks.register(AsynchronousMapTask)
+    def run(self, ser_callable, args, timeout=None, **kwargs):
+        """See :func:`celery.task.dmap_async`."""
+        return TaskSet.map(pickle.loads(ser_callable), args, timeout=timeout)
 
 
 class TaskSet(object):
@@ -472,13 +461,13 @@ class TaskSet(object):
 
         >>> from djangofeeds.tasks import RefreshFeedTask
         >>> taskset = TaskSet(RefreshFeedTask, args=[
-        ...                 [], {"feed_url": "http://cnn.com/rss"},
-        ...                 [], {"feed_url": "http://bbc.com/rss"},
-        ...                 [], {"feed_url": "http://xkcd.com/rss"}])
-
-        >>> taskset_result = taskset.run()
-        >>> list_of_return_values = taskset.join()
+        ...                 ([], {"feed_url": "http://cnn.com/rss"}),
+        ...                 ([], {"feed_url": "http://bbc.com/rss"}),
+        ...                 ([], {"feed_url": "http://xkcd.com/rss"})
+        ... ])
 
+        >>> taskset_result = taskset.apply_async()
+        >>> list_of_return_values = taskset_result.join()
 
     """
 
@@ -490,22 +479,32 @@ class TaskSet(object):
             task_name = task
             task_obj = tasks[task_name]
 
+        # Get task instance
+        task_obj = tasks[task_obj.name]
+
         self.task = task_obj
         self.task_name = task_name
         self.arguments = args
         self.total = len(args)
 
-    def run(self, connect_timeout=conf.AMQP_CONNECTION_TIMEOUT):
+    def run(self, *args, **kwargs):
+        """Deprecated alias to :meth:`apply_async`"""
+        warnings.warn(PendingDeprecationWarning(
+            "TaskSet.run will be deprecated in favor of TaskSet.apply_async "
+            "in celery v1.2.0"))
+        return self.apply_async(*args, **kwargs)
+
+    def apply_async(self, connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
         """Run all tasks in the taskset.
 
         :returns: A :class:`celery.result.TaskSetResult` instance.
 
         Example
 
-            >>> ts = TaskSet(RefreshFeedTask, [
-            ...         ["http://foo.com/rss", {}],
-            ...         ["http://bar.com/rss", {}],
-            ... )
+            >>> ts = TaskSet(RefreshFeedTask, args=[
+            ...         (["http://foo.com/rss"], {}),
+            ...         (["http://bar.com/rss"], {}),
+            ... ])
             >>> result = ts.run()
             >>> result.taskset_id
             "d2c9b261-8eff-4bfb-8459-1e1b72063514"
@@ -525,44 +524,33 @@ class TaskSet(object):
             [True, True]
 
         """
-        taskset_id = gen_unique_id()
+        if conf.ALWAYS_EAGER:
+            return self.apply()
 
-        from celery.conf import ALWAYS_EAGER
-        if ALWAYS_EAGER:
-            subtasks = [apply(self.task, args, kwargs)
-                            for args, kwargs in self.arguments]
-            return TaskSetResult(taskset_id, subtasks)
-
-        conn = DjangoBrokerConnection(connect_timeout=connect_timeout)
-        publisher = TaskPublisher(connection=conn,
-                                  exchange=self.task.exchange)
-        subtasks = [apply_async(self.task, args, kwargs,
-                                taskset_id=taskset_id, publisher=publisher)
-                        for args, kwargs in self.arguments]
-        publisher.close()
-        conn.close()
+        taskset_id = gen_unique_id()
+        conn = self.task.establish_connection(connect_timeout=connect_timeout)
+        publisher = self.task.get_publisher(connection=conn)
+        try:
+            subtasks = [self.apply_part(arglist, taskset_id, publisher)
+                            for arglist in self.arguments]
+        finally:
+            publisher.close()
+            conn.close()
         result = TaskSetResult(taskset_id, subtasks)
-        default_backend.store_taskset(taskset_id, result)
-        return result
-
-    def join(self, timeout=None):
-        """Gather the results for all of the tasks in the taskset,
-        and return a list with them ordered by the order of which they
-        were called.
-
-        :keyword timeout: The time in seconds, how long
-            it will wait for results, before the operation times out.
-
-        :raises TimeoutError: if ``timeout`` is not ``None``
-            and the operation takes longer than ``timeout`` seconds.
+        self.task.backend.store_taskset(taskset_id, result)
 
-        If any of the tasks raises an exception, the exception
-        will be reraised by :meth:`join`.
+        return result
 
-        :returns: list of return values for all tasks in the taskset.
+    def apply_part(self, arglist, taskset_id, publisher):
+        args, kwargs, opts = mexpand(arglist, 3, default={})
+        return apply_async(self.task, args, kwargs,
+                           taskset_id=taskset_id, publisher=publisher, **opts)
 
-        """
-        return self.run().join(timeout=timeout)
+    def apply(self):
+        taskset_id = gen_unique_id()
+        subtasks = [apply(self.task, args, kwargs)
+                        for args, kwargs in self.arguments]
+        return TaskSetResult(taskset_id, subtasks)
 
     @classmethod
     def remote_execute(cls, func, args):
@@ -576,7 +564,7 @@ class TaskSet(object):
     def map(cls, func, args, timeout=None):
         """Distribute processing of the arguments and collect the results."""
         remote_task = cls.remote_execute(func, args)
-        return remote_task.join(timeout=timeout)
+        return remote_task.run().join(timeout=timeout)
 
     @classmethod
     def map_async(cls, func, args, timeout=None):
@@ -593,36 +581,42 @@ class TaskSet(object):
 class PeriodicTask(Task):
     """A periodic task is a task that behaves like a :manpage:`cron` job.
 
+    Results of periodic tasks are not stored by default.
+
     .. attribute:: run_every
 
         *REQUIRED* Defines how often the task is run (its interval),
         it can be either a :class:`datetime.timedelta` object or an
         integer specifying the time in seconds.
 
+    .. attribute:: relative
+
+        If set to ``True``, run times are relative to the time when the
+        server was started. This was the previous behaviour; periodic tasks
+        are now scheduled by the clock instead.
+
     :raises NotImplementedError: if the :attr:`run_every` attribute is
         not defined.
 
-    You have to register the periodic task in the task registry.
-
     Example
 
         >>> from celery.task import tasks, PeriodicTask
         >>> from datetime import timedelta
         >>> class MyPeriodicTask(PeriodicTask):
-        ...     name = "my_periodic_task"
         ...     run_every = timedelta(seconds=30)
         ...
         ...     def run(self, **kwargs):
         ...         logger = self.get_logger(**kwargs)
         ...         logger.info("Running MyPeriodicTask")
-        >>> tasks.register(MyPeriodicTask)
 
     """
-    run_every = timedelta(days=1)
+    abstract = True
+    ignore_result = True
     type = "periodic"
+    relative = False
 
     def __init__(self):
-        if not self.run_every:
+        if not hasattr(self, "run_every"):
             raise NotImplementedError(
                     "Periodic tasks must have a run_every attribute")
 
@@ -633,3 +627,56 @@ class PeriodicTask(Task):
             self.__class__.run_every = timedelta(seconds=self.run_every)
 
         super(PeriodicTask, self).__init__()
+
+    def remaining_estimate(self, last_run_at):
+        """Returns when the periodic task should run next as a timedelta."""
+        next_run_at = last_run_at + self.run_every
+        if not self.relative:
+            next_run_at = self.delta_resolution(next_run_at, self.run_every)
+        return next_run_at - datetime.now()
+
+    def timedelta_seconds(self, delta):
+        """Convert :class:`datetime.timedelta` to seconds.
+
+        Doesn't account for negative timedeltas.
+
+        """
+        return timedelta_seconds(delta)
+
+    def is_due(self, last_run_at):
+        """Returns tuple of two items ``(is_due, next_time_to_run)``,
+        where next time to run is in seconds.
+
+        e.g.
+
+        * ``(True, 20)``, means the task should be run now, and the next
+            time to run is in 20 seconds.
+
+        * ``(False, 12)``, means the task should be run in 12 seconds.
+
+        You can override this to decide the interval at runtime,
+        but keep in mind the value of ``CELERYBEAT_MAX_LOOP_INTERVAL``, which
+        decides the maximum number of seconds celerybeat can sleep between
+        re-checking the periodic task intervals. So if you dynamically change
+        the next run at value, and the max interval is set to 5 minutes, it
+        will take 5 minutes for the change to take effect, so you may
+        consider lowering the value of ``CELERYBEAT_MAX_LOOP_INTERVAL`` if
+        responsiveness is of importance to you.
+
+        """
+        rem_delta = self.remaining_estimate(last_run_at)
+        rem = self.timedelta_seconds(rem_delta)
+        if rem == 0:
+            return True, self.timedelta_seconds(self.run_every)
+        return False, rem
+
+    def delta_resolution(self, dt, delta):
+        resolution = {3: lambda x: x / 86400,
+                      4: lambda x: x / 3600,
+                      5: lambda x: x / 60}
+        args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
+        r = None
+        for res, calc in resolution.items():
+            if calc(self.timedelta_seconds(delta)):
+                r = res
+        return datetime(*args[:r])

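A small worked sketch of the ``PeriodicTask`` scheduling helpers above; the task is hypothetical and the numbers only illustrate the ``(is_due, next_time_to_run)`` contract:

    from datetime import datetime, timedelta
    from celery.task.base import PeriodicTask

    class HeartbeatTask(PeriodicTask):           # hypothetical periodic task
        run_every = timedelta(seconds=30)

        def run(self, **kwargs):
            self.get_logger(**kwargs).info("ping")

    task = HeartbeatTask()
    # Last run more than run_every ago: due now, next run ~30 seconds away.
    is_due, next_in = task.is_due(datetime.now() - timedelta(seconds=31))
    # Last run just now: not due, next_in is roughly the remaining ~30 seconds.
    is_due, next_in = task.is_due(datetime.now())
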
+ 4 - 7
celery/task/builtins.py

@@ -1,8 +1,7 @@
-from celery.task.base import Task, TaskSet, PeriodicTask
-from celery.registry import tasks
-from celery.backends import default_backend
 from datetime import timedelta
-from celery.serialization import pickle
+
+from celery.task.base import Task, PeriodicTask
+from celery.backends import default_backend
 
 
 class DeleteExpiredTaskMetaTask(PeriodicTask):
@@ -16,11 +15,10 @@ class DeleteExpiredTaskMetaTask(PeriodicTask):
     run_every = timedelta(days=1)
 
     def run(self, **kwargs):
-        """The method run by ``celeryd``."""
+        """:returns: None"""
         logger = self.get_logger(**kwargs)
         logger.info("Deleting expired task meta objects...")
         default_backend.cleanup()
-tasks.register(DeleteExpiredTaskMetaTask)
 
 
 class PingTask(Task):
@@ -30,4 +28,3 @@ class PingTask(Task):
     def run(self, **kwargs):
         """:returns: the string ``"pong"``."""
         return "pong"
-tasks.register(PingTask)

+ 85 - 0
celery/task/control.py

@@ -0,0 +1,85 @@
+from celery import conf
+from celery.messaging import TaskConsumer, BroadcastPublisher, with_connection
+
+
+@with_connection
+def discard_all(connection=None,
+        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+    """Discard all waiting tasks.
+
+    This will ignore all tasks waiting for execution, and they will
+    be deleted from the messaging server.
+
+    :returns: the number of tasks discarded.
+
+    """
+    consumer = TaskConsumer(connection=connection)
+    try:
+        return consumer.discard_all()
+    finally:
+        consumer.close()
+
+
+def revoke(task_id, destination=None, connection=None,
+        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+    """Revoke a task by id.
+
+    If a task is revoked, the workers will ignore the task and not execute
+    it after all.
+
+    :param task_id: Id of the task to revoke.
+    :keyword destination: If set, a list of the hosts to send the command to,
+        when empty broadcast to all workers.
+    :keyword connection: Custom broker connection to use, if not set,
+        a connection will be established automatically.
+    :keyword connect_timeout: Timeout for new connection if a custom
+        connection is not provided.
+
+    """
+    return broadcast("revoke", destination=destination,
+                               arguments={"task_id": task_id})
+
+
+def rate_limit(task_name, rate_limit, destination=None, connection=None,
+        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+    """Set rate limit for task by type.
+
+    :param task_name: Type of task to change rate limit for.
+    :param rate_limit: The rate limit as tasks per second, or a rate limit
+      string (``"100/m"``, etc. see :attr:`celery.task.base.Task.rate_limit`
+      for more information).
+    :keyword destination: If set, a list of the hosts to send the command to,
+        when empty broadcast to all workers.
+    :keyword connection: Custom broker connection to use, if not set,
+        a connection will be established automatically.
+    :keyword connect_timeout: Timeout for new connection if a custom
+        connection is not provided.
+
+    """
+    return broadcast("rate_limit", destination=destination,
+                                   arguments={"task_name": task_name,
+                                              "rate_limit": rate_limit})
+
+
+@with_connection
+def broadcast(command, arguments=None, destination=None, connection=None,
+        connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
+    """Broadcast a control command to the celery workers.
+
+    :param command: Name of command to send.
+    :param arguments: Keyword arguments for the command.
+    :keyword destination: If set, a list of the hosts to send the command to,
+        when empty broadcast to all workers.
+    :keyword connection: Custom broker connection to use, if not set,
+        a connection will be established automatically.
+    :keyword connect_timeout: Timeout for new connection if a custom
+        connection is not provided.
+
+    """
+    arguments = arguments or {}
+
+    broadcast = BroadcastPublisher(connection)
+    try:
+        broadcast.send(command, arguments, destination=destination)
+    finally:
+        broadcast.close()

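A hedged sketch of the new remote-control helpers above; the task id, task name and host names are placeholders, and each call assumes a reachable broker:

    from celery.task import control

    # Tell the workers to ignore a task that is still waiting in the queue.
    control.revoke("d2c9b261-8eff-4bfb-8459-1e1b72063514")

    # Change the rate limit for a task type on two specific workers only.
    control.rate_limit("examples.add", "100/m",
                       destination=["worker1.example.com",
                                    "worker2.example.com"])

    # Throw away every task message currently waiting on the broker.
    discarded_count = control.discard_all()
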
+ 212 - 0
celery/task/http.py

@@ -0,0 +1,212 @@
+import urllib2
+import warnings
+try:
+    from urlparse import parse_qsl
+except ImportError:
+    from cgi import parse_qsl
+from urllib import urlencode
+from urlparse import urlparse
+
+from anyjson import serialize, deserialize
+from billiard.utils.functional import wraps
+
+from celery import __version__ as celery_version
+from celery.task.base import Task as BaseTask
+
+
+class InvalidResponseError(Exception):
+    """The remote server gave an invalid response."""
+
+
+class RemoteExecuteError(Exception):
+    """The remote task gave a custom error."""
+
+
+class UnknownStatusError(InvalidResponseError):
+    """The remote server gave an unknown status."""
+
+
+def maybe_utf8(value):
+    """Encode utf-8 value, only if the value is actually utf-8."""
+    if isinstance(value, unicode):
+        return value.encode("utf-8")
+    return value
+
+
+def utf8dict(tup):
+    """Given a dict's ``items()`` sequence, return a new dict with any
+    unicode keys/values encoded as utf-8."""
+    return dict((key.encode("utf-8"), maybe_utf8(value))
+                    for key, value in tup)
+
+
+class MutableURL(object):
+    """Object wrapping a Uniform Resource Locator.
+
+    Supports editing the query parameter list.
+    You can convert the object back to a string, the query will be
+    properly urlencoded.
+
+    Examples
+
+        >>> url = URL("http://www.google.com:6580/foo/bar?x=3&y=4#foo")
+        >>> url.query
+        {'x': '3', 'y': '4'}
+        >>> str(url)
+        'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
+        >>> url.query["x"] = 10
+        >>> url.query.update({"George": "Constanza"})
+        >>> str(url)
+        'http://www.google.com:6580/foo/bar?y=4&x=10&George=Constanza#foo'
+
+    """
+    def __init__(self, url):
+        self.url = urlparse(url)
+        self._query = dict(parse_qsl(self.url.query))
+
+
+    def __str__(self):
+        u = self.url
+        query = urlencode(utf8dict(self.query.items()))
+        components = ["%s://" % u.scheme,
+                      "%s" % u.netloc,
+                      u.path and "%s" % u.path or "/",
+                      u.params and ";%s" % u.params or None,
+                      query and "?%s" % query or None,
+                      u.fragment and "#%s" % u.fragment or None]
+        return "".join(filter(None, components))
+
+    def __repr__(self):
+        return "<%s %s>" % (self.__class__.__name__, str(self))
+
+    def _get_query(self):
+        return self._query
+
+    def _set_query(self, query):
+        self._query = query
+
+
+    query = property(_get_query, _set_query)
+
+
+class HttpDispatch(object):
+    """Make task HTTP request and collect the task result.
+
+    :param url: The URL to request.
+    :param method: HTTP method used. Currently supported methods are ``GET``
+        and ``POST``.
+    :param task_kwargs: Task keyword arguments.
+    :param logger: Logger used for user/system feedback.
+
+    """
+    user_agent = "celery/%s" % celery_version
+    timeout = 5
+
+    def __init__(self, url, method, task_kwargs, logger):
+        self.url = url
+        self.method = method
+        self.task_kwargs = task_kwargs
+        self.logger = logger
+
+    def make_request(self, url, method, params):
+        """Makes an HTTP request and returns the response."""
+        request = urllib2.Request(url, params, headers=self.http_headers)
+        request.headers.update(self.http_headers)
+        response = urllib2.urlopen(request) # user catches errors.
+        return response.read()
+
+    def _dispatch_raw(self):
+        """Dispatches the callback and returns the raw response text."""
+        url = MutableURL(self.url)
+        params = None
+        if self.method == "GET":
+            url.query.update(self.task_kwargs)
+        elif self.method == "POST":
+            params = urlencode(utf8dict(self.task_kwargs.items()))
+        return self.make_request(str(url), self.method, params)
+
+    def execute(self):
+        warnings.warn(DeprecationWarning(
+            "execute() has been deprecated and is scheduled for removal in "
+            "celery v1.2, please use dispatch() instead."))
+        return self.dispatch()
+
+    def dispatch(self):
+        """Dispatch callback and return result."""
+        response = self._dispatch_raw()
+        if not response:
+            raise InvalidResponseError("Empty response")
+        try:
+            payload = deserialize(response)
+        except ValueError, exc:
+            raise InvalidResponseError(str(exc))
+
+        status = payload["status"]
+        if status == "success":
+            return payload["retval"]
+        elif status == "failure":
+            raise RemoteExecuteError(payload.get("reason"))
+        else:
+            raise UnknownStatusError(str(status))
+
+    @property
+    def http_headers(self):
+        headers = {"Content-Type": "application/json",
+                   "User-Agent": self.user_agent}
+        return headers
+
+
+class HttpDispatchTask(BaseTask):
+    """Task dispatching to an URL.
+
+    :keyword url: The URL location of the HTTP callback task.
+    :keyword method: Method to use when dispatching the callback. Usually
+        ``GET`` or ``POST``.
+    :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.
+
+    .. attribute:: url
+
+        If this is set, this is used as the default URL for requests.
+        Default is to require the user of the task to supply the url as an
+        argument, as this attribute is intended for subclasses.
+
+    .. attribute:: method
+
+        If this is set, this is the default method used for requests.
+        Default is to require the user of the task to supply the method as an
+        argument, as this attribute is intended for subclasses.
+
+    """
+
+    url = None
+    method = None
+
+    def run(self, url=None, method="GET", **kwargs):
+        url = url or self.url
+        method = method or self.method
+        logger = self.get_logger(**kwargs)
+        return HttpDispatch(url, method, kwargs, logger).execute()
+
+
+class URL(MutableURL):
+    """HTTP Callback URL
+
+    Supports requesting a URL asynchronously.
+
+    :param url: URL to request.
+    :keyword dispatcher: Class used to dispatch the request.
+        By default this is :class:`HttpDispatchTask`.
+
+    """
+    dispatcher = HttpDispatchTask
+
+    def __init__(self, url, dispatcher=None):
+        super(URL, self).__init__(url)
+        self.dispatcher = dispatcher or self.dispatcher
+
+    def get_async(self, **kwargs):
+        return self.dispatcher.delay(str(self), "GET", **kwargs)
+
+    def post_async(self, **kwargs):
+        return self.dispatcher.delay(str(self), "POST", **kwargs)
+
+

+ 14 - 153
celery/task/rest.py

@@ -1,158 +1,19 @@
-from celery.task.base import Task as BaseTask
-from celery.registry import tasks
-from celery import __version__ as celery_version
-from cgi import parse_qsl
-from urllib import urlencode
-from urlparse import urlparse
-from anyjson import serialize, deserialize
-import httplib
-import urllib2
+from celery.task.http import (InvalidResponseError, RemoteExecuteError,
+                              UnknownStatusError)
+from celery.task.http import URL
+from celery.task.http import HttpDispatch as RESTProxy
+from celery.task.http import HttpDispatchTask as RESTProxyTask
 
+import warnings
+warnings.warn(DeprecationWarning(
+"""celery.task.rest has been deprecated and is scheduled for removal in
+v1.2. Please use celery.task.http instead.
 
-class InvalidResponseError(Exception):
-    """The remote server gave an invalid response."""
+The following objects have been renamed:
 
+    celery.task.rest.RESTProxy -> celery.task.http.HttpDispatch
+    celery.task.rest.RESTProxyTask -> celery.task.http.HttpDispatchTask
 
-class RemoteExecuteError(Exception):
-    """The remote task gave a custom error."""
+Other objects have the same name, just moved to the celery.task.http module.
 
-
-class UnknownStatusError(InvalidResponseError):
-    """The remote server gave an unknown status."""
-
-
-class URL(object):
-    """Object wrapping a Uniform Resource Locator.
-
-    Supports editing the query parameter list.
-    You can convert the object back to a string, the query will be
-    properly urlencoded.
-
-    Examples
-
-        >>> url = URL("http://www.google.com:6580/foo/bar?x=3&y=4#foo")
-        >>> url.query
-        {'x': '3', 'y': '4'}
-        >>> str(url)
-        'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
-        >>> url.query["x"] = 10
-        >>> url.query.update({"George": "Constanza"})
-        >>> str(url)
-        'http://www.google.com:6580/foo/bar?y=4&x=10&George=Constanza#foo'
-
-    """
-
-    def __init__(self, url):
-        self.url = urlparse(url)
-        self._query = dict(parse_qsl(self.url.query))
-
-    def _utf8dict(self, tuple_):
-
-        def value_encode(val):
-            if isinstance(val, unicode):
-                return val.encode("utf-8")
-            return val
-
-        return dict((key.encode("utf-8"), value_encode(value))
-                        for key, value in tuple_)
-
-    def __str__(self):
-        u = self.url
-        query = urlencode(self._utf8dict(self.query.items()))
-        components = ["%s://" % u.scheme,
-                      "%s" % u.netloc,
-                      "%s" % u.path if u.path else "/",
-                      ";%s" % u.params if u.params else None,
-                      "?%s" % query if query else None,
-                      "#%s" % u.fragment if u.fragment else None]
-        return "".join(filter(None, components))
-
-    def __repr__(self):
-        return "<%s %s>" % (self.__class__.__name__, str(self))
-
-    def _get_query(self):
-        return self._query
-
-    def _set_query(self, query):
-        self._query = query
-
-    query = property(_get_query, _set_query)
-
-
-class RESTProxy(object):
-    user_agent = "celery/%s" % celery_version
-    timeout = 5
-
-    def __init__(self, url, task_kwargs, logger):
-        self.url = url
-        self.task_kwargs = task_kwargs
-        self.logger = logger
-
-    def _create_request(self):
-        url = URL(self.url)
-        url.query.update(self.task_kwargs)
-        req = urllib2.Request(str(url))
-        req.headers.update(self.http_headers)
-        return req
-
-    def _make_request(self):
-        request = self._create_request()
-        opener = urllib2.build_opener()
-        response = opener.open(request)
-        return response.read()
-
-    def execute(self):
-        response = self._make_request()
-        if not response:
-            raise InvalidResponseError("Empty response")
-        try:
-            payload = deserialize(response)
-        except ValueError, exc:
-            raise InvalidResponseError(str(exc))
-
-        # {"status": "success", "retval": 300}
-        # {"status": "failure": "reason": "Invalid moon alignment."}
-        status = payload["status"]
-        if status == "success":
-            return payload["retval"]
-        elif status == "failure":
-            raise RemoteExecuteError(payload["reason"])
-        else:
-            raise UnknownStatusError(str(status))
-
-    @property
-    def http_headers(self):
-        headers = {"Content-Type": "application/json",
-                   "User-Agent": self.user_agent}
-        return headers
-
-
-class RESTProxyTask(BaseTask):
-    name = "celery.task.rest.RESTProxyTask"
-    user_agent = "celery %s" % celery_version
-
-    def run(self, url, **kwargs):
-        logger = self.get_logger(**kwargs)
-        proxy = RESTProxy(url, kwargs, logger)
-        return proxy.execute()
-tasks.register(RESTProxyTask)
-
-
-def task_response(fun, *args, **kwargs):
-    import sys
-    try:
-        sys.stderr.write("executing %s\n" % fun)
-        retval = fun(*args, **kwargs)
-        sys.stderr.write("got: %s\n" % retval)
-    except Exception, exc:
-        response = {"status": "failure", "reason": str(exc)}
-    else:
-        response = {"status": "success", "retval": retval}
-
-    return serialize(response)
-
-
-class Task(BaseTask):
-
-    def __call__(self, *args, **kwargs):
-        return task_response(self.run, *args, **kwargs)
+"""))

+ 0 - 52
celery/task/strategy.py

@@ -1,52 +0,0 @@
-from carrot.connection import DjangoBrokerConnection
-from celery.utils import chunks
-
-
-def even_time_distribution(task, size, time_window, iterable, **apply_kwargs):
-    """With an iterator yielding task args, kwargs tuples, evenly distribute
-    the processing of its tasks throughout the time window available.
-
-    :param task: The kind of task (a :class:`celery.task.base.Task`.)
-    :param size: Total number of elements the iterator gives.
-    :param time_window: Total time available, in minutes.
-    :param iterable: Iterable yielding task args, kwargs tuples.
-    :param \*\*apply_kwargs: Additional keyword arguments to be passed on to
-        :func:`celery.execute.apply_async`.
-
-    Example
-
-        >>> class RefreshAllFeeds(Task):
-        ...
-        ...     def run(self, **kwargs):
-        ...         feeds = Feed.objects.all()
-        ...         total = feeds.count()
-        ...
-        ...         time_window = REFRESH_FEEDS_EVERY_INTERVAL_MINUTES
-        ...
-        ...         def iter_feed_task_args(iterable):
-        ...             for feed in iterable:
-        ...                 yield ([feed.feed_url], {}) # args, kwargs tuple
-        ...
-        ...         it = iter_feed_task_args(feeds.iterator())
-        ...
-        ...         even_time_distribution(RefreshFeedTask, total,
-        ...                                time_window, it)
-
-    """
-
-    bucketsize = size / time_window
-    buckets = chunks(iterable, int(bucketsize))
-
-    connection = DjangoBrokerConnection()
-    try:
-        for bucket_count, bucket in enumerate(buckets):
-            # Skew the countdown for items in this bucket by one.
-            seconds_eta = (60 * bucket_count if bucket_count else None)
-
-            for args, kwargs in bucket:
-                task.apply_async(args=args, kwargs=kwargs,
-                                 connection=connection,
-                                 countdown=seconds_eta,
-                                 **apply_kwargs)
-    finally:
-        connection.close()

+ 27 - 0
celery/tests/test_backends/__init__.py

@@ -0,0 +1,27 @@
+import unittest
+
+
+from celery.backends.database import DatabaseBackend
+from celery.backends.amqp import AMQPBackend
+from celery.backends.pyredis import RedisBackend
+from celery import backends
+
+
+class TestBackends(unittest.TestCase):
+
+    def test_get_backend_aliases(self):
+        self.assertTrue(issubclass(
+            backends.get_backend_cls("amqp"), AMQPBackend))
+        self.assertTrue(issubclass(
+            backends.get_backend_cls("database"), DatabaseBackend))
+        self.assertTrue(issubclass(
+            backends.get_backend_cls("db"), DatabaseBackend))
+        self.assertTrue(issubclass(
+            backends.get_backend_cls("redis"), RedisBackend))
+
+    def test_get_backend_cache(self):
+        backends._backend_cache = {}
+        backends.get_backend_cls("amqp")
+        self.assertTrue("amqp" in backends._backend_cache)
+        amqp_backend = backends.get_backend_cls("amqp")
+        self.assertTrue(amqp_backend is backends._backend_cache["amqp"])

+ 63 - 0
celery/tests/test_backends/test_amqp.py

@@ -0,0 +1,63 @@
+from __future__ import with_statement
+
+import sys
+import unittest
+import errno
+
+from django.core.exceptions import ImproperlyConfigured
+
+from celery.backends.amqp import AMQPBackend
+from celery.utils import gen_unique_id
+from celery.datastructures import ExceptionInfo
+
+
+class SomeClass(object):
+
+    def __init__(self, data):
+        self.data = data
+
+
+class TestAMQPBackend(unittest.TestCase):
+
+    def setUp(self):
+        self.backend = AMQPBackend()
+
+    def test_mark_as_done(self):
+        tb = self.backend
+
+        tid = gen_unique_id()
+
+        tb.mark_as_done(tid, 42)
+        self.assertTrue(tb.is_successful(tid))
+        self.assertEquals(tb.get_status(tid), "SUCCESS")
+        self.assertEquals(tb.get_result(tid), 42)
+        self.assertTrue(tb._cache.get(tid))
+        self.assertTrue(tb.get_result(tid), 42)
+
+    def test_is_pickled(self):
+        tb = self.backend
+
+        tid2 = gen_unique_id()
+        result = {"foo": "baz", "bar": SomeClass(12345)}
+        tb.mark_as_done(tid2, result)
+        # is serialized properly.
+        rindb = tb.get_result(tid2)
+        self.assertEquals(rindb.get("foo"), "baz")
+        self.assertEquals(rindb.get("bar").data, 12345)
+
+    def test_mark_as_failure(self):
+        tb = self.backend
+
+        tid3 = gen_unique_id()
+        try:
+            raise KeyError("foo")
+        except KeyError, exception:
+            einfo = ExceptionInfo(sys.exc_info())
+        tb.mark_as_failure(tid3, exception, traceback=einfo.traceback)
+        self.assertFalse(tb.is_successful(tid3))
+        self.assertEquals(tb.get_status(tid3), "FAILURE")
+        self.assertTrue(isinstance(tb.get_result(tid3), KeyError))
+        self.assertEquals(tb.get_traceback(tid3), einfo.traceback)
+
+    def test_process_cleanup(self):
+        self.backend.process_cleanup()

+ 11 - 6
celery/tests/test_backends/test_base.py

@@ -1,10 +1,12 @@
 import unittest
 import types
-from celery.backends.base import BaseBackend, KeyValueStoreBackend
-from celery.serialization import find_nearest_pickleable_exception as fnpe
-from celery.serialization import UnpickleableExceptionWrapper
-from celery.serialization import get_pickleable_exception as gpe
+
 from django.db.models.base import subclass_exception
+from billiard.serialization import find_nearest_pickleable_exception as fnpe
+from billiard.serialization import UnpickleableExceptionWrapper
+from billiard.serialization import get_pickleable_exception as gpe
+
+from celery.backends.base import BaseBackend, KeyValueStoreBackend
 
 
 class wrapobject(object):
@@ -24,11 +26,11 @@ class TestBaseBackendInterface(unittest.TestCase):
 
     def test_get_status(self):
         self.assertRaises(NotImplementedError,
-                b.is_done, "SOMExx-N0Nex1stant-IDxx-")
+                b.is_successful, "SOMExx-N0Nex1stant-IDxx-")
 
     def test_store_result(self):
         self.assertRaises(NotImplementedError,
-                b.store_result, "SOMExx-N0nex1stant-IDxx-", 42, "DONE")
+                b.store_result, "SOMExx-N0nex1stant-IDxx-", 42, "SUCCESS")
 
     def test_get_result(self):
         self.assertRaises(NotImplementedError,
@@ -42,6 +44,9 @@ class TestBaseBackendInterface(unittest.TestCase):
         self.assertRaises(NotImplementedError,
                 b.store_taskset, "SOMExx-N0nex1stant-IDxx-", "blergh")
 
+    def test_get_traceback(self):
+        self.assertRaises(NotImplementedError,
+                b.get_traceback, "SOMExx-N0nex1stant-IDxx-")
 
 
 class TestPickleException(unittest.TestCase):

+ 58 - 9
celery/tests/test_backends/test_cache.py

@@ -1,10 +1,11 @@
 import sys
 import unittest
-import errno
-import socket
-from celery.backends.cache import Backend as CacheBackend
+
+from billiard.serialization import pickle
+
 from celery.utils import gen_unique_id
-from django.conf import settings
+from celery.backends.cache import CacheBackend
+from celery.datastructures import ExceptionInfo
 
 
 class SomeClass(object):
@@ -20,13 +21,13 @@ class TestCacheBackend(unittest.TestCase):
 
         tid = gen_unique_id()
 
-        self.assertFalse(cb.is_done(tid))
+        self.assertFalse(cb.is_successful(tid))
         self.assertEquals(cb.get_status(tid), "PENDING")
         self.assertEquals(cb.get_result(tid), None)
 
         cb.mark_as_done(tid, 42)
-        self.assertTrue(cb.is_done(tid))
-        self.assertEquals(cb.get_status(tid), "DONE")
+        self.assertTrue(cb.is_successful(tid))
+        self.assertEquals(cb.get_status(tid), "SUCCESS")
         self.assertEquals(cb.get_result(tid), 42)
         self.assertTrue(cb._cache.get(tid))
         self.assertTrue(cb.get_result(tid), 42)
@@ -45,17 +46,65 @@ class TestCacheBackend(unittest.TestCase):
     def test_mark_as_failure(self):
         cb = CacheBackend()
 
+        einfo = None
         tid3 = gen_unique_id()
         try:
             raise KeyError("foo")
         except KeyError, exception:
+            einfo = ExceptionInfo(sys.exc_info())
             pass
-        cb.mark_as_failure(tid3, exception)
-        self.assertFalse(cb.is_done(tid3))
+        cb.mark_as_failure(tid3, exception, traceback=einfo.traceback)
+        self.assertFalse(cb.is_successful(tid3))
         self.assertEquals(cb.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(cb.get_result(tid3), KeyError))
+        self.assertEquals(cb.get_traceback(tid3), einfo.traceback)
 
     def test_process_cleanup(self):
         cb = CacheBackend()
 
         cb.process_cleanup()
+
+
+class TestCustomCacheBackend(unittest.TestCase):
+
+    def test_custom_cache_backend(self):
+        from celery import conf
+        prev_backend = conf.CELERY_CACHE_BACKEND
+        prev_module = sys.modules["celery.backends.cache"]
+        conf.CELERY_CACHE_BACKEND = "dummy://"
+        sys.modules.pop("celery.backends.cache")
+        try:
+            from celery.backends.cache import cache
+            from django.core.cache import cache as django_cache
+            self.assertEquals(cache.__class__.__module__,
+                              "django.core.cache.backends.dummy")
+            self.assertTrue(cache is not django_cache)
+        finally:
+            conf.CELERY_CACHE_BACKEND = prev_backend
+            sys.modules["celery.backends.cache"] = prev_module
+
+
+class TestMemcacheWrapper(unittest.TestCase):
+
+    def test_memcache_wrapper(self):
+
+        from django.core.cache.backends import memcached
+        from django.core.cache.backends import locmem
+        prev_cache_cls = memcached.CacheClass
+        memcached.CacheClass = locmem.CacheClass
+        prev_backend_module = sys.modules.pop("celery.backends.cache")
+        try:
+            from celery.backends.cache import cache, DjangoMemcacheWrapper
+            self.assertTrue(isinstance(cache, DjangoMemcacheWrapper))
+
+            key = "cu.test_memcache_wrapper"
+            val = "The quick brown fox."
+            default = "The lazy dog."
+
+            self.assertEquals(cache.get(key, default=default), default)
+            cache.set(key, val)
+            self.assertEquals(pickle.loads(cache.get(key, default=default)),
+                              val)
+        finally:
+            memcached.CacheClass = prev_cache_cls
+            sys.modules["celery.backends.cache"] = prev_backend_module

+ 9 - 22
celery/tests/test_backends/test_database.py

@@ -1,10 +1,9 @@
 import unittest
-from celery.backends.database import Backend
-from celery.utils import gen_unique_id
+from datetime import timedelta
+
 from celery.task import PeriodicTask
-from celery import registry
-from celery.models import PeriodicTaskMeta
-from datetime import datetime, timedelta
+from celery.utils import gen_unique_id
+from celery.backends.database import DatabaseBackend
 
 
 class SomeClass(object):
@@ -19,33 +18,21 @@ class MyPeriodicTask(PeriodicTask):
 
     def run(self, **kwargs):
         return 42
-registry.tasks.register(MyPeriodicTask)
 
 
 class TestDatabaseBackend(unittest.TestCase):
 
-    def test_run_periodic_tasks(self):
-        #obj, created = PeriodicTaskMeta.objects.get_or_create(
-        #                    name=MyPeriodicTask.name,
-        #                    defaults={"last_run_at": datetime.now() -
-        #                        timedelta(days=-4)})
-        #if not created:
-        #    obj.last_run_at = datetime.now() - timedelta(days=4)
-        #    obj.save()
-        b = Backend()
-        b.run_periodic_tasks()
-
     def test_backend(self):
-        b = Backend()
+        b = DatabaseBackend()
         tid = gen_unique_id()
 
-        self.assertFalse(b.is_done(tid))
+        self.assertFalse(b.is_successful(tid))
         self.assertEquals(b.get_status(tid), "PENDING")
         self.assertTrue(b.get_result(tid) is None)
 
         b.mark_as_done(tid, 42)
-        self.assertTrue(b.is_done(tid))
-        self.assertEquals(b.get_status(tid), "DONE")
+        self.assertTrue(b.is_successful(tid))
+        self.assertEquals(b.get_status(tid), "SUCCESS")
         self.assertEquals(b.get_result(tid), 42)
         self.assertTrue(b._cache.get(tid))
         self.assertTrue(b.get_result(tid), 42)
@@ -64,7 +51,7 @@ class TestDatabaseBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         b.mark_as_failure(tid3, exception)
-        self.assertFalse(b.is_done(tid3))
+        self.assertFalse(b.is_successful(tid3))
         self.assertEquals(b.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(b.get_result(tid3), KeyError))
 

+ 156 - 0
celery/tests/test_backends/test_redis.py

@@ -0,0 +1,156 @@
+from __future__ import with_statement
+
+import sys
+import unittest
+import errno
+
+from django.core.exceptions import ImproperlyConfigured
+
+from celery.backends import pyredis
+from celery.backends.pyredis import RedisBackend
+from celery.utils import gen_unique_id
+
+_no_redis_msg = "* Redis %s. Will not execute related tests."
+_no_redis_msg_emitted = False
+
+
+class SomeClass(object):
+
+    def __init__(self, data):
+        self.data = data
+
+
+def get_redis_or_None():
+
+    def emit_no_redis_msg(reason):
+        global _no_redis_msg_emitted
+        if not _no_redis_msg_emitted:
+            sys.stderr.write("\n" + _no_redis_msg % reason + "\n")
+            _no_redis_msg_emitted = True
+
+    if pyredis.redis is None:
+        return emit_no_redis_msg("not installed")
+    try:
+        tb = RedisBackend(redis_db="celery_unittest")
+        try:
+            tb.open()
+        except pyredis.redis.ConnectionError, exc:
+            return emit_no_redis_msg("not running")
+        return tb
+    except ImproperlyConfigured, exc:
+        if "need to install" in str(exc):
+            return emit_no_redis_msg("not installed")
+        return emit_no_redis_msg("not configured")
+
+
+class TestRedisBackend(unittest.TestCase):
+
+    def test_cached_connection(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return # Skip test
+
+        self.assertTrue(tb._connection is not None)
+        tb.close()
+        self.assertTrue(tb._connection is None)
+        tb.open()
+        self.assertTrue(tb._connection is not None)
+
+    def test_mark_as_done(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return
+
+        tid = gen_unique_id()
+
+        self.assertFalse(tb.is_successful(tid))
+        self.assertEquals(tb.get_status(tid), "PENDING")
+        self.assertEquals(tb.get_result(tid), None)
+
+        tb.mark_as_done(tid, 42)
+        self.assertTrue(tb.is_successful(tid))
+        self.assertEquals(tb.get_status(tid), "SUCCESS")
+        self.assertEquals(tb.get_result(tid), 42)
+        self.assertTrue(tb._cache.get(tid))
+        self.assertTrue(tb.get_result(tid), 42)
+
+    def test_is_pickled(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return
+
+        tid2 = gen_unique_id()
+        result = {"foo": "baz", "bar": SomeClass(12345)}
+        tb.mark_as_done(tid2, result)
+        # is serialized properly.
+        rindb = tb.get_result(tid2)
+        self.assertEquals(rindb.get("foo"), "baz")
+        self.assertEquals(rindb.get("bar").data, 12345)
+
+    def test_mark_as_failure(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return
+
+        tid3 = gen_unique_id()
+        try:
+            raise KeyError("foo")
+        except KeyError, exception:
+            pass
+        tb.mark_as_failure(tid3, exception)
+        self.assertFalse(tb.is_successful(tid3))
+        self.assertEquals(tb.get_status(tid3), "FAILURE")
+        self.assertTrue(isinstance(tb.get_result(tid3), KeyError))
+
+    def test_process_cleanup(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return
+
+        tb.process_cleanup()
+
+        self.assertTrue(tb._connection is None)
+
+    def test_connection_close_if_connected(self):
+        tb = get_redis_or_None()
+        if not tb:
+            return
+
+        tb.open()
+        self.assertTrue(tb._connection is not None)
+        tb.close()
+        self.assertTrue(tb._connection is None)
+        tb.close()
+        self.assertTrue(tb._connection is None)
+
+
+class TestRedisBackendNoRedis(unittest.TestCase):
+
+    def test_redis_None_if_redis_not_installed(self):
+        from celery.tests.utils import mask_modules
+        prev = sys.modules.pop("celery.backends.pyredis")
+        with mask_modules("redis"):
+            from celery.backends.pyredis import redis
+            self.assertTrue(redis is None)
+        sys.modules["celery.backends.pyredis"] = prev
+
+    def test_constructor_raises_if_redis_not_installed(self):
+        from celery.backends import pyredis
+        prev = pyredis.redis
+        pyredis.redis = None
+        try:
+            self.assertRaises(ImproperlyConfigured, pyredis.RedisBackend)
+        finally:
+            pyredis.redis = prev
+
+    def test_constructor_raises_if_not_host_or_port(self):
+        from celery.backends import pyredis
+        prev_host = pyredis.RedisBackend.redis_host
+        prev_port = pyredis.RedisBackend.redis_port
+        pyredis.RedisBackend.redis_host = None
+        pyredis.RedisBackend.redis_port = None
+        try:
+            self.assertRaises(ImproperlyConfigured, pyredis.RedisBackend)
+        finally:
+            pyredis.RedisBackend.redis_host = prev_host
+            pyredis.RedisBackend.redis_port = prev_port

+ 13 - 14
celery/tests/test_backends/test_tyrant.py

@@ -3,12 +3,11 @@ import unittest
 import errno
 import socket
 from celery.backends import tyrant
-from celery.backends.tyrant import Backend as TyrantBackend
-from django.conf import settings
+from celery.backends.tyrant import TyrantBackend
 from celery.utils import gen_unique_id
 from django.core.exceptions import ImproperlyConfigured
 
-_no_tyrant_msg = "* Tokyo Tyrant not running. Will not execute related tests."
+_no_tyrant_msg = "* Tokyo Tyrant %s. Will not execute related tests."
 _no_tyrant_msg_emitted = False
 
 
@@ -20,28 +19,28 @@ class SomeClass(object):
 
 def get_tyrant_or_None():
 
-    def emit_no_tyrant_msg():
+    def emit_no_tyrant_msg(reason):
         global _no_tyrant_msg_emitted
         if not _no_tyrant_msg_emitted:
-            sys.stderr.write("\n" + _no_tyrant_msg + "\n")
+            sys.stderr.write("\n" + _no_tyrant_msg % reason + "\n")
             _no_tyrant_msg_emitted = True
 
     if tyrant.pytyrant is None:
-        emit_no_tyrant_msg()
-        return None
+        return emit_no_tyrant_msg("not installed")
     try:
         tb = TyrantBackend()
         try:
             tb.open()
         except socket.error, exc:
             if exc.errno == errno.ECONNREFUSED:
-                emit_no_tyrant_msg()
-                return None
+                return emit_no_tyrant_msg("not running")
             else:
                 raise
         return tb
     except ImproperlyConfigured, exc:
-        return None
+        if "need to install" in str(exc):
+            return emit_no_tyrant_msg("not installed")
+        return emit_no_tyrant_msg("not configured")
 
 
 class TestTyrantBackend(unittest.TestCase):
@@ -64,13 +63,13 @@ class TestTyrantBackend(unittest.TestCase):
 
         tid = gen_unique_id()
 
-        self.assertFalse(tb.is_done(tid))
+        self.assertFalse(tb.is_successful(tid))
         self.assertEquals(tb.get_status(tid), "PENDING")
         self.assertEquals(tb.get_result(tid), None)
 
         tb.mark_as_done(tid, 42)
-        self.assertTrue(tb.is_done(tid))
-        self.assertEquals(tb.get_status(tid), "DONE")
+        self.assertTrue(tb.is_successful(tid))
+        self.assertEquals(tb.get_status(tid), "SUCCESS")
         self.assertEquals(tb.get_result(tid), 42)
         self.assertTrue(tb._cache.get(tid))
         self.assertTrue(tb.get_result(tid), 42)
@@ -99,7 +98,7 @@ class TestTyrantBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         tb.mark_as_failure(tid3, exception)
-        self.assertFalse(tb.is_done(tid3))
+        self.assertFalse(tb.is_successful(tid3))
         self.assertEquals(tb.get_status(tid3), "FAILURE")
         self.assertTrue(isinstance(tb.get_result(tid3), KeyError))
 

+ 213 - 0
celery/tests/test_beat.py

@@ -0,0 +1,213 @@
+import unittest
+import logging
+from datetime import datetime, timedelta
+
+from celery import log
+from celery import beat
+from celery import conf
+from celery.utils import gen_unique_id
+from celery.task.base import PeriodicTask
+from celery.registry import TaskRegistry
+from celery.result import AsyncResult
+
+
+class MockShelve(dict):
+    closed = False
+    synced = False
+
+    def close(self):
+        self.closed = True
+
+    def sync(self):
+        self.synced = True
+
+
+class MockClockService(object):
+    started = False
+    stopped = False
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def start(self, **kwargs):
+        self.started = True
+
+    def stop(self, **kwargs):
+        self.stopped = True
+
+
+class DuePeriodicTask(PeriodicTask):
+    run_every = timedelta(seconds=1)
+    applied = False
+
+    def is_due(self, *args, **kwargs):
+        return True, 100
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        self.applied = True
+        return AsyncResult(gen_unique_id())
+
+
+class DuePeriodicTaskRaising(PeriodicTask):
+    run_every = timedelta(seconds=1)
+    applied = False
+
+    def is_due(self, *args, **kwargs):
+        return True, 0
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        raise Exception("FoozBaaz")
+
+
+class PendingPeriodicTask(PeriodicTask):
+    run_every = timedelta(seconds=1)
+    applied = False
+
+    def is_due(self, *args, **kwargs):
+        return False, 100
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        self.applied = True
+        return AsyncResult(gen_unique_id())
+
+
+class AdditionalTask(PeriodicTask):
+    run_every = timedelta(days=7)
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        raise Exception("FoozBaaz")
+
+
+class TestScheduleEntry(unittest.TestCase):
+
+    def test_constructor(self):
+        s = beat.ScheduleEntry(DuePeriodicTask.name)
+        self.assertEquals(s.name, DuePeriodicTask.name)
+        self.assertTrue(isinstance(s.last_run_at, datetime))
+        self.assertEquals(s.total_run_count, 0)
+
+        now = datetime.now()
+        s = beat.ScheduleEntry(DuePeriodicTask.name, now, 300)
+        self.assertEquals(s.name, DuePeriodicTask.name)
+        self.assertEquals(s.last_run_at, now)
+        self.assertEquals(s.total_run_count, 300)
+
+    def test_next(self):
+        s = beat.ScheduleEntry(DuePeriodicTask.name, None, 300)
+        n = s.next()
+        self.assertEquals(n.name, s.name)
+        self.assertEquals(n.total_run_count, 301)
+        self.assertTrue(n.last_run_at > s.last_run_at)
+
+    def test_is_due(self):
+        due = beat.ScheduleEntry(DuePeriodicTask.name)
+        pending = beat.ScheduleEntry(PendingPeriodicTask.name)
+
+        self.assertTrue(due.is_due(DuePeriodicTask())[0])
+        self.assertFalse(pending.is_due(PendingPeriodicTask())[0])
+
+
+class TestScheduler(unittest.TestCase):
+
+    def setUp(self):
+        self.registry = TaskRegistry()
+        self.registry.register(DuePeriodicTask)
+        self.registry.register(PendingPeriodicTask)
+        self.scheduler = beat.Scheduler(self.registry,
+                                        max_interval=0.0001,
+                                        logger=log.get_default_logger())
+
+    def test_constructor(self):
+        s = beat.Scheduler()
+        self.assertTrue(isinstance(s.registry, TaskRegistry))
+        self.assertTrue(isinstance(s.schedule, dict))
+        self.assertTrue(isinstance(s.logger, logging.Logger))
+        self.assertEquals(s.max_interval, conf.CELERYBEAT_MAX_LOOP_INTERVAL)
+
+    def test_cleanup(self):
+        self.scheduler.schedule["fbz"] = beat.ScheduleEntry("fbz")
+        self.scheduler.cleanup()
+        self.assertTrue("fbz" not in self.scheduler.schedule)
+
+    def test_schedule_registry(self):
+        self.registry.register(AdditionalTask)
+        self.scheduler.schedule_registry()
+        self.assertTrue(AdditionalTask.name in self.scheduler.schedule)
+
+    def test_apply_async(self):
+        due_task = self.registry[DuePeriodicTask.name]
+        self.scheduler.apply_async(self.scheduler[due_task.name])
+        self.assertTrue(due_task.applied)
+
+    def test_apply_async_raises_SchedulingError_on_error(self):
+        self.registry.register(AdditionalTask)
+        self.scheduler.schedule_registry()
+        add_task = self.registry[AdditionalTask.name]
+        self.assertRaises(beat.SchedulingError,
+                          self.scheduler.apply_async,
+                          self.scheduler[add_task.name])
+
+    def test_is_due(self):
+        due = self.scheduler[DuePeriodicTask.name]
+        pending = self.scheduler[PendingPeriodicTask.name]
+
+        self.assertTrue(self.scheduler.is_due(due)[0])
+        self.assertFalse(self.scheduler.is_due(pending)[0])
+
+    def test_tick(self):
+        self.scheduler.schedule.pop(DuePeriodicTaskRaising.name, None)
+        self.registry.pop(DuePeriodicTaskRaising.name, None)
+        self.assertEquals(self.scheduler.tick(),
+                            self.scheduler.max_interval)
+
+    def test_quick_schedulingerror(self):
+        self.registry.register(DuePeriodicTaskRaising)
+        self.scheduler.schedule_registry()
+        self.assertEquals(self.scheduler.tick(),
+                            self.scheduler.max_interval)
+
+
+class TestClockService(unittest.TestCase):
+
+    def test_start(self):
+        s = beat.ClockService()
+        sh = MockShelve()
+        s.open_schedule = lambda *a, **kw: sh
+
+        self.assertTrue(isinstance(s.schedule, dict))
+        self.assertTrue(isinstance(s.schedule, dict))
+        self.assertTrue(isinstance(s.scheduler, beat.Scheduler))
+        self.assertTrue(isinstance(s.scheduler, beat.Scheduler))
+
+        self.assertTrue(s.schedule is sh)
+        self.assertTrue(s._schedule is sh)
+
+        s._in_sync = False
+        s.sync()
+        self.assertTrue(sh.closed)
+        self.assertTrue(sh.synced)
+        self.assertTrue(s._stopped.isSet())
+        s.sync()
+
+        s.stop(wait=False)
+        self.assertTrue(s._shutdown.isSet())
+        s.stop(wait=True)
+        self.assertTrue(s._shutdown.isSet())
+
+
+class TestClockServiceThread(unittest.TestCase):
+
+    def test_start_stop(self):
+        s = beat.ClockServiceThread()
+        self.assertTrue(isinstance(s.clockservice, beat.ClockService))
+        s.clockservice = MockClockService()
+
+        s.run()
+        self.assertTrue(s.clockservice.started)
+
+        s.stop()
+        self.assertTrue(s.clockservice.stopped)
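
The mock tasks driving these tests follow the ordinary PeriodicTask pattern
that the celerybeat Scheduler ticks over; a minimal sketch of a real periodic
task, with an illustrative interval and body:

    from datetime import timedelta
    from celery.task.base import PeriodicTask

    class EveryFiveMinutes(PeriodicTask):       # hypothetical example task
        run_every = timedelta(minutes=5)

        def run(self, **kwargs):
            logger = self.get_logger(**kwargs)
            logger.info("Running periodic task!")
            return 42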

+ 233 - 0
celery/tests/test_buckets.py

@@ -0,0 +1,233 @@
+import os
+import sys
+sys.path.insert(0, os.getcwd())
+import time
+import unittest
+from itertools import chain, izip
+
+from billiard.utils.functional import curry
+
+from celery.task.base import Task
+from celery.utils import gen_unique_id
+from celery.tests.utils import skip_if_environ
+from celery.worker import buckets
+from celery.registry import TaskRegistry
+
+skip_if_disabled = curry(skip_if_environ("SKIP_RLIMITS"))
+
+
+class MockJob(object):
+
+    def __init__(self, task_id, task_name, args, kwargs):
+        self.task_id = task_id
+        self.task_name = task_name
+        self.args = args
+        self.kwargs = kwargs
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return bool(self.task_id == other.task_id \
+                    and self.task_name == other.task_name \
+                    and self.args == other.args \
+                    and self.kwargs == other.kwargs)
+        else:
+            return NotImplemented
+
+    def __repr__(self):
+        return "<MockJob: task:%s id:%s args:%s kwargs:%s" % (
+                self.task_name, self.task_id, self.args, self.kwargs)
+
+
+class TestTokenBucketQueue(unittest.TestCase):
+
+    @skip_if_disabled
+    def test_empty_queue_yields_QueueEmpty(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        self.assertRaises(buckets.QueueEmpty, x.get)
+
+    @skip_if_disabled
+    def test_bucket__put_get(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        x.put("The quick brown fox")
+        self.assertEquals(x.get(), "The quick brown fox")
+
+        x.put_nowait("The lazy dog")
+        time.sleep(0.2)
+        self.assertEquals(x.get_nowait(), "The lazy dog")
+
+    @skip_if_disabled
+    def test_fill_rate(self):
+        x = buckets.TokenBucketQueue(fill_rate=10)
+        # 20 items should take at least one second to complete
+        time_start = time.time()
+        [x.put(str(i)) for i in xrange(20)]
+        for i in xrange(20):
+            sys.stderr.write("x")
+            x.wait()
+        self.assertTrue(time.time() - time_start > 1.5)
+
+    @skip_if_disabled
+    def test_can_consume(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put("The quick brown fox")
+        self.assertEqual(x.get(), "The quick brown fox")
+        time.sleep(0.1)
+        # Not yet ready for another token
+        x.put("The lazy dog")
+        self.assertRaises(x.RateLimitExceeded, x.get)
+
+    @skip_if_disabled
+    def test_expected_time(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put_nowait("The quick brown fox")
+        self.assertEqual(x.get_nowait(), "The quick brown fox")
+        self.assertTrue(x.expected_time())
+
+    @skip_if_disabled
+    def test_qsize(self):
+        x = buckets.TokenBucketQueue(fill_rate=1)
+        x.put("The quick brown fox")
+        self.assertEqual(x.qsize(), 1)
+        self.assertTrue(x.get_nowait(), "The quick brown fox")
+
+
+class TestRateLimitString(unittest.TestCase):
+
+    @skip_if_disabled
+    def test_conversion(self):
+        self.assertEquals(buckets.parse_ratelimit_string(999), 999)
+        self.assertEquals(buckets.parse_ratelimit_string("1456/s"), 1456)
+        self.assertEquals(buckets.parse_ratelimit_string("100/m"),
+                          100 / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("10/h"),
+                          10 / 60.0 / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("0xffec/s"), 0xffec)
+        self.assertEquals(buckets.parse_ratelimit_string("0xcda/m"),
+                          0xcda / 60.0)
+        self.assertEquals(buckets.parse_ratelimit_string("0xF/h"),
+                          0xf / 60.0 / 60.0)
+
+        for zero in ("0x0", "0b0", "0o0", 0, None, "0/m", "0/h", "0/s"):
+            self.assertEquals(buckets.parse_ratelimit_string(zero), 0)
+
+
+class TaskA(Task):
+    rate_limit = 10
+
+
+class TaskB(Task):
+    rate_limit = None
+
+
+class TaskC(Task):
+    rate_limit = "1/s"
+
+
+class TaskD(Task):
+    rate_limit = "1000/m"
+
+
+class TestTaskBuckets(unittest.TestCase):
+
+    def setUp(self):
+        self.registry = TaskRegistry()
+        self.task_classes = (TaskA, TaskB, TaskC)
+        for task_cls in self.task_classes:
+            self.registry.register(task_cls)
+
+    @skip_if_disabled
+    def test_auto_add_on_missing(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        for task_cls in self.task_classes:
+            self.assertTrue(task_cls.name in b.buckets.keys())
+        self.registry.register(TaskD)
+        self.assertTrue(b.get_bucket_for_type(TaskD.name))
+        self.assertTrue(TaskD.name in b.buckets.keys())
+        self.registry.unregister(TaskD)
+
+    @skip_if_disabled
+    def test_has_rate_limits(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        self.assertEqual(b.buckets[TaskA.name].fill_rate, 10)
+        self.assertTrue(isinstance(b.buckets[TaskB.name], buckets.Queue))
+        self.assertEqual(b.buckets[TaskC.name].fill_rate, 1)
+        self.registry.register(TaskD)
+        b.init_with_registry()
+        try:
+            self.assertEqual(b.buckets[TaskD.name].fill_rate, 1000 / 60.0)
+        finally:
+            self.registry.unregister(TaskD)
+
+    @skip_if_disabled
+    def test_on_empty_buckets__get_raises_empty(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        self.assertRaises(buckets.QueueEmpty, b.get)
+        self.assertEqual(b.qsize(), 0)
+
+    @skip_if_disabled
+    def test_put__get(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+        job = MockJob(gen_unique_id(), TaskA.name, ["theqbf"], {"foo": "bar"})
+        b.put(job)
+        self.assertEquals(b.get(), job)
+
+    @skip_if_disabled
+    def test_fill_rate(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+
+        cjob = lambda i: MockJob(gen_unique_id(), TaskA.name, [i], {})
+        jobs = [cjob(i) for i in xrange(20)]
+        [b.put(job) for job in jobs]
+
+        self.assertEqual(b.qsize(), 20)
+
+        # 20 items should take at least one second to complete
+        time_start = time.time()
+        for i, job in enumerate(jobs):
+            sys.stderr.write("i")
+            self.assertEqual(b.get(), job)
+        self.assertTrue(time.time() - time_start > 1.5)
+
+    @skip_if_disabled
+    def test__very_busy_queue_doesnt_block_others(self):
+        b = buckets.TaskBucket(task_registry=self.registry)
+
+        cjob = lambda i, t: MockJob(gen_unique_id(), t.name, [i], {})
+        ajobs = [cjob(i, TaskA) for i in xrange(10)]
+        bjobs = [cjob(i, TaskB) for i in xrange(20)]
+        jobs = list(chain(*izip(bjobs, ajobs)))
+        map(b.put, jobs)
+
+        got_ajobs = 0
+        for job in (b.get() for i in xrange(20)):
+            if job.task_name == TaskA.name:
+                got_ajobs += 1
+
+        self.assertTrue(got_ajobs > 2)
+
+    @skip_if_disabled
+    def test_thorough__multiple_types(self):
+        self.registry.register(TaskD)
+        try:
+            b = buckets.TaskBucket(task_registry=self.registry)
+
+            cjob = lambda i, t: MockJob(gen_unique_id(), t.name, [i], {})
+
+            ajobs = [cjob(i, TaskA) for i in xrange(10)]
+            bjobs = [cjob(i, TaskB) for i in xrange(10)]
+            cjobs = [cjob(i, TaskC) for i in xrange(10)]
+            djobs = [cjob(i, TaskD) for i in xrange(10)]
+
+            # Spread the jobs around.
+            jobs = list(chain(*izip(ajobs, bjobs, cjobs, djobs)))
+
+            [b.put(job) for job in jobs]
+            for i, job in enumerate(jobs):
+                sys.stderr.write("0")
+                self.assertTrue(b.get(), job)
+            self.assertEqual(i+1, len(jobs))
+        finally:
+            self.registry.unregister(TaskD)
+
+if __name__ == "__main__":
+    unittest.main()
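
The buckets under test route ready tasks by name and throttle each task type
according to its rate_limit attribute; a short sketch of the per-task
declaration and of the strings parse_ratelimit_string accepts (the task
itself is illustrative):

    from celery.task.base import Task
    from celery.worker.buckets import parse_ratelimit_string

    class SendEmailTask(Task):                  # hypothetical task
        rate_limit = "100/m"                    # roughly 100 tasks per minute

        def run(self, to, **kwargs):
            pass

    parse_ratelimit_string("100/m")             # -> 100 / 60.0 tokens/second
    parse_ratelimit_string(None)                # -> 0, i.e. no rate limit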

+ 13 - 24
celery/tests/test_conf.py

@@ -4,42 +4,31 @@ from django.conf import settings
 
 
 SETTING_VARS = (
-    ("CELERY_AMQP_CONSUMER_QUEUE", "AMQP_CONSUMER_QUEUE",
-        "DEFAULT_AMQP_CONSUMER_QUEUE"),
-    ("CELERY_AMQP_PUBLISHER_ROUTING_KEY", "AMQP_PUBLISHER_ROUTING_KEY",
-        "DEFAULT_AMQP_PUBLISHER_ROUTING_KEY"),
-    ("CELERY_AMQP_CONSUMER_ROUTING_KEY", "AMQP_CONSUMER_ROUTING_KEY",
-        "DEFAULT_AMQP_CONSUMER_ROUTING_KEY"),
-    ("CELERY_AMQP_EXCHANGE_TYPE", "AMQP_EXCHANGE_TYPE",
-        "DEFAULT_AMQP_EXCHANGE_TYPE"),
-    ("CELERY_AMQP_EXCHANGE", "AMQP_EXCHANGE",
-        "DEFAULT_AMQP_EXCHANGE"),
-    ("CELERYD_CONCURRENCY", "DAEMON_CONCURRENCY",
-        "DEFAULT_DAEMON_CONCURRENCY"),
-    ("CELERYD_PID_FILE", "DAEMON_PID_FILE",
-        "DEFAULT_DAEMON_PID_FILE"),
-    ("CELERYD_LOG_FILE", "DAEMON_LOG_FILE",
-        "DEFAULT_DAEMON_LOG_FILE"),
-    ("CELERYD_DAEMON_LOG_FORMAT", "LOG_FORMAT",
-        "DEFAULT_LOG_FMT"),
+    ("CELERY_DEFAULT_QUEUE", "DEFAULT_QUEUE"),
+    ("CELERY_DEFAULT_ROUTING_KEY", "DEFAULT_ROUTING_KEY"),
+    ("CELERY_DEFAULT_EXCHANGE_TYPE", "DEFAULT_EXCHANGE_TYPE"),
+    ("CELERY_DEFAULT_EXCHANGE", "DEFAULT_EXCHANGE"),
+    ("CELERYD_CONCURRENCY", "CELERYD_CONCURRENCY"),
+    ("CELERYD_LOG_FILE", "CELERYD_LOG_FILE"),
+    ("CELERYD_LOG_FORMAT", "CELERYD_LOG_FORMAT"),
 )
 
 
 class TestConf(unittest.TestCase):
 
-    def assertDefaultSetting(self, setting_name, result_var, default_var):
+    def assertDefaultSetting(self, setting_name, result_var):
         if hasattr(settings, setting_name):
             self.assertEquals(getattr(conf, result_var),
                               getattr(settings, setting_name),
                               "Overwritten setting %s is written to %s" % (
                                   setting_name, result_var))
         else:
-            self.assertEqual(getattr(conf, default_var),
+            self.assertEqual(conf._DEFAULTS.get(setting_name),
                              getattr(conf, result_var),
                              "Default setting %s is written to %s" % (
-                                 default_var, result_var))
+                                 setting_name, result_var))
 
     def test_configuration_cls(self):
-        for setting_name, result_var, default_var in SETTING_VARS:
-            self.assertDefaultSetting(setting_name, result_var, default_var)
-        self.assertTrue(isinstance(conf.DAEMON_LOG_LEVEL, int))
+        for setting_name, result_var in SETTING_VARS:
+            self.assertDefaultSetting(setting_name, result_var)
+        self.assertTrue(isinstance(conf.CELERYD_LOG_LEVEL, int))
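
The renamed settings exercised above map directly onto attributes of
celery.conf; a minimal sketch of overriding a few of them in a Django
settings.py (the values are illustrative):

    # settings.py
    CELERY_DEFAULT_QUEUE = "celery"
    CELERY_DEFAULT_ROUTING_KEY = "celery"
    CELERYD_CONCURRENCY = 4
    CELERYD_LOG_FILE = "celeryd.log"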

+ 80 - 1
celery/tests/test_datastructures.py

@@ -1,7 +1,10 @@
 import unittest
 import sys
+from Queue import Queue
 
 from celery.datastructures import PositionQueue, ExceptionInfo
+from celery.datastructures import LimitedSet, consume_queue
+from celery.datastructures import SharedCounter
 
 
 class TestPositionQueue(unittest.TestCase):
@@ -44,8 +47,84 @@ class TestExceptionInfo(unittest.TestCase):
             exc_info = sys.exc_info()
 
         einfo = ExceptionInfo(exc_info)
-        self.assertEquals(str(einfo), "The quick brown fox jumps...")
+        self.assertEquals(str(einfo), einfo.traceback)
         self.assertTrue(isinstance(einfo.exception, LookupError))
         self.assertEquals(einfo.exception.args,
                 ("The quick brown fox jumps...", ))
         self.assertTrue(einfo.traceback)
+
+        r = repr(einfo)
+        self.assertTrue(r)
+
+
+class TestUtilities(unittest.TestCase):
+
+    def test_consume_queue(self):
+        x = Queue()
+        it = consume_queue(x)
+        self.assertRaises(StopIteration, it.next)
+        x.put("foo")
+        it = consume_queue(x)
+        self.assertEquals(it.next(), "foo")
+        self.assertRaises(StopIteration, it.next)
+
+
+class TestSharedCounter(unittest.TestCase):
+
+    def test_initial_value(self):
+        self.assertEquals(int(SharedCounter(10)), 10)
+
+    def test_increment(self):
+        c = SharedCounter(10)
+        c.increment()
+        self.assertEquals(int(c), 11)
+        c.increment(2)
+        self.assertEquals(int(c), 13)
+
+    def test_decrement(self):
+        c = SharedCounter(10)
+        c.decrement()
+        self.assertEquals(int(c), 9)
+        c.decrement(2)
+        self.assertEquals(int(c), 7)
+
+    def test_iadd(self):
+        c = SharedCounter(10)
+        c += 10
+        self.assertEquals(int(c), 20)
+
+    def test_isub(self):
+        c = SharedCounter(10)
+        c -= 20
+        self.assertEquals(int(c), -10)
+
+    def test_repr(self):
+        self.assertTrue(repr(SharedCounter(10)).startswith("<SharedCounter:"))
+
+
+class TestLimitedSet(unittest.TestCase):
+
+    def test_add(self):
+        s = LimitedSet(maxlen=2)
+        s.add("foo")
+        s.add("bar")
+        for n in "foo", "bar":
+            self.assertTrue(n in s)
+        s.add("baz")
+        for n in "bar", "baz":
+            self.assertTrue(n in s)
+        self.assertTrue("foo" not in s)
+
+    def test_iter(self):
+        s = LimitedSet(maxlen=2)
+        items = "foo", "bar"
+        map(s.add, items)
+        l = list(iter(s))
+        for item in items:
+            self.assertTrue(item in l)
+
+    def test_repr(self):
+        s = LimitedSet(maxlen=2)
+        items = "foo", "bar"
+        map(s.add, items)
+        self.assertTrue(repr(s).startswith("LimitedSet("))

+ 2 - 3
celery/tests/test_discovery.py

@@ -1,6 +1,6 @@
 import unittest
 from django.conf import settings
-from celery.discovery import autodiscover
+from celery.loaders.djangoapp import autodiscover
 from celery.task import tasks
 
 
@@ -9,7 +9,6 @@ class TestDiscovery(unittest.TestCase):
     def assertDiscovery(self):
         apps = autodiscover()
         self.assertTrue(apps)
-        tasks.autodiscover()
         self.assertTrue("c.unittest.SomeAppTask" in tasks)
         self.assertEquals(tasks["c.unittest.SomeAppTask"].run(), 42)
 
@@ -21,4 +20,4 @@ class TestDiscovery(unittest.TestCase):
         if "someapp" in settings.INSTALLED_APPS:
             settings.INSTALLED_APPS = settings.INSTALLED_APPS + \
                     ["xxxnot.aexist"]
-            self.assertDiscovery()
+            self.assertRaises(ImportError, autodiscover)

+ 75 - 0
celery/tests/test_events.py

@@ -0,0 +1,75 @@
+import unittest
+
+from celery import events
+
+
+class MockPublisher(object):
+
+    def __init__(self, *args, **kwargs):
+        self.sent = []
+
+    def send(self, msg, *args, **kwargs):
+        self.sent.append(msg)
+
+    def close(self):
+        pass
+
+    def has_event(self, kind):
+        for event in self.sent:
+            if event["type"] == kind:
+                return event
+        return False
+
+
+class TestEvent(unittest.TestCase):
+
+    def test_constructor(self):
+        event = events.Event("world war II")
+        self.assertEquals(event["type"], "world war II")
+        self.assertTrue(event["timestamp"])
+
+
+class TestEventDispatcher(unittest.TestCase):
+
+    def test_send(self):
+        publisher = MockPublisher()
+        eventer = events.EventDispatcher(object(), publisher=publisher)
+
+        eventer.publisher = publisher
+        eventer.enabled = True
+        eventer.send("World War II", ended=True)
+        self.assertTrue(publisher.has_event("World War II"))
+
+
+class TestEventReceiver(unittest.TestCase):
+
+    def test_process(self):
+
+        message = {"type": "world-war"}
+
+        got_event = [False]
+
+        def my_handler(event):
+            got_event[0] = True
+
+        r = events.EventReceiver(object(), handlers={
+                                    "world-war": my_handler})
+        r._receive(message, object())
+        self.assertTrue(got_event[0])
+
+    def test_catch_all_event(self):
+
+        message = {"type": "world-war"}
+
+        got_event = [False]
+
+        def my_handler(event):
+            got_event[0] = True
+
+        r = events.EventReceiver(object())
+        events.EventReceiver.handlers["*"] = my_handler
+        try:
+            r._receive(message, object())
+            self.assertTrue(got_event[0])
+        finally:
+            events.EventReceiver.handlers = {}

+ 127 - 0
celery/tests/test_loaders.py

@@ -0,0 +1,127 @@
+import os
+import sys
+import unittest
+
+from billiard.utils.functional import wraps
+
+from celery import loaders
+from celery.loaders import base
+from celery.loaders import djangoapp
+from celery.loaders import default
+from celery.tests.utils import with_environ
+
+
+class TestLoaders(unittest.TestCase):
+
+    def test_get_loader_cls(self):
+
+        self.assertEquals(loaders.get_loader_cls("django"),
+                          loaders.DjangoLoader)
+        self.assertEquals(loaders.get_loader_cls("default"),
+                          loaders.DefaultLoader)
+        # Execute cached branch.
+        self.assertEquals(loaders.get_loader_cls("django"),
+                          loaders.DjangoLoader)
+        self.assertEquals(loaders.get_loader_cls("default"),
+                          loaders.DefaultLoader)
+
+    @with_environ("CELERY_LOADER", "default")
+    def test_detect_loader_CELERY_LOADER(self):
+        self.assertEquals(loaders.detect_loader(), loaders.DefaultLoader)
+
+
+class DummyLoader(base.BaseLoader):
+
+    class Config(object):
+
+        def __init__(self, **kwargs):
+            for attr, val in kwargs.items():
+                setattr(self, attr, val)
+
+    def read_configuration(self):
+        return self.Config(foo="bar", CELERY_IMPORTS=("os", "sys"))
+
+
+class TestLoaderBase(unittest.TestCase):
+
+    def setUp(self):
+        self.loader = DummyLoader()
+
+    def test_handlers_pass(self):
+        self.loader.on_task_init("foo.task", "feedface-cafebabe")
+        self.loader.on_worker_init()
+
+    def test_import_task_module(self):
+        import sys
+        self.assertEquals(sys, self.loader.import_task_module("sys"))
+
+    def test_conf_property(self):
+        self.assertEquals(self.loader.conf.foo, "bar")
+        self.assertEquals(self.loader._conf_cache.foo, "bar")
+        self.assertEquals(self.loader.conf.foo, "bar")
+
+    def test_import_default_modules(self):
+        import os
+        import sys
+        self.assertEquals(self.loader.import_default_modules(), [os, sys])
+
+
+class TestDjangoLoader(unittest.TestCase):
+
+    def setUp(self):
+        self.loader = loaders.DjangoLoader()
+
+    def test_on_worker_init(self):
+        from django.conf import settings
+        old_imports = settings.CELERY_IMPORTS
+        settings.CELERY_IMPORTS = ("xxx.does.not.exist", )
+        try:
+            self.assertRaises(ImportError, self.loader.on_worker_init)
+        finally:
+            settings.CELERY_IMPORTS = old_imports
+
+    def test_race_protection(self):
+        djangoapp._RACE_PROTECTION = True
+        try:
+            self.assertFalse(self.loader.on_worker_init())
+        finally:
+            djangoapp._RACE_PROTECTION = False
+
+    def test_find_related_module_no_path(self):
+        self.assertFalse(djangoapp.find_related_module("sys", "tasks"))
+
+    def test_find_related_module_no_related(self):
+        self.assertFalse(djangoapp.find_related_module("someapp",
+                                                       "frobulators"))
+
+
+class TestDefaultLoader(unittest.TestCase):
+
+    def test_wanted_module_item(self):
+        self.assertTrue(default.wanted_module_item("FOO"))
+        self.assertTrue(default.wanted_module_item("foo"))
+        self.assertFalse(default.wanted_module_item("_foo"))
+        self.assertFalse(default.wanted_module_item("__foo"))
+
+    def test_read_configuration(self):
+        from types import ModuleType
+
+        class ConfigModule(ModuleType):
+            pass
+
+        celeryconfig = ConfigModule("celeryconfig")
+        celeryconfig.CELERY_IMPORTS = ("os", "sys")
+
+        sys.modules["celeryconfig"] = celeryconfig
+        try:
+            l = default.Loader()
+            settings = l.read_configuration()
+            self.assertEquals(settings.CELERY_IMPORTS, ("os", "sys"))
+            from django.conf import settings
+            settings.configured = False
+            settings = l.read_configuration()
+            self.assertEquals(settings.CELERY_IMPORTS, ("os", "sys"))
+            self.assertTrue(settings.configured)
+            l.on_worker_init()
+        finally:
+            sys.modules.pop("celeryconfig", None)

+ 54 - 15
celery/tests/test_log.py

@@ -1,29 +1,39 @@
 from __future__ import with_statement
+
 import os
 import sys
 import logging
 import unittest
-import multiprocessing
+from tempfile import mktemp
 from StringIO import StringIO
-from celery.log import setup_logger, emergency_error
+from contextlib import contextmanager
+
+from carrot.utils import rpartition
+
+from celery.log import (setup_logger, emergency_error,
+                        redirect_stdouts_to_logger, LoggingProxy)
 from celery.tests.utils import override_stdouts
-from tempfile import mktemp
+
+
+@contextmanager
+def wrap_logger(logger, loglevel=logging.ERROR):
+    old_handlers = logger.handlers
+    sio = StringIO()
+    siohandler = logging.StreamHandler(sio)
+    logger.handlers = [siohandler]
+
+    try:
+        yield sio
+    finally:
+        logger.handlers = old_handlers
 
 
 class TestLog(unittest.TestCase):
 
     def _assertLog(self, logger, logmsg, loglevel=logging.ERROR):
-        # Save old handlers
-        old_handler = logger.handlers[0]
-        logger.removeHandler(old_handler)
-        sio = StringIO()
-        siohandler = logging.StreamHandler(sio)
-        logger.addHandler(siohandler)
-        logger.log(loglevel, logmsg)
-        logger.removeHandler(siohandler)
-        # Reset original handlers
-        logger.addHandler(old_handler)
-        return sio.getvalue().strip()
+
+        with wrap_logger(logger, loglevel=loglevel) as sio:
+            logger.log(loglevel, logmsg)
+            return sio.getvalue().strip()
 
     def assertDidLogTrue(self, logger, logmsg, reason, loglevel=None):
         val = self._assertLog(logger, logmsg, loglevel=loglevel)
@@ -51,7 +61,7 @@ class TestLog(unittest.TestCase):
     def test_emergency_error(self):
         sio = StringIO()
         emergency_error(sio, "Testing emergency error facility")
-        self.assertEquals(sio.getvalue().rpartition(":")[2].strip(),
+        self.assertEquals(rpartition(sio.getvalue(), ":")[2].strip(),
                              "Testing emergency error facility")
 
     def test_setup_logger_no_handlers_stream(self):
@@ -85,3 +95,32 @@ class TestLog(unittest.TestCase):
         with open(tempfile, "r") as tempfilefh:
             self.assertTrue("Vandelay Industries" in "".join(tempfilefh))
         os.unlink(tempfile)
+
+    def test_redirect_stdouts(self):
+        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
+        try:
+            with wrap_logger(logger) as sio:
+                redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
+                logger.error("foo")
+                self.assertTrue("foo" in sio.getvalue())
+        finally:
+            sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
+
+    def test_logging_proxy(self):
+        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
+        with wrap_logger(logger) as sio:
+            p = LoggingProxy(logger)
+            p.close()
+            p.write("foo")
+            self.assertTrue("foo" not in sio.getvalue())
+            p.closed = False
+            p.write("foo")
+            self.assertTrue("foo" in sio.getvalue())
+            lines = ["baz", "xuzzy"]
+            p.writelines(lines)
+            for line in lines:
+                self.assertTrue(line in sio.getvalue())
+            p.flush()
+            p.close()
+            self.assertFalse(p.isatty())
+            self.assertTrue(p.fileno() is None)

+ 1 - 1
celery/tests/test_messaging.py

@@ -1,5 +1,5 @@
 import unittest
-from celery.messaging import MSG_OPTIONS, get_msg_options, extract_msg_options
+from celery.messaging import MSG_OPTIONS, extract_msg_options
 
 
 class TestMsgOptions(unittest.TestCase):

+ 8 - 33
celery/tests/test_models.py

@@ -1,14 +1,8 @@
 import unittest
 from datetime import datetime, timedelta
-from celery.models import TaskMeta, TaskSetMeta, PeriodicTaskMeta
-from celery.task import PeriodicTask
-from celery.registry import tasks
-from celery.utils import gen_unique_id
-
 
-class TestPeriodicTask(PeriodicTask):
-    name = "celery.unittest.test_models.test_periodic_task"
-    run_every = timedelta(minutes=30)
+from celery.utils import gen_unique_id
+from celery.models import TaskMeta, TaskSetMeta
 
 
 class TestModels(unittest.TestCase):
@@ -23,11 +17,6 @@ class TestModels(unittest.TestCase):
         tasksetmeta, created = TaskSetMeta.objects.get_or_create(taskset_id=id)
         return tasksetmeta
 
-    def createPeriodicTaskMeta(self, name):
-        ptaskmeta, created = PeriodicTaskMeta.objects.get_or_create(name=name,
-                defaults={"last_run_at": datetime.now()})
-        return ptaskmeta
-
     def test_taskmeta(self):
         m1 = self.createTaskMeta()
         m2 = self.createTaskMeta()
@@ -38,11 +27,11 @@ class TestModels(unittest.TestCase):
 
         self.assertEquals(TaskMeta.objects.get_task(m1.task_id).task_id,
                 m1.task_id)
-        self.assertFalse(TaskMeta.objects.is_done(m1.task_id))
-        TaskMeta.objects.store_result(m1.task_id, True, status="DONE")
-        TaskMeta.objects.store_result(m2.task_id, True, status="DONE")
-        self.assertTrue(TaskMeta.objects.is_done(m1.task_id))
-        self.assertTrue(TaskMeta.objects.is_done(m2.task_id))
+        self.assertFalse(TaskMeta.objects.is_successful(m1.task_id))
+        TaskMeta.objects.store_result(m1.task_id, True, status="SUCCESS")
+        TaskMeta.objects.store_result(m2.task_id, True, status="SUCCESS")
+        self.assertTrue(TaskMeta.objects.is_successful(m1.task_id))
+        self.assertTrue(TaskMeta.objects.is_successful(m2.task_id))
 
         # Have to avoid save() because it applies the auto_now=True.
         TaskMeta.objects.filter(task_id=m1.task_id).update(
@@ -55,7 +44,7 @@ class TestModels(unittest.TestCase):
 
         TaskMeta.objects.delete_expired()
         self.assertFalse(m1 in TaskMeta.objects.all())
-    
+
     def test_tasksetmeta(self):
         m1 = self.createTaskSetMeta()
         m2 = self.createTaskSetMeta()
@@ -78,17 +67,3 @@ class TestModels(unittest.TestCase):
 
         TaskSetMeta.objects.delete_expired()
         self.assertFalse(m1 in TaskSetMeta.objects.all())
-
-    def test_periodic_taskmeta(self):
-        tasks.register(TestPeriodicTask)
-        p = self.createPeriodicTaskMeta(TestPeriodicTask.name)
-        # check that repr works.
-        self.assertTrue(unicode(p).startswith("<PeriodicTask:"))
-        self.assertFalse(p in PeriodicTaskMeta.objects.get_waiting_tasks())
-        p.last_run_at = datetime.now() - (TestPeriodicTask.run_every +
-                timedelta(seconds=10))
-        p.save()
-        self.assertTrue(p in PeriodicTaskMeta.objects.get_waiting_tasks())
-        self.assertTrue(isinstance(p.task, TestPeriodicTask))
-
-        p.delay()

+ 0 - 96
celery/tests/test_monitoring.py

@@ -1,96 +0,0 @@
-from __future__ import with_statement
-import unittest
-import time
-from celery.monitoring import TaskTimerStats, Statistics, StatsCollector
-from carrot.connection import DjangoBrokerConnection
-from celery.messaging import StatsConsumer
-from celery.tests.utils import override_stdouts
-
-
-class PartialStatistics(Statistics):
-    type = "c.u.partial"
-
-
-class TestStatisticsInterface(unittest.TestCase):
-
-    def test_must_have_type(self):
-        self.assertRaises(NotImplementedError, Statistics)
-
-    def test_must_have_on_start(self):
-        self.assertRaises(NotImplementedError, PartialStatistics().on_start)
-
-    def test_must_have_on_stop(self):
-        self.assertRaises(NotImplementedError, PartialStatistics().on_stop)
-
-
-class TestTaskTimerStats(unittest.TestCase):
-
-    def test_time(self):
-        self.assertTimeElapsed(0.5, 1, 0, "0.5")
-        self.assertTimeElapsed(0.002, 0.05, 0, "0.0")
-        self.assertTimeElapsed(0.1, 0.5, 0, "0.1")
-
-    def test_not_enabled(self):
-        t = TaskTimerStats()
-        t.enabled = False
-        self.assertFalse(t.publish(isnot="enabled"))
-        self.assertFalse(getattr(t, "time_start", None))
-        t.run("foo", "bar", [], {})
-        t.stop()
-
-    def assertTimeElapsed(self, time_sleep, max_appx, min_appx, appx):
-        t = TaskTimerStats()
-        t.enabled = True
-        t.run("foo", "bar", [], {})
-        self.assertTrue(t.time_start)
-        time.sleep(time_sleep)
-        time_stop = t.stop()
-        self.assertTrue(time_stop)
-        self.assertFalse(time_stop > max_appx)
-        self.assertFalse(time_stop <= min_appx)
-
-        strstop = str(time_stop)[0:3]
-        # Time elapsed is approximately 0.1 seconds.
-        self.assertTrue(strstop == appx)
-
-
-class TestStatsCollector(unittest.TestCase):
-
-    def setUp(self):
-        conn = DjangoBrokerConnection()
-        consumer = StatsConsumer(connection=conn)
-        consumer.discard_all()
-        conn.close()
-        consumer.close()
-        self.s = StatsCollector()
-        self.assertEquals(self.s.total_tasks_processed, 0)
-        self.assertEquals(self.s.total_tasks_processed_by_type, {})
-        self.assertEquals(self.s.total_task_time_running, 0.0)
-        self.assertEquals(self.s.total_task_time_running_by_type, {})
-
-    def test_collect_report_dump(self):
-        timer1 = TaskTimerStats()
-        timer1.enabled = True
-        timer1.run("foo", "bar", [], {})
-        timer2 = TaskTimerStats()
-        timer2.enabled = True
-        timer2.run("foo", "bar", [], {})
-        timer3 = TaskTimerStats()
-        timer3.enabled = True
-        timer3.run("foo", "bar", [], {})
-        for timer in (timer1, timer2, timer3):
-            timer.stop()
-
-        # Collect
-        self.s.collect()
-        self.assertEquals(self.s.total_tasks_processed, 3)
-
-        # Report
-        with override_stdouts() as outs:
-            stdout, stderr = outs
-            self.s.report()
-            self.assertTrue(
-                "Total processing time by task type:" in stdout.getvalue())
-
-        # Dump to cache
-        self.s.dump_to_cache()

+ 2 - 1
celery/tests/test_pickle.py

@@ -1,5 +1,6 @@
 import unittest
-from celery.serialization import pickle
+
+from billiard.serialization import pickle
 
 
 class RegularException(Exception):

+ 1 - 2
celery/tests/test_pool.py

@@ -2,7 +2,7 @@ import unittest
 import logging
 import itertools
 import time
-from celery.pool import TaskPool
+from celery.worker.pool import TaskPool
 from celery.datastructures import ExceptionInfo
 import sys
 
@@ -12,7 +12,6 @@ def do_something(i):
 
 
 def long_something():
-    import time
     time.sleep(1)
 
 

+ 6 - 21
celery/tests/test_registry.py

@@ -1,9 +1,8 @@
 import unittest
+
 from celery import registry
 from celery.task import Task, PeriodicTask
 
-FUNC_TASK_NAME = "celery.unittest.func_task"
-
 
 class TestTask(Task):
     name = "celery.unittest.test_task"
@@ -20,23 +19,17 @@ class TestPeriodicTask(PeriodicTask):
         return True
 
 
-def func_task(**kwargs):
-    return True
-
-
 class TestTaskRegistry(unittest.TestCase):
 
     def assertRegisterUnregisterCls(self, r, task):
         self.assertRaises(r.NotRegistered, r.unregister, task)
         r.register(task)
         self.assertTrue(task.name in r)
-        self.assertRaises(r.AlreadyRegistered, r.register, task)
 
     def assertRegisterUnregisterFunc(self, r, task, task_name):
         self.assertRaises(r.NotRegistered, r.unregister, task_name)
         r.register(task, task_name)
         self.assertTrue(task_name in r)
-        self.assertRaises(r.AlreadyRegistered, r.register, task, task_name)
 
     def test_task_registry(self):
         r = registry.TaskRegistry()
@@ -44,37 +37,29 @@ class TestTaskRegistry(unittest.TestCase):
                 "TaskRegistry has composited dict")
 
         self.assertRegisterUnregisterCls(r, TestTask)
-        self.assertRegisterUnregisterFunc(r, func_task, FUNC_TASK_NAME)
         self.assertRegisterUnregisterCls(r, TestPeriodicTask)
 
-        tasks = r.get_all()
+        tasks = dict(r)
         self.assertTrue(isinstance(tasks.get(TestTask.name), TestTask))
         self.assertTrue(isinstance(tasks.get(TestPeriodicTask.name),
                                    TestPeriodicTask))
-        self.assertEquals(tasks.get(FUNC_TASK_NAME), func_task)
 
-        regular = r.get_all_regular()
+        regular = r.regular()
         self.assertTrue(TestTask.name in regular)
         self.assertFalse(TestPeriodicTask.name in regular)
-        self.assertTrue(FUNC_TASK_NAME in regular)
 
-        periodic = r.get_all_periodic()
+        periodic = r.periodic()
         self.assertFalse(TestTask.name in periodic)
         self.assertTrue(TestPeriodicTask.name in periodic)
-        self.assertFalse(FUNC_TASK_NAME in periodic)
 
-        self.assertTrue(isinstance(r.get_task(TestTask.name), TestTask))
-        self.assertTrue(isinstance(r.get_task(TestPeriodicTask.name),
+        self.assertTrue(isinstance(r[TestTask.name], TestTask))
+        self.assertTrue(isinstance(r[TestPeriodicTask.name],
                                    TestPeriodicTask))
-        self.assertEquals(r.get_task(FUNC_TASK_NAME), func_task)
 
         r.unregister(TestTask)
         self.assertFalse(TestTask.name in r)
         r.unregister(TestPeriodicTask)
         self.assertFalse(TestPeriodicTask.name in r)
-        r.unregister(FUNC_TASK_NAME)
-        self.assertFalse(FUNC_TASK_NAME in r)
 
-        self.assertTrue(func_task())
         self.assertTrue(TestTask().run())
         self.assertTrue(TestPeriodicTask().run())

+ 105 - 18
celery/tests/test_result.py

@@ -1,9 +1,11 @@
 import unittest
-from celery.backends import default_backend
-from celery.result import AsyncResult
-from celery.result import TaskSetResult
-from celery.result import TimeoutError
+
 from celery.utils import gen_unique_id
+from celery.tests.utils import skip_if_quick
+from celery.result import AsyncResult, TaskSetResult
+from celery.backends import default_backend
+from celery.exceptions import TimeoutError
+from celery.task.base import Task
 
 
 def mock_task(name, status, result):
@@ -11,14 +13,19 @@ def mock_task(name, status, result):
 
 
 def save_result(task):
-    if task["status"] == "DONE":
+    traceback = "Some traceback"
+    if task["status"] == "SUCCESS":
         default_backend.mark_as_done(task["id"], task["result"])
+    elif task["status"] == "RETRY":
+        default_backend.mark_as_retry(task["id"], task["result"],
+                traceback=traceback)
     else:
-        default_backend.mark_as_failure(task["id"], task["result"])
+        default_backend.mark_as_failure(task["id"], task["result"],
+                traceback=traceback)
 
 
 def make_mock_taskset(size=10):
-    tasks = [mock_task("ts%d" % i, "DONE", i) for i in xrange(size)]
+    tasks = [mock_task("ts%d" % i, "SUCCESS", i) for i in xrange(size)]
     [save_result(task) for task in tasks]
     return [AsyncResult(task["id"]) for task in tasks]
 
@@ -26,26 +33,22 @@ def make_mock_taskset(size=10):
 class TestAsyncResult(unittest.TestCase):
 
     def setUp(self):
-        self.task1 = mock_task("task1", "DONE", "the")
-        self.task2 = mock_task("task2", "DONE", "quick")
+        self.task1 = mock_task("task1", "SUCCESS", "the")
+        self.task2 = mock_task("task2", "SUCCESS", "quick")
         self.task3 = mock_task("task3", "FAILURE", KeyError("brown"))
+        self.task4 = mock_task("task3", "RETRY", KeyError("red"))
 
-        for task in (self.task1, self.task2, self.task3):
+        for task in (self.task1, self.task2, self.task3, self.task4):
             save_result(task)
 
-    def test_is_done(self):
-        ok_res = AsyncResult(self.task1["id"])
-        nok_res = AsyncResult(self.task3["id"])
-
-        self.assertTrue(ok_res.is_done())
-        self.assertFalse(nok_res.is_done())
-
-    def test_sucessful(self):
+    def test_successful(self):
         ok_res = AsyncResult(self.task1["id"])
         nok_res = AsyncResult(self.task3["id"])
+        nok_res2 = AsyncResult(self.task4["id"])
 
         self.assertTrue(ok_res.successful())
         self.assertFalse(nok_res.successful())
+        self.assertFalse(nok_res2.successful())
 
     def test_str(self):
         ok_res = AsyncResult(self.task1["id"])
@@ -66,20 +69,62 @@ class TestAsyncResult(unittest.TestCase):
         self.assertEquals(repr(nok_res), "<AsyncResult: %s>" % (
                 self.task3["id"]))
 
+    def test_get_traceback(self):
+        ok_res = AsyncResult(self.task1["id"])
+        nok_res = AsyncResult(self.task3["id"])
+        nok_res2 = AsyncResult(self.task4["id"])
+        self.assertFalse(ok_res.traceback)
+        self.assertTrue(nok_res.traceback)
+        self.assertTrue(nok_res2.traceback)
+
     def test_get(self):
         ok_res = AsyncResult(self.task1["id"])
         ok2_res = AsyncResult(self.task2["id"])
         nok_res = AsyncResult(self.task3["id"])
+        nok2_res = AsyncResult(self.task4["id"])
 
         self.assertEquals(ok_res.get(), "the")
         self.assertEquals(ok2_res.get(), "quick")
         self.assertRaises(KeyError, nok_res.get)
+        self.assertTrue(isinstance(nok2_res.result, KeyError))
+
+    def test_get_timeout(self):
+        res = AsyncResult(self.task4["id"])  # has RETRY status
+        self.assertRaises(TimeoutError, res.get, timeout=0.1)
+
+    @skip_if_quick
+    def test_get_timeout_longer(self):
+        res = AsyncResult(self.task4["id"])  # has RETRY status
+        self.assertRaises(TimeoutError, res.get, timeout=1)
 
     def test_ready(self):
         oks = (AsyncResult(self.task1["id"]),
                AsyncResult(self.task2["id"]),
                AsyncResult(self.task3["id"]))
         [self.assertTrue(ok.ready()) for ok in oks]
+        self.assertFalse(AsyncResult(self.task4["id"]).ready())
+
+
+class MockAsyncResultFailure(AsyncResult):
+
+    @property
+    def result(self):
+        return KeyError("baz")
+
+    @property
+    def status(self):
+        return "FAILURE"
+
+
+class MockAsyncResultSuccess(AsyncResult):
+
+    @property
+    def result(self):
+        return 42
+
+    @property
+    def status(self):
+        return "SUCCESS"
 
 
 class TestTaskSetResult(unittest.TestCase):
@@ -91,6 +136,27 @@ class TestTaskSetResult(unittest.TestCase):
     def test_total(self):
         self.assertEquals(self.ts.total, self.size)
 
+    def test_iterate_raises(self):
+        ar = MockAsyncResultFailure(gen_unique_id())
+        ts = TaskSetResult(gen_unique_id(), [ar])
+        it = iter(ts)
+        self.assertRaises(KeyError, it.next)
+
+    def test_iterate_yields(self):
+        ar = MockAsyncResultSuccess(gen_unique_id())
+        ar2 = MockAsyncResultSuccess(gen_unique_id())
+        ts = TaskSetResult(gen_unique_id(), [ar, ar2])
+        it = iter(ts)
+        self.assertEquals(it.next(), 42)
+        self.assertEquals(it.next(), 42)
+
+    def test_join_timeout(self):
+        ar = MockAsyncResultSuccess(gen_unique_id())
+        ar2 = MockAsyncResultSuccess(gen_unique_id())
+        ar3 = AsyncResult(gen_unique_id())
+        ts = TaskSetResult(gen_unique_id(), [ar, ar2, ar3])
+        self.assertRaises(TimeoutError, ts.join, timeout=0.0000001)
+
     def test_itersubtasks(self):
 
         it = self.ts.itersubtasks()
@@ -192,3 +258,24 @@ class TestTaskSetPending(unittest.TestCase):
 
     def x_join(self):
         self.assertRaises(TimeoutError, self.ts.join, timeout=0.001)
+
+    @skip_if_quick
+    def x_join_longer(self):
+        self.assertRaises(TimeoutError, self.ts.join, timeout=1)
+
+
+class RaisingTask(Task):
+
+    def run(self, x, y):
+        raise KeyError("xy")
+
+
+class TestEagerResult(unittest.TestCase):
+
+    def test_wait_raises(self):
+        res = RaisingTask.apply(args=[3, 3])
+        self.assertRaises(KeyError, res.wait)
+
+    def test_revoke(self):
+        res = RaisingTask.apply(args=[3, 3])
+        self.assertFalse(res.revoke())

+ 3 - 3
celery/tests/test_serialization.py

@@ -7,9 +7,9 @@ class TestAAPickle(unittest.TestCase):
 
     def test_no_cpickle(self):
         from celery.tests.utils import mask_modules
-        prev = sys.modules.pop("celery.serialization")
+        prev = sys.modules.pop("billiard.serialization")
         with mask_modules("cPickle"):
-            from celery.serialization import pickle
+            from billiard.serialization import pickle
             import pickle as orig_pickle
             self.assertTrue(pickle.dumps is orig_pickle.dumps)
-        sys.modules["celery.serialization"] = prev
+        sys.modules["billiard.serialization"] = prev

+ 0 - 66
celery/tests/test_supervisor.py

@@ -1,66 +0,0 @@
-import unittest
-from celery.supervisor import OFASupervisor
-from celery.supervisor import TimeoutError, MaxRestartsExceededError
-
-
-def target_one(x, y, z):
-    return x * y * z
-
-
-class MockProcess(object):
-    _started = False
-    _stopped = False
-    _terminated = False
-    _joined = False
-    alive = True
-    timeout_on_is_alive = False
-
-    def __init__(self, target, args, kwargs):
-        self.target = target
-        self.args = args
-        self.kwargs = kwargs
-
-    def start(self):
-        self._stopped = False
-        self._started = True
-
-    def stop(self):
-        self._stopped = True
-        self._started = False
-
-    def terminate(self):
-        self._terminated = False
-
-    def is_alive(self):
-        if self._started and self.alive:
-            if self.timeout_on_is_alive:
-                raise TimeoutError("Supervised: timed out.")
-            return True
-        return False
-
-    def join(self, timeout=None):
-        self._joined = True
-
-
-class TestOFASupervisor(unittest.TestCase):
-
-    def test_init(self):
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={})
-        s.Process = MockProcess
-
-    def test_start(self):
-        MockProcess.alive = False
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={},
-                          max_restart_freq=0, max_restart_freq_time=0)
-        s.Process = MockProcess
-        self.assertRaises(MaxRestartsExceededError, s.start)
-        MockProcess.alive = True
-
-    def test_start_is_alive_timeout(self):
-        MockProcess.alive = True
-        MockProcess.timeout_on_is_alive = True
-        s = OFASupervisor(target=target_one, args=[2, 4, 8], kwargs={},
-                          max_restart_freq=0, max_restart_freq_time=0)
-        s.Process = MockProcess
-        self.assertRaises(MaxRestartsExceededError, s.start)
-        MockProcess.timeout_on_is_alive = False

+ 123 - 34
celery/tests/test_task.py

@@ -1,27 +1,37 @@
 import unittest
-import uuid
-import logging
 from StringIO import StringIO
+from datetime import datetime, timedelta
 
 from celery import task
-from celery import registry
-from celery.log import setup_logger
 from celery import messaging
 from celery.result import EagerResult
 from celery.backends import default_backend
-from datetime import datetime, timedelta
-
+from celery.decorators import task as task_dec
+from celery.worker.listener import parse_iso8601
+from celery.exceptions import RetryTaskError
 
-def return_True(self, **kwargs):
+def return_True(*args, **kwargs):
     # Task run functions can't be closures/lambdas, as they're pickled.
     return True
-registry.tasks.register(return_True, "cu.return-true")
+
+
+return_True_task = task_dec()(return_True)
 
 
 def raise_exception(self, **kwargs):
     raise Exception("%s error" % self.__class__)
 
 
+class MockApplyTask(task.Task):
+
+    def run(self, x, y):
+        return x * y
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        pass
+
+
 class IncrementCounterTask(task.Task):
     name = "c.unittest.increment_counter_task"
     count = 0
@@ -54,6 +64,26 @@ class RetryTask(task.Task):
             return self.retry(args=[arg1, arg2], kwargs=kwargs, countdown=0)
 
 
+class RetryTaskMockApply(task.Task):
+    max_retries = 3
+    iterations = 0
+    applied = 0
+
+    def run(self, arg1, arg2, kwarg=1, **kwargs):
+        self.__class__.iterations += 1
+
+        retries = kwargs["task_retries"]
+        if retries >= 3:
+            return arg1
+        else:
+            kwargs.update({"kwarg": kwarg})
+            return self.retry(args=[arg1, arg2], kwargs=kwargs, countdown=0)
+
+    @classmethod
+    def apply_async(self, *args, **kwargs):
+        self.applied = 1
+
+
 class MyCustomException(Exception):
     """Random custom exception."""
 
@@ -86,6 +116,22 @@ class TestTaskRetries(unittest.TestCase):
         self.assertEquals(result.get(), 0xFF)
         self.assertEquals(RetryTask.iterations, 4)
 
+    def test_retry_not_eager(self):
+        exc = Exception("baz")
+        try:
+            RetryTaskMockApply.retry(args=[4, 4], kwargs={},
+                                     exc=exc, throw=False)
+            self.assertTrue(RetryTaskMockApply.applied)
+        finally:
+            RetryTaskMockApply.applied = 0
+
+        try:
+            self.assertRaises(RetryTaskError, RetryTaskMockApply.retry,
+                    args=[4, 4], kwargs={}, exc=exc, throw=True)
+            self.assertTrue(RetryTaskMockApply.applied)
+        finally:
+            RetryTaskMockApply.applied = 0
+
     def test_retry_with_kwargs(self):
         RetryTaskCustomExc.max_retries = 3
         RetryTaskCustomExc.iterations = 0
@@ -117,12 +163,19 @@ class TestTaskRetries(unittest.TestCase):
         self.assertEquals(RetryTask.iterations, 2)
 
 
+class MockPublisher(object):
+
+    def __init__(self, *args, **kwargs):
+        self.kwargs = kwargs
+
+
 class TestCeleryTasks(unittest.TestCase):
 
     def createTaskCls(self, cls_name, task_name=None):
-        attrs = {}
+        attrs = {"__module__": self.__module__}
         if task_name:
             attrs["name"] = task_name
+
         cls = type(cls_name, (task.Task, ), attrs)
         cls.run = return_True
         return cls
@@ -166,7 +219,9 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertEquals(task_data["task"], task_name)
         task_kwargs = task_data.get("kwargs", {})
         if test_eta:
-            self.assertTrue(isinstance(task_data.get("eta"), datetime))
+            self.assertTrue(isinstance(task_data.get("eta"), basestring))
+            to_datetime = parse_iso8601(task_data.get("eta"))
+            self.assertTrue(isinstance(to_datetime, datetime))
         for arg_name, arg_value in kwargs.items():
             self.assertEquals(task_kwargs.get(arg_name), arg_value)
 
@@ -190,7 +245,6 @@ class TestCeleryTasks(unittest.TestCase):
         T2 = self.createTaskCls("T2")
         self.assertEquals(T2().name, "celery.tests.test_task.T2")
 
-        registry.tasks.register(T1)
         t1 = T1()
         consumer = t1.get_consumer()
         self.assertRaises(NotImplementedError, consumer.receive, "foo", "foo")
@@ -202,7 +256,7 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertNextTaskDataEquals(consumer, presult, t1.name)
 
         # With arguments.
-        presult2 = task.delay_task(t1.name, name="George Constanza")
+        presult2 = t1.apply_async(kwargs=dict(name="George Constanza"))
         self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                 name="George Constanza")
 
@@ -218,25 +272,30 @@ class TestCeleryTasks(unittest.TestCase):
         self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                 name="George Constanza", test_eta=True)
 
-        self.assertRaises(registry.tasks.NotRegistered, task.delay_task,
-                "some.task.that.should.never.exist.X.X.X.X.X")
-
         # Discarding all tasks.
         task.discard_all()
-        tid3 = task.delay_task(t1.name)
+        task.apply_async(t1)
         self.assertEquals(task.discard_all(), 1)
         self.assertTrue(consumer.fetch() is None)
 
-        self.assertFalse(task.is_done(presult.task_id))
-        self.assertFalse(presult.is_done())
+        self.assertFalse(presult.successful())
         default_backend.mark_as_done(presult.task_id, result=None)
-        self.assertTrue(task.is_done(presult.task_id))
-        self.assertTrue(presult.is_done())
-
+        self.assertTrue(presult.successful())
 
         publisher = t1.get_publisher()
         self.assertTrue(isinstance(publisher, messaging.TaskPublisher))
 
+    def test_get_publisher(self):
+        from celery.task import base
+        old_pub = base.TaskPublisher
+        base.TaskPublisher = MockPublisher
+        try:
+            p = IncrementCounterTask.get_publisher(exchange="foo",
+                                                   connection="bar")
+            self.assertEquals(p.kwargs["exchange"], "foo")
+        finally:
+            base.TaskPublisher = old_pub
+
     def test_get_logger(self):
         T1 = self.createTaskCls("T1", "c.unittest.t.t1")
         t1 = T1()
@@ -250,9 +309,9 @@ class TestTaskSet(unittest.TestCase):
     def test_function_taskset(self):
         from celery import conf
         conf.ALWAYS_EAGER = True
-        ts = task.TaskSet("cu.return-true", [
+        ts = task.TaskSet(return_True_task.name, [
             [[1], {}], [[2], {}], [[3], {}], [[4], {}], [[5], {}]])
-        res = ts.run()
+        res = ts.apply_async()
         self.assertEquals(res.join(), [True, True, True, True, True])
 
         conf.ALWAYS_EAGER = False
@@ -276,11 +335,11 @@ class TestTaskSet(unittest.TestCase):
 
         consumer = IncrementCounterTask().get_consumer()
         consumer.discard_all()
-        taskset_res = ts.run()
+        taskset_res = ts.apply_async()
         subtasks = taskset_res.subtasks
         taskset_id = taskset_res.taskset_id
         for subtask in subtasks:
-            m = consumer.decoder(consumer.fetch().body)
+            m = consumer.fetch().payload
             self.assertEquals(m.get("taskset"), taskset_id)
             self.assertEquals(m.get("task"), IncrementCounterTask.name)
             self.assertEquals(m.get("id"), subtask.task_id)
@@ -304,22 +363,52 @@ class TestTaskApply(unittest.TestCase):
         e = IncrementCounterTask.apply(kwargs={"increment_by": 4})
         self.assertEquals(e.get(), 6)
 
-        self.assertTrue(e.is_done())
-        self.assertTrue(e.is_ready())
+        self.assertTrue(e.successful())
+        self.assertTrue(e.ready())
         self.assertTrue(repr(e).startswith("<EagerResult:"))
 
         f = RaisingTask.apply()
-        self.assertTrue(f.is_ready())
-        self.assertFalse(f.is_done())
+        self.assertTrue(f.ready())
+        self.assertFalse(f.successful())
         self.assertTrue(f.traceback)
         self.assertRaises(KeyError, f.get)
 
 
-class TestPeriodicTask(unittest.TestCase):
+class MyPeriodic(task.PeriodicTask):
+    run_every = timedelta(hours=1)
 
-    def test_interface(self):
 
-        class MyPeriodicTask(task.PeriodicTask):
-            run_every = None
+class TestPeriodicTask(unittest.TestCase):
 
-        self.assertRaises(NotImplementedError, MyPeriodicTask)
+    def test_must_have_run_every(self):
+        self.assertRaises(NotImplementedError, type, "Foo",
+            (task.PeriodicTask, ), {"__module__": __name__})
+
+    def test_remaining_estimate(self):
+        self.assertTrue(isinstance(
+            MyPeriodic().remaining_estimate(datetime.now()),
+            timedelta))
+
+    def test_timedelta_seconds_returns_0_on_negative_time(self):
+        delta = timedelta(days=-2)
+        self.assertEquals(MyPeriodic().timedelta_seconds(delta), 0)
+
+    def test_timedelta_seconds(self):
+        deltamap = ((timedelta(seconds=1), 1),
+                    (timedelta(seconds=27), 27),
+                    (timedelta(minutes=3), 3 * 60),
+                    (timedelta(hours=4), 4 * 60 * 60),
+                    (timedelta(days=3), 3 * 86400))
+        for delta, seconds in deltamap:
+            self.assertEquals(MyPeriodic().timedelta_seconds(delta), seconds)
+
+    def test_is_due_not_due(self):
+        due, remaining = MyPeriodic().is_due(datetime.now())
+        self.assertFalse(due)
+        self.assertTrue(remaining > 60)
+
+    def test_is_due(self):
+        p = MyPeriodic()
+        due, remaining = p.is_due(datetime.now() - p.run_every)
+        self.assertTrue(due)
+        self.assertEquals(remaining, p.timedelta_seconds(p.run_every))

+ 4 - 2
celery/tests/test_task_builtins.py

@@ -1,7 +1,9 @@
 import unittest
-from celery.task.builtins import PingTask, DeleteExpiredTaskMetaTask
+
+from billiard.serialization import pickle
+
 from celery.task.base import ExecuteRemoteTask
-from celery.serialization import pickle
+from celery.task.builtins import PingTask, DeleteExpiredTaskMetaTask
 
 
 def some_func(i):

+ 63 - 0
celery/tests/test_task_control.py

@@ -0,0 +1,63 @@
+import unittest
+
+from celery.task import control
+from celery.task.builtins import PingTask
+from celery.utils import gen_unique_id
+
+
+class MockBroadcastPublisher(object):
+    sent = []
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def send(self, command, *args, **kwargs):
+        self.__class__.sent.append(command)
+
+    def close(self):
+        pass
+
+
+def with_mock_broadcast(fun):
+
+    def _mocked(*args, **kwargs):
+        old_pub = control.BroadcastPublisher
+        control.BroadcastPublisher = MockBroadcastPublisher
+        try:
+            return fun(*args, **kwargs)
+        finally:
+            MockBroadcastPublisher.sent = []
+            control.BroadcastPublisher = old_pub
+    return _mocked
+
+
+class TestBroadcast(unittest.TestCase):
+
+    @with_mock_broadcast
+    def test_broadcast(self):
+        control.broadcast("foobarbaz", arguments=[])
+        self.assertTrue("foobarbaz" in MockBroadcastPublisher.sent)
+
+    @with_mock_broadcast
+    def test_rate_limit(self):
+        control.rate_limit(PingTask.name, "100/m")
+        self.assertTrue("rate_limit" in MockBroadcastPublisher.sent)
+
+    @with_mock_broadcast
+    def test_revoke(self):
+        control.revoke("foozbaaz")
+        self.assertTrue("revoke" in MockBroadcastPublisher.sent)
+
+    @with_mock_broadcast
+    def test_revoke_from_result(self):
+        from celery.result import AsyncResult
+        AsyncResult("foozbazzbar").revoke()
+        self.assertTrue("revoke" in MockBroadcastPublisher.sent)
+
+    @with_mock_broadcast
+    def test_revoke_from_resultset(self):
+        from celery.result import TaskSetResult, AsyncResult
+        r = TaskSetResult(gen_unique_id(), map(AsyncResult, [gen_unique_id()
+                                                        for i in range(10)]))
+        r.revoke()
+        self.assertTrue("revoke" in MockBroadcastPublisher.sent)

+ 78 - 4
celery/tests/test_utils.py

@@ -1,6 +1,11 @@
 import sys
+import socket
 import unittest
-from celery.utils import chunks
+
+from billiard.utils.functional import wraps
+
+from celery import utils
+from celery.tests.utils import sleepdeprived
 
 
 class TestChunks(unittest.TestCase):
@@ -8,16 +13,85 @@ class TestChunks(unittest.TestCase):
     def test_chunks(self):
 
         # n == 2
-        x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
+        x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
         self.assertEquals(list(x),
             [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]])
 
         # n == 3
-        x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
+        x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
         self.assertEquals(list(x),
             [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]])
 
         # n == 2 (exact)
-        x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2)
+        x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2)
         self.assertEquals(list(x),
             [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
+
+
+class TestGenUniqueId(unittest.TestCase):
+
+    def test_gen_unique_id_without_ctypes(self):
+        from celery.tests.utils import mask_modules
+        old_utils = sys.modules.pop("celery.utils")
+        try:
+            with mask_modules("ctypes"):
+                from celery.utils import ctypes, gen_unique_id
+                self.assertTrue(ctypes is None)
+                uuid = gen_unique_id()
+                self.assertTrue(uuid)
+                self.assertTrue(isinstance(uuid, basestring))
+        finally:
+            sys.modules["celery.utils"] = old_utils
+
+
+class TestDivUtils(unittest.TestCase):
+
+    def test_repeatlast(self):
+        items = range(6)
+        it = utils.repeatlast(items)
+        for i in items:
+            self.assertEquals(it.next(), i)
+        for j in items:
+            self.assertEquals(it.next(), i)
+
+
+class TestRetryOverTime(unittest.TestCase):
+
+    def test_returns_retval_on_success(self):
+
+        def _fun(x, y):
+            return x * y
+
+        ret = utils.retry_over_time(_fun, (socket.error, ), args=[16, 16],
+                                    max_retries=3)
+
+        self.assertEquals(ret, 256)
+
+    @sleepdeprived
+    def test_raises_on_unlisted_exception(self):
+
+        def _fun(x, y):
+            raise KeyError("bar")
+
+        self.assertRaises(KeyError, utils.retry_over_time, _fun,
+                         (socket.error, ), args=[32, 32], max_retries=3)
+
+    @sleepdeprived
+    def test_retries_on_failure(self):
+
+        iterations = [0]
+
+        def _fun(x, y):
+            iterations[0] += 1
+            if iterations[0] == 3:
+                return x * y
+            raise socket.error("foozbaz")
+
+        ret = utils.retry_over_time(_fun, (socket.error, ), args=[32, 32],
+                                    max_retries=None)
+
+        self.assertEquals(iterations[0], 3)
+        self.assertEquals(ret, 1024)
+
+        self.assertRaises(socket.error, utils.retry_over_time,
+                        _fun, (socket.error, ), args=[32, 32], max_retries=1)

+ 62 - 0
celery/tests/test_utils_info.py

@@ -0,0 +1,62 @@
+import unittest
+
+from celery.utils import info
+
+RANDTEXT = """\
+The quick brown
+fox jumps
+over the
+lazy dog\
+"""
+
+RANDTEXT_RES = """\
+    The quick brown
+    fox jumps
+    over the
+    lazy dog\
+"""
+
+ROUTE = {"queue1": {
+            "exchange": "exchange1",
+            "exchange_type": "type1",
+            "binding_key": "bind1"},
+         "queue2": {
+            "exchange": "exchange2",
+            "exchange_type": "type2",
+            "binding_key": "bind2"}}
+
+
+ROUTE_FORMAT = """
+. queue1 -> exchange:exchange1 (type1) binding:bind1
+. queue2 -> exchange:exchange2 (type2) binding:bind2
+""".strip()
+
+
+class TestInfo(unittest.TestCase):
+
+    def test_humanize_seconds(self):
+        t = ((4 * 60 * 60 * 24, "4 days"),
+             (1 * 60 * 60 * 24, "1 day"),
+             (4 * 60 * 60, "4 hours"),
+             (1 * 60 * 60, "1 hour"),
+             (4 * 60, "4 minutes"),
+             (1 * 60, "1 minute"),
+             (4, "4.00 seconds"),
+             (1, "1.00 second"),
+             (4.3567631221, "4.36 seconds"),
+             (0, "now"))
+
+        for seconds, human in t:
+            self.assertEquals(info.humanize_seconds(seconds), human)
+
+        self.assertEquals(info.humanize_seconds(4, prefix="about "),
+                          "about 4.00 seconds")
+
+    def test_textindent(self):
+        self.assertEquals(info.textindent(RANDTEXT, 4), RANDTEXT_RES)
+
+    def test_format_routing_table(self):
+        self.assertEquals(info.format_routing_table(ROUTE), ROUTE_FORMAT)
+
+    def test_broker_info(self):
+        info.format_broker_info()

+ 129 - 0
celery/tests/test_views.py

@@ -0,0 +1,129 @@
+import sys
+import unittest
+
+from django.http import HttpResponse
+from django.test.client import Client
+from django.test.testcases import TestCase as DjangoTestCase
+from django.core.urlresolvers import reverse
+from django.template import TemplateDoesNotExist
+
+from anyjson import deserialize as JSON_load
+from billiard.utils.functional import curry
+
+from celery import conf
+from celery.utils import gen_unique_id, get_full_cls_name
+from celery.backends import default_backend
+from celery.exceptions import RetryTaskError
+from celery.decorators import task
+from celery.datastructures import ExceptionInfo
+
+def reversestar(name, **kwargs):
+    return reverse(name, kwargs=kwargs)
+
+task_is_successful = curry(reversestar, "celery-is_task_successful")
+task_status = curry(reversestar, "celery-task_status")
+task_apply = curry(reverse, "celery-apply")
+
+scratch = {}
+@task()
+def mytask(x, y):
+    ret = scratch["result"] = int(x) * int(y)
+    return ret
+
+
+def create_exception(name, base=Exception):
+    return type(name, (base, ), {})
+
+
+def catch_exception(exception):
+    try:
+        raise exception
+    except exception.__class__, exc:
+        exc = default_backend.prepare_exception(exc)
+        return exc, ExceptionInfo(sys.exc_info()).traceback
+
+
+class ViewTestCase(DjangoTestCase):
+
+    def assertJSONEquals(self, json, py):
+        json = isinstance(json, HttpResponse) and json.content or json
+        try:
+            self.assertEquals(JSON_load(json), py)
+        except TypeError, exc:
+            raise TypeError("%s: %s" % (exc, json))
+
+
+class TestTaskApply(ViewTestCase):
+
+    def test_apply(self):
+        conf.ALWAYS_EAGER = True
+        try:
+            ret = self.client.get(task_apply(kwargs={"task_name": mytask.name,
+                                                     "args": "4/4"}))
+            self.assertEquals(scratch["result"], 16)
+        finally:
+            conf.ALWAYS_EAGER = False
+
+    def test_apply_raises_404_on_unregistered_task(self):
+        conf.ALWAYS_EAGER = True
+        try:
+            name = "xxx.does.not.exist"
+            action = curry(self.client.get, task_apply(kwargs={
+                                                        "task_name": name,
+                                                        "args": "4/4"}))
+            self.assertRaises(TemplateDoesNotExist, action)
+        finally:
+            conf.ALWAYS_EAGER = False
+
+
+class TestTaskStatus(ViewTestCase):
+
+    def assertStatusForIs(self, status, res, traceback=None):
+        uuid = gen_unique_id()
+        default_backend.store_result(uuid, res, status,
+                                     traceback=traceback)
+        json = self.client.get(task_status(task_id=uuid))
+        expect = dict(id=uuid, status=status, result=res)
+        if status in default_backend.EXCEPTION_STATES:
+            instore = default_backend.get_result(uuid)
+            self.assertEquals(str(instore.args), str(res.args))
+            expect["result"] = str(res.args[0])
+            expect["exc"] = get_full_cls_name(res.__class__)
+            expect["traceback"] = traceback
+
+        self.assertJSONEquals(json, dict(task=expect))
+
+    def test_task_status_success(self):
+        self.assertStatusForIs("SUCCESS", "The quick brown fox")
+
+    def test_task_status_failure(self):
+        exc, tb = catch_exception(KeyError("foo"))
+        self.assertStatusForIs("FAILURE", exc, tb)
+
+    def test_task_status_retry(self):
+        oexc, _ = catch_exception(KeyError("Resource not available"))
+        exc, tb = catch_exception(RetryTaskError(str(oexc), oexc))
+        self.assertStatusForIs("RETRY", exc, tb)
+
+
+class TestTaskIsSuccessful(ViewTestCase):
+
+    def assertStatusForIs(self, status, outcome):
+        uuid = gen_unique_id()
+        result = gen_unique_id()
+        default_backend.store_result(uuid, result, status)
+        json = self.client.get(task_is_successful(task_id=uuid))
+        self.assertJSONEquals(json, {"task": {"id": uuid,
+                                              "executed": outcome}})
+
+    def test_is_successful_success(self):
+        self.assertStatusForIs("SUCCESS", True)
+
+    def test_is_successful_pending(self):
+        self.assertStatusForIs("PENDING", False)
+
+    def test_is_successful_failure(self):
+        self.assertStatusForIs("FAILURE", False)
+
+    def test_is_successful_retry(self):
+        self.assertStatusForIs("RETRY", False)

+ 221 - 53
celery/tests/test_worker.py

@@ -1,20 +1,61 @@
+from __future__ import with_statement
+
 import unittest
 from Queue import Queue, Empty
-from carrot.connection import BrokerConnection
-from celery.messaging import TaskConsumer
-from celery.worker.job import TaskWrapper
-from celery.worker import AMQPListener, WorkController
+from datetime import datetime, timedelta
 from multiprocessing import get_logger
+
+from carrot.connection import BrokerConnection
 from carrot.backends.base import BaseMessage
-from celery import registry
-from celery.serialization import pickle
-from celery.utils import gen_unique_id
-from datetime import datetime, timedelta
+from billiard.serialization import pickle
+
+from celery import conf
+from celery.utils import gen_unique_id, noop
+from celery.worker import WorkController
+from celery.worker.listener import CarrotListener, RUN, CLOSE
+from celery.worker.job import TaskWrapper
+from celery.worker.scheduler import Scheduler
+from celery.decorators import task as task_dec
+from celery.decorators import periodic_task as periodic_task_dec
+
+
+class PlaceHolder(object):
+    pass
+
 
+class MockControlDispatch(object):
+    commands = []
 
+    def dispatch_from_message(self, message):
+        self.commands.append(message.pop("command", None))
+
+
+class MockEventDispatcher(object):
+    sent = []
+    closed = False
+
+    def send(self, event, *args, **kwargs):
+        self.sent.append(event)
+
+    def close(self):
+        self.closed = True
+
+
+class MockHeart(object):
+    closed = False
+
+    def stop(self):
+        self.closed = True
+
+
+@task_dec()
 def foo_task(x, y, z, **kwargs):
     return x * y * z
-registry.tasks.register(foo_task, name="c.u.foo")
+
+
+@periodic_task_dec(run_every=60)
+def foo_periodic_task():
+    return "foo"
 
 
 class MockLogger(object):
@@ -74,123 +115,250 @@ class MockController(object):
 
 
 def create_message(backend, **data):
-    data["id"] = gen_unique_id()
+    data.setdefault("id", gen_unique_id())
     return BaseMessage(backend, body=pickle.dumps(dict(**data)),
                        content_type="application/x-python-serialize",
                        content_encoding="binary")
 
 
-class TestAMQPListener(unittest.TestCase):
+class TestCarrotListener(unittest.TestCase):
 
     def setUp(self):
-        self.bucket_queue = Queue()
-        self.hold_queue = Queue()
+        self.ready_queue = Queue()
+        self.eta_schedule = Scheduler(self.ready_queue)
         self.logger = get_logger()
         self.logger.setLevel(0)
 
+    def test_mainloop(self):
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+
+        class MockConnection(object):
+
+            def drain_events(self):
+                return "draining"
+
+        l.connection = PlaceHolder()
+        l.connection.connection = MockConnection()
+
+        it = l._mainloop()
+        self.assertEquals(it.next(), "draining")
+
+        records = {}
+        def create_recorder(key):
+            def _recorder(*args, **kwargs):
+                records[key] = True
+            return _recorder
+
+        l.task_consumer = PlaceHolder()
+        l.task_consumer.iterconsume = create_recorder("consume_tasks")
+        l.broadcast_consumer = PlaceHolder()
+        l.broadcast_consumer.register_callback = create_recorder(
+                                                    "broadcast_callback")
+        l.broadcast_consumer.iterconsume = create_recorder(
+                                             "consume_broadcast")
+        l.task_consumer.add_consumer = create_recorder("consumer_add")
+
+        records.clear()
+        self.assertEquals(l._detect_wait_method(), l._mainloop)
+        self.assertTrue(records.get("broadcast_callback"))
+        self.assertTrue(records.get("consume_broadcast"))
+        self.assertTrue(records.get("consume_tasks"))
+
+        records.clear()
+        l.connection.connection = PlaceHolder()
+        self.assertTrue(l._detect_wait_method() is l.task_consumer.iterconsume)
+        self.assertTrue(records.get("consumer_add"))
+
     def test_connection(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
 
-        c = l.reset_connection()
-        self.assertTrue(isinstance(l.amqp_connection, BrokerConnection))
+        l.reset_connection()
+        self.assertTrue(isinstance(l.connection, BrokerConnection))
 
         l.close_connection()
-        self.assertTrue(l.amqp_connection is None)
+        self.assertTrue(l.connection is None)
         self.assertTrue(l.task_consumer is None)
 
-        c = l.reset_connection()
-        self.assertTrue(isinstance(l.amqp_connection, BrokerConnection))
+        l.reset_connection()
+        self.assertTrue(isinstance(l.connection, BrokerConnection))
 
         l.stop()
-        self.assertTrue(l.amqp_connection is None)
+        self.assertTrue(l.connection is None)
         self.assertTrue(l.task_consumer is None)
 
+    def test_receive_message_control_command(self):
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        backend = MockBackend()
+        m = create_message(backend, control={"command": "shutdown"})
+        l.event_dispatcher = MockEventDispatcher()
+        l.control_dispatch = MockControlDispatch()
+        l.receive_message(m.decode(), m)
+        self.assertTrue("shutdown" in l.control_dispatch.commands)
+
+    def test_close_connection(self):
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        l._state = RUN
+        l.close_connection()
+
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        eventer = l.event_dispatcher = MockEventDispatcher()
+        heart = l.heart = MockHeart()
+        l._state = RUN
+        l.close_connection()
+        self.assertTrue(eventer.closed)
+        self.assertTrue(heart.closed)
+
+    def test_receive_message_unknown(self):
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        backend = MockBackend()
+        m = create_message(backend, unknown={"baz": "!!!"})
+        l.event_dispatcher = MockEventDispatcher()
+        l.control_dispatch = MockControlDispatch()
+        import warnings
+        with warnings.catch_warnings(record=True) as log:
+            l.receive_message(m.decode(), m)
+            self.assertTrue(log)
+            self.assertTrue("unknown message" in log[0].message.args[0])
+
     def test_receieve_message(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[2, 4, 8], kwargs={})
+        m = create_message(backend, task=foo_task.name,
+                           args=[2, 4, 8], kwargs={})
 
+        l.event_dispatcher = MockEventDispatcher()
         l.receive_message(m.decode(), m)
 
-        in_bucket = self.bucket_queue.get_nowait()
+        in_bucket = self.ready_queue.get_nowait()
         self.assertTrue(isinstance(in_bucket, TaskWrapper))
-        self.assertEquals(in_bucket.task_name, "c.u.foo")
+        self.assertEquals(in_bucket.task_name, foo_task.name)
         self.assertEquals(in_bucket.execute(), 2 * 4 * 8)
-        self.assertRaises(Empty, self.hold_queue.get_nowait)
+        self.assertTrue(self.eta_schedule.empty())
+
+    def test_receieve_message_eta_isoformat(self):
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        backend = MockBackend()
+        m = create_message(backend, task=foo_task.name,
+                           eta=datetime.now().isoformat(),
+                           args=[2, 4, 8], kwargs={})
+
+        l.event_dispatcher = MockEventDispatcher()
+        l.receive_message(m.decode(), m)
+
+        items = [entry[2] for entry in self.eta_schedule.queue]
+        found = 0
+        for item in items:
+            if item.task_name == foo_task.name:
+                found = True
+        self.assertTrue(found)
+
+    def test_revoke(self):
+        ready_queue = Queue()
+        l = CarrotListener(ready_queue, self.eta_schedule, self.logger,
+                           send_events=False)
+        backend = MockBackend()
+        id = gen_unique_id()
+        c = create_message(backend, control={"command": "revoke",
+                                             "task_id": id})
+        t = create_message(backend, task=foo_task.name, args=[2, 4, 8],
+                           kwargs={}, id=id)
+        l.event_dispatcher = MockEventDispatcher()
+        l.receive_message(c.decode(), c)
+        from celery.worker.revoke import revoked
+        self.assertTrue(id in revoked)
+
+        l.receive_message(t.decode(), t)
+        self.assertTrue(ready_queue.empty())
 
     def test_receieve_message_not_registered(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                          send_events=False)
         backend = MockBackend()
         m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
 
+        l.event_dispatcher = MockEventDispatcher()
         self.assertFalse(l.receive_message(m.decode(), m))
-        self.assertRaises(Empty, self.bucket_queue.get_nowait)
-        self.assertRaises(Empty, self.hold_queue.get_nowait)
+        self.assertRaises(Empty, self.ready_queue.get_nowait)
+        self.assertTrue(self.eta_schedule.empty())
 
     def test_receieve_message_eta(self):
-        l = AMQPListener(self.bucket_queue, self.hold_queue, self.logger)
+        l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
+                          send_events=False)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[2, 4, 8], kwargs={},
-                           eta=datetime.now() + timedelta(days=1))
-
+        m = create_message(backend, task=foo_task.name,
+                           args=[2, 4, 8], kwargs={},
+                           eta=(datetime.now() +
+                               timedelta(days=1)).isoformat())
+
+        l.reset_connection()
+        p, conf.BROKER_CONNECTION_RETRY = conf.BROKER_CONNECTION_RETRY, False
+        try:
+            l.reset_connection()
+        finally:
+            conf.BROKER_CONNECTION_RETRY = p
         l.receive_message(m.decode(), m)
 
-        in_hold = self.hold_queue.get_nowait()
-        self.assertEquals(len(in_hold), 3)
-        task, eta, on_accept = in_hold
+        in_hold = self.eta_schedule.queue[0]
+        self.assertEquals(len(in_hold), 4)
+        eta, priority, task, on_accept = in_hold
         self.assertTrue(isinstance(task, TaskWrapper))
-        self.assertTrue(isinstance(eta, datetime))
         self.assertTrue(callable(on_accept))
-        self.assertEquals(task.task_name, "c.u.foo")
+        self.assertEquals(task.task_name, foo_task.name)
         self.assertEquals(task.execute(), 2 * 4 * 8)
-        self.assertRaises(Empty, self.bucket_queue.get_nowait)
+        self.assertRaises(Empty, self.ready_queue.get_nowait)
 
 
 class TestWorkController(unittest.TestCase):
 
     def setUp(self):
-        self.worker = WorkController(concurrency=1, loglevel=0,
-                                     is_detached=False)
+        self.worker = WorkController(concurrency=1, loglevel=0)
         self.worker.logger = MockLogger()
 
     def test_attrs(self):
         worker = self.worker
-        self.assertTrue(isinstance(worker.bucket_queue, Queue))
-        self.assertTrue(isinstance(worker.hold_queue, Queue))
-        self.assertTrue(worker.periodic_work_controller)
+        self.assertTrue(isinstance(worker.eta_schedule, Scheduler))
+        self.assertTrue(worker.scheduler)
         self.assertTrue(worker.pool)
-        self.assertTrue(worker.amqp_listener)
+        self.assertTrue(worker.listener)
         self.assertTrue(worker.mediator)
         self.assertTrue(worker.components)
 
-    def test_safe_process_task(self):
+    def test_process_task(self):
         worker = self.worker
         worker.pool = MockPool()
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
-        worker.safe_process_task(task)
+        worker.process_task(task)
         worker.pool.stop()
 
-    def test_safe_process_task_raise_base(self):
+    def test_process_task_raise_base(self):
         worker = self.worker
         worker.pool = MockPool(raise_base=True)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
-        worker.safe_process_task(task)
+        worker.process_task(task)
         worker.pool.stop()
 
-    def test_safe_process_task_raise_regular(self):
+    def test_process_task_raise_regular(self):
         worker = self.worker
         worker.pool = MockPool(raise_regular=True)
         backend = MockBackend()
-        m = create_message(backend, task="c.u.foo", args=[4, 8, 10],
+        m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
                            kwargs={})
         task = TaskWrapper.from_message(m, m.decode())
-        worker.safe_process_task(task)
+        worker.process_task(task)
         worker.pool.stop()
 
     def test_start_stop(self):
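
The ETA test above hinges on one behavioural change: the ETA now travels in the
message as an ISO-8601 string (note the .isoformat() call) instead of a pickled
datetime, and the listener parses it back before putting the task on the
eta_schedule rather than the ready queue. A minimal stdlib-only sketch of that
round-trip, independent of celery (names are illustrative only; %f in strptime
needs Python 2.6+):

    from datetime import datetime, timedelta

    # Producer side: serialize the ETA the way the message above does.
    eta = (datetime.now() + timedelta(days=1)).isoformat()

    # Consumer side: parse it back before scheduling.
    try:
        parsed = datetime.strptime(eta, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        # isoformat() omits the fractional part when microseconds are zero.
        parsed = datetime.strptime(eta, "%Y-%m-%dT%H:%M:%S")

    # Still in the future, so the task is scheduled, not executed immediately.
    assert parsed > datetime.now()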

+ 58 - 0
celery/tests/test_worker_control.py

@@ -0,0 +1,58 @@
+import socket
+import unittest
+
+from celery.task.builtins import PingTask
+from celery.utils import gen_unique_id
+from celery.worker import control
+from celery.worker.revoke import revoked
+from celery.registry import tasks
+
+hostname = socket.gethostname()
+
+class TestControlPanel(unittest.TestCase):
+
+    def setUp(self):
+        self.panel = control.ControlDispatch(hostname=hostname)
+
+    def test_shutdown(self):
+        self.assertRaises(SystemExit, self.panel.execute, "shutdown")
+
+    def test_dump_tasks(self):
+        self.panel.execute("dump_tasks")
+
+    def test_rate_limit(self):
+        task = tasks[PingTask.name]
+        old_rate_limit = task.rate_limit
+        try:
+            self.panel.execute("rate_limit", kwargs=dict(
+                                                task_name=task.name,
+                                                rate_limit="100/m"))
+            self.assertEquals(task.rate_limit, "100/m")
+            self.panel.execute("rate_limit", kwargs=dict(
+                                                task_name=task.name,
+                                                rate_limit=0))
+            self.assertEquals(task.rate_limit, 0)
+        finally:
+            task.rate_limit = old_rate_limit
+
+    def test_rate_limit_nonexistant_task(self):
+        self.panel.execute("rate_limit", kwargs={
+                                "task_name": "xxxx.does.not.exist",
+                                "rate_limit": "1000/s"})
+
+    def test_unexposed_command(self):
+        self.panel.execute("foo", kwargs={})
+
+    def test_revoke(self):
+        uuid = gen_unique_id()
+        m = {"command": "revoke",
+             "destination": hostname,
+             "task_id": uuid}
+        self.panel.dispatch_from_message(m)
+        self.assertTrue(uuid in revoked)
+
+        m = {"command": "revoke",
+             "destination": "does.not.exist",
+             "task_id": uuid + "xxx"}
+        self.panel.dispatch_from_message(m)
+        self.assertTrue(uuid + "xxx" not in revoked)
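
Read as documentation, the tests above pin down the control-panel surface quite
well: a ControlDispatch is bound to the local hostname, execute() runs a command
by name with a kwargs dict, and dispatch_from_message() only acts when the
message's "destination" matches that hostname. A short usage sketch along those
lines (assumes this commit's celery tree is importable; everything else mirrors
the tests):

    import socket

    from celery.task.builtins import PingTask
    from celery.utils import gen_unique_id
    from celery.worker import control
    from celery.worker.revoke import revoked

    panel = control.ControlDispatch(hostname=socket.gethostname())

    # Run a command directly, passing its arguments as a dict.
    panel.execute("rate_limit",
                  kwargs={"task_name": PingTask.name, "rate_limit": "100/m"})

    # Dispatch from a raw broker message; a non-matching "destination"
    # is ignored, as the second revoke case above demonstrates.
    task_id = gen_unique_id()
    panel.dispatch_from_message({"command": "revoke",
                                 "destination": socket.gethostname(),
                                 "task_id": task_id})
    assert task_id in revoked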

+ 47 - 45
celery/tests/test_worker_controllers.py

@@ -1,25 +1,27 @@
-import unittest
 import time
-import multiprocessing
-from Queue import Queue, Empty
-from datetime import datetime, timedelta
+import unittest
+from Queue import Queue
 
-from celery.worker.controllers import Mediator, PeriodicWorkController
-from celery.worker.controllers import BackgroundThread
+from celery.utils import gen_unique_id
+from celery.worker.controllers import Mediator
+from celery.worker.controllers import BackgroundThread, ScheduleController
 
 
 class MockTask(object):
     task_id = 1234
     task_name = "mocktask"
+    acked = False
 
     def __init__(self, value, **kwargs):
         self.value = value
 
+    def on_ack(self):
+        self.acked = True
+
 
 class MyBackgroundThread(BackgroundThread):
 
     def on_iteration(self):
-        import time
         time.sleep(1)
 
 
@@ -48,8 +50,8 @@ class TestBackgroundThread(unittest.TestCase):
 class TestMediator(unittest.TestCase):
 
     def test_mediator_start__stop(self):
-        bucket_queue = Queue()
-        m = Mediator(bucket_queue, lambda t: t)
+        ready_queue = Queue()
+        m = Mediator(ready_queue, lambda t: t)
         m.start()
         self.assertFalse(m._shutdown.isSet())
         self.assertFalse(m._stopped.isSet())
@@ -59,57 +61,57 @@ class TestMediator(unittest.TestCase):
         self.assertTrue(m._stopped.isSet())
 
     def test_mediator_on_iteration(self):
-        bucket_queue = Queue()
+        ready_queue = Queue()
         got = {}
 
         def mycallback(value):
             got["value"] = value.value
 
-        m = Mediator(bucket_queue, mycallback)
-        bucket_queue.put(MockTask("George Constanza"))
+        m = Mediator(ready_queue, mycallback)
+        ready_queue.put(MockTask("George Constanza"))
 
         m.on_iteration()
 
         self.assertEquals(got["value"], "George Constanza")
 
+    def test_mediator_on_iteration_revoked(self):
+        ready_queue = Queue()
+        got = {}
 
-class TestPeriodicWorkController(unittest.TestCase):
-
-    def test_process_hold_queue(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.process_hold_queue()
+        def mycallback(value):
+            got["value"] = value.value
 
-        scratchpad = {}
+        m = Mediator(ready_queue, mycallback)
+        t = MockTask("Jerry Seinfeld")
+        t.task_id = gen_unique_id()
+        from celery.worker.revoke import revoked
+        revoked.add(t.task_id)
+        ready_queue.put(t)
 
-        def on_accept():
-            scratchpad["accepted"] = True
+        m.on_iteration()
 
-        hold_queue.put((MockTask("task1"),
-                        datetime.now() - timedelta(days=1),
-                        on_accept))
+        self.assertTrue("value" not in got)
+        self.assertTrue(t.acked)
 
-        m.process_hold_queue()
-        self.assertRaises(Empty, hold_queue.get_nowait)
-        self.assertTrue(scratchpad.get("accepted"))
-        self.assertEquals(bucket_queue.get_nowait().value, "task1")
-        tomorrow = datetime.now() + timedelta(days=1)
-        hold_queue.put((MockTask("task2"), tomorrow, on_accept))
-        m.process_hold_queue()
-        self.assertRaises(Empty, bucket_queue.get_nowait)
-        value, eta, on_accept = hold_queue.get_nowait()
-        self.assertEquals(value.value, "task2")
-        self.assertEquals(eta, tomorrow)
 
-    def test_run_periodic_tasks(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.run_periodic_tasks()
+class TestScheduleController(unittest.TestCase):
 
     def test_on_iteration(self):
-        bucket_queue = Queue()
-        hold_queue = Queue()
-        m = PeriodicWorkController(bucket_queue, hold_queue)
-        m.on_iteration()
+        times = range(10) + [None]
+        c = ScheduleController(times)
+
+        import time
+        slept = [None]
+
+        def _sleep(count):
+            slept[0] = count
+
+        old_sleep = time.sleep
+        time.sleep = _sleep
+        try:
+            for i in times:
+                c.on_iteration()
+                res = i is None and 1 or i
+                self.assertEquals(slept[0], res)
+        finally:
+            time.sleep = old_sleep
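
The Mediator tests double as a compact spec: pull a task off the ready queue,
acknowledge and drop it if its id has been revoked, otherwise hand it to the
callback. A stdlib-only sketch of that single iteration (illustrative, not
celery's implementation):

    from Queue import Queue, Empty

    revoked = set()               # stand-in for celery.worker.revoke.revoked

    def mediate_once(ready_queue, callback):
        """One on_iteration() step: take a task, skip it if revoked."""
        try:
            task = ready_queue.get(timeout=1)
        except Empty:
            return
        if task.task_id in revoked:
            task.on_ack()         # acknowledge, but never execute
            return
        callback(task)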

+ 51 - 0
celery/tests/test_worker_heartbeat.py

@@ -0,0 +1,51 @@
+import unittest
+
+from celery.worker.heartbeat import Heart
+
+
+class MockDispatcher(object):
+
+    def __init__(self):
+        self.sent = []
+
+    def send(self, msg):
+        self.sent.append(msg)
+
+
+class MockDispatcherRaising(object):
+
+    def send(self, msg):
+        if msg == "worker-offline":
+            raise Exception("foo")
+
+
+class TestHeart(unittest.TestCase):
+
+    def test_run(self):
+        eventer = MockDispatcher()
+
+        heart = Heart(eventer, interval=1)
+        heart._shutdown.set()
+        heart.run()
+        self.assertTrue(heart._state == "RUN")
+        self.assertTrue("worker-online" in eventer.sent)
+        self.assertTrue("worker-heartbeat" in eventer.sent)
+        self.assertTrue("worker-offline" in eventer.sent)
+
+        self.assertTrue(heart._stopped.isSet())
+
+        heart.stop()
+        heart.stop()
+        self.assertTrue(heart._state == "CLOSE")
+
+        heart = Heart(eventer, interval=0.00001)
+        heart._shutdown.set()
+        for i in range(10):
+            heart.run()
+
+    def test_run_stopped_is_set_even_if_send_breaks(self):
+        eventer = MockDispatcherRaising()
+        heart = Heart(eventer, interval=1)
+        heart._shutdown.set()
+        self.assertRaises(Exception, heart.run)
+        self.assertTrue(heart._stopped.isSet())
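
The heartbeat contract those assertions encode is: announce worker-online once,
emit at least one worker-heartbeat, and always mark the thread stopped before
worker-offline goes out, even if sending it raises. A hedged, stdlib-only sketch
of a loop with that shape (not celery's Heart, which also tracks _state as shown
above):

    import threading

    class TinyHeart(object):
        """Illustrative heartbeat loop."""

        def __init__(self, eventer, interval=1.0):
            self.eventer = eventer
            self.interval = interval
            self._shutdown = threading.Event()
            self._stopped = threading.Event()

        def run(self):
            self.eventer.send("worker-online")
            try:
                while True:
                    self.eventer.send("worker-heartbeat")
                    if self._shutdown.isSet():
                        break
                    self._shutdown.wait(self.interval)
            finally:
                # Always set, even if worker-offline below raises.
                self._stopped.set()
            self.eventer.send("worker-offline")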

+ 188 - 56
celery/tests/test_worker_job.py

@@ -1,70 +1,103 @@
 # -*- coding: utf-8 -*-
+from __future__ import with_statement
+
 import sys
+import logging
 import unittest
-from celery.execute import ExecuteWrapper
-from celery.worker.job import TaskWrapper
-from celery.datastructures import ExceptionInfo
-from celery.models import TaskMeta
-from celery.registry import tasks, NotRegistered
-from celery.pool import TaskPool
-from celery.utils import gen_unique_id
-from carrot.backends.base import BaseMessage
+import simplejson
 from StringIO import StringIO
-from celery.log import setup_logger
+
 from django.core import cache
-import simplejson
-import logging
+from carrot.backends.base import BaseMessage
+
+from celery.log import setup_logger
+from celery.task.base import Task
+from celery.utils import gen_unique_id
+from celery.models import TaskMeta
+from celery.result import AsyncResult
+from celery.worker.job import WorkerTaskTrace, TaskWrapper
+from celery.worker.pool import TaskPool
+from celery.exceptions import RetryTaskError, NotRegistered
+from celery.decorators import task as task_dec
+from celery.datastructures import ExceptionInfo
 
 scratch = {"ACK": False}
 some_kwargs_scratchpad = {}
 
 
-def jail(task_id, task_name, fun, args, kwargs):
-    return ExecuteWrapper(fun, task_id, task_name, args, kwargs)()
+def jail(task_id, task_name, args, kwargs):
+    return WorkerTaskTrace(task_name, task_id, args, kwargs)()
 
 
 def on_ack():
     scratch["ACK"] = True
 
 
+@task_dec()
 def mytask(i, **kwargs):
     return i ** i
-tasks.register(mytask, name="cu.mytask")
 
 
+@task_dec()
 def mytask_no_kwargs(i):
     return i ** i
-tasks.register(mytask_no_kwargs, name="mytask_no_kwargs")
 
 
+class MyTaskIgnoreResult(Task):
+    ignore_result = True
+
+    def run(self, i):
+        return i ** i
+
+
+@task_dec()
 def mytask_some_kwargs(i, logfile):
     some_kwargs_scratchpad["logfile"] = logfile
     return i ** i
-tasks.register(mytask_some_kwargs, name="mytask_some_kwargs")
 
 
+@task_dec()
 def mytask_raising(i, **kwargs):
     raise KeyError(i)
-tasks.register(mytask_raising, name="cu.mytask-raising")
 
 
+@task_dec()
 def get_db_connection(i, **kwargs):
     from django.db import connection
     return id(connection)
 get_db_connection.ignore_result = True
 
 
+class TestRetryTaskError(unittest.TestCase):
+
+    def test_retry_task_error(self):
+        try:
+            raise Exception("foo")
+        except Exception, exc:
+            ret = RetryTaskError("Retrying task", exc)
+
+        self.assertEquals(ret.exc, exc)
+
+
 class TestJail(unittest.TestCase):
 
     def test_execute_jail_success(self):
-        ret = jail(gen_unique_id(), gen_unique_id(), mytask, [2], {})
+        ret = jail(gen_unique_id(), mytask.name, [2], {})
         self.assertEquals(ret, 4)
 
     def test_execute_jail_failure(self):
-        ret = jail(gen_unique_id(), gen_unique_id(), mytask_raising, [4], {})
+        ret = jail(gen_unique_id(), mytask_raising.name,
+                   [4], {})
         self.assertTrue(isinstance(ret, ExceptionInfo))
         self.assertEquals(ret.exception.args, (4, ))
 
+    def test_execute_ignore_result(self):
+        task_id = gen_unique_id()
+        ret = jail(task_id, MyTaskIgnoreResult.name,
+                   [4], {})
+        self.assertEquals(ret, 256)
+        self.assertFalse(AsyncResult(task_id).ready())
+
     def test_django_db_connection_is_closed(self):
         from django.db import connection
         connection._was_closed = False
@@ -75,12 +108,11 @@ class TestJail(unittest.TestCase):
             return old_connection_close(*args, **kwargs)
 
         connection.close = monkeypatched_connection_close
-
-        ret = jail(gen_unique_id(), gen_unique_id(),
-                   get_db_connection, [2], {})
-        self.assertTrue(connection._was_closed)
-
-        connection.close = old_connection_close
+        try:
+            jail(gen_unique_id(), get_db_connection.name, [2], {})
+            self.assertTrue(connection._was_closed)
+        finally:
+            connection.close = old_connection_close
 
     def test_django_cache_connection_is_closed(self):
         old_cache_close = getattr(cache.cache, "close", None)
@@ -96,7 +128,7 @@ class TestJail(unittest.TestCase):
 
         cache.cache.close = monkeypatched_cache_close
 
-        jail(gen_unique_id(), gen_unique_id(), mytask, [4], {})
+        jail(gen_unique_id(), mytask.name, [4], {})
         self.assertTrue(cache._was_closed)
         cache.cache.close = old_cache_close
         cache.settings.CACHE_BACKEND = old_backend
@@ -116,7 +148,7 @@ class TestJail(unittest.TestCase):
 
         cache.cache.close = monkeypatched_cache_close
 
-        jail(gen_unique_id(), gen_unique_id(), mytask, [4], {})
+        jail(gen_unique_id(), mytask.name, [4], {})
         self.assertTrue(cache._was_closed)
         cache.cache.close = old_cache_close
         cache.settings.CACHE_BACKEND = old_backend
@@ -126,21 +158,125 @@ class TestJail(unittest.TestCase):
             del(cache.parse_backend_uri)
 
 
-class TestTaskWrapper(unittest.TestCase):
+class MockEventDispatcher(object):
+
+    def __init__(self):
+        self.sent = []
 
-    def test_task_wrapper_attrs(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(),
-                         mytask, [1], {"f": "x"})
-        for attr in ("task_name", "task_id", "args", "kwargs", "logger"):
-            self.assertTrue(getattr(tw, attr, None))
+    def send(self, event):
+        self.sent.append(event)
+
+
+class TestTaskWrapper(unittest.TestCase):
 
     def test_task_wrapper_repr(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(),
-                         mytask, [1], {"f": "x"})
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [1], {"f": "x"})
         self.assertTrue(repr(tw))
 
+    def test_send_event(self):
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.eventer = MockEventDispatcher()
+        tw.send_event("task-frobulated")
+        self.assertTrue("task-frobulated" in tw.eventer.sent)
+
+    def test_send_email(self):
+        from celery import conf
+        from celery.worker import job
+        old_mail_admins = job.mail_admins
+        old_enable_mails = conf.CELERY_SEND_TASK_ERROR_EMAILS
+        mail_sent = [False]
+
+        def mock_mail_admins(*args, **kwargs):
+            mail_sent[0] = True
+
+        job.mail_admins = mock_mail_admins
+        conf.CELERY_SEND_TASK_ERROR_EMAILS = True
+        try:
+            tw = TaskWrapper(mytask.name, gen_unique_id(), [1], {"f": "x"})
+            try:
+                raise KeyError("foo")
+            except KeyError, exc:
+                einfo = ExceptionInfo(sys.exc_info())
+
+            tw.on_failure(einfo)
+            self.assertTrue(mail_sent[0])
+
+            mail_sent[0] = False
+            conf.CELERY_SEND_TASK_ERROR_EMAILS = False
+            tw.on_failure(einfo)
+            self.assertFalse(mail_sent[0])
+
+        finally:
+            job.mail_admins = old_mail_admins
+            conf.CELERY_SEND_TASK_ERROR_EMAILS = old_enable_mails
+
+    def test_execute_and_trace(self):
+        from celery.worker.job import execute_and_trace
+        res = execute_and_trace(mytask.name, gen_unique_id(), [4], {})
+        self.assertEquals(res, 4 ** 4)
+
+    def test_execute_safe_catches_exception(self):
+        from celery.worker.job import execute_and_trace, WorkerTaskTrace
+        old_exec = WorkerTaskTrace.execute
+
+        def _error_exec(self, *args, **kwargs):
+            raise KeyError("baz")
+
+        WorkerTaskTrace.execute = _error_exec
+        try:
+            import warnings
+            with warnings.catch_warnings(record=True) as log:
+                res = execute_and_trace(mytask.name, gen_unique_id(),
+                                        [4], {})
+                self.assertTrue(isinstance(res, ExceptionInfo))
+                self.assertTrue(log)
+                self.assertTrue("Exception outside" in log[0].message.args[0])
+                self.assertTrue("KeyError" in log[0].message.args[0])
+        finally:
+            WorkerTaskTrace.execute = old_exec
+
+    def create_exception(self, exc):
+        try:
+            raise exc
+        except exc.__class__, thrown:
+            return sys.exc_info()
+
+    def test_worker_task_trace_handle_retry(self):
+        from celery.exceptions import RetryTaskError
+        uuid = gen_unique_id()
+        w = WorkerTaskTrace(mytask.name, uuid, [4], {})
+        type_, value_, tb_ = self.create_exception(ValueError("foo"))
+        type_, value_, tb_ = self.create_exception(RetryTaskError(str(value_),
+                                                                  exc=value_))
+        w._store_errors = False
+        w.handle_retry(value_, type_, tb_, "")
+        self.assertEquals(mytask.backend.get_status(uuid), "PENDING")
+        w._store_errors = True
+        w.handle_retry(value_, type_, tb_, "")
+        self.assertEquals(mytask.backend.get_status(uuid), "RETRY")
+
+    def test_worker_task_trace_handle_failure(self):
+        from celery.worker.job import WorkerTaskTrace
+        uuid = gen_unique_id()
+        w = WorkerTaskTrace(mytask.name, uuid, [4], {})
+        type_, value_, tb_ = self.create_exception(ValueError("foo"))
+        w._store_errors = False
+        w.handle_failure(value_, type_, tb_, "")
+        self.assertEquals(mytask.backend.get_status(uuid), "PENDING")
+        w._store_errors = True
+        w.handle_failure(value_, type_, tb_, "")
+        self.assertEquals(mytask.backend.get_status(uuid), "FAILURE")
+
+    def test_executed_bit(self):
+        from celery.worker.job import AlreadyExecutedError
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [], {})
+        self.assertFalse(tw.executed)
+        tw._set_executed_bit()
+        self.assertTrue(tw.executed)
+        self.assertRaises(AlreadyExecutedError, tw._set_executed_bit)
+
     def test_task_wrapper_mail_attrs(self):
-        tw = TaskWrapper(gen_unique_id(), gen_unique_id(), mytask, [], {})
+        tw = TaskWrapper(mytask.name, gen_unique_id(), [], {})
         x = tw.success_msg % {"name": tw.task_name,
                               "id": tw.task_id,
                               "return_value": 10}
@@ -157,7 +293,7 @@ class TestTaskWrapper(unittest.TestCase):
         self.assertTrue(x)
 
     def test_from_message(self):
-        body = {"task": "cu.mytask", "id": gen_unique_id(),
+        body = {"task": mytask.name, "id": gen_unique_id(),
                 "args": [2], "kwargs": {u"æØåveéðƒeæ": "bar"}}
         m = BaseMessage(body=simplejson.dumps(body), backend="foo",
                         content_type="application/json",
@@ -170,7 +306,6 @@ class TestTaskWrapper(unittest.TestCase):
         self.assertEquals(tw.kwargs.keys()[0],
                           u"æØåveéðƒeæ".encode("utf-8"))
         self.assertFalse(isinstance(tw.kwargs.keys()[0], unicode))
-        self.assertEquals(id(mytask), id(tw.task_func))
         self.assertTrue(tw.logger)
 
     def test_from_message_nonexistant_task(self):
@@ -184,45 +319,42 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_execute(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_success_no_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask_no_kwargs", tid, mytask_no_kwargs,
-                         [4], {})
+        tw = TaskWrapper(mytask_no_kwargs.name, tid, [4], {})
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_success_some_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask_some_kwargs", tid, mytask_some_kwargs,
-                         [4], {})
+        tw = TaskWrapper(mytask_some_kwargs.name, tid, [4], {})
         self.assertEquals(tw.execute(logfile="foobaz.log"), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(some_kwargs_scratchpad.get("logfile"), "foobaz.log")
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_ack(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"},
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"},
                         on_ack=on_ack)
         self.assertEquals(tw.execute(), 256)
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertTrue(scratch["ACK"])
         self.assertEquals(meta.result, 256)
-        self.assertEquals(meta.status, "DONE")
+        self.assertEquals(meta.status, "SUCCESS")
 
     def test_execute_fail(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask-raising", tid, mytask_raising, [4],
-                         {"f": "x"})
+        tw = TaskWrapper(mytask_raising.name, tid, [4], {"f": "x"})
         self.assertTrue(isinstance(tw.execute(), ExceptionInfo))
         meta = TaskMeta.objects.get(task_id=tid)
         self.assertEquals(meta.status, "FAILURE")
@@ -230,7 +362,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_execute_using_pool(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         p = TaskPool(2)
         p.start()
         asyncres = tw.execute_using_pool(p)
@@ -239,7 +371,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_default_kwargs(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         self.assertEquals(tw.extend_with_default_kwargs(10, "some_logfile"), {
             "f": "x",
             "logfile": "some_logfile",
@@ -250,7 +382,7 @@ class TestTaskWrapper(unittest.TestCase):
 
     def test_on_failure(self):
         tid = gen_unique_id()
-        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
+        tw = TaskWrapper(mytask.name, tid, [4], {"f": "x"})
         try:
             raise Exception("Inside unit tests")
         except Exception:
@@ -261,12 +393,12 @@ class TestTaskWrapper(unittest.TestCase):
         tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO)
 
         from celery import conf
-        conf.SEND_CELERY_TASK_ERROR_EMAILS = True
+        conf.CELERY_SEND_TASK_ERROR_EMAILS = True
 
         tw.on_failure(exc_info)
         logvalue = logfh.getvalue()
-        self.assertTrue("cu.mytask" in logvalue)
+        self.assertTrue(mytask.name in logvalue)
         self.assertTrue(tid in logvalue)
         self.assertTrue("ERROR" in logvalue)
 
-        conf.SEND_CELERY_TASK_ERROR_EMAILS = False
+        conf.CELERY_SEND_TASK_ERROR_EMAILS = False
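
The recurring pattern in this file is that the worker now executes by task name:
WorkerTaskTrace looks the task up in the registry, so the function object never
has to travel with the message. A rough sketch of that calling convention
(assumes this commit's tree plus a configured result backend; the task itself is
made up):

    from celery.decorators import task
    from celery.utils import gen_unique_id
    from celery.worker.job import WorkerTaskTrace

    @task()
    def add(x, y, **kwargs):
        return x + y

    # Only the task *name* crosses the boundary; the registry resolves it.
    result = WorkerTaskTrace(add.name, gen_unique_id(), [2, 2], {})()
    assert result == 4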

+ 12 - 0
celery/tests/test_worker_revoke.py

@@ -0,0 +1,12 @@
+import unittest
+
+from celery.worker import revoke
+
+
+class TestRevokeRegistry(unittest.TestCase):
+
+    def test_is_working(self):
+        revoke.revoked.add("foo")
+        self.assertTrue("foo" in revoke.revoked)
+        revoke.revoked.pop_value("foo")
+        self.assertTrue("foo" not in revoke.revoked)

+ 47 - 0
celery/tests/test_worker_scheduler.py

@@ -0,0 +1,47 @@
+import unittest
+from Queue import Queue, Empty
+from datetime import datetime, timedelta
+
+from celery.worker.scheduler import Scheduler
+
+
+class TestScheduler(unittest.TestCase):
+
+    def test_sched_and_run_now(self):
+        ready_queue = Queue()
+        sched = Scheduler(ready_queue)
+        now = datetime.now()
+
+        callback_called = [False]
+        def callback():
+            callback_called[0] = True
+
+        sched.enter("foo", eta=now, callback=callback)
+
+        remaining = iter(sched).next()
+        self.assertEquals(remaining, 0)
+        self.assertTrue(callback_called[0])
+        self.assertEquals(ready_queue.get_nowait(), "foo")
+
+    def test_sched_run_later(self):
+        ready_queue = Queue()
+        sched = Scheduler(ready_queue)
+        now = datetime.now()
+
+        callback_called = [False]
+        def callback():
+            callback_called[0] = True
+
+        eta = now + timedelta(seconds=10)
+        sched.enter("foo", eta=eta, callback=callback)
+
+        remaining = iter(sched).next()
+        self.assertTrue(remaining > 7)
+        self.assertFalse(callback_called[0])
+        self.assertRaises(Empty, ready_queue.get_nowait)
+
+    def test_empty_queue_yields_None(self):
+        ready_queue = Queue()
+        sched = Scheduler(ready_queue)
+
+        self.assertTrue(iter(sched).next() is None)
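
Between them, these three tests describe the whole Scheduler protocol: enter()
an item with an eta and a callback, then iterate; each step yields 0 after
moving a due item onto the ready queue (firing its callback), the number of
seconds remaining until the next item otherwise, or None when the schedule is
empty. A compact consumer sketch using the same imports as the tests:

    from Queue import Queue
    from datetime import datetime, timedelta

    from celery.worker.scheduler import Scheduler

    ready_queue = Queue()
    sched = Scheduler(ready_queue)
    sched.enter("warm-cache", eta=datetime.now(), callback=lambda: None)
    sched.enter("send-report", eta=datetime.now() + timedelta(seconds=30),
                callback=lambda: None)

    for remaining in iter(sched):
        if remaining is None:               # nothing scheduled at all
            break
        if remaining:                       # next item is this many seconds away
            break                           # a real consumer would sleep instead
        print ready_queue.get_nowait()      # a due item reached the ready queue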

+ 98 - 2
celery/tests/utils.py

@@ -1,11 +1,107 @@
 from __future__ import with_statement
-from contextlib import contextmanager
-from StringIO import StringIO
+
 import os
 import sys
 import __builtin__
+from StringIO import StringIO
+from functools import wraps
+from contextlib import contextmanager
+
+from celery.utils import noop
+
+
+def with_environ(env_name, env_value):
+
+    def _envpatched(fun):
+
+        @wraps(fun)
+        def _patch_environ(*args, **kwargs):
+            prev_val = os.environ.get(env_name)
+            os.environ[env_name] = env_value
+            try:
+                return fun(*args, **kwargs)
+            finally:
+                if prev_val is not None:
+                    os.environ[env_name] = prev_val
+                else:
+                    del os.environ[env_name]
+
+        return _patch_environ
+    return _envpatched
+
+
+def sleepdeprived(fun):
+
+    @wraps(fun)
+    def _sleepdeprived(*args, **kwargs):
+        import time
+        old_sleep = time.sleep
+        time.sleep = noop
+        try:
+            return fun(*args, **kwargs)
+        finally:
+            time.sleep = old_sleep
+
+    return _sleepdeprived
+
+
+def skip_if_environ(env_var_name):
+
+    def _wrap_test(fun):
+
+        @wraps(fun)
+        def _skips_if_environ(*args, **kwargs):
+            if os.environ.get(env_var_name):
+                sys.stderr.write("SKIP %s: %s set\n" % (
+                    fun.__name__, env_var_name))
+                return
+            return fun(*args, **kwargs)
+
+        return _skips_if_environ
+
+    return _wrap_test
+
+
+def skip_if_quick(fun):
+    return skip_if_environ("QUICKTEST")(fun)
+
+
+def _skip_test(reason, sign):
+
+    def _wrap_test(fun):
+
+        @wraps(fun)
+        def _skipped_test(*args, **kwargs):
+            sys.stderr.write("%s: %s " % (sign, reason))
+
+        return _skipped_test
+    return _wrap_test
+
+
+def todo(reason):
+    """TODO test decorator."""
+    return _skip_test(reason, "TODO")
+
+
+def skip(reason):
+    """Skip test decorator."""
+    return _skip_test(reason, "SKIP")
+
+
+def skip_if(predicate, reason):
+    """Skip test if predicate is ``True``."""
+
+    def _inner(fun):
+        return predicate and skip(reason)(fun) or fun
+
+    return _inner
+
+
+def skip_unless(predicate, reason):
+    """Skip test if predicate is ``False``."""
+    return skip_if(not predicate, reason)
 
 
+# Taken from
+# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
 @contextmanager
 def mask_modules(*modnames):
     """Ban some modules from being importable inside the context

+ 3 - 2
celery/urls.py

@@ -4,12 +4,13 @@ URLs defined for celery.
 
 """
 from django.conf.urls.defaults import patterns, url
+
 from celery import views
 
 
 urlpatterns = patterns("",
-    url(r'^(?P<task_id>[\w\d\-]+)/done/?$', views.is_task_done,
-        name="celery-is_task_done"),
+    url(r'^(?P<task_id>[\w\d\-]+)/done/?$', views.is_task_successful,
+        name="celery-is_task_successful"),
     url(r'^(?P<task_id>[\w\d\-]+)/status/?$', views.task_status,
         name="celery-task_status"),
 )
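
Anything that reversed the old URL name has to follow the rename; for example
(illustrative, assuming this urlconf is included in the project as before):

    from django.core.urlresolvers import reverse

    task_id = "2c5a4e04-5e4d-4fe4-a5a4-0c9ff4a94c25"   # any task id string
    url = reverse("celery-is_task_successful", kwargs={"task_id": task_id})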

Some files were not shown because too many files changed in this diff