
Merge branch 'master' into dbschedule

Conflicts:
	celery/conf.py
Ask Solem 15 years ago
Parent
Commit
1ab0847e35
100 changed files with 5033 additions and 1773 deletions
  1. + 1 - 0  .gitignore
  2. + 9 - 0  AUTHORS
  3. + 427 - 32  Changelog
  4. + 105 - 180  FAQ
  5. + 1 - 1  MANIFEST.in
  6. + 10 - 6  README.rst
  7. + 0 - 4  bin/celerybeat
  8. + 0 - 2  bin/celeryd
  9. + 0 - 4  bin/celeryev
  10. + 1 - 12  celery/__init__.py
  11. + 2 - 0  celery/backends/__init__.py
  12. + 65 - 40  celery/backends/amqp.py
  13. + 5 - 4  celery/backends/base.py
  14. + 77 - 0  celery/backends/cache.py
  15. + 174 - 0  celery/backends/cassandra.py
  16. + 31 - 35  celery/backends/database.py
  17. + 5 - 1  celery/backends/mongodb.py
  18. + 5 - 4  celery/bin/celerybeat.py
  19. + 34 - 20  celery/bin/celeryd.py
  20. + 28 - 491  celery/bin/celeryev.py
  21. + 21 - 6  celery/concurrency/processes/__init__.py
  22. + 45 - 9  celery/concurrency/processes/pool.py
  23. + 26 - 10  celery/conf.py
  24. + 61 - 0  celery/datastructures.py
  25. + 47 - 0  celery/db/dfd042c7.py
  26. + 25 - 32  celery/db/models.py
  27. + 3 - 1  celery/decorators.py
  28. + 11 - 4  celery/events/__init__.py
  29. + 415 - 0  celery/events/cursesmon.py
  30. + 68 - 0  celery/events/dumper.py
  31. + 99 - 0  celery/events/snapshot.py
  32. + 107 - 27  celery/events/state.py
  33. + 16 - 5  celery/exceptions.py
  34. + 27 - 14  celery/execute/__init__.py
  35. + 7 - 1  celery/loaders/base.py
  36. + 28 - 5  celery/loaders/default.py
  37. + 67 - 42  celery/log.py
  38. + 0 - 0  celery/management/__init__.py
  39. + 0 - 0  celery/management/commands/__init__.py
  40. + 16 - 0  celery/management/commands/celeryd.py
  41. + 26 - 27  celery/messaging.py
  42. + 11 - 3  celery/models.py
  43. + 3 - 2  celery/platform.py
  44. + 2 - 2  celery/registry.py
  45. + 16 - 16  celery/result.py
  46. + 63 - 43  celery/routes.py
  47. + 1 - 1  celery/serialization.py
  48. + 3 - 0  celery/signals.py
  49. + 2 - 2  celery/task/__init__.py
  50. + 40 - 13  celery/task/base.py
  51. + 2 - 3  celery/task/builtins.py
  52. + 51 - 0  celery/task/control.py
  53. + 22 - 21  celery/task/http.py
  54. + 67 - 17  celery/task/sets.py
  55. + 12 - 0  celery/tests/__init__.py
  56. + 1 - 7  celery/tests/config.py
  57. + 0 - 0  celery/tests/functional/__init__.py
  58. + 165 - 0  celery/tests/functional/case.py
  59. + 23 - 0  celery/tests/functional/tasks.py
  60. + 0 - 2  celery/tests/test_backends/disabled_amqp.py
  61. + 93 - 4  celery/tests/test_backends/test_base.py
  62. + 129 - 0  celery/tests/test_backends/test_cache.py
  63. + 133 - 0  celery/tests/test_backends/test_database.py
  64. + 0 - 3  celery/tests/test_backends/test_redis.py
  65. + 0 - 3  celery/tests/test_backends/test_tyrant.py
  66. + 0 - 0  celery/tests/test_bin/__init__.py
  67. + 111 - 0  celery/tests/test_bin/test_celerybeat.py
  68. + 310 - 0  celery/tests/test_bin/test_celeryd.py
  69. + 0 - 13  celery/tests/test_bin_celeryd.py
  70. + 86 - 12  celery/tests/test_buckets.py
  71. + 0 - 5  celery/tests/test_celery.py
  72. + 93 - 0  celery/tests/test_concurrency_processes.py
  73. + 18 - 7  celery/tests/test_datastructures.py
  74. + 110 - 20  celery/tests/test_log.py
  75. + 1 - 0  celery/tests/test_result.py
  76. + 55 - 59  celery/tests/test_routes.py
  77. + 20 - 17  celery/tests/test_task.py
  78. + 32 - 1  celery/tests/test_task_control.py
  79. + 1 - 4  celery/tests/test_task_http.py
  80. + 175 - 0  celery/tests/test_task_sets.py
  81. + 120 - 4  celery/tests/test_utils.py
  82. + 4 - 4  celery/tests/test_utils_info.py
  83. + 179 - 27  celery/tests/test_worker.py
  84. + 150 - 5  celery/tests/test_worker_control.py
  85. + 7 - 59  celery/tests/test_worker_controllers.py
  86. + 135 - 26  celery/tests/test_worker_job.py
  87. + 5 - 5  celery/tests/test_worker_revoke.py
  88. + 0 - 58  celery/tests/test_worker_scheduler.py
  89. + 126 - 4  celery/utils/__init__.py
  90. + 92 - 31  celery/utils/compat.py
  91. + 6 - 8  celery/utils/info.py
  92. + 8 - 15  celery/utils/timeutils.py
  93. + 43 - 24  celery/worker/__init__.py
  94. + 10 - 46  celery/worker/buckets.py
  95. + 13 - 4  celery/worker/control/__init__.py
  96. + 54 - 17  celery/worker/control/builtins.py
  97. + 0 - 7  celery/worker/control/registry.py
  98. + 18 - 75  celery/worker/controllers.py
  99. + 2 - 0  celery/worker/heartbeat.py
  100. + 115 - 55  celery/worker/job.py

+ 1 - 0
.gitignore

@@ -14,3 +14,4 @@ pip-log.txt
 erl_crash.dump
 *.db
 Documentation/
+.tox/

+ 9 - 0
AUTHORS

@@ -29,3 +29,12 @@ Ordered by date of first contribution:
   Patrick Altman <paltman@gmail.com>
   Vincent Driessen <vincent@datafox.nl>
   Hari <haridara@gmail.com>
+  Bartosz Ptaszynski
+  Marcin Lulek <info@webreactor.eu>
+  Honza Kral <honza.kral@gmail.com>
+  Jonas Haag <jonas@lophus.org>
+  Armin Ronacher <armin.ronacher@active-4.com>
+  Gunnlaugur Thor Briem <gunnlaugur@gmail.com>
+  Mikhail Gusarov <dottedmag@dottedmag.net>
+  Frédéric Junod <frederic.junod@camptocamp.com>
+  Lukas Linhart <lukas.linhart@centrumholdings.com>

+ 427 - 32
Changelog

@@ -5,14 +5,253 @@
 .. contents::
     :local:

-1.2.0
+2.0.2
 =====
-:release-date: NOT RELEASED
-:branch: master
-:state: beta
+:release-date: 2010-07-22 11:31 A.M CEST

-Celery 1.2 contains backward incompatible changes, the most important
-being that the Django dependency has been removed, so Celery no longer
+* Routes: When using the dict route syntax, the exchange for a task
+  could disappear, making the task unroutable.
+
+    See http://github.com/ask/celery/issues/issue/158
+
+* Test suite now passing on Python 2.4
+
+* No longer have to type PYTHONPATH=. to use celeryconfig in current dir.
+
+    This is accomplished by the default loader ensuring that the current
+    directory is in ``sys.path`` when loading the config module.
+    ``sys.path`` is reset to its original state after loading.
+
+    Adding cwd to ``sys.path`` without the user knowing may be a security
+    issue, as this means someone can drop a Python module in the users
+    directory that executes arbitrary commands. This was the original reason
+    not to do this, but if done *only when loading the config module*, this
+    means that the behavior will only apply to the modules imported in the
+    config module, which I think is a good compromise (certainly better than
+    just explicitly setting PYTHONPATH=. anyway)
+
+* Experimental Cassandra backend added.
+
+* celeryd: SIGHUP handler accidentally propagated to worker pool processes.
+
+    In combination with 7a7c44e39344789f11b5346e9cc8340f5fe4846c
+    this would make each child process start a new celeryd when
+    the terminal window was closed :/
+
+* celeryd: Do not install SIGHUP handler if running from a terminal.
+
+    This fixes the problem where celeryd is launched in the background
+    when closing the terminal.
+
+* celeryd: Now joins threads at shutdown.
+
+    See http://github.com/ask/celery/issues/issue/152
+
+* Test teardown: Don't use atexit but nose's ``teardown()`` functionality
+  instead.
+
+    See http://github.com/ask/celery/issues/issue/154
+
+* Debian init script for celeryd: Stop now works correctly.
+
+* Task logger:  ``warn`` method added (synonym for ``warning``)
+
+* Can now define a whitelist of errors to send error e-mails for.
+
+    Example::
+
+        CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError', )
+
+    See http://github.com/ask/celery/issues/issue/153
+
+* celeryd: Now handles overflow exceptions in ``time.mktime`` while parsing
+  the ETA field.
+
+* LoggerWrapper: Try to detect loggers logging back to stderr/stdout making
+  an infinite loop.
+
+* Added :class:`celery.task.control.inspect`: Inspects a running worker.
+
+    Examples::
+
+        # Inspect a single worker
+        >>> i = inspect("myworker.example.com")
+
+        # Inspect several workers
+        >>> i = inspect(["myworker.example.com", "myworker2.example.com"])
+
+        # Inspect all workers consuming on this vhost.
+        >>> i = inspect()
+
+        ### Methods
+
+        # Get currently executing tasks
+        >>> i.active()
+
+        # Get currently reserved tasks
+        >>> i.reserved()
+
+        # Get the current eta schedule
+        >>> i.scheduled()
+
+        # Worker statistics and info
+        >>> i.stats()
+
+        # List of currently revoked tasks
+        >>> i.revoked()
+
+        # List of registered tasks
+        >>> i.registered_tasks()
+
+*  Remote control commands ``dump_active``/``dump_reserved``/``dump_schedule``
+   now reply with detailed task requests.
+
+    Containing the original arguments and fields of the task requested.
+
+    In addition the remote control command ``set_loglevel`` has been added,
+    this only changes the loglevel for the main process.
+
+* Worker control command execution now catches errors and returns their
+  string representation in the reply.
+
+* Functional test suite added
+
+    :mod:`celery.tests.functional.case` contains utilities to start
+    and stop an embedded celeryd process, for use in functional testing.
+
+2.0.1
+=====
+:release-date: 2010-07-09 03:02 P.M CEST
+
+* multiprocessing.pool: Now handles encoding errors, so that pickling errors
+  doesn't crash the worker processes.
+
+* The remote control command replies were not working with RabbitMQ 1.8.0's
+  stricter equivalence checks.
+
+    If you've already hit this problem you may have to delete the
+    declaration::
+
+        $ camqadm exchange.delete celerycrq
+
+    or::
+
+        $ python manage.py camqadm exchange.delete celerycrq
+
+* A bug sneaked into the ETA scheduler that made it only able to execute
+  one task per second(!)
+
+    The scheduler sleeps between iterations so it doesn't consume too much CPU.
+    It keeps a list of the scheduled items sorted by time, at each iteration
+    it sleeps for the remaining time of the item with the nearest deadline.
+    If there are no eta tasks it will sleep for a minimum amount of time, one
+    second by default.
+
+    A bug sneaked in here, making it sleep for one second for every task
+    that was scheduled. This has been fixed, so now it should move
+    tasks like a hot knife through butter.
+
+    In addition a new setting has been added to control the minimum sleep
+    interval; ``CELERYD_ETA_SCHEDULER_PRECISION``. A good
+    value for this would be a float between 0 and 1, depending
+    on the needed precision. A value of 0.8 means that when the ETA of a task
+    is met, it will take at most 0.8 seconds for the task to be moved to the
+    ready queue.
+
+* Pool: Supervisor did not release the semaphore.
+
+    This would lead to a deadlock if all workers terminated prematurely.
+
+* Added Python version trove classifiers: 2.4, 2.5, 2.6 and 2.7
+
+* Tests now passing on Python 2.7.
+
+* Task.__reduce__: Tasks created using the task decorator can now be pickled.
+
+* setup.py: nose added to ``tests_require``.
+
+* Pickle should now work with SQLAlchemy 0.5.x
+
+* New homepage design by Jan Henrik Helmers: http://celeryproject.org
+
+* New Sphinx theme by Armin Ronacher: http://celeryproject.org/docs
+
+* Fixed "pending_xref" errors shown in the HTML rendering of the
+  documentation. Apparently this was caused by new changes in Sphinx 1.0b2.
+
+* Router classes in ``CELERY_ROUTES`` are now imported lazily.
+
+    Importing a router class in a module that also loads the Celery
+    environment would cause a circular dependency. This is solved
+    by importing it when needed after the environment is set up.
+
+* ``CELERY_ROUTES`` was broken if set to a single dict.
+
+    This example in the docs should now work again::
+
+        CELERY_ROUTES = {"feed.tasks.import_feed": "feeds"}
+
+* ``CREATE_MISSING_QUEUES`` was not honored by apply_async.
+
+* New remote control command: ``stats``
+
+    Dumps information about the worker, like pool process pids, and
+    total number of tasks executed by type.
+
+    Example reply::
+
+        [{'worker.local':
+             'total': {'tasks.sleeptask': 6},
+             'pool': {'timeouts': [None, None],
+                      'processes': [60376, 60377],
+                      'max-concurrency': 2,
+                      'max-tasks-per-child': None,
+                      'put-guarded-by-semaphore': True}}]
+
+* New remote control command: ``dump_active``
+
+    Gives a list of tasks currently being executed by the worker.
+    By default arguments are passed through repr in case there
+    are arguments that are not JSON encodable. If you know
+    the arguments are JSON safe, you can pass the argument ``safe=True``.
+
+    Example reply::
+
+        >>> broadcast("dump_active", arguments={"safe": False}, reply=True)
+        [{'worker.local': [
+            {'args': '(1,)',
+             'time_start': 1278580542.6300001,
+             'name': 'tasks.sleeptask',
+             'delivery_info': {
+                 'consumer_tag': '30',
+                 'routing_key': 'celery',
+                 'exchange': 'celery'},
+             'hostname': 'casper.local',
+             'acknowledged': True,
+             'kwargs': '{}',
+             'id': '802e93e9-e470-47ed-b913-06de8510aca2',
+            }
+        ]}]
+
+* Added experimental support for persistent revokes.
+
+    Use the ``-S|--statedb`` argument to celeryd to enable it::
+
+        $ celeryd --statedb=/var/run/celeryd
+
+    This will use the file: ``/var/run/celeryd.db``,
+    as the ``shelve`` module automatically adds the ``.db`` suffix.
+
+
+2.0.0
+=====
+:release-date: 2010-07-02 02:30 P.M CEST
+
+Foreword
+--------
+
+Celery 2.0 contains backward incompatible changes, the most important
+being that the Django dependency has been removed so Celery no longer
 supports Django out of the box, but instead as an add-on package
 called `django-celery`_.

@@ -39,6 +278,11 @@ Django integration has been moved to a separate package: `django-celery`_.

     INSTALLED_APPS = "djcelery"

+* If you use ``mod_wsgi`` you need to add the following line to your ``.wsgi``
+  file::
+
+    import os
+    os.environ["CELERY_LOADER"] = "django"

 * The following modules has been moved to `django-celery`_:

@@ -48,29 +292,31 @@ Django integration has been moved to a separate package: `django-celery`_.
     ``celery.models``                      ``djcelery.models``
     ``celery.managers``                    ``djcelery.managers``
     ``celery.views``                       ``djcelery.views``
-    ``celery.urls``                        ``djcelery.url``
+    ``celery.urls``                        ``djcelery.urls``
     ``celery.management``                  ``djcelery.management``
     ``celery.loaders.djangoapp``           ``djcelery.loaders``
     ``celery.backends.database``           ``djcelery.backends.database``
     ``celery.backends.cache``              ``djcelery.backends.cache``
     =====================================  =====================================

-Importing :mod:`djcelery` will automatically setup celery to use the Django
-loader by setting the :envvar:`CELERY_LOADER`` environment variable (it won't
-change it if it's already defined).
+Importing :mod:`djcelery` will automatically setup Celery to use the Django
+loader.  It does this by setting the :envvar:`CELERY_LOADER` environment
+variable to ``"django"`` (it won't change it if a loader is already set.)

-When the Django loader is used, the "database" and "cache" backend aliases
-will point to the :mod:`djcelery` backends instead of the built-in backends.
+When the Django loader is used, the "database" and "cache" result backend
+aliases will point to the :mod:`djcelery` backends instead of the built-in
+backends, and configuration will be read from the Django settings.

 .. _`django-celery`: http://pypi.python.org/pypi/django-celery

-
 Upgrading for others
 --------------------

-The database backend is now using `SQLAlchemy`_ instead of the Django ORM,
-see `Supported Databases`_ for a table of supported databases.
+Database result backend
+~~~~~~~~~~~~~~~~~~~~~~~

+The database result backend is now using `SQLAlchemy`_ instead of the
+Django ORM, see `Supported Databases`_ for a table of supported databases.

 The ``DATABASE_*`` settings has been replaced by a single setting:
 ``CELERY_RESULT_DBURI``. The value here should be an
@@ -108,6 +354,25 @@ the ``CELERY_RESULT_ENGINE_OPTIONS`` setting::
 .. _`SQLAlchemy Connection Strings`:
     http://www.sqlalchemy.org/docs/dbengine.html#create-engine-url-arguments

+Cache result backend
+~~~~~~~~~~~~~~~~~~~~
+
+The cache result backend is no longer using the Django cache framework,
+but it supports mostly the same configuration syntax::
+
+    CELERY_CACHE_BACKEND = "memcached://A.example.com:11211;B.example.com"
+
+To use the cache backend you must either have the `pylibmc`_ or
+`python-memcached`_ library installed, of which the former is regarded
+as the best choice.
+
+.. _`pylibmc`: http://pypi.python.org/pypi/pylibmc
+.. _`python-memcached`: http://pypi.python.org/pypi/python-memcached
+
+The supported backend types are ``memcached://`` and ``memory://``,
+we haven't felt the need to support any of the other backends
+provided by Django.
+
 Backward incompatible changes
 -----------------------------

@@ -121,8 +386,8 @@ Backward incompatible changes
     Also this makes it possible to use the client side of celery without being
     configured::

-        >>> from carrot.connection import Connection
-        >>> conn = Connection("localhost", "guest", "guest", "/")
+        >>> from carrot.connection import BrokerConnection
+        >>> conn = BrokerConnection("localhost", "guest", "guest", "/")
         >>> from celery.execute import send_task
         >>> r = send_task("celery.ping", args=(), kwargs={}, connection=conn)
         >>> from celery.backends.amqp import AMQPBackend
@@ -137,9 +402,8 @@ Backward incompatible changes
     **Setting name**                       **Replace with**
     =====================================  =====================================
     ``CELERY_AMQP_CONSUMER_QUEUES``        ``CELERY_QUEUES``
-    ``CELERY_AMQP_CONSUMER_QUEUES``        ``CELERY_QUEUES``
     ``CELERY_AMQP_EXCHANGE``               ``CELERY_DEFAULT_EXCHANGE``
-    ``CELERY_AMQP_EXCHANGE_TYPE``          ``CELERY_DEFAULT_AMQP_EXCHANGE_TYPE``
+    ``CELERY_AMQP_EXCHANGE_TYPE``          ``CELERY_DEFAULT_EXCHANGE_TYPE``
     ``CELERY_AMQP_CONSUMER_ROUTING_KEY``   ``CELERY_QUEUES``
     ``CELERY_AMQP_PUBLISHER_ROUTING_KEY``  ``CELERY_DEFAULT_ROUTING_KEY``
     =====================================  =====================================
@@ -162,6 +426,37 @@ Backward incompatible changes

         CELERY_LOADER = "myapp.loaders.Loader"

+* ``CELERY_TASK_RESULT_EXPIRES`` now defaults to 1 day.
+
+    Previous default setting was to expire in 5 days.
+
+*  AMQP backend: Don't use different values for `auto_delete`.
+
+    This bug became visible with RabbitMQ 1.8.0, which no longer
+    allows conflicting declarations for the auto_delete and durable settings.
+
+    If you've already used celery with this backend chances are you
+    have to delete the previous declaration::
+
+        $ camqadm exchange.delete celeryresults
+
+* Now uses pickle instead of cPickle on Python versions <= 2.5
+
+    cPickle is broken in Python <= 2.5.
+
+    It unsafely and incorrectly uses relative instead of absolute imports,
+    so e.g::
+
+          exceptions.KeyError
+
+    becomes::
+
+          celery.exceptions.KeyError
+
+    Your best choice is to upgrade to Python 2.6,
+    as while the pure pickle version has worse performance,
+    it is the only safe option for older Python versions.
+
 .. _120news:

 News
@@ -195,6 +490,17 @@ News
         The fields here are, in order: *sender hostname*, *timestamp*, *event type* and
         *additional event fields*.

+* AMQP result backend: Now supports ``.ready()``, ``.successful()``,
+  ``.result``, ``.status``, and even responds to changes in task state
+
+* New user guides:
+
+    * :doc:`userguide/workers`
+    * :doc:`userguide/tasksets`
+    * :doc:`userguide/routing`
+
+* celeryd: Standard out/error is now being redirected to the logfile.
+
 * :mod:`billiard` has been moved back to the celery repository.

     =====================================  =====================================
@@ -211,6 +517,11 @@ News

 * now depends on :mod:`pyparsing`

+* celeryd: Added ``--purge`` as an alias to ``--discard``.
+
+* celeryd: Ctrl+C (SIGINT) once does warm shutdown, hitting Ctrl+C twice
+  forces termination.
+
 * Added support for using complex crontab-expressions in periodic tasks. For
   example, you can now use::

@@ -226,15 +537,55 @@ News
   tasks to the pool.

     This means it doesn't have to wait for dozens of tasks to finish at shutdown
-    because it has already applied n prefetched tasks without any pool
-    processes to immediately accept them.
+    because it has applied prefetched tasks without having any pool
+    processes available to immediately accept them.

-    Some overhead for very short tasks though, then the shutdown probably doesn't
-    matter either so can disable with::
+    See http://github.com/ask/celery/issues/closed#issue/122

-        CELERYD_POOL_PUTLOCKS = False
+* New built-in way to do task callbacks using
+  :class:`~celery.task.sets.subtask`.

-    See http://github.com/ask/celery/issues/closed#issue/122
+  See :doc:`userguide/tasksets` for more information.
+
+* TaskSets can now contain several types of tasks.
+
+  :class:`~celery.task.sets.TaskSet` has been refactored to use
+  a new syntax, please see :doc:`userguide/tasksets` for more information.
+
+  The previous syntax is still supported, but will be deprecated in
+  version 1.4.
+
+* TaskSet failed() result was incorrect.
+
+    See http://github.com/ask/celery/issues/closed#issue/132
+
+* Now creates different loggers per task class.
+
+    See http://github.com/ask/celery/issues/closed#issue/129
+
+* Missing queue definitions are now created automatically.
+
+    You can disable this using the CELERY_CREATE_MISSING_QUEUES setting.
+
+    The missing queues are created with the following options::
+
+        CELERY_QUEUES[name] = {"exchange": name,
+                               "exchange_type": "direct",
+                               "routing_key": name}
+
+   This feature is added for easily setting up routing using the ``-Q``
+   option to ``celeryd``::
+
+       $ celeryd -Q video, image
+
+   See the new routing section of the userguide for more information:
+   :doc:`userguide/routing`.
+
+* New Task option: ``Task.queue``
+
+    If set, message options will be taken from the corresponding entry
+    in ``CELERY_QUEUES``. ``exchange``, ``exchange_type`` and ``routing_key``
+    will be ignored
 
 
 * Added support for task soft and hard timelimits.
 * Added support for task soft and hard timelimits.
 
 
@@ -306,7 +657,7 @@ News
 
 
         class Router(object):
         class Router(object):
 
 
-            def route_for_task(self, task, task_id=None, args=None, kwargs=None):
+            def route_for_task(self, task, args=None, kwargs=None):
                 if task == "celery.ping":
                 if task == "celery.ping":
                     return "default"
                     return "default"
 
 
@@ -384,6 +735,30 @@ News
         >>> broadcast("enable_events")
         >>> broadcast("enable_events")
         >>> broadcast("disable_events")

+
+    This means running the unittests doesn't require any special setup.
+    ``celery/tests/__init__`` now configures the ``CELERY_CONFIG_MODULE`` and
+    ``CELERY_LOADER``, so when ``nosetests`` imports that, the unit test
+    environment is all set up.
+
+    Before you run the tests you need to install the test requirements::
+
+        $ pip install -r contrib/requirements/test.txt
+
+    Running all tests::
+
+        $ nosetests
+
+    Specifying the tests to run::
+
+        $ nosetests celery.tests.test_task
+
+    Producing HTML coverage::
+
+        $ nosetests --with-coverage3
+
+    The coverage output is then located in ``celery/tests/cover/index.html``.

 * celeryd: New option ``--version``: Dump version info and exit.

@@ -447,10 +822,31 @@ News
         celeryd-multi -n baz.myhost -c 10
         celeryd-multi -n xuzzy.myhost -c 3

+* The worker now calls the result backends ``process_cleanup`` method
+  *after* task execution instead of before.
+
+* AMQP result backend now supports Pika.
+
+1.0.6
+=====
+:release-date: 2010-06-30 09:57 A.M CEST
+
+* RabbitMQ 1.8.0 has extended their exchange equivalence tests to
+  include ``auto_delete`` and ``durable``. This broke the AMQP backend.
+
+  If you've already used the AMQP backend this means you have to
+  delete the previous definitions::
+
+      $ camqadm exchange.delete celeryresults
+
+  or::
+
+      $ python manage.py camqadm exchange.delete celeryresults
+
+
 1.0.5
 =====
 :release-date: 2010-06-01 02:36 P.M CEST
-:md5: c93f7522c2ce98a32e1cc1a970a7dba1

 Critical
 --------
@@ -863,7 +1259,7 @@ Fixes

     Please note that a patch to :mod:`multiprocessing` is currently being
     worked on, this patch would enable us to use a better solution, and is
-    scheduled for inclusion in the ``1.2.0`` release.
+    scheduled for inclusion in the ``2.0.0`` release.

 * celeryd now shutdowns cleanly when receving the ``TERM`` signal.

@@ -1141,7 +1537,7 @@ Deprecations
 ------------

 * The following configuration variables has been renamed and will be
-  deprecated in v1.2:
+  deprecated in v2.0:

     * CELERYD_DAEMON_LOG_FORMAT -> CELERYD_LOG_FORMAT
     * CELERYD_DAEMON_LOG_LEVEL -> CELERYD_LOG_LEVEL
@@ -1159,12 +1555,12 @@ Deprecations
     the custom AMQP routing options (queue/exchange/routing_key, etc), you
     should read the new FAQ entry: http://bit.ly/aiWoH.

-    The previous syntax is deprecated and scheduled for removal in v1.2.
+    The previous syntax is deprecated and scheduled for removal in v2.0.

 * ``TaskSet.run`` has been renamed to ``TaskSet.apply_async``.

     ``TaskSet.run`` has now been deprecated, and is scheduled for
-    removal in v1.2.
+    removal in v2.0.

 News
 ----
@@ -1179,7 +1575,6 @@ News

 * celeryd now sends events if enabled with the ``-E`` argument.

-
     Excellent for monitoring tools, one is already in the making
     (http://github.com/ask/celerymon).


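A note on the ETA-scheduler fix described under 2.0.1 above: the repaired
behavior amounts to sleeping until the nearest deadline, capped by the
precision value, instead of sleeping once per scheduled task. A minimal
illustrative sketch (not celery's actual implementation), assuming a
``schedule`` list of ``(eta, callback)`` pairs, with ``precision`` playing
the role of ``CELERYD_ETA_SCHEDULER_PRECISION``::

    import time

    def run_eta_schedule(schedule, precision=1.0):
        while schedule:
            # Find the entry with the nearest deadline.
            eta, callback = min(schedule, key=lambda entry: entry[0])
            delay = eta - time.time()
            if delay > 0:
                # Never oversleep the nearest ETA by more than `precision`.
                time.sleep(min(delay, precision))
                continue
            schedule.remove((eta, callback))
            callback()
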
+ 105 - 180
FAQ

@@ -62,8 +62,7 @@ Is celery for Django only?
 **Answer:** No.

 Celery does not depend on Django anymore. To use Celery with Django you have
-to use the `django-celery`_ package:
-
+to use the `django-celery`_ package.

 .. _`django-celery`: http://pypi.python.org/pypi/django-celery

@@ -240,34 +239,10 @@ waiting tasks you have to stop all the workers, and then discard the tasks
 using ``discard_all``.


-Windows: The ``-B`` / ``--beat`` option to celeryd doesn't work?
-----------------------------------------------------------------
-**Answer**: That's right. Run ``celerybeat`` and ``celeryd`` as separate
-services instead.
-
-Tasks
-=====
-
-How can I reuse the same connection when applying tasks?
---------------------------------------------------------
-
-**Answer**: See :doc:`userguide/executing`.
-
-Can I execute a task by name?
------------------------------
-
-**Answer**: Yes. Use :func:`celery.execute.send_task`.
-You can also execute a task by name from any language
-that has an AMQP client.
-
-    >>> from celery.execute import send_task
-    >>> send_task("tasks.add", args=[2, 2], kwargs={})
-    <AsyncResult: 373550e8-b9a0-4666-bc61-ace01fa4f91d>
-
 Results
 =======

-How dow I get the result of a task if I have the ID that points there?
+How do I get the result of a task if I have the ID that points there?
 ----------------------------------------------------------------------

 **Answer**: Use ``Task.AsyncResult``::
@@ -331,58 +306,44 @@ Can I use celery with ActiveMQ/STOMP?

 **Answer**: Yes, but this is somewhat experimental for now.
 It is working ok in a test configuration, but it has not
-been tested in production like RabbitMQ has. If you have any problems with
-using STOMP and celery, please report the bugs to the issue tracker:
+been tested in production. If you have any problems
+using STOMP with celery, please report an issue here::

     http://github.com/ask/celery/issues/

-First you have to use the ``master`` branch of ``celery``::
-
-    $ git clone git://github.com/ask/celery.git
-    $ cd celery
-    $ sudo python setup.py install
-    $ cd ..
-
-Then you need to install the ``stompbackend`` branch of ``carrot``::
-
-    $ git clone git://github.com/ask/carrot.git
-    $ cd carrot
-    $ git checkout stompbackend
-    $ sudo python setup.py install
-    $ cd ..
+The STOMP carrot backend requires the `stompy`_ library::

-And my fork of ``python-stomp`` which adds non-blocking support::
-
-    $ hg clone http://bitbucket.org/asksol/python-stomp/
+    $ pip install stompy
     $ cd python-stomp
     $ sudo python setup.py install
     $ cd ..

+.. _`stompy`: http://pypi.python.org/pypi/stompy
+
 In this example we will use a queue called ``celery`` which we created in
 the ActiveMQ web admin interface.

-**Note**: For ActiveMQ the queue name has to have ``"/queue/"`` prepended to
-it. i.e. the queue ``celery`` becomes ``/queue/celery``.
+**Note**: When using ActiveMQ the queue name needs to have ``"/queue/"``
+prepended to it. i.e. the queue ``celery`` becomes ``/queue/celery``.

-Since a STOMP queue is a single named entity and it doesn't have the
-routing capabilities of AMQP you need to set both the ``queue``, and
-``exchange`` settings to your queue name. This is a minor inconvenience since
-carrot needs to maintain the same interface for both AMQP and STOMP (obviously
-the one with the most capabilities won).
+Since STOMP doesn't have exchanges and the routing capabilities of AMQP,
+you need to set ``exchange`` name to the same as the queue name. This is
+a minor inconvenience since carrot needs to maintain the same interface
+for both AMQP and STOMP.

-Use the following specific settings in your ``settings.py``:
+Use the following settings in your ``celeryconfig.py``/django ``settings.py``:

 .. code-block:: python

-    # Makes python-stomp the default backend for carrot.
+    # Use the stomp carrot backend.
     CARROT_BACKEND = "stomp"

     # STOMP hostname and port settings.
     BROKER_HOST = "localhost"
     BROKER_PORT = 61613

-    # The queue name to use (both queue and exchange must be set to the
-    # same queue name when using STOMP)
+    # The queue name to use (the exchange *must* be set to the
+    # same as the queue name when using STOMP)
     CELERY_DEFAULT_QUEUE = "/queue/celery"
     CELERY_DEFAULT_EXCHANGE = "/queue/celery" 

@@ -390,11 +351,8 @@ Use the following specific settings in your ``settings.py``:
         "/queue/celery": {"exchange": "/queue/celery"}
     }

-Now you can go on reading the tutorial in the README, ignoring any AMQP
-specific options. 
-
-What features are not supported when using STOMP?
---------------------------------------------------
+What features are not supported when using ghettoq/STOMP?
+---------------------------------------------------------

 This is a (possible incomplete) list of features not available when
 using the STOMP backend:
@@ -407,8 +365,56 @@ using the STOMP backend:

     * mandatory

-Features
-========
+Tasks
+=====
+
+How can I reuse the same connection when applying tasks?
+--------------------------------------------------------
+
+**Answer**: See :doc:`userguide/executing`.
+
+Can I execute a task by name?
+-----------------------------
+
+**Answer**: Yes. Use :func:`celery.execute.send_task`.
+You can also execute a task by name from any language
+that has an AMQP client.
+
+    >>> from celery.execute import send_task
+    >>> send_task("tasks.add", args=[2, 2], kwargs={})
+    <AsyncResult: 373550e8-b9a0-4666-bc61-ace01fa4f91d>
+
+
+How can I get the task id of the current task?
+----------------------------------------------
+
+**Answer**: Celery does set some default keyword arguments if the task
+accepts them (you can accept them by either using ``**kwargs``, or list them
+specifically)::
+
+    @task
+    def mytask(task_id=None):
+        cache.set(task_id, "Running")
+
+The default keyword arguments are documented here:
+http://celeryq.org/docs/userguide/tasks.html#default-keyword-arguments
+
+Can I specify a custom task_id?
+-------------------------------
+
+**Answer**: Yes. Use the ``task_id`` argument to
+:meth:`~celery.execute.apply_async`::
+
+    >>> task.apply_async(args, kwargs, task_id="...")
+
+Can I use natural task ids?
+---------------------------
+
+**Answer**: Yes, but make sure it is unique, as the behavior
+for two tasks existing with the same id is undefined.
+
+The world will probably not explode, but at the worst
+they can overwrite each other's results.

 How can I run a task once another task has finished?
 ----------------------------------------------------
@@ -422,7 +428,7 @@ Also, a common pattern is to use callback tasks:
     def add(x, y, callback=None):
         result = x + y
         if callback:
-            callback.delay(result)
+            subtask(callback).delay(result)
         return result


@@ -431,8 +437,11 @@ Also, a common pattern is to use callback tasks:
         logger = log_result.get_logger(**kwargs)
         logger.info("log_result got: %s" % (result, ))

+Invocation::
+
+    >>> add.delay(2, 2, callback=log_result.subtask())

-    >>> add.delay(2, 2, callback=log_result)
+See :doc:`userguide/tasksets` for more information.

 Can I cancel the execution of a task?
 -------------------------------------
@@ -468,120 +477,7 @@ Can I send some tasks to only some servers?
 **Answer:** Yes. You can route tasks to an arbitrary server using AMQP,
 and a worker can bind to as many queues as it wants.

-Say you have two servers, ``x``, and ``y`` that handles regular tasks,
-and one server ``z``, that only handles feed related tasks, you can use this
-configuration:
-
-* Servers ``x`` and ``y``: settings.py:
-
-.. code-block:: python
-
-    CELERY_DEFAULT_QUEUE = "regular_tasks"
-    CELERY_QUEUES = {
-        "regular_tasks": {
-            "binding_key": "task.#",
-        },
-    }
-    CELERY_DEFAULT_EXCHANGE = "tasks"
-    CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
-    CELERY_DEFAULT_ROUTING_KEY = "task.regular"
-
-* Server ``z``: settings.py:
-
-.. code-block:: python
-
-        CELERY_DEFAULT_QUEUE = "feed_tasks"
-        CELERY_QUEUES = {
-            "feed_tasks": {
-                "binding_key": "feed.#",
-            },
-        }
-        CELERY_DEFAULT_EXCHANGE = "tasks"
-        CELERY_DEFAULT_ROUTING_KEY = "task.regular"
-        CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
-
-``CELERY_QUEUES`` is a map of queue names and their exchange/type/binding_key,
-if you don't set exchange or exchange type, they will be taken from the
-``CELERY_DEFAULT_EXCHANGE``/``CELERY_DEFAULT_EXCHANGE_TYPE`` settings.
-
-Now to make a Task run on the ``z`` server you need to set its
-``routing_key`` attribute so it starts with the words ``"task.feed."``:
-
-.. code-block:: python
-
-    from feedaggregator.models import Feed
-    from celery.decorators import task
-
-    @task(routing_key="feed.importer")
-    def import_feed(feed_url):
-        Feed.objects.import_feed(feed_url)
-
-or if subclassing the ``Task`` class directly:
-
-.. code-block:: python
-
-    class FeedImportTask(Task):
-        routing_key = "feed.importer"
-
-        def run(self, feed_url):
-            Feed.objects.import_feed(feed_url)
-
-
-You can also override this using the ``routing_key`` argument to
-:func:`celery.task.apply_async`:
-
-    >>> from myapp.tasks import RefreshFeedTask
-    >>> RefreshFeedTask.apply_async(args=["http://cnn.com/rss"],
-    ...                             routing_key="feed.importer")
-
-
- If you want, you can even have your feed processing worker handle regular
- tasks as well, maybe in times when there's a lot of work to do.
- Just add a new queue to server ``z``'s ``CELERY_QUEUES``:
-
- .. code-block:: python
-
-        CELERY_QUEUES = {
-            "feed_tasks": {
-                "binding_key": "feed.#",
-            },
-            "regular_tasks": {
-                "binding_key": "task.#",
-            },
-        }
-
-Since the default exchange is ``tasks``, they will both use the same
-exchange.
-
-If you have another queue but on another exchange you want to add,
-just specify a custom exchange and exchange type:
-
-.. code-block:: python
-
-    CELERY_QUEUES = {
-            "feed_tasks": {
-                "binding_key": "feed.#",
-            },
-            "regular_tasks": {
-                "binding_key": "task.#",
-            }
-            "image_tasks": {
-                "binding_key": "image.compress",
-                "exchange": "mediatasks",
-                "exchange_type": "direct",
-            },
-        }
-
-If you're confused about these terms, you should read up on AMQP and RabbitMQ.
-`Rabbits and Warrens`_ is an excellent blog post describing queues and
-exchanges. There's also AMQP in 10 minutes*: `Flexible Routing Model`_,
-and `Standard Exchange Types`_. For users of RabbitMQ the `RabbitMQ FAQ`_
-could also be useful as a source of information.
-
-.. _`Rabbits and Warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/
-.. _`Flexible Routing Model`: http://bit.ly/95XFO1
-.. _`Standard Exchange Types`: http://bit.ly/EEWca
-.. _`RabbitMQ FAQ`: http://www.rabbitmq.com/faq.html
+See :doc:`userguide/routing` for more information.

 Can I change the interval of a periodic task at runtime?
 --------------------------------------------------------
@@ -691,3 +587,32 @@ and they will not be re-run unless you have the ``acks_late`` option set.
 How do I run celeryd in the background on [platform]?
 -----------------------------------------------------
 **Answer**: Please see :doc:`cookbook/daemonizing`.
+
+Windows
+=======
+
+celeryd keeps spawning processes at startup
+-------------------------------------------
+
+**Answer**: This is a known issue on Windows.
+You have to start celeryd with the command::
+
+    $ python -m celery.bin.celeryd
+
+Any additional arguments can be appended to this command.
+
+See http://bit.ly/bo9RSw
+
+The ``-B`` / ``--beat`` option to celeryd doesn't work?
+----------------------------------------------------------------
+**Answer**: That's right. Run ``celerybeat`` and ``celeryd`` as separate
+services instead.
+
+``django-celery`` can’t find settings?
+--------------------------------------
+
+**Answer**: You need to specify the ``--settings`` argument to ``manage.py``::
+
+    $ python manage.py celeryd start --settings=settings
+
+See http://bit.ly/bo9RSw

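To illustrate the "natural task ids" answer above: any stable, unique string
can serve as the ``task_id``. The helper below is hypothetical (not part of
celery) and simply derives a stable id from domain data::

    import hashlib

    def natural_task_id(feed_url):
        # Re-sending the "same" work reuses one id; ids must stay unique,
        # since two tasks sharing an id is undefined behavior.
        return hashlib.md5("import_feed:%s" % feed_url).hexdigest()

    # >>> import_feed.apply_async(args=[feed_url],
    # ...                         task_id=natural_task_id(feed_url))
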
+ 1 - 1
MANIFEST.in

@@ -11,8 +11,8 @@ include setup.cfg
 recursive-include bin *
 recursive-include celery *.py
 recursive-include docs *
-recursive-include tests *
 recursive-include contrib *
+recursive-include examples *
 prune tests/*.pyc
 prune docs/*.pyc
 prune contrib/*.pyc

+ 10 - 6
README.rst

@@ -4,7 +4,7 @@

 .. image:: http://cloud.github.com/downloads/ask/celery/celery_favicon_128.png

-:Version: 1.1.1
+:Version: 2.1.0a1
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/ask/celery/
@@ -13,8 +13,9 @@

 --

-Celery is a task queue/job queue based on distributed message passing.
-It is focused on real-time operation, but supports scheduling as well.
+Celery is an open source asynchronous task queue/job queue based on
+distributed message passing. It is focused on real-time operation,
+but supports scheduling as well.

 The execution units, called tasks, are executed concurrently on a single or
 more worker servers. Tasks can execute asynchronously (in the background) or synchronously
@@ -28,8 +29,8 @@ language. It can also `operate with other languages using webhooks`_.
 The recommended message broker is `RabbitMQ`_, but support for `Redis`_ and
 databases (`SQLAlchemy`_) is also available.

-You may also be pleased to know that full Django integration exists
-via the `django-celery`_ package.
+You may also be pleased to know that full Django integration exists,
+delivered by the `django-celery`_ package.

 .. _`RabbitMQ`: http://www.rabbitmq.com/
 .. _`Redis`: http://code.google.com/p/redis/
@@ -38,6 +39,9 @@ via the `django-celery`_ package.
 .. _`operate with other languages using webhooks`:
     http://ask.github.com/celery/userguide/remote-tasks.html

+.. contents::
+    :local:
+
 Overview
 ========

@@ -47,7 +51,7 @@ This is a high level overview of the architecture.

 The broker pushes tasks to the worker servers.
 A worker server is a networked machine running ``celeryd``. This can be one or
-more machines, depending on the workload.
+more machines depending on the workload.

 The result of the task can be stored for later retrieval (called its
 "tombstone").

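The overview above maps to very little code in practice. A minimal sketch
using the 2.0-era decorator API (the task and values here are illustrative)::

    from celery.decorators import task

    @task
    def add(x, y):
        # Executed by a celeryd worker; the return value is stored
        # as the task's result (its "tombstone").
        return x + y

    # >>> result = add.delay(2, 2)   # message is pushed to the broker
    # >>> result.get()               # 4, fetched from the result backend
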
+ 0 - 4
bin/celerybeat

@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import sys
-if not '' in sys.path:
-    sys.path.insert(0, '')
-
 from celery.bin import celerybeat

 if __name__ == "__main__":

+ 0 - 2
bin/celeryd

@@ -1,7 +1,5 @@
 #!/usr/bin/env python
 import sys
-if not '' in sys.path:
-    sys.path.insert(0, '')
 from celery.bin import celeryd

 WINDOWS_MESSAGE = """

+ 0 - 4
bin/celeryev

@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import sys
-if not '' in sys.path:
-    sys.path.insert(0, '')
-
 from celery.bin import celeryev

 if __name__ == "__main__":

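The ``sys.path`` hack removed from these three scripts is superseded by the
loader behavior described in the 2.0.2 notes: the current directory is put
on ``sys.path`` only while the config module is being imported. A sketch of
that approach (not the loader's exact code)::

    import os
    import sys

    def import_from_cwd(module, imp=__import__):
        # Temporarily put the current directory first on sys.path,
        # import the module, then restore the original path.
        original = list(sys.path)
        cwd = os.getcwd()
        if cwd not in sys.path:
            sys.path.insert(0, cwd)
        try:
            return imp(module)
        finally:
            sys.path[:] = original
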
+ 1 - 12
celery/__init__.py

@@ -1,20 +1,9 @@
 """Distributed Task Queue"""

-VERSION = (1, 1, 1)
+VERSION = (2, 1, 0, "a1")

 __version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
 __author__ = "Ask Solem"
 __contact__ = "ask@celeryproject.org"
 __homepage__ = "http://github.com/ask/celery/"
 __docformat__ = "restructuredtext"
-
-
-def is_stable_release():
-    if len(VERSION) > 3 and isinstance(VERSION[3], basestring):
-        return False
-    return not VERSION[1] % 2
-
-
-def version_with_meta():
-    return "%s (%s)" % (__version__,
-                        is_stable_release() and "stable" or "unstable")

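The new ``VERSION`` tuple keeps the pre-release tag as a trailing string
element; the ``__version__`` expression above joins the numeric parts with
dots and appends any tag verbatim, which can be checked directly::

    VERSION = (2, 1, 0, "a1")
    __version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
    assert __version__ == "2.1.0a1"
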
+ 2 - 0
celery/backends/__init__.py

@@ -5,10 +5,12 @@ from celery.loaders import current_loader

 BACKEND_ALIASES = {
     "amqp": "celery.backends.amqp.AMQPBackend",
+    "cache": "celery.backends.cache.CacheBackend",
     "redis": "celery.backends.pyredis.RedisBackend",
     "mongodb": "celery.backends.mongodb.MongoBackend",
     "tyrant": "celery.backends.tyrant.TyrantBackend",
     "database": "celery.backends.database.DatabaseBackend",
+    "cassandra": "celery.backends.cassandra.CassandraBackend",
 }

 _backend_cache = {}

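For readers unfamiliar with the alias table: a backend name such as
``"cassandra"`` is looked up in ``BACKEND_ALIASES`` and the resulting dotted
path imported. The resolver below is an illustrative sketch, not celery's
actual loader code::

    def get_backend_cls(backend, aliases):
        # e.g. "cassandra" -> "celery.backends.cassandra.CassandraBackend"
        path = aliases.get(backend, backend)
        module_name, _, cls_name = path.rpartition(".")
        module = __import__(module_name, {}, {}, [cls_name])
        return getattr(module, cls_name)
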
+ 65 - 40
celery/backends/amqp.py

@@ -1,13 +1,17 @@
 """celery.backends.amqp"""
 import socket
+import time
+
+from datetime import timedelta

 from carrot.messaging import Consumer, Publisher

 from celery import conf
 from celery import states
-from celery.exceptions import TimeoutError
 from celery.backends.base import BaseDictBackend
+from celery.exceptions import TimeoutError
 from celery.messaging import establish_connection
+from celery.utils import timeutils


 class ResultPublisher(Publisher):
@@ -16,6 +20,7 @@ class ResultPublisher(Publisher):
     delivery_mode = conf.RESULT_PERSISTENT and 2 or 1
     serializer = conf.RESULT_SERIALIZER
     durable = conf.RESULT_PERSISTENT
+    auto_delete = True

     def __init__(self, connection, task_id, **kwargs):
         super(ResultPublisher, self).__init__(connection,
@@ -30,8 +35,11 @@ class ResultConsumer(Consumer):
     no_ack = True
     auto_delete = True

-    def __init__(self, connection, task_id, **kwargs):
+    def __init__(self, connection, task_id, expires=None, **kwargs):
         routing_key = task_id.replace("-", "")
+        if expires is not None:
+            pass
+            #self.queue_arguments = {"x-expires": expires}
         super(ResultConsumer, self).__init__(connection,
                 queue=routing_key, routing_key=routing_key, **kwargs)

@@ -46,15 +54,25 @@ class AMQPBackend(BaseDictBackend):

     """

-    exchange = conf.RESULT_EXCHANGE
-    exchange_type = conf.RESULT_EXCHANGE_TYPE
-    persistent = conf.RESULT_PERSISTENT
-    serializer = conf.RESULT_SERIALIZER
     _connection = None

-    def __init__(self, *args, **kwargs):
-        self._connection = kwargs.get("connection", None)
-        super(AMQPBackend, self).__init__(*args, **kwargs)
+    def __init__(self, connection=None, exchange=None, exchange_type=None,
+            persistent=None, serializer=None, auto_delete=None,
+            expires=None, **kwargs):
+        self._connection = connection
+        self.exchange = exchange
+        self.exchange_type = exchange_type
+        self.persistent = persistent
+        self.serializer = serializer
+        self.auto_delete = auto_delete
+        self.expires = expires
+        if self.expires is None:
+            self.expires = conf.TASK_RESULT_EXPIRES
+        if isinstance(self.expires, timedelta):
+            self.expires = timeutils.timedelta_seconds(self.expires)
+        if self.expires is not None:
+            self.expires = int(self.expires)
+        super(AMQPBackend, self).__init__(**kwargs)

     def _create_publisher(self, task_id, connection):
         delivery_mode = self.persistent and 2 or 1
@@ -66,13 +84,16 @@ class AMQPBackend(BaseDictBackend):
                                exchange=self.exchange,
                                exchange_type=self.exchange_type,
                                delivery_mode=delivery_mode,
-                               serializer=self.serializer)
+                               serializer=self.serializer,
+                               auto_delete=self.auto_delete)

     def _create_consumer(self, task_id, connection):
         return ResultConsumer(connection, task_id,
                               exchange=self.exchange,
                               exchange_type=self.exchange_type,
-                              durable=self.persistent)
+                              durable=self.persistent,
+                              auto_delete=self.auto_delete,
+                              expires=self.expires)

     def store_result(self, task_id, result, status, traceback=None):
         """Send task return value and status."""
@@ -91,6 +112,9 @@ class AMQPBackend(BaseDictBackend):

         return result

+    def get_task_meta(self, task_id, cache=True):
+        return self.poll(task_id)
+
     def wait_for(self, task_id, timeout=None, cache=True):
         if task_id in self._cache:
             meta = self._cache[task_id]
@@ -106,22 +130,21 @@ class AMQPBackend(BaseDictBackend):
             raise self.exception_to_python(meta["result"])
             raise self.exception_to_python(meta["result"])
 
 
     def poll(self, task_id):
     def poll(self, task_id):
-        routing_key = task_id.replace("-", "")
         consumer = self._create_consumer(task_id, self.connection)
         result = consumer.fetch()
-        payload = None
-        if result:
-            payload = self._cache[task_id] = result.payload
-            consumer.backend.queue_delete(routing_key)
-        else:
-            # Use previously received status if any.
-            if task_id in self._cache:
-                payload = self._cache[task_id]
+        try:
+            if result:
+                payload = self._cache[task_id] = result.payload
+                return payload
             else:
-                payload = {"status": states.PENDING, "result": None}
+                # Use previously received status if any.
+                if task_id in self._cache:
+                    return self._cache[task_id]
 
-        consumer.close()
-        return payload
+                return {"status": states.PENDING, "result": None}
+        finally:
+            consumer.close()
 
     def consume(self, task_id, timeout=None):
         results = []
@@ -129,24 +152,36 @@ class AMQPBackend(BaseDictBackend):
         def callback(message_data, message):
             results.append(message_data)
 
-        routing_key = task_id.replace("-", "")
-
-        wait = self.connection.connection.wait_multi
+        wait = self.connection.drain_events
         consumer = self._create_consumer(task_id, self.connection)
         consumer.register_callback(callback)
 
         consumer.consume()
         try:
-            wait([consumer.backend.channel], timeout=timeout)
+            time_start = time.time()
+            while True:
+                # Total time spent may exceed a single call to wait()
+                if timeout and time.time() - time_start >= timeout:
+                    raise socket.timeout()
+                wait(timeout=timeout)
+                if results:
+                    # Got event on the wanted channel.
+                    break
         finally:
-            consumer.backend.queue_delete(routing_key)
             consumer.close()
 
         self._cache[task_id] = results[0]
         return results[0]
 
-    def get_task_meta(self, task_id, cache=True):
-        return self.poll(task_id)
+    def close(self):
+        if self._connection is not None:
+            self._connection.close()
+
+    @property
+    def connection(self):
+        if not self._connection:
+            self._connection = establish_connection()
+        return self._connection
 
     def reload_task_result(self, task_id):
         raise NotImplementedError(
@@ -166,13 +201,3 @@ class AMQPBackend(BaseDictBackend):
         """Get the result of a taskset."""
         """Get the result of a taskset."""
         raise NotImplementedError(
         raise NotImplementedError(
                 "restore_taskset is not supported by this backend.")
                 "restore_taskset is not supported by this backend.")
-
-    def close(self):
-        if self._connection is not None:
-            self._connection.close()
-
-    @property
-    def connection(self):
-        if not self._connection:
-            self._connection = establish_connection()
-        return self._connection

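Note on the constructor change above: `expires` may now be given either as seconds or as a `timedelta`, and is normalized to whole seconds before use. A minimal standalone sketch of that normalization, with an inlined stand-in for `timeutils.timedelta_seconds` (the helper name and defaults here are illustrative, not celery API):

    from datetime import timedelta

    def normalize_expires(expires, default=None):
        if expires is None:
            expires = default        # conf.TASK_RESULT_EXPIRES in the backend
        if isinstance(expires, timedelta):
            # stand-in for timeutils.timedelta_seconds
            expires = expires.days * 86400 + expires.seconds
        return int(expires) if expires is not None else None

    assert normalize_expires(timedelta(minutes=5)) == 300
    assert normalize_expires(90.5) == 90
    assert normalize_expires(None) is None
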
+ 5 - 4
celery/backends/base.py

@@ -22,10 +22,10 @@ class BaseBackend(object):
         pass
 
     def encode_result(self, result, status):
-        if status == states.SUCCESS:
-            return self.prepare_value(result)
-        elif status in self.EXCEPTION_STATES:
+        if status in self.EXCEPTION_STATES:
             return self.prepare_exception(result)
+        else:
+            return self.prepare_value(result)
 
     def store_result(self, task_id, result, status):
         """Store the result and status of a task."""
@@ -143,7 +143,8 @@ class BaseDictBackend(BaseBackend):
 
     def __init__(self, *args, **kwargs):
         super(BaseDictBackend, self).__init__(*args, **kwargs)
-        self._cache = LocalCache(limit=conf.MAX_CACHED_RESULTS)
+        self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
+                                 conf.MAX_CACHED_RESULTS)
 
     def store_result(self, task_id, result, status, traceback=None):
         """Store task result and status."""

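The `encode_result` change is behavioral, not just cosmetic: the old branch returned `prepare_value` only for SUCCESS and fell off the end (returning None) for any state that was neither SUCCESS nor an exception state. A sketch of the new dispatch, with stand-in prepare functions (the real ones are methods on BaseBackend) and illustrative state names:

    EXCEPTION_STATES = frozenset(["FAILURE", "RETRY"])   # illustrative contents

    def encode_result(result, status,
                      prepare_value=lambda r: r,   # stand-in for self.prepare_value
                      prepare_exception=repr):     # stand-in for self.prepare_exception
        if status in EXCEPTION_STATES:
            return prepare_exception(result)
        return prepare_value(result)               # SUCCESS and every other state

    assert encode_result(42, "SUCCESS") == 42
    assert encode_result(42, "STARTED") == 42      # the old code returned None here
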
+ 77 - 0
celery/backends/cache.py

@@ -0,0 +1,77 @@
+from datetime import timedelta
+
+from carrot.utils import partition
+
+from celery import conf
+from celery.backends.base import KeyValueStoreBackend
+from celery.exceptions import ImproperlyConfigured
+from celery.utils import timeutils
+from celery.datastructures import LocalCache
+
+
+def get_best_memcache(*args, **kwargs):
+    behaviors = kwargs.pop("behaviors", None)
+    is_pylibmc = False
+    try:
+        import pylibmc as memcache
+        is_pylibmc = True
+    except ImportError:
+        try:
+            import memcache
+        except ImportError:
+            raise ImproperlyConfigured("Memcached backend requires either "
+                                       "the 'memcache' or 'pylibmc' library")
+    client = memcache.Client(*args, **kwargs)
+    if is_pylibmc and behaviors is not None:
+        client.behaviors = behaviors
+    return client
+
+
+class DummyClient(object):
+
+    def __init__(self, *args, **kwargs):
+        self.cache = LocalCache(5000)
+
+    def get(self, key, *args, **kwargs):
+        return self.cache.get(key)
+
+    def set(self, key, value, *args, **kwargs):
+        self.cache[key] = value
+
+
+backends = {"memcache": get_best_memcache,
+            "memcached": get_best_memcache,
+            "pylibmc": get_best_memcache,
+            "memory": DummyClient}
+
+class CacheBackend(KeyValueStoreBackend):
+    _client = None
+
+    def __init__(self, expires=conf.TASK_RESULT_EXPIRES,
+            backend=conf.CACHE_BACKEND, options={}, **kwargs):
+        super(CacheBackend, self).__init__(self, **kwargs)
+        if isinstance(expires, timedelta):
+            expires = timeutils.timedelta_seconds(expires)
+        self.expires = expires
+        self.options = dict(conf.CACHE_BACKEND_OPTIONS, **options)
+        self.backend, _, servers = partition(backend, "://")
+        self.servers = servers.split(";")
+        try:
+            self.Client = backends[self.backend]
+        except KeyError:
+            raise ImproperlyConfigured(
+                    "Unknown cache backend: %s. Please use one of the "
+                    "following backends: %s" % (self.backend,
+                                                ", ".join(backends.keys())))
+
+    def get(self, key):
+        return self.client.get(key)
+
+    def set(self, key, value):
+        return self.client.set(key, value, self.expires)
+
+    @property
+    def client(self):
+        if self._client is None:
+            self._client = self.Client(self.servers, **self.options)
+        return self._client

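On backend selection in CacheBackend: the CACHE_BACKEND string is split on "://" into a scheme (looked up in the `backends` table) and a ";"-separated server list, and `get_best_memcache` prefers pylibmc when it is importable, falling back to python-memcached. An illustrative parse using only the stdlib (`carrot.utils.partition` behaves like `str.partition`):

    def parse_cache_backend(backend):
        scheme, _, rest = backend.partition("://")
        return scheme, rest.split(";")

    assert parse_cache_backend("memcache://10.0.0.1:11211;10.0.0.2:11211") \
            == ("memcache", ["10.0.0.1:11211", "10.0.0.2:11211"])
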
+ 174 - 0
celery/backends/cassandra.py

@@ -0,0 +1,174 @@
+"""celery.backends.cassandra"""
+try:
+    import pycassa
+    from thrift import Thrift
+    C = __import__('cassandra').ttypes # FIXME: Namespace kludge
+except ImportError:
+    pycassa = None
+
+import itertools
+import random
+import socket
+import time
+
+from datetime import datetime
+
+from celery.backends.base import BaseDictBackend
+from celery import conf
+from celery.exceptions import ImproperlyConfigured
+from celery.loaders import load_settings
+from celery.log import setup_logger
+from celery.serialization import pickle
+from celery import states
+
+
+class CassandraBackend(BaseDictBackend):
+    """Highly fault tolerant Cassandra backend.
+
+    .. attribute:: servers
+
+        List of Cassandra servers with format: "hostname:port".
+
+    :raises celery.exceptions.ImproperlyConfigured: if
+        module :mod:`pycassa` is not available.
+
+    """
+    servers = []
+    keyspace = None
+    column_family = None
+    _retry_timeout = 300
+    _retry_wait = 3
+    _index_shards = 64
+    _index_keys = ["celery.results.index!%02x" % i
+                        for i in range(_index_shards)]
+
+    def __init__(self, servers=None, keyspace=None, column_family=None,
+            cassandra_options=None, **kwargs):
+        """Initialize Cassandra backend.
+
+        Raises :class:`celery.exceptions.ImproperlyConfigured` if
+        the ``CASSANDRA_SERVERS`` setting is not set.
+
+        """
+        self.logger = setup_logger("celery.backends.cassandra")
+
+        self.result_expires = kwargs.get("result_expires") or \
+                                conf.TASK_RESULT_EXPIRES
+
+        if not pycassa:
+            raise ImproperlyConfigured(
+                    "You need to install the pycassa library to use the "
+                    "Cassandra backend. See http://github.com/vomjom/pycassa")
+
+        settings = load_settings()
+
+        self.servers = servers or \
+                         getattr(settings, "CASSANDRA_SERVERS", self.servers)
+        self.keyspace = keyspace or \
+                          getattr(settings, "CASSANDRA_KEYSPACE",
+                                  self.keyspace)
+        self.column_family = column_family or \
+                               getattr(settings, "CASSANDRA_COLUMN_FAMILY",
+                                       self.column_family)
+        self.cassandra_options = dict(cassandra_options or {},
+                                   **getattr(settings,
+                                             "CASSANDRA_OPTIONS", {}))
+        if not self.servers or not self.keyspace or not self.column_family:
+            raise ImproperlyConfigured(
+                    "Cassandra backend not configured.")
+
+        super(CassandraBackend, self).__init__()
+        self._column_family = None
+
+    def _retry_on_error(func):
+        def wrapper(*args, **kwargs):
+            self = args[0]
+            ts = time.time() + self._retry_timeout
+            while 1:
+                try:
+                    return func(*args, **kwargs)
+                except (pycassa.InvalidRequestException,
+                        pycassa.NoServerAvailable,
+                        pycassa.TimedOutException,
+                        pycassa.UnavailableException,
+                        socket.error,
+                        socket.timeout,
+                        Thrift.TException), exc:
+                    self.logger.warn('Cassandra error: %s. Retrying...' % exc)
+                    if time.time() > ts:
+                        raise
+                    time.sleep(self._retry_wait)
+        return wrapper
+
+    def _get_column_family(self):
+        if self._column_family is None:
+            conn = pycassa.connect(self.servers,
+                                   **self.cassandra_options)
+            self._column_family = \
+              pycassa.ColumnFamily(conn, self.keyspace,
+                    self.column_family,
+                    read_consistency_level=pycassa.ConsistencyLevel.DCQUORUM,
+                    write_consistency_level=pycassa.ConsistencyLevel.DCQUORUM)
+        return self._column_family
+
+    def process_cleanup(self):
+        if self._column_family is not None:
+            self._column_family = None
+
+    @_retry_on_error
+    def _store_result(self, task_id, result, status, traceback=None):
+        """Store return value and status of an executed task."""
+        cf = self._get_column_family()
+        date_done = datetime.utcnow()
+        index_key = 'celery.results.index!%02x' % (
+                random.randrange(self._index_shards))
+        index_column_name = '%8x!%s' % (time.mktime(date_done.timetuple()),
+                                        task_id)
+        meta = {"status": status,
+                "result": pickle.dumps(result),
+                "date_done": date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
+                "traceback": pickle.dumps(traceback)}
+        cf.insert(task_id, meta)
+        cf.insert(index_key, {index_column_name: status})
+
+    @_retry_on_error
+    def _get_task_meta_for(self, task_id):
+        """Get task metadata for a task by id."""
+        cf = self._get_column_family()
+        try:
+            obj = cf.get(task_id)
+            meta = {
+                "task_id": task_id,
+                "status": obj["status"],
+                "result": pickle.loads(str(obj["result"])),
+                "date_done": obj["date_done"],
+                "traceback": pickle.loads(str(obj["traceback"])),
+            }
+        except (KeyError, pycassa.NotFoundException):
+            meta = {"status": states.PENDING, "result": None}
+        return meta
+
+    def cleanup(self):
+        """Delete expired metadata."""
+        self.logger.debug('Running cleanup...')
+        expires = datetime.utcnow() - self.result_expires
+        end_column = '%8x"' % (time.mktime(expires.timetuple()))
+
+        cf = self._get_column_family()
+        column_parent = C.ColumnParent(cf.column_family)
+        slice_pred = C.SlicePredicate(slice_range=C.SliceRange('', end_column,
+                                                               count=2**30))
+        columns = cf.client.multiget_slice(cf.keyspace, self._index_keys,
+                                           column_parent, slice_pred,
+                                           pycassa.ConsistencyLevel.DCQUORUM)
+
+        index_cols = [c.column.name
+                        for c in itertools.chain(*columns.values())]
+        for k in self._index_keys:
+            cf.remove(k, index_cols)
+
+        task_ids = [c[9:] for c in index_cols]
+        for k in task_ids:
+            cf.remove(k)
+
+        self.logger.debug('Cleaned %i expired results' % len(task_ids))

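`_retry_on_error` retries any pycassa/Thrift/socket failure until a deadline (`_retry_timeout` seconds after the first attempt), sleeping `_retry_wait` seconds between tries. The same pattern in a generic, self-contained form (names and limits below are illustrative, not celery API):

    import time

    def retry_on_error(errors, timeout=300, wait=3):
        def decorate(func):
            def wrapper(*args, **kwargs):
                deadline = time.time() + timeout
                while True:
                    try:
                        return func(*args, **kwargs)
                    except errors:
                        if time.time() > deadline:
                            raise            # out of time: re-raise last error
                        time.sleep(wait)
            return wrapper
        return decorate

    @retry_on_error((IOError, OSError), timeout=10, wait=1)
    def fetch():
        return "ok"
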
+ 31 - 35
celery/backends/database.py

@@ -11,15 +11,17 @@ from celery.exceptions import ImproperlyConfigured
 class DatabaseBackend(BaseDictBackend):
     """The database result backend."""
 
-    def __init__(self, dburi=conf.RESULT_DBURI,
+    def __init__(self, dburi=None, result_expires=None,
             engine_options=None, **kwargs):
             engine_options=None, **kwargs):
-        if not dburi:
+        self.result_expires = result_expires or conf.TASK_RESULT_EXPIRES
+        self.dburi = dburi or conf.RESULT_DBURI
+        self.engine_options = dict(engine_options or {},
+                                   **conf.RESULT_ENGINE_OPTIONS or {})
+        if not self.dburi:
             raise ImproperlyConfigured(
                     "Missing connection string! Do you have "
                     "CELERY_RESULT_DBURI set to a real value?")
-        self.dburi = dburi
-        self.engine_options = dict(engine_options or {},
-                                   **conf.RESULT_ENGINE_OPTIONS or {})
+
         super(DatabaseBackend, self).__init__(**kwargs)
 
     def ResultSession(self):
@@ -29,13 +31,11 @@ class DatabaseBackend(BaseDictBackend):
         """Store return value and status of an executed task."""
         """Store return value and status of an executed task."""
         session = self.ResultSession()
         session = self.ResultSession()
         try:
         try:
-            tasks = session.query(Task).filter(Task.task_id == task_id).all()
-            if not tasks:
+            task = session.query(Task).filter(Task.task_id == task_id).first()
+            if not task:
                 task = Task(task_id)
                 session.add(task)
                 session.flush()
-            else:
-                task = tasks[0]
             task.result = result
             task.status = status
             task.traceback = traceback
@@ -44,32 +44,29 @@ class DatabaseBackend(BaseDictBackend):
             session.close()
         return result
 
-    def _save_taskset(self, taskset_id, result):
-        """Store the result of an executed taskset."""
-        taskset = TaskSet(taskset_id, result)
-        session = self.ResultSession()
-        try:
-            session.add(taskset)
-            session.flush()
-            session.commit()
-        finally:
-            session.close()
-        return result
-
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
         session = self.ResultSession()
         try:
-            task = None
-            for task in session.query(Task).filter(Task.task_id == task_id):
-                break
+            task = session.query(Task).filter(Task.task_id == task_id).first()
             if not task:
                 task = Task(task_id)
                 session.add(task)
                 session.flush()
                 session.commit()
-            if task:
-                return task.to_dict()
+            return task.to_dict()
+        finally:
+            session.close()
+
+    def _save_taskset(self, taskset_id, result):
+        """Store the result of an executed taskset."""
+        session = self.ResultSession()
+        try:
+            taskset = TaskSet(taskset_id, result)
+            session.add(taskset)
+            session.flush()
+            session.commit()
+            return result
         finally:
             session.close()
 
@@ -77,23 +74,22 @@ class DatabaseBackend(BaseDictBackend):
         """Get taskset metadata for a taskset by id."""
         """Get taskset metadata for a taskset by id."""
         session = self.ResultSession()
         session = self.ResultSession()
         try:
         try:
-            qs = session.query(TaskSet)
-            for taskset in qs.filter(TaskSet.taskset_id == taskset_id):
+            taskset = session.query(TaskSet).filter(
+                    TaskSet.taskset_id == taskset_id).first()
+            if taskset:
                 return taskset.to_dict()
         finally:
             session.close()
 
     def cleanup(self):
         """Delete expired metadata."""
-        expires = conf.TASK_RESULT_EXPIRES
         session = self.ResultSession()
+        expires = self.result_expires
         try:
-            for task in session.query(Task).filter(
-                    Task.date_done < (datetime.now() - expires)):
-                session.delete(task)
-            for taskset in session.query(TaskSet).filter(
-                    TaskSet.date_done < (datetime.now() - expires)):
-                session.delete(taskset)
+            session.query(Task).filter(
+                    Task.date_done < (datetime.now() - expires)).delete()
+            session.query(TaskSet).filter(
+                    TaskSet.date_done < (datetime.now() - expires)).delete()
             session.commit()
         finally:
             session.close()

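Two query-level changes here: single-row lookups now use SQLAlchemy's `Query.first()` instead of materializing `.all()` and indexing, and `cleanup()` issues one bulk DELETE per table through `Query.delete()` instead of loading rows and deleting them one by one. A sketch of the new cleanup shape, assuming a SQLAlchemy session and a mapped `Task` model (names illustrative):

    from datetime import datetime, timedelta

    def cleanup(session, Task, expires=timedelta(days=1)):
        cutoff = datetime.now() - expires
        # One DELETE ... WHERE date_done < :cutoff round-trip.
        session.query(Task).filter(Task.date_done < cutoff).delete()
        session.commit()
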
+ 5 - 1
celery/backends/mongodb.py

@@ -35,6 +35,8 @@ class MongoBackend(BaseDictBackend):
             module :mod:`pymongo` is not available.
 
         """
+        self.result_expires = kwargs.get("result_expires") or \
+                                conf.TASK_RESULT_EXPIRES
 
         if not pymongo:
             raise ImproperlyConfigured(
@@ -107,6 +109,8 @@ class MongoBackend(BaseDictBackend):
         taskmeta_collection = db[self.mongodb_taskmeta_collection]
         taskmeta_collection.save(meta, safe=True)
 
+        return result
+
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
 
@@ -132,6 +136,6 @@ class MongoBackend(BaseDictBackend):
         taskmeta_collection = db[self.mongodb_taskmeta_collection]
         taskmeta_collection.remove({
                 "date_done": {
-                    "$lt": datetime.now() - conf.TASK_RESULT_EXPIRES,
+                    "$lt": datetime.now() - self.result_expires,
                  }
         })

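The MongoDB backend now reads its expiry window from the instance (`result_expires`, overridable per backend) instead of consulting the global conf at cleanup time. The remove filter keeps the same shape; an illustrative sketch of it:

    from datetime import datetime, timedelta

    def expiry_filter(result_expires=timedelta(days=1)):
        # Matches documents whose date_done is older than the window.
        return {"date_done": {"$lt": datetime.now() - result_expires}}
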
+ 5 - 4
celery/bin/celerybeat.py

@@ -49,11 +49,12 @@ OPTION_LIST = (
     optparse.make_option('-l', '--loglevel',
             default=conf.CELERYBEAT_LOG_LEVEL,
             action="store", dest="loglevel",
-            help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."),
+            help="Loglevel. One of DEBUG/INFO/WARNING/ERROR/CRITICAL."),
 )
 
 
 class Beat(object):
+    ClockService = ClockService
 
     def __init__(self, loglevel=conf.CELERYBEAT_LOG_LEVEL,
             logfile=conf.CELERYBEAT_LOG_FILE,
@@ -77,9 +78,9 @@ class Beat(object):
 
     def start_scheduler(self):
         from celery.log import setup_logger
-        logger = setup_logger(self.loglevel, self.logfile)
-        beat = ClockService(logger,
-                            schedule_filename=self.schedule)
+        logger = setup_logger(self.loglevel, self.logfile, name="celery.beat")
+        beat = self.ClockService(logger,
+                                 schedule_filename=self.schedule)
 
         try:
             self.install_sync_handler(beat)

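Binding `ClockService` as a class attribute makes the service class pluggable: tests and subclasses can swap it in without monkey-patching the module. A sketch of that override pattern (the stub classes below are illustrative, not celery code):

    class Beat(object):
        ClockService = None      # celery binds the real ClockService here

        def start_scheduler(self, logger, schedule):
            return self.ClockService(logger, schedule_filename=schedule)

    class StubClockService(object):
        def __init__(self, logger, schedule_filename=None):
            self.logger = logger
            self.schedule_filename = schedule_filename

    class TestBeat(Beat):
        ClockService = StubClockService     # swapped in, no patching needed
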
+ 34 - 20
celery/bin/celeryd.py

@@ -79,6 +79,7 @@ from celery.utils import info
 from celery.utils import get_full_cls_name
 from celery.worker import WorkController
 from celery.exceptions import ImproperlyConfigured
+from celery.routes import Router
 
 STARTUP_INFO_FMT = """
 Configuration ->
@@ -134,6 +135,11 @@ OPTION_LIST = (
                     option. The extension '.db' will be appended to the \
                     filename. Default: %s" % (
                     conf.CELERYBEAT_SCHEDULE_FILENAME)),
+    optparse.make_option('-S', '--statedb', default=conf.CELERYD_STATE_DB,
+            action="store", dest="db",
+            help="Path to the state database. The extension '.db' will \
+                 be appended to the filename. Default: %s" % (
+                     conf.CELERYD_STATE_DB)),
     optparse.make_option('-E', '--events', default=conf.SEND_EVENTS,
             action="store_true", dest="events",
             help="Send events so celery can be monitored by e.g. celerymon."),
@@ -159,6 +165,7 @@ OPTION_LIST = (
 
 
 class Worker(object):
+    WorkController = WorkController
 
     def __init__(self, concurrency=conf.CELERYD_CONCURRENCY,
             loglevel=conf.CELERYD_LOG_LEVEL, logfile=conf.CELERYD_LOG_FILE,
@@ -167,7 +174,7 @@ class Worker(object):
             task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
             task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
             max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
-            queues=None, events=False, **kwargs):
+            queues=None, events=False, db=None, **kwargs):
         self.concurrency = concurrency or multiprocessing.cpu_count()
         self.loglevel = loglevel
         self.logfile = logfile
@@ -179,7 +186,9 @@ class Worker(object):
         self.task_time_limit = task_time_limit
         self.task_soft_time_limit = task_soft_time_limit
         self.max_tasks_per_child = max_tasks_per_child
+        self.db = db
         self.queues = queues or []
+        self._isatty = sys.stdout.isatty()
 
         if isinstance(self.queues, basestring):
             self.queues = self.queues.split(",")
@@ -190,26 +199,17 @@ class Worker(object):
     def run(self):
         self.init_loader()
         self.init_queues()
+        self.worker_init()
         self.redirect_stdouts_to_logger()
         print("celery@%s v%s is starting." % (self.hostname,
                                               celery.__version__))
 
-
-        if conf.RESULT_BACKEND == "database" \
-                and self.settings.DATABASE_ENGINE == "sqlite3" and \
-                self.concurrency > 1:
-            warnings.warn("The sqlite3 database engine doesn't handle "
-                          "concurrency well. Will use a single process only.",
-                          UserWarning)
-            self.concurrency = 1
-
         if getattr(self.settings, "DEBUG", False):
             warnings.warn("Using settings.DEBUG leads to a memory leak, "
                     "never use this setting in a production environment!")
 
         if self.discard:
             self.purge_messages()
-        self.worker_init()
 
         # Dump configuration to screen so we have some basic information
         # for when users sends bug reports.
@@ -227,6 +227,13 @@ class Worker(object):
             conf.QUEUES = dict((queue, options)
                                 for queue, options in conf.QUEUES.items()
                                     if queue in self.queues)
+            for queue in self.queues:
+                if queue not in conf.QUEUES:
+                    if conf.CREATE_MISSING_QUEUES:
+                        Router(queues=conf.QUEUES).add_queue(queue)
+                    else:
+                        raise ImproperlyConfigured(
+                            "Queue '%s' not defined in CELERY_QUEUES" % queue)
 
     def init_loader(self):
         from celery.loaders import current_loader, load_settings
@@ -238,10 +245,12 @@ class Worker(object):
 
     def redirect_stdouts_to_logger(self):
         from celery import log
+        handled = log.setup_logging_subsystem(loglevel=self.loglevel,
+                                              logfile=self.logfile)
         # Redirect stdout/stderr to our logger.
-        logger = log.setup_logger(loglevel=self.loglevel,
-                                  logfile=self.logfile)
-        log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
+        if not handled:
+            logger = log.get_default_logger()
+            log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
 
     def purge_messages(self):
         discarded_count = discard_all()
@@ -269,9 +278,11 @@ class Worker(object):
             include_builtins = self.loglevel <= logging.DEBUG
             tasklist = self.tasklist(include_builtins=include_builtins)
 
+        queues = conf.get_queues()
+
         return STARTUP_INFO_FMT % {
             "conninfo": info.format_broker_info(),
-            "queues": info.format_routing_table(indent=8),
+            "queues": info.format_queues(queues, indent=8),
             "concurrency": self.concurrency,
             "concurrency": self.concurrency,
             "loglevel": conf.LOG_LEVELS[self.loglevel],
             "loglevel": conf.LOG_LEVELS[self.loglevel],
             "logfile": self.logfile or "[stderr]",
             "logfile": self.logfile or "[stderr]",
@@ -282,7 +293,7 @@ class Worker(object):
         }
 
     def run_worker(self):
-        worker = WorkController(concurrency=self.concurrency,
+        worker = self.WorkController(concurrency=self.concurrency,
                                 loglevel=self.loglevel,
                                 logfile=self.logfile,
                                 hostname=self.hostname,
@@ -290,15 +301,19 @@ class Worker(object):
                                 embed_clockservice=self.run_clockservice,
                                 schedule_filename=self.schedule,
                                 send_events=self.events,
+                                db=self.db,
                                 max_tasks_per_child=self.max_tasks_per_child,
                                 task_time_limit=self.task_time_limit,
                                 task_soft_time_limit=self.task_soft_time_limit)
 
         # Install signal handler so SIGHUP restarts the worker.
-        install_worker_restart_handler(worker)
+        if not self._isatty:
+            # only install HUP handler if detached from terminal,
+            # so closing the terminal window doesn't restart celeryd
+            # into the background.
+            install_worker_restart_handler(worker)
         install_worker_term_handler(worker)
         install_worker_int_handler(worker)
-
         signals.worker_init.send(sender=worker)
         worker.start()
 
@@ -333,7 +348,6 @@ def install_worker_int_again_handler(worker):
     platform.install_signal_handler("SIGINT", _stop)
 
 
-
 def install_worker_term_handler(worker):
 
     def _stop(signum, frame):
@@ -370,7 +384,7 @@ def set_process_status(info):
     arg_start = "manage" in sys.argv[0] and 2 or 1
     arg_start = "manage" in sys.argv[0] and 2 or 1
     if sys.argv[arg_start:]:
     if sys.argv[arg_start:]:
         info = "%s (%s)" % (info, " ".join(sys.argv[arg_start:]))
         info = "%s (%s)" % (info, " ".join(sys.argv[arg_start:]))
-    platform.set_mp_process_title("celeryd", info=info)
+    return platform.set_mp_process_title("celeryd", info=info)
 
 
 def run_worker(**options):

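Two behavioral notes on celeryd: queues requested with -Q must now either exist in CELERY_QUEUES or be auto-added when CREATE_MISSING_QUEUES is set, and the SIGHUP restart handler is only installed when detached from a terminal. A sketch of the queue validation, with plain dicts standing in for conf.QUEUES and the Router (names illustrative):

    def validate_queues(requested, defined, create_missing=True):
        for queue in requested:
            if queue not in defined:
                if not create_missing:
                    raise ValueError(
                        "Queue '%s' not defined in CELERY_QUEUES" % queue)
                defined[queue] = {}   # Router.add_queue fills in real options
        return defined

    queues = validate_queues(["video"], {"default": {}})
    assert sorted(queues) == ["default", "video"]
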
+ 28 - 491
celery/bin/celeryev.py

@@ -1,505 +1,42 @@
-import sys
-import time
-import curses
-import socket
+import logging
 import optparse
-import threading
-
-from datetime import datetime
-from textwrap import wrap
-from itertools import count
-
-from carrot.utils import rpartition
-
-import celery
-from celery import states
-from celery.task import control
-from celery.events import EventReceiver
-from celery.events.state import State
-from celery.messaging import establish_connection
-from celery.datastructures import LocalCache
+import sys
 
-TASK_NAMES = LocalCache(0xFFF)
+from celery.events.cursesmon import evtop
+from celery.events.dumper import evdump
+from celery.events.snapshot import evcam
 
-HUMAN_TYPES = {"worker-offline": "shutdown",
-               "worker-online": "started",
-               "worker-heartbeat": "heartbeat"}
 
 OPTION_LIST = (
-    optparse.make_option('-d', '--DUMP',
+    optparse.make_option('-d', '--dump',
         action="store_true", dest="dump",
         action="store_true", dest="dump",
         help="Dump events to stdout."),
         help="Dump events to stdout."),
+    optparse.make_option('-c', '--camera',
+        action="store", dest="camera",
+        help="Camera class to take event snapshots with."),
+    optparse.make_option('-F', '--frequency', '--freq',
+        action="store", dest="frequency", type="float", default=1.0,
+        help="Recording: Snapshot frequency."),
+    optparse.make_option('-r', '--maxrate',
+        action="store", dest="maxrate", default=None,
+        help="Recording: Shutter rate limit (e.g. 10/m)"),
+    optparse.make_option('-l', '--loglevel',
+        action="store", dest="loglevel", default="WARNING",
+        help="Loglevel. Default is WARNING."),
+    optparse.make_option('-f', '--logfile',
+        action="store", dest="logfile", default=None,
+        help="Log file. Default is <stderr>"),
 )
 
 
-def humanize_type(type):
-    try:
-        return HUMAN_TYPES[type.lower()]
-    except KeyError:
-        return type.lower().replace("-", " ")
-
-
-class Dumper(object):
-
-    def on_event(self, event):
-        timestamp = datetime.fromtimestamp(event.pop("timestamp"))
-        type = event.pop("type").lower()
-        hostname = event.pop("hostname")
-        if type.startswith("task-"):
-            uuid = event.pop("uuid")
-            if type.startswith("task-received"):
-                task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
-                        event.pop("name"), uuid,
-                        event.pop("args"),
-                        event.pop("kwargs"))
-            else:
-                task = TASK_NAMES.get(uuid, "")
-            return self.format_task_event(hostname, timestamp,
-                                          type, task, event)
-        fields = ", ".join("%s=%s" % (key, event[key])
-                        for key in sorted(event.keys()))
-        sep = fields and ":" or ""
-        print("%s [%s] %s%s %s" % (hostname, timestamp,
-                                    humanize_type(type), sep, fields))
-
-    def format_task_event(self, hostname, timestamp, type, task, event):
-        fields = ", ".join("%s=%s" % (key, event[key])
-                        for key in sorted(event.keys()))
-        sep = fields and ":" or ""
-        print("%s [%s] %s%s %s %s" % (hostname, timestamp,
-                                    humanize_type(type), sep, task, fields))
-
-
-def abbr(S, max, dots=True):
-    if S is None:
-        return "???"
-    if len(S) > max:
-        return dots and S[:max-3] + "..." or S[:max-3]
-    return S
-
-
-def abbrtask(S, max):
-    if S is None:
-        return "???"
-    if len(S) > max:
-        module, _, cls = rpartition(S, ".")
-        module = abbr(module, max - len(cls), False)
-        return module + "[.]" + cls
-    return S
-
-
-class CursesMonitor(object):
-    keymap = {}
-    win = None
-    screen_width = None
-    screen_delay = 0.1
-    selected_task = None
-    selected_position = 0
-    selected_str = "Selected: "
-    limit = 20
-    foreground = curses.COLOR_BLACK
-    background = curses.COLOR_WHITE
-    online_str = "Workers online: "
-    help_title = "Keys: "
-    help = ("j:up k:down i:info t:traceback r:result c:revoke ^c: quit")
-    greet = "celeryev %s" % celery.__version__
-    info_str = "Info: "
-
-    def __init__(self, state, keymap=None):
-        self.keymap = keymap or self.keymap
-        self.state = state
-        default_keymap = {"J": self.move_selection_down,
-                          "K": self.move_selection_up,
-                          "C": self.revoke_selection,
-                          "T": self.selection_traceback,
-                          "R": self.selection_result,
-                          "I": self.selection_info,
-                          "L": self.selection_rate_limit}
-        self.keymap = dict(default_keymap, **self.keymap)
-
-    def format_row(self, uuid, worker, task, timestamp, state):
-        my, mx = self.win.getmaxyx()
-        mx = mx - 3
-        uuid_max = 36
-        if mx < 88:
-            uuid_max = mx - 52 - 2
-        uuid = abbr(uuid, uuid_max).ljust(uuid_max)
-        worker = abbr(worker, 16).ljust(16)
-        task = abbrtask(task, 16).ljust(16)
-        state = abbr(state, 8).ljust(8)
-        timestamp = timestamp.ljust(8)
-        row = "%s %s %s %s %s " % (uuid, worker, task, timestamp, state)
-        if self.screen_width is None:
-            self.screen_width = len(row[:mx])
-        return row[:mx]
-
-    def find_position(self):
-        if not self.tasks:
-            return 0
-        for i, e in enumerate(self.tasks):
-            if self.selected_task == e[0]:
-                return i
-        return 0
-
-    def move_selection_up(self):
-        self.move_selection(-1)
-
-    def move_selection_down(self):
-        self.move_selection(1)
-
-    def move_selection(self, direction=1):
-        if not self.tasks:
-            return
-        pos = self.find_position()
-        try:
-            self.selected_task = self.tasks[pos + direction][0]
-        except IndexError:
-            self.selected_task = self.tasks[0][0]
-
-    keyalias = {curses.KEY_DOWN: "J",
-                curses.KEY_UP: "K",
-                curses.KEY_ENTER: "I"}
-
-    def handle_keypress(self):
-        try:
-            key = self.win.getkey().upper()
-        except:
-            return
-        key = self.keyalias.get(key) or key
-        handler = self.keymap.get(key)
-        if handler is not None:
-            handler()
-
-    def alert(self, callback, title=None):
-        self.win.erase()
-        my, mx = self.win.getmaxyx()
-        y = blank_line = count(2).next
-        if title:
-            self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE)
-            blank_line()
-        callback(my, mx, y())
-        self.win.addstr(my - 1, 0, "Press any key to continue...",
-                        curses.A_BOLD)
-        self.win.refresh()
-        while 1:
-            try:
-                return self.win.getkey().upper()
-            except:
-                pass
-
-    def selection_rate_limit(self):
-        if not self.selected_task:
-            return curses.beep()
-        task = self.state.tasks[self.selected_task]
-        if not task.name:
-            return curses.beep()
-
-        my, mx = self.win.getmaxyx()
-        r = "New rate limit: "
-        self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
-        self.win.addstr(my - 2, len(r) + 3, " " * (mx - len(r)))
-        rlimit = self.readline(my - 2, 3 + len(r))
-
-        if rlimit:
-            reply = control.rate_limit(task.name, rlimit.strip(), reply=True)
-            self.alert_remote_control_reply(reply)
-
-    def alert_remote_control_reply(self, reply):
-
-        def callback(my, mx, xs):
-            y = count(xs).next
-            if not reply:
-                self.win.addstr(y(), 3, "No replies received in 1s deadline.",
-                        curses.A_BOLD + curses.color_pair(2))
-                return
-
-            for subreply in reply:
-                curline = y()
-
-                host, response = subreply.items()[0]
-                host = "%s: " % host
-                self.win.addstr(curline, 3, host, curses.A_BOLD)
-                attr = curses.A_NORMAL
-                text = ""
-                if "error" in response:
-                    text = response["error"]
-                    attr |= curses.color_pair(2)
-                elif "ok" in response:
-                    text = response["ok"]
-                    attr |= curses.color_pair(3)
-                self.win.addstr(curline, 3 + len(host), text, attr)
-
-        return self.alert(callback, "Remote Control Command Replies")
-
-    def readline(self, x, y):
-        buffer = str()
-        curses.echo()
-        try:
-            i = 0
-            while True:
-                ch = self.win.getch(x, y + i)
-                if ch != -1:
-                    if ch in (10, curses.KEY_ENTER): # enter
-                        break
-                    if ch in (27, ):
-                        buffer = str()
-                        break
-                    buffer += chr(ch)
-                    i += 1
-        finally:
-            curses.noecho()
-        return buffer
-
-    def revoke_selection(self):
-        if not self.selected_task:
-            return curses.beep()
-        reply = control.revoke(self.selected_task, reply=True)
-        self.alert_remote_control_reply(reply)
-
-    def selection_info(self):
-        if not self.selected_task:
-            return
-
-        def alert_callback(mx, my, xs):
-            y = count(xs).next
-            task = self.state.tasks[self.selected_task]
-            info = task.info(extra=["state"])
-            infoitems = [("args", info.pop("args", None)),
-                         ("kwargs", info.pop("kwargs", None))] + info.items()
-            for key, value in infoitems:
-                if key is None:
-                    continue
-                curline = y()
-                keys = key + ": "
-                self.win.addstr(curline, 3, keys, curses.A_BOLD)
-                wrapped = wrap(str(value), mx - 2)
-                if len(wrapped) == 1:
-                    self.win.addstr(curline, len(keys) + 3, wrapped[0])
-                else:
-                    for subline in wrapped:
-                        self.win.addstr(y(), 3, " " * 4 + subline,
-                                curses.A_NORMAL)
-
-        return self.alert(alert_callback,
-                "Task details for %s" % self.selected_task)
-
-    def selection_traceback(self):
-        if not self.selected_task:
-            return curses.beep()
-        task = self.state.tasks[self.selected_task]
-        if task.state not in states.EXCEPTION_STATES:
-            return curses.beep()
-
-        def alert_callback(my, mx, xs):
-            y = count(xs).next
-            for line in task.traceback.split("\n"):
-                self.win.addstr(y(), 3, line)
-
-        return self.alert(alert_callback,
-                "Task Exception Traceback for %s" % self.selected_task)
-
-    def selection_result(self):
-        if not self.selected_task:
-            return
-
-        def alert_callback(my, mx, xs):
-            y = count(xs).next
-            task = self.state.tasks[self.selected_task]
-            result = getattr(task, "result", None) or getattr(task,
-                    "exception", None)
-            for line in wrap(result, mx - 2):
-                self.win.addstr(y(), 3, line)
-
-        return self.alert(alert_callback,
-                "Task Result for %s" % self.selected_task)
-
-    def draw(self):
-        win = self.win
-        self.handle_keypress()
-        x = 3
-        y = blank_line = count(2).next
-        my, mx = win.getmaxyx()
-        win.erase()
-        win.bkgd(" ", curses.color_pair(1))
-        win.border()
-        win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
-        blank_line()
-        win.addstr(y(), x, self.format_row("UUID", "TASK",
-                                           "WORKER", "TIME", "STATE"),
-                curses.A_BOLD | curses.A_UNDERLINE)
-        tasks = self.tasks
-        if tasks:
-            for uuid, task in tasks:
-                if task.uuid:
-                    state_color = self.state_colors.get(task.state)
-                    attr = curses.A_NORMAL
-                    if task.uuid == self.selected_task:
-                        attr = curses.A_STANDOUT
-                    timestamp = datetime.fromtimestamp(
-                                    task.timestamp or time.time())
-                    timef = timestamp.strftime("%H:%M:%S")
-                    line = self.format_row(uuid, task.name,
-                                           task.worker.hostname,
-                                           timef, task.state)
-                    lineno = y()
-                    win.addstr(lineno, x, line, attr)
-                    if state_color:
-                        win.addstr(lineno, len(line) - len(task.state) + 1,
-                                task.state, state_color | attr)
-                    if task.ready:
-                        task.visited = time.time()
-
-        # -- Footer
-        blank_line()
-        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width)
-
-        # Selected Task Info
-        if self.selected_task:
-            win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
-            info = "Missing extended info"
-            try:
-                selection = self.state.tasks[self.selected_task]
-            except KeyError:
-                pass
-            else:
-                info = selection.info(["args", "kwargs",
-                                       "result", "runtime", "eta"])
-                if "runtime" in info:
-                    info["runtime"] = "%.2fs" % info["runtime"]
-                if "result" in info:
-                    info["result"] = abbr(info["result"], 16)
-                info = " ".join("%s=%s" % (key, value)
-                            for key, value in info.items())
-            win.addstr(my - 5, x + len(self.selected_str), info)
-        else:
-            win.addstr(my - 5, x, "No task selected", curses.A_NORMAL)
-
-
-        # Workers
-        if self.workers:
-            win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
-            win.addstr(my - 4, x + len(self.online_str),
-                    ", ".join(self.workers), curses.A_NORMAL)
-        else:
-            win.addstr(my - 4, x, "No workers discovered.")
-
-        # Info
-        win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
-        win.addstr(my - 3, x + len(self.info_str),
-                "events:%s tasks:%s workers:%s/%s" % (
-                    self.state.event_count, self.state.task_count,
-                    len([w for w in self.state.workers.values()
-                            if w.alive]),
-                    len(self.state.workers)),
-                curses.A_DIM)
-
-        # Help
-        win.addstr(my - 2, x, self.help_title, curses.A_BOLD)
-        win.addstr(my - 2, x + len(self.help_title), self.help, curses.A_DIM)
-        win.refresh()
-
-    def init_screen(self):
-        self.win = curses.initscr()
-        self.win.nodelay(True)
-        self.win.keypad(True)
-        curses.start_color()
-        curses.init_pair(1, self.foreground, self.background)
-        # exception states
-        curses.init_pair(2, curses.COLOR_RED, self.background)
-        # successful state
-        curses.init_pair(3, curses.COLOR_GREEN, self.background)
-        # revoked state
-        curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
-        # greeting
-        curses.init_pair(5, curses.COLOR_BLUE, self.background)
-        # started state
-        curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
-
-        self.state_colors = {states.SUCCESS: curses.color_pair(3),
-                             states.REVOKED: curses.color_pair(4),
-                             states.STARTED: curses.color_pair(6)}
-        for state in states.EXCEPTION_STATES:
-            self.state_colors[state] = curses.color_pair(2)
-
-        curses.cbreak()
-
-    def resetscreen(self):
-        curses.nocbreak()
-        self.win.keypad(False)
-        curses.echo()
-        curses.endwin()
-
-    def nap(self):
-        curses.napms(int(self.screen_delay * 1000))
-
-    @property
-    def tasks(self):
-        return self.state.tasks_by_timestamp()[:self.limit]
-
-    @property
-    def workers(self):
-        return [hostname
-                    for hostname, w in self.state.workers.items()
-                        if w.alive]
-
-
-class DisplayThread(threading.Thread):
-
-    def __init__(self, display):
-        self.display = display
-        self.shutdown = False
-        threading.Thread.__init__(self)
-
-    def run(self):
-        while not self.shutdown:
-            self.display.draw()
-            self.display.nap()
-
-
-def eventtop():
-    sys.stderr.write("-> celeryev: starting capture...\n")
-    state = State()
-    display = CursesMonitor(state)
-    display.init_screen()
-    refresher = DisplayThread(display)
-    refresher.start()
-    conn = establish_connection()
-    recv = EventReceiver(conn, handlers={"*": state.event})
-    try:
-        consumer = recv.consumer()
-        consumer.consume()
-        while True:
-            try:
-                conn.connection.drain_events()
-            except socket.timeout:
-                pass
-    except Exception:
-        refresher.shutdown = True
-        refresher.join()
-        display.resetscreen()
-        raise
-    except (KeyboardInterrupt, SystemExit):
-        conn and conn.close()
-        refresher.shutdown = True
-        refresher.join()
-        display.resetscreen()
-
-
-def eventdump():
-    sys.stderr.write("-> celeryev: starting capture...\n")
-    dumper = Dumper()
-    conn = establish_connection()
-    recv = EventReceiver(conn, handlers={"*": dumper.on_event})
-    try:
-        recv.capture()
-    except (KeyboardInterrupt, SystemExit):
-        conn and conn.close()
-
-
-def run_celeryev(dump=False, **kwargs):
+def run_celeryev(dump=False, camera=None, frequency=1.0, maxrate=None,
+        loglevel=logging.WARNING, logfile=None, **kwargs):
     if dump:
-        return eventdump()
-    return eventtop()
+        return evdump()
+    if camera:
+        return evcam(camera, frequency, maxrate,
+                     loglevel=loglevel, logfile=logfile)
+    return evtop()
 
 
 def parse_options(arguments):

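celeryev is now a thin dispatcher over three modes: --dump maps to evdump(), --camera to evcam(), and the default to the curses monitor evtop(). Illustrative invocations of the entry point above (the camera path is a made-up example):

    run_celeryev(dump=True)                         # celeryev --dump
    run_celeryev(camera="myapp.Camera",             # celeryev -c myapp.Camera
                 frequency=2.0, maxrate="10/m")     #          -F 2.0 -r 10/m
    run_celeryev()                                  # celeryev (curses monitor)
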
+ 21 - 6
celery/concurrency/processes/__init__.py

@@ -27,6 +27,7 @@ class TaskPool(object):
         The logger used for debugging.
 
     """
+    Pool = Pool
 
     def __init__(self, limit, logger=None, initializer=None,
             maxtasksperchild=None, timeout=None, soft_timeout=None,
@@ -46,11 +47,11 @@ class TaskPool(object):
         Will pre-fork all workers so they're ready to accept tasks.
 
         """
-        self._pool = Pool(processes=self.limit,
-                          initializer=self.initializer,
-                          timeout=self.timeout,
-                          soft_timeout=self.soft_timeout,
-                          maxtasksperchild=self.maxtasksperchild)
+        self._pool = self.Pool(processes=self.limit,
+                               initializer=self.initializer,
+                               timeout=self.timeout,
+                               soft_timeout=self.soft_timeout,
+                               maxtasksperchild=self.maxtasksperchild)
 
     def stop(self):
         """Gracefully stop the pool."""
@@ -80,6 +81,7 @@ class TaskPool(object):
         errbacks = errbacks or []
 
         on_ready = curry(self.on_ready, callbacks, errbacks)
+        on_worker_error = curry(self.on_worker_error, errbacks)
 
         self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
             target, args, kwargs))
@@ -88,16 +90,29 @@ class TaskPool(object):
                                       callback=on_ready,
                                       accept_callback=accept_callback,
                                       timeout_callback=timeout_callback,
+                                      error_callback=on_worker_error,
                                       waitforslot=self.putlocks)
 
+    def on_worker_error(self, errbacks, exc):
+        einfo = ExceptionInfo((exc.__class__, exc, None))
+        [errback(einfo) for errback in errbacks]
+
     def on_ready(self, callbacks, errbacks, ret_value):
         """What to do when a worker task is ready and its return value has
         been collected."""
 
         if isinstance(ret_value, ExceptionInfo):
             if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)): # pragma: no cover
+                    SystemExit, KeyboardInterrupt)):
                 raise ret_value.exception
                 raise ret_value.exception
             [errback(ret_value) for errback in errbacks]
             [errback(ret_value) for errback in errbacks]
         else:
         else:
             [callback(ret_value) for callback in callbacks]
             [callback(ret_value) for callback in callbacks]
+
+    @property
+    def info(self):
+        return {"max-concurrency": self.limit,
+                "processes": [p.pid for p in self._pool._pool],
+                "max-tasks-per-child": self.maxtasksperchild,
+                "put-guarded-by-semaphore": self.putlocks,
+                "timeouts": (self.soft_timeout, self.timeout)}

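With Pool promoted to a class attribute, start() instantiates self.Pool, so the pool implementation becomes pluggable through subclassing alone. A minimal sketch of the pattern; the logging subclass is hypothetical:

    from celery.concurrency.processes import TaskPool
    from celery.concurrency.processes.pool import Pool


    class LoggingPool(Pool):

        def apply_async(self, func, args=(), kwds={}, **options):
            # Trace every submission before delegating to the real pool.
            print("pool got: %r" % (func, ))
            return Pool.apply_async(self, func, args, kwds, **options)


    class LoggingTaskPool(TaskPool):
        Pool = LoggingPool      # picked up by start() via self.Pool(...)
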
+ 45 - 9
celery/concurrency/processes/pool.py

@@ -51,6 +51,22 @@ def mapstar(args):
 #


+class MaybeEncodingError(Exception):
+    """Wraps unpickleable object."""
+
+    def __init__(self, exc, value):
+        self.exc = str(exc)
+        self.value = repr(value)
+        super(MaybeEncodingError, self).__init__(self.exc, self.value)
+
+    def __repr__(self):
+        return "<MaybeEncodingError: %s>" % str(self)
+
+    def __str__(self):
+        return "Error sending result: '%s'. Reason: '%s'." % (
+                    self.value, self.exc)
+
+
 def soft_timeout_sighandler(signum, frame):
     raise SoftTimeLimitExceeded()

@@ -90,7 +106,12 @@ def worker(inqueue, outqueue, ackqueue, initializer=None, initargs=(),
             result = (True, func(*args, **kwds))
         except Exception, e:
             result = (False, e)
-        put((job, i, result))
+        try:
+            put((job, i, result))
+        except Exception, exc:
+            wrapped = MaybeEncodingError(exc, result[1])
+            put((job, i, (False, wrapped)))
+
         completed += 1
     debug('worker exiting after %d tasks' % completed)

@@ -239,8 +260,8 @@ class AckHandler(PoolThread):

 class TimeoutHandler(PoolThread):

-    def __init__(self, processes, sentinel_event, cache, t_soft, t_hard):
-        self.sentinel_event = sentinel_event
+    def __init__(self, processes, cache, t_soft, t_hard):
+        self.processes = processes
         self.cache = cache
         self.t_soft = t_soft
         self.t_hard = t_hard
@@ -350,7 +371,10 @@ class ResultHandler(PoolThread):
                 return

             if putlock is not None:
-                putlock.release()
+                try:
+                    putlock.release()
+                except ValueError:
+                    pass

             if self._state:
                 assert self._state == TERMINATE
@@ -368,7 +392,10 @@ class ResultHandler(PoolThread):
                 pass

         if putlock is not None:
-            putlock.release()
+            try:
+                putlock.release()
+            except ValueError:
+                pass

         while cache and self._state != TERMINATE:
             try:
@@ -449,7 +476,7 @@ class Pool(object):
         self._worker_handler = self.Supervisor(self)
         self._worker_handler.start()

-        self._putlock = threading.Semaphore(self._processes)
+        self._putlock = threading.BoundedSemaphore(self._processes)

         self._task_handler = self.TaskHandler(self._taskqueue, self._quick_put,
                                          self._outqueue, self._pool)
@@ -508,6 +535,11 @@ class Pool(object):
             if worker.exitcode is not None:
                 # worker exited
                 debug('cleaning up worker %d' % i)
+                if self._putlock is not None:
+                    try:
+                        self._putlock.release()
+                    except ValueError:
+                        pass
                 worker.join()
                 del self._pool[i]
         return len(self._pool) < self._processes
@@ -589,7 +621,7 @@ class Pool(object):

     def apply_async(self, func, args=(), kwds={},
             callback=None, accept_callback=None, timeout_callback=None,
-            waitforslot=False):
+            waitforslot=False, error_callback=None):
         '''
         Asynchronous equivalent of `apply()` builtin.

@@ -607,7 +639,8 @@ class Pool(object):
         '''
         assert self._state == RUN
         result = ApplyResult(self._cache, callback,
-                             accept_callback, timeout_callback)
+                             accept_callback, timeout_callback,
+                             error_callback)
         if waitforslot:
             self._putlock.acquire()
         self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
@@ -742,7 +775,7 @@ DynamicPool = Pool
 class ApplyResult(object):

     def __init__(self, cache, callback, accept_callback=None,
-            timeout_callback=None):
+            timeout_callback=None, error_callback=None):
         self._cond = threading.Condition(threading.Lock())
         self._job = job_counter.next()
         self._cache = cache
@@ -751,6 +784,7 @@ class ApplyResult(object):
         self._time_accepted = None
         self._ready = False
         self._callback = callback
+        self._errback = error_callback
         self._accept_callback = accept_callback
         self._timeout_callback = timeout_callback
         cache[self._job] = self
@@ -786,6 +820,8 @@ class ApplyResult(object):
         self._success, self._value = obj
         if self._callback and self._success:
             self._callback(self._value)
+        if self._errback and not self._success:
+            self._errback(self._value)
         self._cond.acquire()
         try:
             self._ready = True

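The put-lock switching from Semaphore to BoundedSemaphore is what makes the new try/except ValueError guards around putlock.release() necessary: a bounded semaphore treats releasing more times than acquired as an error instead of silently growing its counter. A standard-library-only sketch of the difference:

    import threading

    plain = threading.Semaphore(2)
    plain.release()                  # counter silently becomes 3

    bounded = threading.BoundedSemaphore(2)
    try:
        bounded.release()            # one release too many
    except ValueError:
        print("over-release detected")
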
+ 26 - 10
celery/conf.py

@@ -22,10 +22,18 @@ LOG_LEVELS[logging.FATAL] = "FATAL"
 settings = load_settings()

 _DEFAULTS = {
+    "BROKER_CONNECTION_TIMEOUT": 4,
+    "BROKER_CONNECTION_RETRY": True,
+    "BROKER_CONNECTION_MAX_RETRIES": 100,
+    "BROKER_HOST": "localhost",
+    "BROKER_PORT": None,
+    "BROKER_USER": "guest",
+    "BROKER_PASSWORD": "guest",
+    "BROKER_VHOST": "/",
     "CELERY_RESULT_BACKEND": "database",
     "CELERY_ALWAYS_EAGER": False,
     "CELERY_EAGER_PROPAGATES_EXCEPTIONS": False,
-    "CELERY_TASK_RESULT_EXPIRES": timedelta(days=5),
+    "CELERY_TASK_RESULT_EXPIRES": timedelta(days=1),
     "CELERY_SEND_EVENTS": False,
     "CELERY_IGNORE_RESULT": False,
     "CELERY_STORE_ERRORS_EVEN_IF_IGNORED": False,
@@ -35,19 +43,17 @@ _DEFAULTS = {
     "CELERYD_TASK_SOFT_TIME_LIMIT": None,
     "CELERYD_MAX_TASKS_PER_CHILD": None,
     "CELERY_ROUTES": None,
+    "CELERY_CREATE_MISSING_QUEUES": True,
     "CELERY_DEFAULT_ROUTING_KEY": "celery",
     "CELERY_DEFAULT_QUEUE": "celery",
     "CELERY_DEFAULT_EXCHANGE": "celery",
     "CELERY_DEFAULT_EXCHANGE_TYPE": "direct",
     "CELERY_DEFAULT_DELIVERY_MODE": 2, # persistent
-    "BROKER_CONNECTION_TIMEOUT": 4,
-    "BROKER_CONNECTION_RETRY": True,
-    "BROKER_CONNECTION_MAX_RETRIES": 100,
     "CELERY_ACKS_LATE": False,
     "CELERYD_POOL_PUTLOCKS": True,
     "CELERYD_POOL": "celery.concurrency.processes.TaskPool",
     "CELERYD_MEDIATOR": "celery.worker.controllers.Mediator",
-    "CELERYD_ETA_SCHEDULER": "celery.worker.controllers.ScheduleController",
+    "CELERYD_ETA_SCHEDULER": "timer2.Timer",
     "CELERYD_LISTENER": "celery.worker.listener.CarrotListener",
     "CELERYD_CONCURRENCY": 0, # defaults to cpu count
     "CELERYD_PREFETCH_MULTIPLIER": 4,
@@ -57,6 +63,8 @@ _DEFAULTS = {
     "CELERYD_LOG_LEVEL": "WARN",
     "CELERYD_LOG_FILE": None, # stderr
     "CELERYBEAT_SCHEDULE": {},
+    "CELERYD_STATE_DB": None,
+    "CELERYD_ETA_SCHEDULER_PRECISION": 1,
     "CELERYBEAT_SCHEDULE_FILENAME": "celerybeat-schedule",
     "CELERYBEAT_MAX_LOOP_INTERVAL": 5 * 60, # five minutes.
     "CELERYBEAT_LOG_LEVEL": "INFO",
@@ -117,7 +125,8 @@ ALWAYS_EAGER = _get("CELERY_ALWAYS_EAGER")
 EAGER_PROPAGATES_EXCEPTIONS = _get("CELERY_EAGER_PROPAGATES_EXCEPTIONS")
 RESULT_BACKEND = _get("CELERY_RESULT_BACKEND", compat=["CELERY_BACKEND"])
 CELERY_BACKEND = RESULT_BACKEND # FIXME Remove in 1.4
-CELERY_CACHE_BACKEND = _get("CELERY_CACHE_BACKEND")
+CACHE_BACKEND = _get("CELERY_CACHE_BACKEND") or _get("CACHE_BACKEND")
+CACHE_BACKEND_OPTIONS = _get("CELERY_CACHE_BACKEND_OPTIONS") or {}
 TASK_SERIALIZER = _get("CELERY_TASK_SERIALIZER")
 TASK_RESULT_EXPIRES = _get("CELERY_TASK_RESULT_EXPIRES")
 IGNORE_RESULT = _get("CELERY_IGNORE_RESULT")
@@ -147,6 +156,7 @@ CELERYD_MAX_TASKS_PER_CHILD = _get("CELERYD_MAX_TASKS_PER_CHILD")
 STORE_ERRORS_EVEN_IF_IGNORED = _get("CELERY_STORE_ERRORS_EVEN_IF_IGNORED")
 CELERY_SEND_TASK_ERROR_EMAILS = _get("CELERY_SEND_TASK_ERROR_EMAILS", False,
                                      compat=["SEND_CELERY_TASK_ERROR_EMAILS"])
+CELERY_TASK_ERROR_WHITELIST = _get("CELERY_TASK_ERROR_WHITELIST")
 CELERYD_LOG_FORMAT = _get("CELERYD_LOG_FORMAT",
                           compat=["CELERYD_DAEMON_LOG_FORMAT"])
 CELERYD_TASK_LOG_FORMAT = _get("CELERYD_TASK_LOG_FORMAT")
@@ -156,6 +166,7 @@ CELERYD_LOG_COLOR = _get("CELERYD_LOG_COLOR",
 CELERYD_LOG_LEVEL = _get("CELERYD_LOG_LEVEL",
                             compat=["CELERYD_DAEMON_LOG_LEVEL"])
 CELERYD_LOG_LEVEL = LOG_LEVELS[CELERYD_LOG_LEVEL.upper()]
+CELERYD_STATE_DB = _get("CELERYD_STATE_DB")
 CELERYD_CONCURRENCY = _get("CELERYD_CONCURRENCY")
 CELERYD_PREFETCH_MULTIPLIER = _get("CELERYD_PREFETCH_MULTIPLIER")
 CELERYD_POOL_PUTLOCKS = _get("CELERYD_POOL_PUTLOCKS")
@@ -164,6 +175,7 @@ CELERYD_POOL = _get("CELERYD_POOL")
 CELERYD_LISTENER = _get("CELERYD_LISTENER")
 CELERYD_MEDIATOR = _get("CELERYD_MEDIATOR")
 CELERYD_ETA_SCHEDULER = _get("CELERYD_ETA_SCHEDULER")
+CELERYD_ETA_SCHEDULER_PRECISION = _get("CELERYD_ETA_SCHEDULER_PRECISION")

 # :--- Email settings                               <-   --   --- - ----- -- #
 ADMINS = _get("ADMINS")
@@ -188,7 +200,9 @@ BROKER_CONNECTION_RETRY = _get("BROKER_CONNECTION_RETRY",
                                 compat=["CELERY_BROKER_CONNECTION_RETRY"])
 BROKER_CONNECTION_MAX_RETRIES = _get("BROKER_CONNECTION_MAX_RETRIES",
                             compat=["CELERY_BROKER_CONNECTION_MAX_RETRIES"])
-BROKER_BACKEND = _get("BROKER_BACKEND") or _get("CARROT_BACKEND")
+BROKER_BACKEND = _get("BROKER_TRANSPORT") or \
+                        _get("BROKER_BACKEND") or \
+                            _get("CARROT_BACKEND")

 # <--- Message routing                             <-   --   --- - ----- -- #
 DEFAULT_QUEUE = _get("CELERY_DEFAULT_QUEUE")
@@ -200,6 +214,7 @@ QUEUES = _get("CELERY_QUEUES") or {DEFAULT_QUEUE: {
                                        "exchange": DEFAULT_EXCHANGE,
                                        "exchange_type": DEFAULT_EXCHANGE_TYPE,
                                        "binding_key": DEFAULT_ROUTING_KEY}}
+CREATE_MISSING_QUEUES = _get("CELERY_CREATE_MISSING_QUEUES")
 ROUTES = routes.prepare(_get("CELERY_ROUTES") or [])
 # :--- Broadcast queue settings                     <-   --   --- - ----- -- #

@@ -234,7 +249,7 @@ CELERYMON_LOG_LEVEL = _get("CELERYMON_LOG_LEVEL")
 CELERYMON_LOG_FILE = _get("CELERYMON_LOG_FILE")


-def _init_routing_table(queues):
+def _init_queues(queues):
     """Convert configuration mapping to a table of queues digestible
     by a :class:`carrot.messaging.ConsumerSet`."""

@@ -242,10 +257,11 @@ def _init_routing_table(queues):
         opts.setdefault("exchange", DEFAULT_EXCHANGE),
         opts.setdefault("exchange_type", DEFAULT_EXCHANGE_TYPE)
         opts.setdefault("binding_key", DEFAULT_EXCHANGE)
+        opts.setdefault("routing_key", opts.get("binding_key"))
         return opts

     return dict((queue, _defaults(opts)) for queue, opts in queues.items())


-def get_routing_table():
-    return _init_routing_table(QUEUES)
+def get_queues():
+    return _init_queues(QUEUES)

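For reference, the relocated broker defaults and the new worker options above correspond to configuration keys like the following; the values are illustrative only, not recommendations:

    # celeryconfig.py (illustrative values)
    BROKER_HOST = "localhost"
    BROKER_PORT = None                    # use the transport default
    BROKER_USER = "guest"
    BROKER_PASSWORD = "guest"
    BROKER_VHOST = "/"

    CELERY_CREATE_MISSING_QUEUES = True   # new: auto-create queues missing from CELERY_QUEUES
    CELERYD_ETA_SCHEDULER_PRECISION = 1   # new: ETA scheduler precision, in seconds
    CELERYD_STATE_DB = None               # new: persistent worker state, off by default
    CELERY_TASK_RESULT_EXPIRES = 60 * 60 * 24   # default dropped from 5 days to 1
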
+ 61 - 0
celery/datastructures.py

@@ -222,6 +222,15 @@ class LimitedSet(object):
     def __contains__(self, value):
         return value in self._data

+    def update(self, other):
+        if isinstance(other, self.__class__):
+            self._data.update(other._data)
+        else:
+            self._data.update(other)
+
+    def as_dict(self):
+        return self._data
+
     def __iter__(self):
         return iter(self._data.keys())

@@ -256,3 +265,55 @@ class LocalCache(OrderedDict):
         while len(self) >= self.limit:
             self.popitem(last=False)
         super(LocalCache, self).__setitem__(key, value)
+
+
+class TokenBucket(object):
+    """Token Bucket Algorithm.
+
+    See http://en.wikipedia.org/wiki/Token_Bucket
+    Most of this code was stolen from an entry in the ASPN Python Cookbook:
+    http://code.activestate.com/recipes/511490/
+
+    :param fill_rate: see :attr:`fill_rate`.
+    :keyword capacity: see :attr:`capacity`.
+
+    .. attribute:: fill_rate
+
+        The rate in tokens/second that the bucket will be refilled.
+
+    .. attribute:: capacity
+
+        Maximum number of tokens in the bucket. Default is ``1``.
+
+    .. attribute:: timestamp
+
+        Timestamp of the last time a token was taken out of the bucket.
+
+    """
+
+    def __init__(self, fill_rate, capacity=1):
+        self.capacity = float(capacity)
+        self._tokens = capacity
+        self.fill_rate = float(fill_rate)
+        self.timestamp = time.time()
+
+    def can_consume(self, tokens=1):
+        if tokens <= self._get_tokens():
+            self._tokens -= tokens
+            return True
+        return False
+
+    def expected_time(self, tokens=1):
+        """Returns the expected time in seconds when a new token should be
+        available. *Note: consumes a token from the bucket*"""
+        _tokens = self._get_tokens()
+        tokens = max(tokens, _tokens)
+        return (tokens - _tokens) / self.fill_rate
+
+    def _get_tokens(self):
+        if self._tokens < self.capacity:
+            now = time.time()
+            delta = self.fill_rate * (now - self.timestamp)
+            self._tokens = min(self.capacity, self._tokens + delta)
+            self.timestamp = now
+        return self._tokens

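As a usage sketch, the bucket answers "may I proceed?" without blocking, so callers poll and sleep on their own (the loop below is illustrative):

    import time

    from celery.datastructures import TokenBucket

    bucket = TokenBucket(fill_rate=2, capacity=4)   # ~2 tokens/sec, bursts of 4

    for i in range(10):
        while not bucket.can_consume(1):
            time.sleep(0.1)          # wait for the bucket to refill
        print("operation %d" % (i, ))
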
+ 47 - 0
celery/db/dfd042c7.py

@@ -0,0 +1,47 @@
+"""
+dfd042c7
+
+SQLAlchemy 0.5.8 version of a805d4bd, see the docstring of that module
+for an explanation of this workaround.
+
+"""
+from sqlalchemy.types import PickleType as _PickleType
+from sqlalchemy import util
+
+
+class PickleType(_PickleType):
+
+    def process_bind_param(self, value, dialect):
+        dumps = self.pickler.dumps
+        protocol = self.protocol
+        if value is None:
+            return None
+        return dumps(value, protocol)
+
+    def process_result_value(self, value, dialect):
+        loads = self.pickler.loads
+        if value is None:
+            return None
+        return loads(str(value))
+
+    def copy_value(self, value):
+        if self.mutable:
+            return self.pickler.loads(self.pickler.dumps(value, self.protocol))
+        else:
+            return value
+
+    def compare_values(self, x, y):
+        if self.comparator:
+            return self.comparator(x, y)
+        elif self.mutable and not hasattr(x, '__eq__') and x is not None:
+            util.warn_deprecated(
+                    "Objects stored with PickleType when mutable=True "
+                    "must implement __eq__() for reliable comparison.")
+            a = self.pickler.dumps(x, self.protocol)
+            b = self.pickler.dumps(y, self.protocol)
+            return a == b
+        else:
+            return x == y
+
+    def is_mutable(self):
+        return self.mutable

+ 25 - 32
celery/db/models.py

@@ -1,12 +1,14 @@
 from datetime import datetime

-from sqlalchemy import Column, Sequence
-from sqlalchemy import Integer, String, Text, DateTime
+import sqlalchemy as sa

 from celery import states
 from celery.db.session import ResultModelBase
 # See docstring of a805d4bd for an explanation for this workaround ;)
-from celery.db.a805d4bd import PickleType
+if sa.__version__.startswith('0.5'):
+    from celery.db.dfd042c7 import PickleType
+else:
+    from celery.db.a805d4bd import PickleType


 class Task(ResultModelBase):
@@ -14,33 +16,27 @@ class Task(ResultModelBase):
     __tablename__ = "celery_taskmeta"
     __table_args__ = {"sqlite_autoincrement": True}

-    id = Column("id", Integer, Sequence("task_id_sequence"), primary_key=True,
-            autoincrement=True)
-    task_id = Column("task_id", String(255))
-    status = Column("status", String(50), default=states.PENDING)
-    result = Column("result", PickleType, nullable=True)
-    date_done = Column("date_done", DateTime, default=datetime.now,
+    id = sa.Column(sa.Integer, sa.Sequence("task_id_sequence"),
+                   primary_key=True,
+                   autoincrement=True)
+    task_id = sa.Column(sa.String(255))
+    status = sa.Column(sa.String(50), default=states.PENDING)
+    result = sa.Column(PickleType, nullable=True)
+    date_done = sa.Column(sa.DateTime, default=datetime.now,
                        onupdate=datetime.now, nullable=True)
-    traceback = Column("traceback", Text, nullable=True)
+    traceback = sa.Column(sa.Text, nullable=True)

     def __init__(self, task_id):
         self.task_id = task_id

-    def __str__(self):
-        return "<Task(%s, %s, %s, %s)>" % (self.task_id,
-                                           self.result,
-                                           self.status,
-                                           self.traceback)
-
     def to_dict(self):
         return {"task_id": self.task_id,
                 "status": self.status,
                 "result": self.result,
-                "date_done": self.date_done,
                 "traceback": self.traceback}

-    def __unicode__(self):
-        return u"<Task: %s successful: %s>" % (self.task_id, self.status)
+    def __repr__(self):
+        return "<Task %s state: %s>" % (self.task_id, self.status)


 class TaskSet(ResultModelBase):
@@ -48,23 +44,20 @@ class TaskSet(ResultModelBase):
     __tablename__ = "celery_tasksetmeta"
     __table_args__ = {"sqlite_autoincrement": True}

-    id = Column("id", Integer, Sequence("taskset_id_sequence"),
+    id = sa.Column(sa.Integer, sa.Sequence("taskset_id_sequence"),
                 autoincrement=True, primary_key=True)
-    taskset_id = Column("taskset_id", String(255))
-    result = Column("result", PickleType, nullable=True)
-    date_done = Column("date_done", DateTime, default=datetime.now,
+    taskset_id = sa.Column(sa.String(255))
+    result = sa.Column(sa.PickleType, nullable=True)
+    date_done = sa.Column(sa.DateTime, default=datetime.now,
                        nullable=True)

-    def __init__(self, task_id):
-        self.task_id = task_id
-
-    def __str__(self):
-        return "<TaskSet(%s, %s)>" % (self.task_id, self.result)
+    def __init__(self, taskset_id, result):
+        self.taskset_id = taskset_id
+        self.result = result

     def to_dict(self):
         return {"taskset_id": self.taskset_id,
-                "result": self.result,
-                "date_done": self.date_done}
+                "result": self.result}

-    def __unicode__(self):
-        return u"<TaskSet: %s>" % (self.taskset_id)
+    def __repr__(self):
+        return u"<TaskSet: %s>" % (self.taskset_id, )

+ 3 - 1
celery/decorators.py

@@ -5,6 +5,7 @@ Decorators
 """
 from inspect import getargspec

+from celery import registry
 from celery.task.base import Task, PeriodicTask
 from celery.utils.functional import wraps

@@ -58,7 +59,8 @@ def task(*args, **options):
             cls_dict = dict(options, run=run,
                             __module__=fun.__module__,
                             __doc__=fun.__doc__)
-            return type(fun.__name__, (base, ), cls_dict)()
+            T = type(fun.__name__, (base, ), cls_dict)()
+            return registry.tasks[T.name] # global instance.

         return _create_task_cls


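The practical effect of returning registry.tasks[T.name] is that decorating a function now hands back the single registered instance, so every import site and the registry agree on one object. A small sketch (the task body is illustrative):

    from celery import registry
    from celery.decorators import task


    @task()
    def add(x, y):
        return x + y

    assert add is registry.tasks[add.name]   # one global instance
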
+ 11 - 4
celery/events/__init__.py

@@ -2,6 +2,8 @@ import time
 import socket
 import threading

+from itertools import count
+
 from celery.messaging import EventPublisher, EventConsumer


@@ -106,7 +108,7 @@ class EventReceiver(object):
         consumer.register_callback(self._receive)
         return consumer

-    def capture(self, limit=None):
+    def capture(self, limit=None, timeout=None):
         """Open up a consumer capturing events.

         This has to run in the main process, and it will never
@@ -114,9 +116,14 @@ class EventReceiver(object):

         """
         consumer = self.consumer()
-        it = consumer.iterconsume(limit=limit)
-        while True:
-            it.next()
+        consumer.consume()
+        try:
+            for iteration in count(0):
+                if limit and iteration > limit:
+                    break
+                consumer.connection.drain_events(timeout=timeout)
+        finally:
+            consumer.close()

     def _receive(self, message_data, message):
         type = message_data.pop("type").lower()

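The rewritten loop means capture() can now be bounded by an iteration limit, a per-wait socket timeout, or both, instead of blocking forever. A minimal sketch, assuming drain_events() raises socket.timeout when nothing arrives in time:

    import socket

    from celery.events import EventReceiver
    from celery.messaging import establish_connection

    def on_event(event):
        print(event)                 # illustrative handler

    conn = establish_connection()
    recv = EventReceiver(conn, handlers={"*": on_event})
    try:
        recv.capture(limit=100, timeout=2.0)
    except socket.timeout:
        pass
    finally:
        conn.close()
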
+ 415 - 0
celery/events/cursesmon.py

@@ -0,0 +1,415 @@
+import celery
+import curses
+import sys
+import threading
+import time
+
+from datetime import datetime
+from itertools import count
+from textwrap import wrap
+
+from carrot.utils import rpartition
+
+from celery import states
+from celery.events import EventReceiver
+from celery.events.state import State
+from celery.messaging import establish_connection
+from celery.task import control
+from celery.utils import abbr, abbrtask
+
+
+
+
+class CursesMonitor(object):
+    keymap = {}
+    win = None
+    screen_width = None
+    screen_delay = 0.1
+    selected_task = None
+    selected_position = 0
+    selected_str = "Selected: "
+    limit = 20
+    foreground = curses.COLOR_BLACK
+    background = curses.COLOR_WHITE
+    online_str = "Workers online: "
+    help_title = "Keys: "
+    help = ("j:up k:down i:info t:traceback r:result c:revoke ^c: quit")
+    greet = "celeryev %s" % celery.__version__
+    info_str = "Info: "
+
+    def __init__(self, state, keymap=None):
+        self.keymap = keymap or self.keymap
+        self.state = state
+        default_keymap = {"J": self.move_selection_down,
+                          "K": self.move_selection_up,
+                          "C": self.revoke_selection,
+                          "T": self.selection_traceback,
+                          "R": self.selection_result,
+                          "I": self.selection_info,
+                          "L": self.selection_rate_limit}
+        self.keymap = dict(default_keymap, **self.keymap)
+
+    def format_row(self, uuid, worker, task, timestamp, state):
+        my, mx = self.win.getmaxyx()
+        mx = mx - 3
+        uuid_max = 36
+        if mx < 88:
+            uuid_max = mx - 52 - 2
+        uuid = abbr(uuid, uuid_max).ljust(uuid_max)
+        worker = abbr(worker, 16).ljust(16)
+        task = abbrtask(task, 16).ljust(16)
+        state = abbr(state, 8).ljust(8)
+        timestamp = timestamp.ljust(8)
+        row = "%s %s %s %s %s " % (uuid, worker, task, timestamp, state)
+        if self.screen_width is None:
+            self.screen_width = len(row[:mx])
+        return row[:mx]
+
+    def find_position(self):
+        if not self.tasks:
+            return 0
+        for i, e in enumerate(self.tasks):
+            if self.selected_task == e[0]:
+                return i
+        return 0
+
+    def move_selection_up(self):
+        self.move_selection(-1)
+
+    def move_selection_down(self):
+        self.move_selection(1)
+
+    def move_selection(self, direction=1):
+        if not self.tasks:
+            return
+        pos = self.find_position()
+        try:
+            self.selected_task = self.tasks[pos + direction][0]
+        except IndexError:
+            self.selected_task = self.tasks[0][0]
+
+    keyalias = {curses.KEY_DOWN: "J",
+                curses.KEY_UP: "K",
+                curses.KEY_ENTER: "I"}
+
+    def handle_keypress(self):
+        try:
+            key = self.win.getkey().upper()
+        except:
+            return
+        key = self.keyalias.get(key) or key
+        handler = self.keymap.get(key)
+        if handler is not None:
+            handler()
+
+    def alert(self, callback, title=None):
+        self.win.erase()
+        my, mx = self.win.getmaxyx()
+        y = blank_line = count(2).next
+        if title:
+            self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE)
+            blank_line()
+        callback(my, mx, y())
+        self.win.addstr(my - 1, 0, "Press any key to continue...",
+                        curses.A_BOLD)
+        self.win.refresh()
+        while 1:
+            try:
+                return self.win.getkey().upper()
+            except:
+                pass
+
+    def selection_rate_limit(self):
+        if not self.selected_task:
+            return curses.beep()
+        task = self.state.tasks[self.selected_task]
+        if not task.name:
+            return curses.beep()
+
+        my, mx = self.win.getmaxyx()
+        r = "New rate limit: "
+        self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
+        self.win.addstr(my - 2, len(r) + 3, " " * (mx - len(r)))
+        rlimit = self.readline(my - 2, 3 + len(r))
+
+        if rlimit:
+            reply = control.rate_limit(task.name, rlimit.strip(), reply=True)
+            self.alert_remote_control_reply(reply)
+
+    def alert_remote_control_reply(self, reply):
+
+        def callback(my, mx, xs):
+            y = count(xs).next
+            if not reply:
+                self.win.addstr(y(), 3, "No replies received in 1s deadline.",
+                        curses.A_BOLD + curses.color_pair(2))
+                return
+
+            for subreply in reply:
+                curline = y()
+
+                host, response = subreply.items()[0]
+                host = "%s: " % host
+                self.win.addstr(curline, 3, host, curses.A_BOLD)
+                attr = curses.A_NORMAL
+                text = ""
+                if "error" in response:
+                    text = response["error"]
+                    attr |= curses.color_pair(2)
+                elif "ok" in response:
+                    text = response["ok"]
+                    attr |= curses.color_pair(3)
+                self.win.addstr(curline, 3 + len(host), text, attr)
+
+        return self.alert(callback, "Remote Control Command Replies")
+
+    def readline(self, x, y):
+        buffer = str()
+        curses.echo()
+        try:
+            i = 0
+            while True:
+                ch = self.win.getch(x, y + i)
+                if ch != -1:
+                    if ch in (10, curses.KEY_ENTER): # enter
+                        break
+                    if ch in (27, ):
+                        buffer = str()
+                        break
+                    buffer += chr(ch)
+                    i += 1
+        finally:
+            curses.noecho()
+        return buffer
+
+    def revoke_selection(self):
+        if not self.selected_task:
+            return curses.beep()
+        reply = control.revoke(self.selected_task, reply=True)
+        self.alert_remote_control_reply(reply)
+
+    def selection_info(self):
+        if not self.selected_task:
+            return
+
+        def alert_callback(mx, my, xs):
+            y = count(xs).next
+            task = self.state.tasks[self.selected_task]
+            info = task.info(extra=["state"])
+            infoitems = [("args", info.pop("args", None)),
+                         ("kwargs", info.pop("kwargs", None))] + info.items()
+            for key, value in infoitems:
+                if key is None:
+                    continue
+                curline = y()
+                keys = key + ": "
+                self.win.addstr(curline, 3, keys, curses.A_BOLD)
+                wrapped = wrap(str(value), mx - 2)
+                if len(wrapped) == 1:
+                    self.win.addstr(curline, len(keys) + 3, wrapped[0])
+                else:
+                    for subline in wrapped:
+                        self.win.addstr(y(), 3, " " * 4 + subline,
+                                curses.A_NORMAL)
+
+        return self.alert(alert_callback,
+                "Task details for %s" % self.selected_task)
+
+    def selection_traceback(self):
+        if not self.selected_task:
+            return curses.beep()
+        task = self.state.tasks[self.selected_task]
+        if task.state not in states.EXCEPTION_STATES:
+            return curses.beep()
+
+        def alert_callback(my, mx, xs):
+            y = count(xs).next
+            for line in task.traceback.split("\n"):
+                self.win.addstr(y(), 3, line)
+
+        return self.alert(alert_callback,
+                "Task Exception Traceback for %s" % self.selected_task)
+
+    def selection_result(self):
+        if not self.selected_task:
+            return
+
+        def alert_callback(my, mx, xs):
+            y = count(xs).next
+            task = self.state.tasks[self.selected_task]
+            result = getattr(task, "result", None) or getattr(task,
+                    "exception", None)
+            for line in wrap(result, mx - 2):
+                self.win.addstr(y(), 3, line)
+
+        return self.alert(alert_callback,
+                "Task Result for %s" % self.selected_task)
+
+    def draw(self):
+        win = self.win
+        self.handle_keypress()
+        x = 3
+        y = blank_line = count(2).next
+        my, mx = win.getmaxyx()
+        win.erase()
+        win.bkgd(" ", curses.color_pair(1))
+        win.border()
+        win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
+        blank_line()
+        win.addstr(y(), x, self.format_row("UUID", "TASK",
+                                           "WORKER", "TIME", "STATE"),
+                curses.A_BOLD | curses.A_UNDERLINE)
+        tasks = self.tasks
+        if tasks:
+            for uuid, task in tasks:
+                if task.uuid:
+                    state_color = self.state_colors.get(task.state)
+                    attr = curses.A_NORMAL
+                    if task.uuid == self.selected_task:
+                        attr = curses.A_STANDOUT
+                    timestamp = datetime.fromtimestamp(
+                                    task.timestamp or time.time())
+                    timef = timestamp.strftime("%H:%M:%S")
+                    line = self.format_row(uuid, task.name,
+                                           task.worker.hostname,
+                                           timef, task.state)
+                    lineno = y()
+                    win.addstr(lineno, x, line, attr)
+                    if state_color:
+                        win.addstr(lineno, len(line) - len(task.state) + 1,
+                                task.state, state_color | attr)
+                    if task.ready:
+                        task.visited = time.time()
+
+        # -- Footer
+        blank_line()
+        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width)
+
+        # Selected Task Info
+        if self.selected_task:
+            win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
+            info = "Missing extended info"
+            try:
+                selection = self.state.tasks[self.selected_task]
+            except KeyError:
+                pass
+            else:
+                info = selection.info(["args", "kwargs",
+                                       "result", "runtime", "eta"])
+                if "runtime" in info:
+                    info["runtime"] = "%.2fs" % info["runtime"]
+                if "result" in info:
+                    info["result"] = abbr(info["result"], 16)
+                info = " ".join("%s=%s" % (key, value)
+                            for key, value in info.items())
+            win.addstr(my - 5, x + len(self.selected_str), info)
+        else:
+            win.addstr(my - 5, x, "No task selected", curses.A_NORMAL)
+
+
+        # Workers
+        if self.workers:
+            win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
+            win.addstr(my - 4, x + len(self.online_str),
+                    ", ".join(self.workers), curses.A_NORMAL)
+        else:
+            win.addstr(my - 4, x, "No workers discovered.")
+
+        # Info
+        win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
+        win.addstr(my - 3, x + len(self.info_str),
+                "events:%s tasks:%s workers:%s/%s" % (
+                    self.state.event_count, self.state.task_count,
+                    len([w for w in self.state.workers.values()
+                            if w.alive]),
+                    len(self.state.workers)),
+                curses.A_DIM)
+
+        # Help
+        win.addstr(my - 2, x, self.help_title, curses.A_BOLD)
+        win.addstr(my - 2, x + len(self.help_title), self.help, curses.A_DIM)
+        win.refresh()
+
+    def init_screen(self):
+        self.win = curses.initscr()
+        self.win.nodelay(True)
+        self.win.keypad(True)
+        curses.start_color()
+        curses.init_pair(1, self.foreground, self.background)
+        # exception states
+        curses.init_pair(2, curses.COLOR_RED, self.background)
+        # successful state
+        curses.init_pair(3, curses.COLOR_GREEN, self.background)
+        # revoked state
+        curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
+        # greeting
+        curses.init_pair(5, curses.COLOR_BLUE, self.background)
+        # started state
+        curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
+
+        self.state_colors = {states.SUCCESS: curses.color_pair(3),
+                             states.REVOKED: curses.color_pair(4),
+                             states.STARTED: curses.color_pair(6)}
+        for state in states.EXCEPTION_STATES:
+            self.state_colors[state] = curses.color_pair(2)
+
+        curses.cbreak()
+
+    def resetscreen(self):
+        curses.nocbreak()
+        self.win.keypad(False)
+        curses.echo()
+        curses.endwin()
+
+    def nap(self):
+        curses.napms(int(self.screen_delay * 1000))
+
+    @property
+    def tasks(self):
+        return self.state.tasks_by_timestamp()[:self.limit]
+
+    @property
+    def workers(self):
+        return [hostname
+                    for hostname, w in self.state.workers.items()
+                        if w.alive]
+
+
+class DisplayThread(threading.Thread):
+
+    def __init__(self, display):
+        self.display = display
+        self.shutdown = False
+        threading.Thread.__init__(self)
+
+    def run(self):
+        while not self.shutdown:
+            self.display.draw()
+            self.display.nap()
+
+
+def evtop():
+    sys.stderr.write("-> evtop: starting capture...\n")
+    state = State()
+    display = CursesMonitor(state)
+    display.init_screen()
+    refresher = DisplayThread(display)
+    refresher.start()
+    conn = establish_connection()
+    recv = EventReceiver(conn, handlers={"*": state.event})
+    try:
+        recv.capture(limit=None)
+    except Exception:
+        refresher.shutdown = True
+        refresher.join()
+        display.resetscreen()
+        raise
+    except (KeyboardInterrupt, SystemExit):
+        conn and conn.close()
+        refresher.shutdown = True
+        refresher.join()
+        display.resetscreen()
+
+
+if __name__ == "__main__":
+    evtop()

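Because the constructor merges a caller-supplied keymap over the defaults, key bindings can be customized without subclassing. A minimal sketch (the extra binding and its no-op handler are hypothetical; handlers take no arguments and keys are matched upper-cased):

    from celery.events.cursesmon import CursesMonitor
    from celery.events.state import State

    def show_nothing():
        pass    # hypothetical handler; defaults cover J/K/C/T/R/I/L

    state = State()
    monitor = CursesMonitor(state, keymap={"X": show_nothing})
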
+ 68 - 0
celery/events/dumper.py

@@ -0,0 +1,68 @@
+import sys
+
+from datetime import datetime
+
+from celery.datastructures import LocalCache
+from celery.events import EventReceiver
+from celery.events.state import State
+from celery.messaging import establish_connection
+
+
+TASK_NAMES = LocalCache(0xFFF)
+
+HUMAN_TYPES = {"worker-offline": "shutdown",
+               "worker-online": "started",
+               "worker-heartbeat": "heartbeat"}
+
+
+def humanize_type(type):
+    try:
+        return HUMAN_TYPES[type.lower()]
+    except KeyError:
+        return type.lower().replace("-", " ")
+
+
+class Dumper(object):
+
+    def on_event(self, event):
+        timestamp = datetime.fromtimestamp(event.pop("timestamp"))
+        type = event.pop("type").lower()
+        hostname = event.pop("hostname")
+        if type.startswith("task-"):
+            uuid = event.pop("uuid")
+            if type.startswith("task-received"):
+                task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
+                        event.pop("name"), uuid,
+                        event.pop("args"),
+                        event.pop("kwargs"))
+            else:
+                task = TASK_NAMES.get(uuid, "")
+            return self.format_task_event(hostname, timestamp,
+                                          type, task, event)
+        fields = ", ".join("%s=%s" % (key, event[key])
+                        for key in sorted(event.keys()))
+        sep = fields and ":" or ""
+        print("%s [%s] %s%s %s" % (hostname, timestamp,
+                                    humanize_type(type), sep, fields))
+
+    def format_task_event(self, hostname, timestamp, type, task, event):
+        fields = ", ".join("%s=%s" % (key, event[key])
+                        for key in sorted(event.keys()))
+        sep = fields and ":" or ""
+        print("%s [%s] %s%s %s %s" % (hostname, timestamp,
+                                    humanize_type(type), sep, task, fields))
+
+
+
+def evdump():
+    sys.stderr.write("-> evdump: starting capture...\n")
+    dumper = Dumper()
+    conn = establish_connection()
+    recv = EventReceiver(conn, handlers={"*": dumper.on_event})
+    try:
+        recv.capture()
+    except (KeyboardInterrupt, SystemExit):
+        conn and conn.close()
+
+if __name__ == "__main__":
+    evdump()

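The output format follows mechanically from on_event(); feeding one synthetic event through by hand (the event dict below is fabricated for illustration) shows the shape of a line:

    import time

    from celery.events.dumper import Dumper

    Dumper().on_event({"type": "worker-heartbeat",
                       "hostname": "worker1.example.com",
                       "timestamp": time.time()})
    # prints something like: worker1.example.com [2010-05-20 12:00:00] heartbeat
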
+ 99 - 0
celery/events/snapshot.py

@@ -0,0 +1,99 @@
+import sys
+import time
+import timer2
+
+from celery import conf
+from celery import log
+from celery.datastructures import TokenBucket
+from celery.events import EventReceiver
+from celery.events.state import State
+from celery.messaging import establish_connection
+from celery.utils import instantiate
+from celery.utils.dispatch import Signal
+from celery.utils.timeutils import rate
+
+
+class Polaroid(object):
+    shutter_signal = Signal(providing_args=("state", ))
+    cleanup_signal = Signal()
+
+    _tref = None
+
+    def __init__(self, state, freq=1.0, maxrate=None,
+            cleanup_freq=3600.0, logger=None):
+        self.state = state
+        self.freq = freq
+        self.cleanup_freq = cleanup_freq
+        self.logger = logger
+        self.maxrate = maxrate and TokenBucket(rate(maxrate))
+
+    def install(self):
+        self._tref = timer2.apply_interval(self.freq * 1000.0,
+                                           self.capture)
+        self._ctref = timer2.apply_interval(self.cleanup_freq * 1000.0,
+                                            self.cleanup)
+
+    def on_shutter(self, state):
+        pass
+
+    def on_cleanup(self):
+        pass
+
+    def cleanup(self):
+        self.debug("Cleanup: Running...")
+        self.cleanup_signal.send(None)
+        self.on_cleanup()
+
+    def debug(self, msg):
+        if self.logger:
+            self.logger.debug(msg)
+
+    def shutter(self):
+        if self.maxrate is None or self.maxrate.can_consume():
+            self.debug("Shutter: %s" % (self.state, ))
+            self.shutter_signal.send(self.state)
+            self.on_shutter(self.state)
+            self.state.clear()
+
+    def capture(self):
+        return self.state.freeze_while(self.shutter)
+
+    def cancel(self):
+        if self._tref:
+            self._tref()
+            self._tref.cancel()
+        if self._ctref:
+            self._ctref.cancel()
+
+    def __enter__(self):
+        self.install()
+        return self
+
+    def __exit__(self, *exc_info):
+        self.cancel()
+
+
+def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
+        logfile=None):
+    if not isinstance(loglevel, int):
+        loglevel = conf.LOG_LEVELS[loglevel.upper()]
+    logger = log.setup_logger(loglevel=loglevel,
+                              logfile=logfile,
+                              name="celery.evcam")
+    logger.info(
+        "-> evcam: Taking snapshots with %s (every %s secs.)\n" % (
+            camera, freq))
+    state = State()
+    cam = instantiate(camera, state,
+                      freq=freq, maxrate=maxrate, logger=logger)
+    cam.install()
+    conn = establish_connection()
+    recv = EventReceiver(conn, handlers={"*": state.event})
+    try:
+        try:
+            recv.capture(limit=None)
+        except KeyboardInterrupt:
+            raise SystemExit
+    finally:
+        cam.cancel()
+        conn.close()

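Custom cameras follow directly from this class: subclass Polaroid and override on_shutter(), which receives the current State at most every `freq` seconds (and never faster than `maxrate`). A minimal sketch; the class and its print body are illustrative:

    from celery.events.snapshot import Polaroid


    class DumpCam(Polaroid):

        def on_shutter(self, state):
            # Receives the in-memory cluster state on each snapshot.
            print("tasks seen: %s, workers: %s" % (
                state.task_count, len(state.workers)))

Passing a dotted path to such a class as the `camera` argument of run_celeryev() (or directly to evcam()) wires it to the event stream.
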
+ 107 - 27
celery/events/state.py

@@ -1,35 +1,29 @@
 import time
 import time
 import heapq
 import heapq
 
 
+from collections import deque
+from threading import RLock
+
 from carrot.utils import partition
 from carrot.utils import partition
 
 
 from celery import states
 from celery import states
-from celery.datastructures import LocalCache
+from celery.datastructures import AttributeDict, LocalCache
 from celery.utils import kwdict
 from celery.utils import kwdict
 
 
 HEARTBEAT_EXPIRE = 150 # 2 minutes, 30 seconds
 HEARTBEAT_EXPIRE = 150 # 2 minutes, 30 seconds
 
 
 
 
-class Element(dict):
+class Element(AttributeDict):
     """Base class for types."""
     """Base class for types."""
     visited = False
     visited = False
 
 
     def __init__(self, **fields):
     def __init__(self, **fields):
         dict.__init__(self, fields)
         dict.__init__(self, fields)
 
 
-    def __getattr__(self, key):
-        try:
-            return self[key]
-        except KeyError:
-            raise AttributeError("'%s' object has no attribute '%s'" % (
-                    self.__class__.__name__, key))
-
-    def __setattr__(self, key, value):
-        self[key] = value
-
 
 
 class Worker(Element):
 class Worker(Element):
     """Worker State."""
     """Worker State."""
+    heartbeat_max = 4
 
 
     def __init__(self, **fields):
     def __init__(self, **fields):
         super(Worker, self).__init__(**fields)
         super(Worker, self).__init__(**fields)
@@ -47,6 +41,8 @@ class Worker(Element):
     def _heartpush(self, timestamp):
     def _heartpush(self, timestamp):
         if timestamp:
         if timestamp:
             heapq.heappush(self.heartbeats, timestamp)
             heapq.heappush(self.heartbeats, timestamp)
+            if len(self.heartbeats) > self.heartbeat_max:
+                self.heartbeats = self.heartbeats[:self.heartbeat_max]
 
 
     @property
     @property
     def alive(self):
     def alive(self):
@@ -57,7 +53,7 @@ class Worker(Element):
 class Task(Element):
 class Task(Element):
     """Task State."""
     """Task State."""
     _info_fields = ("args", "kwargs", "retries",
     _info_fields = ("args", "kwargs", "retries",
-                    "result", "eta", "runtime",
+                    "result", "eta", "runtime", "expires",
                     "exception")
                     "exception")
 
 
     _defaults = dict(uuid=None,
     _defaults = dict(uuid=None,
@@ -72,9 +68,14 @@ class Task(Element):
                      args=None,
                      args=None,
                      kwargs=None,
                      kwargs=None,
                      eta=None,
                      eta=None,
+                     expires=None,
                      retries=None,
                      retries=None,
                      worker=None,
                      worker=None,
-                     timestamp=None)
+                     result=None,
+                     exception=None,
+                     timestamp=None,
+                     runtime=None,
+                     traceback=None)
 
 
     def __init__(self, **fields):
     def __init__(self, **fields):
         super(Task, self).__init__(**dict(self._defaults, **fields))
         super(Task, self).__init__(**dict(self._defaults, **fields))
@@ -97,10 +98,8 @@ class Task(Element):
         return super(Task, self).update(d, **extra)
         return super(Task, self).update(d, **extra)
 
 
     def on_received(self, timestamp=None, **fields):
     def on_received(self, timestamp=None, **fields):
-        print("ON RECEIVED")
         self.received = timestamp
         self.received = timestamp
         self.state = "RECEIVED"
         self.state = "RECEIVED"
-        print(fields)
         self.update(fields, timestamp=timestamp)
         self.update(fields, timestamp=timestamp)
 
 
     def on_started(self, timestamp=None, **fields):
     def on_started(self, timestamp=None, **fields):
@@ -130,9 +129,58 @@ class Task(Element):
 
 
 
 
 class State(object):
 class State(object):
-    """Represents a snapshot of a clusters state."""
+    """Records clusters state."""
     event_count = 0
     event_count = 0
     task_count = 0
     task_count = 0
+    _buffering = False
+    buffer = deque()
+    frozen = False
+
+    def freeze(self, buffer=True):
+        """Stop recording the event stream.
+
+        :keyword buffer: If true, any events received while frozen
+           will be buffered, you can use ``thaw(replay=True)`` to apply
+           this buffer. :meth:`thaw` will clear the buffer and resume
+           recording the stream.
+
+        """
+        self._buffering = buffer
+        self.frozen = True
+
+    def _replay(self):
+        while self.buffer:
+            try:
+                event = self.buffer.popleft()
+            except IndexError:
+                pass
+            self._dispatch_event(event)
+
+    def thaw(self, replay=True):
+        """Resume recording of the event stream.
+
+        :keyword replay: Will replay buffered events received while
+          the stream was frozen.
+
+        This will always clear the buffer, deleting any events collected
+        while the stream was frozen.
+
+        """
+        self._buffering = False
+        try:
+            if replay:
+                self._replay()
+            else:
+                self.buffer.clear()
+        finally:
+            self.frozen = False
+
+    def freeze_while(self, fun, *args, **kwargs):
+        self.freeze()
+        try:
+            return fun(*args, **kwargs)
+        finally:
+            self.thaw(replay=True)
 
 
     def __init__(self, callback=None,
     def __init__(self, callback=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000):
             max_workers_in_memory=5000, max_tasks_in_memory=10000):
@@ -141,6 +189,24 @@ class State(object):
         self.event_callback = callback
         self.event_callback = callback
         self.group_handlers = {"worker": self.worker_event,
         self.group_handlers = {"worker": self.worker_event,
                                "task": self.task_event}
                                "task": self.task_event}
+        self._resource = RLock()
+
+    def clear_tasks(self, ready=True):
+        if ready:
+            self.tasks = dict((uuid, task)
+                                for uuid, task in self.tasks.items()
+                                    if task.state not in states.READY_STATES)
+        else:
+            self.tasks.clear()
+
+    def clear(self, ready=True):
+        try:
+            self.workers.clear()
+            self.clear_tasks(ready)
+            self.event_count = 0
+            self.task_count = 0
+        finally:
+            pass
 
 
     def get_or_create_worker(self, hostname, **kwargs):
     def get_or_create_worker(self, hostname, **kwargs):
         """Get or create worker by hostname."""
         """Get or create worker by hostname."""
@@ -182,8 +248,7 @@ class State(object):
             handler(**fields)
             handler(**fields)
         task.worker = worker
         task.worker = worker
 
 
-    def event(self, event):
-        """Process event."""
+    def _dispatch_event(self, event):
         self.event_count += 1
         self.event_count += 1
         event = kwdict(event)
         event = kwdict(event)
         group, _, type = partition(event.pop("type"), "-")
         group, _, type = partition(event.pop("type"), "-")
@@ -191,45 +256,60 @@ class State(object):
         if self.event_callback:
         if self.event_callback:
             self.event_callback(self, event)
             self.event_callback(self, event)
 
 
-    def tasks_by_timestamp(self):
+    def event(self, event):
+        """Process event."""
+        self._resource.acquire()
+        try:
+            if not self.frozen:
+                self._dispatch_event(event)
+            elif self._buffering:
+                self.buffer.append(event)
+        finally:
+            self._resource.release()
+
+    def tasks_by_timestamp(self, limit=None):
         """Get tasks by timestamp.

         Returns a list of ``(uuid, task)`` tuples.

         """
-        return self._sort_tasks_by_time(self.tasks.items())
+        return self._sort_tasks_by_time(self.tasks.items()[:limit])

     def _sort_tasks_by_time(self, tasks):
         """Sort task items by time."""
-        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)
+        return sorted(tasks, key=lambda t: t[1].timestamp,
+                      reverse=True)

-    def tasks_by_type(self, name):
+    def tasks_by_type(self, name, limit=None):
         """Get all tasks by type.

         Returns a list of ``(uuid, task)`` tuples.

         """
         return self._sort_tasks_by_time([(uuid, task)
-                for uuid, task in self.tasks.items()
+                for uuid, task in self.tasks.items()[:limit]
                     if task.name == name])

-    def tasks_by_worker(self, hostname):
+    def tasks_by_worker(self, hostname, limit=None):
         """Get all tasks by worker.

         Returns a list of ``(uuid, task)`` tuples.

         """
         return self._sort_tasks_by_time([(uuid, task)
-                for uuid, task in self.tasks.items()
+                for uuid, task in self.tasks.items()[:limit]
                     if task.worker.hostname == hostname])
 
 
     def task_types(self):
         """Returns a list of all seen task types."""
-        return list(set(task.name for task in self.tasks.values()))
+        return sorted(set(task.name for task in self.tasks.values()))
 
 
     def alive_workers(self):
         """Returns a list of (seemingly) alive workers."""
         return [w for w in self.workers.values() if w.alive]
 
 
+    def __repr__(self):
+        return "<ClusterState: events=%s tasks=%s>" % (self.event_count,
+                                                       self.task_count)
+
 
 
 state = State()
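
The freeze/buffer/thaw protocol above is what a monitor can use to read a
consistent snapshot while events keep arriving. A minimal usage sketch (the
``render`` callback is hypothetical, not part of this change):

    from celery.events.state import state

    def render(st):
        # Read a consistent view of the cluster.
        return [(uuid, task.state) for uuid, task in st.tasks_by_timestamp()]

    # Events received while render() runs are buffered, then replayed
    # once the stream is thawed again.
    snapshot = state.freeze_while(render, state)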

+ 16 - 5
celery/exceptions.py

@@ -8,13 +8,14 @@ UNREGISTERED_FMT = """
 Task of kind %s is not registered, please make sure it's imported.
 """.strip()
 
 
-
-class RouteNotFound(KeyError):
-    """Task routed to a queue not in the routing table (CELERY_QUEUES)."""
+class QueueNotFound(KeyError):
+    """Task routed to a queue not in CELERY_QUEUES."""
+    pass
 
 
 
 
 class TimeLimitExceeded(Exception):
     """The time limit has been exceeded and the job has been terminated."""
+    pass


 class SoftTimeLimitExceeded(Exception):
@@ -23,6 +24,11 @@ class SoftTimeLimitExceeded(Exception):
     pass


+class WorkerLostError(Exception):
+    """The worker processing a task has exited prematurely."""
+    pass
+
+
 class ImproperlyConfigured(Exception):
     """Celery is somehow improperly configured."""
     pass
@@ -56,9 +62,14 @@ class RetryTaskError(Exception):
 
 
     def __init__(self, message, exc, *args, **kwargs):
         self.exc = exc
-        Exception.__init__(self, message, exc, *args, **kwargs)
-
+        Exception.__init__(self, message, exc, *args,
+                           **kwargs)

 class TaskRevokedError(Exception):
     """The task has been revoked, so no result available."""
     pass
+
+
+class NotConfigured(UserWarning):
+    """Celery has not been configured, as no config module has been found."""
+

+ 27 - 14
celery/execute/__init__.py

@@ -1,13 +1,14 @@
 from celery import conf
-from celery.utils import gen_unique_id, fun_takes_kwargs, mattrgetter
-from celery.result import AsyncResult, EagerResult
+from celery.datastructures import ExceptionInfo
 from celery.execute.trace import TaskTrace
-from celery.registry import tasks
 from celery.messaging import with_connection
 from celery.messaging import TaskPublisher
-from celery.datastructures import ExceptionInfo
+from celery.registry import tasks
+from celery.result import AsyncResult, EagerResult
+from celery.routes import Router
+from celery.utils import gen_unique_id, fun_takes_kwargs, mattrgetter
 
 
-extract_exec_options = mattrgetter("routing_key", "exchange",
+extract_exec_options = mattrgetter("queue", "routing_key", "exchange",
                                    "immediate", "mandatory",
                                    "priority", "serializer",
                                    "delivery_mode")
@@ -16,7 +17,7 @@ extract_exec_options = mattrgetter("routing_key", "exchange",
 @with_connection
 def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
         task_id=None, publisher=None, connection=None, connect_timeout=None,
-        **options):
+        router=None, expires=None, **options):
     """Run a task asynchronously by the celery daemon(s).

     :param task: The :class:`~celery.task.base.Task` to run.
@@ -32,9 +33,14 @@ def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
       the ``immediate`` setting, they are unrelated).

     :keyword eta: A :class:`~datetime.datetime` object that describes the
-      absolute time when the task should execute. May not be specified
-      if ``countdown`` is also supplied. (Do not confuse this with the
-      ``immediate`` setting, they are unrelated).
+      absolute time and date of when the task should execute. May not be
+      specified if ``countdown`` is also supplied. (Do not confuse this
+      with the ``immediate`` setting, they are unrelated).
+
+    :keyword expires: Either an :class:`int`, describing the number of seconds,
+      or a :class:`~datetime.datetime` object that describes the absolute time
+      and date of when the task should expire.
+      The task will not be executed after the expiration time.
 
 
     :keyword connection: Re-use existing broker connection instead
       of establishing a new one. The ``connect_timeout`` argument is
@@ -78,11 +84,16 @@ def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
     replaced by a local :func:`apply` call instead.

     """
+    router = router or Router(conf.ROUTES, conf.get_queues(),
+                              conf.CREATE_MISSING_QUEUES)
+
     if conf.ALWAYS_EAGER:
         return apply(task, args, kwargs, task_id=task_id)

     task = tasks[task.name] # get instance from registry
+
     options = dict(extract_exec_options(task), **options)
+    options = router.route(options, task.name, args, kwargs)
     exchange = options.get("exchange")
     exchange_type = options.get("exchange_type")
 
 
@@ -90,7 +101,8 @@ def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
                                               exchange_type=exchange_type)
     try:
         task_id = publish.delay_task(task.name, args, kwargs, task_id=task_id,
-                                     countdown=countdown, eta=eta, **options)
+                                     countdown=countdown, eta=eta,
+                                     expires=expires, **options)
     finally:
         publisher or publish.close()
 
 
@@ -100,7 +112,7 @@ def apply_async(task, args=None, kwargs=None, countdown=None, eta=None,
 @with_connection
 def send_task(name, args=None, kwargs=None, countdown=None, eta=None,
         task_id=None, publisher=None, connection=None, connect_timeout=None,
-        result_cls=AsyncResult, **options):
+        result_cls=AsyncResult, expires=None, **options):
     """Send task by name.

     Useful if you don't have access to the :class:`~celery.task.base.Task`
@@ -118,7 +130,8 @@ def send_task(name, args=None, kwargs=None, countdown=None, eta=None,
                                          exchange_type=exchange_type)
     try:
         task_id = publish.delay_task(name, args, kwargs, task_id=task_id,
-                                     countdown=countdown, eta=eta, **options)
+                                     countdown=countdown, eta=eta,
+                                     expires=expires, **options)
     finally:
         publisher or publish.close()
 
 
@@ -135,7 +148,7 @@ def delay_task(task_name, *args, **kwargs):
     :raises celery.exceptions.NotRegistered: exception if no such task
         has been registered in the task registry.

-    :returns: :class:`celery.result.AsyncResult`.
+    :returns :class:`celery.result.AsyncResult`:

     Example
 
 
@@ -161,7 +174,7 @@ def apply(task, args, kwargs, **options):
     """
     args = args or []
     kwargs = kwargs or {}
-    task_id = options.get("task_id", gen_unique_id())
+    task_id = options.get("task_id") or gen_unique_id()
     retries = options.get("retries", 0)
     throw = options.pop("throw", conf.EAGER_PROPAGATES_EXCEPTIONS)
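
For illustration, how the new ``expires`` argument composes with the existing
options (a sketch, not part of the diff; ``add`` stands in for any registered
task):

    from datetime import datetime, timedelta
    from celery.execute import apply_async

    # Expiry as a relative number of seconds...
    result = apply_async(add, args=(2, 2), countdown=10, expires=120)

    # ...or as an absolute datetime.
    result = apply_async(add, args=(2, 2),
                         expires=datetime.now() + timedelta(days=1))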
 
 

+ 7 - 1
celery/loaders/base.py

@@ -1,3 +1,5 @@
+from importlib import import_module
+
 BUILTIN_MODULES = ["celery.task"]
 
 
 
 
@@ -26,12 +28,16 @@ class BaseLoader(object):
         """This method is called before a task is executed."""
         pass

+    def on_process_cleanup(self):
+        """This method is called after a task is executed."""
+        pass
+
     def on_worker_init(self):
         """This method is called when the worker (``celeryd``) starts."""
         pass
 
 
     def import_task_module(self, module):
-        return __import__(module, [], [], [''])
+        return import_module(module)

     def import_default_modules(self):
         imports = getattr(self.conf, "CELERY_IMPORTS", None) or []

+ 28 - 5
celery/loaders/default.py

@@ -1,8 +1,10 @@
 import os
+import sys
 import warnings
 from importlib import import_module

 from celery.loaders.base import BaseLoader
+from celery.exceptions import NotConfigured
 
 
 DEFAULT_CONFIG_MODULE = "celeryconfig"
 
 
@@ -13,6 +15,7 @@ DEFAULT_SETTINGS = {
     "DATABASE_NAME": "celery.sqlite",
     "INSTALLED_APPS": ("celery", ),
     "CELERY_IMPORTS": (),
+    "CELERY_TASK_ERROR_WHITELIST": (),
 }
 
 
 DEFAULT_UNCONFIGURED_SETTINGS = {
@@ -20,9 +23,6 @@ DEFAULT_UNCONFIGURED_SETTINGS = {
 }
 
 
 
 
-class NotConfigured(UserWarning):
-    """Celery has not been configured, as no config module has been found."""
-
 
 
 def wanted_module_item(item):
     return not item.startswith("_")
@@ -52,17 +52,40 @@ class Loader(BaseLoader):
         installed_apps = set(list(DEFAULT_SETTINGS["INSTALLED_APPS"]) + \
                              list(settings.INSTALLED_APPS))
         settings.INSTALLED_APPS = tuple(installed_apps)
+        settings.CELERY_TASK_ERROR_WHITELIST = tuple(
+                getattr(import_module(mod), cls)
+                    for fqn in settings.CELERY_TASK_ERROR_WHITELIST
+                        for mod, cls in (fqn.rsplit('.', 1), ))

         return settings
 
 
+    def import_from_cwd(self, module, imp=import_module):
+        """Import module, but make sure it finds modules
+        located in the current directory.
+
+        Modules located in the current directory have
+        precedence over modules located in ``sys.path``.
+        """
+        cwd = os.getcwd()
+        if cwd in sys.path:
+            return imp(module)
+        sys.path.insert(0, cwd)
+        try:
+            return imp(module)
+        finally:
+            try:
+                sys.path.remove(cwd)
+            except ValueError:
+                pass
+
     def read_configuration(self):
         """Read configuration from ``celeryconfig.py`` and configure
         celery and Django so it can be used by regular Python."""
         configname = os.environ.get("CELERY_CONFIG_MODULE",
                                     DEFAULT_CONFIG_MODULE)
         try:
-            celeryconfig = import_module(configname)
-        except ImportError, exc:
+            celeryconfig = self.import_from_cwd(configname)
+        except ImportError:
             warnings.warn("No celeryconfig.py module found! Please make "
                           "sure it exists and is available to Python.",
                           NotConfigured)
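
The effect of ``import_from_cwd`` is that a ``celeryconfig.py`` in the working
directory now wins over a module of the same name elsewhere on ``sys.path``.
A sketch, assuming the loader can be instantiated directly:

    import os
    from celery.loaders.default import Loader

    os.chdir("/path/to/project")    # directory containing celeryconfig.py
    celeryconfig = Loader().import_from_cwd("celeryconfig")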

+ 67 - 42
celery/log.py

@@ -1,31 +1,34 @@
 """celery.log"""
+import logging
+import threading
+import time
 import os
 import sys
-import time
-import logging
 import traceback

 from celery import conf
+from celery import signals
 from celery.utils import noop
-from celery.utils.patch import ensure_process_aware_logger
 from celery.utils.compat import LoggerAdapter
+from celery.utils.patch import ensure_process_aware_logger
 
 
-_hijacked = False
-_monkeypatched = False
+# The logging subsystem is only configured once per process.
+# setup_logging_subsystem sets this flag, and subsequent calls
+# will do nothing.
+_setup = False
 
 
 BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
 RESET_SEQ = "\033[0m"
 COLOR_SEQ = "\033[1;%dm"
 BOLD_SEQ = "\033[1m"
-COLORS = {
-    "WARNING": YELLOW,
-    "DEBUG": BLUE,
-    "CRITICAL": MAGENTA,
-    "ERROR": RED,
-}
+COLORS = {"DEBUG": BLUE,
+          "WARNING": YELLOW,
+          "ERROR": RED,
+          "CRITICAL": MAGENTA}
 
 
 
 
 class ColorFormatter(logging.Formatter):
+
     def __init__(self, msg, use_color=True):
         logging.Formatter.__init__(self, msg)
         self.use_color = use_color
@@ -38,33 +41,37 @@ class ColorFormatter(logging.Formatter):
         return logging.Formatter.format(self, record)
 
 
 
 
-def get_task_logger(loglevel=None):
-    ensure_process_aware_logger()
-    logger = logging.getLogger("celery.Task")
+def get_task_logger(loglevel=None, name=None):
+    logger = logging.getLogger(name or "celery.task.default")
     if loglevel is not None:
         logger.setLevel(loglevel)
     return logger
 
 
 
 
-def _hijack_multiprocessing_logger():
-    from multiprocessing import util as mputil
-    global _hijacked
-
-    if _hijacked:
-        return mputil.get_logger()
-
-    ensure_process_aware_logger()
-
-    logging.Logger.manager.loggerDict.clear()
-
-    try:
-        if mputil._logger is not None:
-            mputil.logger = None
-    except AttributeError:
-        pass
-
-    _hijacked = True
-    return mputil.get_logger()
+def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
+        format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
+        **kwargs):
+    global _setup
+    if not _setup:
+        ensure_process_aware_logger()
+        logging.Logger.manager.loggerDict.clear()
+        from multiprocessing import util as mputil
+        try:
+            if mputil._logger is not None:
+                mputil.logger = None
+        except AttributeError:
+            pass
+        receivers = signals.setup_logging.send(sender=None,
+                                               loglevel=loglevel,
+                                               logfile=logfile,
+                                               format=format,
+                                               colorize=colorize)
+        if not receivers:
+            root = logging.getLogger()
+            _setup_logger(root, logfile, format, colorize, **kwargs)
+            root.setLevel(loglevel)
+        _setup = True
+        return receivers
 
 
 
 
 def _detect_handler(logfile=None):
@@ -75,13 +82,13 @@ def _detect_handler(logfile=None):
     return logging.FileHandler(logfile)
 
 
 
 
-def get_default_logger(loglevel=None):
+def get_default_logger(loglevel=None, name="celery"):
     """Get default logger instance.

     :keyword loglevel: Initial log level.

     """
-    logger = _hijack_multiprocessing_logger()
+    logger = logging.getLogger(name)
     if loglevel is not None:
         logger.setLevel(loglevel)
     return logger
@@ -89,20 +96,23 @@ def get_default_logger(loglevel=None):
 
 
 def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
         format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
-        **kwargs):
+        name="celery", root=True, **kwargs):
     """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
     then ``stderr`` is used.

     Returns logger object.

     """
-    return _setup_logger(get_default_logger(loglevel),
-                         logfile, format, colorize, **kwargs)
+    if not root:
+        return _setup_logger(get_default_logger(loglevel, name),
+                             logfile, format, colorize, **kwargs)
+    setup_logging_subsystem(loglevel, logfile, format, colorize, **kwargs)
+    return get_default_logger(name=name)
 
 
 
 
 def setup_task_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
         format=conf.CELERYD_TASK_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
-        task_kwargs=None, **kwargs):
+        task_kwargs=None, root=True, **kwargs):
     """Setup the task logger. If ``logfile`` is not specified, then
     ``stderr`` is used.
 
 
@@ -112,12 +122,19 @@ def setup_task_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
     if task_kwargs is None:
         task_kwargs = {}
     task_kwargs.setdefault("task_id", "-?-")
+    task_name = task_kwargs.get("task_name")
     task_kwargs.setdefault("task_name", "-?-")
-    logger = _setup_logger(get_task_logger(loglevel),
-                           logfile, format, colorize, **kwargs)
+    if not root:
+        logger = _setup_logger(get_task_logger(loglevel, task_name),
+                               logfile, format, colorize, **kwargs)
+    else:
+        setup_logging_subsystem(loglevel, logfile, format, colorize, **kwargs)
+        logger = get_task_logger(name=task_name)
     return LoggerAdapter(logger, task_kwargs)
 
 
 
 
+
+
 def _setup_logger(logger, logfile, format, colorize,
         formatter=ColorFormatter, **kwargs):
 
 
@@ -173,6 +190,7 @@ class LoggingProxy(object):
     name = None
     closed = False
     loglevel = logging.ERROR
+    _thread = threading.local()

     def __init__(self, logger, loglevel=None):
         self.logger = logger
@@ -206,10 +224,17 @@ class LoggingProxy(object):
         return map(wrap_handler, self.logger.handlers)

     def write(self, data):
         """Write message to logging object."""
+        if getattr(self._thread, "recurse_protection", False):
+            # Logger is logging back to this file, so stop recursing.
+            return
         data = data.strip()
         if data and not self.closed:
+            self._thread.recurse_protection = True
+            try:
+                self.logger.log(self.loglevel, data)
+            finally:
+                self._thread.recurse_protection = False
 
 
     def writelines(self, sequence):
         """``writelines(sequence_of_strings) -> None``.

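The thread-local ``recurse_protection`` flag in ``LoggingProxy`` matters
because the proxy is typically installed as ``sys.stdout``: a logging handler
that itself writes to stdout would otherwise recurse forever. A usage sketch
(the log file name is arbitrary):

    import logging
    import sys

    from celery.log import LoggingProxy, setup_logger

    logger = setup_logger(loglevel=logging.INFO, logfile="worker.log")
    sys.stdout = LoggingProxy(logger, loglevel=logging.INFO)

    print "captured"    # routed to worker.log instead of the terminal
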
+ 0 - 0
tests/__init__.py → celery/management/__init__.py


+ 0 - 0
celery/management/commands/__init__.py


+ 16 - 0
celery/management/commands/celeryd.py

@@ -0,0 +1,16 @@
+"""
+
+Start the celery daemon from the Django management command.
+
+"""
+from django.core.management.base import BaseCommand
+
+import celery.models # <-- shows upgrade instructions at exit.
+
+
+class Command(BaseCommand):
+    """Run the celery daemon."""
+    help = 'Run the celery daemon'
+
+    def handle(self, *args, **options):
+        pass

+ 26 - 27
celery/messaging.py

@@ -14,20 +14,17 @@ from celery import conf
 from celery import signals
 from celery.utils import gen_unique_id, mitemgetter, noop
 from celery.utils.functional import wraps
-from celery.routes import lookup_route, expand_destination
-from celery.loaders import load_settings
 
 
 
 
-MSG_OPTIONS = ("mandatory", "priority",
-               "immediate", "routing_key",
-               "serializer", "delivery_mode")
+MSG_OPTIONS = ("mandatory", "priority", "immediate",
+               "routing_key", "serializer", "delivery_mode")
 
 
 get_msg_options = mitemgetter(*MSG_OPTIONS)
 extract_msg_options = lambda d: dict(zip(MSG_OPTIONS, get_msg_options(d)))
-default_queue = conf.get_routing_table()[conf.DEFAULT_QUEUE]
+default_queue = conf.get_queues()[conf.DEFAULT_QUEUE]
 
 
 _queues_declared = False
-_exchanges_declared = {}
+_exchanges_declared = set()
 
 
 
 
 class TaskPublisher(Publisher):
@@ -47,26 +44,35 @@ class TaskPublisher(Publisher):
             consumers = get_consumer_set(self.connection)
             consumers.close()
             _queues_declared = True
+        self.declare()
+
+    def declare(self):
         if self.exchange not in _exchanges_declared:
-            self.declare()
-            _exchanges_declared[self.exchange] = True
+            super(TaskPublisher, self).declare()
+            _exchanges_declared.add(self.exchange)
 
 
     def delay_task(self, task_name, task_args=None, task_kwargs=None,
-            countdown=None, eta=None, task_id=None, taskset_id=None, **kwargs):
+            countdown=None, eta=None, task_id=None, taskset_id=None,
+            expires=None, **kwargs):
         """Delay task for execution by the celery nodes."""
 
 
         task_id = task_id or gen_unique_id()
-
-        if countdown: # Convert countdown to ETA.
-            eta = datetime.now() + timedelta(seconds=countdown)
-
         task_args = task_args or []
         task_kwargs = task_kwargs or {}
+        now = None
+        if countdown: # Convert countdown to ETA.
+            now = datetime.now()
+            eta = now + timedelta(seconds=countdown)
+
         if not isinstance(task_args, (list, tuple)):
             raise ValueError("task args must be a list or tuple")
         if not isinstance(task_kwargs, dict):
             raise ValueError("task kwargs must be a dictionary")
 
 
+        if isinstance(expires, int):
+            now = now or datetime.now()
+            expires = now + timedelta(seconds=expires)
+
         message_data = {
             "task": task_name,
             "id": task_id,
@@ -74,22 +80,13 @@ class TaskPublisher(Publisher):
             "kwargs": task_kwargs or {},
             "retries": kwargs.get("retries", 0),
             "eta": eta and eta.isoformat(),
+            "expires": expires and expires.isoformat(),
         }
 
 
         if taskset_id:
             message_data["taskset"] = taskset_id
 
 
-        route = {}
-        if conf.ROUTES:
-            route = lookup_route(conf.ROUTES, task_name, task_id,
-                                 task_args, task_kwargs)
-        if route:
-            dest = expand_destination(route, conf.get_routing_table())
-            msg_options = dict(extract_msg_options(kwargs), **dest)
-        else:
-            msg_options = extract_msg_options(kwargs)
-
-        self.send(message_data, **msg_options)
+        self.send(message_data, **extract_msg_options(kwargs))
         signals.task_sent.send(sender=task_name, **message_data)

         return task_id
@@ -186,6 +183,8 @@ class ControlReplyPublisher(Publisher):
     exchange = "celerycrq"
     exchange_type = "direct"
     delivery_mode = "non-persistent"
+    durable = False
+    auto_delete = True
 
 
 
 
 class BroadcastPublisher(Publisher):
@@ -268,7 +267,7 @@ def get_consumer_set(connection, queues=None, **options):
     Defaults to the queues in ``CELERY_QUEUES``.

     """
-    queues = queues or conf.get_routing_table()
+    queues = queues or conf.get_queues()
     cset = ConsumerSet(connection)
     for queue_name, queue_options in queues.items():
         queue_options = dict(queue_options)
@@ -282,6 +281,6 @@ def get_consumer_set(connection, queues=None, **options):
 @with_connection
 def reply(data, exchange, routing_key, connection=None, connect_timeout=None,
         **kwargs):
-    pub = Publisher(connection, exchange=exchange,
+    pub = ControlReplyPublisher(connection, exchange=exchange,
                     routing_key=routing_key, **kwargs)
                     routing_key=routing_key, **kwargs)
     pub.send(data)
+ 11 - 3
celery/models.py

@@ -2,12 +2,18 @@
 
 
 celery.models has been moved to djcelery.models.
 
 
-This file is deprecated and will be removed in Celery v1.4.0.
+This file is deprecated and will be removed in Celery v2.1.0.

 """
+import atexit
+
 from django.core.exceptions import ImproperlyConfigured
 
 
-raise ImproperlyConfigured("""
+@atexit.register
+def _display_help():
+    import sys
+
+    sys.stderr.write("""
 
 
 ======================================================
 ERROR: celery can't be added to INSTALLED_APPS anymore
@@ -37,7 +43,9 @@ To install django-celery you can do one of the following:
 
 
 
 
 If you weren't aware of this already you should read the
-Celery 1.2.0 Changelog as well:
+Celery 2.0 Changelog as well:
     http://github.com/ask/celery/tree/djangofree/Changelog

 """)
+
+raise ImproperlyConfigured("Please install django-celery")

+ 3 - 2
celery/platform.py

@@ -53,10 +53,11 @@ def set_process_title(progname, info=None):
     Only works if :mod:`setproctitle` is installed.

     """
+    proctitle = "[%s]" % progname
+    proctitle = info and "%s %s" % (proctitle, info) or proctitle
     if _setproctitle:
-        proctitle = "[%s]" % progname
-        proctitle = info and "%s %s" % (proctitle, info) or proctitle
         _setproctitle(proctitle)
+    return proctitle
 
 
 
 
 def set_mp_process_title(progname, info=None):

+ 2 - 2
celery/registry.py

@@ -60,13 +60,13 @@ class TaskRegistry(UserDict):
         try:
             return UserDict.__getitem__(self, key)
         except KeyError, exc:
-            raise self.NotRegistered(exc)
+            raise self.NotRegistered(str(exc))
 
 
     def pop(self, key, *args):
         try:
             return UserDict.pop(self, key, *args)
         except KeyError, exc:
-            raise self.NotRegistered(exc)
+            raise self.NotRegistered(str(exc))


 """

+ 16 - 16
celery/result.py

@@ -66,22 +66,22 @@ class BaseAsyncResult(object):
 
 
     def ready(self):
         """Returns ``True`` if the task executed successfully, or raised
-        an exception. If the task is still running, pending, or is waiting
-        for retry then ``False`` is returned.
+        an exception.
 
 
-        :rtype: bool
+        If the task is still running, pending, or is waiting
+        for retry then ``False`` is returned.

         """
         return self.status not in self.backend.UNREADY_STATES
 
 
     def successful(self):
-        """Returns ``True`` if the task executed successfully.
-
-        :rtype: bool
-
-        """
+        """Returns ``True`` if the task executed successfully."""
         return self.status == states.SUCCESS
 
 
+    def failed(self):
+        """Returns ``True`` if the task failed by exception."""
+        return self.status == states.FAILURE
+
     def __str__(self):
         """``str(self) -> self.task_id``"""
         return self.task_id
@@ -208,8 +208,8 @@ class TaskSetResult(object):
             successfully (i.e. did not raise an exception).

         """
-        return all((subtask.successful()
-                        for subtask in self.itersubtasks()))
+        return all(subtask.successful()
+                        for subtask in self.itersubtasks())
 
 
     def failed(self):
         """Did the taskset fail?
@@ -218,8 +218,8 @@ class TaskSetResult(object):
             (i.e., raised an exception)

         """
-        return any((not subtask.successful()
-                        for subtask in self.itersubtasks()))
+        return any(subtask.failed()
+                        for subtask in self.itersubtasks())
 
 
     def waiting(self):
         """Is the taskset waiting?
@@ -228,8 +228,8 @@ class TaskSetResult(object):
             waiting for execution.

         """
-        return any((not subtask.ready()
-                        for subtask in self.itersubtasks()))
+        return any(not subtask.ready()
+                        for subtask in self.itersubtasks())
 
 
     def ready(self):
         """Is the task ready?
@@ -238,8 +238,8 @@ class TaskSetResult(object):
             executed.

         """
-        return all((subtask.ready()
-                        for subtask in self.itersubtasks()))
+        return all(subtask.ready()
+                        for subtask in self.itersubtasks())
 
 
     def completed_count(self):
         """Task completion count.

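With ``failed()`` added, the common checks on a result read symmetrically.
A sketch, where ``result`` is any ``BaseAsyncResult``:

    if result.ready():
        if result.successful():
            print result.result                     # the return value
        elif result.failed():
            print "task raised: %r" % (result.result, )
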
+ 63 - 43
celery/routes.py

@@ -1,5 +1,7 @@
-from celery.exceptions import RouteNotFound
-from celery.utils import instantiate
+from celery.exceptions import QueueNotFound
+from celery.utils import instantiate, firstmethod, mpromise
+
+_first_route = firstmethod("route_for_task")
 
 
 
 
 class MapRoute(object):
@@ -9,20 +11,65 @@ class MapRoute(object):
         self.map = map
 
 
     def route_for_task(self, task, *args, **kwargs):
-        return self.map.get(task)
+        route = self.map.get(task)
+        if route:
+            return dict(route)
+
+
+class Router(object):
+
+    def __init__(self, routes=None, queues=None, create_missing=False):
+        if queues is None:
+            queues = {}
+        if routes is None:
+            routes = []
+        self.queues = queues
+        self.routes = routes
+        self.create_missing = create_missing
+
+    def add_queue(self, queue):
+        q = self.queues[queue] = {"binding_key": queue,
+                                  "routing_key": queue,
+                                  "exchange": queue,
+                                  "exchange_type": "direct"}
+        return q
+
+    def route(self, options, task, args=(), kwargs={}):
+        # Expand "queue" keys in options.
+        options = self.expand_destination(options)
+        if self.routes:
+            route = self.lookup_route(task, args, kwargs)
+            if route:
+                # Also expand "queue" keys in route.
+                return dict(options, **self.expand_destination(route))
+        return options
+
+    def expand_destination(self, route):
+        # The route can simply be a queue name,
+        # this is convenient for direct exchanges.
+        if isinstance(route, basestring):
+            queue, route = route, {}
+        else:
+            # For topic exchanges you can use the defaults from a queue
+            # definition, and override e.g. just the routing_key.
+            queue = route.pop("queue", None)
+
+        if queue:
+            try:
+                dest = dict(self.queues[queue])
+            except KeyError:
+                if self.create_missing:
+                    dest = self.add_queue(queue)
+                else:
+                    raise QueueNotFound(
+                        "Queue '%s' is not defined in CELERY_QUEUES" % queue)
+            dest.setdefault("routing_key", dest.get("binding_key"))
+            return dict(route, **dest)
 
 
+        return route
 
 
-def expand_destination(route, routing_table):
-    if isinstance(route, basestring):
-        try:
-            dest = dict(routing_table[route])
-        except KeyError:
-            raise RouteNotFound(
-                "Route %s does not exist in the routing table "
-                "(CELERY_QUEUES)" % route)
-        dest.setdefault("routing_key", dest.get("binding_key"))
-        return dest
-    return route
+    def lookup_route(self, task, args=None, kwargs=None):
+        return _first_route(self.routes, task, args, kwargs)
 
 
 
 
 def prepare(routes):
@@ -32,36 +79,9 @@ def prepare(routes):
         if isinstance(route, dict):
             return MapRoute(route)
         if isinstance(route, basestring):
-            return instantiate(route)
+            return mpromise(instantiate, route)
         return route
         return route

-    if not hasattr(routes, "__iter__"):
+    if not isinstance(routes, (list, tuple)):
         routes = (routes, )
     return map(expand_route, routes)
-
-def firstmatcher(method):
-    """Returns a functions that with a list of instances,
-    finds the first instance that returns a value for the given method."""
-
-    def _matcher(seq, *args, **kwargs):
-        for cls in seq:
-            try:
-                answer = getattr(cls, method)(*args, **kwargs)
-                if answer is not None:
-                    return answer
-            except AttributeError:
-                pass
-    return _matcher
-
-
-_first_route = firstmatcher("route_for_task")
-_first_disabled = firstmatcher("disabled")
-
-
-def lookup_route(routes, task, task_id=None, args=None, kwargs=None):
-    return _first_route(routes, task, task_id, args, kwargs)
-
-
-def lookup_disabled(routes, task, task_id=None, args =None, kwargs=None):
-    return _first_disabled(routes, task, task_id, args, kwargs)
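
How the new ``Router`` resolves options, as a sketch (queue and task names are
made up):

    from celery.routes import MapRoute, Router

    queues = {"video": {"exchange": "media",
                        "exchange_type": "topic",
                        "binding_key": "media.video"}}
    router = Router(routes=[MapRoute({"tasks.encode": {"queue": "video"}})],
                    queues=queues)

    options = router.route({}, "tasks.encode")
    # -> the "video" queue declaration, with routing_key defaulting
    #    to its binding_key ("media.video")

An unknown queue name raises ``QueueNotFound`` unless ``create_missing`` is
enabled, in which case a direct queue is declared on the fly.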

+ 1 - 1
celery/serialization.py

@@ -57,7 +57,7 @@ def find_nearest_pickleable_exception(exc):
     :returns: the nearest exception if it's not :exc:`Exception` or below;
         if it is, ``None`` is returned.
 
 
-    :rtype: :exc:`Exception`
+    :rtype :exc:`Exception`:

     """
     cls = exc.__class__

+ 3 - 0
celery/signals.py

@@ -14,3 +14,6 @@ worker_init = Signal(providing_args=[])
 worker_process_init = Signal(providing_args=[])
 worker_ready = Signal(providing_args=[])
 worker_shutdown = Signal(providing_args=[])
+
+setup_logging = Signal(providing_args=["loglevel", "logfile",
+                                       "format", "colorize"])
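
If any receiver is connected to ``setup_logging``, celery skips its own
logging configuration entirely, so an application can take over. A sketch
(the ini file name is arbitrary):

    from logging.config import fileConfig

    from celery import signals

    def configure_logging(sender=None, **kwargs):
        fileConfig("logging.ini")

    signals.setup_logging.connect(configure_logging)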

+ 2 - 2
celery/task/__init__.py

@@ -36,7 +36,7 @@ def dmap_async(fun, args, timeout=None):
     """Distribute processing of the arguments and collect the results
     asynchronously.

-    :returns: :class:`celery.result.AsyncResult` object.
+    :returns :class:`celery.result.AsyncResult`:

     Example
 
 
@@ -64,7 +64,7 @@ def execute_remote(fun, *args, **kwargs):
     The object must be picklable, so you can't use lambdas or functions
     defined in the REPL (the objects must have an associated module).

-    :returns: class:`celery.result.AsyncResult`.
+    :returns class:`celery.result.AsyncResult`:

     """
     return ExecuteRemoteTask.delay(pickle.dumps(fun), args, kwargs)

+ 40 - 13
celery/task/base.py

@@ -31,6 +31,10 @@ Please use the CELERYBEAT_SCHEDULE setting instead:
 """
 
 
 
 
+def _unpickle_task(name):
+    return tasks[name]
+
+
 class TaskType(type):
     """Metaclass for tasks.
 
 
@@ -95,6 +99,12 @@ class Task(object):
         however if you want a periodic task, you should subclass
         :class:`PeriodicTask` instead.
 
 
+    .. attribute:: queue
+
+        Select a destination queue for this task. The queue needs to exist
+        in ``CELERY_QUEUES``. The ``routing_key``, ``exchange`` and
+        ``exchange_type`` attributes will be ignored if this is set.
+
     .. attribute:: routing_key

         Override the global default ``routing_key`` for this task.
@@ -137,7 +147,7 @@ class Task(object):
     .. attribute:: default_retry_delay

         Default time in seconds before a retry of the task should be
-        executed. Default is a 1 minute delay.
+        executed. Default is a 3 minute delay.
 
 
     .. attribute:: rate_limit
 
 
@@ -152,7 +162,7 @@ class Task(object):
     .. attribute:: disable_error_emails

         Disable all error e-mails for this task (only applicable if
-        ``settings.SEND_CELERY_ERROR_EMAILS`` is on.)
+        ``settings.CELERY_SEND_TASK_ERROR_EMAILS`` is on.)
 
 
     .. attribute:: serializer
 
 
@@ -201,11 +211,16 @@ class Task(object):
     abstract = True
     autoregister = True
     type = "regular"
-    exchange = None
+
+    queue = None
     routing_key = None
+    exchange = None
+    exchange_type = conf.DEFAULT_EXCHANGE_TYPE
+    delivery_mode = conf.DEFAULT_DELIVERY_MODE
     immediate = False
     mandatory = False
     priority = None
+
     ignore_result = conf.IGNORE_RESULT
     disable_error_emails = False
     max_retries = 3
@@ -213,8 +228,6 @@ class Task(object):
     serializer = conf.TASK_SERIALIZER
     rate_limit = conf.DEFAULT_RATE_LIMIT
     backend = default_backend
-    exchange_type = conf.DEFAULT_EXCHANGE_TYPE
-    delivery_mode = conf.DEFAULT_DELIVERY_MODE
     track_started = conf.TRACK_STARTED
     acks_late = conf.ACKS_LATE
 
 
@@ -223,6 +236,9 @@ class Task(object):
     def __call__(self, *args, **kwargs):
         return self.run(*args, **kwargs)

+    def __reduce__(self):
+        return (_unpickle_task, (self.name, ), None)
+
     def run(self, *args, **kwargs):
         """The body of the task executed by the worker.
 
 
@@ -259,7 +275,7 @@ class Task(object):
     def establish_connection(self,
             connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
         """Establish a connection to the message broker."""
-        return _establish_connection(connect_timeout)
+        return _establish_connection(connect_timeout=connect_timeout)
 
 
     @classmethod
     def get_publisher(self, connection=None, exchange=None,
@@ -267,7 +283,7 @@ class Task(object):
             exchange_type=None):
         """Get a celery task message publisher.
 
 
-        :rtype: :class:`celery.messaging.TaskPublisher`.
+        :rtype :class:`celery.messaging.TaskPublisher`:
 
 
         Please be sure to close the AMQP connection when you're done
         with this object, i.e.:
@@ -292,7 +308,7 @@ class Task(object):
             connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
         """Get a celery task message consumer.
 
 
-        :rtype: :class:`celery.messaging.TaskConsumer`.
+        :rtype :class:`celery.messaging.TaskConsumer`:
 
 
         Please be sure to close the AMQP connection when you're done
         with this object. i.e.:
@@ -314,7 +330,7 @@ class Task(object):
         :param \*args: positional arguments passed on to the task.
         :param \*\*kwargs: keyword arguments passed on to the task.

-        :returns: :class:`celery.result.AsyncResult`
+        :returns :class:`celery.result.AsyncResult`:

         """
         return self.apply_async(args, kwargs)
@@ -330,8 +346,7 @@ class Task(object):
 
 
         See :func:`celery.execute.apply_async` for more information.

-        :rtype: :class:`celery.result.AsyncResult`
-
+        :returns :class:`celery.result.AsyncResult`:

         """
         return apply_async(self, args, kwargs, **options)
@@ -376,6 +391,11 @@ class Task(object):
             ...                        countdown=60 * 5, exc=exc)

         """
+        if not kwargs:
+            raise TypeError(
+                    "kwargs argument to retry can't be empty. "
+                    "Task must accept **kwargs, see http://bit.ly/cAx3Bg")
+
         delivery_info = kwargs.pop("delivery_info", {})
         options.setdefault("exchange", delivery_info.get("exchange"))
         options.setdefault("routing_key", delivery_info.get("routing_key"))
@@ -407,12 +427,15 @@ class Task(object):
 
 
     @classmethod
     def apply(self, args=None, kwargs=None, **options):
-        """Execute this task at once, by blocking until the task
+        """Execute this task locally, by blocking until the task
         has finished executing.
 
 
         :param args: positional arguments passed on to the task.
         :param kwargs: keyword arguments passed on to the task.
-        :rtype: :class:`celery.result.EagerResult`
+        :keyword throw: Re-raise task exceptions. Defaults to
+            the ``CELERY_EAGER_PROPAGATES_EXCEPTIONS`` setting.
+
+        :rtype :class:`celery.result.EagerResult`:
 
 
         See :func:`celery.execute.apply`.
 
 
@@ -522,6 +545,10 @@ class Task(object):
         for a single task invocation."""
         return subtask(cls, *args, **kwargs)
 
 
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
 
 
 class PeriodicTask(Task):
     """A periodic task is a task that behaves like a :manpage:`cron` job.

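With the new ``queue`` attribute a task declares its destination instead of
spelling out exchange and routing key. A sketch ("video" must exist in
``CELERY_QUEUES``, or the create-missing-queues option must be enabled):

    from celery.task.base import Task

    class VideoEncodeTask(Task):
        queue = "video"

        def run(self, url, **kwargs):
            logger = self.get_logger(**kwargs)
            logger.info("encoding %s" % (url, ))
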
+ 2 - 3
celery/task/builtins.py

@@ -1,6 +1,5 @@
 from datetime import timedelta

-from celery.backends import default_backend
 from celery.serialization import pickle
 from celery.task.base import Task, PeriodicTask
 from celery.task.sets import TaskSet
@@ -19,8 +18,8 @@ class DeleteExpiredTaskMetaTask(PeriodicTask):
     def run(self, **kwargs):
         """:returns: None"""
         logger = self.get_logger(**kwargs)
-        logger.info("Deleting expired task meta objects...")
-        default_backend.cleanup()
+        logger.info("Deleting expired task results...")
+        self.backend.cleanup()
 
 
 
 
 class PingTask(Task):

+ 51 - 0
celery/task/control.py

@@ -88,6 +88,57 @@ def rate_limit(task_name, rate_limit, destination=None, **kwargs):
                                    **kwargs)
 
 
 
 
+def flatten_reply(reply):
+    nodes = {}
+    for item in reply:
+        nodes.update(item)
+    return nodes
+
+
+class inspect(object):
+
+    def __init__(self, destination=None, timeout=1):
+        self.destination = destination
+        self.timeout = timeout
+
+    def _prepare(self, reply):
+        if not reply:
+            return
+        by_node = flatten_reply(reply)
+        if self.destination and \
+                not isinstance(self.destination, (list, tuple)):
+            return by_node.get(self.destination)
+        return by_node
+
+    def _request(self, command, **kwargs):
+        return self._prepare(broadcast(command, arguments=kwargs,
+                                       destination=self.destination,
+                                       timeout=self.timeout, reply=True))
+
+    def active(self, safe=False):
+        return self._request("dump_active", safe=safe)
+
+    def scheduled(self, safe=False):
+        return self._request("dump_schedule", safe=safe)
+
+    def reserved(self, safe=False):
+        return self._request("dump_reserved", safe=safe)
+
+    def stats(self):
+        return self._request("stats")
+
+    def revoked(self):
+        return self._request("dump_revoked")
+
+    def registered_tasks(self):
+        return self._request("dump_tasks")
+
+    def enable_events(self):
+        return self._request("enable_events")
+
+    def disable_events(self):
+        return self._request("disable_events")
+
+
 @with_connection
 def broadcast(command, arguments=None, destination=None, connection=None,
         connect_timeout=conf.BROKER_CONNECTION_TIMEOUT, reply=False,
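
A hedged sketch of driving the new ``inspect`` API (assumes a reachable broker; the worker name is hypothetical):

    from celery.task.control import inspect

    # No destination: collect replies from every worker that answers
    # within the timeout, keyed by node name.
    i = inspect(timeout=1)
    print i.registered_tasks()

    # A single destination string returns just that node's reply.
    i = inspect(destination="worker1.example.com")
    print i.active()        # tasks currently being executed
    print i.scheduled()     # eta/countdown tasks waiting to run
    print i.revoked()       # revoked task ids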

+ 22 - 21
celery/task/http.py

@@ -37,6 +37,24 @@ def utf8dict(tup):
                     for key, value in tup)


+def extract_response(raw_response):
+    """Extract the response text from a raw JSON response."""
+    if not raw_response:
+        raise InvalidResponseError("Empty response")
+    try:
+        payload = deserialize(raw_response)
+    except ValueError, exc:
+        raise InvalidResponseError(str(exc))
+
+    status = payload["status"]
+    if status == "success":
+        return payload["retval"]
+    elif status == "failure":
+        raise RemoteExecuteError(payload.get("reason"))
+    else:
+        raise UnknownStatusError(str(status))
+
+
 class MutableURL(object):
     """Object wrapping a Uniform Resource Locator.

@@ -110,33 +128,16 @@ class HttpDispatch(object):
         response = urllib2.urlopen(request) # user catches errors.
         return response.read()

-    def _dispatch_raw(self):
-        """Dispatches the callback and returns the raw response text."""
+    def dispatch(self):
+        """Dispatch callback and return result."""
         url = MutableURL(self.url)
         params = None
         if self.method in GET_METHODS:
             url.query.update(self.task_kwargs)
         else:
             params = urlencode(utf8dict(self.task_kwargs.items()))
-        return self.make_request(str(url), self.method, params)
-
-    def dispatch(self):
-        """Dispatch callback and return result."""
-        response = self._dispatch_raw()
-        if not response:
-            raise InvalidResponseError("Empty response")
-        try:
-            payload = deserialize(response)
-        except ValueError, exc:
-            raise InvalidResponseError(str(exc))
-
-        status = payload["status"]
-        if status == "success":
-            return payload["retval"]
-        elif status == "failure":
-            raise RemoteExecuteError(payload.get("reason"))
-        else:
-            raise UnknownStatusError(str(status))
+        raw_response = self.make_request(str(url), self.method, params)
+        return extract_response(raw_response)

     @property
     def http_headers(self):
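
For illustration, the payload shapes ``extract_response`` accepts after this refactor (the JSON strings are assumptions based on the status values checked above):

    from celery.task.http import extract_response
    from celery.task.http import InvalidResponseError, RemoteExecuteError

    assert extract_response('{"status": "success", "retval": 42}') == 42

    try:
        extract_response('{"status": "failure", "reason": "boom"}')
    except RemoteExecuteError:
        pass    # the remote task reported failure

    try:
        extract_response("not json at all")
    except InvalidResponseError:
        pass    # empty or undeserializable responses are rejected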

+ 67 - 17
celery/task/sets.py

@@ -1,13 +1,37 @@
+import warnings
+
 from UserList import UserList

 from celery import conf
 from celery import registry
 from celery.datastructures import AttributeDict
-from celery.messaging import establish_connection, with_connection
+from celery.messaging import with_connection
 from celery.messaging import TaskPublisher
 from celery.result import TaskSetResult
 from celery.utils import gen_unique_id

+TASKSET_DEPRECATION_TEXT = """\
+Using this invocation of TaskSet is deprecated and will be removed
+in Celery v2.4!
+
+TaskSet now supports multiple types of tasks; the API has been
+changed to reflect this:
+
+    from celery.task.sets import TaskSet
+
+    ts = TaskSet(tasks=[
+            %(cls)s.subtask(args1, kwargs1, options1),
+            %(cls)s.subtask(args2, kwargs2, options2),
+            %(cls)s.subtask(args3, kwargs3, options3),
+            ...
+            %(cls)s.subtask(argsN, kwargsN, optionsN),
+    ])
+
+    result = ts.apply_async()
+
+Thank you for your patience!
+
+"""
 
 
 class subtask(AttributeDict):
 class subtask(AttributeDict):
     """Class that wraps the arguments and execution options
     """Class that wraps the arguments and execution options
@@ -49,19 +73,25 @@ class subtask(AttributeDict):
         init(task=task_name, args=tuple(args or ()), kwargs=kwargs or (),
              options=options or ())

-    def apply(self, *argmerge, **execopts):
+    def delay(self, *argmerge, **kwmerge):
+        """Shortcut to ``apply_async(argmerge, kwargs)``."""
+        return self.apply_async(args=argmerge, kwargs=kwmerge)
+
+    def apply(self, args=(), kwargs={}, **options):
         """Apply this task locally."""
         """Apply this task locally."""
         # For callbacks: extra args are prepended to the stored args.
         # For callbacks: extra args are prepended to the stored args.
-        args = tuple(argmerge) + tuple(self.args)
-        return self.get_type().apply(args, self.kwargs,
-                                     **dict(self.options, **execopts))
+        args = tuple(args) + tuple(self.args)
+        kwargs = dict(self.kwargs, **kwargs)
+        options = dict(self.options, **options)
+        return self.get_type().apply(args, kwargs, **options)

-    def apply_async(self, *argmerge, **execopts):
+    def apply_async(self, args=(), kwargs={}, **options):
         """Apply this task asynchronously."""
         # For callbacks: extra args are prepended to the stored args.
-        args = tuple(argmerge) + tuple(self.args)
-        return self.get_type().apply_async(args, self.kwargs,
-                                           **dict(self.options, **execopts))
+        args = tuple(args) + tuple(self.args)
+        kwargs = dict(self.kwargs, **kwargs)
+        options = dict(self.options, **options)
+        return self.get_type().apply_async(args, kwargs, **options)

     def get_type(self):
         # For JSON serialization, the task class is lazily loaded,
@@ -93,17 +123,23 @@ class TaskSet(UserList):
         >>> list_of_return_values = taskset_result.join()

     """
-    task = None # compat
-    task_name = None # compat
+    _task = None # compat
+    _task_name = None # compat

     def __init__(self, task=None, tasks=None):
-        # Previously TaskSet only supported applying one kind of task.
-        # the signature then was TaskSet(task, arglist)
-        # Convert the arguments to subtasks'.
         if task is not None:
-            tasks = [subtask(task, *arglist) for arglist in tasks]
-            self.task = task
-            self.task_name = task.name
+            if hasattr(task, "__iter__"):
+                tasks = task
+            else:
+                # Previously TaskSet only supported applying one kind of task.
+                # the signature then was TaskSet(task, arglist),
+                # so convert the arguments to subtasks.
+                tasks = [subtask(task, *arglist) for arglist in tasks]
+                task = self._task = registry.tasks[task.name]
+                self._task_name = task.name
+                warnings.warn(TASKSET_DEPRECATION_TEXT % {
+                                "cls": task.__class__.__name__},
+                              DeprecationWarning)

         self.data = list(tasks)
         self.total = len(self.tasks)
@@ -165,3 +201,17 @@ class TaskSet(UserList):
     @property
     def tasks(self):
         return self.data
+
+    @property
+    def task(self):
+        warnings.warn(
+            "TaskSet.task is deprecated and will be removed in 1.4",
+            DeprecationWarning)
+        return self._task
+
+    @property
+    def task_name(self):
+        warnings.warn(
+            "TaskSet.task_name is deprecated and will be removed in 1.4",
+            DeprecationWarning)
+        return self._task_name
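
A brief sketch of the calling conventions after this change (``tasks.add`` is a hypothetical registered task):

    from celery.task.sets import TaskSet, subtask

    # Extra positional args given to delay()/apply_async() are
    # prepended to the stored args, callback style.
    s = subtask("tasks.add", args=(2, ))
    s.delay(4)                      # eventually executes add(4, 2)

    # TaskSet now takes ready-made subtasks; the old
    # TaskSet(task, arglist) form emits the warning above.
    ts = TaskSet(tasks=[subtask("tasks.add", args=(i, i))
                            for i in range(10)])
    result = ts.apply_async()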

+ 12 - 0
celery/tests/__init__.py

@@ -0,0 +1,12 @@
+import os
+
+config = os.environ.setdefault("CELERY_TEST_CONFIG_MODULE",
+                               "celery.tests.config")
+
+os.environ["CELERY_CONFIG_MODULE"] = config
+os.environ["CELERY_LOADER"] = "default"
+
+def teardown():
+    import os
+    if os.path.exists("test.db"):
+        os.remove("test.db")

+ 1 - 7
tests/celeryconfig.py → celery/tests/config.py

@@ -1,5 +1,3 @@
-import atexit
-
 CARROT_BACKEND = "memory"
 CARROT_BACKEND = "memory"
 
 
 
 
@@ -12,8 +10,4 @@ CELERY_DEFAULT_EXCHANGE = "testcelery"
 CELERY_DEFAULT_ROUTING_KEY = "testcelery"
 CELERY_DEFAULT_ROUTING_KEY = "testcelery"
 CELERY_QUEUES = {"testcelery": {"binding_key": "testcelery"}}
 CELERY_QUEUES = {"testcelery": {"binding_key": "testcelery"}}
 
 
-@atexit.register
-def teardown_testdb():
-    import os
-    if os.path.exists("test.db"):
-        os.remove("test.db")
+CELERYD_LOG_COLOR = False

+ 0 - 0
celery/tests/functional/__init__.py


+ 165 - 0
celery/tests/functional/case.py

@@ -0,0 +1,165 @@
+import atexit
+import logging
+import os
+import signal
+import socket
+import sys
+import traceback
+import unittest2 as unittest
+
+from itertools import count
+
+from celery.exceptions import TimeoutError
+from celery.task.control import ping, flatten_reply, inspect
+from celery.utils import get_full_cls_name
+
+HOSTNAME = socket.gethostname()
+
+
+def say(msg):
+    sys.stderr.write("%s\n" % msg)
+
+
+def try_while(fun, reason="Timed out", timeout=10, interval=0.5):
+    for iterations in count(0):
+        if iterations * interval >= timeout:
+            raise TimeoutError()
+        ret = fun()
+        if ret:
+            return ret
+
+
+class Worker(object):
+    started = False
+    next_worker_id = count(1).next
+    _shutdown_called = False
+
+    def __init__(self, hostname, loglevel="error"):
+        self.hostname = hostname
+        self.loglevel = loglevel
+
+    def start(self):
+        if not self.started:
+            self._fork_and_exec()
+            self.started = True
+
+    def _fork_and_exec(self):
+        pid = os.fork()
+        if pid == 0:
+            os.execv(sys.executable,
+                    [sys.executable] + ["-m", "celery.bin.celeryd",
+                                        "-l", self.loglevel,
+                                        "-n", self.hostname])
+            os._exit(1)  # defensive: execv does not return on success
+        self.pid = pid
+
+    def is_alive(self, timeout=1):
+        r = ping(destination=[self.hostname],
+                 timeout=timeout)
+        return self.hostname in flatten_reply(r)
+
+    def wait_until_started(self, timeout=10, interval=0.2):
+        try_while(lambda: self.is_alive(interval),
+                "Worker won't start (after %s secs.)" % timeout,
+                interval=0.2, timeout=10)
+        say("--WORKER %s IS ONLINE--" % self.hostname)
+
+    def ensure_shutdown(self, timeout=10, interval=0.5):
+        os.kill(self.pid, signal.SIGTERM)
+        try_while(lambda: not self.is_alive(interval),
+                  "Worker won't shutdown (after %s secs.)" % timeout,
+                  timeout=timeout, interval=interval)
+        say("--WORKER %s IS SHUTDOWN--" % self.hostname)
+        self._shutdown_called = True
+
+    def ensure_started(self):
+        self.start()
+        self.wait_until_started()
+
+    @classmethod
+    def managed(cls, hostname=None, caller=None):
+        hostname = hostname or socket.gethostname()
+        if caller:
+            hostname = ".".join([get_full_cls_name(caller), hostname])
+        else:
+            hostname += str(cls.next_worker_id())
+        worker = cls(hostname)
+        worker.ensure_started()
+        stack = traceback.format_stack()
+
+        @atexit.register
+        def _ensure_shutdown_once():
+            if not worker._shutdown_called:
+                say("-- Found worker not stopped at shutdown: %s\n%s" % (
+                        worker.hostname,
+                        "\n".join(stack)))
+                worker.ensure_shutdown()
+
+        return worker
+
+
+class WorkerCase(unittest.TestCase):
+    hostname = HOSTNAME
+    worker = None
+
+    @classmethod
+    def setUpClass(cls):
+        logging.getLogger("amqplib").setLevel(logging.ERROR)
+        cls.worker = Worker.managed(cls.hostname, caller=cls)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.worker.ensure_shutdown()
+
+    def assertWorkerAlive(self, timeout=1):
+        self.assertTrue(self.worker.is_alive(timeout))
+
+    def inspect(self, timeout=1):
+        return inspect(self.worker.hostname, timeout=timeout)
+
+    def my_response(self, response):
+        return flatten_reply(response)[self.worker.hostname]
+
+    def is_accepted(self, task_id, interval=0.5):
+        active = self.inspect(timeout=interval).active()
+        if active:
+            for task in active:
+                if task["id"] == task_id:
+                    return True
+        return False
+
+    def is_reserved(self, task_id, interval=0.5):
+        reserved = self.inspect(timeout=interval).reserved()
+        if reserved:
+            for task in reserved:
+                if task["id"] == task_id:
+                    return True
+        return False
+
+    def is_scheduled(self, task_id, interval=0.5):
+        schedule = self.inspect(timeout=interval).scheduled()
+        if schedule:
+            for item in schedule:
+                if item["request"]["id"] == task_id:
+                    return True
+        return False
+
+    def is_received(self, task_id, interval=0.5):
+        return (self.is_reserved(task_id, interval) or
+                self.is_scheduled(task_id, interval) or
+                self.is_accepted(task_id, interval))
+
+    def ensure_accepted(self, task_id, interval=0.5, timeout=10):
+        return try_while(lambda: self.is_accepted(task_id, interval),
+                         "Task not accepted within timeout",
+                         interval=interval, timeout=timeout)
+
+    def ensure_received(self, task_id, interval=0.5, timeout=10):
+        return try_while(lambda: self.is_received(task_id, interval),
+                        "Task not receied within timeout",
+                        interval=0.5, timeout=10)
+
+    def ensure_scheduled(self, task_id, interval=0.5, timeout=10):
+        return try_while(lambda: self.is_scheduled(task_id, interval),
+                        "Task not scheduled within timeout",
+                        interval=interval, timeout=timeout)
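
A sketch of how this harness might be used (``sleeptask`` is the task added in the next file; one managed worker is started per test case class):

    from celery.tests.functional import tasks
    from celery.tests.functional.case import WorkerCase


    class test_worker(WorkerCase):

        def test_task_is_picked_up(self):
            self.assertWorkerAlive()
            result = tasks.sleeptask.delay(2)
            self.ensure_received(result.task_id)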

+ 23 - 0
celery/tests/functional/tasks.py

@@ -0,0 +1,23 @@
+import time
+
+from celery.decorators import task
+from celery.task.sets import subtask
+
+
+@task
+def add(x, y):
+    return x + y
+
+
+@task
+def add_cb(x, y, callback=None):
+    result = x + y
+    if callback:
+        return subtask(callback).delay(result)
+    return result
+
+
+@task
+def sleeptask(i):
+    time.sleep(i)
+    return i

+ 0 - 2
celery/tests/test_backends/disabled_amqp.py

@@ -25,7 +25,6 @@ class test_AMQPBackend(unittest.TestCase):
         tid = gen_unique_id()

         tb1.mark_as_done(tid, 42)
-        self.assertTrue(tb2.is_successful(tid))
         self.assertEqual(tb2.get_status(tid), states.SUCCESS)
         self.assertEqual(tb2.get_result(tid), 42)
         self.assertTrue(tb2._cache.get(tid))
@@ -53,7 +52,6 @@ class test_AMQPBackend(unittest.TestCase):
         except KeyError, exception:
             einfo = ExceptionInfo(sys.exc_info())
         tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback)
-        self.assertFalse(tb2.is_successful(tid3))
         self.assertEqual(tb2.get_status(tid3), states.FAILURE)
         self.assertIsInstance(tb2.get_result(tid3), KeyError)
         self.assertEqual(tb2.get_traceback(tid3), einfo.traceback)

+ 93 - 4
celery/tests/test_backends/test_base.py

@@ -9,6 +9,8 @@ from celery.serialization import get_pickleable_exception as gpe

 from celery import states
 from celery.backends.base import BaseBackend, KeyValueStoreBackend
+from celery.backends.base import BaseDictBackend
+from celery.utils import gen_unique_id


 class wrapobject(object):
@@ -24,7 +26,7 @@ Lookalike = subclass_exception("Lookalike", wrapobject, "foo.module")
 b = BaseBackend()


-class TestBaseBackendInterface(unittest.TestCase):
+class test_BaseBackend_interface(unittest.TestCase):

     def test_get_status(self):
         self.assertRaises(NotImplementedError,
@@ -34,6 +36,14 @@ class TestBaseBackendInterface(unittest.TestCase):
         self.assertRaises(NotImplementedError,
                 b.store_result, "SOMExx-N0nex1stant-IDxx-", 42, states.SUCCESS)

+    def test_reload_task_result(self):
+        self.assertRaises(NotImplementedError,
+                b.reload_task_result, "SOMExx-N0nex1stant-IDxx-")
+
+    def test_reload_taskset_result(self):
+        self.assertRaises(NotImplementedError,
+                b.reload_taskset_result, "SOMExx-N0nex1stant-IDxx-")
+
     def test_get_result(self):
         self.assertRaises(NotImplementedError,
                 b.get_result, "SOMExx-N0nex1stant-IDxx-")
@@ -51,7 +61,7 @@ class TestBaseBackendInterface(unittest.TestCase):
                 b.get_traceback, "SOMExx-N0nex1stant-IDxx-")


-class TestPickleException(unittest.TestCase):
+class test_exception_pickle(unittest.TestCase):

     def test_oldstyle(self):
         self.assertIsNone(fnpe(Oldstyle()))
@@ -68,7 +78,7 @@ class TestPickleException(unittest.TestCase):
         self.assertIsNone(fnpe(Impossible()))


-class TestPrepareException(unittest.TestCase):
+class test_prepare_exception(unittest.TestCase):

     def test_unpickleable(self):
         x = b.prepare_exception(Unpickleable(1, 2, "foo"))
@@ -93,7 +103,86 @@ class TestPrepareException(unittest.TestCase):
         self.assertIsInstance(y, KeyError)


-class TestKeyValueStoreBackendInterface(unittest.TestCase):
+class KVBackend(KeyValueStoreBackend):
+
+    def __init__(self, *args, **kwargs):
+        self.db = {}
+        super(KVBackend, self).__init__(*args, **kwargs)
+
+    def get(self, key):
+        return self.db.get(key)
+
+    def set(self, key, value):
+        self.db[key] = value
+
+
+class DictBackend(BaseDictBackend):
+
+    def _save_taskset(self, taskset_id, result):
+        return "taskset-saved"
+
+    def _restore_taskset(self, taskset_id):
+        if taskset_id == "exists":
+            return {"result": "taskset"}
+
+    def _get_task_meta_for(self, task_id):
+        if task_id == "task-exists":
+            return {"result": "task"}
+
+
+class test_BaseDictBackend(unittest.TestCase):
+
+    def setUp(self):
+        self.b = DictBackend()
+
+    def test_save_taskset(self):
+        self.assertEqual(self.b.save_taskset("foofoo", "xxx"),
+                         "taskset-saved")
+
+    def test_restore_taskset(self):
+        self.assertIsNone(self.b.restore_taskset("missing"))
+        self.assertIsNone(self.b.restore_taskset("missing"))
+        self.assertEqual(self.b.restore_taskset("exists"), "taskset")
+        self.assertEqual(self.b.restore_taskset("exists"), "taskset")
+        self.assertEqual(self.b.restore_taskset("exists", cache=False),
+                         "taskset")
+
+    def test_reload_taskset_result(self):
+        self.b._cache = {}
+        self.b.reload_taskset_result("exists")
+        self.assertEqual(self.b._cache["exists"], {"result": "taskset"})
+
+    def test_reload_task_result(self):
+        self.b._cache = {}
+        self.b.reload_task_result("task-exists")
+        self.assertEqual(self.b._cache["task-exists"], {"result": "task"})
+
+
+class test_KeyValueStoreBackend(unittest.TestCase):
+
+    def setUp(self):
+        self.b = KVBackend()
+
+    def test_get_store_result(self):
+        tid = gen_unique_id()
+        self.b.mark_as_done(tid, "Hello world")
+        self.assertEqual(self.b.get_result(tid), "Hello world")
+        self.assertEqual(self.b.get_status(tid), states.SUCCESS)
+
+    def test_get_missing_meta(self):
+        self.assertIsNone(self.b.get_result("xxx-missing"))
+        self.assertEqual(self.b.get_status("xxx-missing"), states.PENDING)
+
+    def test_save_restore_taskset(self):
+        tid = gen_unique_id()
+        self.b.save_taskset(tid, "Hello world")
+        self.assertEqual(self.b.restore_taskset(tid), "Hello world")
+
+    def test_restore_missing_taskset(self):
+        self.assertIsNone(self.b.restore_taskset("xxx-nonexistant"))
+
+
+class test_KeyValueStoreBackend_interface(unittest.TestCase):

     def test_get(self):
         self.assertRaises(NotImplementedError, KeyValueStoreBackend().get,

+ 129 - 0
celery/tests/test_backends/test_cache.py

@@ -0,0 +1,129 @@
+import sys
+import types
+import unittest2 as unittest
+
+from celery import states
+from celery.backends.cache import CacheBackend, DummyClient
+from celery.exceptions import ImproperlyConfigured
+from celery.utils import gen_unique_id
+
+from celery.tests.utils import mask_modules
+
+
+class SomeClass(object):
+
+    def __init__(self, data):
+        self.data = data
+
+
+class test_CacheBackend(unittest.TestCase):
+
+    def test_mark_as_done(self):
+        tb = CacheBackend(backend="memory://")
+
+        tid = gen_unique_id()
+
+        self.assertEqual(tb.get_status(tid), states.PENDING)
+        self.assertIsNone(tb.get_result(tid))
+
+        tb.mark_as_done(tid, 42)
+        self.assertEqual(tb.get_status(tid), states.SUCCESS)
+        self.assertEqual(tb.get_result(tid), 42)
+
+    def test_is_pickled(self):
+        tb = CacheBackend(backend="memory://")
+
+        tid2 = gen_unique_id()
+        result = {"foo": "baz", "bar": SomeClass(12345)}
+        tb.mark_as_done(tid2, result)
+        # is serialized properly.
+        rindb = tb.get_result(tid2)
+        self.assertEqual(rindb.get("foo"), "baz")
+        self.assertEqual(rindb.get("bar").data, 12345)
+
+    def test_mark_as_failure(self):
+        tb = CacheBackend(backend="memory://")
+
+        tid3 = gen_unique_id()
+        try:
+            raise KeyError("foo")
+        except KeyError, exception:
+            pass
+        tb.mark_as_failure(tid3, exception)
+        self.assertEqual(tb.get_status(tid3), states.FAILURE)
+        self.assertIsInstance(tb.get_result(tid3), KeyError)
+
+    def test_process_cleanup(self):
+        tb = CacheBackend(backend="memory://")
+        tb.process_cleanup()
+
+    def test_expires_as_int(self):
+        tb = CacheBackend(backend="memory://", expires=10)
+        self.assertEqual(tb.expires, 10)
+
+    def test_unknown_backend_raises_ImproperlyConfigured(self):
+        self.assertRaises(ImproperlyConfigured,
+                          CacheBackend, backend="unknown://")
+
+
+class test_get_best_memcache(unittest.TestCase):
+
+    def mock_memcache(self):
+        memcache = types.ModuleType("memcache")
+        memcache.Client = DummyClient
+        memcache.Client.__module__ = memcache.__name__
+        prev, sys.modules["memcache"] = sys.modules.get("memcache"), memcache
+        yield True
+        if prev is not None:
+            sys.modules["memcache"] = prev
+        yield True
+
+    def mock_pylibmc(self):
+        pylibmc = types.ModuleType("pylibmc")
+        pylibmc.Client = DummyClient
+        pylibmc.Client.__module__ = pylibmc.__name__
+        prev = sys.modules.get("pylibmc")
+        sys.modules["pylibmc"] = pylibmc
+        yield True
+        if prev is not None:
+            sys.modules["pylibmc"] = prev
+        yield True
+
+    def test_pylibmc(self):
+        pylibmc = self.mock_pylibmc()
+        pylibmc.next()
+        sys.modules.pop("celery.backends.cache", None)
+        from celery.backends import cache
+        self.assertEqual(cache.get_best_memcache().__module__, "pylibmc")
+        pylibmc.next()
+
+    def test_memcache(self):
+
+        def with_no_pylibmc():
+            sys.modules.pop("celery.backends.cache", None)
+            from celery.backends import cache
+            self.assertEqual(cache.get_best_memcache().__module__, "memcache")
+
+        context = mask_modules("pylibmc")
+        context.__enter__()
+        try:
+            memcache = self.mock_memcache()
+            memcache.next()
+            with_no_pylibmc()
+            memcache.next()
+        finally:
+            context.__exit__(None, None, None)
+
+    def test_no_implementations(self):
+
+        def with_no_memcache_libs():
+            sys.modules.pop("celery.backends.cache", None)
+            from celery.backends import cache
+            self.assertRaises(ImproperlyConfigured, cache.get_best_memcache)
+
+        context = mask_modules("pylibmc", "memcache")
+        context.__enter__()
+        try:
+            with_no_memcache_libs()
+        finally:
+            context.__exit__(None, None, None)

+ 133 - 0
celery/tests/test_backends/test_database.py

@@ -0,0 +1,133 @@
+import unittest2 as unittest
+
+from datetime import datetime
+
+from celery.exceptions import ImproperlyConfigured
+
+from celery import conf
+from celery import states
+from celery.db.models import Task, TaskSet
+from celery.utils import gen_unique_id
+from celery.backends.database import DatabaseBackend
+
+
+class SomeClass(object):
+
+    def __init__(self, data):
+        self.data = data
+
+
+class test_DatabaseBackend(unittest.TestCase):
+
+    def test_missing_dburi_raises_ImproperlyConfigured(self):
+        prev, conf.RESULT_DBURI = conf.RESULT_DBURI, None
+        try:
+            self.assertRaises(ImproperlyConfigured, DatabaseBackend)
+        finally:
+            conf.RESULT_DBURI = prev
+
+    def test_missing_task_id_is_PENDING(self):
+        tb = DatabaseBackend()
+        self.assertEqual(tb.get_status("xxx-does-not-exist"), states.PENDING)
+
+    def test_mark_as_done(self):
+        tb = DatabaseBackend()
+
+        tid = gen_unique_id()
+
+        self.assertEqual(tb.get_status(tid), states.PENDING)
+        self.assertIsNone(tb.get_result(tid))
+
+        tb.mark_as_done(tid, 42)
+        self.assertEqual(tb.get_status(tid), states.SUCCESS)
+        self.assertEqual(tb.get_result(tid), 42)
+
+    def test_is_pickled(self):
+        tb = DatabaseBackend()
+
+        tid2 = gen_unique_id()
+        result = {"foo": "baz", "bar": SomeClass(12345)}
+        tb.mark_as_done(tid2, result)
+        # is serialized properly.
+        rindb = tb.get_result(tid2)
+        self.assertEqual(rindb.get("foo"), "baz")
+        self.assertEqual(rindb.get("bar").data, 12345)
+
+    def test_mark_as_started(self):
+        tb = DatabaseBackend()
+        tid = gen_unique_id()
+        tb.mark_as_started(tid)
+        self.assertEqual(tb.get_status(tid), states.STARTED)
+
+    def test_mark_as_revoked(self):
+        tb = DatabaseBackend()
+        tid = gen_unique_id()
+        tb.mark_as_revoked(tid)
+        self.assertEqual(tb.get_status(tid), states.REVOKED)
+
+    def test_mark_as_retry(self):
+        tb = DatabaseBackend()
+        tid = gen_unique_id()
+        try:
+            raise KeyError("foo")
+        except KeyError, exception:
+            import traceback
+            trace = "\n".join(traceback.format_stack())
+        tb.mark_as_retry(tid, exception, traceback=trace)
+        self.assertEqual(tb.get_status(tid), states.RETRY)
+        self.assertIsInstance(tb.get_result(tid), KeyError)
+        self.assertEqual(tb.get_traceback(tid), trace)
+
+    def test_mark_as_failure(self):
+        tb = DatabaseBackend()
+
+        tid3 = gen_unique_id()
+        try:
+            raise KeyError("foo")
+        except KeyError, exception:
+            import traceback
+            trace = "\n".join(traceback.format_stack())
+        tb.mark_as_failure(tid3, exception, traceback=trace)
+        self.assertEqual(tb.get_status(tid3), states.FAILURE)
+        self.assertIsInstance(tb.get_result(tid3), KeyError)
+        self.assertEqual(tb.get_traceback(tid3), trace)
+
+    def test_process_cleanup(self):
+        tb = DatabaseBackend()
+        tb.process_cleanup()
+
+    def test_save___restore_taskset(self):
+        tb = DatabaseBackend()
+
+        tid = gen_unique_id()
+        res = {u"something": "special"}
+        self.assertEqual(tb.save_taskset(tid, res), res)
+
+        res2 = tb.restore_taskset(tid)
+        self.assertEqual(res2, res)
+
+        self.assertIsNone(tb.restore_taskset("xxx-nonexisting-id"))
+
+    def test_cleanup(self):
+        tb = DatabaseBackend()
+        for i in range(10):
+            tb.mark_as_done(gen_unique_id(), 42)
+            tb.save_taskset(gen_unique_id(), {"foo": "bar"})
+        s = tb.ResultSession()
+        for t in s.query(Task).all():
+            t.date_done = datetime.now() - tb.result_expires * 2
+        for t in s.query(TaskSet).all():
+            t.date_done = datetime.now() - tb.result_expires * 2
+        s.commit()
+        s.close()
+
+        tb.cleanup()
+        s2 = tb.ResultSession()
+        self.assertEqual(s2.query(Task).count(), 0)
+        self.assertEqual(s2.query(TaskSet).count(), 0)
+
+    def test_Task__repr__(self):
+        self.assertIn("foo", repr(Task("foo")))
+
+    def test_TaskSet__repr__(self):
+        self.assertIn("foo", repr(TaskSet("foo", None)))

+ 0 - 3
celery/tests/test_backends/test_redis.py

@@ -72,12 +72,10 @@ class TestRedisBackend(unittest.TestCase):

         tid = gen_unique_id()

-        self.assertFalse(tb.is_successful(tid))
         self.assertEqual(tb.get_status(tid), states.PENDING)
         self.assertIsNone(tb.get_result(tid))

         tb.mark_as_done(tid, 42)
-        self.assertTrue(tb.is_successful(tid))
         self.assertEqual(tb.get_status(tid), states.SUCCESS)
         self.assertEqual(tb.get_result(tid), 42)

@@ -101,7 +99,6 @@ class TestRedisBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         tb.mark_as_failure(tid3, exception)
-        self.assertFalse(tb.is_successful(tid3))
         self.assertEqual(tb.get_status(tid3), states.FAILURE)
         self.assertIsInstance(tb.get_result(tid3), KeyError)


+ 0 - 3
celery/tests/test_backends/test_tyrant.py

@@ -65,12 +65,10 @@ class TestTyrantBackend(unittest.TestCase):

         tid = gen_unique_id()

-        self.assertFalse(tb.is_successful(tid))
         self.assertEqual(tb.get_status(tid), states.PENDING)
         self.assertIsNone(tb.get_result(tid), None)

         tb.mark_as_done(tid, 42)
-        self.assertTrue(tb.is_successful(tid))
         self.assertEqual(tb.get_status(tid), states.SUCCESS)
         self.assertEqual(tb.get_result(tid), 42)

@@ -94,7 +92,6 @@ class TestTyrantBackend(unittest.TestCase):
         except KeyError, exception:
             pass
         tb.mark_as_failure(tid3, exception)
-        self.assertFalse(tb.is_successful(tid3))
         self.assertEqual(tb.get_status(tid3), states.FAILURE)
         self.assertIsInstance(tb.get_result(tid3), KeyError)


+ 0 - 0
celery/tests/test_bin/__init__.py


+ 111 - 0
celery/tests/test_bin/test_celerybeat.py

@@ -0,0 +1,111 @@
+import logging
+import sys
+import unittest2 as unittest
+
+from celery import platform
+from celery.beat import ClockService
+from celery.bin import celerybeat as beat
+
+
+class MockClockService(ClockService):
+    started = False
+    in_sync = False
+
+    def start(self):
+        self.__class__.started = True
+
+    def sync(self):
+        self.__class__.in_sync = True
+
+
+class MockBeat(beat.Beat):
+    running = False
+
+    def run(self):
+        self.__class__.running = True
+
+
+class MockBeat2(beat.Beat):
+    ClockService = MockClockService
+
+    def install_sync_handler(self, b):
+        pass
+
+
+class test_Beat(unittest.TestCase):
+
+    def test_loglevel_string(self):
+        b = beat.Beat(loglevel="DEBUG")
+        self.assertEqual(b.loglevel, logging.DEBUG)
+
+        b2 = beat.Beat(loglevel=logging.DEBUG)
+        self.assertEqual(b2.loglevel, logging.DEBUG)
+
+    def test_init_loader(self):
+        b = beat.Beat()
+        b.init_loader()
+
+    def test_startup_info(self):
+        b = beat.Beat()
+        self.assertIn("@stderr", b.startup_info())
+
+    def test_process_title(self):
+        b = beat.Beat()
+        b.set_process_title()
+
+    def test_run(self):
+        b = MockBeat2()
+        MockClockService.started = False
+        b.run()
+        self.assertTrue(MockClockService.started)
+
+    def psig(self, fun, *args, **kwargs):
+        handlers = {}
+
+        def i(sig, handler):
+            handlers[sig] = handler
+
+        p, platform.install_signal_handler = platform.install_signal_handler, i
+        try:
+            fun(*args, **kwargs)
+            return handlers
+        finally:
+            platform.install_signal_handler = p
+
+    def test_install_sync_handler(self):
+        b = beat.Beat()
+        clock = MockClockService()
+        MockClockService.in_sync = False
+        handlers = self.psig(b.install_sync_handler, clock)
+        self.assertRaises(SystemExit, handlers["SIGINT"],
+                          "SIGINT", object())
+        self.assertTrue(MockClockService.in_sync)
+        MockClockService.in_sync = False
+
+
+class test_div(unittest.TestCase):
+
+    def setUp(self):
+        self.prev, beat.Beat = beat.Beat, MockBeat
+
+    def tearDown(self):
+        beat.Beat = self.prev
+
+    def test_main(self):
+        sys.argv = [sys.argv[0], "-s", "foo"]
+        try:
+            beat.main()
+            self.assertTrue(MockBeat.running)
+        finally:
+            MockBeat.running = False
+
+    def test_run_celerybeat(self):
+        try:
+            beat.run_celerybeat()
+            self.assertTrue(MockBeat.running)
+        finally:
+            MockBeat.running = False
+
+    def test_parse_options(self):
+        options = beat.parse_options(["-s", "foo"])
+        self.assertEqual(options.schedule, "foo")

+ 310 - 0
celery/tests/test_bin/test_celeryd.py

@@ -0,0 +1,310 @@
+import logging
+import os
+import sys
+import unittest2 as unittest
+
+from multiprocessing import get_logger, current_process
+from StringIO import StringIO
+
+from celery import conf
+from celery import platform
+from celery import signals
+from celery.bin import celeryd as cd
+from celery.exceptions import ImproperlyConfigured
+from celery.utils import patch
+from celery.utils.functional import wraps
+
+from celery.tests.compat import catch_warnings
+from celery.tests.utils import execute_context
+
+
+patch.ensure_process_aware_logger()
+
+def disable_stdouts(fun):
+
+    @wraps(fun)
+    def disable(*args, **kwargs):
+        sys.stdout, sys.stderr = StringIO(), StringIO()
+        try:
+            return fun(*args, **kwargs)
+        finally:
+            sys.stdout = sys.__stdout__
+            sys.stderr = sys.__stderr__
+
+    return disable
+
+
+class _WorkController(object):
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def start(self):
+        pass
+
+
+class Worker(cd.Worker):
+    WorkController = _WorkController
+
+
+class test_Worker(unittest.TestCase):
+    Worker = Worker
+
+    @disable_stdouts
+    def test_queues_string(self):
+        worker = self.Worker(queues="foo,bar,baz")
+        self.assertEqual(worker.queues, ["foo", "bar", "baz"])
+
+    @disable_stdouts
+    def test_loglevel_string(self):
+        worker = self.Worker(loglevel="INFO")
+        self.assertEqual(worker.loglevel, logging.INFO)
+
+    def test_run_worker(self):
+        handlers = {}
+
+        def i(sig, handler):
+            handlers[sig] = handler
+
+        p = platform.install_signal_handler
+        platform.install_signal_handler = i
+        try:
+            w = self.Worker()
+            w._isatty = False
+            w.run_worker()
+            for sig in "SIGINT", "SIGHUP", "SIGTERM":
+                self.assertIn(sig, handlers)
+
+            handlers.clear()
+            w = self.Worker()
+            w._isatty = True
+            w.run_worker()
+            for sig in "SIGINT", "SIGTERM":
+                self.assertIn(sig, handlers)
+            self.assertNotIn("SIGHUP", handlers)
+        finally:
+            platform.install_signal_handler = p
+
+    @disable_stdouts
+    def test_startup_info(self):
+        worker = self.Worker()
+        worker.run()
+        self.assertTrue(worker.startup_info())
+        worker.loglevel = logging.DEBUG
+        self.assertTrue(worker.startup_info())
+        worker.loglevel = logging.INFO
+        self.assertTrue(worker.startup_info())
+
+    @disable_stdouts
+    def test_run(self):
+        self.Worker().run()
+        self.Worker(discard=True).run()
+
+        worker = self.Worker()
+        worker.init_loader()
+        worker.settings.DEBUG = True
+
+        def with_catch_warnings(log):
+            worker.run()
+            self.assertIn("memory leak", log[0].message.args[0])
+
+        context = catch_warnings(record=True)
+        execute_context(context, with_catch_warnings)
+        worker.settings.DEBUG = False
+
+    @disable_stdouts
+    def test_purge_messages(self):
+        self.Worker().purge_messages()
+
+    @disable_stdouts
+    def test_init_queues(self):
+        p, conf.QUEUES = conf.QUEUES, {
+                "celery": {"exchange": "celery",
+                           "binding_key": "celery"},
+                "video": {"exchange": "video",
+                           "binding_key": "video"}}
+        try:
+            self.Worker(queues=["video"]).init_queues()
+            self.assertIn("video", conf.QUEUES)
+            self.assertNotIn("celery", conf.QUEUES)
+
+            conf.CREATE_MISSING_QUEUES = False
+            self.assertRaises(ImproperlyConfigured,
+                    self.Worker(queues=["image"]).init_queues)
+            conf.CREATE_MISSING_QUEUES = True
+            self.Worker(queues=["image"]).init_queues()
+            self.assertIn("image", conf.QUEUES)
+        finally:
+            conf.QUEUES = p
+
+    @disable_stdouts
+    def test_on_listener_ready(self):
+
+        worker_ready_sent = [False]
+        def on_worker_ready(**kwargs):
+            worker_ready_sent[0] = True
+
+        signals.worker_ready.connect(on_worker_ready)
+
+        self.Worker().on_listener_ready(object())
+        self.assertTrue(worker_ready_sent[0])
+
+
+class test_funs(unittest.TestCase):
+
+    @disable_stdouts
+    def test_dump_version(self):
+        self.assertRaises(SystemExit, cd.dump_version)
+
+    @disable_stdouts
+    def test_set_process_status(self):
+        prev1, sys.argv = sys.argv, ["Arg0"]
+        try:
+            st = cd.set_process_status("Running")
+            self.assertIn("celeryd", st)
+            self.assertIn("Running", st)
+            prev2, sys.argv = sys.argv, ["Arg0", "Arg1"]
+            try:
+                st = cd.set_process_status("Running")
+                self.assertIn("celeryd", st)
+                self.assertIn("Running", st)
+                self.assertIn("Arg1", st)
+            finally:
+                sys.argv = prev2
+        finally:
+            sys.argv = prev1
+
+    @disable_stdouts
+    def test_parse_options(self):
+        opts = cd.parse_options(["--concurrency=512"])
+        self.assertEqual(opts.concurrency, 512)
+
+    @disable_stdouts
+    def test_run_worker(self):
+        p, cd.Worker = cd.Worker, Worker
+        try:
+            cd.run_worker(discard=True)
+        finally:
+            cd.Worker = p
+
+    @disable_stdouts
+    def test_main(self):
+        p, cd.Worker = cd.Worker, Worker
+        s, sys.argv = sys.argv, ["celeryd", "--discard"]
+        try:
+            cd.main()
+        finally:
+            cd.Worker = p
+            sys.argv = s
+
+
+class test_signal_handlers(unittest.TestCase):
+
+    class _Worker(object):
+        stopped = False
+        terminated = False
+        logger = get_logger()
+
+        def stop(self):
+            self.stopped = True
+
+        def terminate(self):
+            self.terminated = True
+
+    def psig(self, fun, *args, **kwargs):
+        handlers = {}
+
+        def i(sig, handler):
+            handlers[sig] = handler
+
+        p, platform.install_signal_handler = platform.install_signal_handler, i
+        try:
+            fun(*args, **kwargs)
+            return handlers
+        finally:
+            platform.install_signal_handler = p
+
+    @disable_stdouts
+    def test_worker_int_handler(self):
+        worker = self._Worker()
+        handlers = self.psig(cd.install_worker_int_handler, worker)
+
+        next_handlers = {}
+        def i(sig, handler):
+            next_handlers[sig] = handler
+        p = platform.install_signal_handler
+        platform.install_signal_handler = i
+        try:
+            self.assertRaises(SystemExit, handlers["SIGINT"],
+                              "SIGINT", object())
+            self.assertTrue(worker.stopped)
+        finally:
+            platform.install_signal_handler = p
+
+        self.assertRaises(SystemExit, next_handlers["SIGINT"],
+                          "SIGINT", object())
+        self.assertTrue(worker.terminated)
+
+    @disable_stdouts
+    def test_worker_int_handler_only_stop_MainProcess(self):
+        process = current_process()
+        name, process.name = process.name, "OtherProcess"
+        try:
+            worker = self._Worker()
+            handlers = self.psig(cd.install_worker_int_handler, worker)
+            self.assertRaises(SystemExit, handlers["SIGINT"],
+                            "SIGINT", object())
+            self.assertFalse(worker.stopped)
+        finally:
+            process.name = name
+
+    @disable_stdouts
+    def test_worker_int_again_handler_only_stop_MainProcess(self):
+        process = current_process()
+        name, process.name = process.name, "OtherProcess"
+        try:
+            worker = self._Worker()
+            handlers = self.psig(cd.install_worker_int_again_handler, worker)
+            self.assertRaises(SystemExit, handlers["SIGINT"],
+                            "SIGINT", object())
+            self.assertFalse(worker.terminated)
+        finally:
+            process.name = name
+
+    @disable_stdouts
+    def test_worker_term_handler(self):
+        worker = self._Worker()
+        handlers = self.psig(cd.install_worker_term_handler, worker)
+        self.assertRaises(SystemExit, handlers["SIGTERM"],
+                          "SIGTERM", object())
+        self.assertTrue(worker.stopped)
+
+    @disable_stdouts
+    def test_worker_term_handler_only_stop_MainProcess(self):
+        process = current_process()
+        name, process.name = process.name, "OtherProcess"
+        try:
+            worker = self._Worker()
+            handlers = self.psig(cd.install_worker_term_handler, worker)
+            self.assertRaises(SystemExit, handlers["SIGTERM"],
+                          "SIGTERM", object())
+            self.assertFalse(worker.stopped)
+        finally:
+            process.name = name
+
+    @disable_stdouts
+    def test_worker_restart_handler(self):
+        argv = []
+
+        def _execv(*args):
+            argv.extend(args)
+
+        execv, os.execv = os.execv, _execv
+        try:
+            worker = self._Worker()
+            handlers = self.psig(cd.install_worker_restart_handler, worker)
+            handlers["SIGHUP"]("SIGHUP", object())
+            self.assertTrue(worker.stopped)
+            self.assertTrue(argv)
+        finally:
+            os.execv = execv

+ 0 - 13
celery/tests/test_bin_celeryd.py

@@ -1,13 +0,0 @@
-import unittest2 as unittest
-
-from celery.bin import celeryd
-
-
-class TestWorker(unittest.TestCase):
-
-    def test_init_loader(self):
-
-        w = celeryd.Worker()
-        w.init_loader()
-        self.assertTrue(w.loader)
-        self.assertTrue(w.settings)

+ 86 - 12
celery/tests/test_buckets.py

@@ -1,18 +1,17 @@
 from __future__ import generators
-import os
+
 import sys
-sys.path.insert(0, os.getcwd())
 import time
 import unittest2 as unittest
-from itertools import chain, izip

+from itertools import chain, izip

+from celery.registry import TaskRegistry
 from celery.task.base import Task
 from celery.utils import timeutils
 from celery.utils import gen_unique_id
 from celery.utils.functional import curry
 from celery.worker import buckets
-from celery.registry import TaskRegistry

 from celery.tests.utils import skip_if_environ

@@ -41,7 +40,7 @@ class MockJob(object):
                 self.task_name, self.task_id, self.args, self.kwargs)


-class TestTokenBucketQueue(unittest.TestCase):
+class test_TokenBucketQueue(unittest.TestCase):

     @skip_if_disabled
     def empty_queue_yields_QueueEmpty(self):
@@ -94,7 +93,7 @@ class TestTokenBucketQueue(unittest.TestCase):
         self.assertEqual(x.get_nowait(), "The quick brown fox")
         self.assertEqual(x.get_nowait(), "The quick brown fox")
 
 
 
 
-class TestRateLimitString(unittest.TestCase):
+class test_rate_limit_string(unittest.TestCase):
 
 
     @skip_if_disabled
     @skip_if_disabled
     def test_conversion(self):
     def test_conversion(self):
@@ -125,7 +124,7 @@ class TaskD(Task):
     rate_limit = "1000/m"
     rate_limit = "1000/m"
 
 
 
 
-class TestTaskBuckets(unittest.TestCase):
+class test_TaskBucket(unittest.TestCase):
 
 
     def setUp(self):
     def setUp(self):
         self.registry = TaskRegistry()
         self.registry = TaskRegistry()
@@ -133,6 +132,44 @@ class TestTaskBuckets(unittest.TestCase):
         for task_cls in self.task_classes:
         for task_cls in self.task_classes:
             self.registry.register(task_cls)
             self.registry.register(task_cls)
 
 
+    @skip_if_disabled
+    def test_get_nowait(self):
+        x = buckets.TaskBucket(task_registry=self.registry)
+        self.assertRaises(buckets.QueueEmpty, x.get_nowait)
+
+    @skip_if_disabled
+    def test_refresh(self):
+        reg = {}
+        x = buckets.TaskBucket(task_registry=reg)
+        reg["foo"] = "something"
+        x.refresh()
+        self.assertIn("foo", x.buckets)
+        self.assertTrue(x.get_bucket_for_type("foo"))
+
+    @skip_if_disabled
+    def test__get_queue_for_type(self):
+        x = buckets.TaskBucket(task_registry={})
+        x.buckets["foo"] = buckets.TokenBucketQueue(fill_rate=1)
+        self.assertIs(x._get_queue_for_type("foo"), x.buckets["foo"].queue)
+        x.buckets["bar"] = buckets.FastQueue()
+        self.assertIs(x._get_queue_for_type("bar"), x.buckets["bar"])
+
+    @skip_if_disabled
+    def test_update_bucket_for_type(self):
+        bucket = buckets.TaskBucket(task_registry=self.registry)
+        b = bucket._get_queue_for_type(TaskC.name)
+        self.assertIs(bucket.update_bucket_for_type(TaskC.name).queue, b)
+        self.assertIs(bucket.buckets[TaskC.name].queue, b)
+
+    @skip_if_disabled
+    def test_auto_add_on_missing_put(self):
+        reg = {}
+        b = buckets.TaskBucket(task_registry=reg)
+        reg["nonexisting.task"] = "foo"
+
+        b.put(MockJob(gen_unique_id(), "nonexisting.task", (), {}))
+        self.assertIn("nonexisting.task", b.buckets)
+
     @skip_if_disabled
     def test_auto_add_on_missing(self):
         b = buckets.TaskBucket(task_registry=self.registry)
@@ -146,13 +183,14 @@ class TestTaskBuckets(unittest.TestCase):
     @skip_if_disabled
     def test_has_rate_limits(self):
         b = buckets.TaskBucket(task_registry=self.registry)
-        self.assertEqual(b.buckets[TaskA.name].fill_rate, 10)
+        self.assertEqual(b.buckets[TaskA.name]._bucket.fill_rate, 10)
         self.assertIsInstance(b.buckets[TaskB.name], buckets.Queue)
-        self.assertEqual(b.buckets[TaskC.name].fill_rate, 1)
+        self.assertEqual(b.buckets[TaskC.name]._bucket.fill_rate, 1)
         self.registry.register(TaskD)
         b.init_with_registry()
         try:
-            self.assertEqual(b.buckets[TaskD.name].fill_rate, 1000 / 60.0)
+            self.assertEqual(b.buckets[TaskD.name]._bucket.fill_rate,
+                             1000 / 60.0)
         finally:
             self.registry.unregister(TaskD)

@@ -227,5 +265,41 @@ class TestTaskBuckets(unittest.TestCase):
         finally:
             self.registry.unregister(TaskD)

-if __name__ == "__main__":
-    unittest.main()
+    @skip_if_disabled
+    def test_empty(self):
+        x = buckets.TaskBucket(task_registry=self.registry)
+        self.assertTrue(x.empty())
+        x.put(MockJob(gen_unique_id(), TaskC.name, [], {}))
+        self.assertFalse(x.empty())
+        x.clear()
+        self.assertTrue(x.empty())
+
+    @skip_if_disabled
+    def test_items(self):
+        x = buckets.TaskBucket(task_registry=self.registry)
+        x.buckets[TaskA.name].put(1)
+        x.buckets[TaskB.name].put(2)
+        x.buckets[TaskC.name].put(3)
+        self.assertItemsEqual(x.items, [1, 2, 3])
+
+
+class test_FastQueue(unittest.TestCase):
+
+    def test_items(self):
+        x = buckets.FastQueue()
+        x.put(10)
+        x.put(20)
+        self.assertListEqual([10, 20], list(x.items))
+
+    def test_wait(self):
+        x = buckets.FastQueue()
+        x.put(10)
+        self.assertEqual(x.wait(), 10)
+
+    def test_clear(self):
+        x = buckets.FastQueue()
+        x.put(10)
+        x.put(20)
+        self.assertFalse(x.empty())
+        x.clear()
+        self.assertTrue(x.empty())

+ 0 - 5
celery/tests/test_celery.py

@@ -9,12 +9,7 @@ class TestInitFile(unittest.TestCase):
         self.assertTrue(celery.VERSION)
         self.assertGreaterEqual(len(celery.VERSION), 3)
         celery.VERSION = (0, 3, 0)
-        self.assertFalse(celery.is_stable_release())
         self.assertGreaterEqual(celery.__version__.count("."), 2)
-        self.assertIn("(unstable)", celery.version_with_meta())
-        celery.VERSION = (0, 4, 0)
-        self.assertTrue(celery.is_stable_release())
-        self.assertIn("(stable)", celery.version_with_meta())

     def test_meta(self):
         for m in ("__author__", "__contact__", "__homepage__",

+ 93 - 0
celery/tests/test_concurrency_processes.py

@@ -0,0 +1,93 @@
+import sys
+import unittest2 as unittest
+
+from celery.concurrency import processes as mp
+from celery.datastructures import ExceptionInfo
+
+
+def to_excinfo(exc):
+    try:
+        raise exc
+    except:
+        return ExceptionInfo(sys.exc_info())
+
+
+class MockPool(object):
+    started = False
+    closed = False
+    joined = False
+    terminated = False
+    _state = None
+
+    def __init__(self, *args, **kwargs):
+        self.started = True
+        self._state = mp.RUN
+
+    def close(self):
+        self.closed = True
+        self._state = "CLOSE"
+
+    def join(self):
+        self.joined = True
+
+    def terminate(self):
+        self.terminated = True
+
+    def apply_async(self, *args, **kwargs):
+        pass
+
+
+class TaskPool(mp.TaskPool):
+    Pool = MockPool
+
+
+class test_TaskPool(unittest.TestCase):
+
+    def test_start(self):
+        pool = TaskPool(10)
+        pool.start()
+        self.assertTrue(pool._pool.started)
+
+        _pool = pool._pool
+        pool.stop()
+        self.assertTrue(_pool.closed)
+        self.assertTrue(_pool.joined)
+        pool.stop()
+
+        pool.start()
+        _pool = pool._pool
+        pool.terminate()
+        pool.terminate()
+        self.assertTrue(_pool.terminated)
+
+    def test_on_ready_exception(self):
+
+        scratch = [None]
+        def errback(retval):
+            scratch[0] = retval
+
+        pool = TaskPool(10)
+        exc = to_excinfo(KeyError("foo"))
+        pool.on_ready([], [errback], exc)
+        self.assertEqual(exc, scratch[0])
+
+    def test_on_ready_value(self):
+
+        scratch = [None]
+        def callback(retval):
+            scratch[0] = retval
+
+        pool = TaskPool(10)
+        retval = "the quick brown fox"
+        pool.on_ready([callback], [], retval)
+        self.assertEqual(retval, scratch[0])
+
+    def test_on_ready_exit_exception(self):
+        pool = TaskPool(10)
+        exc = to_excinfo(SystemExit("foo"))
+        self.assertRaises(SystemExit, pool.on_ready, [], [], exc)
+
+    def test_apply_async(self):
+        pool = TaskPool(10)
+        pool.start()
+        pool.apply_async(lambda x: x, (2, ), {})

+ 18 - 7
celery/tests/test_datastructures.py

@@ -4,9 +4,10 @@ from Queue import Queue

 from celery.datastructures import PositionQueue, ExceptionInfo, LocalCache
 from celery.datastructures import LimitedSet, SharedCounter, consume_queue
+from celery.datastructures import AttributeDict


-class TestPositionQueue(unittest.TestCase):
+class test_PositionQueue(unittest.TestCase):

     def test_position_queue_unfilled(self):
         q = PositionQueue(length=10)
@@ -36,7 +37,7 @@ class TestPositionQueue(unittest.TestCase):
         self.assertTrue(q.full())
 
 
-class TestExceptionInfo(unittest.TestCase):
+class test_ExceptionInfo(unittest.TestCase):
 
     def test_exception_info(self):
 
@@ -56,7 +57,7 @@ class TestExceptionInfo(unittest.TestCase):
         self.assertTrue(r)
 
 
-class TestUtilities(unittest.TestCase):
+class test_utilities(unittest.TestCase):
 
     def test_consume_queue(self):
         x = Queue()
@@ -68,7 +69,7 @@ class TestUtilities(unittest.TestCase):
         self.assertRaises(StopIteration, it.next)
 
 
-class TestSharedCounter(unittest.TestCase):
+class test_SharedCounter(unittest.TestCase):
 
     def test_initial_value(self):
         self.assertEqual(int(SharedCounter(10)), 10)
@@ -101,7 +102,7 @@ class TestSharedCounter(unittest.TestCase):
         self.assertIn("<SharedCounter:", repr(SharedCounter(10)))
 
 
-class TestLimitedSet(unittest.TestCase):
+class test_LimitedSet(unittest.TestCase):
 
     def test_add(self):
         s = LimitedSet(maxlen=2)
@@ -118,7 +119,7 @@ class TestLimitedSet(unittest.TestCase):
         s = LimitedSet(maxlen=2)
         items = "foo", "bar"
         map(s.add, items)
-        l = list(iter(items))
+        l = list(iter(s))
         for item in items:
             self.assertIn(item, l)
 
@@ -129,7 +130,7 @@ class TestLimitedSet(unittest.TestCase):
         self.assertIn("LimitedSet(", repr(s))
 
 
-class TestLocalCache(unittest.TestCase):
+class test_LocalCache(unittest.TestCase):
 
     def test_expires(self):
         limit = 100
@@ -138,3 +139,13 @@ class TestLocalCache(unittest.TestCase):
         for i in slots:
             x[i] = i
         self.assertListEqual(x.keys(), slots[limit:])
+
+
+class test_AttributeDict(unittest.TestCase):
+
+    def test_getattr__setattr(self):
+        x = AttributeDict({"foo": "bar"})
+        self.assertEqual(x["foo"], "bar")
+        self.assertRaises(AttributeError, getattr, x, "bar")
+        x.bar = "foo"
+        self.assertEqual(x["bar"], "foo")
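
test_getattr__setattr fixes the AttributeDict contract: attribute access falls through to the mapping, and a missing key surfaces as AttributeError rather than KeyError. A minimal sketch consistent with those assertions (the real class lives in celery.datastructures):

    class AttributeDict(dict):

        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError:
                raise AttributeError(key)

        def __setattr__(self, key, value):
            self[key] = value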

+ 110 - 20
celery/tests/test_log.py

@@ -14,24 +14,44 @@ except ImportError:
 
 from carrot.utils import rpartition
 
-from celery.log import (setup_logger, emergency_error,
+from celery import log
+from celery.log import (setup_logger, setup_task_logger, emergency_error,
+                        get_default_logger, get_task_logger,
                         redirect_stdouts_to_logger, LoggingProxy)
 from celery.tests.utils import override_stdouts, execute_context
+from celery.utils import gen_unique_id
+from celery.utils.compat import LoggerAdapter
+from celery.utils.compat import _CompatLoggerAdapter
 
 
+def get_handlers(logger):
+    if isinstance(logger, LoggerAdapter):
+        return logger.logger.handlers
+    return logger.handlers
+
+def set_handlers(logger, new_handlers):
+    if isinstance(logger, LoggerAdapter):
+        logger.logger.handlers = new_handlers
+    logger.handlers = new_handlers
+
 @contextmanager
 def wrap_logger(logger, loglevel=logging.ERROR):
-    old_handlers = logger.handlers
+    old_handlers = get_handlers(logger)
     sio = StringIO()
     siohandler = logging.StreamHandler(sio)
-    logger.handlers = [siohandler]
+    set_handlers(logger, [siohandler])
 
     yield sio
 
-    logger.handlers = old_handlers
+    set_handlers(logger, old_handlers)
+
 
 
+class test_default_logger(unittest.TestCase):
 
-class TestLog(unittest.TestCase):
+    def setUp(self):
+        self.setup_logger = setup_logger
+        self.get_logger = get_default_logger
+        log._setup = False
 
     def _assertLog(self, logger, logmsg, loglevel=logging.ERROR):
 
@@ -51,10 +71,12 @@ class TestLog(unittest.TestCase):
         return self.assertFalse(val, reason)
 
     def test_setup_logger(self):
-        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
-        logger.handlers = [] # Reset previously set logger.
-        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
-        self.assertIs(logger.handlers[0].stream, sys.__stderr__,
+        logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+                                   root=False)
+        set_handlers(logger, [])
+        logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+                                   root=False)
+        self.assertIs(get_handlers(logger)[0].stream, sys.__stderr__,
                 "setup_logger logs to stderr without logfile argument.")
         self.assertDidLogFalse(logger, "Logging something",
                 "Logger doesn't info when loglevel is ERROR",
@@ -67,13 +89,13 @@ class TestLog(unittest.TestCase):
                              "Testing emergency error facility")
 
     def test_setup_logger_no_handlers_stream(self):
-        from multiprocessing import get_logger
-        l = get_logger()
-        l.handlers = []
+        l = self.get_logger()
+        set_handlers(l, [])
 
         def with_override_stdouts(outs):
             stdout, stderr = outs
-            l = setup_logger(logfile=stderr, loglevel=logging.INFO)
+            l = self.setup_logger(logfile=stderr, loglevel=logging.INFO,
+                                  root=False)
             l.info("The quick brown fox...")
             self.assertIn("The quick brown fox...", stderr.getvalue())
 
@@ -81,12 +103,12 @@ class TestLog(unittest.TestCase):
         execute_context(context, with_override_stdouts)
 
     def test_setup_logger_no_handlers_file(self):
-        from multiprocessing import get_logger
-        l = get_logger()
-        l.handlers = []
+        l = self.get_logger()
+        set_handlers(l, [])
         tempfile = mktemp(suffix="unittest", prefix="celery")
-        l = setup_logger(logfile=tempfile, loglevel=0)
-        self.assertIsInstance(l.handlers[0], logging.FileHandler)
+        l = self.setup_logger(logfile=tempfile, loglevel=0, root=False)
+        self.assertIsInstance(get_handlers(l)[0],
+                              logging.FileHandler)
 
     def test_emergency_error_stderr(self):
         def with_override_stdouts(outs):
@@ -109,7 +131,8 @@ class TestLog(unittest.TestCase):
             os.unlink(tempfile)
 
     def test_redirect_stdouts(self):
-        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
+        logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+                                   root=False)
         try:
             def with_wrap_logger(sio):
                 redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
@@ -122,7 +145,8 @@ class TestLog(unittest.TestCase):
             sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
 
     def test_logging_proxy(self):
-        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
+        logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+                                   root=False)
 
         def with_wrap_logger(sio):
             p = LoggingProxy(logger)
@@ -143,3 +167,69 @@ class TestLog(unittest.TestCase):
 
         context = wrap_logger(logger)
         execute_context(context, with_wrap_logger)
+
+
+class test_task_logger(test_default_logger):
+
+    def setUp(self):
+        self.setup_logger = setup_task_logger
+        self.get_logger = get_task_logger
+
+
+class MockLogger(logging.Logger):
+    _records = None
+
+    def __init__(self, *args, **kwargs):
+        self._records = []
+        logging.Logger.__init__(self, *args, **kwargs)
+
+    def handle(self, record):
+        self._records.append(record)
+
+    def isEnabledFor(self, level):
+        return True
+
+
+class test_CompatLoggerAdapter(unittest.TestCase):
+    levels = ("debug",
+              "info",
+              "warn", "warning",
+              "error",
+              "fatal", "critical")
+
+    def setUp(self):
+        self.logger, self.adapter = self.createAdapter()
+
+    def createAdapter(self, name=None, extra={"foo": "bar"}):
+        logger = MockLogger(name=name or gen_unique_id())
+        return logger, _CompatLoggerAdapter(logger, extra)
+
+    def test_levels(self):
+        for level in self.levels:
+            msg = "foo bar %s" % (level, )
+            logger, adapter = self.createAdapter()
+            getattr(adapter, level)(msg)
+            self.assertEqual(logger._records[0].msg, msg)
+
+    def test_exception(self):
+        try:
+            raise KeyError("foo")
+        except KeyError:
+            self.adapter.exception("foo bar exception")
+        self.assertEqual(self.logger._records[0].msg, "foo bar exception")
+
+    def test_setLevel(self):
+        self.adapter.setLevel(logging.INFO)
+        self.assertEqual(self.logger.level, logging.INFO)
+
+    def test_process(self):
+        msg, kwargs = self.adapter.process("foo bar baz", {"exc_info": 1})
+        self.assertDictEqual(kwargs, {"exc_info": 1,
+                                      "extra": {"foo": "bar"}})
+
+    def test_add_remove_handlers(self):
+        handler = logging.StreamHandler()
+        self.adapter.addHandler(handler)
+        self.assertIs(self.logger.handlers[0], handler)
+        self.adapter.removeHandler(handler)
+        self.assertListEqual(self.logger.handlers, [])
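
execute_context is used throughout these tests because they still target Python 2.4; on newer Pythons (2.6+, or 2.5 with the with_statement future import) the wrap_logger helper above reads naturally as a with-statement (illustrative usage, logger name hypothetical):

    import logging

    logger = logging.getLogger("celery.tests.example")
    with wrap_logger(logger) as sio:
        logger.error("the quick brown fox")
    assert "the quick brown fox" in sio.getvalue()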

+ 1 - 0
celery/tests/test_result.py

@@ -4,6 +4,7 @@ import unittest2 as unittest
 
 from celery import states
 from celery.utils import gen_unique_id
+from celery.utils.compat import all
 from celery.result import AsyncResult, TaskSetResult
 from celery.backends import default_backend
 from celery.exceptions import TimeoutError
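
celery.utils.compat.all stands in for the all() builtin that only arrived in Python 2.5; the backport is presumably the textbook definition:

    def all(iterable):
        for element in iterable:
            if not element:
                return False
        return True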

+ 55 - 59
celery/tests/test_routes.py

@@ -3,14 +3,14 @@ import unittest2 as unittest
 
 from celery import conf
 from celery import routes
-from celery.utils import gen_unique_id
+from celery.utils import maybe_promise
 from celery.utils.functional import wraps
-from celery.exceptions import RouteNotFound
+from celery.exceptions import QueueNotFound
 
 
-def E(routing_table):
+def E(queues):
     def expand(answer):
-        return routes.expand_destination(answer, routing_table)
+        return routes.Router([], queues).expand_destination(answer)
     return expand
 
 
@@ -29,84 +29,80 @@ def with_queues(**queues):
     return patch_fun
 
 
-a_route = {"exchange": "fooexchange",
+a_queue = {"exchange": "fooexchange",
            "exchange_type": "fanout",
               "binding_key": "xuzzy"}
-b_route = {"exchange": "barexchange",
+b_queue = {"exchange": "barexchange",
            "exchange_type": "topic",
           "binding_key": "b.b.#"}
 
 
 class test_MapRoute(unittest.TestCase):
 
-    @with_queues(foo=a_route, bar=b_route)
+    @with_queues(foo=a_queue, bar=b_queue)
     def test_route_for_task_expanded_route(self):
         expand = E(conf.QUEUES)
-        route = routes.MapRoute({"celery.ping": "foo"})
-        self.assertDictContainsSubset(a_route,
+        route = routes.MapRoute({"celery.ping": {"queue": "foo"}})
+        self.assertDictContainsSubset(a_queue,
                              expand(route.route_for_task("celery.ping")))
         self.assertIsNone(route.route_for_task("celery.awesome"))
 
-    @with_queues(foo=a_route, bar=b_route)
+    @with_queues(foo=a_queue, bar=b_queue)
     def test_route_for_task(self):
         expand = E(conf.QUEUES)
-        route = routes.MapRoute({"celery.ping": b_route})
-        self.assertDictContainsSubset(b_route,
+        route = routes.MapRoute({"celery.ping": b_queue})
+        self.assertDictContainsSubset(b_queue,
                              expand(route.route_for_task("celery.ping")))
         self.assertIsNone(route.route_for_task("celery.awesome"))
 
     def test_expand_route_not_found(self):
         expand = E(conf.QUEUES)
-        route = routes.MapRoute({"a": "x"})
-        self.assertRaises(RouteNotFound, expand, route.route_for_task("a"))
+        route = routes.MapRoute({"a": {"queue": "x"}})
+        self.assertRaises(QueueNotFound, expand, route.route_for_task("a"))
 
 
 class test_lookup_route(unittest.TestCase):
 
-    @with_queues(foo=a_route, bar=b_route)
-    def test_lookup_takes_first(self):
-        expand = E(conf.QUEUES)
-        R = routes.prepare(({"celery.ping": "bar"},
-                            {"celery.ping": "foo"}))
-        self.assertDictContainsSubset(b_route,
-                expand(routes.lookup_route(R, "celery.ping", gen_unique_id(),
-                    args=[1, 2], kwargs={})))
+    def test_init_queues(self):
+        router = routes.Router(queues=None)
+        self.assertDictEqual(router.queues, {})
 
-    @with_queues(foo=a_route, bar=b_route)
+    @with_queues(foo=a_queue, bar=b_queue)
+    def test_lookup_takes_first(self):
+        R = routes.prepare(({"celery.ping": {"queue": "bar"}},
+                            {"celery.ping": {"queue": "foo"}}))
+        router = routes.Router(R, conf.QUEUES)
+        self.assertDictContainsSubset(b_queue,
+                router.route({}, "celery.ping",
+                    args=[1, 2], kwargs={}))
+
+    @with_queues(foo=a_queue, bar=b_queue)
     def test_lookup_paths_traversed(self):
-        expand = E(conf.QUEUES)
-        R = routes.prepare(({"celery.xaza": "bar"},
-                            {"celery.ping": "foo"}))
-        self.assertDictContainsSubset(a_route,
-                expand(routes.lookup_route(R, "celery.ping", gen_unique_id(),
-                    args=[1, 2], kwargs={})))
-        self.assertIsNone(routes.lookup_route(R, "celery.poza"))
-
-
-class test_lookup_disabled(unittest.TestCase):
-
-    def test_disabled(self):
-
-        def create_router(name, is_disabled):
-            class _Router(object):
-
-                def disabled(self, task, *args):
-                    if task == name:
-                        return is_disabled
-            return _Router()
-
-
-        A = create_router("celery.ping", True)
-        B = create_router("celery.ping", False)
-        C = object()
-
-        R1 = (routes.prepare((A, B, C)), True)
-        R2 = (routes.prepare((B, C, A)), False)
-        R3 = (routes.prepare((C, A, B)), True)
-        R4 = (routes.prepare((B, A, C)), False)
-        R5 = (routes.prepare((A, C, B)), True)
-        R6 = (routes.prepare((C, B, A)), False)
-
-        for i, (router, state) in enumerate((R1, R2, R3, R4, R5, R6)):
-            self.assertEqual(routes.lookup_disabled(router, "celery.ping"),
-                             state, "ok %d" % i)
+        R = routes.prepare(({"celery.xaza": {"queue": "bar"}},
+                            {"celery.ping": {"queue": "foo"}}))
+        router = routes.Router(R, conf.QUEUES)
+        self.assertDictContainsSubset(a_queue,
+                router.route({}, "celery.ping",
+                    args=[1, 2], kwargs={}))
+        self.assertEqual(router.route({}, "celery.poza"), {})
+
+
+class test_prepare(unittest.TestCase):
+
+    def test_prepare(self):
+        from celery.datastructures import LocalCache
+        o = object()
+        R = [{"foo": "bar"},
+                  "celery.datastructures.LocalCache",
+                  o]
+        p = routes.prepare(R)
+        self.assertIsInstance(p[0], routes.MapRoute)
+        self.assertIsInstance(maybe_promise(p[1]), LocalCache)
+        self.assertIs(p[2], o)
+
+        self.assertEqual(routes.prepare(o), [o])
+
+    def test_prepare_item_is_dict(self):
+        R = {"foo": "bar"}
+        p = routes.prepare(R)
+        self.assertIsInstance(p[0], routes.MapRoute)
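
The renamed assertions summarise the new routing contract: a route answer may simply name a queue, which the Router expands into that queue's full declaration, and an unknown queue name now raises QueueNotFound instead of RouteNotFound. A hedged sketch of the expansion step exercised by E() above (not the actual celery.routes code):

    from celery.exceptions import QueueNotFound

    def expand_destination(route, queues):
        # {"queue": "foo"} is replaced by the exchange/binding
        # declaration registered for "foo" in the queue map.
        queue = route.pop("queue", None)
        if queue:
            try:
                dest = dict(queues[queue])
            except KeyError:
                raise QueueNotFound(
                    "Queue %r is not defined" % queue)
            dest.update(route)
            return dest
        return route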

+ 20 - 17
celery/tests/test_task.py

@@ -10,14 +10,13 @@ from celery import task
 from celery import messaging
 from celery.task.schedules import crontab, crontab_parser
 from celery.utils import timeutils
-from celery.utils import gen_unique_id
+from celery.utils import gen_unique_id, parse_iso8601
 from celery.utils.functional import wraps
 from celery.result import EagerResult
 from celery.execute import send_task
 from celery.backends import default_backend
 from celery.decorators import task as task_dec
 from celery.exceptions import RetryTaskError
-from celery.worker.listener import parse_iso8601
 
 from celery.tests.utils import with_eager_tasks
 
@@ -149,10 +148,14 @@ class TestTaskRetries(unittest.TestCase):
         self.assertEqual(result.get(), 42)
         self.assertEqual(RetryTaskNoArgs.iterations, 4)
 
+    def test_retry_kwargs_can_not_be_empty(self):
+        self.assertRaises(TypeError, RetryTaskMockApply.retry,
+                            args=[4, 4], kwargs={})
+
     def test_retry_not_eager(self):
         exc = Exception("baz")
         try:
-            RetryTaskMockApply.retry(args=[4, 4], kwargs={},
+            RetryTaskMockApply.retry(args=[4, 4], kwargs={"task_retries": 0},
                                      exc=exc, throw=False)
             self.assertTrue(RetryTaskMockApply.applied)
         finally:
@@ -160,7 +163,8 @@ class TestTaskRetries(unittest.TestCase):
 
         try:
             self.assertRaises(RetryTaskError, RetryTaskMockApply.retry,
-                    args=[4, 4], kwargs={}, exc=exc, throw=True)
+                    args=[4, 4], kwargs={"task_retries": 0},
+                    exc=exc, throw=True)
             self.assertTrue(RetryTaskMockApply.applied)
         finally:
             RetryTaskMockApply.applied = 0
@@ -352,25 +356,24 @@ class TestTaskSet(unittest.TestCase):
 
     @with_eager_tasks
     def test_function_taskset(self):
-        ts = task.TaskSet(return_True_task.name, [
-              ([1], {}), [[2], {}], [[3], {}], [[4], {}], [[5], {}]])
+        subtasks = [return_True_task.subtask([i]) for i in range(1, 6)]
+        ts = task.TaskSet(subtasks)
         res = ts.apply_async()
         self.assertListEqual(res.join(), [True, True, True, True, True])
 
     def test_counter_taskset(self):
         IncrementCounterTask.count = 0
-        ts = task.TaskSet(IncrementCounterTask, [
-            ([], {}),
-            ([], {"increment_by": 2}),
-            ([], {"increment_by": 3}),
-            ([], {"increment_by": 4}),
-            ([], {"increment_by": 5}),
-            ([], {"increment_by": 6}),
-            ([], {"increment_by": 7}),
-            ([], {"increment_by": 8}),
-            ([], {"increment_by": 9}),
+        ts = task.TaskSet(tasks=[
+            IncrementCounterTask.subtask((), {}),
+            IncrementCounterTask.subtask((), {"increment_by": 2}),
+            IncrementCounterTask.subtask((), {"increment_by": 3}),
+            IncrementCounterTask.subtask((), {"increment_by": 4}),
+            IncrementCounterTask.subtask((), {"increment_by": 5}),
+            IncrementCounterTask.subtask((), {"increment_by": 6}),
+            IncrementCounterTask.subtask((), {"increment_by": 7}),
+            IncrementCounterTask.subtask((), {"increment_by": 8}),
+            IncrementCounterTask.subtask((), {"increment_by": 9}),
         ])
-        self.assertEqual(ts.task_name, IncrementCounterTask.name)
         self.assertEqual(ts.total, 9)
 
 
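The rewritten TaskSet tests reflect the API change from TaskSet(task, list_of_arg_tuples) to a flat list of subtasks. In isolation the new invocation looks like this (add is an illustrative task, not one defined in this module; results assume a running worker or CELERY_ALWAYS_EAGER):

    from celery.decorators import task
    from celery.task.sets import TaskSet

    @task
    def add(x, y):
        return x + y

    ts = TaskSet(tasks=[add.subtask((i, i)) for i in (2, 4, 8)])
    result = ts.apply_async()
    print result.join()     # -> [4, 8, 16]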

+ 32 - 1
celery/tests/test_task_control.py

@@ -3,6 +3,7 @@ import unittest2 as unittest
 from celery.task import control
 from celery.task.builtins import PingTask
 from celery.utils import gen_unique_id
+from celery.utils.functional import wraps
 
 
 class MockBroadcastPublisher(object):
@@ -18,26 +19,56 @@ class MockBroadcastPublisher(object):
         pass
 
 
+class MockControlReplyConsumer(object):
+
+    def __init__(self, *args, **kwarg):
+        pass
+
+    def collect(self, *args, **kwargs):
+        pass
+
+    def close(self):
+        pass
+
+
 def with_mock_broadcast(fun):
 
+    @wraps(fun)
     def _mocked(*args, **kwargs):
         old_pub = control.BroadcastPublisher
+        old_rep = control.ControlReplyConsumer
         control.BroadcastPublisher = MockBroadcastPublisher
+        control.ControlReplyConsumer = MockControlReplyConsumer
         try:
             return fun(*args, **kwargs)
         finally:
             MockBroadcastPublisher.sent = []
             control.BroadcastPublisher = old_pub
+            control.ControlReplyConsumer = old_rep
     return _mocked
 
 
-class TestBroadcast(unittest.TestCase):
+class test_Broadcast(unittest.TestCase):
+
+    def test_discard_all(self):
+        control.discard_all()
 
     @with_mock_broadcast
     def test_broadcast(self):
         control.broadcast("foobarbaz", arguments=[])
         self.assertIn("foobarbaz", MockBroadcastPublisher.sent)
 
+    @with_mock_broadcast
+    def test_broadcast_limit(self):
+        control.broadcast("foobarbaz1", arguments=[], limit=None,
+                destination=[1, 2, 3])
+        self.assertIn("foobarbaz1", MockBroadcastPublisher.sent)
+
+    @with_mock_broadcast
+    def test_broadcast_validate(self):
+        self.assertRaises(ValueError, control.broadcast, "foobarbaz2",
+                          destination="foo")
+
     @with_mock_broadcast
     def test_rate_limit(self):
         control.rate_limit(PingTask.name, "100/m")

+ 1 - 4
celery/tests/test_task_http.py

@@ -8,10 +8,7 @@ try:
     from contextlib import contextmanager
 except ImportError:
     from celery.tests.utils import fallback_contextmanager as contextmanager
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
+from StringIO import StringIO
 
 from anyjson import serialize
 

+ 175 - 0
celery/tests/test_task_sets.py

@@ -0,0 +1,175 @@
+import unittest2 as unittest
+
+import simplejson
+
+from celery import conf
+from celery.task import Task
+from celery.task.sets import subtask, TaskSet
+
+from celery.tests.utils import execute_context
+from celery.tests.compat import catch_warnings
+
+
+class MockTask(Task):
+    name = "tasks.add"
+
+    def run(self, x, y, **kwargs):
+        return x + y
+
+    @classmethod
+    def apply_async(cls, args, kwargs, **options):
+        return (args, kwargs, options)
+
+    @classmethod
+    def apply(cls, args, kwargs, **options):
+        return (args, kwargs, options)
+
+
+class test_subtask(unittest.TestCase):
+
+    def test_behaves_like_type(self):
+        s = subtask("tasks.add", (2, 2), {"cache": True},
+                    {"routing_key": "CPU-bound"})
+        self.assertDictEqual(subtask(s), s)
+
+    def test_task_argument_can_be_task_cls(self):
+        s = subtask(MockTask, (2, 2))
+        self.assertEqual(s.task, MockTask.name)
+
+    def test_apply_async(self):
+        s = MockTask.subtask((2, 2), {"cache": True},
+                {"routing_key": "CPU-bound"})
+        args, kwargs, options = s.apply_async()
+        self.assertTupleEqual(args, (2, 2))
+        self.assertDictEqual(kwargs, {"cache": True})
+        self.assertDictEqual(options, {"routing_key": "CPU-bound"})
+
+    def test_delay_argmerge(self):
+        s = MockTask.subtask((2, ), {"cache": True},
+                {"routing_key": "CPU-bound"})
+        args, kwargs, options = s.delay(10, cache=False, other="foo")
+        self.assertTupleEqual(args, (10, 2))
+        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
+        self.assertDictEqual(options, {"routing_key": "CPU-bound"})
+
+    def test_apply_async_argmerge(self):
+        s = MockTask.subtask((2, ), {"cache": True},
+                {"routing_key": "CPU-bound"})
+        args, kwargs, options = s.apply_async((10, ),
+                                              {"cache": False, "other": "foo"},
+                                              routing_key="IO-bound",
+                                              exchange="fast")
+
+        self.assertTupleEqual(args, (10, 2))
+        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
+        self.assertDictEqual(options, {"routing_key": "IO-bound",
+                                        "exchange": "fast"})
+
+    def test_apply_argmerge(self):
+        s = MockTask.subtask((2, ), {"cache": True},
+                {"routing_key": "CPU-bound"})
+        args, kwargs, options = s.apply((10, ),
+                                        {"cache": False, "other": "foo"},
+                                        routing_key="IO-bound",
+                                        exchange="fast")
+
+        self.assertTupleEqual(args, (10, 2))
+        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
+        self.assertDictEqual(options, {"routing_key": "IO-bound",
+                                        "exchange": "fast"})
+
+    def test_is_JSON_serializable(self):
+        s = MockTask.subtask((2, ), {"cache": True},
+                {"routing_key": "CPU-bound"})
+        s.args = list(s.args) # tuples are not preserved
+                              # but this doesn't matter.
+        self.assertEqual(s,
+                         subtask(simplejson.loads(simplejson.dumps(s))))
+
+
+class test_TaskSet(unittest.TestCase):
+
+    def test_interface__compat(self):
+
+        def with_catch_warnings(log):
+            ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
+            self.assertTrue(log)
+            self.assertIn("Using this invocation of TaskSet is deprecated",
+                          log[0].message.args[0])
+            self.assertListEqual(ts.tasks,
+                                 [MockTask.subtask((i, i))
+                                    for i in (2, 4, 8)])
+            return ts
+
+        context = catch_warnings(record=True)
+        execute_context(context, with_catch_warnings)
+
+        # TaskSet.task (deprecated)
+        def with_catch_warnings2(log):
+            ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
+            self.assertEqual(ts.task, MockTask)
+            self.assertTrue(log)
+            self.assertIn("TaskSet.task is deprecated",
+                          log[0].message.args[0])
+
+        execute_context(catch_warnings(record=True), with_catch_warnings2)
+
+        # TaskSet.task_name (deprecated)
+        def with_catch_warnings3(log):
+            ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
+            self.assertEqual(ts.task_name, MockTask.name)
+            self.assertTrue(log)
+            self.assertIn("TaskSet.task_name is deprecated",
+                          log[0].message.args[0])
+
+        execute_context(catch_warnings(record=True), with_catch_warnings3)
+
+    def test_task_arg_can_be_iterable__compat(self):
+        ts = TaskSet([MockTask.subtask((i, i))
+                        for i in (2, 4, 8)])
+        self.assertEqual(len(ts), 3)
+
+    def test_respects_ALWAYS_EAGER(self):
+
+        class MockTaskSet(TaskSet):
+            applied = 0
+
+            def apply(self, *args, **kwargs):
+                self.applied += 1
+
+        ts = MockTaskSet([MockTask.subtask((i, i))
+                        for i in (2, 4, 8)])
+        conf.ALWAYS_EAGER = True
+        try:
+            ts.apply_async()
+        finally:
+            conf.ALWAYS_EAGER = False
+        self.assertEqual(ts.applied, 1)
+
+    def test_apply_async(self):
+
+        applied = [0]
+
+        class mocksubtask(subtask):
+
+            def apply_async(self, *args, **kwargs):
+                applied[0] += 1
+
+        ts = TaskSet([mocksubtask(MockTask, (i, i))
+                        for i in (2, 4, 8)])
+        ts.apply_async()
+        self.assertEqual(applied[0], 3)
+
+    def test_apply(self):
+
+        applied = [0]
+
+        class mocksubtask(subtask):
+
+            def apply(self, *args, **kwargs):
+                applied[0] += 1
+
+        ts = TaskSet([mocksubtask(MockTask, (i, i))
+                        for i in (2, 4, 8)])
+        ts.apply()
+        self.assertEqual(applied[0], 3)
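
The JSON round-trip in test_is_JSON_serializable works because a subtask is essentially a dict carrying the task name plus args/kwargs/options. A minimal sketch of that shape, assuming this layout (the real celery.task.sets.subtask also provides the delay/apply/apply_async helpers used above):

    class subtask(dict):
        """A serializable (task, args, kwargs, options) bundle."""

        def __init__(self, task, args=(), kwargs={}, options={}):
            name = getattr(task, "name", task)   # accept class or name
            dict.__init__(self, task=name, args=args,
                          kwargs=kwargs, options=options)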

+ 120 - 4
celery/tests/test_utils.py

@@ -1,13 +1,20 @@
+import pickle
 import sys
 import socket
 import unittest2 as unittest
 
 from celery import utils
+from celery.utils import promise, mpromise, maybe_promise
 
 from celery.tests.utils import sleepdeprived, execute_context
 from celery.tests.utils import mask_modules
 
-class TestChunks(unittest.TestCase):
+
+def double(x):
+    return x * 2
+
+
+class test_chunks(unittest.TestCase):
 
     def test_chunks(self):
 
             [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
             [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
 
 
 
 
-class TestGenUniqueId(unittest.TestCase):
+class test_gen_unique_id(unittest.TestCase):
 
 
     def test_gen_unique_id_without_ctypes(self):
     def test_gen_unique_id_without_ctypes(self):
         old_utils = sys.modules.pop("celery.utils")
         old_utils = sys.modules.pop("celery.utils")
@@ -47,7 +54,7 @@ class TestGenUniqueId(unittest.TestCase):
             sys.modules["celery.utils"] = old_utils
             sys.modules["celery.utils"] = old_utils
 
 
 
 
-class TestDivUtils(unittest.TestCase):
+class test_utils(unittest.TestCase):
 
 
     def test_repeatlast(self):
     def test_repeatlast(self):
         items = range(6)
         items = range(6)
@@ -57,8 +64,65 @@ class TestDivUtils(unittest.TestCase):
         for j in items:
         for j in items:
             self.assertEqual(it.next(), i)
             self.assertEqual(it.next(), i)
 
 
+    def test_get_full_cls_name(self):
+        Class = type("Fox", (object, ), {"__module__": "quick.brown"})
+        self.assertEqual(utils.get_full_cls_name(Class), "quick.brown.Fox")
+
+    def test_is_iterable(self):
+        for a in "f", ["f"], ("f", ), {"f": "f"}:
+            self.assertTrue(utils.is_iterable(a))
+        for b in object(), 1:
+            self.assertFalse(utils.is_iterable(b))
+
+    def test_padlist(self):
+        self.assertListEqual(utils.padlist(["George", "Costanza", "NYC"], 3),
+                ["George", "Costanza", "NYC"])
+        self.assertListEqual(utils.padlist(["George", "Costanza"], 3),
+                ["George", "Costanza", None])
+        self.assertListEqual(utils.padlist(["George", "Costanza", "NYC"], 4,
+                                           default="Earth"),
+                ["George", "Costanza", "NYC", "Earth"])
+
+    def test_firstmethod_AttributeError(self):
+        self.assertIsNone(utils.firstmethod("foo")([object()]))
+
+    def test_firstmethod_promises(self):
+
+        class A(object):
+
+            def __init__(self, value=None):
+                self.value = value
+
+            def m(self):
+                return self.value
+
+        self.assertEqual("four", utils.firstmethod("m")([
+            A(), A(), A(), A("four"), A("five")]))
+        self.assertEqual("four", utils.firstmethod("m")([
+            A(), A(), A(), promise(lambda: A("four")), A("five")]))
+
+    def test_first(self):
+        iterations = [0]
+
+        def predicate(value):
+            iterations[0] += 1
+            if value == 5:
+                return True
+            return False
+
+        self.assertEqual(5, utils.first(predicate, xrange(10)))
+        self.assertEqual(iterations[0], 6)
+
+        iterations[0] = 0
+        self.assertIsNone(utils.first(predicate, xrange(10, 20)))
+        self.assertEqual(iterations[0], 10)
 
-class TestRetryOverTime(unittest.TestCase):
+    def test_get_cls_by_name__instance_returns_instance(self):
+        instance = object()
+        self.assertIs(utils.get_cls_by_name(instance), instance)
+
+
+class test_retry_over_time(unittest.TestCase):
 
     def test_returns_retval_on_success(self):
 
@@ -98,3 +162,55 @@ class TestRetryOverTime(unittest.TestCase):
 
         self.assertRaises(socket.error, utils.retry_over_time,
                         _fun, (socket.error, ), args=[32, 32], max_retries=1)
+
+
+class test_promise(unittest.TestCase):
+
+    def test__str__(self):
+        self.assertEqual(str(promise(lambda: "the quick brown fox")),
+                "the quick brown fox")
+
+    def test__repr__(self):
+        self.assertEqual(repr(promise(lambda: "fi fa fo")),
+                "'fi fa fo'")
+
+    def test_evaluate(self):
+        self.assertEqual(promise(lambda: 2 + 2)(), 4)
+        self.assertEqual(promise(lambda x: x * 4, 2), 8)
+        self.assertEqual(promise(lambda x: x * 8, 2)(), 16)
+
+    def test_cmp(self):
+        self.assertEqual(promise(lambda: 10), promise(lambda: 10))
+        self.assertNotEqual(promise(lambda: 10), promise(lambda: 20))
+
+    def test__reduce__(self):
+        x = promise(double, 4)
+        y = pickle.loads(pickle.dumps(x))
+        self.assertEqual(x(), y())
+
+    def test__deepcopy__(self):
+        from copy import deepcopy
+        x = promise(double, 4)
+        y = deepcopy(x)
+        self.assertEqual(x._fun, y._fun)
+        self.assertEqual(x._args, y._args)
+        self.assertEqual(x(), y())
+
+
+class test_mpromise(unittest.TestCase):
+
+    def test_is_memoized(self):
+
+        it = iter(xrange(20, 30))
+        p = mpromise(it.next)
+        self.assertEqual(p(), 20)
+        self.assertTrue(p.evaluated)
+        self.assertEqual(p(), 20)
+        self.assertEqual(repr(p), "20")
+
+
+class test_maybe_promise(unittest.TestCase):
+
+    def test_evaluates(self):
+        self.assertEqual(maybe_promise(promise(lambda: 10)), 10)
+        self.assertEqual(maybe_promise(20), 20)
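
A hedged sketch of the promise family these tests pin down: promise defers fun(*args, **kwargs) until called, mpromise additionally memoizes the first result, and maybe_promise evaluates promises while passing other values through. The real implementations in celery.utils also define __str__, __repr__, comparison and pickling support, which the tests above exercise:

    class promise(object):

        def __init__(self, fun, *args, **kwargs):
            self._fun, self._args, self._kwargs = fun, args, kwargs

        def __call__(self):
            return self._fun(*self._args, **self._kwargs)

    class mpromise(promise):
        evaluated = False

        def __call__(self):
            if not self.evaluated:
                self._value = promise.__call__(self)
                self.evaluated = True
            return self._value

    def maybe_promise(value):
        if isinstance(value, promise):
            return value()
        return value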

+ 4 - 4
celery/tests/test_utils_info.py

@@ -16,7 +16,7 @@ RANDTEXT_RES = """\
     lazy dog\
 """
 
-ROUTE = {"queue1": {
+QUEUES = {"queue1": {
             "exchange": "exchange1",
             "exchange_type": "type1",
             "binding_key": "bind1"},
@@ -26,7 +26,7 @@ ROUTE = {"queue1": {
             "binding_key": "bind2"}}
 
 
-ROUTE_FORMAT = """
+QUEUE_FORMAT = """
 . queue1 -> exchange:exchange1 (type1) binding:bind1
 . queue2 -> exchange:exchange2 (type2) binding:bind2
 """.strip()
@@ -55,8 +55,8 @@ class TestInfo(unittest.TestCase):
     def test_textindent(self):
         self.assertEqual(info.textindent(RANDTEXT, 4), RANDTEXT_RES)
 
-    def test_format_routing_table(self):
-        self.assertEqual(info.format_routing_table(ROUTE), ROUTE_FORMAT)
+    def test_format_queues(self):
+        self.assertEqual(info.format_queues(QUEUES), QUEUE_FORMAT)
 
     def test_broker_info(self):
         info.format_broker_info()
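
test_format_queues fixes the expected output exactly, so the renamed helper is presumably a small formatter over the queue map (illustrative sketch of celery.utils.info.format_queues):

    def format_queues(queues):
        return "\n".join(
            ". %s -> exchange:%s (%s) binding:%s" % (
                name, opts["exchange"], opts["exchange_type"],
                opts["binding_key"])
            for name, opts in sorted(queues.items()))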

+ 179 - 27
celery/tests/test_worker.py

@@ -1,24 +1,26 @@
+import socket
 import unittest2 as unittest
-from Queue import Empty
+
 from datetime import datetime, timedelta
 from multiprocessing import get_logger
+from Queue import Empty
 
-from carrot.connection import BrokerConnection
 from carrot.backends.base import BaseMessage
+from carrot.connection import BrokerConnection
+from timer2 import Timer
 
 from celery import conf
+from celery.decorators import task as task_dec
+from celery.decorators import periodic_task as periodic_task_dec
+from celery.serialization import pickle
 from celery.utils import gen_unique_id
 from celery.worker import WorkController
-from celery.worker.job import TaskRequest
 from celery.worker.buckets import FastQueue
+from celery.worker.job import TaskRequest
 from celery.worker.listener import CarrotListener, QoS, RUN
-from celery.worker.scheduler import Scheduler
-from celery.decorators import task as task_dec
-from celery.decorators import periodic_task as periodic_task_dec
-from celery.serialization import pickle
 
-from celery.tests.utils import execute_context
 from celery.tests.compat import catch_warnings
+from celery.tests.utils import execute_context
 
 
 class PlaceHolder(object):
@@ -62,17 +64,20 @@ def foo_periodic_task():
 
 class MockLogger(object):
 
-    def critical(self, *args, **kwargs):
-        pass
+    def __init__(self):
+        self.logged = []
 
-    def info(self, *args, **kwargs):
-        pass
+    def critical(self, msg, *args, **kwargs):
+        self.logged.append(msg)
 
-    def error(self, *args, **kwargs):
-        pass
+    def info(self, msg, *args, **kwargs):
+        self.logged.append(msg)
 
-    def debug(self, *args, **kwargs):
-        pass
+    def error(self, msg, *args, **kwargs):
+        self.logged.append(msg)
+
+    def debug(self, msg, *args, **kwargs):
+        self.logged.append(msg)
 
 
 class MockBackend(object):
@@ -83,6 +88,8 @@ class MockBackend(object):
 
 
 class MockPool(object):
+    _terminated = False
+    _stopped = False
 
     def __init__(self, *args, **kwargs):
         self.raise_regular = kwargs.get("raise_regular", False)
@@ -98,9 +105,13 @@ class MockPool(object):
         pass
 
     def stop(self):
-        pass
+        self._stopped = True
         return True
 
+    def terminate(self):
+        self._terminated = True
+        self.stop()
+
 
 class MockController(object):
 
@@ -123,11 +134,33 @@ def create_message(backend, **data):
                        content_encoding="binary")
 
 
-class TestCarrotListener(unittest.TestCase):
+class test_QoS(unittest.TestCase):
+
+    class MockConsumer(object):
+        prefetch_count = 0
+
+        def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
+            self.prefetch_count = prefetch_count
+
+    def test_decrement(self):
+        consumer = self.MockConsumer()
+        qos = QoS(consumer, 10, get_logger())
+        qos.update()
+        self.assertEqual(int(qos.value), 10)
+        self.assertEqual(consumer.prefetch_count, 10)
+        qos.decrement()
+        self.assertEqual(int(qos.value), 9)
+        self.assertEqual(consumer.prefetch_count, 9)
+        qos.decrement_eventually()
+        self.assertEqual(int(qos.value), 8)
+        self.assertEqual(consumer.prefetch_count, 9)
+
+
+class test_CarrotListener(unittest.TestCase):
 
     def setUp(self):
         self.ready_queue = FastQueue()
-        self.eta_schedule = Scheduler(self.ready_queue)
+        self.eta_schedule = Timer()
         self.logger = get_logger()
         self.logger.setLevel(0)
 
@@ -232,6 +265,38 @@ class TestCarrotListener(unittest.TestCase):
         context = catch_warnings(record=True)
         execute_context(context, with_catch_warnings)
 
+    def test_receive_message_InvalidTaskError(self):
+        logger = MockLogger()
+        l = CarrotListener(self.ready_queue, self.eta_schedule, logger,
+                           send_events=False)
+        backend = MockBackend()
+        m = create_message(backend, task=foo_task.name,
+            args=(1, 2), kwargs="foobarbaz", id=1)
+        l.event_dispatcher = MockEventDispatcher()
+        l.control_dispatch = MockControlDispatch()
+
+        l.receive_message(m.decode(), m)
+        self.assertIn("Invalid task ignored", logger.logged[0])
+
+    def test_on_decode_error(self):
+        logger = MockLogger()
+        l = CarrotListener(self.ready_queue, self.eta_schedule, logger,
+                           send_events=False)
+
+        class MockMessage(object):
+            content_type = "application/x-msgpack"
+            content_encoding = "binary"
+            body = "foobarbaz"
+            acked = False
+
+            def ack(self):
+                self.acked = True
+
+        message = MockMessage()
+        l.on_decode_error(message, KeyError("foo"))
+        self.assertTrue(message.acked)
+        self.assertIn("Message decoding error", logger.logged[0])
+
     def test_receieve_message(self):
         l = CarrotListener(self.ready_queue, self.eta_schedule, self.logger,
                            send_events=False)
@@ -271,7 +336,7 @@ class TestCarrotListener(unittest.TestCase):
         items = [entry[2] for entry in self.eta_schedule.queue]
         found = 0
         for item in items:
-            if item.task_name == foo_task.name:
+            if item.args[0].task_name == foo_task.name:
                 found = True
         self.assertTrue(found)
         self.assertTrue(l.task_consumer.prefetch_count_incremented)
@@ -288,7 +353,7 @@ class TestCarrotListener(unittest.TestCase):
                            kwargs={}, id=id)
         l.event_dispatcher = MockEventDispatcher()
         l.receive_message(c.decode(), c)
-        from celery.worker.revoke import revoked
+        from celery.worker.state import revoked
         self.assertIn(id, revoked)
 
         l.receive_message(t.decode(), t)
@@ -323,30 +388,97 @@ class TestCarrotListener(unittest.TestCase):
         l.receive_message(m.decode(), m)
 
         in_hold = self.eta_schedule.queue[0]
-        self.assertEqual(len(in_hold), 4)
-        eta, priority, task, on_accept = in_hold
+        self.assertEqual(len(in_hold), 3)
+        eta, priority, entry = in_hold
+        task = entry.args[0]
         self.assertIsInstance(task, TaskRequest)
-        self.assertTrue(callable(on_accept))
         self.assertEqual(task.task_name, foo_task.name)
         self.assertEqual(task.execute(), 2 * 4 * 8)
         self.assertRaises(Empty, self.ready_queue.get_nowait)
 
+    def test_start__consume_messages(self):
+
+        class _QoS(object):
+            prev = 3
+            next = 4
+
+            def update(self):
+                self.prev = self.next
+
+        class _Listener(CarrotListener):
+            iterations = 0
+            wait_method = None
+
+            def reset_connection(self):
+                if self.iterations >= 1:
+                    raise KeyError("foo")
+
+            def _detect_wait_method(self):
+                return self.wait_method
+
+        called_back = [False]
+        def init_callback(listener):
+            called_back[0] = True
+
+
+        l = _Listener(self.ready_queue, self.eta_schedule, self.logger,
+                      send_events=False, init_callback=init_callback)
+        l.qos = _QoS()
+
+        def raises_KeyError(limit=None):
+            yield True
+            l.iterations = 1
+            raise KeyError("foo")
+
+        l.wait_method = raises_KeyError
+        self.assertRaises(KeyError, l.start)
+        self.assertTrue(called_back[0])
+        self.assertEqual(l.iterations, 1)
+        self.assertEqual(l.qos.prev, l.qos.next)
+
+        l = _Listener(self.ready_queue, self.eta_schedule, self.logger,
+                      send_events=False, init_callback=init_callback)
+        l.qos = _QoS()
+        def raises_socket_error(limit=None):
+            yield True
+            l.iterations = 1
+            raise socket.error("foo")
+
+        l.wait_method = raises_socket_error
+        self.assertRaises(KeyError, l.start)
+        self.assertTrue(called_back[0])
+        self.assertEqual(l.iterations, 1)
+
 
-class TestWorkController(unittest.TestCase):
+class test_WorkController(unittest.TestCase):
 
     def setUp(self):
         self.worker = WorkController(concurrency=1, loglevel=0)
         self.worker.logger = MockLogger()
 
+    def test_with_rate_limits_disabled(self):
+        conf.DISABLE_RATE_LIMITS = True
+        try:
+            worker = WorkController(concurrency=1, loglevel=0)
+            self.assertIsInstance(worker.ready_queue, FastQueue)
+        finally:
+            conf.DISABLE_RATE_LIMITS = False
+
     def test_attrs(self):
         worker = self.worker
-        self.assertIsInstance(worker.eta_schedule, Scheduler)
+        self.assertIsInstance(worker.scheduler, Timer)
         self.assertTrue(worker.scheduler)
         self.assertTrue(worker.pool)
         self.assertTrue(worker.listener)
         self.assertTrue(worker.mediator)
         self.assertTrue(worker.components)
 
+    def test_with_embedded_clockservice(self):
+        worker = WorkController(concurrency=1, loglevel=0,
+                                embed_clockservice=True)
+        self.assertTrue(worker.clockservice)
+        self.assertIn(worker.clockservice, worker.components)
+
     def test_process_task(self):
         worker = self.worker
         worker.pool = MockPool()
@@ -377,7 +509,7 @@ class TestWorkController(unittest.TestCase):
         worker.process_task(task)
         worker.pool.stop()
 
-    def test_start_stop(self):
+    def test_start__stop(self):
         worker = self.worker
         w1 = {"started": False}
         w2 = {"started": False}
@@ -393,3 +525,23 @@ class TestWorkController(unittest.TestCase):
         worker.stop()
         for component in worker.components:
             self.assertTrue(component._stopped)
+
+    def test_start__terminate(self):
+        worker = self.worker
+        w1 = {"started": False}
+        w2 = {"started": False}
+        w3 = {"started": False}
+        w4 = {"started": False}
+        worker.components = [MockController(w1), MockController(w2),
+                             MockController(w3), MockController(w4),
+                             MockPool()]
+
+        worker.start()
+        for w in (w1, w2, w3, w4):
+            self.assertTrue(w["started"])
+        self.assertTrue(worker._running, len(worker.components))
+        self.assertEqual(worker._state, RUN)
+        worker.terminate()
+        for component in worker.components:
+            self.assertTrue(component._stopped)
+        self.assertTrue(worker.components[4]._terminated)
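
test_QoS documents the prefetch bookkeeping: decrement() pushes the new prefetch count to the consumer immediately, while decrement_eventually() only records it until the next update(). A hedged sketch consistent with those assertions (the real QoS in celery.worker.listener also handles locking and logging):

    class QoS(object):

        def __init__(self, consumer, initial_value, logger):
            self.consumer = consumer
            self.logger = logger
            self.value = initial_value

        def update(self):
            self.consumer.qos(prefetch_count=int(self.value))

        def decrement(self):
            self.value -= 1
            self.update()         # applied right away
            return self.value

        def decrement_eventually(self):
            self.value -= 1       # applied at the next update()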

+ 150 - 5
celery/tests/test_worker_control.py

@@ -1,25 +1,116 @@
 import socket
 import unittest2 as unittest
 
+from timer2 import Timer
+
+from celery import conf
+from celery.decorators import task
+from celery.registry import tasks
 from celery.task.builtins import PingTask
 from celery.utils import gen_unique_id
 from celery.worker import control
-from celery.worker.revoke import revoked
-from celery.registry import tasks
+from celery.worker.buckets import FastQueue
+from celery.worker.job import TaskRequest
+from celery.worker.state import revoked
 
 hostname = socket.gethostname()
 
 
-class TestControlPanel(unittest.TestCase):
+@task(rate_limit=200) # for extra info in dump_tasks
+def mytask():
+    pass
+
+
+class Dispatcher(object):
+    enabled = None
+
+    def __init__(self, *args, **kwargs):
+        self.sent = []
+
+    def enable(self):
+        self.enabled = True
+
+    def disable(self):
+        self.enabled = False
+
+    def send(self, event):
+        self.sent.append(event)
+
+
+class Listener(object):
+
+    def __init__(self):
+        self.ready_queue = FastQueue()
+        self.ready_queue.put(TaskRequest(task_name=mytask.name,
+                                         task_id=gen_unique_id(),
+                                         args=(2, 2),
+                                         kwargs={}))
+        self.eta_schedule = Timer()
+        self.event_dispatcher = Dispatcher()
+
+
+class test_ControlPanel(unittest.TestCase):
 
     def setUp(self):
-        self.panel = self.create_panel(listener=object())
+        self.panel = self.create_panel(listener=Listener())
 
     def create_panel(self, **kwargs):
         return control.ControlDispatch(hostname=hostname, **kwargs)
 
+    def test_disable_events(self):
+        listener = Listener()
+        panel = self.create_panel(listener=listener)
+        listener.event_dispatcher.enabled = True
+        panel.execute("disable_events")
+        self.assertEqual(listener.event_dispatcher.enabled, False)
+        self.assertIn("worker-offline", listener.event_dispatcher.sent)
+
+    def test_enable_events(self):
+        listener = Listener()
+        panel = self.create_panel(listener=listener)
+        listener.event_dispatcher.enabled = False
+        panel.execute("enable_events")
+        self.assertEqual(listener.event_dispatcher.enabled, True)
+        self.assertIn("worker-online", listener.event_dispatcher.sent)
+
     def test_dump_tasks(self):
-        self.panel.execute("dump_tasks")
+        info = "\n".join(self.panel.execute("dump_tasks"))
+        self.assertIn("mytask", info)
+        self.assertIn("rate_limit=200", info)
+
+    def test_dump_schedule(self):
+        listener = Listener()
+        panel = self.create_panel(listener=listener)
+        self.assertFalse(panel.execute("dump_schedule"))
+        import operator
+        listener.eta_schedule.schedule.enter(100, operator.add, (2, 2))
+        self.assertTrue(panel.execute("dump_schedule"))
+
+    def test_dump_reserved(self):
+        listener = Listener()
+        panel = self.create_panel(listener=listener)
+        response = panel.execute("dump_reserved", {"safe": True})
+        self.assertDictContainsSubset({"name": mytask.name,
+                                       "args": (2, 2),
+                                       "kwargs": {},
+                                       "hostname": socket.gethostname()},
+                                       response[0])
+        listener.ready_queue = FastQueue()
+        self.assertFalse(panel.execute("dump_reserved"))
+
+    def test_rate_limit_when_disabled(self):
+        conf.DISABLE_RATE_LIMITS = True
+        try:
+            e = self.panel.execute("rate_limit", kwargs=dict(
+                 task_name=mytask.name, rate_limit="100/m"))
+            self.assertIn("rate limits disabled", e.get("error"))
+        finally:
+            conf.DISABLE_RATE_LIMITS = False
+
+    def test_rate_limit_invalid_rate_limit_string(self):
+        e = self.panel.execute("rate_limit", kwargs=dict(
+            task_name="tasks.add", rate_limit="x1240301#%!"))
+        self.assertIn("Invalid rate limit string", e.get("error"))
 
     def test_rate_limit(self):
 
@@ -60,6 +151,24 @@ class TestControlPanel(unittest.TestCase):
     def test_unexposed_command(self):
         self.panel.execute("foo", kwargs={})
 
+    def test_revoke_with_name(self):
+        uuid = gen_unique_id()
+        m = {"command": "revoke",
+             "destination": hostname,
+             "task_id": uuid,
+             "task_name": mytask.name}
+        self.panel.dispatch_from_message(m)
+        self.assertIn(uuid, revoked)
+
+    def test_revoke_with_name_not_in_registry(self):
+        uuid = gen_unique_id()
+        m = {"command": "revoke",
+             "destination": hostname,
+             "task_id": uuid,
+             "task_name": "xxxxxxxxx33333333388888"}
+        self.panel.dispatch_from_message(m)
+        self.assertIn(uuid, revoked)
+
     def test_revoke(self):
         uuid = gen_unique_id()
         m = {"command": "revoke",
@@ -73,3 +182,39 @@ class TestControlPanel(unittest.TestCase):
             "task_id": uuid + "xxx"}
         self.panel.dispatch_from_message(m)
         self.assertNotIn(uuid + "xxx", revoked)
+
+    def test_ping(self):
+        m = {"command": "ping",
+             "destination": hostname}
+        r = self.panel.dispatch_from_message(m)
+        self.assertEqual(r, "pong")
+
+    def test_shutdown(self):
+        m = {"command": "shutdown",
+             "destination": hostname}
+        self.assertRaises(SystemExit, self.panel.dispatch_from_message, m)
+
+    def test_panel_reply(self):
+
+        replies = []
+
+        class MockReplyPublisher(object):
+
+            def __init__(self, *args, **kwargs):
+                pass
+
+            def send(self, reply, **kwargs):
+                replies.append(reply)
+
+            def close(self):
+                pass
+
+        class _Dispatch(control.ControlDispatch):
+            ReplyPublisher = MockReplyPublisher
+
+        panel = _Dispatch(hostname, listener=Listener())
+
+        r = panel.execute("ping", reply_to={"exchange": "x",
+                                            "routing_key": "x"})
+        self.assertEqual(r, "pong")
+        self.assertDictEqual(replies[0], {panel.hostname: "pong"})

+ 7 - 59
celery/tests/test_worker_controllers.py

@@ -1,11 +1,10 @@
-import time
 import unittest2 as unittest
+
 from Queue import Queue
 
 from celery.utils import gen_unique_id
 from celery.worker.controllers import Mediator
-from celery.worker.controllers import BackgroundThread, ScheduleController
-from celery.worker.revoke import revoked as revoked_tasks
+from celery.worker.state import revoked as revoked_tasks
 
 
 class MockTask(object):
@@ -26,35 +25,7 @@ class MockTask(object):
         return False
 
 
-class MyBackgroundThread(BackgroundThread):
-
-    def on_iteration(self):
-        time.sleep(1)
-
-
-class TestBackgroundThread(unittest.TestCase):
-
-    def test_on_iteration(self):
-        self.assertRaises(NotImplementedError,
-                BackgroundThread().on_iteration)
-
-    def test_run(self):
-        t = MyBackgroundThread()
-        t._shutdown.set()
-        t.run()
-        self.assertTrue(t._stopped.isSet())
-
-    def test_start_stop(self):
-        t = MyBackgroundThread()
-        t.start()
-        self.assertFalse(t._shutdown.isSet())
-        self.assertFalse(t._stopped.isSet())
-        t.stop()
-        self.assertTrue(t._shutdown.isSet())
-        self.assertTrue(t._stopped.isSet())
-
-
-class TestMediator(unittest.TestCase):
+class test_Mediator(unittest.TestCase):
 
     def test_mediator_start__stop(self):
         ready_queue = Queue()
@@ -67,7 +38,7 @@ class TestMediator(unittest.TestCase):
         self.assertTrue(m._shutdown.isSet())
         self.assertTrue(m._stopped.isSet())
 
-    def test_mediator_on_iteration(self):
+    def test_mediator_move(self):
         ready_queue = Queue()
         got = {}
 
@@ -77,11 +48,11 @@ class TestMediator(unittest.TestCase):
         m = Mediator(ready_queue, mycallback)
         ready_queue.put(MockTask("George Costanza"))
 
-        m.on_iteration()
+        m.move()
 
         self.assertEqual(got["value"], "George Costanza")
 
-    def test_mediator_on_iteration_revoked(self):
+    def test_mediator_move_revoked(self):
         ready_queue = Queue()
         got = {}
 
@@ -94,30 +65,7 @@ class TestMediator(unittest.TestCase):
         revoked_tasks.add(t.task_id)
         ready_queue.put(t)
 
-        m.on_iteration()
+        m.move()
 
         self.assertNotIn("value", got)
         self.assertTrue(t.acked)
-
-
-class TestScheduleController(unittest.TestCase):
-
-    def test_on_iteration(self):
-        times = range(10) + [None]
-        c = ScheduleController(times)
-
-        import time
-        slept = [None]
-
-        def _sleep(count):
-            slept[0] = count
-
-        old_sleep = time.sleep
-        time.sleep = _sleep
-        try:
-            for i in times:
-                c.on_iteration()
-                res = i is None and 1 or i
-                self.assertEqual(slept[0], res)
-        finally:
-            time.sleep = old_sleep

+ 135 - 26
celery/tests/test_worker_job.py

@@ -1,25 +1,29 @@
 # -*- coding: utf-8 -*-
-import sys
 import logging
-import unittest2 as unittest
 import simplejson
+import sys
+import unittest2 as unittest
+
 from StringIO import StringIO
 
 from carrot.backends.base import BaseMessage
 
 from celery import states
+from celery.backends import default_backend
+from celery.datastructures import ExceptionInfo
+from celery.decorators import task as task_dec
+from celery.exceptions import RetryTaskError, NotRegistered
 from celery.log import setup_logger
+from celery.result import AsyncResult
 from celery.task.base import Task
 from celery.utils import gen_unique_id
-from celery.result import AsyncResult
 from celery.worker.job import WorkerTaskTrace, TaskRequest
-from celery.backends import default_backend
-from celery.exceptions import RetryTaskError, NotRegistered
-from celery.decorators import task as task_dec
-from celery.datastructures import ExceptionInfo
+from celery.worker.job import execute_and_trace, AlreadyExecutedError
+from celery.worker.job import InvalidTaskError
+from celery.worker.state import revoked
 
-from celery.tests.utils import execute_context
 from celery.tests.compat import catch_warnings
+from celery.tests.utils import execute_context
 
 scratch = {"ACK": False}
 some_kwargs_scratchpad = {}
@@ -38,7 +42,7 @@ def mytask(i, **kwargs):
     return i ** i
 
 
-@task_dec()
+@task_dec # traverses coverage for decorator without parens
 def mytask_no_kwargs(i):
     return i ** i
 
@@ -61,7 +65,7 @@ def mytask_raising(i, **kwargs):
     raise KeyError(i)
 
 
-class TestRetryTaskError(unittest.TestCase):
+class test_RetryTaskError(unittest.TestCase):
 
     def test_retry_task_error(self):
         try:
@@ -72,12 +76,19 @@ class TestRetryTaskError(unittest.TestCase):
         self.assertEqual(ret.exc, exc)
 
 
-class TestJail(unittest.TestCase):
+class test_WorkerTaskTrace(unittest.TestCase):
 
     def test_execute_jail_success(self):
         ret = jail(gen_unique_id(), mytask.name, [2], {})
         self.assertEqual(ret, 4)
 
+    def test_marked_as_started(self):
+        mytask.track_started = True
+        try:
+            jail(gen_unique_id(), mytask.name, [2], {})
+        finally:
+            mytask.track_started = False
+
     def test_execute_jail_failure(self):
         ret = jail(gen_unique_id(), mytask_raising.name,
                    [4], {})
@@ -101,7 +112,7 @@ class MockEventDispatcher(object):
         self.sent.append(event)
 
 
-class TestTaskRequest(unittest.TestCase):
+class test_TaskRequest(unittest.TestCase):
 
     def test_task_wrapper_repr(self):
         tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
@@ -128,8 +139,8 @@ class TestTaskRequest(unittest.TestCase):
         try:
             tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
             try:
-                raise KeyError("foo")
-            except KeyError:
+                raise KeyError("moofoobar")
+            except:
                 einfo = ExceptionInfo(sys.exc_info())
 
             tw.on_failure(einfo)
@@ -144,13 +155,112 @@ class TestTaskRequest(unittest.TestCase):
             job.mail_admins = old_mail_admins
             conf.CELERY_SEND_TASK_ERROR_EMAILS = old_enable_mails
 
+    def test_already_revoked(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw._already_revoked = True
+        self.assertTrue(tw.revoked())
+
+    def test_revoked(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        revoked.add(tw.task_id)
+        self.assertTrue(tw.revoked())
+        self.assertTrue(tw._already_revoked)
+        self.assertTrue(tw.acknowledged)
+
+    def test_execute_does_not_execute_revoked(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        revoked.add(tw.task_id)
+        tw.execute()
+
+    def test_execute_acks_late(self):
+        mytask_raising.acks_late = True
+        tw = TaskRequest(mytask_raising.name, gen_unique_id(), [1], {"f": "x"})
+        try:
+            tw.execute()
+            self.assertTrue(tw.acknowledged)
+        finally:
+            mytask_raising.acks_late = False
+
+    def test_execute_using_pool_does_not_execute_revoked(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        revoked.add(tw.task_id)
+        tw.execute_using_pool(None)
+
+    def test_on_accepted_acks_early(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.on_accepted()
+        self.assertTrue(tw.acknowledged)
+
+    def test_on_accepted_acks_late(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        mytask.acks_late = True
+        try:
+            tw.on_accepted()
+            self.assertFalse(tw.acknowledged)
+        finally:
+            mytask.acks_late = False
+
+    def test_on_success_acks_early(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.time_start = 1
+        tw.on_success(42)
+        self.assertFalse(tw.acknowledged)
+
+    def test_on_success_acks_late(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.time_start = 1
+        mytask.acks_late = True
+        try:
+            tw.on_success(42)
+            self.assertTrue(tw.acknowledged)
+        finally:
+            mytask.acks_late = False
+
+    def test_on_failure_acks_late(self):
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.time_start = 1
+        mytask.acks_late = True
+        try:
+            try:
+                raise KeyError("foo")
+            except KeyError:
+                exc_info = ExceptionInfo(sys.exc_info())
+            tw.on_failure(exc_info)
+            self.assertTrue(tw.acknowledged)
+        finally:
+            mytask.acks_late = False
+
+    def test_from_message_invalid_kwargs(self):
+        message_data = dict(task="foo", id=1, args=(), kwargs="foo")
+        self.assertRaises(InvalidTaskError, TaskRequest.from_message, None,
+                message_data)
+
+    def test_on_timeout(self):
+
+        class MockLogger(object):
+
+            def __init__(self):
+                self.warnings = []
+                self.errors = []
+
+            def warning(self, msg, *args, **kwargs):
+                self.warnings.append(msg)
+
+            def error(self, msg, *args, **kwargs):
+                self.errors.append(msg)
+
+        tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
+        tw.logger = MockLogger()
+        tw.on_timeout(soft=True)
+        self.assertIn("Soft time limit exceeded", tw.logger.warnings[0])
+        tw.on_timeout(soft=False)
+        self.assertIn("Hard time limit exceeded", tw.logger.errors[0])
+
     def test_execute_and_trace(self):
-        from celery.worker.job import execute_and_trace
         res = execute_and_trace(mytask.name, gen_unique_id(), [4], {})
         self.assertEqual(res, 4 ** 4)
 
     def test_execute_safe_catches_exception(self):
-        from celery.worker.job import execute_and_trace, WorkerTaskTrace
         old_exec = WorkerTaskTrace.execute
 
         def _error_exec(self, *args, **kwargs):
@@ -192,7 +302,6 @@ class TestTaskRequest(unittest.TestCase):
         self.assertEqual(mytask.backend.get_status(uuid), states.RETRY)
 
     def test_worker_task_trace_handle_failure(self):
-        from celery.worker.job import WorkerTaskTrace
         uuid = gen_unique_id()
         w = WorkerTaskTrace(mytask.name, uuid, [4], {})
         type_, value_, tb_ = self.create_exception(ValueError("foo"))
@@ -204,7 +313,6 @@ class TestTaskRequest(unittest.TestCase):
         self.assertEqual(mytask.backend.get_status(uuid), states.FAILURE)
 
     def test_executed_bit(self):
-        from celery.worker.job import AlreadyExecutedError
         tw = TaskRequest(mytask.name, gen_unique_id(), [], {})
         self.assertFalse(tw.executed)
         tw._set_executed_bit()
@@ -217,12 +325,12 @@ class TestTaskRequest(unittest.TestCase):
                               "id": tw.task_id,
                               "return_value": 10}
         self.assertTrue(x)
-        x = tw.fail_msg % {"name": tw.task_name,
+        x = tw.error_msg % {"name": tw.task_name,
                            "id": tw.task_id,
                            "exc": "FOOBARBAZ",
                            "traceback": "foobarbaz"}
         self.assertTrue(x)
-        x = tw.fail_email_subject % {"name": tw.task_name,
+        x = tw.email_subject % {"name": tw.task_name,
                                      "id": tw.task_id,
                                      "exc": "FOOBARBAZ",
                                      "hostname": "lana"}
 
@@ -257,7 +365,7 @@ class TestTaskRequest(unittest.TestCase):
         tid = gen_unique_id()
         tw = TaskRequest(mytask.name, tid, [4], {"f": "x"})
         self.assertEqual(tw.execute(), 256)
-        meta = default_backend._get_task_meta_for(tid)
+        meta = default_backend.get_task_meta(tid)
         self.assertEqual(meta["result"], 256)
         self.assertEqual(meta["status"], states.SUCCESS)
 
@@ -265,7 +373,7 @@ class TestTaskRequest(unittest.TestCase):
         tid = gen_unique_id()
         tw = TaskRequest(mytask_no_kwargs.name, tid, [4], {})
         self.assertEqual(tw.execute(), 256)
-        meta = default_backend._get_task_meta_for(tid)
+        meta = default_backend.get_task_meta(tid)
         self.assertEqual(meta["result"], 256)
         self.assertEqual(meta["status"], states.SUCCESS)
 
@@ -273,7 +381,7 @@ class TestTaskRequest(unittest.TestCase):
         tid = gen_unique_id()
         tw = TaskRequest(mytask_some_kwargs.name, tid, [4], {})
         self.assertEqual(tw.execute(logfile="foobaz.log"), 256)
-        meta = default_backend._get_task_meta_for(tid)
+        meta = default_backend.get_task_meta(tid)
         self.assertEqual(some_kwargs_scratchpad.get("logfile"), "foobaz.log")
         self.assertEqual(meta["result"], 256)
         self.assertEqual(meta["status"], states.SUCCESS)
@@ -283,7 +391,7 @@ class TestTaskRequest(unittest.TestCase):
         tw = TaskRequest(mytask.name, tid, [4], {"f": "x"},
                         on_ack=on_ack)
         self.assertEqual(tw.execute(), 256)
-        meta = default_backend._get_task_meta_for(tid)
+        meta = default_backend.get_task_meta(tid)
         self.assertTrue(scratch["ACK"])
         self.assertEqual(meta["result"], 256)
         self.assertEqual(meta["status"], states.SUCCESS)
@@ -292,7 +400,7 @@ class TestTaskRequest(unittest.TestCase):
         tid = gen_unique_id()
         tw = TaskRequest(mytask_raising.name, tid, [4], {"f": "x"})
         self.assertIsInstance(tw.execute(), ExceptionInfo)
-        meta = default_backend._get_task_meta_for(tid)
+        meta = default_backend.get_task_meta(tid)
         self.assertEqual(meta["status"], states.FAILURE)
         self.assertIsInstance(meta["result"], KeyError)
 
@@ -347,7 +455,8 @@ class TestTaskRequest(unittest.TestCase):
 
         logfh = StringIO()
         tw.logger.handlers = []
-        tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO)
+        tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO,
+                                 root=False)
 
         from celery import conf
         conf.CELERY_SEND_TASK_ERROR_EMAILS = True

+ 5 - 5
celery/tests/test_worker_revoke.py

@@ -1,12 +1,12 @@
 import unittest2 as unittest
 
-from celery.worker import revoke
+from celery.worker import state
 
 
 class TestRevokeRegistry(unittest.TestCase):
 
     def test_is_working(self):
-        revoke.revoked.add("foo")
-        self.assertIn("foo", revoke.revoked)
-        revoke.revoked.pop_value("foo")
-        self.assertNotIn("foo", revoke.revoked)
+        state.revoked.add("foo")
+        self.assertIn("foo", state.revoked)
+        state.revoked.pop_value("foo")
+        self.assertNotIn("foo", state.revoked)

+ 0 - 58
celery/tests/test_worker_scheduler.py

@@ -1,58 +0,0 @@
-from __future__ import generators
-
-import unittest2 as unittest
-from Queue import Queue, Empty
-from datetime import datetime, timedelta
-
-from celery.worker.scheduler import Scheduler
-
-
-class MockItem(object):
-    is_revoked = False
-
-    def __init__(self, value):
-        self.task_id = value
-
-    def revoked(self):
-        return self.is_revoked
-
-
-class TestScheduler(unittest.TestCase):
-
-    def test_sched_and_run_now(self):
-        ready_queue = Queue()
-        sched = Scheduler(ready_queue)
-        now = datetime.now()
-
-        callback_called = [False]
-        def callback():
-            callback_called[0] = True
-
-        sched.enter(MockItem("foo"), eta=now, callback=callback)
-
-        remaining = iter(sched).next()
-        self.assertEqual(remaining, 0)
-        self.assertTrue(callback_called[0])
-        self.assertEqual(ready_queue.get_nowait().task_id, "foo")
-
-    def test_sched_run_later(self):
-        ready_queue = Queue()
-        sched = Scheduler(ready_queue)
-        now = datetime.now()
-
-        callback_called = [False]
-        def callback():
-            callback_called[0] = True
-
-        eta = now + timedelta(seconds=10)
-        sched.enter(MockItem("foo"), eta=eta, callback=callback)
-
-        remaining = iter(sched).next()
-        self.assertTrue(remaining > 7 or remaining == sched.max_interval)
-        self.assertFalse(callback_called[0])
-        self.assertRaises(Empty, ready_queue.get_nowait)
-
-    def test_empty_queue_yields_None(self):
-        ready_queue = Queue()
-        sched = Scheduler(ready_queue)
-        self.assertIsNone(iter(sched).next())

+ 126 - 4
celery/utils/__init__.py

@@ -1,9 +1,5 @@
 from __future__ import generators
-"""
 
-Utility functions
-
-"""
 import time
 import operator
 try:
@@ -11,17 +7,90 @@ try:
 except ImportError:
     ctypes = None
 import importlib
+from datetime import datetime
 from uuid import UUID, uuid4, _uuid_generate_random
 from inspect import getargspec
 from itertools import islice
 
 from carrot.utils import rpartition
+from dateutil.parser import parse as parse_iso8601
 
 from celery.utils.compat import all, any, defaultdict
 from celery.utils.timeutils import timedelta_seconds # was here before
 from celery.utils.functional import curry
 
 
+class promise(object):
+    """A promise.
+
+    Evaluated when called or if the :meth:`evaluate` method is called.
+    The function is evaluated on every access, so the value is not
+    memoized (see :class:`mpromise`).
+
+    Overloaded operations that will evaluate the promise:
+        :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`.
+
+    """
+
+    def __init__(self, fun, *args, **kwargs):
+        self._fun = fun
+        self._args = args
+        self._kwargs = kwargs
+
+    def __call__(self):
+        return self.evaluate()
+
+    def evaluate(self):
+        return self._fun(*self._args, **self._kwargs)
+
+    def __str__(self):
+        return str(self())
+
+    def __repr__(self):
+        return repr(self())
+
+    def __cmp__(self, rhs):
+        if isinstance(rhs, self.__class__):
+            return -cmp(rhs, self())
+        return cmp(self(), rhs)
+
+    def __deepcopy__(self, memo):
+        memo[id(self)] = self
+        return self
+
+    def __reduce__(self):
+        return (self.__class__, (self._fun, ), {"_args": self._args,
+                                                "_kwargs": self._kwargs})
+
+
+class mpromise(promise):
+    """Memoized promise.
+
+    The function is only evaluated once, every subsequent access
+    will return the same value.
+
+    .. attribute:: evaluated
+
+        Set to to :const:`True` after the promise has been evaluated.
+
+    """
+    evaluated = False
+    _value = None
+
+    def evaluate(self):
+        if not self.evaluated:
+            self._value = super(mpromise, self).evaluate()
+            self.evaluated = True
+        return self._value
+
+
+def maybe_promise(value):
+    """Evaluates if the value is a promise."""
+    if isinstance(value, promise):
+        return value.evaluate()
+    return value
+
+
 def noop(*args, **kwargs):
     """No operation.
 
@@ -31,6 +100,15 @@ def noop(*args, **kwargs):
     pass
 
 
+def maybe_iso8601(dt):
+    """``Either datetime | str -> datetime or None -> None``"""
+    if not dt:
+        return
+    if isinstance(dt, datetime):
+        return dt
+    return parse_iso8601(dt)
+
+
 def kwdict(kwargs):
     """Make sure keyword arguments are not in unicode.
 
@@ -50,6 +128,25 @@ def first(predicate, iterable):
             return item
 
 
+def firstmethod(method):
+    """Returns a functions that with a list of instances,
+    finds the first instance that returns a value for the given method.
+
+    The list can also contain promises (:class:`promise`.)
+
+    """
+
+    def _matcher(seq, *args, **kwargs):
+        for cls in seq:
+            try:
+                answer = getattr(maybe_promise(cls), method)(*args, **kwargs)
+                if answer is not None:
+                    return answer
+            except AttributeError:
+                pass
+    return _matcher
+
+
 def chunks(it, n):
     """Split an iterator into chunks with ``n`` elements each.
 
@@ -256,3 +353,28 @@ def instantiate(name, *args, **kwargs):
 
     """
     return get_cls_by_name(name)(*args, **kwargs)
+
+
+def truncate_text(text, maxlen=128, suffix="..."):
+    """Truncates text to a maximum number of characters."""
+    if len(text) >= maxlen:
+        return text[:maxlen].rsplit(" ", 1)[0] + suffix
+    return text
+
+
+def abbr(S, max, ellipsis="..."):
+    if S is None:
+        return "???"
+    if len(S) > max:
+        return ellipsis and (S[:max-len(ellipsis)] + ellipsis) or S[:max]
+    return S
+
+
+def abbrtask(S, max):
+    if S is None:
+        return "???"
+    if len(S) > max:
+        module, _, cls = rpartition(S, ".")
+        module = abbr(module, max - len(cls), False)
+        return module + "[.]" + cls
+    return S

+ 92 - 31
celery/utils/compat.py

@@ -290,48 +290,95 @@ except ImportError:
     collections.defaultdict = defaultdict # Pickle needs this.
     collections.defaultdict = defaultdict # Pickle needs this.
 
 ############## logging.LoggerAdapter ########################################
+import logging
+import multiprocessing
+import sys
 
-    from logging import LoggerAdapter
-except ImportError:
-    class LoggerAdapter(object):
+class _CompatLoggerAdapter(object):
 
 
-        def __init__(self, logger, extra):
-            self.logger = logger
-            self.extra = extra
+    def __init__(self, logger, extra):
+        self.logger = logger
+        self.extra = extra
 
 
-        def process(self, msg, kwargs):
-            kwargs["extra"] = self.extra
-            return msg, kwargs
+    def setLevel(self, level):
+        self.logger.level = logging._checkLevel(level)
 
 
-        def debug(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            self.logger.debug(msg, *args, **kwargs)
+    def process(self, msg, kwargs):
+        kwargs["extra"] = self.extra
+        return msg, kwargs
 
 
-        def info(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            self.logger.info(msg, *args, **kwargs)
+    def debug(self, msg, *args, **kwargs):
+        self.log(logging.DEBUG, msg, args, **kwargs)
 
 
-        def warning(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            self.logger.warning(msg, *args, **kwargs)
+    def info(self, msg, *args, **kwargs):
+        self.log(logging.INFO, msg, args, **kwargs)
 
 
-        def error(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            self.logger.error(msg, *args, **kwargs)
+    def warning(self, msg, *args, **kwargs):
+        self.log(logging.WARNING, msg, args, **kwargs)
+    warn = warning
 
 
-        def exception(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            kwargs["exc_info"] = 1
-            self.logger.error(msg, *args, **kwargs)
+    def error(self, msg, *args, **kwargs):
+        self.log(logging.ERROR, msg, args, **kwargs)
 
 
-        def critical(self, msg, *args, **kwargs):
-            msg, kwargs = self.process(msg, kwargs)
-            self.logger.critical(msg, *args, **kwargs)
+    def exception(self, msg, *args, **kwargs):
+        kwargs.setdefault("exc_info", 1)
+        self.error(msg, *args, **kwargs)
+
+    def critical(self, msg, *args, **kwargs):
+        self.log(logging.CRITICAL, msg, args, **kwargs)
+    fatal = critical
 
 
-        def log(self, level, msg, *args, **kwargs):
+    def log(self, level, msg, args, **kwargs):
+        if self.logger.isEnabledFor(level):
             msg, kwargs = self.process(msg, kwargs)
             msg, kwargs = self.process(msg, kwargs)
-            self.logger.log(level, msg, *args, **kwargs)
+            self._log(level, msg, args, **kwargs)
+
+    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 
+            func=None, extra=None):
+        rv = logging.LogRecord(name, level, fn, lno,
+                               msg, args, exc_info, func)
+        if extra is not None:
+            for key, value in extra.items():
+                if key in ("message", "asctime") or key in rv.__dict__:
+                    raise KeyError(
+                            "Attempt to override %r in LogRecord" % key)
+                rv.__dict__[key] = value
+        rv.processName = multiprocessing.current_process()._name
+        return rv
+
+    def _log(self, level, msg, args, exc_info=None, extra=None):
+        defcaller = "(unknown file)", 0, "(unknown function)"
+        if logging._srcfile:
+            # IronPython doesn't track Python frames, so findCaller
+            # throws an exception on some versions of IronPython.
+            # We trap it here so that IronPython can use logging.
+            try:
+                fn, lno, func = self.logger.findCaller()
+            except ValueError:
+                fn, lno, func = defcaller
+        else:
+            fn, lno, func = defcaller
+        if exc_info:
+            if not isinstance(exc_info, tuple):
+                exc_info = sys.exc_info()
+        record = self.makeRecord(self.logger.name, level, fn, lno, msg,
+                                    args, exc_info, func, extra)
+        self.logger.handle(record)
+
+    def isEnabledFor(self, level):
+        return self.logger.isEnabledFor(level)
+
+    def addHandler(self, hdlr):
+        self.logger.addHandler(hdlr)
+
+    def removeHandler(self, hdlr):
+        self.logger.removeHandler(hdlr)
+
+
+try:
+    from logging import LoggerAdapter
+except ImportError:
+    LoggerAdapter = _CompatLoggerAdapter
 
 ############## itertools.izip_longest #######################################
 
@@ -353,3 +400,17 @@ except ImportError:
                 yield tup
         except IndexError:
             pass
+
+############## itertools.chain.from_iterable ################################
+from itertools import chain
+
+
+def _compat_chain_from_iterable(iterables):
+    for it in iterables:
+        for element in it:
+            yield element
+
+try:
+    chain_from_iterable = getattr(chain, "from_iterable")
+except AttributeError:
+    chain_from_iterable = _compat_chain_from_iterable
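
The fallback adapter above mirrors the stdlib ``logging.LoggerAdapter`` API, so calling code can bind per-task context either way; a minimal sketch (the ``task_id`` value is illustrative):

    import logging

    from celery.utils.compat import LoggerAdapter

    logging.basicConfig(format="%(name)s [%(task_id)s] %(message)s")
    logger = logging.getLogger("celery.worker.job")

    # every record logged through the adapter gains a .task_id attribute,
    # injected by process() via the ``extra`` mechanism
    adapter = LoggerAdapter(logger, {"task_id": "d3adb33f"})
    adapter.warning("Task is taking too long")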

+ 6 - 8
celery/utils/info.py

@@ -1,9 +1,8 @@
 import math
 
-from celery import conf
 from celery.messaging import establish_connection
 
 
-ROUTE_FORMAT = """
+QUEUE_FORMAT = """
 . %(name)s -> exchange:%(exchange)s (%(exchange_type)s) \
 . %(name)s -> exchange:%(exchange)s (%(exchange_type)s) \
 binding:%(binding_key)s
 binding:%(binding_key)s
 """
 """
@@ -31,13 +30,12 @@ def textindent(t, indent=0):
     return "\n".join(" " * indent + p for p in t.split("\n"))
     return "\n".join(" " * indent + p for p in t.split("\n"))
 
 
 
 
-def format_routing_table(table=None, indent=0):
+def format_queues(queues, indent=0):
     """Format routing table into string for log dumps."""
     """Format routing table into string for log dumps."""
-    table = table or conf.get_routing_table()
-    format = lambda **route: ROUTE_FORMAT.strip() % route
-    routes = "\n".join(format(name=name, **route)
-                            for name, route in table.items())
-    return textindent(routes, indent=indent)
+    format = lambda **queue: QUEUE_FORMAT.strip() % queue
+    info = "\n".join(format(name=name, **config)
+                            for name, config in queues.items())
+    return textindent(info, indent=indent)
 
 
 
 
 def get_broker_info():
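
`format_queues` now takes the queue mapping explicitly instead of reading the global routing table itself; a sketch of the output it renders (the queue options here are illustrative):

    from celery.utils.info import format_queues

    queues = {"default": {"exchange": "default",
                          "exchange_type": "direct",
                          "binding_key": "default"}}
    print format_queues(queues, indent=4)
    #     . default -> exchange:default (direct) binding:default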
 def get_broker_info():

+ 8 - 15
celery/utils/timeutils.py

@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta
 
 from carrot.utils import partition
 
@@ -9,6 +9,8 @@ RATE_MODIFIER_MAP = {"s": lambda n: n,
                      "m": lambda n: n / 60.0,
                      "h": lambda n: n / 60.0 / 60.0}
 
+HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, "total_seconds")
+
 
 def timedelta_seconds(delta):
     """Convert :class:`datetime.timedelta` to seconds.
@@ -16,6 +18,9 @@ def timedelta_seconds(delta):
     Doesn't account for negative values.
 
     """
+    if HAVE_TIMEDELTA_TOTAL_SECONDS:
+        # Should return 0 for negative seconds
+        return max(delta.total_seconds(), 0)
     if delta.days < 0:
         return 0
     return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5)
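
The ``total_seconds`` fast path keeps the documented behaviour, including the clamp of negative deltas to zero:

    from datetime import timedelta

    from celery.utils.timeutils import timedelta_seconds

    assert timedelta_seconds(timedelta(days=1, seconds=30)) == 86430.0
    assert timedelta_seconds(timedelta(seconds=-30)) == 0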
@@ -56,20 +61,6 @@ def remaining(start, ends_in, now=None, relative=True):
           of ``ends_in``).
     :keyword now: The current time, defaults to :func:`datetime.now`.
 
-    Examples::
-
-        >>> remaining(datetime.now(), ends_in=timedelta(seconds=30))
-        '0:0:29.999948'
-
-        >>> str(remaining(datetime.now() - timedelta(minutes=29),
-                ends_in=timedelta(hours=2)))
-        '1:30:59.999938'
-
-        >>> str(remaining(datetime.now() - timedelta(minutes=29),
-                ends_in=timedelta(hours=2),
-                relative=False))
-        '1:11:18.458437'
-
     """
     now = now or datetime.now()
 
@@ -93,6 +84,8 @@ def rate(rate):
 def weekday(name):
     """Return the position of a weekday (0 - 7, where 0 is Sunday).
 
+    Example::
+
         >>> weekday("sunday"), weekday("sun"), weekday("mon")
         (0, 0, 1)
 

+ 43 - 24
celery/worker/__init__.py

@@ -8,21 +8,30 @@ import logging
 import traceback
 from multiprocessing.util import Finalize
 
+from timer2 import Timer
+
 from celery import conf
+from celery import log
 from celery import registry
 from celery import platform
 from celery import signals
-from celery.log import setup_logger, _hijack_multiprocessing_logger
+from celery.log import setup_logger
 from celery.beat import EmbeddedClockService
 from celery.utils import noop, instantiate
 
+from celery.worker import state
 from celery.worker.buckets import TaskBucket, FastQueue
-from celery.worker.scheduler import Scheduler
 
 RUN = 0x1
 CLOSE = 0x2
 TERMINATE = 0x3
 
+WORKER_SIGRESET = frozenset(["SIGTERM",
+                             "SIGHUP",
+                             "SIGTTIN",
+                             "SIGTTOU"])
+WORKER_SIGIGNORE = frozenset(["SIGINT"])
+
 
 def process_initializer():
     """Initializes the process so it can be used to process tasks.
@@ -30,13 +39,8 @@ def process_initializer():
     Used for multiprocessing environments.
 
     """
-    # There seems to a bug in multiprocessing (backport?)
-    # when detached, where the worker gets EOFErrors from time to time
-    # and the logger is left from the parent process causing a crash.
-    _hijack_multiprocessing_logger()
-
-    platform.reset_signal("SIGTERM")
-    platform.ignore_signal("SIGINT")
+    map(platform.reset_signal, WORKER_SIGRESET)
+    map(platform.ignore_signal, WORKER_SIGIGNORE)
     platform.set_mp_process_title("celeryd")
 
     # This is for windows and other platforms not supporting
@@ -60,7 +64,7 @@ class WorkController(object):
     .. attribute:: concurrency
 
         The number of simultaneous processes doing work (default:
-        :const:`celery.conf.CELERYD_CONCURRENCY`)
+        ``conf.CELERYD_CONCURRENCY``)
 
     .. attribute:: loglevel
 
@@ -69,7 +73,7 @@ class WorkController(object):
     .. attribute:: logfile
 
         The logfile used, if no logfile is specified it uses ``stderr``
-        (default: :const:`celery.conf.CELERYD_LOG_FILE`).
+        (default: `celery.conf.CELERYD_LOG_FILE`).
 
     .. attribute:: embed_clockservice
 
@@ -123,7 +127,8 @@ class WorkController(object):
             task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
             task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
             max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
-            pool_putlocks=conf.CELERYD_POOL_PUTLOCKS):
+            pool_putlocks=conf.CELERYD_POOL_PUTLOCKS,
+            db=conf.CELERYD_STATE_DB):
 
         # Options
         self.loglevel = loglevel or self.loglevel
@@ -138,14 +143,20 @@ class WorkController(object):
         self.task_soft_time_limit = task_soft_time_limit
         self.max_tasks_per_child = max_tasks_per_child
         self.pool_putlocks = pool_putlocks
+        self.timer_debug = log.SilenceRepeated(self.logger.debug,
+                                               max_iterations=10)
+        self.db = db
         self._finalize = Finalize(self, self.stop, exitpriority=1)
 
+        if self.db:
+            persistence = state.Persistent(self.db)
+            Finalize(persistence, persistence.save, exitpriority=5)
+
         # Queues
         if conf.DISABLE_RATE_LIMITS:
             self.ready_queue = FastQueue()
         else:
             self.ready_queue = TaskBucket(task_registry=registry.tasks)
-        self.eta_schedule = Scheduler(self.ready_queue, logger=self.logger)
 
         self.logger.debug("Instantiating thread components...")
 
@@ -161,7 +172,9 @@ class WorkController(object):
                                     callback=self.process_task,
                                     logger=self.logger)
         self.scheduler = instantiate(eta_scheduler_cls,
-                                     self.eta_schedule, logger=self.logger)
+                               precision=conf.CELERYD_ETA_SCHEDULER_PRECISION,
+                               on_error=self.on_timer_error,
+                               on_tick=self.on_timer_tick)
 
         self.clockservice = None
         if self.embed_clockservice:
@@ -171,12 +184,13 @@ class WorkController(object):
         prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
         self.listener = instantiate(listener_cls,
                                     self.ready_queue,
-                                    self.eta_schedule,
+                                    self.scheduler,
                                     logger=self.logger,
                                     hostname=self.hostname,
                                     send_events=self.send_events,
                                     init_callback=self.ready_callback,
-                                    initial_prefetch_count=prefetch_count)
+                                    initial_prefetch_count=prefetch_count,
+                                    pool=self.pool)
 
         # The order is important here;
         #   the first in the list is the first to start,
@@ -191,14 +205,11 @@ class WorkController(object):
         """Starts the workers main loop."""
         self._state = RUN
 
-        try:
-            for i, component in enumerate(self.components):
-                self.logger.debug("Starting thread %s..." % \
-                        component.__class__.__name__)
-                self._running = i + 1
-                component.start()
-        finally:
-            self.stop()
+        for i, component in enumerate(self.components):
+            self.logger.debug("Starting thread %s..." % (
+                                    component.__class__.__name__))
+            self._running = i + 1
+            component.start()
 
     def process_task(self, wrapper):
         """Process task by sending it to the pool of workers."""
@@ -241,3 +252,11 @@ class WorkController(object):
 
         self.listener.close_connection()
         self._state = TERMINATE
+
+    def on_timer_error(self, exc_info):
+        _, exc, _ = exc_info
+        self.logger.error("Timer error: %r" % (exc, ))
+
+    def on_timer_tick(self, delay):
+        self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay)
+
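
The two ``map`` calls in `process_initializer` are just a compact spelling of per-signal calls; unrolled over the new frozensets they are equivalent to:

    from celery import platform

    # WORKER_SIGRESET: inherited handlers are reset to the defaults
    platform.reset_signal("SIGTERM")
    platform.reset_signal("SIGHUP")
    platform.reset_signal("SIGTTIN")
    platform.reset_signal("SIGTTOU")
    # WORKER_SIGIGNORE: the pool worker ignores interrupts
    platform.ignore_signal("SIGINT")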

+ 10 - 46
celery/worker/buckets.py

@@ -1,12 +1,12 @@
 import time
 
 from collections import deque
-from itertools import chain
 from Queue import Queue, Empty as QueueEmpty
 
+from celery.datastructures import TokenBucket
 from celery.utils import all
 from celery.utils import timeutils
-from celery.utils.compat import izip_longest
+from celery.utils.compat import izip_longest, chain_from_iterable
 
 
 class RateLimitExceeded(Exception):
@@ -143,6 +143,7 @@ class TaskBucket(object):
         task_type = self.task_registry[task_name]
         rate_limit = getattr(task_type, "rate_limit", None)
         rate_limit = timeutils.rate(rate_limit)
+        task_queue = FastQueue()
         if task_name in self.buckets:
             task_queue = self._get_queue_for_type(task_name)
         else:
@@ -180,7 +181,7 @@ class TaskBucket(object):
     def items(self):
         # for queues with contents [(1, 2), (3, 4), (5, 6), (7, 8)]
         # zips and flattens to [1, 3, 5, 7, 2, 4, 6, 8]
-        return filter(None, chain.from_iterable(izip_longest(*[bucket.items
+        return filter(None, chain_from_iterable(izip_longest(*[bucket.items
                                     for bucket in self.buckets.values()])))
 
 
@@ -194,9 +195,6 @@ class FastQueue(Queue):
     def expected_time(self, tokens=1):
         return 0
 
-    def can_consume(self, tokens=1):
-        return True
-
     def wait(self, block=True):
         return self.get(block=block)
 
@@ -210,36 +208,19 @@ class TokenBucketQueue(object):
 
     This uses the token bucket algorithm to rate limit the queue on get
     operations.
-    See http://en.wikipedia.org/wiki/Token_Bucket
-    Most of this code was stolen from an entry in the ASPN Python Cookbook:
-    http://code.activestate.com/recipes/511490/
-
-    :param fill_rate: see :attr:`fill_rate`.
-    :keyword capacity: see :attr:`capacity`.
-
-    .. attribute:: fill_rate
-
-        The rate in tokens/second that the bucket will be refilled.
 
-    .. attribute:: capacity
-
-        Maximum number of tokens in the bucket. Default is ``1``.
-
-    .. attribute:: timestamp
-
-        Timestamp of the last time a token was taken out of the bucket.
+    :param fill_rate: The rate in tokens/second that the bucket will
+      be refilled.
+    :keyword capacity: Maximum number of tokens in the bucket. Default is 1.
 
     """
     RateLimitExceeded = RateLimitExceeded
 
     def __init__(self, fill_rate, queue=None, capacity=1):
-        self.capacity = float(capacity)
-        self._tokens = self.capacity
+        self._bucket = TokenBucket(fill_rate, capacity)
         self.queue = queue
         if not self.queue:
             self.queue = Queue()
-        self.fill_rate = float(fill_rate)
-        self.timestamp = time.time()
 
     def put(self, item, block=True):
         """Put an item into the queue.
@@ -271,7 +252,7 @@ class TokenBucketQueue(object):
         """
         get = block and self.queue.get or self.queue.get_nowait
 
-        if not self.can_consume(1):
+        if not self._bucket.can_consume(1):
             raise RateLimitExceeded()
 
         return get()
@@ -311,27 +292,10 @@ class TokenBucketQueue(object):
                 return self.get(block=block)
             time.sleep(remaining)
 
-    def can_consume(self, tokens=1):
-        """Consume tokens from the bucket. Returns True if there were
-        sufficient tokens otherwise False."""
-        if tokens <= self._get_tokens():
-            self._tokens -= tokens
-            return True
-        return False
-
     def expected_time(self, tokens=1):
         """Returns the expected time in seconds when a new token should be
         available."""
-        tokens = max(tokens, self._get_tokens())
-        return (tokens - self._get_tokens()) / self.fill_rate
-
-    def _get_tokens(self):
-        if self._tokens < self.capacity:
-            now = time.time()
-            delta = self.fill_rate * (now - self.timestamp)
-            self._tokens = min(self.capacity, self._tokens + delta)
-            self.timestamp = now
-        return self._tokens
+        return self._bucket.expected_time(tokens)
 
     @property
     def items(self):
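
`TokenBucketQueue` now delegates the bucket arithmetic to `celery.datastructures.TokenBucket`. The standalone class below re-implements the algorithm for illustration only; it mirrors the removed `_get_tokens` code above and is not the celery class:

    import time

    class TokenBucket(object):
        # capacity caps the burst size; fill_rate is the sustained
        # refill in tokens/second.

        def __init__(self, fill_rate, capacity=1):
            self.capacity = float(capacity)
            self.fill_rate = float(fill_rate)
            self._tokens = self.capacity
            self._timestamp = time.time()

        def can_consume(self, tokens=1):
            if tokens <= self._refill():
                self._tokens -= tokens
                return True
            return False

        def _refill(self):
            now = time.time()
            delta = self.fill_rate * (now - self._timestamp)
            self._tokens = min(self.capacity, self._tokens + delta)
            self._timestamp = now
            return self._tokens

    bucket = TokenBucket(fill_rate=2, capacity=1)   # a "2/s" rate limit
    assert bucket.can_consume(1)        # burst: first token is available
    assert not bucket.can_consume(1)    # bucket drained, get() would raise
    time.sleep(0.5)                     # 2 tokens/s * 0.5s refills one token
    assert bucket.can_consume(1)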
     def items(self):

+ 13 - 4
celery/worker/control/__init__.py

@@ -7,18 +7,19 @@ from celery.worker.control import builtins
 
 class ControlDispatch(object):
     """Execute worker control panel commands."""
-    panel_cls = Panel
+    Panel = Panel
+    ReplyPublisher = ControlReplyPublisher
 
     def __init__(self, logger=None, hostname=None, listener=None):
         self.logger = logger or log.get_default_logger()
         self.hostname = hostname
         self.listener = listener
-        self.panel = self.panel_cls(self.logger, self.listener, self.hostname)
+        self.panel = self.Panel(self.logger, self.listener, self.hostname)
 
     @with_connection
     def reply(self, data, exchange, routing_key, connection=None,
             connect_timeout=None):
-        crq = ControlReplyPublisher(connection, exchange=exchange)
+        crq = self.ReplyPublisher(connection, exchange=exchange)
         try:
             crq.send(data, routing_key=routing_key)
         finally:
@@ -56,7 +57,15 @@ class ControlDispatch(object):
         except KeyError:
         except KeyError:
             self.logger.error("No such control command: %s" % command)
             self.logger.error("No such control command: %s" % command)
         else:
         else:
-            reply = control(self.panel, **kwdict(kwargs))
+            try:
+                reply = control(self.panel, **kwdict(kwargs))
+            except SystemExit:
+                raise
+            except Exception, exc:
+                self.logger.error(
+                        "Error running control command %s kwargs=%s: %s" % (
+                            command, kwargs, exc))
+                reply = {"error": str(exc)}
             if reply_to:
             if reply_to:
                 self.reply({self.hostname: reply},
                 self.reply({self.hostname: reply},
                            exchange=reply_to["exchange"],
                            exchange=reply_to["exchange"],

+ 54 - 17
celery/worker/control/builtins.py

@@ -1,10 +1,12 @@
 from datetime import datetime

 from celery import conf
+from celery import log
 from celery.backends import default_backend
 from celery.registry import tasks
 from celery.utils import timeutils
-from celery.worker.revoke import revoked
+from celery.worker import state
+from celery.worker.state import revoked
 from celery.worker.control.registry import Panel

 TASK_INFO_FIELDS = ("exchange", "routing_key", "rate_limit")
@@ -28,19 +30,32 @@ def revoke(panel, task_id, task_name=None, **kwargs):
 @Panel.register
 def enable_events(panel):
     dispatcher = panel.listener.event_dispatcher
-    dispatcher.enable()
-    dispatcher.send("worker-online")
-    panel.logger.warn("Events enabled by remote.")
-    return {"ok": "events enabled"}
+    if not dispatcher.enabled:
+        dispatcher.enable()
+        dispatcher.send("worker-online")
+        panel.logger.warn("Events enabled by remote.")
+        return {"ok": "events enabled"}
+    return {"ok": "events already enabled"}


 @Panel.register
 def disable_events(panel):
     dispatcher = panel.listener.event_dispatcher
-    dispatcher.send("worker-offline")
-    dispatcher.disable()
-    panel.logger.warn("Events disabled by remote.")
-    return {"ok": "events disabled"}
+    if dispatcher.enabled:
+        dispatcher.send("worker-offline")
+        dispatcher.disable()
+        panel.logger.warn("Events disabled by remote.")
+        return {"ok": "events disabled"}
+    return {"ok": "events already disabled"}
+
+
+@Panel.register
+def set_loglevel(panel, loglevel=None):
+    if loglevel is not None:
+        if not isinstance(loglevel, int):
+            loglevel = conf.LOG_LEVELS[loglevel.upper()]
+        log.get_default_logger(loglevel=loglevel)
+    return {"ok": loglevel}


 @Panel.register
@@ -83,8 +98,8 @@ def rate_limit(panel, task_name, rate_limit, **kwargs):


 @Panel.register
-def dump_schedule(panel, **kwargs):
-    schedule = panel.listener.eta_schedule
+def dump_schedule(panel, safe=False, **kwargs):
+    schedule = panel.listener.eta_schedule.schedule
     if not schedule.queue:
         panel.logger.info("--Empty schedule--")
         return []
@@ -96,20 +111,42 @@ def dump_schedule(panel, **kwargs):
     info = map(formatitem, enumerate(schedule.info()))
     panel.logger.info("* Dump of current schedule:\n%s" % (
                             "\n".join(info, )))
-    return info
+    scheduled_tasks = []
+    for item in schedule.info():
+        scheduled_tasks.append({"eta": item["eta"],
+                                "priority": item["priority"],
+                                "request": item["item"].info(safe=safe)})
+    return scheduled_tasks


 @Panel.register
-def dump_reserved(panel, **kwargs):
+def dump_reserved(panel, safe=False, **kwargs):
     ready_queue = panel.listener.ready_queue
     reserved = ready_queue.items
     if not reserved:
         panel.logger.info("--Empty queue--")
         return []
-    info = map(repr, reserved)
     panel.logger.info("* Dump of currently reserved tasks:\n%s" % (
-                            "\n".join(info, )))
-    return info
+                            "\n".join(map(repr, reserved), )))
+    return [request.info(safe=safe)
+            for request in reserved]
+
+
+@Panel.register
+def dump_active(panel, safe=False, **kwargs):
+    return [request.info(safe=safe)
+                for request in state.active_requests]
+
+
+@Panel.register
+def stats(panel, **kwargs):
+    return {"total": state.total_count,
+            "pool": panel.listener.pool.info}
+
+
+@Panel.register
+def dump_revoked(panel, **kwargs):
+    return list(state.revoked)


 @Panel.register
@@ -140,4 +177,4 @@ def ping(panel, **kwargs):
 @Panel.register
 def shutdown(panel, **kwargs):
     panel.logger.critical("Got shutdown from remote.")
-    raise SystemExit
+    raise SystemExit("Got shutdown from remote")
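
Note: set_loglevel accepts either a numeric level or a level name. A small sketch of that coercion using the stdlib logging constants (conf.LOG_LEVELS is assumed to be a comparable name-to-number mapping; its definition is not shown in this diff):

import logging

# Stand-in for conf.LOG_LEVELS: maps level names to numeric levels.
LOG_LEVELS = {"DEBUG": logging.DEBUG, "INFO": logging.INFO,
              "WARNING": logging.WARNING, "ERROR": logging.ERROR,
              "CRITICAL": logging.CRITICAL}

def coerce_loglevel(loglevel):
    # Accept both 10 and "debug", like set_loglevel above.
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    return loglevel

assert coerce_loglevel("debug") == logging.DEBUG
assert coerce_loglevel(20) == 20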

+ 0 - 7
celery/worker/control/registry.py

@@ -13,10 +13,3 @@ class Panel(UserDict):
     def register(cls, method, name=None):
         cls.data[name or method.__name__] = method
         return method
-
-    @classmethod
-    def unregister(cls, name_or_method):
-        name = name_or_method
-        if not isinstance(name_or_method, basestring):
-            name = name_or_method.__name__
-        cls.data.pop(name)
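
Note: with unregister gone, Panel.register is the entire registry API: a classmethod that stores the function in a class-level dict and returns it unchanged, so it doubles as a decorator. A minimal sketch of the pattern with a stand-in class (the real Panel is a UserDict that also carries logger/listener/hostname state):

class Registry(object):
    data = {}                      # shared, class-level command table

    @classmethod
    def register(cls, method, name=None):
        cls.data[name or method.__name__] = method
        return method              # returned unchanged, usable as @decorator

@Registry.register
def ping(panel, **kwargs):
    return "pong"

assert Registry.data["ping"] is ping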

+ 18 - 75
celery/worker/controllers.py

@@ -7,61 +7,11 @@ import time
 import threading
 from Queue import Empty as QueueEmpty

+from celery import conf
 from celery import log


-class BackgroundThread(threading.Thread):
-    """Thread running an infinite loop which for every iteration
-    calls its :meth:`on_iteration` method.
-
-    This also implements graceful shutdown of the thread by providing
-    the :meth:`stop` method.
-
-    """
-
-    def __init__(self):
-        super(BackgroundThread, self).__init__()
-        self._shutdown = threading.Event()
-        self._stopped = threading.Event()
-        self.setDaemon(True)
-
-    def run(self):
-        """This is the body of the thread.
-
-        To start the thread use :meth:`start` instead.
-
-        """
-        self.on_start()
-
-        while 1:
-            if self._shutdown.isSet():
-                break
-            self.on_iteration()
-        self._stopped.set() # indicate that we are stopped
-
-    def on_start(self):
-        """This handler is run at thread start, just before the infinite
-        loop."""
-        pass
-
-    def on_iteration(self):
-        """This is the method called for every iteration and must be
-        implemented by every subclass of :class:`BackgroundThread`."""
-        raise NotImplementedError(
-                "InfiniteThreads must implement on_iteration")
-
-    def on_stop(self):
-        """This handler is run when the thread is shutdown."""
-        pass
-
-    def stop(self):
-        """Gracefully shutdown the thread."""
-        self.on_stop()
-        self._shutdown.set()
-        self._stopped.wait() # block until this thread is done
-
-
-class Mediator(BackgroundThread):
+class Mediator(threading.Thread):
     """Thread continuously sending tasks in the queue to the pool.
     """Thread continuously sending tasks in the queue to the pool.
 
 
     .. attribute:: ready_queue
     .. attribute:: ready_queue
@@ -76,13 +26,15 @@ class Mediator(BackgroundThread):
     """
     """
 
 
     def __init__(self, ready_queue, callback, logger=None):
     def __init__(self, ready_queue, callback, logger=None):
-        super(Mediator, self).__init__()
+        threading.Thread.__init__(self)
         self.logger = logger or log.get_default_logger()
         self.logger = logger or log.get_default_logger()
         self.ready_queue = ready_queue
         self.ready_queue = ready_queue
         self.callback = callback
         self.callback = callback
+        self._shutdown = threading.Event()
+        self._stopped = threading.Event()
+        self.setDaemon(True)
 
 
-    def on_iteration(self):
-        """Get tasks from bucket queue and apply the task callback."""
+    def move(self):
         try:
         try:
             # This blocks until there's a message in the queue.
             # This blocks until there's a message in the queue.
             task = self.ready_queue.get(timeout=1)
             task = self.ready_queue.get(timeout=1)
@@ -93,26 +45,17 @@ class Mediator(BackgroundThread):
                 return
                 return
 
 
             self.logger.debug(
             self.logger.debug(
-                    "Mediator: Running callback for task: %s[%s]" % (
-                        task.task_name, task.task_id))
+                "Mediator: Running callback for task: %s[%s]" % (
+                    task.task_name, task.task_id))
             self.callback(task) # execute
             self.callback(task) # execute
 
 
+    def run(self):
+        while not self._shutdown.isSet():
+            self.move()
+        self._stopped.set() # indicate that we are stopped
 
 
-class ScheduleController(BackgroundThread):
-    """Schedules tasks with an ETA by moving them to the bucket queue."""
-
-    def __init__(self, eta_schedule, logger=None):
-        super(ScheduleController, self).__init__()
-        self.logger = logger or log.get_default_logger()
-        self._scheduler = iter(eta_schedule)
-        self.debug = log.SilenceRepeated(self.logger.debug, max_iterations=10)
-
-    def on_iteration(self):
-        """Wake-up scheduler"""
-        delay = self._scheduler.next()
-        if delay is None:
-            delay = 1
-
-        self.debug("ScheduleController: Scheduler wake-up"
-                "ScheduleController: Next wake-up eta %s seconds..." % delay)
-        time.sleep(delay)
+    def stop(self):
+        """Gracefully shutdown the thread."""
+        self._shutdown.set()
+        self._stopped.wait() # block until this thread is done
+        self.join(1e100)
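
Note: the BackgroundThread base class is gone; Mediator (and Heart in the next file) now inline the same stop protocol: the loop polls a _shutdown event, sets a _stopped event on exit, and stop() finishes with a join() carrying a huge timeout. A self-contained sketch of the pattern under Python 2 (matching this code base), with invented names:

import threading
import time
from Queue import Empty as QueueEmpty, Queue

class StoppableWorker(threading.Thread):

    def __init__(self, queue, callback):
        threading.Thread.__init__(self)
        self.queue = queue
        self.callback = callback
        self._shutdown = threading.Event()
        self._stopped = threading.Event()
        self.setDaemon(True)

    def run(self):
        while not self._shutdown.isSet():
            try:
                # Time out regularly so the shutdown flag gets rechecked.
                item = self.queue.get(timeout=1)
            except QueueEmpty:
                continue
            self.callback(item)
        self._stopped.set()        # tell stop() the loop has exited

    def stop(self):
        self._shutdown.set()
        self._stopped.wait()       # block until the loop notices
        self.join(1e100)           # huge timeout keeps join() interruptible

q = Queue()
w = StoppableWorker(q, callback=lambda item: None)
w.start()
q.put("task")
time.sleep(0.1)
w.stop()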

+ 2 - 0
celery/worker/heartbeat.py

@@ -58,3 +58,5 @@ class Heart(threading.Thread):
         self._state = "CLOSE"
         self._state = "CLOSE"
         self._shutdown.set()
         self._shutdown.set()
         self._stopped.wait() # block until this thread is done
         self._stopped.wait() # block until this thread is done
+        if self.isAlive():
+            self.join(1e100)
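
Note: the added join() mirrors the Mediator change above. On Python 2 a bare Thread.join() waits uninterruptibly, ignoring KeyboardInterrupt, whereas join() with a (very large) timeout stays responsive to signals. A tiny demonstration of the idiom:

import threading
import time

t = threading.Thread(target=time.sleep, args=(1,))
t.start()
# Behaves like a plain join(), but the main thread can still be
# interrupted (e.g. by Ctrl+C) while it waits.
t.join(1e100)
assert not t.isAlive()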

+ 115 - 55
celery/worker/job.py

@@ -1,29 +1,27 @@
-"""
-
-Jobs Executable by the Worker Server.
-
-"""
 import sys
 import time
 import socket
 import warnings

+from datetime import datetime

 from celery import conf
+from celery import log
 from celery import platform
-from celery.log import get_default_logger
-from celery.utils import noop, kwdict, fun_takes_kwargs
-from celery.utils.mail import mail_admins
-from celery.worker.revoke import revoked
-from celery.loaders import current_loader
+from celery.datastructures import ExceptionInfo
 from celery.execute.trace import TaskTrace
+from celery.loaders import current_loader
 from celery.registry import tasks
-from celery.datastructures import ExceptionInfo
+from celery.utils import noop, kwdict, fun_takes_kwargs
+from celery.utils import truncate_text, maybe_iso8601
+from celery.utils.compat import any
+from celery.utils.mail import mail_admins
+from celery.worker import state

 # pep8.py borks on a inline signature separator and
 # says "trailing whitespace" ;)
 EMAIL_SIGNATURE_SEP = "-- "
-TASK_FAIL_EMAIL_BODY = """
+TASK_ERROR_EMAIL_BODY = """
 Task %%(name)s with id %%(id)s raised exception: %%(exc)s


@@ -71,7 +69,7 @@ class WorkerTaskTrace(TaskTrace):
     :param args: List of positional args to pass on to the function.
     :param kwargs: Keyword arguments mapping to pass on to the function.

-    :returns: the function return value on success, or
+    :returns: the evaluated function's return value on success, or
         the exception instance on failure.

     """
@@ -100,10 +98,13 @@ class WorkerTaskTrace(TaskTrace):
     def execute(self):
         """Execute, trace and store the result of the task."""
         self.loader.on_task_init(self.task_id, self.task)
-        self.task.backend.process_cleanup()
         if self.task.track_started:
             self.task.backend.mark_as_started(self.task_id)
-        return super(WorkerTaskTrace, self).execute()
+        try:
+            return super(WorkerTaskTrace, self).execute()
+        finally:
+            self.task.backend.process_cleanup()
+            self.loader.on_process_cleanup()

     def handle_success(self, retval, *args):
         """Handle successful execution."""
@@ -128,6 +129,13 @@


 def execute_and_trace(task_name, *args, **kwargs):
+    """This is a pickleable method used as a target when applying to pools.
+
+    It's the same as::
+
+        >>> WorkerTaskTrace(task_name, *args, **kwargs).execute_safe()
+
+    """
     platform.set_mp_process_title("celeryd", info=task_name)
     try:
         return WorkerTaskTrace(task_name, *args, **kwargs).execute_safe()
@@ -182,52 +190,60 @@ class TaskRequest(object):
         Set to ``True`` if the task has been acknowledged.

     """
+    # Logging output
     success_msg = "Task %(name)s[%(id)s] processed: %(return_value)s"
-    fail_msg = """
+    error_msg = """
         Task %(name)s[%(id)s] raised exception: %(exc)s\n%(traceback)s
     """
-    fail_email_subject = """
+
+    # E-mails
+    email_subject = """
         [celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s
     """
-    fail_email_body = TASK_FAIL_EMAIL_BODY
+    email_body = TASK_ERROR_EMAIL_BODY
+
+    # Internal flags
     executed = False
     acknowledged = False
     time_start = None
+    _already_revoked = False

     def __init__(self, task_name, task_id, args, kwargs,
-            on_ack=noop, retries=0, delivery_info=None, hostname=None, **opts):
+            on_ack=noop, retries=0, delivery_info=None, hostname=None,
+            email_subject=None, email_body=None, logger=None,
+            eventer=None, eta=None, expires=None, **opts):
         self.task_name = task_name
         self.task_id = task_id
         self.retries = retries
         self.args = args
         self.kwargs = kwargs
+        self.eta = eta
+        self.expires = expires
         self.on_ack = on_ack
         self.delivery_info = delivery_info or {}
-        self.task = tasks[self.task_name]
         self.hostname = hostname or socket.gethostname()
-        self._already_revoked = False
-
-        for opt in ("success_msg", "fail_msg", "fail_email_subject",
-                    "fail_email_body", "logger", "eventer"):
-            setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
+        self.logger = logger or log.get_default_logger()
+        self.eventer = eventer
+        self.email_subject = email_subject or self.email_subject
+        self.email_body = email_body or self.email_body

-        if not self.logger:
-            self.logger = get_default_logger()
+        self.task = tasks[self.task_name]

-    def __repr__(self):
-        return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (
-                self.__class__.__name__,
-                self.task_name, self.task_id,
-                self.args, self.kwargs)
+    def maybe_expire(self):
+        if self.expires and datetime.now() > self.expires:
+            state.revoked.add(self.task_id)
+            self.task.backend.mark_as_revoked(self.task_id)
 
 
     def revoked(self):
         if self._already_revoked:
             return True
-        if self.task_id in revoked:
+        if self.expires:
+            self.maybe_expire()
+        if self.task_id in state.revoked:
             self.logger.warn("Skipping revoked task: %s[%s]" % (
                 self.task_name, self.task_id))
             self.send_event("task-revoked", uuid=self.task_id)
-            self.on_ack()
+            self.acknowledge()
             self._already_revoked = True
             return True
         return False
@@ -241,7 +257,7 @@ class TaskRequest(object):
         :raises UnknownTaskError: if the message does not describe a task,
             the message is also rejected.

-        :returns: :class:`TaskRequest` instance.
+        :returns :class:`TaskRequest`:
 
 
         """
         """
         task_name = message_data["task"]
         task_name = message_data["task"]
@@ -249,6 +265,8 @@ class TaskRequest(object):
         args = message_data["args"]
         args = message_data["args"]
         kwargs = message_data["kwargs"]
         kwargs = message_data["kwargs"]
         retries = message_data.get("retries", 0)
         retries = message_data.get("retries", 0)
+        eta = maybe_iso8601(message_data.get("eta"))
+        expires = maybe_iso8601(message_data.get("expires"))

         _delivery_info = getattr(message, "delivery_info", {})
         delivery_info = dict((key, _delivery_info.get(key))
@@ -261,7 +279,8 @@
         return cls(task_name, task_id, args, kwdict(kwargs),
                    retries=retries, on_ack=message.ack,
                    delivery_info=delivery_info, logger=logger,
-                   eventer=eventer, hostname=hostname)
+                   eventer=eventer, hostname=hostname,
+                   eta=eta, expires=expires)

     def extend_with_default_kwargs(self, loglevel, logfile):
         """Extend the tasks keyword arguments with standard task arguments.
@@ -335,8 +354,6 @@

         :keyword logfile: The logfile used by the task.

-        :returns :class:`multiprocessing.AsyncResult` instance.
-
         """
         if self.revoked():
             return
@@ -352,6 +369,7 @@ class TaskRequest(object):
         return result

     def on_accepted(self):
+        state.task_accepted(self)
         if not self.task.acks_late:
             self.acknowledge()
         self.send_event("task-started", uuid=self.task_id)
@@ -359,6 +377,7 @@ class TaskRequest(object):
             self.task_name, self.task_id))

     def on_timeout(self, soft):
+        state.task_ready(self)
         if soft:
             self.logger.warning("Soft time limit exceeded for %s[%s]" % (
                 self.task_name, self.task_id))
@@ -374,6 +393,7 @@ class TaskRequest(object):
     def on_success(self, ret_value):
         """The handler used if the task was successfully processed (
         without raising an exception)."""
+        state.task_ready(self)

         if self.task.acks_late:
             self.acknowledge()
@@ -385,11 +405,17 @@
         msg = self.success_msg.strip() % {
                 "id": self.task_id,
                 "name": self.task_name,
-                "return_value": ret_value}
+                "return_value": self.repr_result(ret_value)}
         self.logger.info(msg)

+    def repr_result(self, result, maxlen=46):
+        # 46 is the length needed to fit
+        #     "the quick brown fox jumps over the lazy dog" :)
+        return truncate_text(repr(result), maxlen)
+
     def on_failure(self, exc_info):
         """The handler used if the task raised an exception."""
+        state.task_ready(self)

         if self.task.acks_late:
             self.acknowledge()
                                        exception=repr(exc_info.exception),
                                        traceback=exc_info.traceback)

 
-        context = {
-            "hostname": self.hostname,
-            "id": self.task_id,
-            "name": self.task_name,
-            "exc": repr(exc_info.exception),
-            "traceback": unicode(exc_info.traceback, 'utf-8'),
-            "args": self.args,
-            "kwargs": self.kwargs,
-        }
-        self.logger.error(self.fail_msg.strip() % context)
+        context = {"hostname": self.hostname,
+                   "id": self.task_id,
+                   "name": self.task_name,
+                   "exc": repr(exc_info.exception),
+                   "traceback": unicode(exc_info.traceback, 'utf-8'),
+                   "args": self.args,
+                   "kwargs": self.kwargs}
+        self.logger.error(self.error_msg.strip() % context)
 
 
         task_obj = tasks.get(self.task_name, object)
-        send_error_email = conf.CELERY_SEND_TASK_ERROR_EMAILS and not \
-                                task_obj.disable_error_emails
-        if send_error_email:
-            subject = self.fail_email_subject.strip() % context
-            body = self.fail_email_body.strip() % context
-            mail_admins(subject, body, fail_silently=True)
+        self.send_error_email(task_obj, context, exc_info.exception,
+                              enabled=conf.CELERY_SEND_TASK_ERROR_EMAILS,
+                              whitelist=conf.CELERY_TASK_ERROR_WHITELIST)
+
+    def send_error_email(self, task, context, exc,
+            whitelist=None, enabled=False, fail_silently=True):
+        if enabled and not task.disable_error_emails:
+            if whitelist:
+                if not isinstance(exc, tuple(whitelist)):
+                    return
+            subject = self.email_subject.strip() % context
+            body = self.email_body.strip() % context
+            return mail_admins(subject, body, fail_silently=fail_silently)
+
+    def __repr__(self):
+        return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (
+                self.__class__.__name__,
+                self.task_name, self.task_id,
+                self.args, self.kwargs)
+
+    def info(self, safe=False):
+        args = self.args
+        kwargs = self.kwargs
+        if not safe:
+            args = repr(args)
+            kwargs = repr(self.kwargs)
+
+        return {"id": self.task_id,
+                "name": self.task_name,
+                "args": args,
+                "kwargs": kwargs,
+                "hostname": self.hostname,
+                "time_start": self.time_start,
+                "acknowledged": self.acknowledged,
+                "delivery_info": self.delivery_info}
+
+    def shortinfo(self):
+        return "%s[%s]%s%s" % (
+                    self.task_name,
+                    self.task_id,
+                    self.eta and " eta:[%s]" % (self.eta, ) or "",
+                    self.expires and " expires:[%s]" % (self.expires, ) or "")
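
Note: eta and expires travel in the task message as ISO-8601 strings and are parsed back with maybe_iso8601 in from_message above; an expired request is added to the revoked set instead of being executed. A small sketch of the expiry decision (this parse_iso8601 stand-in only handles the basic format; the real maybe_iso8601 from celery.utils is not shown in this diff):

from datetime import datetime

def parse_iso8601(value):
    # Simplified stand-in for celery.utils.maybe_iso8601: None passes
    # through, anything else is parsed (basic format only).
    if value is None:
        return None
    return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")

def is_expired(expires):
    # Mirrors TaskRequest.maybe_expire: an expires stamp in the past
    # means the task is revoked rather than executed.
    return expires is not None and datetime.now() > expires

assert is_expired(parse_iso8601("2010-01-01T12:00:00"))  # long past
assert not is_expired(parse_iso8601(None))               # no expiry set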

Some files were not shown because too many files changed in this diff