
Merge branch 'master' into kombuRPC

Conflicts:
	celery/app/amqp.py
	celery/app/task.py
	celery/backends/base.py
Ask Solem
parent commit 2d9a1d342c
100 changed files with 2640 additions and 7246 deletions
  1. .travis.yml (+0, -1)
  2. CONTRIBUTORS.txt (+13, -0)
  3. Changelog (+328, -5204)
  4. README.rst (+151, -180)
  5. celery/__compat__.py (+4, -2)
  6. celery/__init__.py (+33, -8)
  7. celery/__main__.py (+24, -0)
  8. celery/_state.py (+26, -0)
  9. celery/app/__init__.py (+55, -0)
  10. celery/app/abstract.py (+1, -2)
  11. celery/app/amqp.py (+31, -13)
  12. celery/app/annotations.py (+2, -2)
  13. celery/app/base.py (+74, -34)
  14. celery/app/builtins.py (+129, -71)
  15. celery/app/control.py (+18, -11)
  16. celery/app/defaults.py (+15, -20)
  17. celery/app/log.py (+9, -7)
  18. celery/app/registry.py (+11, -3)
  19. celery/app/routes.py (+2, -2)
  20. celery/app/task.py (+39, -57)
  21. celery/app/utils.py (+33, -26)
  22. celery/apps/beat.py (+19, -19)
  23. celery/apps/worker.py (+87, -157)
  24. celery/backends/__init__.py (+3, -3)
  25. celery/backends/amqp.py (+5, -14)
  26. celery/backends/base.py (+71, -130)
  27. celery/backends/cache.py (+14, -9)
  28. celery/backends/cassandra.py (+4, -4)
  29. celery/backends/database/__init__.py (+2, -2)
  30. celery/backends/database/a805d4bd.py (+0, -71)
  31. celery/backends/database/dfd042c7.py (+0, -50)
  32. celery/backends/database/models.py (+3, -8)
  33. celery/backends/mongodb.py (+2, -2)
  34. celery/backends/redis.py (+2, -2)
  35. celery/beat.py (+40, -15)
  36. celery/bin/base.py (+66, -17)
  37. celery/bin/camqadm.py (+19, -20)
  38. celery/bin/celery.py (+161, -99)
  39. celery/bin/celerybeat.py (+1, -2)
  40. celery/bin/celeryd.py (+33, -26)
  41. celery/bin/celeryd_detach.py (+7, -8)
  42. celery/bin/celeryd_multi.py (+44, -42)
  43. celery/bin/celeryev.py (+2, -3)
  44. celery/canvas.py (+62, -37)
  45. celery/concurrency/__init__.py (+1, -1)
  46. celery/concurrency/base.py (+5, -3)
  47. celery/concurrency/eventlet.py (+23, -2)
  48. celery/concurrency/gevent.py (+34, -5)
  49. celery/concurrency/processes.py (+5, -12)
  50. celery/concurrency/processes/_win.py (+0, -116)
  51. celery/concurrency/threads.py (+6, -0)
  52. celery/contrib/batches.py (+3, -3)
  53. celery/contrib/migrate.py (+292, -26)
  54. celery/contrib/rdb.py (+30, -15)
  55. celery/datastructures.py (+23, -17)
  56. celery/events/__init__.py (+15, -25)
  57. celery/events/cursesmon.py (+25, -23)
  58. celery/events/dumper.py (+11, -14)
  59. celery/events/snapshot.py (+2, -3)
  60. celery/events/state.py (+36, -29)
  61. celery/exceptions.py (+26, -5)
  62. celery/loaders/__init__.py (+2, -2)
  63. celery/loaders/base.py (+18, -5)
  64. celery/loaders/default.py (+10, -10)
  65. celery/local.py (+62, -223)
  66. celery/platforms.py (+80, -44)
  67. celery/result.py (+41, -27)
  68. celery/schedules.py (+59, -23)
  69. celery/security/__init__.py (+0, -1)
  70. celery/security/certificate.py (+6, -7)
  71. celery/security/key.py (+2, -3)
  72. celery/security/serialization.py (+4, -4)
  73. celery/security/utils.py (+3, -3)
  74. celery/signals.py (+2, -1)
  75. celery/states.py (+7, -0)
  76. celery/task/__init__.py (+21, -1)
  77. celery/task/base.py (+9, -11)
  78. celery/task/http.py (+7, -7)
  79. celery/task/sets.py (+6, -7)
  80. celery/task/trace.py (+51, -31)
  81. celery/tests/__init__.py (+1, -3)
  82. celery/tests/app/test_amqp.py (+8, -9)
  83. celery/tests/app/test_app.py (+25, -20)
  84. celery/tests/app/test_beat.py (+5, -6)
  85. celery/tests/app/test_builtins.py (+1, -1)
  86. celery/tests/app/test_control.py (+0, -1)
  87. celery/tests/app/test_defaults.py (+1, -2)
  88. celery/tests/app/test_loaders.py (+3, -4)
  89. celery/tests/app/test_log.py (+0, -2)
  90. celery/tests/app/test_routes.py (+1, -2)
  91. celery/tests/backends/test_amqp.py (+6, -17)
  92. celery/tests/backends/test_backends.py (+0, -1)
  93. celery/tests/backends/test_base.py (+12, -54)
  94. celery/tests/backends/test_cache.py (+1, -2)
  95. celery/tests/backends/test_cassandra.py (+0, -1)
  96. celery/tests/backends/test_database.py (+2, -22)
  97. celery/tests/backends/test_mongodb.py (+0, -1)
  98. celery/tests/backends/test_redis.py (+0, -1)
  99. celery/tests/bin/test_base.py (+2, -4)
  100. celery/tests/bin/test_camqadm.py (+0, -1)

+ 0 - 1
.travis.yml

@@ -1,6 +1,5 @@
 language: python
 python:
-    - 2.5
     - 2.6
     - 2.7
 install:

+ 13 - 0
CONTRIBUTORS.txt

@@ -106,3 +106,16 @@ Dimitrios Kouzis-Loukas, 2012/06/13
 Steven Skoczen, 2012/06/17
 Loren Abrams, 2012/06/19
 Eran Rundstein, 2012/06/24
+John Watson, 2012/06/27
+Matt Long, 2012/07/04
+David Markey, 2012/07/05
+Jared Biel, 2012/07/05
+Jed Smith, 2012/07/08
+Łukasz Langa, 2012/07/10
+Rinat Shigapov, 2012/07/20
+Hynek Schlawack, 2012/07/23
+Paul McMillan, 2012/07/26
+Mitar, 2012/07/28
+Adam DePue, 2012/08/22
+Thomas Meson, 2012/08/28
+Daniel Lundin, 2012/08/30 

+ 328 - 5204
Changelog

@@ -1,3 +1,5 @@
+.. _changelog:
+
 ================
  Change history
 ================
@@ -5,5425 +7,547 @@
 .. contents::
     :local:

-.. _version-2.6.0:
+If you're looking for versions prior to 3.x you should see :ref:`history`.
+
+.. _version-3.1.0:

-2.6.0
+3.1.0
 =====
-:status: FROZEN
+:state: DEVEL
 :branch: master

-See :ref:`whatsnew-2.6`.
+- `celery inspect stats` now contains worker pid
+- `Task.apply_async` now supports timeout and soft_timeout arguments (Issue #802)
+- `App.control.Inspect.conf` can be used for inspecting worker configuration
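
A minimal sketch of the new ``apply_async`` arguments listed above (the
``add`` task and broker URL are placeholders, not part of this commit):

.. code-block:: python

    from celery import Celery

    celery = Celery(broker='amqp://guest@localhost//')

    @celery.task
    def add(x, y):
        return x + y

    # per-call hard and soft time limits, per the 3.1.0 entry above
    result = add.apply_async((2, 2), timeout=60, soft_timeout=30)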

-.. _version-2.5.2:
+.. _version-3.0.9:

-2.5.2
+3.0.9
 =====
-:release-date: 2012-04-13 04:30 P.M GMT
-
-.. _v252-news:
-
-News
-----
-
-- Now depends on Kombu 2.1.5.
-
-- Django documentation has been moved to the main Celery docs.
-
-    See :ref:`django`.
-
-- New :signal:`celeryd_init` signal can be used to configure workers
-  by hostname.
-
-- Signal.connect can now be used as a decorator.
-
-    Example:
-
-    .. code-block:: python
-
-        from celery.signals import task_sent
-
-        @task_sent.connect
-        def on_task_sent(**kwargs):
-            print("sent task: %r" % (kwargs, ))
-
-- Invalid task messages are now rejected instead of acked.
-
-    This means that they will be moved to the dead-letter queue
-    introduced in the latest RabbitMQ version (but must be enabled
-    manually, consult the RabbitMQ documentation).
+:release-date: 2012-08-31 06:00 P.M BST

-- Internal logging calls has been cleaned up to work
-  better with tools like Sentry.
+- Important note for users of Django and the database scheduler!

-    Contributed by David Cramer.
+    Recently a timezone issue has been fixed for periodic tasks,
+    but erroneous timezones could have already been stored in the
+    database, so for the fix to work you need to reset
+    the ``last_run_at`` fields.

-- New method ``subtask.clone()`` can be used to clone an existing
-  subtask with augmented arguments/options.
+    You can do this by executing the following command:

-    Example:
+    .. code-block:: bash

-    .. code-block:: python
-
-        >>> s = add.subtask((5, ))
-        >>> new = s.clone(args=(10, ), countdown=5})
-        >>> new.args
-        (10, 5)
-
-        >>> new.options
-        {"countdown": 5}
-
-- Chord callbacks are now triggered in eager mode.
-
-.. _v252-fixes:
-
-Fixes
------
-
-- Programs now verifies that the pidfile is actually written correctly
-  (Issue #641).
-
-    Hopefully this will crash the worker immediately if the system
-    is out of space to store the complete pidfile.
+        $ python manage.py shell
+        >>> from djcelery.models import PeriodicTask
+        >>> PeriodicTask.objects.update(last_run_at=None)

-    In addition, we now verify that existing pidfiles contain
-    a new line so that a partially written pidfile is detected as broken,
-    as before doing:
+    You also have to do this if you change the timezone or
+    :setting:`CELERY_ENABLE_UTC` setting.

-        echo -n "1" > celeryd.pid
+- Note about the :setting:`CELERY_ENABLE_UTC` setting.

-    would cause celeryd to think that an existing instance was already
-    running (init has pid 1 after all).
+    If you previously disabled this just to force periodic tasks to work with
+    your timezone, then you are now *encouraged to re-enable it*.

-- Fixed 2.5 compatibility issue with use of print_exception.
+- Now depends on Kombu 2.4.5 which fixes PyPy + Jython installation.

-    Fix contributed by Martin Melin.
+- Fixed bug with timezones when :setting:`CELERY_ENABLE_UTC` is disabled
+  (Issue #952).

-- Fixed 2.5 compatibility issue with imports.
+- Fixed a typo in the celerybeat upgrade mechanism (Issue #951).

-    Fix contributed by Iurii Kriachko.
+- Make sure the `exc_info` argument to logging is resolved (Issue #899).

-- All programs now fix up ``__package__`` when called as main.
+- Fixed problem with Python 3.2 and thread join timeout overflow (Issue #796).

-    This fixes compatibility with Python 2.5.
+- A test case was occasionally broken for Python 2.5.

-    Fix contributed by Martin Melin.
+- Unit test suite now passes for PyPy 1.9.

-- celeryctl can now be configured on the command line.
+- App instances now supports the with statement.

-    Like with celeryd it is now possible to configure celery settings
-    on the command line for celeryctl::
+    This calls the new :meth:`~celery.Celery.close` method at exit, which
+    cleans up after the app like closing pool connections.

-        $ celeryctl -- broker.pool_limit=30
+    Note that this is only necessary when dynamically creating apps,
+    e.g. for "temporary" apps.
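
A minimal sketch of the with-statement support described above (the app
name and broker URL are placeholders):

.. code-block:: python

    from celery import Celery

    with Celery('tmp', broker='amqp://guest@localhost//') as app:
        ...  # use the temporary app here
    # on exit, app.close() has been called and pool connections released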

-- Version dependency for python-dateutil fixed to be strict.
+- Support for piping a subtask to a chain.

-    Fix contributed by Thomas Meson.
-
-- ``Task.__call__`` is now optimized away in the task tracer
-  rather than when the task class is created.
+    For example:

-    This fixes a bug where a custom __call__  may mysteriously disappear.
+    .. code-block:: python

-- Autoreload's inotify support has been improved.
+        pipe = sometask.s() | othertask.s()
+        new_pipe = mytask.s() | pipe

-    Contributed by Mher Movsisyan.
+    Contributed by Steeve Morin.

-- The Django broker documentation has been improved.
+- Fixed problem with group results on non-pickle serializers.

-- Removed confusing warning at top of routing user guide.
+    Fix contributed by Steeve Morin.

-.. _version-2.5.1:
+.. _version-3.0.8:

-2.5.1
+3.0.8
 =====
-:release-date: 2012-03-01 01:00 P.M GMT
-:by: Ask Solem
-
-.. _v251-fixes:
-
-Fixes
------
-
-* Eventlet/Gevent: A small typo caused celeryd to hang when eventlet/gevent
-  was used, this was because the environment was not monkey patched
-  early enough.
+:release-date: 2012-08-29 05:00 P.M BST

-* Eventlet/Gevent: Another small typo caused the mediator to be started
-  with eventlet/gevent, which would make celeryd sometimes hang at shutdown.
+- Now depends on Kombu 2.4.4

-* Mulitprocessing: Fixed an error occurring if the pool was stopped
-  before it was properly started.
+- Fixed problem with amqplib and receiving larger message payloads
+  (Issue #922).

-* Proxy objects now redirects ``__doc__`` and ``__name__`` so ``help(obj)``
-  works.
+    The problem would manifest itself as either the worker hanging,
+    or occasionally a ``Framing error`` exception appearing.

-* Internal timer (timer2) now logs exceptions instead of swallowing them
-  (Issue #626).
+    Users of the new ``pyamqp://`` transport must upgrade to
+    :mod:`amqp` 0.9.3.
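
A sketch of the upgrade step for ``pyamqp://`` users (the version pin
simply mirrors the minimum stated above):

.. code-block:: bash

    $ pip install -U 'amqp>=0.9.3'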

-* celeryctl shell: can now be started with :option:`--eventlet` or
-  :option:`--gevent` options to apply their monkey patches.
+- Beat: Fixed another timezone bug with interval and crontab schedules
+  (Issue #943).

-.. _version-2.5.0:
+- Beat: The schedule file is now automatically cleared if the timezone
+  is changed.

-2.5.0
-=====
-:release-date: 2012-02-24 04:00 P.M GMT
-:by: Ask Solem
-
-See :ref:`whatsnew-2.5`.
-
-Since the changelog has gained considerable size, we decided to
-do things differently this time: by having separate "what's new"
-documents for major version changes.
+    The schedule is also cleared when you upgrade to 3.0.8 from an earlier
+    version, this to register the initial timezone info.

-Bugfix releases will still be found in the changelog.
+- Events: The :event:`worker-heartbeat` event now includes processed and active
+  count fields.

-.. _version-2.4.5:
-
-2.4.5
-=====
-:release-date: 2011-12-02 05:00 P.M GMT
-:by: Ask Solem
+    Contributed by Mher Movsisyan.

-* Periodic task interval schedules were accidentally rounded down,
-  resulting in some periodic tasks being executed early.
+- Fixed error with error email and new task classes (Issue #931).

-* Logging of humanized times in the celerybeat log is now more detailed.
+- ``BaseTask.__call__`` is no longer optimized away if it has been monkey
+  patched.

-* New :ref:`brokers` section in the Getting Started part of the Documentation
+- Fixed shutdown issue when using gevent (Issue #911 & Issue #936).

-    This replaces the old :ref:`tut-otherqueues` tutorial, and adds
-    documentation for MongoDB, Beanstalk and CouchDB.
+    Fix contributed by Thomas Meson.

-.. _version-2.4.4:
+.. _version-3.0.7:

-2.4.4
+3.0.7
 =====
-:release-date: 2011-11-25 16:00 P.M GMT
-:by: Ask Solem
-
-.. _v244-security-fixes:
-
-Security Fixes
---------------
-
-* [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
-  :program:`celeryd-multi`, :program:`celeryd_detach`,
-  :program:`celerybeat` and :program:`celeryev` were used.
-
-  This means privileges weren't properly dropped, and that it would
-  be possible to regain supervisor privileges later.
-
-
-.. _`CELERYSA-0001`:
-    http://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt
+:release-date: 2012-08-24 05:00 P.M BST

-.. _v244-fixes:
+- Fixes several problems with periodic tasks and timezones (Issue #937).

-Fixes
------
+- Now depends on kombu 2.4.2

-* Processes pool: Fixed rare deadlock at shutdown (Issue #523).
+    - Redis: Fixes a race condition crash

-    Fix contributed by Ionel Maries Christian.
+    - Fixes an infinite loop that could happen when retrying establishing
+      the broker connection.

-* Webhook tasks issued the wrong HTTP POST headers (Issue #515).
+- Daemons now redirect standard file descriptors to :file:`/dev/null`

-    The *Content-Type* header has been changed from
-    ``application/json`` ⇒  ``application/x-www-form-urlencoded``,
-    and adds a proper *Content-Length* header.
+    By default the standard outs are also redirected
+    to the logger instead; you can disable this by changing
+    the :setting:`CELERY_REDIRECT_STDOUTS` setting.
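
A configuration sketch: keeping the :file:`/dev/null` redirection while
disabling the logger redirection mentioned above:

.. code-block:: python

    CELERY_REDIRECT_STDOUTS = False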

-    Fix contributed by Mitar.
+- Fixes possible problems when eventlet/gevent is patched too late.

-* Daemonization tutorial: Adds a configuration example using Django and
-  virtualenv together (Issue #505).
+- ``LoggingProxy`` no longer defines ``fileno()`` (Issue #928).

-    Contributed by Juan Ignacio Catalano.
+- Results are now ignored for the chord unlock task.

-* generic init scripts now automatically creates log and pid file
-  directories (Issue #545).
+    Fix contributed by Steeve Morin.

-    Contributed by Chris Streeter.
+- Cassandra backend now works if result expiry is disabled.

-.. _version-2.4.3:
+    Fix contributed by Steeve Morin.

-2.4.3
-=====
-:release-date: 2011-11-22 18:00 P.M GMT
-:by: Ask Solem
-
-* Fixes module import typo in `celeryctl` (Issue #538).
-
-    Fix contributed by Chris Streeter.
-
-.. _version-2.4.2:
-
-2.4.2
-=====
-:release-date: 2011-11-14 12:00 P.M GMT
-:by: Ask Solem
-
-* Program module no longer uses relative imports so that it is
-  possible to do ``python -m celery.bin.name``.
-
-.. _version-2.4.1:
+- The traceback object is now passed to signal handlers instead
+  of the string representation.

-2.4.1
-=====
-:release-date: 2011-11-07 06:00 P.M GMT
-:by: Ask Solem
+    Fix contributed by Adam DePue.

-* celeryctl inspect commands was missing output.
+- Celery command: Extensions are now sorted by name.

-* processes pool: Decrease polling interval for less idle CPU usage.
+- A regression caused the :event:`task-failed` event to be sent
+  with the exception object instead of its string representation.

-* processes pool: MaybeEncodingError was not wrapped in ExceptionInfo
-  (Issue #524).
+- The worker daemon would try to create the pid file before daemonizing
+  to catch errors, but this file was not immediately released (Issue #923).

-* celeryd: would silence errors occuring after task consumer started.
+- Fixes Jython compatibility.

-* logging: Fixed a bug where unicode in stdout redirected log messages
-  couldn't be written (Issue #522).
+- ``billiard.forking_enable`` was called by all pools not just the
+  processes pool, which would result in a useless warning if the billiard
+  C extensions were not installed.

-.. _version-2.4.0:
+.. _version-3.0.6:

-2.4.0
+3.0.6
 =====
-:release-date: 2011-11-04 04:00 P.M GMT
-:by: Ask Solem
-
-.. _v240-important:
-
-Important Notes
----------------
-
-* Now supports Python 3.
-
-* Fixed deadlock in worker process handling (Issue #496).
-
-    A deadlock could occur after spawning new child processes because
-    the logging library's mutex was not properly reset after fork.
-
-    The symptoms of this bug affecting would be that the worker simply
-    stops processing tasks, as none of the workers child processes
-    are functioning.  There was a greater chance of this bug occurring
-    with ``maxtasksperchild`` or a time-limit enabled.
-
-    This is a workaround for http://bugs.python.org/issue6721#msg140215.
-
-    Be aware that while this fixes the logging library lock,
-    there could still be other locks initialized in the parent
-    process, introduced by custom code.
-
-    Fix contributed by Harm Verhagen.
-
-* AMQP Result backend: Now expires results by default.
-
-    The default expiration value is now taken from the
-    :setting:`CELERY_TASK_RESULT_EXPIRES` setting.
-
-    The old :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting has been
-    deprecated and will be removed in version 3.0.
-
-    Note that this means that the result backend requires RabbitMQ 1.1.0 or
-    higher, and that you have to disable expiration if you are running
-    with an older version.  You can do so by disabling the
-    :setting:`CELERY_TASK_RESULT_EXPIRES` setting::
-
-        CELERY_TASK_RESULT_EXPIRES = None
-
-* Eventlet: Fixed problem with shutdown (Issue #457).
-
-* Broker transports can be now be specified using URLs
-
-    The broker can now be specified as an URL instead.
-    This URL must have the format::
-
-        transport://user:password@hostname:port/virtual_host
-
-    for example the default broker is written as::
-
-        amqp://guest:guest@localhost:5672//
-
-    The scheme is required, so that the host is identified
-    as an URL and not just a host name.
-    User, password, port and virtual_host are optional and
-    defaults to the particular transports default value.
-
-    .. note::
-
-        Note that the path component (virtual_host) always starts with a
-        forward-slash.  This is necessary to distinguish between the virtual
-        host ``''`` (empty) and ``'/'``, which are both acceptable virtual
-        host names.
-
-        A virtual host of ``'/'`` becomes:
-
-            amqp://guest:guest@localhost:5672//
-
-        and a virtual host of ``''`` (empty) becomes::
-
-            amqp://guest:guest@localhost:5672/
-
-        So the leading slash in the path component is **always required**.
-
-    In addition the :setting:`BROKER_URL` setting has been added as an alias
-    to ``BROKER_HOST``.  Any broker setting specified in both the URL and in
-    the configuration will be ignored, if a setting is not provided in the URL
-    then the value from the configuration will be used as default.
-
-    Also, programs now support the :option:`-b|--broker` option to specify
-    a broker URL on the command line::
-
-        $ celeryd -b redis://localhost
-
-        $ celeryctl -b amqp://guest:guest@localhost//e
-
-    The environment variable :envvar:`CELERY_BROKER_URL` can also be used to
-    easily override the default broker used.
-
-* The deprecated :func:`celery.loaders.setup_loader` function has been removed.
-
-* The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been replaced
-  by a more flexible approach (Issue #447).
-
-    The error mail sending logic is now available as ``Task.ErrorMail``,
-    with the implementation (for reference) in :mod:`celery.utils.mail`.
-
-    The error mail class can be sub-classed to gain complete control
-    of when error messages are sent, thus removing the need for a separate
-    white-list setting.
-
-    The :setting:`CELERY_TASK_ERROR_WHITELIST` setting has been deprecated,
-    and will be removed completely in version 3.0.
-
-* Additional Deprecations
-
-    The following functions has been deprecated and is scheduled for removal in
-    version 3.0:
-
-    =====================================  ===================================
-    **Old function**                       **Alternative**
-    =====================================  ===================================
-    `celery.loaders.current_loader`        `celery.current_app.loader`
-    `celery.loaders.load_settings`         `celery.current_app.conf`
-    `celery.execute.apply`                 `Task.apply`
-    `celery.execute.apply_async`           `Task.apply_async`
-    `celery.execute.delay_task`            `celery.execute.send_task`
-    =====================================  ===================================
-
-    The following settings has been deprecated and is scheduled for removal
-    in version 3.0:
-
-    =====================================  ===================================
-    **Old setting**                        **Alternative**
-    =====================================  ===================================
-    `CELERYD_LOG_LEVEL`                    ``celeryd --loglevel=``
-    `CELERYD_LOG_FILE`                     ``celeryd --logfile=``
-    `CELERYBEAT_LOG_LEVEL`                 ``celerybeat --loglevel=``
-    `CELERYBEAT_LOG_FILE`                  ``celerybeat --logfile=``
-    `CELERYMON_LOG_LEVEL`                  ``celerymon --loglevel=``
-    `CELERYMON_LOG_FILE`                   ``celerymon --logfile=``
-    =====================================  ===================================
-
-.. _v240-news:
-
-News
-----
-
-* No longer depends on :mod:`pyparsing`.
-
-* Now depends on Kombu 1.4.3.
-
-* CELERY_IMPORTS can now be a scalar value (Issue #485).
-
-    It is too easy to forget to add the comma after the sole element of a
-    tuple, and this is something that often affects newcomers.
-
-    The docs should probably use a list in examples, as using a tuple
-    for this doesn't even make sense.  Nonetheless, there are many
-    tutorials out there using a tuple, and this change should be a help
-    to new users.
-
-    Suggested by jsaxon-cars.
-
-* Fixed a memory leak when using the thread pool (Issue #486).
-
-    Contributed by Kornelijus Survila.
-
-* The statedb was not saved at exit.
-
-    This has now been fixed and it should again remember previously
-    revoked tasks when a ``--statedb`` is enabled.
-
-* Adds :setting:`EMAIL_USE_TLS` to enable secure SMTP connections
-  (Issue #418).
-
-    Contributed by Stefan Kjartansson.
-
-* Now handles missing fields in task messages as documented in the message
-  format documentation.
-
-    * Missing required field throws :exc:`~@InvalidTaskError`
-    * Missing args/kwargs is assumed empty.
-
-    Contributed by Chris Chamberlin.
-
-* Fixed race condition in celery.events.state (celerymon/celeryev)
-  where task info would be removed while iterating over it (Issue #501).
-
-* The Cache, Cassandra, MongoDB, Redis and Tyrant backends now respects
-  the :setting:`CELERY_RESULT_SERIALIZER` setting (Issue #435).
-
-    This means that only the database (django/sqlalchemy) backends
-    currently does not support using custom serializers.
-
-    Contributed by Steeve Morin
-
-* Logging calls no longer manually formats messages, but delegates
-  that to the logging system, so tools like Sentry can easier
-  work with the messages (Issue #445).
-
-    Contributed by Chris Adams.
+:release-date: 2012-08-17 11:00 P.M BST

-* ``celeryd_multi`` now supports a ``stop_verify`` command to wait for
-  processes to shutdown.
+- Now depends on kombu 2.4.0

-* Cache backend did not work if the cache key was unicode (Issue #504).
+- Now depends on billiard 2.7.3.12

-    Fix contributed by Neil Chintomby.
+- Redis: Celery now tries to restore messages whenever there are no messages
+  in the queue.

-* New setting :setting:`CELERY_RESULT_DB_SHORT_LIVED_SESSIONS` added,
-  which if enabled will disable the caching of SQLAlchemy sessions
-  (Issue #449).
+- Crontab schedules now properly respects :setting:`CELERY_TIMEZONE` setting.

-    Contributed by Leo Dirac.
+    It's important to note that crontab schedules uses UTC time by default
+    unless this setting is set.

-* All result backends now implements ``__reduce__`` so that they can
-  be pickled (Issue #441).
+    Issue #904 and django-celery #150.
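
A configuration sketch (the timezone and task name are placeholders):

.. code-block:: python

    from celery.schedules import crontab

    CELERY_TIMEZONE = 'Europe/Oslo'
    CELERYBEAT_SCHEDULE = {
        'nightly-cleanup': {
            'task': 'tasks.cleanup',                # hypothetical task
            'schedule': crontab(hour=3, minute=0),  # 03:00 in CELERY_TIMEZONE
        },
    }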

-    Fix contributed by Remy Noel
+- ``billiard.enable_forking`` is now only set by the processes pool.

-* celeryd-multi did not work on Windows (Issue #472).
+- The transport is now properly shown by :program:`celery report`
+  (Issue #913).

-* New-style ``CELERY_REDIS_*`` settings now takes precedence over
-  the old ``REDIS_*`` configuration keys (Issue #508).
+- The `--app` argument now works if the last part is a module name
+  (Issue #921).

-    Fix contributed by Joshua Ginsberg
+- Fixed problem with unpickleable exceptions (billiard #12).

-* Generic celerybeat init script no longer sets `bash -e` (Issue #510).
+- Adds ``task_name`` attribute to ``EagerResult`` which is always
+  :const:`None` (Issue #907).

-    Fix contributed by Roger Hu.
+- Old Task class in :mod:`celery.task` no longer accepts magic kwargs by
+  default (Issue #918).

-* Documented that Chords do not work well with redis-server versions
-  before 2.2.
+    A regression long ago disabled magic kwargs for these, and since
+    no one has complained about it we don't have any incentive to fix it now.

-    Contributed by Dan McGee.
+- The ``inspect reserved`` control command did not work properly.

-* The :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` setting was not respected.
+- Should now play better with static analysis tools by explicitly
+  specifying dynamically created attributes in the :mod:`celery` and
+  :mod:`celery.task` modules.

-* ``inspect.registered_tasks`` renamed to ``inspect.registered`` for naming
-  consistency.
+- Terminating a task now results in
+  :exc:`~celery.exceptions.RevokedTaskError` instead of a ``WorkerLostError``.

-    The previous name is still available as an alias.
+- ``AsyncResult.revoke`` now accepts ``terminate`` and ``signal`` arguments.
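
A usage sketch (``result`` stands for any ``AsyncResult`` instance; the
signal name is an example):

.. code-block:: python

    result.revoke(terminate=True, signal='SIGKILL')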

-    Contributed by Mher Movsisyan
+- The :event:`task-revoked` event now includes new fields: ``terminated``,
+  ``signum``, and ``expired``.

-* Worker logged the string representation of args and kwargs
-  without safe guards (Issue #480).
+- The argument to :class:`~celery.exceptions.TaskRevokedError` is now one
+  of the reasons ``revoked``, ``expired`` or ``terminated``.

-* RHEL init script: Changed celeryd startup priority.
+- Old Task class no longer uses classmethods for push_request and
+  pop_request (Issue #912).

-    The default start / stop priorities for MySQL on RHEL are
+- ``GroupResult`` now supports the ``children`` attribute (Issue #916).

-        # chkconfig: - 64 36
+- ``AsyncResult.collect`` now respects the ``intermediate`` argument
+  (Issue #917).

-    Therefore, if Celery is using a database as a broker / message store, it
-    should be started after the database is up and running, otherwise errors
-    will ensue. This commit changes the priority in the init script to
+- Fixes example task in documentation (Issue #902).

-        # chkconfig: - 85 15
+- Eventlet fixed so that the environment is patched as soon as possible.

-    which are the default recommended settings for 3-rd party applications
-    and assure that Celery will be started after the database service & shut
-    down before it terminates.
+- eventlet: Now warns if celery related modules that depends on threads
+  are imported before eventlet is patched.

-    Contributed by Yury V. Zaytsev.
+- Improved event and camera examples in the monitoring guide.

-* KeyValueStoreBackend.get_many did not respect the ``timeout`` argument
-  (Issue #512).
+- Disables celery command setuptools entrypoints if the command can't be
+  loaded.

-* celerybeat/celeryev's --workdir option did not chdir before after
-  configuration was attempted (Issue #506).
+- Fixed broken ``dump_request`` example in the tasks guide.

-* After deprecating 2.4 support we can now name modules correctly, since we
-  can take use of absolute imports.
+.. _version-3.0.5:

-    Therefore the following internal modules have been renamed:
-
-        celery.concurrency.evlet    -> celery.concurrency.eventlet
-        celery.concurrency.evg      -> celery.concurrency.gevent
-
-* AUTHORS file is now sorted alphabetically.
-
-    Also, as you may have noticed the contributors of new features/fixes are
-    now mentioned in the Changelog.
-
-.. _version-2.3.4:
-
-2.3.4
-=====
-:release-date: 2011-11-25 16:00 P.M GMT
-:by: Ask Solem
-
-.. _v234-security-fixes:
-
-Security Fixes
---------------
-
-* [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
-  :program:`celeryd-multi`, :program:`celeryd_detach`,
-  :program:`celerybeat` and :program:`celeryev` were used.
-
-  This means privileges weren't properly dropped, and that it would
-  be possible to regain supervisor privileges later.
-
-
-.. _`CELERYSA-0001`:
-    http://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt
-
-Fixes
------
-
-* Backported fix for #455 from 2.4 to 2.3.
-
-* Statedb was not saved at shutdown.
-
-* Fixes worker sometimes hanging when hard time limit exceeded.
-
-
-.. _version-2.3.3:
-
-2.3.3
-=====
-:release-date: 2011-16-09 05:00 P.M BST
-:by: Mher Movsisyan
-
-* Monkey patching :attr:`sys.stdout` could result in the worker
-  crashing if the replacing object did not define :meth:`isatty`
-  (Issue #477).
-
-* ``CELERYD`` option in :file:`/etc/default/celeryd` should not
-  be used with generic init scripts.
-
-
-.. _version-2.3.2:
-
-2.3.2
+3.0.5
 =====
-:release-date: 2011-10-07 05:00 P.M BST
-
-.. _v232-news:
-
-News
-----
-
-* Improved Contributing guide.
-
-    If you'd like to contribute to Celery you should read this
-    guide: http://celery.github.com/celery/contributing.html
-
-    We are looking for contributors at all skill levels, so don't
-    hesitate!
-
-* Now depends on Kombu 1.3.1
+:release-date: 2012-08-01 04:00 P.M BST

-* ``Task.request`` now contains the current worker host name (Issue #460).
+- Now depends on kombu 2.3.1 + billiard 2.7.3.11

-    Available as ``task.request.hostname``.
+- Fixed a bug with the -B option (``cannot pickle thread.lock objects``)
+  (Issue #894 + Issue #892, + django-celery #154).

-* It is now easier for app subclasses to extend how they are pickled.
-    (see :class:`celery.app.AppPickler`).
+- The :control:`restart_pool` control command now requires the
+  :setting:`CELERYD_POOL_RESTARTS` setting to be enabled

-.. _v232-fixes:
+    This change was necessary as the multiprocessing event that the restart
+    command depends on is responsible for creating many semaphores/file
+    descriptors, resulting in problems in some environments.

-Fixes
------
+- ``chain.apply`` now passes args to the first task (Issue #889).

-* `purge/discard_all` was not working correctly (Issue #455).
+- Documented previously secret options to the Django-Celery monitor
+  in the monitoring userguide (Issue #396).

-* The coloring of log messages didn't handle non-ASCII data well
-  (Issue #427).
+- Old changelogs are now organized in separate documents for each series,
+  see :ref:`history`.

-* [Windows] the multiprocessing pool tried to import ``os.kill``
-  even though this is not available there (Issue #450).
+.. _version-3.0.4:

-* Fixes case where the worker could become unresponsive because of tasks
-  exceeding the hard time limit.
-
-* The ``task-sent`` event was missing from the event reference.
-
-* ``ResultSet.iterate`` now returns results as they finish (Issue #459).
-
-    This was not the case previously, even though the documentation
-    states this was the expected behavior.
-
-* Retries will no longer be performed when tasks are called directly
-  (using ``__call__``).
-
-   Instead the exception passed to ``retry`` will be re-raised.
-
-* Eventlet no longer crashes if autoscale is enabled.
-
-    growing and shrinking eventlet pools is still not supported.
-
-* py24 target removed from :file:`tox.ini`.
-
-
-.. _version-2.3.1:
-
-2.3.1
-=====
-:release-date: 2011-08-07 08:00 P.M BST
-
-Fixes
------
-
-* The :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting did not work,
-  resulting in an AMQP related error about not being able to serialize
-  floats while trying to publish task states (Issue #446).
-
-.. _version-2.3.0:
-
-2.3.0
+3.0.4
 =====
-:release-date: 2011-08-05 12:00 P.M BST
-:tested: cPython: 2.5, 2.6, 2.7; PyPy: 1.5; Jython: 2.5.2
-
-.. _v230-important:
-
-Important Notes
----------------
-
-* Now requires Kombu 1.2.1
-
-* Results are now disabled by default.
-
-    The AMQP backend was not a good default because often the users were
-    not consuming the results, resulting in thousands of queues.
-
-    While the queues can be configured to expire if left unused, it was not
-    possible to enable this by default because this was only available in
-    recent RabbitMQ versions (2.1.1+)
-
-    With this change enabling a result backend will be a conscious choice,
-    which will hopefully lead the user to read the documentation and be aware
-    of any common pitfalls with the particular backend.
-
-    The default backend is now a dummy backend
-    (:class:`celery.backends.base.DisabledBackend`).  Saving state is simply an
-    noop operation, and AsyncResult.wait(), .result, .state, etc. will raise
-    a :exc:`NotImplementedError` telling the user to configure the result backend.
-
-    For help choosing a backend please see :ref:`task-result-backends`.
-
-    If you depend on the previous default which was the AMQP backend, then
-    you have to set this explicitly before upgrading::
-
-        CELERY_RESULT_BACKEND = "amqp"
-
-    .. note::
-
-        For django-celery users the default backend is still ``database``,
-        and results are not disabled by default.
-
-* The Debian init scripts have been deprecated in favor of the generic-init.d
-  init scripts.
-
-    In addition generic init scripts for celerybeat and celeryev has been
-    added.
-
-.. _v230-news:
+:release-date: 2012-07-26 07:00 P.M BST

-News
-----
+- Now depends on Kombu 2.3

-* Automatic connection pool support.
+- New experimental standalone Celery monitor: Flower

-    The pool is used by everything that requires a broker connection.  For
-    example applying tasks, sending broadcast commands, retrieving results
-    with the AMQP result backend, and so on.
+    See :ref:`monitoring-flower` to read more about it!

-    The pool is disabled by default, but you can enable it by configuring the
-    :setting:`BROKER_POOL_LIMIT` setting::
-
-        BROKER_POOL_LIMIT = 10
-
-    A limit of 10 means a maximum of 10 simultaneous connections can co-exist.
-    Only a single connection will ever be used in a single-thread
-    environment, but in a concurrent environment (threads, greenlets, etc., but
-    not processes) when the limit has been exceeded, any try to acquire a
-    connection will block the thread and wait for a connection to be released.
-    This is something to take into consideration when choosing a limit.
-
-    A limit of :const:`None` or 0 means no limit, and connections will be
-    established and closed every time.
-
-* Introducing Chords (taskset callbacks).
-
-    A chord is a task that only executes after all of the tasks in a taskset
-    has finished executing.  It's a fancy term for "taskset callbacks"
-    adopted from
-    `Cω  <http://research.microsoft.com/en-us/um/cambridge/projects/comega/>`_).
-
-    It works with all result backends, but the best implementation is
-    currently provided by the Redis result backend.
-
-    Here's an example chord::
-
-        >>> chord(add.subtask((i, i))
-        ...         for i in xrange(100))(tsum.subtask()).get()
-        9900
-
-    Please read the :ref:`Chords section in the user guide <chords>`, if you
-    want to know more.
-
-* Time limits can now be set for individual tasks.
-
-    To set the soft and hard time limits for a task use the ``time_limit``
-    and ``soft_time_limit`` attributes:
+    Contributed by Mher Movsisyan.

-    .. code-block:: python
+- Now supports AMQP heartbeats if using the new ``pyamqp://`` transport.

-        import time
+    - The py-amqp transport requires the :mod:`amqp` library to be installed::

-        @task(time_limit=60, soft_time_limit=30)
-        def sleeptask(seconds):
-            time.sleep(seconds)
+        $ pip install amqp

-    If the attributes are not set, then the workers default time limits
-    will be used.
+    - Then you need to set the transport URL prefix to ``pyamqp://``.

-    New in this version you can also change the time limits for a task
-    at runtime using the :func:`time_limit` remote control command::
+    - The default heartbeat value is 10 seconds, but this can be changed using
+      the :setting:`BROKER_HEARTBEAT` setting::

-        >>> from celery.task import control
-        >>> control.time_limit("tasks.sleeptask",
-        ...                    soft=60, hard=120, reply=True)
-        [{'worker1.example.com': {'ok': 'time limits set successfully'}}]
+        BROKER_HEARTBEAT = 5.0

-    Only tasks that starts executing after the time limit change will be affected.
+    - If the broker heartbeat is set to 10 seconds, the heartbeats will be
+      monitored every 5 seconds (double the heartbeat rate).

-    .. note::
+    See the `Kombu 2.3 changelog`_ for more information.

-        Soft time limits will still not work on Windows or other platforms
-        that do not have the ``SIGUSR1`` signal.
+.. _`Kombu 2.3 changelog`:
+    http://kombu.readthedocs.org/en/latest/changelog.html#version-2-3-0

-* Redis backend configuration directive names changed to include the
-   ``CELERY_`` prefix.
+- Now supports RabbitMQ Consumer Cancel Notifications, using the ``pyamqp://``
+  transport.

+    This is essential when running RabbitMQ in a cluster.

-    =====================================  ===================================
-    **Old setting name**                   **Replace with**
-    =====================================  ===================================
-    `REDIS_HOST`                           `CELERY_REDIS_HOST`
-    `REDIS_PORT`                           `CELERY_REDIS_PORT`
-    `REDIS_DB`                             `CELERY_REDIS_DB`
-    `REDIS_PASSWORD`                       `CELERY_REDIS_PASSWORD`
-    =====================================  ===================================
+    See the `Kombu 2.3 changelog`_ for more information.

-    The old names are still supported but pending deprecation.
+- Delivery info is no longer passed directly through.

-* PyPy: The default pool implementation used is now multiprocessing
-  if running on PyPy 1.5.
+    It was discovered that the SQS transport adds objects that can't
+    be pickled to the delivery info mapping, so we had to go back
+    to using the whitelist again.

-* celeryd-multi: now supports "pass through" options.
+    Fixing this bug also means that the SQS transport is now working again.

-    Pass through options makes it easier to use celery without a
-    configuration file, or just add last-minute options on the command
-    line.
+- The semaphore was not properly released when a task was revoked (Issue #877).

-    Example use:
+    This could lead to tasks being swallowed and not released until a worker
+    restart.

-        $ celeryd-multi start 4  -c 2  -- broker.host=amqp.example.com \
-                                          broker.vhost=/               \
-                                          celery.disable_rate_limits=yes
+    Thanks to Hynek Schlawack for debugging the issue.

-* celerybeat: Now retries establishing the connection (Issue #419).
+- Retrying a task now also forwards any linked tasks.

-* celeryctl: New ``list bindings`` command.
+    This means that if a task is part of a chain (or linked in some other
+    way), the next task in the chain will still be executed when the
+    retry succeeds.

-    Lists the current or all available bindings, depending on the
-    broker transport used.
+- Chords: Now supports setting the interval and other keyword arguments
+  to the chord unlock task.

-* Heartbeat is now sent every 30 seconds (previously every 2 minutes).
+    - The interval can now be set as part of the chord subtasks kwargs::

-* ``ResultSet.join_native()`` and ``iter_native()`` is now supported by
-  the Redis and Cache result backends.
+        chord(header)(body, interval=10.0)

-    This is an optimized version of ``join()`` using the underlying
-    backends ability to fetch multiple results at once.
+    - In addition the chord unlock task now honors the Task.default_retry_delay
+      option, used when none is specified, which also means that the default
+      interval can also be changed using annotations:

-* Can now use SSL when sending error e-mails by enabling the
-  :setting:`EMAIL_USE_SSL` setting.
+        .. code-block:: python

-* ``events.default_dispatcher()``: Context manager to easily obtain
-  an event dispatcher instance using the connection pool.
+            CELERY_ANNOTATIONS = {
+                'celery.chord_unlock': {
+                    'default_retry_delay': 10.0,
+                }
+            }

-* Import errors in the configuration module will not be silenced anymore.
+- New :meth:`@Celery.add_defaults` method can add new default configuration
+  dicts to the applications configuration.

-* ResultSet.iterate:  Now supports the ``timeout``, ``propagate`` and
-  ``interval`` arguments.
+    For example::

-* ``with_default_connection`` ->  ``with default_connection``
+        config = {'FOO': 10}

-* TaskPool.apply_async:  Keyword arguments ``callbacks`` and ``errbacks``
-  has been renamed to ``callback`` and ``errback`` and take a single scalar
-  value instead of a list.
+        celery.add_defaults(config)

-* No longer propagates errors occurring during process cleanup (Issue #365)
+    is the same as ``celery.conf.update(config)`` except that data will not be
+    copied, and that it will not be pickled when the worker spawns child
+    processes.

-* Added ``TaskSetResult.delete()``, which will delete a previously
-  saved taskset result.
+    In addition the method accepts a callable::

-* Celerybeat now syncs every 3 minutes instead of only at
-  shutdown (Issue #382).
+        def initialize_config():
+            # insert heavy stuff that can't be done at import time here.

-* Monitors now properly handles unknown events, so user-defined events
-  are displayed.
+        celery.add_defaults(initialize_config)

-* Terminating a task on Windows now also terminates all of the tasks child
-  processes (Issue #384).
+    which means the same as the above except that it will not happen
+    until the celery configuration is actually used.

-* celeryd: ``-I|--include`` option now always searches the current directory
-  to import the specified modules.
+    As an example, Celery can lazily use the configuration of a Flask app::

-* Cassandra backend: Now expires results by using TTLs.
+        flask_app = Flask()
+        celery = Celery()
+        celery.add_defaults(lambda: flask_app.config)

-* Functional test suite in ``funtests`` is now actually working properly, and
-  passing tests.
+- Revoked tasks were not marked as revoked in the result backend (Issue #871).

-.. _v230-fixes:
+    Fix contributed by Hynek Schlawack.

-Fixes
------
+- Eventloop now properly handles the case when the epoll poller object
+  has been closed (Issue #882).

-* celeryev was trying to create the pidfile twice.
+- Fixed syntax error in ``funtests/test_leak.py``

-* celery.contrib.batches: Fixed problem where tasks failed
-  silently (Issue #393).
+    Fix contributed by Catalin Iacob.

-* Fixed an issue where logging objects would give "<Unrepresentable",
-  even though the objects were.
+- group/chunks: Now accepts empty task list (Issue #873).

-* ``CELERY_TASK_ERROR_WHITE_LIST`` is now properly initialized
-  in all loaders.
+- New method names:

-* celeryd_detach now passes through command-line configuration.
+    - ``Celery.default_connection()`` ➠  :meth:`~@Celery.connection_or_acquire`.
+    - ``Celery.default_producer()``   ➠  :meth:`~@Celery.producer_or_acquire`.

-* Remote control command ``add_consumer`` now does nothing if the
-  queue is already being consumed from.
+    The old names still work for backward compatibility.
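
A usage sketch of the renamed method, assuming an existing app instance:

.. code-block:: python

    with app.connection_or_acquire() as connection:
        ...  # reuses a broker connection from the pool when possible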

-.. _version-2.2.8:
+.. _version-3.0.3:

-2.2.8
+3.0.3
 =====
-:release-date: 2011-11-25 16:00 P.M GMT
+:release-date: 2012-07-20 09:17 P.M BST
 :by: Ask Solem

-.. _v228-security-fixes:
-
-Security Fixes
---------------
+- amqplib passes the channel object as part of the delivery_info
+  and it's not pickleable, so we now remove it.

-* [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
-  :program:`celeryd-multi`, :program:`celeryd_detach`,
-  :program:`celerybeat` and :program:`celeryev` were used.
+.. _version-3.0.2:

-  This means privileges weren't properly dropped, and that it would
-  be possible to regain supervisor privileges later.
-
-
-.. _`CELERYSA-0001`:
-    http://github.com/celery/celery/tree/master/docs/sec/CELERYSA-0001.txt
-
-.. _version-2.2.7:
-
-2.2.7
+3.0.2
 =====
-:release-date: 2011-06-13 16:00 P.M BST
-
-* New signals: :signal:`after_setup_logger` and
-  :signal:`after_setup_task_logger`
-
-    These signals can be used to augment logging configuration
-    after Celery has set up logging.
-
-* Redis result backend now works with Redis 2.4.4.
-
-* celeryd_multi: The :option:`--gid` option now works correctly.
-
-* celeryd: Retry wrongfully used the repr of the traceback instead
-  of the string representation.
-
-* App.config_from_object: Now loads module, not attribute of module.
-
-* Fixed issue where logging of objects would give "<Unrepresentable: ...>"
+:release-date: 2012-07-20 04:00 P.M BST
+:by: Ask Solem

-.. _version-2.2.6:
+- A bug caused the following task options to not take defaults from the
+  configuration (Issue #867 + Issue #858)

-2.2.6
-=====
-:release-date: 2011-04-15 16:00 P.M CEST
+    The following settings were affected:

-.. _v226-important:
+    - :setting:`CELERY_IGNORE_RESULT`
+    - :setting:`CELERYD_SEND_TASK_ERROR_EMAILS`
+    - :setting:`CELERY_TRACK_STARTED`
+    - :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED`

-Important Notes
----------------
+    Fix contributed by John Watson.

-* Now depends on Kombu 1.1.2.
+- Task Request: ``delivery_info`` is now passed through as-is (Issue #807).

-* Dependency lists now explicitly specifies that we don't want python-dateutil
-  2.x, as this version only supports py3k.
+- The eta argument now supports datetime's with a timezone set (Issue #855).

-    If you have installed dateutil 2.0 by accident you should downgrade
-    to the 1.5.0 version::
+- The worker's banner displayed the autoscale settings in the wrong order
+  (Issue #859).

-        pip install -U python-dateutil==1.5.0
+- Extension commands are now loaded after concurrency is set up
+  so that they don't interfere with e.g. eventlet patching.

-    or by easy_install::
+- Fixed bug in the threaded pool (Issue #863)

-        easy_install -U python-dateutil==1.5.0
+- The task failure handler mixed up the fields in :func:`sys.exc_info`.

-.. _v226-fixes:
+    Fix contributed by Rinat Shigapov.

-Fixes
------
+- Fixed typos and wording in the docs.

-* The new ``WatchedFileHandler`` broke Python 2.5 support (Issue #367).
+    Fix contributed by Paul McMillan

-* Task: Don't use ``app.main`` if the task name is set explicitly.
+- New setting: :setting:`CELERY_WORKER_DIRECT`

-* Sending emails did not work on Python 2.5, due to a bug in
-  the version detection code (Issue #378).
+    If enabled each worker will consume from their own dedicated queue
+    which can be used to route tasks to specific workers.
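
A sketch of routing with the new setting (the ``add`` task, worker
hostname, and ``.dq`` queue suffix here are illustrative):

.. code-block:: python

    CELERY_WORKER_DIRECT = True

    # route a single task to the dedicated queue of one worker
    add.apply_async((2, 2), queue='w1.example.com.dq')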

-* Beat: Adds method ``ScheduleEntry._default_now``
+- Fixed several edge case bugs in the add consumer remote control command.

-    This method can be overridden to change the default value
-    of ``last_run_at``.
+- :mod:`~celery.contrib.migrate`: Can now filter and move tasks to specific
+  workers if :setting:`CELERY_WORKER_DIRECT` is enabled.

-* An error occurring in process cleanup could mask task errors.
+    Among other improvements, the following functions have been added:

-  We no longer propagate errors happening at process cleanup,
-  but log them instead.  This way they will not interfere with publishing
-  the task result (Issue #365).
+        * ``move_direct(filterfun, **opts)``
+        * ``move_direct_by_id(task_id, worker_hostname, **opts)``
+        * ``move_direct_by_idmap({task_id: worker_hostname, ...}, **opts)``
+        * ``move_direct_by_taskmap({task_name: worker_hostname, ...}, **opts)``
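
A usage sketch of one of the helpers listed above (the task id and
hostname are placeholders):

.. code-block:: python

    from celery.contrib.migrate import move_direct_by_id

    move_direct_by_id('some-task-id', 'worker1.example.com')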

-* Defining tasks did not work properly when using the Django
-  ``shell_plus`` utility (Issue #366).
+- :meth:`~celery.Celery.default_connection` now accepts a pool argument that
+  if set to false causes a new connection to be created instead of acquiring
+  one from the pool.
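
A usage sketch of the new ``pool`` argument, assuming an existing app:

.. code-block:: python

    with app.default_connection(pool=False) as connection:
        ...  # a dedicated connection, not drawn from the pool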

-* ``AsyncResult.get`` did not accept the ``interval`` and ``propagate``
-   arguments.
+- New signal: :signal:`celeryd_after_setup`.

-* celeryd: Fixed a bug where celeryd would not shutdown if a
-   :exc:`socket.error` was raised.
+- Default loader now keeps lowercase attributes from the configuration module.

-.. _version-2.2.5:
+.. _version-3.0.1:

-2.2.5
+3.0.1
 =====
-:release-date: 2011-03-28 06:00 P.M CEST
-
-.. _v225-important:
-
-Important Notes
----------------
-
-* Now depends on Kombu 1.0.7
-
-.. _v225-news:
-
-News
-----
-
-* Our documentation is now hosted by Read The Docs
-  (http://docs.celeryproject.org), and all links have been changed to point to
-  the new URL.
-
-* Logging: Now supports log rotation using external tools like `logrotate.d`_
-  (Issue #321)
-
-    This is accomplished by using the ``WatchedFileHandler``, which re-opens
-    the file if it is renamed or deleted.
-
-.. _`logrotate.d`:
-    http://www.ducea.com/2006/06/06/rotating-linux-log-files-part-2-logrotate/
-
-* :ref:`tut-otherqueues` now documents how to configure Redis/Database result
-   backends.
-
-* gevent: Now supports ETA tasks.
-
-    But gevent still needs ``CELERY_DISABLE_RATE_LIMITS=True`` to work.
-
-* TaskSet User Guide: now contains TaskSet callback recipes.
-
-* Eventlet: New signals:
-
-    * ``eventlet_pool_started``
-    * ``eventlet_pool_preshutdown``
-    * ``eventlet_pool_postshutdown``
-    * ``eventlet_pool_apply``
-
-    See :mod:`celery.signals` for more information.
-
-* New :setting:`BROKER_TRANSPORT_OPTIONS` setting can be used to pass
-  additional arguments to a particular broker transport.
-
-* celeryd: ``worker_pid`` is now part of the request info as returned by
-  broadcast commands.
-
-* TaskSet.apply/Taskset.apply_async now accepts an optional ``taskset_id``
-  argument.
-
-* The taskset_id (if any) is now available in the Task request context.
-
-* SQLAlchemy result backend: taskset_id and taskset_id columns now have a
-  unique constraint.  (Tables need to recreated for this to take affect).
-
-* Task Userguide: Added section about choosing a result backend.
-
-* Removed unused attribute ``AsyncResult.uuid``.
-
-.. _v225-fixes:
-
-Fixes
------
-
-* multiprocessing.Pool:  Fixes race condition when marking job with
-  ``WorkerLostError`` (Issue #268).
-
-    The process may have published a result before it was terminated,
-    but we have no reliable way to detect that this is the case.
-
-    So we have to wait for 10 seconds before marking the result with
-    WorkerLostError.  This gives the result handler a chance to retrieve the
-    result.
-
-* multiprocessing.Pool: Shutdown could hang if rate limits disabled.
-
-    There was a race condition when the MainThread was waiting for the pool
-    semaphore to be released.  The ResultHandler now terminates after 5
-    seconds if there are unacked jobs, but no worker processes left to start
-    them  (it needs to timeout because there could still be an ack+result
-    that we haven't consumed from the result queue. It
-    is unlikely we will receive any after 5 seconds with no worker processes).
-
-* celerybeat: Now creates pidfile even if the ``--detach`` option is not set.
-
-* eventlet/gevent: The broadcast command consumer is now running in a separate
-  greenthread.
-
-    This ensures broadcast commands will take priority even if there are many
-    active tasks.
+:release-date: 2012-07-10 06:00 P.M BST
+:by: Ask Solem
 
 
-* Internal module ``celery.worker.controllers`` renamed to
-  ``celery.worker.mediator``.
+- Now depends on kombu 2.2.5
 
 
-* celeryd: Threads now terminates the program by calling ``os._exit``, as it
-  is the only way to ensure exit in the case of syntax errors, or other
-  unrecoverable errors.
+- inspect now supports limit argument::
 
 
-* Fixed typo in ``maybe_timedelta`` (Issue #352).
+    myapp.control.inspect(limit=1).ping()
 
 
-* celeryd: Broadcast commands now logs with loglevel debug instead of warning.
+- Beat: now works with timezone aware datetime's.
 
 
-* AMQP Result Backend: Now resets cached channel if the connection is lost.
+- Task classes inheriting ``from celery import Task``
+  mistakingly enabled ``accept_magic_kwargs``.
 
 
-* Polling results with the AMQP result backend was not working properly.
+- Fixed bug in ``inspect scheduled`` (Issue #829).
 
 
-* Rate limits: No longer sleeps if there are no tasks, but rather waits for
-  the task received condition (Performance improvement).
+- Beat: Now resets the schedule to upgrade to UTC.
 
 
-* ConfigurationView: ``iter(dict)`` should return keys, not items (Issue #362).
+- The :program:`celery worker` command now works with eventlet/gevent.
 
 
-* celerybeat:  PersistentScheduler now automatically removes a corrupted
-  schedule file (Issue #346).
+    Previously it would not patch the environment early enough.
 
 
-* Programs that doesn't support positional command line arguments now provides
-  a user friendly error message.
+- The :program:`celery` command now supports extension commands
+  using setuptools entry-points.
 
 
-* Programs no longer tries to load the configuration file when showing
-  ``--version`` (Issue #347).
+    Libraries can add additional commands to the :program:`celery`
+    command by adding an entry-point like::
 
 
-* Autoscaler: The "all processes busy" log message is now severity debug
-  instead of error.
+        setup(
+            entry_points={
+                'celery.commands': [
+                    'foo = my.module:Command',
+                ],
+            },
+            ...)
 
 
-* celeryd: If the message body can't be decoded, it is now passed through
-  ``safe_str`` when logging.
+    The command must then support the interface of
+    :class:`celery.bin.base.Command`.
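+
+    A minimal sketch of such a command (the module path ``my.module`` comes
+    from the example above; the body is illustrative and assumes ``run()``
+    is the method invoked)::
+
+        from celery.bin.base import Command as BaseCommand
+
+        class Command(BaseCommand):
+
+            def run(self, *args, **kwargs):
+                print('foo called with: %r %r' % (args, kwargs))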
 
 
-    This is to ensure we don't get additional decoding errors when trying
-    to log the failure.
+- contrib.migrate: New utilities to move tasks from one queue to another.
 
 
-* ``app.config_from_object``/``app.config_from_envvar`` now works for all
-  loaders.
+    - :func:`~celery.contrib.migrate.move_tasks`
+    - :func:`~celery.contrib.migrate.move_task_by_id`
 
 
-* Now emits a user-friendly error message if the result backend name is
-  unknown (Issue #349).
+- The :event:`task-sent` event now contains ``exchange`` and ``routing_key``
+  fields.
 
 
-* :mod:`celery.contrib.batches`: Now sets loglevel and logfile in the task
-  request so ``task.get_logger`` works with batch tasks (Issue #357).
+- Fixes bug with installing on Python 3.
 
 
-* celeryd: An exception was raised if using the amqp transport and the prefetch
-  count value exceeded 65535 (Issue #359).
+    Fix contributed by Jed Smith.
 
 
-    The prefetch count is incremented for every received task with an
-    ETA/countdown defined.  The prefetch count is a short, so it can only
-    support a maximum value of 65535.  If the value exceeds the maximum,
-    the prefetch count is now disabled; it is re-enabled as soon as the
-    value is below the limit again.
+.. _version-3.0.0:
 
 
-* cursesmon: Fixed unbound local error (Issue #303).
-
-* eventlet/gevent is now imported on demand so autodoc can import the modules
-  without having eventlet/gevent installed.
-
-* celeryd: Ack callback now properly handles ``AttributeError``.
-
-* ``Task.after_return`` is now always called *after* the result has been
-  written.
-
-* Cassandra Result Backend: Should now work with the latest ``pycassa``
-  version.
-
-* multiprocessing.Pool: No longer cares if the putlock semaphore is released
-  too many times.  (This can happen if one or more worker processes are
-  killed.)
-
-* SQLAlchemy Result Backend: Now returns accidentally removed ``date_done`` again
-  (Issue #325).
-
-* Task.request context is now always initialized to ensure calling the task
-  function directly works even if it actively uses the request context.
-
-* Fixed exception occurring when iterating over the result from
-  ``TaskSet.apply``.
-
-* eventlet: Now properly schedules tasks with an ETA in the past.
-
-.. _version-2.2.4:
-
-2.2.4
-=====
-:release-date: 2011-02-19 12:00 AM CET
-
-.. _v224-fixes:
-
-Fixes
------
-
-* celeryd: 2.2.3 broke error logging, resulting in tracebacks not being logged.
-
-* AMQP result backend: Polling task states did not work properly if there were
-  more than one result message in the queue.
-
-* ``TaskSet.apply_async()`` and ``TaskSet.apply()`` now support an optional
-  ``taskset_id`` keyword argument (Issue #331).
-
-* The current taskset id (if any) is now available in the task context as
-  ``request.taskset`` (Issue #329).
-
-* SQLAlchemy result backend: `date_done` was no longer part of the results as it had
-  been accidentally removed.  It is now available again (Issue #325).
-
-* SQLAlchemy result backend: Added unique constraint on `Task.id` and
-  `TaskSet.taskset_id`.  Tables need to be recreated for this to take effect.
-
-* Fixed exception raised when iterating on the result of ``TaskSet.apply()``.
-
-* Tasks Userguide: Added section on choosing a result backend.
-
-.. _version-2.2.3:
-
-2.2.3
-=====
-:release-date: 2011-02-12 04:00 P.M CET
-
-.. _v223-fixes:
-
-Fixes
------
-
-* Now depends on Kombu 1.0.3
-
-* Task.retry now supports a ``max_retries`` argument, used to change the
-  default value.
-
-* `multiprocessing.cpu_count` may raise :exc:`NotImplementedError` on
-  platforms where this is not supported (Issue #320).
-
-* Coloring of log messages broke if the logged object was not a string.
-
-* Fixed several typos in the init script documentation.
-
-* A regression caused `Task.exchange` and `Task.routing_key` to no longer
-  have any effect.  This is now fixed.
-
-* Routing Userguide: Fixes typo, routers in :setting:`CELERY_ROUTES` must be
-  instances, not classes.
-
-* :program:`celeryev` did not create pidfile even though the
-  :option:`--pidfile` argument was set.
-
-* Task logger format was no longer used. (Issue #317).
-
-    The id and name of the task are now part of the log message again.
-
-* A safe version of ``repr()`` is now used in strategic places to ensure
-  objects with a broken ``__repr__`` do not crash the worker, or otherwise
-  make errors hard to understand (Issue #298).
-
-* Remote control command ``active_queues``: did not account for queues added
-  at runtime.
-
-    In addition, the dictionary returned by this command now has a different
-    structure: the exchange key is now a dictionary containing the
-    exchange declaration in full.
-
-* The :option:`-Q` option to :program:`celeryd` removed unused queue
-  declarations, so routing of tasks could fail.
-
-    Queues are no longer removed, but rather `app.amqp.queues.consume_from()`
-    is used as the list of queues to consume from.
-
-    This ensures all queues are available for routing purposes.
-
-* celeryctl: Now supports the `inspect active_queues` command.
-
-.. _version-2.2.2:
-
-2.2.2
-=====
-:release-date: 2011-02-03 04:00 P.M CET
-
-.. _v222-fixes:
-
-Fixes
------
-
-* Celerybeat could not read the schedule properly, so entries in
-  :setting:`CELERYBEAT_SCHEDULE` would not be scheduled.
-
-* Task error log message now includes `exc_info` again.
-
-* The `eta` argument can now be used with `task.retry`.
-
-    Previously it was overwritten by the countdown argument.
-
-* celeryd-multi/celeryd_detach: Now logs errors occurring when executing
-  the `celeryd` command.
-
-* daemonizing tutorial: Fixed typo ``--time-limit 300`` ->
-  ``--time-limit=300``
-
-* Colors in logging broke non-string objects in log messages.
-
-* ``setup_task_logger`` no longer makes assumptions about magic task kwargs.
-
-.. _version-2.2.1:
-
-2.2.1
-=====
-:release-date: 2011-02-02 04:00 P.M CET
-
-.. _v221-fixes:
-
-Fixes
------
-
-* Eventlet pool was leaking memory (Issue #308).
-
-* Deprecated function ``celery.execute.delay_task`` was accidentally removed,
-  now available again.
-
-* ``BasePool.on_terminate`` stub did not exist
-
-* celeryd detach: Adds readable error messages if user/group name does not
-  exist.
-
-* Smarter handling of Unicode decode errors when logging errors.
-
-.. _version-2.2.0:
-
-2.2.0
-=====
-:release-date: 2011-02-01 10:00 AM CET
-
-.. _v220-important:
-
-Important Notes
----------------
-
-* Carrot has been replaced with `Kombu`_
-
-    Kombu is the next generation messaging framework for Python,
-    fixing several flaws present in Carrot that were hard to fix
-    without breaking backwards compatibility.
-
-    It also adds:
-
-    * First-class support for virtual transports; Redis, Django ORM,
-      SQLAlchemy, Beanstalk, MongoDB, CouchDB and in-memory.
-    * Consistent error handling with introspection,
-    * The ability to ensure that an operation is performed by gracefully
-      handling connection and channel errors,
-    * Message compression (zlib, bzip2, or custom compression schemes).
-
-    This means that `ghettoq` is no longer needed as the
-    functionality it provided is already available in Celery by default.
-    The virtual transports are also more feature complete with support
-    for exchanges (direct and topic).  The Redis transport even supports
-    fanout exchanges so it is able to perform worker remote control
-    commands.
-
-.. _`Kombu`: http://pypi.python.org/pypi/kombu
-
-* Magic keyword arguments pending deprecation.
-
-    The magic keyword arguments were responsible for many problems
-    and quirks: notably issues with tasks and decorators, and name
-    collisions in keyword arguments for the unaware.
-
-    It wasn't easy to find a way to deprecate the magic keyword arguments,
-    but we think this is a solution that makes sense and it will not
-    have any adverse effects for existing code.
-
-    The path to a magic keyword argument free world is:
-
-        * the `celery.decorators` module is deprecated and the decorators
-          can now be found in `celery.task`.
-        * The decorators in `celery.task` disable keyword arguments by
-          default.
-        * All examples in the documentation have been changed to use
-          `celery.task`.
-
-        This means that the following will have magic keyword arguments
-        enabled (old style):
-
-            .. code-block:: python
-
-                from celery.decorators import task
-
-                @task()
-                def add(x, y, **kwargs):
-                    print("In task %s" % kwargs["task_id"])
-                    return x + y
-
-        And this will not use magic keyword arguments (new style):
-
-            .. code-block:: python
-
-                from celery.task import task
-
-                @task()
-                def add(x, y):
-                    print("In task %s" % add.request.id)
-                    return x + y
-
-    In addition, tasks can choose not to accept magic keyword arguments by
-    setting the `task.accept_magic_kwargs` attribute.
-
-    .. admonition:: Deprecation
-
-        Using the decorators in :mod:`celery.decorators` emits a
-        :class:`PendingDeprecationWarning` with a helpful message urging
-        you to change your code, in version 2.4 this will be replaced with
-        a :class:`DeprecationWarning`, and in version 3.0 the
-        :mod:`celery.decorators` module will be removed and no longer exist.
-
-        Similarly, the `task.accept_magic_kwargs` attribute will no
-        longer have any effect starting from version 3.0.
-
-* The magic keyword arguments are now available as `task.request`
-
-    This is called *the context*.  Using thread-local storage the
-    context contains state that is related to the current request.
-
-    It is mutable and you can add custom attributes that will only be seen
-    by the current task request.
-
-    The following context attributes are always available:
-
-    =====================================  ===================================
-    **Magic Keyword Argument**             **Replace with**
-    =====================================  ===================================
-    `kwargs["task_id"]`                    `self.request.id`
-    `kwargs["delivery_info"]`              `self.request.delivery_info`
-    `kwargs["task_retries"]`               `self.request.retries`
-    `kwargs["logfile"]`                    `self.request.logfile`
-    `kwargs["loglevel"]`                   `self.request.loglevel`
-    `kwargs["task_is_eager`                `self.request.is_eager`
-    **NEW**                                `self.request.args`
-    **NEW**                                `self.request.kwargs`
-    =====================================  ===================================
-
-    In addition, the following methods now automatically use the current
-    context, so you don't have to pass `kwargs` manually anymore (see the
-    retry example after the list below):
-
-        * `task.retry`
-        * `task.get_logger`
-        * `task.update_state`
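-
-    For example, `task.retry` now picks up the current request by itself;
-    a sketch (the task body and URL handling are illustrative):
-
-    .. code-block:: python
-
-        from urllib2 import urlopen
-
-        from celery.task import task
-
-        @task(max_retries=3)
-        def fetch(url):
-            try:
-                return urlopen(url).read()
-            except IOError, exc:
-                # No need to pass kwargs: retry uses the current context.
-                fetch.retry(exc=exc)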
-
-* `Eventlet`_ support.
-
-    This is great news for I/O-bound tasks!
-
-    To change pool implementations you use the :option:`-P|--pool` argument
-    to :program:`celeryd`, or globally using the
-    :setting:`CELERYD_POOL` setting.  This can be the full name of a class,
-    or one of the following aliases: `processes`, `eventlet`, `gevent`.
-
-    For more information please see the :ref:`concurrency-eventlet` section
-    in the User Guide.
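-
-    For example, to start a worker with the eventlet pool and 1000
-    greenthreads (the concurrency value here is just an example)::
-
-        $ celeryd -P eventlet -c 1000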
-
-    .. admonition:: Why not gevent?
-
-        For our first alternative concurrency implementation we have focused
-        on `Eventlet`_, but there is also an experimental `gevent`_ pool
-        available. This is missing some features, notably the ability to
-        schedule ETA tasks.
-
-        Hopefully the `gevent`_ support will be feature complete by
-        version 2.3, but this depends on user demand (and contributions).
-
-.. _`Eventlet`: http://eventlet.net
-.. _`gevent`: http://gevent.org
-
-* Python 2.4 support deprecated!
-
-    We're happy^H^H^H^H^Hsad to announce that this is the last version
-    to support Python 2.4.
-
-    You are urged to make some noise if you're currently stuck with
-    Python 2.4.  Complain to your package maintainers, sysadmins and bosses:
-    tell them it's time to move on!
-
-    Apart from wanting to take advantage of with-statements, coroutines,
-    conditional expressions and enhanced try blocks, the code base
-    now contains so many 2.4 related hacks and workarounds it's no longer
-    just a compromise, but a sacrifice.
-
-    If it really isn't your choice, and you don't have the option to upgrade
-    to a newer version of Python, you can just continue to use Celery 2.2.
-    Important fixes can be backported for as long as there is interest.
-
-* `celeryd`: Now supports Autoscaling of child worker processes.
-
-    The :option:`--autoscale` option can be used to configure the minimum
-    and maximum number of child worker processes::
-
-        --autoscale=AUTOSCALE
-            Enable autoscaling by providing
-            max_concurrency,min_concurrency.  Example:
-            --autoscale=10,3 (always keep 3 processes, but grow to
-            10 if necessary).
-
-* Remote Debugging of Tasks
-
-    ``celery.contrib.rdb`` is an extended version of :mod:`pdb` that
-    enables remote debugging of processes that do not have terminal
-    access.
-
-    Example usage:
-
-    .. code-block:: python
-
-        from celery.contrib import rdb
-        from celery.task import task
-
-        @task()
-        def add(x, y):
-            result = x + y
-            rdb.set_trace()  # <- set breakpoint
-            return result
-
-
-    :func:`~celery.contrib.rdb.set_trace` sets a breakpoint at the current
-    location and creates a socket you can telnet into to remotely debug
-    your task.
-
-    The debugger may be started by multiple processes at the same time,
-    so rather than using a fixed port the debugger will search for an
-    available port, starting from the base port (6900 by default).
-    The base port can be changed using the environment variable
-    :envvar:`CELERY_RDB_PORT`.
-
-    By default the debugger will only be available from the local host,
-    to enable access from the outside you have to set the environment
-    variable :envvar:`CELERY_RDB_HOST`.
-
-    When `celeryd` encounters your breakpoint it will log the following
-    information::
-
-        [INFO/MainProcess] Got task from broker:
-            tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8]
-        [WARNING/PoolWorker-1] Remote Debugger:6900:
-            Please telnet 127.0.0.1 6900.  Type `exit` in session to continue.
-        [2011-01-18 14:25:44,119: WARNING/PoolWorker-1] Remote Debugger:6900:
-            Waiting for client...
-
-    If you telnet to the port specified you will be presented
-    with a ``pdb`` shell::
-
-        $ telnet localhost 6900
-        Connected to localhost.
-        Escape character is '^]'.
-        > /opt/devel/demoapp/tasks.py(128)add()
-        -> return result
-        (Pdb)
-
-    Enter ``help`` to get a list of available commands.
-    It may be a good idea to read the `Python Debugger Manual`_ if
-    you have never used `pdb` before.
-
-.. _`Python Debugger Manual`: http://docs.python.org/library/pdb.html
-
-
-* Events are now transient and use a topic exchange (instead of direct).
-
-    The `CELERYD_EVENT_EXCHANGE`, `CELERYD_EVENT_ROUTING_KEY`,
-    `CELERYD_EVENT_EXCHANGE_TYPE` settings are no longer in use.
-
-    This means events will not be stored until there is a consumer, and the
-    events will be gone as soon as the consumer stops.  Also it means there
-    can be multiple monitors running at the same time.
-
-    The routing key of an event is the type of event (e.g. `worker.started`,
-    `worker.heartbeat`, `task.succeeded`, etc.).  This means a consumer can
-    filter on specific types, to only be alerted of the events it cares about.
-
-    Each consumer will create a unique queue, meaning it is in effect a
-    broadcast exchange.
-
-    This opens up a lot of possibilities, for example the workers could listen
-    for worker events to know what workers are in the neighborhood, and even
-    restart workers when they go down (or use this information to optimize
-    tasks/autoscaling).
-
-    .. note::
-
-        The event exchange has been renamed from "celeryevent" to "celeryev"
-        so it does not collide with older versions.
-
-        If you would like to remove the old exchange you can do so
-        by executing the following command::
-
-            $ camqadm exchange.delete celeryevent
-
-* `celeryd` now starts without configuration, and configuration can be
-  specified directly on the command line.
-
-  Configuration options must appear after the last argument, separated
-  by two dashes::
-
-      $ celeryd -l info -I tasks -- broker.host=localhost broker.vhost=/app
-
-* Configuration is now an alias to the original configuration, so changes
-  to the original will be reflected in Celery at runtime.
-
-* `celery.conf` has been deprecated, and modifying `celery.conf.ALWAYS_EAGER`
-  will no longer have any effect.
-
-    The default configuration is now available in the
-    :mod:`celery.app.defaults` module.  The available configuration options
-    and their types can now be introspected.
-
-* Remote control commands are now provided by `kombu.pidbox`, the generic
-  process mailbox.
-
-* Internal module `celery.worker.listener` has been renamed to
-  `celery.worker.consumer`, and `.CarrotListener` is now `.Consumer`.
-
-* Previously deprecated modules `celery.models` and
-  `celery.management.commands` have now been removed as per the deprecation
-  timeline.
-
-* [Security: Low severity] Removed `celery.task.RemoteExecuteTask` and
-  accompanying functions: `dmap`, `dmap_async`, and `execute_remote`.
-
-    Executing arbitrary code using pickle is a potential security issue if
-    someone gains unrestricted access to the message broker.
-
-    If you really need this functionality, then you would have to add
-    this to your own project.
-
-* [Security: Low severity] The `stats` command no longer transmits the
-  broker password.
-
-    One would have needed an authenticated broker connection to receive
-    this password in the first place, but sniffing the password at the
-    wire level would have been possible if using unencrypted communication.
-
-.. _v220-news:
-
-News
-----
-
-* The internal module `celery.task.builtins` has been removed.
-
-* The module `celery.task.schedules` is deprecated, and
-  `celery.schedules` should be used instead.
-
-    For example if you have::
-
-        from celery.task.schedules import crontab
-
-    You should replace that with::
-
-        from celery.schedules import crontab
-
-    The module needs to be renamed because it must be possible
-    to import schedules without importing the `celery.task` module.
-
-* The following functions have been deprecated and are scheduled for
-  removal in version 2.3:
-
-    * `celery.execute.apply_async`
-
-        Use `task.apply_async()` instead.
-
-    * `celery.execute.apply`
-
-        Use `task.apply()` instead.
-
-    * `celery.execute.delay_task`
-
-        Use `registry.tasks[name].delay()` instead.
-
-* Importing `TaskSet` from `celery.task.base` is now deprecated.
-
-    You should use::
-
-        >>> from celery.task import TaskSet
-
-    instead.
-
-* New remote control commands:
-
-    * `active_queues`
-
-        Returns the queue declarations a worker is currently consuming from.
-
-* Added the ability to retry publishing the task message in
-  the event of connection loss or failure.
-
-    This is disabled by default but can be enabled using the
-    :setting:`CELERY_TASK_PUBLISH_RETRY` setting, and tweaked by
-    the :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` setting.
-
-    In addition `retry`, and `retry_policy` keyword arguments have
-    been added to `Task.apply_async`.
-
-    .. note::
-
-        Using the `retry` argument to `apply_async` requires you to
-        handle the publisher/connection manually.
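-
-    A configuration sketch (the policy keys follow kombu's retry policy
-    options; the values shown are examples only)::
-
-        CELERY_TASK_PUBLISH_RETRY = True
-        CELERY_TASK_PUBLISH_RETRY_POLICY = {
-            "max_retries": 3,
-            "interval_start": 0,
-            "interval_step": 0.2,
-            "interval_max": 0.2,
-        }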
-
-* Periodic Task classes (`@periodic_task`/`PeriodicTask`) will *not* be
-  deprecated as previously indicated in the source code.
-
-    But you are encouraged to use the more flexible
-    :setting:`CELERYBEAT_SCHEDULE` setting.
-
-* Built-in daemonization support of celeryd using `celeryd-multi`
-  is no longer experimental and is considered production quality.
-
-    See :ref:`daemon-generic` if you want to use the new generic init
-    scripts.
-
-* Added support for message compression using the
-  :setting:`CELERY_MESSAGE_COMPRESSION` setting, or the `compression` argument
-  to `apply_async`.  This can also be set using routers.
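-
-    For example, to compress all task messages with zlib (one of the
-    supported schemes)::
-
-        CELERY_MESSAGE_COMPRESSION = "zlib"
-
-    or for a single task invocation::
-
-        >>> add.apply_async((2, 2), compression="zlib")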
-
-* `celeryd`: Now logs the stacktrace of all threads when receiving the
-  `SIGUSR1` signal.  (Does not work on cPython 2.4, Windows or Jython.)
-
-    Inspired by https://gist.github.com/737056
-
-* Can now remotely terminate/kill the worker process currently processing
-  a task.
-
-    The `revoke` remote control command now supports a `terminate` argument.
-    The default signal is `TERM`, but can be specified using the `signal`
-    argument. Signal can be the uppercase name of any signal defined
-    in the :mod:`signal` module in the Python Standard Library.
-
-    Terminating a task also revokes it.
-
-    Example::
-
-        >>> from celery.task.control import revoke
-
-        >>> revoke(task_id, terminate=True)
-        >>> revoke(task_id, terminate=True, signal="KILL")
-        >>> revoke(task_id, terminate=True, signal="SIGKILL")
-
-* `TaskSetResult.join_native`: Backend-optimized version of `join()`.
-
-    If available, this version uses the backend's ability to retrieve
-    multiple results at once, unlike `join()` which fetches the results
-    one by one.
-
-    So far only supported by the AMQP result backend.  Support for memcached
-    and Redis may be added later.
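-
-    Usage sketch (with the AMQP result backend)::
-
-        >>> result = TaskSet(tasks).apply_async()
-        >>> result.join_native()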
-
-* Improved implementations of `TaskSetResult.join` and `AsyncResult.wait`.
-
-    An `interval` keyword argument has been added to both so the
-    polling interval can be specified (default interval is 0.5 seconds).
-
-    A `propagate` keyword argument has been added to `result.wait()`,
-    errors will be returned instead of raised if this is set to False.
-
-    .. warning::
-
-        You should decrease the polling interval when using the database
-        result backend, as frequent polling can result in high database load.
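-
-    For example, to poll once per second and have errors returned instead
-    of raised::
-
-        >>> result.wait(interval=1.0, propagate=False)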
-
-
-* The PID of the child worker process accepting a task is now sent as a field
-  with the `task-started` event.
-
-* The following fields have been added to all events in the worker class:
-
-    * `sw_ident`: Name of worker software (e.g. celeryd).
-    * `sw_ver`: Software version (e.g. 2.2.0).
-    * `sw_sys`: Operating System (e.g. Linux, Windows, Darwin).
-
-* For better accuracy the start time reported by the multiprocessing worker
-  process is used when calculating task duration.
-
-    Previously the time reported by the accept callback was used.
-
-* `celerybeat`: New built-in daemonization support using the `--detach`
-  option.
-
-* `celeryev`: New built-in daemonization support using the `--detach`
-  option.
-
-* `TaskSet.apply_async`: Now supports custom publishers by using the
-  `publisher` argument.
-
-* Added :setting:`CELERY_SEND_TASK_SENT_EVENT` setting.
-
-    If enabled an event will be sent with every task, so monitors can
-    track tasks before the workers receive them.
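-
-    To enable it, add this to your configuration::
-
-        CELERY_SEND_TASK_SENT_EVENT = True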
-
-* `celerybeat`: Now reuses the broker connection when applying
-  scheduled tasks.
-
-* The configuration module and loader to use can now be specified on
-  the command line.
-
-    For example::
-
-        $ celeryd --config=celeryconfig.py --loader=myloader.Loader
-
-* Added signals: `beat_init` and `beat_embedded_init`
-
-    * :signal:`celery.signals.beat_init`
-
-        Dispatched when :program:`celerybeat` starts (either standalone or
-        embedded).  Sender is the :class:`celery.beat.Service` instance.
-
-    * :signal:`celery.signals.beat_embedded_init`
-
-        Dispatched in addition to the :signal:`beat_init` signal when
-        :program:`celerybeat` is started as an embedded process.  Sender
-        is the :class:`celery.beat.Service` instance.
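-
-    A receiver sketch (using the same connect-as-decorator style as the
-    `setup_logging` example later in this document):
-
-    .. code-block:: python
-
-        from celery import signals
-
-        @signals.beat_init.connect
-        def on_beat_init(sender=None, **kwargs):
-            print("celerybeat started: %r" % (sender, ))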
-
-* Redis result backend: Removed deprecated settings `REDIS_TIMEOUT` and
-  `REDIS_CONNECT_RETRY`.
-
-* CentOS init script for :program:`celeryd` now available in `extra/centos`.
-
-* Now depends on `pyparsing` version 1.5.0 or higher.
-
-    There have been reported issues using Celery with pyparsing 1.4.x,
-    so please upgrade to the latest version.
-
-* Lots of new unit tests written, now with a total coverage of 95%.
-
-.. _v220-fixes:
-
-Fixes
------
-
-* `celeryev` Curses Monitor: Improved resize handling and UI layout
-  (Issue #274 + Issue #276)
-
-* AMQP Backend: Exceptions occurring while sending task results are now
-  propagated instead of silenced.
-
-    `celeryd` will then show the full traceback of these errors in the log.
-
-* AMQP Backend: No longer deletes the result queue after successful
-  poll, as this should be handled by the
-  :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES` setting instead.
-
-* AMQP Backend: Now ensures queues are declared before polling results.
-
-* Windows: celeryd: Show error if running with `-B` option.
-
-    Running celerybeat embedded is known not to work on Windows, so
-    users are encouraged to run celerybeat as a separate service instead.
-
-* Windows: Utilities no longer output ANSI color codes.
-
-* camqadm: Now properly handles Ctrl+C by simply exiting instead of showing
-  confusing traceback.
-
-* Windows: All tests are now passing on Windows.
-
-* Remove bin/ directory, and `scripts` section from setup.py.
-
-    This means we now rely completely on setuptools entrypoints.
-
-.. _v220-experimental:
-
-Experimental
-------------
-
-* Jython: celeryd now runs on Jython using the threaded pool.
-
-    All tests pass, but there may still be bugs lurking around the corners.
-
-* PyPy: celeryd now runs on PyPy.
-
-    It runs without any pool, so to get parallel execution you must start
-    multiple instances (e.g. using :program:`celeryd-multi`).
-
-    Sadly an initial benchmark seems to show a 30% performance decrease on
-    pypy-1.4.1 + JIT.  We would like to find out why this is, so stay tuned.
-
-* :class:`PublisherPool`: Experimental pool of task publishers and
-  connections to be used with the `retry` argument to `apply_async`.
-
-  The example code below will re-use connections and channels, and
-  retry sending of the task message if the connection is lost.
-
-  .. code-block:: python
-
-    from celery import current_app
-
-    # Global pool
-    pool = current_app().amqp.PublisherPool(limit=10)
-
-    def my_view(request):
-        with pool.acquire() as publisher:
-            add.apply_async((2, 2), publisher=publisher, retry=True)
-
-
-.. _version-2.1.4:
-
-2.1.4
-=====
-:release-date: 2010-12-03 12:00 P.M CEST
-
-.. _v214-fixes:
-
-Fixes
------
-
-* Execution options to `apply_async` now takes precedence over options
-  returned by active routers.  This was a regression introduced recently
-  (Issue #244).
-
-* `celeryev` curses monitor: Long arguments are now truncated so curses
-  doesn't crash with out of bounds errors.  (Issue #235).
-
-* `celeryd`: Channel errors occurring while handling control commands no
-  longer crash the worker but are instead logged with severity error.
-
-* SQLAlchemy database backend: Fixed a race condition occurring when
-  the client wrote the pending state.  Just like the Django database backend,
-  it no longer saves the pending state (Issue #261 + Issue #262).
-
-* Error email body now uses `repr(exception)` instead of `str(exception)`,
-  as the latter could result in Unicode decode errors (Issue #245).
-
-* Error email timeout value is now configurable by using the
-  :setting:`EMAIL_TIMEOUT` setting.
-
-* `celeryev`: Now works on Windows (but the curses monitor won't work without
-  having curses).
-
-* Unit test output no longer emits non-standard characters.
-
-* `celeryd`: The broadcast consumer is now closed if the connection is reset.
-
-* `celeryd`: Now properly handles errors occurring while trying to acknowledge
-  the message.
-
-* `TaskRequest.on_failure` now encodes traceback using the current filesystem
-  encoding (Issue #286).
-
-* `EagerResult` can now be pickled (Issue #288).
-
-.. _v214-documentation:
-
-Documentation
--------------
-
-* Added :ref:`contributing`.
-
-* Added :ref:`guide-optimizing`.
-
-* Added :ref:`faq-security` section to the FAQ.
-
-.. _version-2.1.3:
-
-2.1.3
-=====
-:release-date: 2010-11-09 05:00 P.M CEST
-
-.. _v213-fixes:
-
-Fixes
------
-
-* Fixed deadlocks in `timer2` which could lead to `djcelerymon`/`celeryev -c`
-  hanging.
-
-* `EventReceiver`: now sends heartbeat request to find workers.
-
-    This means :program:`celeryev` and friends finds workers immediately
-    at startup.
-
-* celeryev cursesmon: Set screen_delay to 10ms, so the screen refreshes more
-  often.
-
-* Fixed pickling errors when pickling :class:`AsyncResult` on older Python
-  versions.
-
-* celeryd: prefetch count was decremented by eta tasks even if there
-  were no active prefetch limits.
-
-
-.. _version-2.1.2:
-
-2.1.2
-=====
-:release-date: TBA
-
-.. _v212-fixes:
-
-Fixes
------
-
-* celeryd: Now sends the `task-retried` event for retried tasks.
-
-* celeryd: Now honors ignore result for
-  :exc:`~@WorkerLostError` and timeout errors.
-
-* celerybeat: Fixed :exc:`UnboundLocalError` in celerybeat logging
-  when using logging setup signals.
-
-* celeryd: All log messages now include `exc_info`.
-
-.. _version-2.1.1:
-
-2.1.1
-=====
-:release-date: 2010-10-14 02:00 P.M CEST
-
-.. _v211-fixes:
-
-Fixes
------
-
-* Now working on Windows again.
-
-    Removed dependency on the pwd/grp modules.
-
-* snapshots: Fixed race condition leading to loss of events.
-
-* celeryd: Reject tasks with an eta that cannot be converted to a time stamp.
-
-    See issue #209
-
-* concurrency.processes.pool: The semaphore was released twice for each task
-  (both at ACK and result ready).
-
-    This has been fixed, and it is now released only once per task.
-
-* docs/configuration: Fixed typo `CELERYD_SOFT_TASK_TIME_LIMIT` ->
-  :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`.
-
-    See issue #214
-
-* Control command `dump_scheduled`: was using the old `.info` attribute.
-
-* :program:`celeryd-multi`: Fixed `set changed size during iteration` bug
-  occurring in the restart command.
-
-* celeryd: Accidentally tried to use additional command line arguments.
-
-    This would lead to an error like:
-
-    `got multiple values for keyword argument 'concurrency'`.
-
-    Additional command line arguments are now ignored, and no longer
-    produce this error.  However -- we do reserve the right to use
-    positional arguments in the future, so please do not depend on this
-    behavior.
-
-* celerybeat: Now respects routers and task execution options again.
-
-* celerybeat: Now reuses the publisher instead of the connection.
-
-* Cache result backend: Using :class:`float` as the expires argument
-  to `cache.set` is deprecated by the memcached libraries,
-  so we now automatically cast to :class:`int`.
-
-* unit tests: No longer emits logging and warnings in test output.
-
-.. _v211-news:
-
-News
-----
-
-* Now depends on carrot version 0.10.7.
-
-* Added :setting:`CELERY_REDIRECT_STDOUTS`, and
-  :setting:`CELERY_REDIRECT_STDOUTS_LEVEL` settings.
-
-    :setting:`CELERY_REDIRECT_STDOUTS` is used by :program:`celeryd` and
-    :program:`celerybeat`.  All output to `stdout` and `stderr` will be
-    redirected to the current logger if enabled.
-
-    :setting:`CELERY_REDIRECT_STDOUTS_LEVEL` decides the log level used and is
-    :const:`WARNING` by default.
-
-* Added :setting:`CELERYBEAT_SCHEDULER` setting.
-
-    This setting is used to define the default for the -S option to
-    :program:`celerybeat`.
-
-    Example:
-
-    .. code-block:: python
-
-        CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
-
-* Added Task.expires: Used to set default expiry time for tasks.
-
-* New remote control commands: `add_consumer` and `cancel_consumer`.
-
-    .. method:: add_consumer(queue, exchange, exchange_type, routing_key,
-                             **options)
-        :module:
-
-        Tells the worker to declare and consume from the specified
-        declaration.
-
-    .. method:: cancel_consumer(queue_name)
-        :module:
-
-        Tells the worker to stop consuming from queue (by queue name).
-
-
-    Commands also added to :program:`celeryctl` and
-    :class:`~celery.task.control.inspect`.
-
-
-    Example using celeryctl to start consuming from queue "queue", in
-    exchange "exchange", of type "direct" using binding key "key"::
-
-        $ celeryctl inspect add_consumer queue exchange direct key
-        $ celeryctl inspect cancel_consumer queue
-
-    See :ref:`monitoring-celeryctl` for more information about the
-    :program:`celeryctl` program.
-
-
-    Another example using :class:`~celery.task.control.inspect`:
-
-    .. code-block:: python
-
-        >>> from celery.task.control import inspect
-        >>> inspect.add_consumer(queue="queue", exchange="exchange",
-        ...                      exchange_type="direct",
-        ...                      routing_key="key",
-        ...                      durable=False,
-        ...                      auto_delete=True)
-
-        >>> inspect.cancel_consumer("queue")
-
-* celerybeat: Now logs the traceback if a message can't be sent.
-
-* celerybeat: Now enables a default socket timeout of 30 seconds.
-
-* README/introduction/homepage: Added link to `Flask-Celery`_.
-
-.. _`Flask-Celery`: http://github.com/ask/flask-celery
-
-.. _version-2.1.0:
-
-2.1.0
-=====
-:release-date: 2010-10-08 12:00 P.M CEST
-
-.. _v210-important:
-
-Important Notes
----------------
-
-* Celery is now following the versioning semantics defined by `semver`_.
-
-    This means we are no longer allowed to use odd/even versioning semantics.
-    By our previous versioning scheme this stable release should have
-    been version 2.2.
-
-.. _`semver`: http://semver.org
-
-* Now depends on Carrot 0.10.7.
-
-* No longer depends on SQLAlchemy, this needs to be installed separately
-  if the database result backend is used.
-
-* django-celery now comes with a monitor for the Django Admin interface.
-  This can also be used if you're not a Django user.  See
-  :ref:`monitoring-django-admin` and :ref:`monitoring-nodjango` for more information.
-
-* If you get an error after upgrading saying
-  `AttributeError: 'module' object has no attribute 'system'`:
-
-    This is because the `celery.platform` module has been
-    renamed to `celery.platforms` to not collide with the built-in
-    :mod:`platform` module.
-
-    You have to remove the old :file:`platform.py` (and maybe
-    :file:`platform.pyc`) file from your previous Celery installation.
-
-    To do this use :program:`python` to find the location
-    of this module::
-
-        $ python
-        >>> import celery.platform
-        >>> celery.platform
-        <module 'celery.platform' from '/opt/devel/celery/celery/platform.pyc'>
-
-    Here the compiled module is in :file:`/opt/devel/celery/celery/`,
-    to remove the offending files do::
-
-        $ rm -f /opt/devel/celery/celery/platform.py*
-
-.. _v210-news:
-
-News
-----
-
-* Added support for expiration of AMQP results (requires RabbitMQ 2.1.0)
-
-    The new configuration option :setting:`CELERY_AMQP_TASK_RESULT_EXPIRES`
-    sets the expiry time in seconds (can be int or float):
-
-    .. code-block:: python
-
-        CELERY_AMQP_TASK_RESULT_EXPIRES = 30 * 60  # 30 minutes.
-        CELERY_AMQP_TASK_RESULT_EXPIRES = 0.80     # 800 ms.
-
-* celeryev: Event Snapshots
-
-    If enabled, :program:`celeryd` sends messages about what the worker is doing.
-    These messages are called "events".
-    The events are used by real-time monitors to show what the
-    cluster is doing, but they are not very useful for monitoring
-    over a longer period of time.  Snapshots
-    let you take "pictures" of the cluster's state at regular intervals.
-    These can then be stored in a database to generate statistics,
-    or even used for monitoring over longer time periods.
-
-    django-celery now comes with a Celery monitor for the Django
-    Admin interface. To use this you need to run the django-celery
-    snapshot camera, which stores snapshots to the database at configurable
-    intervals.  See :ref:`monitoring-nodjango` for information about using
-    this monitor if you're not using Django.
-
-    To use the Django admin monitor you need to do the following:
-
-    1. Create the new database tables::
-
-        $ python manage.py syncdb
-
-    2. Start the django-celery snapshot camera::
-
-        $ python manage.py celerycam
-
-    3. Open up the django admin to monitor your cluster.
-
-    The admin interface shows tasks, worker nodes, and even
-    lets you perform some actions, like revoking and rate limiting tasks,
-    and shutting down worker nodes.
-
-    There's also a Debian init.d script for :mod:`~celery.bin.celeryev` available,
-    see :doc:`tutorials/daemonizing` for more information.
-
-    New command line arguments to celeryev:
-
-        * :option:`-c|--camera`: Snapshot camera class to use.
-        * :option:`--logfile|-f`: Log file.
-        * :option:`--loglevel|-l`: Log level.
-        * :option:`--maxrate|-r`: Shutter rate limit.
-        * :option:`--freq|-F`: Shutter frequency.
-
-    The :option:`--camera` argument is the name of a class used to take
-    snapshots with. It must support the interface defined by
-    :class:`celery.events.snapshot.Polaroid`.
-
-    Shutter frequency controls how often the camera thread wakes up,
-    while the rate limit controls how often it will actually take
-    a snapshot.
-    The rate limit can be an integer (snapshots/s), or a rate limit string
-    which has the same syntax as the task rate limit strings (`"200/m"`,
-    `"10/s"`, `"1/h"`, etc).
-
-    For the Django camera case, this rate limit can be used to control
-    how often the snapshots are written to the database, and the frequency
-    used to control how often the thread wakes up to check if there's
-    anything new.
-
-    The rate limit is off by default, which means it will take a snapshot
-    every :option:`--frequency` seconds.
-
-.. seealso::
-
-    :ref:`monitoring-django-admin` and :ref:`monitoring-snapshots`.
-
-* :func:`~celery.task.control.broadcast`: Added callback argument, this can be
-  used to process replies immediately as they arrive.
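-
-    Example sketch (the replies are whatever the command returns)::
-
-        >>> from celery.task.control import broadcast
-        >>> def on_reply(reply):
-        ...     print("Reply received: %r" % (reply, ))
-        >>> broadcast("ping", reply=True, callback=on_reply)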
-
-* celeryctl: New command-line utility to manage and inspect worker nodes,
-  apply tasks and inspect the results of tasks.
-
-    .. seealso::
-        The :ref:`monitoring-celeryctl` section in the :ref:`guide`.
-
-    Some examples::
-
-        $ celeryctl apply tasks.add -a '[2, 2]' --countdown=10
-
-        $ celeryctl inspect active
-        $ celeryctl inspect registered_tasks
-        $ celeryctl inspect scheduled
-        $ celeryctl inspect --help
-        $ celeryctl apply --help
-
-* Added the ability to set an expiry date and time for tasks.
-
-    Example::
-
-        >>> # Task expires after one minute from now.
-        >>> task.apply_async(args, kwargs, expires=60)
-        >>> # Also supports datetime
-        >>> task.apply_async(args, kwargs,
-        ...                  expires=datetime.now() + timedelta(days=1)
-
-    When a worker receives a task that has been expired it will be
-    marked as revoked (:exc:`~@TaskRevokedError`).
-
-* Changed the way logging is configured.
-
-    We now configure the root logger instead of only configuring
-    our custom logger. In addition we don't hijack
-    the multiprocessing logger anymore, but instead use a custom logger name
-    for different applications:
-
-    =====================================  =====================================
-    **Application**                        **Logger Name**
-    =====================================  =====================================
-    `celeryd`                              "celery"
-    `celerybeat`                           "celery.beat"
-    `celeryev`                             "celery.ev"
-    =====================================  =====================================
-
-    This means that the `loglevel` and `logfile` arguments will
-    affect all registered loggers (even those from 3rd party libraries),
-    unless you configure the loggers manually as shown below.
-
-    *Users can choose to configure logging by subscribing to the
-    :signal:`~celery.signals.setup_logging` signal:*
-
-    .. code-block:: python
-
-        from logging.config import fileConfig
-        from celery import signals
-
-        @signals.setup_logging.connect
-        def setup_logging(**kwargs):
-            fileConfig("logging.conf")
-
-    If there are no receivers for this signal, the logging subsystem
-    will be configured using the :option:`--loglevel`/:option:`--logfile`
-    argument, this will be used for *all defined loggers*.
-
-    Remember that celeryd also redirects stdout and stderr
-    to the celery logger; if you configure logging manually
-    you also need to redirect the stdouts yourself:
-
-    .. code-block:: python
-
-        from logging.config import fileConfig
-        from celery import log
-
-        def setup_logging(**kwargs):
-            import logging
-            fileConfig("logging.conf")
-            stdouts = logging.getLogger("mystdoutslogger")
-            log.redirect_stdouts_to_logger(stdouts, loglevel=logging.WARNING)
-
-* celeryd: Added command-line option :option:`-I`/:option:`--include`:
-
-    A comma separated list of (task) modules to be imported.
-
-    Example::
-
-        $ celeryd -I app1.tasks,app2.tasks
-
-* celeryd: now emits a warning if running as the root user (euid is 0).
-
-* :func:`celery.messaging.establish_connection`: Ability to override the
-  defaults used, via the keyword argument "defaults".
-
-* celeryd: Now uses `multiprocessing.freeze_support()` so that it should work
-  with **py2exe**, **PyInstaller**, **cx_Freeze**, etc.
-
-* celeryd: Now includes more metadata for the :state:`STARTED` state: PID and
-  host name of the worker that started the task.
-
-    See issue #181
-
-* subtask: Merge additional keyword arguments to `subtask()` into task keyword
-  arguments.
-
-    e.g.:
-
-        >>> s = subtask((1, 2), {"foo": "bar"}, baz=1)
-        >>> s.args
-        (1, 2)
-        >>> s.kwargs
-        {"foo": "bar", "baz": 1}
-
-    See issue #182.
-
-* celeryd: Now emits a warning if there is already a worker node using the same
-  name running on the same virtual host.
-
-* AMQP result backend: Sending of results is now retried if the connection
-  is down.
-
-* AMQP result backend: `result.get()`: Wait for next state if state is not
-  in :data:`~celery.states.READY_STATES`.
-
-* TaskSetResult now supports subscript access (indexing).
-
-    ::
-
-        >>> res = TaskSet(tasks).apply_async()
-        >>> res[0].get()
-
-* Added `Task.send_error_emails` + `Task.error_whitelist`, so these can
-  be configured per task instead of just by the global setting.
-
-* Added `Task.store_errors_even_if_ignored`, so it can be changed per Task,
-  not just by the global setting.
-
-* The crontab scheduler no longer wakes up every second, but implements
-  `remaining_estimate` (*Optimization*).
-
-* celeryd: Store :state:`FAILURE` result if the
-  :exc:`~@WorkerLostError` exception occurs (worker process
-  disappeared).
-
-* celeryd: Store :state:`FAILURE` result if one of the `*TimeLimitExceeded`
-  exceptions occurs.
-
-* Refactored the periodic task responsible for cleaning up results.
-
-    * The backend cleanup task is now only added to the schedule if
-      :setting:`CELERY_TASK_RESULT_EXPIRES` is set.
-
-    * If the schedule already contains a periodic task named
-      "celery.backend_cleanup" it won't change it, so the behavior of the
-      backend cleanup task can be easily changed.
-
-    * The task is now run every day at 4:00 AM, rather than every day since
-      the first time it was run (using a crontab schedule instead of
-      `run_every`).
-
-    * Renamed `celery.task.builtins.DeleteExpiredTaskMetaTask`
-      -> :class:`celery.task.builtins.backend_cleanup`.
-
-    * The task itself has been renamed from "celery.delete_expired_task_meta"
-      to "celery.backend_cleanup"
-
-    See issue #134.
-
-* Implemented `AsyncResult.forget` for sqla/cache/redis/tyrant backends.
-  (Forget and remove task result).
-
-    See issue #184.
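-
-    Example (assuming a simple `add` task)::
-
-        >>> result = add.delay(2, 2)
-        >>> result.get()
-        4
-        >>> result.forget()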
-
-* :meth:`TaskSetResult.join <celery.result.TaskSetResult.join>`:
-  Added 'propagate=True' argument.
-
-  When set to :const:`False` exceptions occurring in subtasks will
-  not be re-raised.
-
-* Added `Task.update_state(task_id, state, meta)`
-  as a shortcut to `task.backend.store_result(task_id, meta, state)`.
-
-    The backend interface is "private" and the terminology outdated,
-    so better to move this to :class:`~celery.task.base.Task` so it can be
-    used.
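-
-    A usage sketch (magic kwargs style, current in this release; the
-    "PROGRESS" state and the meta dict are arbitrary):
-
-    .. code-block:: python
-
-        from celery.task import task
-
-        @task()
-        def crunch(items, **kwargs):
-            total = len(items)
-            for i, item in enumerate(items):
-                # ... do the actual work here ...
-                crunch.update_state(kwargs["task_id"], "PROGRESS",
-                                    {"current": i, "total": total})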
-
-* timer2: Set `self.running=False` in
-  :meth:`~celery.utils.timer2.Timer.stop` so it won't try to join again on
-  subsequent calls to `stop()`.
-
-* Log colors are now disabled by default on Windows.
-
-* `celery.platform` renamed to :mod:`celery.platforms`, so it doesn't
-  collide with the built-in :mod:`platform` module.
-
-* Exceptions occurring in Mediator+Pool callbacks are now caught and logged
-  instead of taking down the worker.
-
-* Redis result backend: Now supports result expiration using the Redis
-  `EXPIRE` command.
-
-* unit tests: Don't leave threads running at tear down.
-
-* celeryd: Task results shown in logs are now truncated to 46 chars.
-
-* `Task.__name__` is now an alias to `self.__class__.__name__`.
-  This way tasks introspect more like regular functions.
-
-* `Task.retry`: Now raises :exc:`TypeError` if kwargs argument is empty.
-
-    See issue #164.
-
-* timedelta_seconds: Use `timedelta.total_seconds` if running on Python 2.7
-
-* :class:`~celery.datastructures.TokenBucket`: Generic Token Bucket algorithm
-
-* :mod:`celery.events.state`: Recording of cluster state can now
-  be paused and resumed, including support for buffering.
-
-
-    .. method:: State.freeze(buffer=True)
-
-        Pauses recording of the stream.
-
-        If `buffer` is true, events received while being frozen will be
-        buffered, and may be replayed later.
-
-    .. method:: State.thaw(replay=True)
-
-        Resumes recording of the stream.
-
-        If `replay` is true, then the recorded buffer will be applied.
-
-    .. method:: State.freeze_while(fun)
-
-        With a function to apply, freezes the stream before,
-        and replays the buffer after the function returns.
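-
-    A usage sketch (`take_snapshot` stands in for your own handler;
-    `state` is a :class:`~celery.events.state.State` instance)::
-
-        state.freeze(buffer=True)
-        try:
-            take_snapshot(state)
-        finally:
-            state.thaw(replay=True)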
-
-* :meth:`EventReceiver.capture <celery.events.EventReceiver.capture>`
-  Now supports a timeout keyword argument.
-
-* celeryd: The mediator thread is now disabled if
-  :setting:`CELERY_RATE_LIMITS` is enabled, and tasks are directly sent to the
-  pool without going through the ready queue (*Optimization*).
-
-.. _v210-fixes:
-
-Fixes
------
-
-* Pool: Process timed out by `TimeoutHandler` must be joined by the Supervisor,
-  so don't remove it from the internal process list.
-
-    See issue #192.
-
-* `TaskPublisher.delay_task` now supports exchange argument, so exchange can be
-  overridden when sending tasks in bulk using the same publisher.
-
-    See issue #187.
-
-* celeryd no longer marks tasks as revoked if :setting:`CELERY_IGNORE_RESULT`
-  is enabled.
-
-    See issue #207.
-
-* AMQP Result backend: Fixed bug with `result.get()` if
-  :setting:`CELERY_TRACK_STARTED` enabled.
-
-    `result.get()` would stop consuming after receiving the
-    :state:`STARTED` state.
-
-* Fixed bug where new processes created by the pool supervisor became stuck
-  while reading from the task Queue.
-
-    See http://bugs.python.org/issue10037
-
-* Fixed timing issue when declaring the remote control command reply queue.
-
-    This issue could result in replies being lost, but has now been fixed.
-
-* Backward compatible `LoggerAdapter` implementation: Now works for Python 2.4.
-
-    Also added support for several new methods:
-    `fatal`, `makeRecord`, `_log`, `log`, `isEnabledFor`,
-    `addHandler`, `removeHandler`.
-
-.. _v210-experimental:
-
-Experimental
-------------
-
-* celeryd-multi: Added daemonization support.
-
-    celeryd-multi can now be used to start, stop and restart worker nodes::
-
-        $ celeryd-multi start jerry elaine george kramer
-
-    This also creates PID files and log files (:file:`celeryd@jerry.pid`,
-    ..., :file:`celeryd@jerry.log`).  To specify a location for these files
-    use the `--pidfile` and `--logfile` arguments with the `%n`
-    format::
-
-        $ celeryd-multi start jerry elaine george kramer \
-                        --logfile=/var/log/celeryd@%n.log \
-                        --pidfile=/var/run/celeryd@%n.pid
-
-    Stopping::
-
-        $ celeryd-multi stop jerry elaine george kramer
-
-    Restarting. The nodes will be restarted one by one as the old ones
-    are shutdown::
-
-        $ celeryd-multi restart jerry elaine george kramer
-
-    Killing the nodes (**WARNING**: Will discard currently executing tasks)::
-
-        $ celeryd-multi kill jerry elaine george kramer
-
-    See `celeryd-multi help` for help.
-
-* celeryd-multi: `start` command renamed to `show`.
-
-    `celeryd-multi start` will now actually start and detach worker nodes.
-    To just generate the commands you have to use `celeryd-multi show`.
-
-* celeryd: Added `--pidfile` argument.
-
-    The worker will write its pid when it starts.  The worker will
-    not be started if this file exists and the pid contained is still alive.
-
-* Added generic init.d script using `celeryd-multi`
-
-    http://github.com/celery/celery/tree/master/extra/generic-init.d/celeryd
-
-.. _v210-documentation:
-
-Documentation
--------------
-
-* Added User guide section: Monitoring
-
-* Added user guide section: Periodic Tasks
-
-    Moved from `getting-started/periodic-tasks` and updated.
-
-* tutorials/external moved to new section: "community".
-
-* References have been added to all sections in the documentation.
-
-    This makes it easier to link between documents.
-
-.. _version-2.0.3:
-
-2.0.3
-=====
-:release-date: 2010-08-27 12:00 P.M CEST
-
-.. _v203-fixes:
-
-Fixes
------
-
-* celeryd: Properly handle connection errors happening while
-  closing consumers.
-
-* celeryd: Events are now buffered if the connection is down,
-  then sent when the connection is re-established.
-
-* No longer depends on the :mod:`mailer` package.
-
-    This package had a namespace collision with `django-mailer`,
-    so its functionality was replaced.
-
-* Redis result backend: Documentation typos: Redis doesn't have
-  database names, but database numbers. The default database is now 0.
-
-* :class:`~celery.task.control.inspect`:
-  `registered_tasks` was requesting an invalid command because of a typo.
-
-    See issue #170.
-
-* :setting:`CELERY_ROUTES`: Values defined in the route should now have
-  precedence over values defined in :setting:`CELERY_QUEUES` when merging
-  the two.
-
-    With the following settings::
-
-        CELERY_QUEUES = {"cpubound": {"exchange": "cpubound",
-                                      "routing_key": "cpubound"}}
-
-        CELERY_ROUTES = {"tasks.add": {"queue": "cpubound",
-                                       "routing_key": "tasks.add",
-                                       "serializer": "json"}}
-
-    The final routing options for `tasks.add` will become::
-
-        {"exchange": "cpubound",
-         "routing_key": "tasks.add",
-         "serializer": "json"}
-
-    This was not the case before: the values
-    in :setting:`CELERY_QUEUES` would take precedence.
-
-* Worker crashed if the value of :setting:`CELERY_TASK_ERROR_WHITELIST` was
-  not an iterable.
-
-* :func:`~celery.execute.apply`: Make sure `kwargs["task_id"]` is
-  always set.
-
-* `AsyncResult.traceback`: Now returns :const:`None`, instead of raising
-  :exc:`KeyError` if traceback is missing.
-
-* :class:`~celery.task.control.inspect`: Replies did not work correctly
-  if no destination was specified.
-
-* Can now store result/metadata for custom states.
-
-* celeryd: A warning is now emitted if the sending of task error
-  emails fails.
-
-* celeryev: Curses monitor no longer crashes if the terminal window
-  is resized.
-
-    See issue #160.
-
-* celeryd: On OS X it is not possible to run `os.exec*` in a process
-  that is threaded.
-
-    This breaks the SIGHUP restart handler,
-    and is now disabled on OS X, emitting a warning instead.
-
-    See issue #152.
-
-* :mod:`celery.execute.trace`: Properly handle `raise(str)`,
-  which is still allowed in Python 2.4.
-
-    See issue #175.
-
-* Using urllib2 in a periodic task on OS X crashed because
-  of the proxy auto detection used in OS X.
-
-    This is now fixed by using a workaround.
-    See issue #143.
-
-* Debian init scripts: Commands should not run in a sub shell
-
-    See issue #163.
-
-* Debian init scripts: Use the absolute path of celeryd to allow stat
-
-    See issue #162.
-
-.. _v203-documentation:
-
-Documentation
--------------
-
-* getting-started/broker-installation: Fixed typo
-
-    `set_permissions ""` -> `set_permissions ".*"`.
-
-* Tasks User Guide: Added section on database transactions.
-
-    See issue #169.
-
-* Routing User Guide: Fixed typo `"feed": -> {"queue": "feeds"}`.
-
-    See issue #169.
-
-* Documented the default values for the :setting:`CELERYD_CONCURRENCY`
-  and :setting:`CELERYD_PREFETCH_MULTIPLIER` settings.
-
-* Tasks User Guide: Fixed typos in the subtask example
-
-* celery.signals: Documented worker_process_init.
-
-* Daemonization cookbook: Need to export DJANGO_SETTINGS_MODULE in
-  `/etc/default/celeryd`.
-
-* Added some more FAQs from Stack Overflow.
-
-* Daemonization cookbook: Fixed typo `CELERYD_LOGFILE/CELERYD_PIDFILE`
-
-    to `CELERYD_LOG_FILE` / `CELERYD_PID_FILE`
-
-    Also added troubleshooting section for the init scripts.
-
-.. _version-2.0.2:
-
-2.0.2
-=====
-:release-date: 2010-07-22 11:31 A.M CEST
-
-* Routes: When using the dict route syntax, the exchange for a task
-  could disappear making the task unroutable.
-
-    See issue #158.
-
-* Test suite now passing on Python 2.4
-
-* No longer have to type `PYTHONPATH=.` to use celeryconfig in the current
-  directory.
-
-    This is accomplished by the default loader ensuring that the current
-    directory is in `sys.path` when loading the config module.
-    `sys.path` is reset to its original state after loading.
-
-    Adding the current working directory to `sys.path` without the user
-    knowing may be a security issue, as this means someone can drop a
-    Python module in the user's directory that executes arbitrary commands.
-    This was the original reason not to do this, but if done *only when
-    loading the config module*, this means that the behavior will only
-    apply to the modules imported in the config module, which I think is a
-    good compromise (certainly better than just explicitly setting
-    `PYTHONPATH=.` anyway).
-
-* Experimental Cassandra backend added.
-
-* celeryd: SIGHUP handler accidentally propagated to worker pool processes.
-
-    In combination with 7a7c44e39344789f11b5346e9cc8340f5fe4846c
-    this would make each child process start a new celeryd when
-    the terminal window was closed :/
-
-* celeryd: Do not install SIGHUP handler if running from a terminal.
-
-    This fixes the problem where celeryd is launched in the background
-    when closing the terminal.
-
-* celeryd: Now joins threads at shutdown.
-
-    See issue #152.
-
-* Test tear down: Don't use `atexit` but nose's `teardown()` functionality
-  instead.
-
-    See issue #154.
-
-* Debian init script for celeryd: Stop now works correctly.
-
-* Task logger: `warn` method added (synonym for `warning`).
-
-* Can now define a white list of errors to send error emails for.
-
-    Example::
-
-        CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError',)
-
-    See issue #153.
-
-* celeryd: Now handles overflow exceptions in `time.mktime` while parsing
-  the ETA field.
-
-* LoggerWrapper: Try to detect loggers logging back to stderr/stdout,
-  which would cause an infinite loop.
-
-* Added :class:`celery.task.control.inspect`: Inspects a running worker.
-
-    Examples::
-
-        # Inspect a single worker
-        >>> i = inspect("myworker.example.com")
-
-        # Inspect several workers
-        >>> i = inspect(["myworker.example.com", "myworker2.example.com"])
-
-        # Inspect all workers consuming on this vhost.
-        >>> i = inspect()
-
-        ### Methods
-
-        # Get currently executing tasks
-        >>> i.active()
-
-        # Get currently reserved tasks
-        >>> i.reserved()
-
-        # Get the current eta schedule
-        >>> i.scheduled()
-
-        # Worker statistics and info
-        >>> i.stats()
-
-        # List of currently revoked tasks
-        >>> i.revoked()
-
-        # List of registered tasks
-        >>> i.registered_tasks()
-
-* Remote control commands `dump_active`/`dump_reserved`/`dump_schedule`
-  now reply with detailed task requests.
-
-    These contain the original arguments and fields of the task requested.
-
-    In addition, the remote control command `set_loglevel` has been added;
-    this only changes the log level for the main process.
-
-* Worker control command execution now catches errors and returns their
-  string representation in the reply.
-
-* Functional test suite added.
-
-    :mod:`celery.tests.functional.case` contains utilities to start
-    and stop an embedded celeryd process, for use in functional testing.
-
-.. _version-2.0.1:
-
-2.0.1
-=====
-:release-date: 2010-07-09 03:02 P.M CEST
-
-* multiprocessing.pool: Now handles encoding errors, so that pickling errors
-  don't crash the worker processes.
-
-* The remote control command replies were not working with RabbitMQ 1.8.0's
-  stricter equivalence checks.
-
-    If you've already hit this problem you may have to delete the
-    declaration::
-
-        $ camqadm exchange.delete celerycrq
-
-    or::
-
-        $ python manage.py camqadm exchange.delete celerycrq
-
-* A bug sneaked into the ETA scheduler that made it only able to execute
-  one task per second(!)
-
-    The scheduler sleeps between iterations so it doesn't consume too much CPU.
-    It keeps a list of the scheduled items sorted by time, at each iteration
-    it sleeps for the remaining time of the item with the nearest deadline.
-    If there are no eta tasks it will sleep for a minimum amount of time, one
-    second by default.
-
-    A bug sneaked in here, making it sleep for one second for every task
-    that was scheduled. This has been fixed, so now it should move
-    tasks like a hot knife through butter.
-
-    In addition a new setting has been added to control the minimum sleep
-    interval; :setting:`CELERYD_ETA_SCHEDULER_PRECISION`. A good
-    value for this would be a float between 0 and 1, depending
-    on the needed precision. A value of 0.8 means that when the ETA of a task
-    is met, it will take at most 0.8 seconds for the task to be moved to the
-    ready queue.
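-
-    A rough sketch of the loop described above (names are hypothetical,
-    and the real scheduler also handles newly arriving tasks)::
-
-        import time
-
-        def eta_loop(schedule, precision=1.0):
-            # ``schedule`` is kept sorted by ETA; ``precision`` plays
-            # the role of CELERYD_ETA_SCHEDULER_PRECISION here.
-            while schedule:
-                eta, task = schedule[0]
-                remaining = eta - time.time()
-                if remaining > 0:
-                    # Sleep towards the nearest deadline, but wake up
-                    # at least every ``precision`` seconds.
-                    time.sleep(min(remaining, precision))
-                else:
-                    schedule.pop(0)
-                    move_to_ready(task)  # hypothetical helper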
-
-* Pool: Supervisor did not release the semaphore.
-
-    This would lead to a deadlock if all workers terminated prematurely.
-
-* Added Python version trove classifiers: 2.4, 2.5, 2.6 and 2.7
-
-* Tests now passing on Python 2.7.
-
-* Task.__reduce__: Tasks created using the task decorator can now be pickled.
-
-* setup.py: nose added to `tests_require`.
-
-* Pickle should now work with SQLAlchemy 0.5.x
-
-* New homepage design by Jan Henrik Helmers: http://celeryproject.org
-
-* New Sphinx theme by Armin Ronacher: http://docs.celeryproject.org/
-
-* Fixed "pending_xref" errors shown in the HTML rendering of the
-  documentation. Apparently this was caused by new changes in Sphinx 1.0b2.
-
-* Router classes in :setting:`CELERY_ROUTES` are now imported lazily.
-
-    Importing a router class in a module that also loads the Celery
-    environment would cause a circular dependency. This is solved
-    by importing it when needed after the environment is set up.
-
-* :setting:`CELERY_ROUTES` was broken if set to a single dict.
-
-    This example in the docs should now work again::
-
-        CELERY_ROUTES = {"feed.tasks.import_feed": "feeds"}
-
-* `CREATE_MISSING_QUEUES` was not honored by apply_async.
-
-* New remote control command: `stats`
-
-    Dumps information about the worker, like pool process ids, and
-    total number of tasks executed by type.
-
-    Example reply::
-
-        [{'worker.local':
-            {'total': {'tasks.sleeptask': 6},
-             'pool': {'timeouts': [None, None],
-                      'processes': [60376, 60377],
-                      'max-concurrency': 2,
-                      'max-tasks-per-child': None,
-                      'put-guarded-by-semaphore': True}}}]
-
-* New remote control command: `dump_active`
-
-    Gives a list of tasks currently being executed by the worker.
-    By default arguments are passed through repr in case there
-    are arguments that are not JSON encodable. If you know
-    the arguments are JSON safe, you can pass the argument `safe=True`.
-
-    Example reply::
-
-        >>> broadcast("dump_active", arguments={"safe": False}, reply=True)
-        [{'worker.local': [
-            {'args': '(1,)',
-             'time_start': 1278580542.6300001,
-             'name': 'tasks.sleeptask',
-             'delivery_info': {
-                 'consumer_tag': '30',
-                 'routing_key': 'celery',
-                 'exchange': 'celery'},
-             'hostname': 'casper.local',
-             'acknowledged': True,
-             'kwargs': '{}',
-             'id': '802e93e9-e470-47ed-b913-06de8510aca2',
-            }
-        ]}]
-
-* Added experimental support for persistent revokes.
-
-    Use the `-S|--statedb` argument to celeryd to enable it::
-
-        $ celeryd --statedb=/var/run/celeryd
-
-    This will use the file: `/var/run/celeryd.db`,
-    as the `shelve` module automatically adds the `.db` suffix.
-
-.. _version-2.0.0:
-
-2.0.0
-=====
-:release-date: 2010-07-02 02:30 P.M CEST
-
-Foreword
---------
-
-Celery 2.0 contains backward incompatible changes, the most important
-being that the Django dependency has been removed so Celery no longer
-supports Django out of the box, but instead as an add-on package
-called `django-celery`_.
-
-We're very sorry for breaking backwards compatibility, but there are
-also many new and exciting features to make up for the time you lose
-upgrading, so be sure to read the :ref:`News <v200-news>` section.
-
-Quite a lot of potential users have been upset about the Django dependency,
-so maybe this is a chance to get wider adoption by the Python community as
-well.
-
-Big thanks to all contributors, testers and users!
-
-.. _v200-django-upgrade:
-
-Upgrading for Django-users
---------------------------
-
-Django integration has been moved to a separate package: `django-celery`_.
-
-* To upgrade you need to install the `django-celery`_ module and change::
-
-    INSTALLED_APPS = "celery"
-
-  to::
-
-    INSTALLED_APPS = "djcelery"
-
-* If you use `mod_wsgi` you need to add the following line to your `.wsgi`
-  file::
-
-    import os
-    os.environ["CELERY_LOADER"] = "django"
-
-* The following modules have been moved to `django-celery`_:
-
-    =====================================  =====================================
-    **Module name**                        **Replace with**
-    =====================================  =====================================
-    `celery.models`                        `djcelery.models`
-    `celery.managers`                      `djcelery.managers`
-    `celery.views`                         `djcelery.views`
-    `celery.urls`                          `djcelery.urls`
-    `celery.management`                    `djcelery.management`
-    `celery.loaders.djangoapp`             `djcelery.loaders`
-    `celery.backends.database`             `djcelery.backends.database`
-    `celery.backends.cache`                `djcelery.backends.cache`
-    =====================================  =====================================
-
-Importing :mod:`djcelery` will automatically set up Celery to use the Django
-loader.  It does this by setting the :envvar:`CELERY_LOADER` environment
-variable to `"django"` (it won't change it if a loader is already set.)
-
-When the Django loader is used, the "database" and "cache" result backend
-aliases will point to the :mod:`djcelery` backends instead of the built-in backends,
-and configuration will be read from the Django settings.
-
-.. _`django-celery`: http://pypi.python.org/pypi/django-celery
-
-.. _v200-upgrade:
-
-Upgrading for others
---------------------
-
-.. _v200-upgrade-database:
-
-Database result backend
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The database result backend is now using `SQLAlchemy`_ instead of the
-Django ORM, see `Supported Databases`_ for a table of supported databases.
-
-The `DATABASE_*` settings have been replaced by a single setting:
-:setting:`CELERY_RESULT_DBURI`. The value here should be an
-`SQLAlchemy Connection String`_, some examples include:
-
-.. code-block:: python
-
-    # sqlite (filename)
-    CELERY_RESULT_DBURI = "sqlite:///celerydb.sqlite"
-
-    # mysql
-    CELERY_RESULT_DBURI = "mysql://scott:tiger@localhost/foo"
-
-    # postgresql
-    CELERY_RESULT_DBURI = "postgresql://scott:tiger@localhost/mydatabase"
-
-    # oracle
-    CELERY_RESULT_DBURI = "oracle://scott:tiger@127.0.0.1:1521/sidname"
-
-See `SQLAlchemy Connection Strings`_ for more information about connection
-strings.
-
-To specify additional SQLAlchemy database engine options you can use
-the :setting:`CELERY_RESULT_ENGINE_OPTIONS` setting::
-
-    # echo enables verbose logging from SQLAlchemy.
-    CELERY_RESULT_ENGINE_OPTIONS = {"echo": True}
-
-.. _`SQLAlchemy`:
-    http://www.sqlalchemy.org
-.. _`Supported Databases`:
-    http://www.sqlalchemy.org/docs/core/engines.html#supported-databases
-.. _`SQLAlchemy Connection String`:
-    http://www.sqlalchemy.org/docs/core/engines.html#database-urls
-.. _`SQLAlchemy Connection Strings`:
-    http://www.sqlalchemy.org/docs/core/engines.html#database-urls
-
-.. _v200-upgrade-cache:
-
-Cache result backend
-~~~~~~~~~~~~~~~~~~~~
-
-The cache result backend is no longer using the Django cache framework,
-but it supports mostly the same configuration syntax::
-
-    CELERY_CACHE_BACKEND = "memcached://A.example.com:11211;B.example.com"
-
-To use the cache backend you must either have the `pylibmc`_ or
-`python-memcached`_ library installed, of which the former is regarded
-as the best choice.
-
-.. _`pylibmc`: http://pypi.python.org/pypi/pylibmc
-.. _`python-memcached`: http://pypi.python.org/pypi/python-memcached
-
-The supported backend types are `memcached://` and `memory://`;
-we haven't felt the need to support any of the other backends
-provided by Django.
-
-.. _v200-incompatible:
-
-Backward incompatible changes
------------------------------
-
-* Default (python) loader now prints a warning on missing `celeryconfig.py`
-  instead of raising :exc:`ImportError`.
-
-    celeryd raises :exc:`~@ImproperlyConfigured` if the configuration
-    is not set up. This makes it possible to use `--help` etc., without having a
-    working configuration.
-
-    Also, this makes it possible to use the client side of celery without being
-    configured::
-
-        >>> from carrot.connection import BrokerConnection
-        >>> conn = BrokerConnection("localhost", "guest", "guest", "/")
-        >>> from celery.execute import send_task
-        >>> r = send_task("celery.ping", args=(), kwargs={}, connection=conn)
-        >>> from celery.backends.amqp import AMQPBackend
-        >>> r.backend = AMQPBackend(connection=conn)
-        >>> r.get()
-        'pong'
-
-* The following deprecated settings have been removed (as scheduled by
-  the `deprecation timeline`_):
-
-    =====================================  =====================================
-    **Setting name**                       **Replace with**
-    =====================================  =====================================
-    `CELERY_AMQP_CONSUMER_QUEUES`          `CELERY_QUEUES`
-    `CELERY_AMQP_EXCHANGE`                 `CELERY_DEFAULT_EXCHANGE`
-    `CELERY_AMQP_EXCHANGE_TYPE`            `CELERY_DEFAULT_EXCHANGE_TYPE`
-    `CELERY_AMQP_CONSUMER_ROUTING_KEY`     `CELERY_QUEUES`
-    `CELERY_AMQP_PUBLISHER_ROUTING_KEY`    `CELERY_DEFAULT_ROUTING_KEY`
-    =====================================  =====================================
-
-.. _`deprecation timeline`:
-    http://celery.github.com/celery/internals/deprecation.html
-
-* The `celery.task.rest` module has been removed, use :mod:`celery.task.http`
-  instead (as scheduled by the `deprecation timeline`_).
-
-* It's no longer allowed to skip the class name in loader names
-  (as scheduled by the `deprecation timeline`_):
-
-    The implicit `Loader` class name is no longer assumed,
-    so if you use e.g.::
-
-        CELERY_LOADER = "myapp.loaders"
-
-    You need to include the loader class name, like this::
-
-        CELERY_LOADER = "myapp.loaders.Loader"
-
-* :setting:`CELERY_TASK_RESULT_EXPIRES` now defaults to 1 day.
-
-    Previous default setting was to expire in 5 days.
-
-* AMQP backend: Don't use different values for `auto_delete`.
-
-    This bug became visible with RabbitMQ 1.8.0, which no longer
-    allows conflicting declarations for the auto_delete and durable settings.
-
-    If you've already used celery with this backend chances are you
-    have to delete the previous declaration::
-
-        $ camqadm exchange.delete celeryresults
-
-* Now uses pickle instead of cPickle on Python versions <= 2.5
-
-    cPickle is broken in Python <= 2.5.
-
-    It unsafely and incorrectly uses relative instead of absolute imports,
-    so e.g.::
-
-          exceptions.KeyError
-
-    becomes::
-
-          celery.exceptions.KeyError
-
-    Your best choice is to upgrade to Python 2.6,
-    as while the pure pickle version has worse performance,
-    it is the only safe option for older Python versions.
-
-.. _v200-news:
-
-News
-----
-
-* **celeryev**: Curses Celery Monitor and Event Viewer.
-
-    This is a simple monitor allowing you to see what tasks are
-    executing in real-time and investigate tracebacks and results of ready
-    tasks. It also enables you to set new rate limits and revoke tasks.
-
-    Screenshot:
-
-    .. figure:: images/celeryevshotsm.jpg
-
-    If you run `celeryev` with the `-d` switch it will act as an event
-    dumper, simply dumping the events it receives to standard out::
-
-        $ celeryev -d
-        -> celeryev: starting capture...
-        casper.local [2010-06-04 10:42:07.020000] heartbeat
-        casper.local [2010-06-04 10:42:14.750000] task received:
-            tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={}
-            eta=2010-06-04T10:42:16.669290, retries=0
-        casper.local [2010-06-04 10:42:17.230000] task started
-            tasks.add(61a68756-27f4-4879-b816-3cf815672b0e) args=[2, 2] kwargs={}
-        casper.local [2010-06-04 10:42:17.960000] task succeeded:
-            tasks.add(61a68756-27f4-4879-b816-3cf815672b0e)
-            args=[2, 2] kwargs={} result=4, runtime=0.782663106918
-
-    The fields here are, in order: *sender hostname*, *timestamp*,
-    *event type* and *additional event fields*.
-
-* AMQP result backend: Now supports `.ready()`, `.successful()`,
-  `.result`, `.status`, and even responds to changes in task state.
-
-* New user guides:
-
-    * :doc:`userguide/workers`
-    * :doc:`userguide/canvas`
-    * :doc:`userguide/routing`
-
-* celeryd: Standard out/error is now being redirected to the log file.
-
-* :mod:`billiard` has been moved back to the celery repository.
-
-    =====================================  =====================================
-    **Module name**                        **celery equivalent**
-    =====================================  =====================================
-    `billiard.pool`                        `celery.concurrency.processes.pool`
-    `billiard.serialization`               `celery.serialization`
-    `billiard.utils.functional`            `celery.utils.functional`
-    =====================================  =====================================
-
-    The :mod:`billiard` distribution may be maintained, depending on interest.
-
-* Now depends on :mod:`carrot` >= 0.10.5
-
-* Now depends on :mod:`pyparsing`
-
-* celeryd: Added `--purge` as an alias to `--discard`.
-
-* celeryd: Ctrl+C (SIGINT) once does warm shutdown, hitting Ctrl+C twice
-  forces termination.
-
-* Added support for using complex crontab expressions in periodic tasks. For
-  example, you can now use::
-
-    >>> crontab(minute="*/15")
-
-  or even::
-
-    >>> crontab(minute="*/30", hour="8-17,1-2", day_of_week="thu-fri")
-
-  See :doc:`userguide/periodic-tasks`.
-
-* celeryd: Now waits for available pool processes before applying new
-  tasks to the pool.
-
-    This means it doesn't have to wait for dozens of tasks to finish at shutdown
-    because it has applied prefetched tasks without having any pool
-    processes available to immediately accept them.
-
-    See issue #122.
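-
-    A rough sketch of the idea, assuming a pool with a
-    `multiprocessing.Pool`-style `apply_async(callback=...)` interface
-    (class and names are hypothetical)::
-
-        from threading import Semaphore
-
-        class GuardedPool(object):
-            # Only hand a task to the pool when a process is free, so
-            # prefetched tasks aren't committed to a saturated pool.
-
-            def __init__(self, pool, concurrency):
-                self.pool = pool
-                self._free = Semaphore(concurrency)
-
-            def apply_async(self, func, args=()):
-                self._free.acquire()  # blocks until a process is free
-                return self.pool.apply_async(
-                    func, args, callback=lambda result: self._free.release())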
-
-* New built-in way to do task callbacks using
-  :class:`~celery.subtask`.
-
-  See :doc:`userguide/canvas` for more information.
-
-* TaskSets can now contain several types of tasks.
-
-  :class:`~celery.task.sets.TaskSet` has been refactored to use
-  a new syntax, please see :doc:`userguide/canvas` for more information.
-
-  The previous syntax is still supported, but will be deprecated in
-  version 1.4.
-
-* TaskSet failed() result was incorrect.
-
-    See issue #132.
-
-* Now creates different loggers per task class.
-
-    See issue #129.
-
-* Missing queue definitions are now created automatically.
-
-    You can disable this using the :setting:`CELERY_CREATE_MISSING_QUEUES`
-    setting.
-
-    The missing queues are created with the following options::
-
-        CELERY_QUEUES[name] = {"exchange": name,
-                               "exchange_type": "direct",
-                               "routing_key": name}
-
-    This feature is added for easily setting up routing using the `-Q`
-    option to `celeryd`::
-
-        $ celeryd -Q video,image
-
-    See the new routing section of the User Guide for more information:
-    :doc:`userguide/routing`.
-
-* New Task option: `Task.queue`
-
-    If set, message options will be taken from the corresponding entry
-    in :setting:`CELERY_QUEUES`. `exchange`, `exchange_type` and `routing_key`
-    will be ignored.
-
-* Added support for task soft and hard time limits.
-
-    New settings added:
-
-    * :setting:`CELERYD_TASK_TIME_LIMIT`
-
-        Hard time limit. The worker processing the task will be killed and
-        replaced with a new one when this is exceeded.
-
-    * :setting:`CELERYD_SOFT_TASK_TIME_LIMIT`
-
-        Soft time limit. The :exc:`~@SoftTimeLimitExceeded`
-        exception will be raised when this is exceeded.  The task can catch
-        this to e.g. clean up before the hard time limit comes.
-
-    New command line arguments to celeryd added:
-    `--time-limit` and `--soft-time-limit`.
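-
-    For example, a task can catch the soft limit to clean up before the
-    hard limit hits (`handle` and `cleanup` are hypothetical helpers)::
-
-        from celery.decorators import task
-        from celery.exceptions import SoftTimeLimitExceeded
-
-        @task()
-        def process(items):
-            try:
-                for item in items:
-                    handle(item)       # hypothetical work
-            except SoftTimeLimitExceeded:
-                cleanup()              # hypothetical clean-up
-                raise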
-
-    What's left?
-
-    This won't work on platforms not supporting signals (and specifically
-    the `SIGUSR1` signal) yet, so an alternative, or the ability to disable
-    the feature altogether on nonconforming platforms, must be implemented.
-
-    Also when the hard time limit is exceeded, the task result should
-    be a `TimeLimitExceeded` exception.
-
-* Test suite is now passing without a running broker, using the carrot
-  in-memory backend.
-
-* Log output is now available in colors.
-
-    =====================================  =====================================
-    **Log level**                          **Color**
-    =====================================  =====================================
-    `DEBUG`                                Blue
-    `WARNING`                              Yellow
-    `CRITICAL`                             Magenta
-    `ERROR`                                Red
-    =====================================  =====================================
-
-    This is only enabled when the log output is a tty.
-    You can explicitly enable/disable this feature using the
-    :setting:`CELERYD_LOG_COLOR` setting.
-
-* Added support for task router classes (like the Django multi-db routers).
-
-    * New setting: :setting:`CELERY_ROUTES`
-
-    This is a single router, or a list of routers, to traverse when
-    sending tasks. Dictionaries in this list convert to a
-    :class:`celery.routes.MapRoute` instance.
-
-    Examples:
-
-        >>> CELERY_ROUTES = {"celery.ping": "default",
-                             "mytasks.add": "cpu-bound",
-                             "video.encode": {
-                                 "queue": "video",
-                                 "exchange": "media"
-                                 "routing_key": "media.video.encode"}}
-
-        >>> CELERY_ROUTES = ("myapp.tasks.Router",
-                             {"celery.ping": "default})
-
-    Where `myapp.tasks.Router` could be:
-
-    .. code-block:: python
-
-        class Router(object):
-
-            def route_for_task(self, task, args=None, kwargs=None):
-                if task == "celery.ping":
-                    return "default"
-
-    `route_for_task` may return a string or a dict: a string means it's a
-    queue name in :setting:`CELERY_QUEUES`, a dict means it's a custom route.
-
-    When sending tasks, the routers are consulted in order. The first
-    router that doesn't return `None` is the route to use. The message options
-    are then merged with the found route settings, where the router's settings
-    have priority.
-
-    For example, if :func:`~celery.execute.apply_async` has these arguments::
-
-       >>> Task.apply_async(immediate=False, exchange="video",
-       ...                  routing_key="video.compress")
-
-    and a router returns::
-
-        {"immediate": True,
-         "exchange": "urgent"}
-
-    the final message options will be::
-
-        immediate=True, exchange="urgent", routing_key="video.compress"
-
-    (and any default message options defined in the
-    :class:`~celery.task.base.Task` class)
-
-* New Task handler called after the task returns:
-  :meth:`~celery.task.base.Task.after_return`.
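-
-    A sketch of overriding it, assuming the handler receives the same
-    `einfo`-style arguments as the other handlers::
-
-        from celery.task.base import Task
-
-        class CleanupTask(Task):
-
-            def after_return(self, status, retval, task_id,
-                             args, kwargs, einfo):
-                # Runs after the task returns, whatever the final state.
-                release_resources()  # hypothetical helper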
-
-* :class:`~celery.datastructures.ExceptionInfo` is now passed to
-  :meth:`~celery.task.base.Task.on_retry`/
-  :meth:`~celery.task.base.Task.on_failure` as the `einfo` keyword argument.
-
-* celeryd: Added :setting:`CELERYD_MAX_TASKS_PER_CHILD` /
-  :option:`--maxtasksperchild`
-
-    Defines the maximum number of tasks a pool worker can process before
-    the process is terminated and replaced by a new one.
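-
-    Example usage (recycling each pool process after 100 tasks)::
-
-        $ celeryd --maxtasksperchild=100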
-
-* Revoked tasks now marked with state :state:`REVOKED`, and `result.get()`
-  will now raise :exc:`~@TaskRevokedError`.
-
-* :func:`celery.task.control.ping` now works as expected.
-
-* `apply(throw=True)` / :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`:
-  Makes eager execution re-raise task errors.
-
-* New signal: :signal:`~celery.signals.worker_process_init`: Sent inside the
-  pool worker process at init.
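-
-    A minimal sketch of connecting to it (handler name and helper are
-    hypothetical)::
-
-        from celery.signals import worker_process_init
-
-        def setup_child(**kwargs):
-            reset_connections()  # e.g. re-open per-process resources
-
-        worker_process_init.connect(setup_child)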
-
-* celeryd :option:`-Q` option: Ability to specify list of queues to use,
-  disabling other configured queues.
-
-    For example, if :setting:`CELERY_QUEUES` defines four
-    queues: `image`, `video`, `data` and `default`, the following
-    command would make celeryd only consume from the `image` and `video`
-    queues::
-
-        $ celeryd -Q image,video
-
-* celeryd: New return value for the `revoke` control command:
-
-    Now returns::
-
-        {"ok": "task $id revoked"}
-
-    instead of `True`.
-
-* celeryd: Can now enable/disable events using remote control.
-
-    Example usage:
-
-        >>> from celery.task.control import broadcast
-        >>> broadcast("enable_events")
-        >>> broadcast("disable_events")
-
-* Removed the top-level tests directory. Test config is now in celery.tests.config.
-
-    This means running the unit tests doesn't require any special setup.
-    `celery/tests/__init__` now configures the :envvar:`CELERY_CONFIG_MODULE`
-    and :envvar:`CELERY_LOADER` environment variables, so when `nosetests`
-    imports that, the unit test environment is all set up.
-
-    Before you run the tests you need to install the test requirements::
-
-        $ pip install -r requirements/test.txt
-
-    Running all tests::
-
-        $ nosetests
-
-    Specifying the tests to run::
-
-        $ nosetests celery.tests.test_task
-
-    Producing HTML coverage::
-
-        $ nosetests --with-coverage3
-
-    The coverage output is then located in `celery/tests/cover/index.html`.
-
-* celeryd: New option `--version`: Dump version info and exit.
-
-* :mod:`celeryd-multi <celery.bin.celeryd_multi>`: Tool for shell scripts
-  to start multiple workers.
-
-    Some examples::
-
-        # Advanced example with 10 workers:
-        #   * Three of the workers process the images and video queue
-        #   * Two of the workers process the data queue with loglevel DEBUG
-        #   * The rest process the default queue.
-        $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
-            -Q default -L:4,5 DEBUG
-
-        # get commands to start 3 workers, with 3 processes each
-        $ celeryd-multi start 3 -c 3
-        celeryd -n celeryd1.myhost -c 3
-        celeryd -n celeryd2.myhost -c 3
-        celeryd -n celeryd3.myhost -c 3
-
-        # start 3 named workers
-        $ celeryd-multi start image video data -c 3
-        celeryd -n image.myhost -c 3
-        celeryd -n video.myhost -c 3
-        celeryd -n data.myhost -c 3
-
-        # specify custom hostname
-        $ celeryd-multi start 2 -n worker.example.com -c 3
-        celeryd -n celeryd1.worker.example.com -c 3
-        celeryd -n celeryd2.worker.example.com -c 3
-
-        # Additional options are added to each celeryd,
-        # but you can also modify the options for ranges of, or single, workers
-
-        # 3 workers: Two with 3 processes, and one with 10 processes.
-        $ celeryd-multi start 3 -c 3 -c:1 10
-        celeryd -n celeryd1.myhost -c 10
-        celeryd -n celeryd2.myhost -c 3
-        celeryd -n celeryd3.myhost -c 3
-
-        # can also specify options for named workers
-        $ celeryd-multi start image video data -c 3 -c:image 10
-        celeryd -n image.myhost -c 10
-        celeryd -n video.myhost -c 3
-        celeryd -n data.myhost -c 3
-
-        # ranges and lists of workers in options are also allowed:
-        # (-c:1-3 can also be written as -c:1,2,3)
-        $ celeryd-multi start 5 -c 3  -c:1-3 10
-        celeryd -n celeryd1.myhost -c 10
-        celeryd -n celeryd2.myhost -c 10
-        celeryd -n celeryd3.myhost -c 10
-        celeryd -n celeryd4.myhost -c 3
-        celeryd -n celeryd5.myhost -c 3
-
-        # lists also work with named workers
-        $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
-        celeryd -n foo.myhost -c 10
-        celeryd -n bar.myhost -c 10
-        celeryd -n baz.myhost -c 10
-        celeryd -n xuzzy.myhost -c 3
-
-* The worker now calls the result backend's `process_cleanup` method
-  *after* task execution instead of before.
-
-* AMQP result backend now supports Pika.
-
-.. _version-1.0.6:
-
-1.0.6
-=====
-:release-date: 2010-06-30 09:57 A.M CEST
-
-* RabbitMQ 1.8.0 has extended its exchange equivalence tests to
-  include `auto_delete` and `durable`. This broke the AMQP backend.
-
-  If you've already used the AMQP backend this means you have to
-  delete the previous definitions::
-
-      $ camqadm exchange.delete celeryresults
-
-  or::
-
-      $ python manage.py camqadm exchange.delete celeryresults
-
-.. _version-1.0.5:
-
-1.0.5
-=====
-:release-date: 2010-06-01 02:36 P.M CEST
-
-.. _v105-critical:
-
-Critical
---------
-
-* SIGINT/Ctrl+C killed the pool, abruptly terminating the currently executing
-  tasks.
-
-    Fixed by making the pool worker processes ignore :const:`SIGINT`.
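-
-    In essence (a sketch of the fix, not the actual pool code)::
-
-        import signal
-
-        def pool_process_initializer():
-            # Children ignore SIGINT; the parent handles Ctrl+C and
-            # shuts the pool down without killing running tasks.
-            signal.signal(signal.SIGINT, signal.SIG_IGN)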
-
-* Should not close the consumers before the pool is terminated, just cancel
-  the consumers.
-
-    See issue #122.
-
-* Now depends on :mod:`billiard` >= 0.3.1
-
-* celeryd: Previously exceptions raised by worker components could stall
-  start-up; now it correctly logs the exceptions and shuts down.
-
-* celeryd: Prefetch counts were set too late. QoS is now set as early as possible,
-  so celeryd can't slurp in all the messages at start-up.
-
-.. _v105-changes:
-
-Changes
--------
-
-* :mod:`celery.contrib.abortable`: Abortable tasks.
-
-    Tasks that define steps of execution; the task can then
-    be aborted after each step has completed.
-
-* :class:`~celery.events.EventDispatcher`: No longer creates an AMQP channel
-  if events are disabled.
-
-* Added required RPM package names under the `[bdist_rpm]` section, to support
-  building RPMs from the sources using `setup.py`.
-
-* Running unit tests: :envvar:`NOSE_VERBOSE` environment var now enables verbose output from Nose.
-
-* :func:`celery.execute.apply`: Pass log file/log level arguments as task kwargs.
-
-    See issue #110.
-
-* celery.execute.apply: Should return the exception, not
-  :class:`~celery.datastructures.ExceptionInfo`, on error.
-
-    See issue #111.
-
-* Added new entries to the :doc:`FAQs <faq>`:
-
-    * Should I use retry or acks_late?
-    * Can I call a task by name?
-
-.. _version-1.0.4:
-
-1.0.4
-=====
-:release-date: 2010-05-31 09:54 A.M CEST
-
-* Changelog merged with 1.0.5 as the release was never announced.
-
-.. _version-1.0.3:
-
-1.0.3
-=====
-:release-date: 2010-05-15 03:00 P.M CEST
-
-.. _v103-important:
-
-Important notes
----------------
-
-* Messages are now acknowledged *just before* the task function is executed.
-
-    This is the behavior we've wanted all along, but couldn't have because of
-    limitations in the multiprocessing module.
-    The previous behavior was not good, and the situation worsened with the
-    release of 1.0.1, so this change will definitely improve
-    reliability, performance and operations in general.
-
-    For more information please see http://bit.ly/9hom6T
-
-* Database result backend: result now explicitly sets `null=True` as
-  `django-picklefield` version 0.1.5 changed the default behavior
-  right under our noses :(
-
-    See: http://bit.ly/d5OwMr
-
-    This means those who created their celery tables (via syncdb or
-    celeryinit) with picklefield versions >= 0.1.5 have to alter their tables to
-    allow the result field to be `NULL` manually.
-
-    MySQL::
-
-        ALTER TABLE celery_taskmeta MODIFY result TEXT NULL
-
-    PostgreSQL::
-
-        ALTER TABLE celery_taskmeta ALTER COLUMN result DROP NOT NULL
-
-* Removed `Task.rate_limit_queue_type`, as it was not really useful
-  and made it harder to refactor some parts.
-
-* Now depends on carrot >= 0.10.4
-
-* Now depends on billiard >= 0.3.0
-
-.. _v103-news:
-
-News
-----
-
-* AMQP backend: Added timeout support for `result.get()` /
-  `result.wait()`.
-
-* New task option: `Task.acks_late` (default: :setting:`CELERY_ACKS_LATE`)
-
-    Late ack means the task messages will be acknowledged **after** the task
-    has been executed, not *just before*, which is the default behavior.
-
-    .. note::
-
-        This means the tasks may be executed twice if the worker
-        crashes in mid-execution. Not acceptable for most
-        applications, but desirable for others.
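-
-    A sketch of enabling it per task, assuming the decorator forwards
-    task options::
-
-        from celery.decorators import task
-
-        @task(acks_late=True)
-        def process_order(order_id):
-            fulfill(order_id)  # hypothetical: must be safe to run twice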
-
-* Added crontab-like scheduling to periodic tasks.
-
-    Like a cron job, you can specify units of time for when
-    you would like the task to execute. While not a full implementation
-    of cron's features, it should cover most common scheduling needs.
-
-    You can specify a minute (0-59), an hour (0-23), and/or a day of the
-    week (0-6 where 0 is Sunday, or by names: sun, mon, tue, wed, thu, fri,
-    sat).
-
-    Examples:
-
-    .. code-block:: python
-
-        from celery.schedules import crontab
-        from celery.decorators import periodic_task
-
-        @periodic_task(run_every=crontab(hour=7, minute=30))
-        def every_morning():
-            print("Runs every morning at 7:30a.m")
-
-        @periodic_task(run_every=crontab(hour=7, minute=30, day_of_week="mon"))
-        def every_monday_morning():
-            print("Run every monday morning at 7:30a.m")
-
-        @periodic_task(run_every=crontab(minute=30))
-        def every_hour():
-            print("Runs every hour on the clock. e.g. 1:30, 2:30, 3:30 etc.")
-
-    .. note::
-        This is a late addition. While we have unit tests, due to the
-        nature of this feature we haven't been able to completely test this
-        in practice, so consider it experimental.
-
-* `TaskPool.apply_async`: Now supports the `accept_callback` argument.
-
-* `apply_async`: Now raises :exc:`ValueError` if task args is not a list,
-  or kwargs is not a dict (Issue #95).
-
-* `Task.max_retries` can now be `None`, which means it will retry forever.
-
-* Celerybeat: Now reuses the same connection when publishing large
-  sets of tasks.
-
-* Modified the task locking example in the documentation to use
-  `cache.add` for atomic locking.
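-
-    The pattern relies on `cache.add` being atomic: it only sets the key
-    if it does not already exist. A condensed sketch::
-
-        from django.core.cache import cache
-
-        LOCK_EXPIRE = 60 * 5  # the lock expires after five minutes
-
-        def with_lock(lock_id, fn):
-            if cache.add(lock_id, "locked", LOCK_EXPIRE):  # atomic test-and-set
-                try:
-                    return fn()
-                finally:
-                    cache.delete(lock_id)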
-
-* Added experimental support for a *started* status on tasks.
-
-    If `Task.track_started` is enabled the task will report its status
-    as "started" when the task is executed by a worker.
-
-    The default value is `False` as the normal behaviour is to not
-    report that level of granularity. Tasks are either pending, finished,
-    or waiting to be retried. Having a "started" status can be useful
-    when there are long-running tasks and there is a need to report which
-    task is currently running.
-
-    The global default can be overridden by the :setting:`CELERY_TRACK_STARTED`
-    setting.
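-
-    Example (enabling it globally)::
-
-        CELERY_TRACK_STARTED = True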
-
-* User Guide: New section `Tips and Best Practices`.
-
-    Contributions welcome!
-
-.. _v103-remote-control:
-
-Remote control commands
------------------------
-
-* Remote control commands can now send replies back to the caller.
-
-    Existing commands have been improved to send replies, and the client
-    interface in `celery.task.control` has new keyword arguments: `reply`,
-    `timeout` and `limit`, where `reply` means it will wait for replies,
-    `timeout` is the time in seconds to stop waiting for replies, and `limit`
-    is the maximum number of replies to get.
-
-    By default, it will wait for as many replies as possible for one second.
-
-    * rate_limit(task_name, destination=all, reply=False, timeout=1, limit=0)
-
-        Worker returns `{"ok": message}` on success,
-        or `{"failure": message}` on failure.
-
-            >>> from celery.task.control import rate_limit
-            >>> rate_limit("tasks.add", "10/s", reply=True)
-            [{'worker1': {'ok': 'new rate limit set successfully'}},
-             {'worker2': {'ok': 'new rate limit set successfully'}}]
-
-    * ping(destination=all, reply=False, timeout=1, limit=0)
-
-        Worker returns the simple message `"pong"`.
-
-            >>> from celery.task.control import ping
-            >>> ping(reply=True)
-            [{'worker1': 'pong'},
-             {'worker2': 'pong'}]
-
-    * revoke(destination=all, reply=False, timeout=1, limit=0)
-
-        Worker simply returns `True`.
-
-            >>> from celery.task.control import revoke
-            >>> revoke("419e46eb-cf6a-4271-86a8-442b7124132c", reply=True)
-            [{'worker1': True},
-             {'worker2': True}]
-
-* You can now add your own remote control commands!
-
-    Remote control commands are functions registered in the command
-    registry. Registering a command is done using
-    :meth:`celery.worker.control.Panel.register`:
-
-    .. code-block:: python
-
-        from celery.task.control import Panel
-
-        @Panel.register
-        def reset_broker_connection(panel, **kwargs):
-            panel.consumer.reset_connection()
-            return {"ok": "connection re-established"}
-
-    With this module imported in the worker, you can launch the command
-    using `celery.task.control.broadcast`::
-
-        >>> from celery.task.control import broadcast
-        >>> broadcast("reset_broker_connection", reply=True)
-        [{'worker1': {'ok': 'connection re-established'}},
-         {'worker2': {'ok': 'connection re-established'}}]
-
-    **TIP** You can choose the worker(s) to receive the command
-    by using the `destination` argument::
-
-        >>> broadcast("reset_broker_connection", destination=["worker1"])
-        [{'worker1': {'ok': 'connection re-established'}}]
-
-* New remote control command: `dump_reserved`
-
-    Dumps tasks reserved by the worker, waiting to be executed::
-
-        >>> from celery.task.control import broadcast
-        >>> broadcast("dump_reserved", reply=True)
-        [{'myworker1': [<TaskRequest ....>]}]
-
-* New remote control command: `dump_schedule`
-
-    Dumps the worker's currently registered ETA schedule.
-    These are tasks with an `eta` (or `countdown`) argument
-    waiting to be executed by the worker.
-
-        >>> from celery.task.control import broadcast
-        >>> broadcast("dump_schedule", reply=True)
-        [{'w1': []},
-         {'w3': []},
-         {'w2': ['0. 2010-05-12 11:06:00 pri0 <TaskRequest
-                    {name:"opalfeeds.tasks.refresh_feed_slice",
-                     id:"95b45760-4e73-4ce8-8eac-f100aa80273a",
-                     args:"(<Feeds freq_max:3600 freq_min:60
-                                   start:2184.0 stop:3276.0>,)",
-                     kwargs:"{'page': 2}"}>']},
-         {'w4': ['0. 2010-05-12 11:00:00 pri0 <TaskRequest
-                    {name:"opalfeeds.tasks.refresh_feed_slice",
-                     id:"c053480b-58fb-422f-ae68-8d30a464edfe",
-                     args:"(<Feeds freq_max:3600 freq_min:60
-                                   start:1092.0 stop:2184.0>,)",
-                     kwargs:"{\'page\': 1}"}>',
-                '1. 2010-05-12 11:12:00 pri0 <TaskRequest
-                    {name:"opalfeeds.tasks.refresh_feed_slice",
-                     id:"ab8bc59e-6cf8-44b8-88d0-f1af57789758",
-                     args:"(<Feeds freq_max:3600 freq_min:60
-                                   start:3276.0 stop:4365>,)",
-                     kwargs:"{\'page\': 3}"}>']}]
-
-.. _v103-fixes:
-
-Fixes
------
-
-* Mediator thread no longer blocks for more than 1 second.
-
-    With rate limits enabled and when there was a lot of remaining time,
-    the mediator thread could block shutdown (and potentially block other
-    jobs from coming in).
-
-* Remote rate limits were not properly applied (Issue #98).
-
-* Now handles exceptions with Unicode messages correctly in
-  `TaskRequest.on_failure`.
-
-* Database backend: `TaskMeta.result`: default value should be `None`,
-  not an empty string.
-
-.. _version-1.0.2:
-
-1.0.2
-=====
-:release-date: 2010-03-31 12:50 P.M CET
-
-* Deprecated: :setting:`CELERY_BACKEND`, please use
-  :setting:`CELERY_RESULT_BACKEND` instead.
-
-* We now use a custom logger in tasks. This logger supports task magic
-  keyword arguments in formats.
-
-    The default format for tasks (:setting:`CELERYD_TASK_LOG_FORMAT`) now
-    includes the id and the name of tasks so the origin of task log messages
-    can easily be traced.
-
-    Example output::
-
-        [2010-03-25 13:11:20,317: INFO/PoolWorker-1]
-            [tasks.add(a6e1c5ad-60d9-42a0-8b24-9e39363125a4)] Hello from add
-
-    To revert to the previous behavior you can set::
-
-        CELERYD_TASK_LOG_FORMAT = """
-            [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
-        """.strip()
-
-* Unit tests: Don't disable the Django test database teardown;
-  instead, fixed the underlying issue, which was caused by modifications
-  to the `DATABASE_NAME` setting (Issue #82).
-
-* Django Loader: New config :setting:`CELERY_DB_REUSE_MAX` (max number of
-  tasks to reuse the same database connection)
-
-    The default is to use a new connection for every task.
-    We would very much like to reuse the connection, but a safe number of
-    reuses is not known, and we don't have any way to handle the errors
-    that might happen, which may even be database dependent.
-
-    See: http://bit.ly/94fwdd
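-
-    Example (reusing each connection for at most 100 tasks)::
-
-        CELERY_DB_REUSE_MAX = 100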
-
-* celeryd: The worker components are now configurable: :setting:`CELERYD_POOL`,
-  :setting:`CELERYD_CONSUMER`, :setting:`CELERYD_MEDIATOR`, and
-  :setting:`CELERYD_ETA_SCHEDULER`.
-
-    The default configuration is as follows:
-
-    .. code-block:: python
-
-        CELERYD_POOL = "celery.concurrency.processes.TaskPool"
-        CELERYD_MEDIATOR = "celery.worker.controllers.Mediator"
-        CELERYD_ETA_SCHEDULER = "celery.worker.controllers.ScheduleController"
-        CELERYD_CONSUMER = "celery.worker.consumer.Consumer"
-
-    The :setting:`CELERYD_POOL` setting makes it easy to swap out the
-    multiprocessing pool with a threaded pool, or how about a
-    twisted/eventlet pool?
-
-    Consider the competition for the first pool plug-in started!
-
-
-* Debian init scripts: Use `-a` not `&&` (Issue #82).
-
-* Debian init scripts: Now always preserve `$CELERYD_OPTS` from
-  `/etc/default/celeryd` and `/etc/default/celerybeat`.
-
-* celery.beat.Scheduler: Fixed a bug where the schedule was not properly
-  flushed to disk if it had not been correctly initialized.
-
-* celerybeat: Now syncs the schedule to disk when receiving the :sig:`SIGTERM`
-  and :sig:`SIGINT` signals.
-
-* Control commands: Make sure keyword arguments are not in Unicode.
-
-* ETA scheduler: Was missing a logger object, so the scheduler crashed
-  when trying to log that a task had been revoked.
-
-* management.commands.camqadm: Fixed typo `camqpadm` -> `camqadm`
-  (Issue #83).
-
-* PeriodicTask.delta_resolution: Was not working for days and hours;
-  now fixed by rounding to the nearest day/hour.
-
-* Fixed a potential infinite loop in `BaseAsyncResult.__eq__`, although
-  there is no evidence that it has ever been triggered.
-
-* celeryd: Now handles messages with encoding problems by acking them and
-  emitting an error message.
-
-.. _version-1.0.1:
-
-1.0.1
-=====
-:release-date: 2010-02-24 07:05 P.M CET
-
-* Tasks are now acknowledged early instead of late.
-
-    This is done because messages can only be acknowledged within the same
-    connection channel, so if the connection is lost we would have to refetch
-    the message again to acknowledge it.
-
-    This might or might not affect you, but mostly those running tasks with a
-    really long execution time are affected, as all tasks that have made it
-    all the way into the pool need to be executed before the worker can
-    safely terminate (this is at most the number of pool workers, multiplied
-    by the :setting:`CELERYD_PREFETCH_MULTIPLIER` setting.)
-
-    We multiply the prefetch count by default to increase the performance at
-    times with bursts of tasks with a short execution time. If this doesn't
-    apply to your use case, you should be able to set the prefetch multiplier
-    to zero, without sacrificing performance.
-
-    .. note::
-
-        A patch to :mod:`multiprocessing` is currently being
-        worked on, this patch would enable us to use a better solution, and is
-        scheduled for inclusion in the `2.0.0` release.
-
-* celeryd now shuts down cleanly when receiving the :sig:`SIGTERM` signal.
-
-* celeryd now does a cold shutdown if the :sig:`SIGINT` signal is received
-  (Ctrl+C); this means it tries to terminate as soon as possible.
-
-* Caching of results now moved to the base backend classes, so no need
-  to implement this functionality in each backend.
-
-* Caches are now also limited in size, so their memory usage doesn't grow
-  out of control.
-
-    You can set the maximum number of results the cache
-    can hold using the :setting:`CELERY_MAX_CACHED_RESULTS` setting (the
-    default is five thousand results). In addition, you can refetch already
-    retrieved results using `backend.reload_task_result` +
-    `backend.reload_taskset_result` (that's for those who want to send
-    results incrementally).
-
-* `celeryd` now works on Windows again.
-
-    .. warning::
-
-        If you're using Celery with Django, you can't use `project.settings`
-        as the settings module name, but the following should work::
-
-            $ python manage.py celeryd --settings=settings
-
-* Execution: `.messaging.TaskPublisher.send_task` now
-  incorporates all the functionality apply_async previously did.
-
-    Like converting countdowns to eta, so :func:`celery.execute.apply_async` is
-    now simply a convenient front-end to
-    :meth:`celery.messaging.TaskPublisher.send_task`, using
-    the task classes default options.
-
-    Also :func:`celery.execute.send_task` has been
-    introduced, which can apply tasks using just the task name (useful
-    if the client does not have the destination task in its task registry).
-
-    Example:
-
-        >>> from celery.execute import send_task
-        >>> result = send_task("celery.ping", args=[], kwargs={})
-        >>> result.get()
-        'pong'
-
-* `camqadm`: This is a new utility for command line access to the AMQP API.
-
-    Excellent for deleting queues/bindings/exchanges, experimentation and
-    testing::
-
-        $ camqadm
-        1> help
-
-    Gives an interactive shell, type `help` for a list of commands.
-
-    When using Django, use the management command instead::
-
-        $ python manage.py camqadm
-        1> help
-
-* Redis result backend: To conform to recent Redis API changes, the following
-  settings have been deprecated:
-
-        * `REDIS_TIMEOUT`
-        * `REDIS_CONNECT_RETRY`
-
-    These will emit a `DeprecationWarning` if used.
-
-    A `REDIS_PASSWORD` setting has been added, so you can use the new
-    simple authentication mechanism in Redis.
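-
-    For example, assuming the other `REDIS_*` connection settings of
-    this era::
-
-        REDIS_HOST = "localhost"
-        REDIS_PORT = 6379
-        REDIS_PASSWORD = "s3cr3t"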
-
-* The redis result backend no longer calls `SAVE` when disconnecting,
-  as this is apparently better handled by Redis itself.
-
-* If `settings.DEBUG` is on, celeryd now warns about the possible
-  memory leak this can result in.
-
-* The ETA scheduler now sleeps at most two seconds between iterations.
-
-* The ETA scheduler now deletes any revoked tasks it might encounter.
-
-    As revokes are not yet persistent, this is done to make sure the task
-    is revoked even though it's currently being held because its eta is e.g.
-    a week into the future.
-
-* The `task_id` argument is now respected even if the task is executed
-  eagerly (either using apply, or :setting:`CELERY_ALWAYS_EAGER`).
-
-* The internal queues are now cleared if the connection is reset.
-
-* New magic keyword argument: `delivery_info`.
-
-    Used by retry() to resend the task to its original destination using the same
-    exchange/routing_key.
-
-* Events: Fields were not passed by `.send()` (fixes the UUID key errors
-  in celerymon)
-
-* Added the `--schedule`/`-s` option to celeryd, so it is possible to
-  specify a custom schedule filename when using an embedded celerybeat
-  server (the `-B`/`--beat` option).
-
-* Better Python 2.4 compatibility. The test suite now passes.
-
-* task decorators: Now preserve the docstring as `cls.__doc__` (was previously
-  copied to `cls.run.__doc__`).
-
-* The `testproj` directory has been renamed to `tests` and we're now using
-  `nose` + `django-nose` for test discovery, and `unittest2` for test
-  cases.
-
-* New pip requirements files available in :file:`requirements`.
-
-* TaskPublisher: Declarations are now done once (per process).
-
-* Added `Task.delivery_mode` and the :setting:`CELERY_DEFAULT_DELIVERY_MODE`
-  setting.
-
-    These can be used to mark messages non-persistent (i.e. so they are
-    lost if the broker is restarted).
-
-* Now have our own `ImproperlyConfigured` exception, instead of using the
-  Django one.
-
-* Improvements to the Debian init scripts: Shows an error if the program is
-  not executable.  Does not modify `CELERYD` when using Django with
-  virtualenv.
-
-.. _version-1.0.0:
-
-1.0.0
-=====
-:release-date: 2010-02-10 04:00 P.M CET
-
-.. _v100-incompatible:
-
-Backward incompatible changes
------------------------------
-
-* Celery does not support detaching anymore, so you have to use the tools
-  available on your platform, or something like Supervisord to make
-  celeryd/celerybeat/celerymon into background processes.
-
-    We've had too many problems with celeryd daemonizing itself, so it was
-    decided it has to be removed. Example startup scripts have been added to
-    the `extra/` directory:
-
-    * Debian, Ubuntu, (start-stop-daemon)
-
-        `extra/debian/init.d/celeryd`
-        `extra/debian/init.d/celerybeat`
-
-    * Mac OS X launchd
-
-        `extra/mac/org.celeryq.celeryd.plist`
-        `extra/mac/org.celeryq.celerybeat.plist`
-        `extra/mac/org.celeryq.celerymon.plist`
-
-    * Supervisord (http://supervisord.org)
-
-        `extra/supervisord/supervisord.conf`
-
-    In addition to `--detach`, the following program arguments have been
-    removed: `--uid`, `--gid`, `--workdir`, `--chroot`, `--pidfile`,
-    `--umask`. All good daemonization tools should support equivalent
-    functionality, so don't worry.
-
-    Also the following configuration keys have been removed:
-    `CELERYD_PID_FILE`, `CELERYBEAT_PID_FILE`, `CELERYMON_PID_FILE`.
-
-* Default celeryd loglevel is now `WARN`; to enable the previous log level,
-  start celeryd with `--loglevel=INFO`.
-
-* Tasks are automatically registered.
-
-    This means you no longer have to register your tasks manually.
-    You don't have to change your old code right away, as it doesn't matter if
-    a task is registered twice.
-
-    If you don't want your task to be automatically registered you can set
-    the `abstract` attribute:
-
-    .. code-block:: python
-
-        class MyTask(Task):
-            abstract = True
-
-    With `abstract` set, only tasks subclassing this task will be
-    automatically registered (this works like the Django ORM).
-
-    If you don't want subclasses to be registered either, you can set the
-    `autoregister` attribute to `False`.
-
-    Incidentally, this change also fixes the problems with automatic name
-    assignment and relative imports. So you also don't have to specify a task name
-    anymore if you use relative imports.
-
-* You can no longer use regular functions as tasks.
-
-    This change was added because it makes the internals a lot cleaner
-    and simpler. However, you can now turn functions into tasks by using
-    the `@task` decorator:
-
-    .. code-block:: python
-
-        from celery.decorators import task
-
-        @task()
-        def add(x, y):
-            return x + y
-
-    .. seealso::
-
-        :ref:`guide-tasks` for more information about the task decorators.
-
-* The periodic task system has been rewritten to a centralized solution.
-
-    This means `celeryd` no longer schedules periodic tasks by default,
-    but a new daemon has been introduced: `celerybeat`.
-
-    To launch the periodic task scheduler you have to run celerybeat::
-
-        $ celerybeat
-
-    Make sure this is running on one server only; if you run it twice, all
-    periodic tasks will also be executed twice.
-
-    If you only have one worker server you can embed it into celeryd like this::
-
-        $ celeryd --beat # Embed celerybeat in celeryd.
-
-* The supervisor has been removed.
-
-    This means the `-S` and `--supervised` options to `celeryd` is
-    no longer supported. Please use something like http://supervisord.org
-    instead.
-
-* `TaskSet.join` has been removed, use `TaskSetResult.join` instead.
-
-* The task status `"DONE"` has been renamed to `"SUCCESS"`.
-
-* `AsyncResult.is_done` has been removed, use `AsyncResult.successful`
-  instead.
-
-* The worker no longer stores errors if `Task.ignore_result` is set, to
-  revert to the previous behaviour set
-  :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED` to `True`.
-
-* The statistics functionality has been removed in favor of events,
-  so the `-S` and `--statistics` switches have been removed.
-
-* The module `celery.task.strategy` has been removed.
-
-* `celery.discovery` has been removed, and its `autodiscover` function is
-  now in `celery.loaders.djangoapp`. Reason: Internal API.
-
-* The :envvar:`CELERY_LOADER` environment variable now needs the loader class
-  name in addition to the module name.
-
-    E.g. where you previously had `"celery.loaders.default"`, you now need
-    `"celery.loaders.default.Loader"`; using the previous syntax will result
-    in a `DeprecationWarning`.
-
-* Detecting the loader is now lazy, and so is not done when importing
-  `celery.loaders`.
-
-    To make this happen `celery.loaders.settings` has
-    been renamed to `load_settings` and is now a function returning the
-    settings object. `celery.loaders.current_loader` is now also
-    a function, returning the current loader.
-
-    So::
-
-        loader = current_loader
-
-    needs to be changed to::
-
-        loader = current_loader()
-
-.. _v100-deprecations:
-
-Deprecations
-------------
-
-* The following configuration variables have been renamed and will be
-  deprecated in v2.0:
-
-    * CELERYD_DAEMON_LOG_FORMAT -> CELERYD_LOG_FORMAT
-    * CELERYD_DAEMON_LOG_LEVEL -> CELERYD_LOG_LEVEL
-    * CELERY_AMQP_CONNECTION_TIMEOUT -> CELERY_BROKER_CONNECTION_TIMEOUT
-    * CELERY_AMQP_CONNECTION_RETRY -> CELERY_BROKER_CONNECTION_RETRY
-    * CELERY_AMQP_CONNECTION_MAX_RETRIES -> CELERY_BROKER_CONNECTION_MAX_RETRIES
-    * SEND_CELERY_TASK_ERROR_EMAILS -> CELERY_SEND_TASK_ERROR_EMAILS
-
-* The public API names in celery.conf have also changed to a consistent naming
-  scheme.
-
-* We now support consuming from an arbitrary number of queues.
-
-    To do this we had to rename the configuration syntax. If you use any of
-    the custom AMQP routing options (queue/exchange/routing_key, etc.), you
-    should read the new FAQ entry: http://bit.ly/aiWoH.
-
-    The previous syntax is deprecated and scheduled for removal in v2.0.
-
-* `TaskSet.run` has been renamed to `TaskSet.apply_async`.
-
-    `TaskSet.run` has now been deprecated, and is scheduled for
-    removal in v2.0.
-
-.. _v100-news:
-
-News
-----
-
-* Rate limiting support (per task type, or globally).
-
-* New periodic task system.
-
-* Automatic registration.
-
-* New cool task decorator syntax.
-
-* celeryd now sends events if enabled with the `-E` argument.
-
-    Excellent for monitoring tools, one is already in the making
-    (http://github.com/celery/celerymon).
-
-    Current events include: worker-heartbeat,
-    task-[received/succeeded/failed/retried],
-    worker-online, worker-offline.
-
-* You can now delete (revoke) tasks that have already been applied.
-
-* You can now set the hostname celeryd identifies as using the `--hostname`
-  argument.
-
-* Cache backend now respects the :setting:`CELERY_TASK_RESULT_EXPIRES` setting.
-
-* Message format has been standardized and now uses the ISO-8601 format
-  for dates instead of datetime objects.
-
-* `celeryd` now responds to the :sig:`SIGHUP` signal by restarting itself.
-
-* Periodic tasks are now scheduled on the clock.
-
-    I.e. `timedelta(hours=1)` means every hour at :00 minutes, not every
-    hour counting from when the server started.  To revert to the previous
-    behaviour you can set `PeriodicTask.relative = True`.
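-
-    For example (a sketch, assuming the `PeriodicTask` class of this
-    release; `sync_all` is hypothetical)::
-
-        from datetime import timedelta
-        from celery.task import PeriodicTask
-
-        class SyncFeeds(PeriodicTask):
-            run_every = timedelta(hours=1)
-            relative = True  # count from server start, as before
-
-            def run(self, **kwargs):
-                sync_all()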
-
-* Now supports passing execute options to a TaskSet's list of args, e.g.:
-
-    >>> ts = TaskSet(add, [([2, 2], {}, {"countdown": 1}),
-    ...                    ([4, 4], {}, {"countdown": 2}),
-    ...                    ([8, 8], {}, {"countdown": 3})])
-    >>> ts.run()
-
-* Got a 3x performance gain by setting the prefetch count to four times the
-  concurrency (from an average task round-trip of 0.1s to 0.03s!).
-
-    A new setting has been added: :setting:`CELERYD_PREFETCH_MULTIPLIER`, which
-    is set to `4` by default.
-
-* Improved support for webhook tasks.
-
-    `celery.task.rest` is now deprecated, replaced with the new and shiny
-    :mod:`celery.task.http`, with more reflective names, a sensible interface,
-    and the ability to override the methods used to perform HTTP requests.
-
-* The results of task sets are now cached by storing them in the result
-  backend.
-
-.. _v100-changes:
-
-Changes
--------
-
-* Now depends on carrot >= 0.8.1
-
-* New dependencies: billiard, python-dateutil, django-picklefield
-
-* No longer depends on python-daemon
-
-* The `uuid` distribution is added as a dependency when running Python 2.4.
-
-* Now remembers the previously detected loader by keeping it in
-  the :envvar:`CELERY_LOADER` environment variable.
-
-    This may help on Windows where fork emulation is used.
-
-* ETA no longer sends datetime objects, but uses ISO 8601 date format in a
-  string for better compatibility with other platforms.
-
-* No longer sends error mails for retried tasks.
-
-* Tasks can now override the backend used to store results.
-
-* Refactored the ExecuteWrapper; `apply` and :setting:`CELERY_ALWAYS_EAGER`
-  now also execute the task callbacks and signals.
-
-* Now using a proper scheduler for the tasks with an ETA.
-
-    This means waiting ETA tasks are sorted by time, so we don't have
-    to poll the whole list all the time.
-
-* Now also imports modules listed in :setting:`CELERY_IMPORTS` when running
-  with Django (as documented).
-
-* Log level for stdout/stderr changed from INFO to ERROR.
-
-* ImportErrors are now properly propagated when autodiscovering tasks.
-
-* You can now use `celery.messaging.establish_connection` to establish a
-  connection to the broker.
-
-* When running as a separate service the periodic task scheduler does some
-  smart moves to not poll too regularly.
-
-    If you need faster poll times you can lower the value
-    of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`.
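-
-    For example (an illustrative value; the assumption here is that the
-    interval is given in seconds)::
-
-        CELERYBEAT_MAX_LOOP_INTERVAL = 30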
-
-* You can now change periodic task intervals at runtime, by making
-  `run_every` a property, or subclassing `PeriodicTask.is_due`.
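-
-    A sketch of the property approach (the lookup function is
-    hypothetical)::
-
-        from datetime import timedelta
-        from celery.task import PeriodicTask
-
-        class RefreshTask(PeriodicTask):
-
-            @property
-            def run_every(self):
-                # load_interval_seconds() stands in for your own lookup
-                return timedelta(seconds=load_interval_seconds())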
-
-* The worker now supports control commands enabled through the use of a
-  broadcast queue; you can remotely revoke tasks or set the rate limit for
-  a task type. See :mod:`celery.task.control`.
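-
-    For example (a sketch; `task_id` is a placeholder for a real id)::
-
-        >>> from celery.task.control import revoke, rate_limit
-        >>> revoke(task_id)                  # revoke a task by id
-        >>> rate_limit("tasks.add", "10/m")  # limit this task type to ten/minute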
-
-* The services now set informative process names (as shown in `ps`
-  listings) if the :mod:`setproctitle` module is installed.
-
-* :exc:`~@NotRegistered` now inherits from :exc:`KeyError`,
-  and `TaskRegistry.__getitem__`+`pop` raise `NotRegistered` instead.
-
-* You can set the loader via the :envvar:`CELERY_LOADER` environment variable.
-
-* You can now set :setting:`CELERY_IGNORE_RESULT` to ignore task results by
-  default (if enabled, tasks don't save results or errors to the backend used).
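-
-    For example::
-
-        CELERY_IGNORE_RESULT = True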
-
-* celeryd now correctly handles malformed messages by discarding and
-  acknowledging them, instead of crashing.
-
-.. _v100-bugs:
-
-Bugs
-----
-
-* Fixed a race condition that could happen while storing task results in the
-  database.
-
-.. _v100-documentation:
-
-Documentation
--------------
-
-* Reference now split into two sections; API reference and internal module
-  reference.
-
-.. _version-0.8.4:
-
-0.8.4
-=====
-:release-date: 2010-02-05 01:52 P.M CEST
-
-* Now emits a warning if the `--detach` argument is used.
-
-    `--detach` should not be used anymore, as it has several bugs that are
-    not easily fixed. Instead, use something like start-stop-daemon,
-    Supervisord or launchd (OS X).
-
-* Make sure the logger class is process aware, even if running Python >= 2.6.
-
-* Error emails are no longer sent when the task is retried.
-
-.. _version-0.8.3:
-
-0.8.3
-=====
-:release-date: 2009-12-22 09:43 A.M CEST
-
-* Fixed a possible race condition that could happen when storing/querying
-  task results using the database backend.
-
-* Now has console script entry points in the setup.py file, so tools like
-  Buildout will correctly install the programs celeryd and celeryinit.
-
-.. _version-0.8.2:
-
-0.8.2
-=====
-:release-date: 2009-11-20 03:40 P.M CEST
-
-* QoS prefetch count was not applied properly, as it was set for every message
-  received (which apparently behaves like "receive one more"), instead of only
-  when the wanted value changed.
-
-.. _version-0.8.1:
-
-0.8.1
-=====
-:release-date: 2009-11-16 05:21 P.M CEST
-
-.. _v081-very-important:
-
-Very important note
--------------------
-
-This release (with carrot 0.8.0) enables AMQP QoS (quality of service), which
-means the workers will only receive as many messages as they can handle at a
-time. As with any release, you should test this version upgrade on your
-development servers before rolling it out to production!
-
-.. _v081-important:
-
-Important changes
------------------
-
-* If you're using Python < 2.6 and you use the multiprocessing backport, then
-  multiprocessing version 2.6.2.1 is required.
-
-* All AMQP_* settings have been renamed to BROKER_*, and in addition
-  AMQP_SERVER has been renamed to BROKER_HOST, so where you previously had::
-
-        AMQP_SERVER = "localhost"
-        AMQP_PORT = 5678
-        AMQP_USER = "myuser"
-        AMQP_PASSWORD = "mypassword"
-        AMQP_VHOST = "celery"
-
-  You need to change that to::
-
-        BROKER_HOST = "localhost"
-        BROKER_PORT = 5678
-        BROKER_USER = "myuser"
-        BROKER_PASSWORD = "mypassword"
-        BROKER_VHOST = "celery"
-
-* Custom carrot backends now need to include the backend class name, so before
-  where you had::
-
-        CARROT_BACKEND = "mycustom.backend.module"
-
-  you need to change it to::
-
-        CARROT_BACKEND = "mycustom.backend.module.Backend"
-
-  where `Backend` is the class name. This is probably `"Backend"`, as
-  that was the previously implied name.
-
-* New version requirement for carrot: 0.8.0
-
-.. _v081-changes:
-
-Changes
--------
-
-* Incorporated the multiprocessing backport patch that fixes the
-  `processName` error.
-
-* Ignore the result of PeriodicTasks by default.
-
-* Added a Redis result store backend.
-
-* Allow /etc/default/celeryd to define additional options for the celeryd init
-  script.
-
-* Fixed an issue with MongoDB periodic tasks when using a timezone other
-  than UTC.
-
-* Windows specific: Negate test for available os.fork (thanks miracle2k).
-
-* Now tries to handle broken PID files.
-
-* Added a Django test runner to contrib that sets
-  `CELERY_ALWAYS_EAGER = True` for testing with the database backend.
-
-* Added a :setting:`CELERY_CACHE_BACKEND` setting for using something other
-  than the django-global cache backend.
-
-* Use custom implementation of functools.partial (curry) for Python 2.4 support
-  (there are probably still problems with running on 2.4, but it will
-  eventually be supported).
-
-* Exceptions are now prepared for pickling when saving the :state:`RETRY`
-  status, for all backends.
-
-* The SQLite "no concurrency" limit should only be effective if the
-  database backend is actually used.
-
-.. _version-0.8.0:
-
-0.8.0
-=====
-:release-date: 2009-09-22 03:06 P.M CEST
-
-.. _v080-incompatible:
-
-Backward incompatible changes
------------------------------
-
-* Add traceback to result value on failure.
-
-    .. note::
-
-        If you use the database backend you have to re-create the
-        database table `celery_taskmeta`.
-
-        Contact the :ref:`mailing-list` or :ref:`irc-channel` channel
-        for help doing this.
-
-* Database tables are now only created if the database backend is used,
-  so if you change back to the database backend at some point,
-  be sure to initialize tables (django: `syncdb`, python: `celeryinit`).
-
-  .. note::
-
-     This only applies if using Django version 1.1 or higher.
-
-* Now depends on `carrot` version 0.6.0.
-
-* Now depends on python-daemon 1.4.8
-
-.. _v080-important:
-
-Important changes
------------------
-
-* Celery can now be used in pure Python (outside of a Django project).
-
-    This means celery is no longer Django specific.
-
-    For more information see the FAQ entry
-    :ref:`faq-is-celery-for-django-only`.
-
-* Celery now supports task retries.
-
-    See `Cookbook: Retrying Tasks`_ for more information.
-
-.. _`Cookbook: Retrying Tasks`:
-    http://celery.github.com/celery/cookbook/task-retries.html
-
-* We now have an AMQP result store backend.
-
-    It uses messages to publish task return value and status. And it's
-    incredibly fast!
-
-    See issue #6 for more info!
-
-* AMQP QoS (prefetch count) implemented:
-
-    This is to avoid receiving more messages than we can handle.
-
-* Now redirects stdout/stderr to the celeryd log file when detached.
-
-* Now uses `inspect.getargspec` to only pass default arguments
-    the task supports.
-
-* Added Task.on_success, .on_retry, .on_failure handlers.
-    See :meth:`celery.task.base.Task.on_success`,
-    :meth:`celery.task.base.Task.on_retry` and
-    :meth:`celery.task.base.Task.on_failure`.
-
-* `celery.utils.gen_unique_id`: Workaround for
-    http://bugs.python.org/issue4607
-
-* You can now customize what happens at worker start, at process init, etc.,
-    by creating your own loaders. (see :mod:`celery.loaders.default`,
-    :mod:`celery.loaders.djangoapp`, :mod:`celery.loaders`.)
-
-* Support for multiple AMQP exchanges and queues.
-
-    This feature misses documentation and tests, so anyone interested
-    is encouraged to improve this situation.
-
-* celeryd now survives a restart of the AMQP server!
-
-  Automatically re-establishes the AMQP broker connection if it's lost.
-
-  New settings:
-
-    * AMQP_CONNECTION_RETRY
-        Set to `True` to enable connection retries.
-
-    * AMQP_CONNECTION_MAX_RETRIES
-        Maximum number of restarts before we give up. Default: `100`.
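-
-  For example, enabling retries with the documented default maximum
-  (illustrative)::
-
-      AMQP_CONNECTION_RETRY = True
-      AMQP_CONNECTION_MAX_RETRIES = 100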
-
-.. _v080-news:
-
-News
-----
-
-* Fixed an incompatibility between python-daemon and multiprocessing,
-    which resulted in the `[Errno 10] No child processes` problem when
-    detaching.
-
-* Fixed a possible DjangoUnicodeDecodeError being raised when saving pickled
-    data to Django's memcached cache backend.
-
-* Better Windows compatibility.
-
-* New version of the pickled field (taken from
-    http://www.djangosnippets.org/snippets/513/)
-
-* New signals introduced: `task_sent`, `task_prerun` and
-    `task_postrun`, see :mod:`celery.signals` for more information.
-
-* `TaskSetResult.join` caused `TypeError` when `timeout=None`.
-    Thanks Jerzy Kozera.  Closes #31
-
-* `views.apply` should return `HttpResponse` instance.
-    Thanks to Jerzy Kozera. Closes #32
-
-* `PeriodicTask`: Save conversion of `run_every` from `int`
-    to `timedelta` to the class attribute instead of on the instance.
-
-* Exceptions have been moved to `celery.exceptions`, but are still
-    available in the previous module.
-
-* Try to rollback transaction and retry saving result if an error happens
-    while setting task status with the database backend.
-
-* jail() refactored into :class:`celery.execute.ExecuteWrapper`.
-
-* `views.apply` now correctly sets the mime-type to "application/json".
-
-* `views.task_status` now returns the exception if the state is :state:`RETRY`.
-
-* `views.task_status` now returns the traceback if the state is
-    :state:`FAILURE` or :state:`RETRY`.
-
-* Documented default task arguments.
-
-* Add a sensible __repr__ to ExceptionInfo for easier debugging.
-
-* Fix documentation typo `.. import map` -> `.. import dmap`.
-    Thanks to mikedizon
-
-.. _version-0.6.0:
-
-0.6.0
-=====
-:release-date: 2009-08-07 06:54 A.M CET
-
-.. _v060-important:
-
-Important changes
------------------
-
-* Fixed a bug where tasks raising unpickleable exceptions crashed pool
-    workers. So if you've had pool workers mysteriously disappearing, or
-    problems with celeryd stopping working, this has been fixed in this
-    version.
-
-* Fixed a race condition with periodic tasks.
-
-* The task pool is now supervised, so if a pool worker crashes,
-    goes away or stops responding, it is automatically replaced with
-    a new one.
-
-* Task.name is now automatically generated out of class module+name, e.g.
-    `"djangotwitter.tasks.UpdateStatusesTask"`. Very convenient. No idea why
-    we didn't do this before. Some documentation is updated to not manually
-    specify a task name.
-
-.. _v060-news:
-
-News
-----
-
-* Tested with Django 1.1
-
-* New Tutorial: Creating a click counter using carrot and celery
-
-* Database entries for periodic tasks are now created at `celeryd`
-    startup instead of for each check (which has been a forgotten TODO/XXX
-    in the code for a long time)
-
-* New settings variable: :setting:`CELERY_TASK_RESULT_EXPIRES`
-    Time (in seconds, or a `datetime.timedelta` object) after which
-    stored task results are deleted. For the moment this only works for the
-    database backend.
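-
-    For example, to keep results for one day::
-
-        from datetime import timedelta
-
-        CELERY_TASK_RESULT_EXPIRES = timedelta(days=1)  # or 60 * 60 * 24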
-
-* `celeryd` now emits a debug log message for which periodic tasks
-    have been launched.
-
-* The periodic task table is now locked for reading while getting
-    periodic task status. (MySQL only so far, seeking patches for other
-    engines)
-
-* A lot more debugging information is now available by turning on the
-    `DEBUG` log level (`--loglevel=DEBUG`).
-
-* Functions/methods with a timeout argument now work correctly.
-
-* New: `celery.strategy.even_time_distribution`:
-    With an iterator yielding (args, kwargs) tuples, evenly distribute
-    the processing of its tasks throughout the time window available.
-
-* Log message `Unknown task ignored...` now has log level `ERROR`.
-
-* Log message `"Got task from broker"` is now emitted for all tasks, even if
-    the task has an ETA (estimated time of arrival). Also the message now
-    includes the ETA for the task (if any).
-
-* Acknowledgement now happens in the pool callback. Can't do ack in the job
-    target, as it's not pickleable (can't share AMQP connection, etc.).
-
-* Added a note about `.delay` hanging to the README.
-
-* Tests now passing in Django 1.1
-
-* Fixed discovery to make sure the app is in INSTALLED_APPS.
-
-* Previously overridden pool behavior (process reap, wait until pool worker
-    available, etc.) is now handled by `multiprocessing.Pool` itself.
-
-* Convert statistics data to Unicode for use as kwargs. Thanks Lucy!
-
-.. _version-0.4.1:
-
-0.4.1
-=====
-:release-date: 2009-07-02 01:42 P.M CET
-
-* Fixed a bug with parsing the message options (`mandatory`,
-  `routing_key`, `priority`, `immediate`)
-
-.. _version-0.4.0:
-
-0.4.0
-=====
-:release-date: 2009-07-01 07:29 P.M CET
-
-* Adds eager execution. `celery.execute.apply`|`Task.apply` executes the
-  function blocking until the task is done; for API compatibility it
-  returns a `celery.result.EagerResult` instance. You can configure
-  celery to always run tasks locally by setting the
-  :setting:`CELERY_ALWAYS_EAGER` setting to `True`.
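-
-  A sketch, assuming a task `add` that takes two arguments::
-
-      >>> result = add.apply(args=[2, 2])  # runs locally, blocking
-      >>> result.get()
-      4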
-
-* Now depends on `anyjson`.
-
-* 99% coverage using python `coverage` 3.0.
-
-.. _version-0.3.20:
-
-0.3.20
-======
-:release-date: 2009-06-25 08:42 P.M CET
-
-* New arguments to `apply_async` (the advanced version of
-  `delay_task`), `countdown` and `eta`;
-
-    >>> # Run 10 seconds into the future.
-    >>> res = apply_async(MyTask, countdown=10)
-
-    >>> # Run 1 day from now
-    >>> res = apply_async(MyTask,
-    ...                   eta=datetime.now() + timedelta(days=1))
-
-* Now unlinks stale PID files
-
-* Lots more tests.
-
-* Now compatible with carrot >= 0.5.0.
-
-* **IMPORTANT** The `subtask_ids` attribute on the `TaskSetResult`
-  instance has been removed. To get this information instead use:
-
-        >>> subtask_ids = [subtask.id for subtask in ts_res.subtasks]
-
-* `TaskSet.run()` now respects extra message options from the task class.
-
-* Task: Add attribute `ignore_result`: Don't store the status and
-  return value. This means you can't use the
-  `celery.result.AsyncResult` to check if the task is
-  done, or get its return value. Only use this if you need the performance
-  and are able to live without these features. Any exceptions raised will
-  still store the return value/status as usual.
-
-* Task: Add attribute `disable_error_emails` to disable sending error
-  emails for that task.
-
-* Should now work on Windows (although running in the background won't
-  work, so using the `--detach` argument results in an exception
-  being raised.)
-
-* Added support for statistics for profiling and monitoring.
-  To start sending statistics start `celeryd` with the
-  `--statistics` option. Then after a while you can dump the results
-  by running `python manage.py celerystats`. See
-  `celery.monitoring` for more information.
-
-* The celery daemon can now be supervised (i.e. it is automatically
-  restarted if it crashes). To use this start celeryd with the
-  `--supervised` option (or alternatively `-S`).
-
-* views.apply: View applying a task. Example
-
-    ::
-
-        http://e.com/celery/apply/task_name/arg1/arg2//?kwarg1=a&kwarg2=b
-
-
-    .. warning::
-
-        Use with caution! Do not expose this URL to the public
-        without first ensuring that your code is safe!
-
-* Refactored `celery.task`. It's now split into three modules:
-
-    * celery.task
-
-        Contains `apply_async`, `delay_task`, `discard_all`, and task
-        shortcuts, plus imports objects from `celery.task.base` and
-        `celery.task.builtins`
-
-    * celery.task.base
-
-        Contains task base classes: `Task`, `PeriodicTask`,
-        `TaskSet`, `AsynchronousMapTask`, `ExecuteRemoteTask`.
-
-    * celery.task.builtins
-
-        Built-in tasks: `PingTask`, `DeleteExpiredTaskMetaTask`.
-
-.. _version-0.3.7:
-
-0.3.7
-=====
-:release-date: 2008-06-16 11:41 P.M CET
-
-* **IMPORTANT** Now uses AMQP's `basic.consume` instead of
-  `basic.get`. This means we're no longer polling the broker for
-  new messages.
-
-* **IMPORTANT** Default concurrency limit is now set to the number of CPUs
-  available on the system.
-
-* **IMPORTANT** `tasks.register`: Renamed `task_name` argument to
-  `name`, so
-
-        >>> tasks.register(func, task_name="mytask")
-
-  has to be replaced with:
-
-        >>> tasks.register(func, name="mytask")
-
-* The daemon now correctly runs if the pidlock is stale.
-
-* Now compatible with carrot 0.4.5
-
-* Default AMQP connection timeout is now 4 seconds.
-
-* `AsyncResult.read()` was always returning `True`.
-
-* Only use README as long_description if the file exists so easy_install
-  doesn't break.
-
-* `celery.view`: JSON responses now properly set their mime-type.
-
-* `apply_async` now has a `connection` keyword argument so you
-  can re-use the same AMQP connection if you want to execute
-  more than one task.
-
-* Handle failures in task_status view such that it won't throw 500s.
-
-* Fixed documentation typo: `AMQP_SERVER` should be `AMQP_HOST`.
-
-* Worker exception emails sent to administrators now work properly.
-
-* No longer depends on `django`, so installing `celery` won't affect
-  the preferred Django version installed.
-
-* Now works with PostgreSQL (psycopg2) again by registering the
-  `PickledObject` field.
-
-* `celeryd`: Added `--detach` option as an alias to `--daemon`, and
-  it's the term used in the documentation from now on.
-
-* Make sure the pool and periodic task worker thread is terminated
-  properly at exit. (So `Ctrl-C` works again).
-
-* Now depends on `python-daemon`.
-
-* Removed dependency on `simplejson`.
-
-* Cache Backend: Re-establishes connection for every task process
-  if the Django cache backend is memcached/libmemcached.
-
-* Tyrant Backend: Now re-establishes the connection for every task
-  executed.
-
-.. _version-0.3.3:
-
-0.3.3
-=====
-:release-date: 2009-06-08 01:07 P.M CET
-
-* The `PeriodicWorkController` now sleeps for 1 second between checking
-  for periodic tasks to execute.
-
-.. _version-0.3.2:
-
-0.3.2
-=====
-:release-date: 2009-06-08 01:07 P.M CET
-
-* celeryd: Added option `--discard`: Discard (delete!) all waiting
-  messages in the queue.
-
-* celeryd: The `--wakeup-after` option was not handled as a float.
-
-.. _version-0.3.1:
-
-0.3.1
-=====
-:release-date: 2009-06-08 01:07 P.M CET
-
-* The `PeriodicTask` worker is now running in its own thread instead
-  of blocking the `TaskController` loop.
-
-* Default `QUEUE_WAKEUP_AFTER` has been lowered to `0.1` (was `0.3`)
-
-.. _version-0.3.0:
-
-0.3.0
-=====
-:release-date: 2009-06-08 12:41 P.M CET
-
-.. warning::
-
-    This is a development version, for the stable release, please
-    see versions 0.2.x.
-
-**VERY IMPORTANT:** Pickle is now the encoder used for serializing task
-arguments, so be sure to flush your task queue before you upgrade.
-
-* **IMPORTANT** TaskSet.run() now returns a celery.result.TaskSetResult
-  instance, which lets you inspect the status and return values of a
-  taskset as if it were a single entity.
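-
-  A sketch (assuming `ts_res` is a `TaskSetResult` from a previously
-  applied task set; the values shown are illustrative)::
-
-      >>> ts_res.join()   # collect all return values, in order
-      [4, 8, 16]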
-
-* **IMPORTANT** Celery now depends on carrot >= 0.4.1.
-
-* The celery daemon now sends task errors to the registered admin emails.
-  To turn off this feature, set `SEND_CELERY_TASK_ERROR_EMAILS` to
-  `False` in your `settings.py`. Thanks to Grégoire Cachet.
-
-* You can now run the celery daemon by using `manage.py`::
-
-        $ python manage.py celeryd
-
-  Thanks to Grégoire Cachet.
-
-* Added support for message priorities, topic exchanges, custom routing
-  keys for tasks. This means we have introduced
-  `celery.task.apply_async`, a new way of executing tasks.
-
-  You can use `celery.task.delay` and `celery.Task.delay` like usual, but
-  if you want greater control over the message sent, you want
-  `celery.task.apply_async` and `celery.Task.apply_async`.
-
-  This also means the AMQP configuration has changed. Some settings have
-  been renamed, while others are new::
-
-        CELERY_AMQP_EXCHANGE
-        CELERY_AMQP_PUBLISHER_ROUTING_KEY
-        CELERY_AMQP_CONSUMER_ROUTING_KEY
-        CELERY_AMQP_CONSUMER_QUEUE
-        CELERY_AMQP_EXCHANGE_TYPE
-
-  See the entry `Can I send some tasks to only some servers?`_ in the
-  `FAQ`_ for more information.
-
-.. _`Can I send some tasks to only some servers?`:
-        http://bit.ly/celery_AMQP_routing
-.. _`FAQ`: http://celery.github.com/celery/faq.html
-
-* Task errors are now logged using log level `ERROR` instead of `INFO`,
-  and stacktraces are dumped. Thanks to Grégoire Cachet.
-
-* Make every new worker process re-establish its Django DB connection,
-  solving the "MySQL connection died?" exceptions.
-  Thanks to Vitaly Babiy and Jirka Vejrazka.
-
-* **IMPORTANT** Now using pickle to encode task arguments. This means you
-  can now pass complex Python objects to tasks as arguments.
-
-* Removed dependency on `yadayada`.
-
-* Added a FAQ, see `docs/faq.rst`.
-
-* Now converts any Unicode keys in task `kwargs` to regular strings.
-  Thanks Vitaly Babiy.
-
-* Renamed the `TaskDaemon` to `WorkController`.
-
-* `celery.datastructures.TaskProcessQueue` is now renamed to
-  `celery.pool.TaskPool`.
-
-* The pool algorithm has been refactored for greater performance and
-  stability.
-
-.. _version-0.2.0:
-
-0.2.0
-=====
-:release-date: 2009-05-20 05:14 P.M CET
-
-* Final release of 0.2.0
-
-* Compatible with carrot version 0.4.0.
-
-* Fixes some syntax errors related to fetching results
-  from the database backend.
-
-.. _version-0.2.0-pre3:
-
-0.2.0-pre3
-==========
-:release-date: 2009-05-20 05:14 P.M CET
-
-* *Internal release*. Improved handling of unpickleable exceptions,
-  `get_result` now tries to recreate something looking like the
-  original exception.
-
-.. _version-0.2.0-pre2:
-
-0.2.0-pre2
-==========
-:release-date: 2009-05-20 01:56 P.M CET
-
-* Now handles unpickleable exceptions (like the dynamically generated
-  subclasses of `django.core.exception.MultipleObjectsReturned`).
-
-.. _version-0.2.0-pre1:
-
-0.2.0-pre1
-==========
-:release-date: 2009-05-20 12:33 P.M CET
-
-* It's getting quite stable, with a lot of new features, so bump
-  version to 0.2. This is a pre-release.
-
-* `celery.task.mark_as_read()` and `celery.task.mark_as_failure()` have
-  been removed. Use `celery.backends.default_backend.mark_as_read()`,
-  and `celery.backends.default_backend.mark_as_failure()` instead.
-
-.. _version-0.1.15:
-
-0.1.15
-======
-:release-date: 2009-05-19 04:13 P.M CET
-
-* The celery daemon was leaking AMQP connections; this should be fixed.
-  If you have any problems with too many open files (like `emfile`
-  errors in `rabbit.log`), please contact us!
-
-.. _version-0.1.14:
-
-0.1.14
-======
-:release-date: 2009-05-19 01:08 P.M CET
-
-* Fixed a syntax error in the `TaskSet` class.  (No such variable
-  `TimeOutError`).
-
-.. _version-0.1.13:
-
-0.1.13
-======
-:release-date: 2009-05-19 12:36 P.M CET
-
-* Forgot to add `yadayada` to install requirements.
-
-* Now deletes all expired task results, not just those marked as done.
-
-* Able to load the Tokyo Tyrant backend class without Django
-  configuration; Tyrant settings can be specified directly in the class
-  constructor.
-
-* Improved API documentation.
-
-* Now using the Sphinx documentation system; you can build
-  the HTML documentation by doing::
-
-        $ cd docs
-        $ make html
-
-  and the result will be in `docs/.build/html`.
-
-.. _version-0.1.12:
-
-0.1.12
-======
-:release-date: 2009-05-18 04:38 P.M CET
-
-* `delay_task()` etc. now returns `celery.task.AsyncResult` object,
-  which lets you check the result and any failure that might have
-  happened.  It kind of works like the `multiprocessing.AsyncResult`
-  class returned by `multiprocessing.Pool.map_async`.
-
-* Added dmap() and dmap_async(). This works like the
-  `multiprocessing.Pool` versions except they are tasks
-  distributed to the celery server. Example:
-
-        >>> from celery.task import dmap
-        >>> import operator
-        >>> dmap(operator.add, [[2, 2], [4, 4], [8, 8]])
-        [4, 8, 16]
-
-        >>> from celery.task import dmap_async
-        >>> import operator
-        >>> result = dmap_async(operator.add, [[2, 2], [4, 4], [8, 8]])
-        >>> result.ready()
-        False
-        >>> import time
-        >>> time.sleep(1)
-        >>> result.ready()
-        True
-        >>> result.result
-        [4, 8, 16]
-
-* Refactored the task metadata cache and database backends, and added
-  a new backend for Tokyo Tyrant. You can set the backend in your django
-  settings file. E.g.::
-
-        CELERY_RESULT_BACKEND = "database"; # Uses the database
-        CELERY_RESULT_BACKEND = "cache"; # Uses the django cache framework
-        CELERY_RESULT_BACKEND = "tyrant"; # Uses Tokyo Tyrant
-        TT_HOST = "localhost"; # Hostname for the Tokyo Tyrant server.
-        TT_PORT = 6657; # Port of the Tokyo Tyrant server.
-
-.. _version-0.1.11:
-
-0.1.11
-======
-:release-date: 2009-05-12 02:08 P.M CET
-
-* The logging system was leaking file descriptors, resulting in
-  servers stopping with the EMFILE (too many open files) error.
-
-.. _version-0.1.10:
-
-0.1.10
-======
-:release-date: 2009-05-11 12:46 P.M CET
-
-* Tasks now support both positional arguments and keyword arguments.
-
-* Requires carrot 0.3.8.
-
-* The daemon now tries to reconnect if the connection is lost.
-
-.. _version-0.1.8:
-
-0.1.8
-=====
-:release-date: 2009-05-07 12:27 P.M CET
-
-* Better test coverage
-* More documentation
-* celeryd doesn't emit `Queue is empty` message if
-  `settings.CELERYD_EMPTY_MSG_EMIT_EVERY` is 0.
-
-.. _version-0.1.7:
-
-0.1.7
-=====
-:release-date: 2009-04-30 01:50 P.M CET
-
-* Added some unit tests
-
-* Can now use the database for task metadata (i.e. whether the task has
-  been executed or not). Set `settings.CELERY_TASK_META`.
-
-* Can now run `python setup.py test` to run the unit tests from
-  within the `tests` project.
-
-* Can set the AMQP exchange/routing key/queue using
-  `settings.CELERY_AMQP_EXCHANGE`, `settings.CELERY_AMQP_ROUTING_KEY`,
-  and `settings.CELERY_AMQP_CONSUMER_QUEUE`.
-
-.. _version-0.1.6:
-
-0.1.6
-=====
-:release-date: 2009-04-28 02:13 P.M CET
-
-* Introducing `TaskSet`. A set of subtasks is executed and you can
-  find out how many, or if all of them, are done (excellent for progress
-  bars and such).
-
-* Now catches all exceptions when running `Task.__call__`, so the
-  daemon doesn't die. This doesn't happen for pure functions yet, only
-  `Task` classes.
-
-* `autodiscover()` now works with zipped eggs.
-
-* celeryd: Now adds current working directory to `sys.path` for
-  convenience.
-
-* The `run_every` attribute of `PeriodicTask` classes can now be a
-  `datetime.timedelta()` object.
-
-* celeryd: You can now set the `DJANGO_PROJECT_DIR` variable
-  for `celeryd` and it will add that to `sys.path` for easy launching.
-
-* Can now check if a task has been executed or not via HTTP.
-
-  You can do this by including the celery `urls.py` into your project::
-
-        >>> url(r'^celery/$', include("celery.urls"))
-
-  then visiting the following URL::
-
-        http://mysite/celery/$task_id/done/
-
-  this will return a JSON dictionary, e.g.::
-
-        {"task": {"id": $task_id, "executed": true}}
-
-* `delay_task` now returns string id, not `uuid.UUID` instance.
-
-* Now has `PeriodicTasks`, for `cron`-like functionality.
-
-* Project changed name from `crunchy` to `celery`. The details of
-  the name change request are in `docs/name_change_request.txt`.
-
-.. _version-0.1.0:
-
-0.1.0
-=====
-:release-date: 2009-04-24 11:28 A.M CET
+3.0.0 (Chiastic Slide)
+======================
+:release-date: 2012-07-07 01:30 P.M BST
+:by: Ask Solem
 
 
-* Initial release
+See :ref:`whatsnew-3.0`.

+ 151 - 180
README.rst

@@ -4,7 +4,7 @@
 
 
 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
 
-:Version: 2.6.0rc5
+:Version: 3.1.0a1 (DEVEL)
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/celery/celery/
@@ -13,32 +13,152 @@
 
 
 --
 
-.. contents::
-    :local:
+What is a Task Queue?
+=====================
 
-.. _celery-synopsis:
+Task queues are used as a mechanism to distribute work across threads or
+machines.
 
-Synopsis
-========
+A task queue's input is a unit of work called a task; dedicated worker
+processes then constantly monitor the queue for new work to perform.
 
-Celery is an open source asynchronous task queue/job queue based on
-distributed message passing. It is focused on real-time operation,
-but supports scheduling as well.
+Celery communicates via messages, using a broker
+to mediate between clients and workers.  To initiate a task a client puts a
+message on the queue; the broker then delivers the message to a worker.
 
-The execution units, called tasks, are executed concurrently on one or
-more worker nodes using multiprocessing, `Eventlet`_ or `gevent`_.  Tasks can
-execute asynchronously (in the background) or synchronously
-(wait until ready).
-
-Celery is used in production systems to process millions of tasks every hour.
+A Celery system can consist of multiple workers and brokers, giving way
+to high availability and horizontal scaling.
 
 Celery is written in Python, but the protocol can be implemented in any
-language.  It can also `operate with other languages using webhooks`_.
-There's also `RCelery` for the Ruby programming language, and a `PHP client`.
+language.  So far there's RCelery_ for the Ruby programming language, and a
+`PHP client`_, but language interoperability can also be achieved
+by `using webhooks`_.
+
+.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/
+.. _`PHP client`: https://github.com/gjedeer/celery-php
+.. _`using webhooks`:
+    http://celery.github.com/celery/userguide/remote-tasks.html
+
+What do I need?
+===============
+
+Celery version 3.0 runs on:
+
+- Python (2.5, 2.6, 2.7, 3.2, 3.3)
+- PyPy (1.8, 1.9)
+- Jython (2.5, 2.7).
+
+This is the last version to support Python 2.5,
+and from Celery 3.1, Python 2.6 or later is required.
+The last version to support Python 2.4 was Celery series 2.2.
+
+*Celery* requires a message broker to send and receive messages.
+The RabbitMQ, Redis and MongoDB broker transports are feature complete,
+but there's also support for a myriad of other solutions, including
+using SQLite for local development.
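+
+For example, pointing Celery at a local RabbitMQ broker is a single
+setting (an illustrative URL)::
+
+    BROKER_URL = 'amqp://guest:guest@localhost:5672//'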
+
+*Celery* can run on a single machine, on multiple machines, or even
+across datacenters.
+
+Get Started
+===========
+
+If this is the first time you're trying to use Celery, or you are
+new to Celery 3.0 coming from previous versions, then you should read our
+getting started tutorials:
+
+- `First steps with Celery`_
+
+    Tutorial teaching you the bare minimum needed to get started with Celery.
+
+- `Next steps`_
+
+    A more complete overview, showing more features.
+
+.. _`First steps with Celery`:
+    http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html
+
+.. _`Next steps`:
+    http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
+
+Celery is...
+============
+
+- **Simple**
+
+    Celery is easy to use and maintain, and does *not need configuration files*.
+
+    It has an active, friendly community you can talk to for support,
+    including a `mailing-list`_ and an IRC channel.
+
+    Here's one of the simplest applications you can make::
+
+        from celery import Celery
+
+        celery = Celery('hello', broker='amqp://guest@localhost//')
+
+        @celery.task
+        def hello():
+            return 'hello world'
+
+- **Highly Available**
+
+    Workers and clients will automatically retry in the event
+    of connection loss or failure, and some brokers support
+    HA by way of *Master/Master* or *Master/Slave* replication.
+
+- **Fast**
+
+    A single Celery process can process millions of tasks a minute,
+    with sub-millisecond round-trip latency (using RabbitMQ,
+    py-librabbitmq, and optimized settings).
 
 
-The recommended message broker is `RabbitMQ`_, but support for
-`Redis`_, `MongoDB`_, `Beanstalk`_, `Amazon SQS`_, `CouchDB`_ and
-databases (using `SQLAlchemy`_ or the `Django ORM`_) is also available.
+- **Flexible**
+
+    Almost every part of *Celery* can be extended or used on its own:
+    custom pool implementations, serializers, compression schemes, logging,
+    schedulers, consumers, producers, autoscalers, broker transports and much more.
+
+It supports...
+==============
+
+    - **Brokers**
+
+        - RabbitMQ_, Redis_,
+        - MongoDB_, Beanstalk_,
+        - CouchDB_, SQLAlchemy_,
+        - Django ORM, Amazon SQS,
+        - and more...
+
+    - **Concurrency**
+
+        - multiprocessing, Eventlet_, gevent_, threads/single threaded
+
+    - **Result Stores**
+
+        - AMQP, Redis
+        - memcached, MongoDB
+        - SQLAlchemy, Django ORM
+        - Apache Cassandra
+
+    - **Serialization**
+
+        - *pickle*, *json*, *yaml*, *msgpack*.
+        - *zlib*, *bzip2* compression.
+        - Cryptographic message signing.
+
+.. _`Eventlet`: http://eventlet.net/
+.. _`gevent`: http://gevent.org/
+
+.. _RabbitMQ: http://rabbitmq.com
+.. _Redis: http://redis.io
+.. _MongoDB: http://mongodb.org
+.. _Beanstalk: http://kr.github.com/beanstalkd
+.. _CouchDB: http://couchdb.apache.org
+.. _SQLAlchemy: http://sqlalchemy.org
+
+Framework Integration
+=====================
 
 
 Celery is easy to integrate with web frameworks, some of which even have
 integration packages:
@@ -50,27 +170,18 @@ integration packages:
     +--------------------+------------------------+
     | `Pylons`_          | `celery-pylons`_       |
     +--------------------+------------------------+
-    | `Flask`_           | `flask-celery`_        |
+    | `Flask`_           | not needed             |
     +--------------------+------------------------+
     | `web2py`_          | `web2py-celery`_       |
     +--------------------+------------------------+
    | `Tornado`_         | `tornado-celery`_      |
     +--------------------+------------------------+
 
-.. _`RCelery`: http://leapfrogdevelopment.github.com/rcelery/
-.. _`PHP client`: https://github.com/gjedeer/celery-php
-.. _`RabbitMQ`: http://www.rabbitmq.com/
-.. _`Redis`: http://code.google.com/p/redis/
-.. _`SQLAlchemy`: http://www.sqlalchemy.org/
+The integration packages are not strictly necessary, but they can make
+development easier, and sometimes they add important hooks like closing
+database connections at ``fork``.
+
 .. _`Django`: http://djangoproject.com/
-.. _`Django ORM`: http://djangoproject.com/
-.. _`Memcached`: http://memcached.org/
-.. _`Eventlet`: http://eventlet.net/
-.. _`gevent`: http://gevent.org/
-.. _`Beanstalk`: http://kr.github.com/beanstalkd/
-.. _`MongoDB`: http://mongodb.org/
-.. _`CouchDB`: http://couchdb.apache.org/
-.. _`Amazon SQS`: http://aws.amazon.com/sqs/
 .. _`Pylons`: http://pylonshq.com/
 .. _`Flask`: http://flask.pocoo.org/
 .. _`web2py`: http://web2py.com/
@@ -79,152 +190,9 @@ integration packages:
 .. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
 .. _`django-celery`: http://pypi.python.org/pypi/django-celery
 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
-.. _`flask-celery`: http://github.com/ask/flask-celery/
 .. _`web2py-celery`: http://code.google.com/p/web2py-celery/
 .. _`Tornado`: http://www.tornadoweb.org/
 .. _`tornado-celery`: http://github.com/mher/tornado-celery/
-.. _`operate with other languages using webhooks`:
-    http://celery.github.com/celery/userguide/remote-tasks.html
-.. _`limited support`:
-    http://kombu.readthedocs.org/en/latest/introduction.html#transport-comparison
-
-.. _celery-overview:
-
-Overview
-========
-
-This is a high level overview of the architecture.
-
-.. image:: http://cloud.github.com/downloads/celery/celery/Celery-Overview-v4.jpg
-
-The broker delivers tasks to the worker instances.
-A worker instance is started by running the `celery worker` program.
-You can have many networked machines running worker instances, forming a
-cluster, or you can run everything on a single machine.
-
-The return value of the task can be stored for later retrieval,
-and the progress of the task can be tracked (called the task's *state*).
-
-.. _celery-example:
-
-Example
-=======
-
-You probably want to see some code by now, so here's an example task
-which adds two numbers:
-::
-
-    from celery import task
-
-    @task()
-    def add(x, y):
-        return x + y
-
-You can execute the task in the background, or wait for it to finish::
-
-    >>> result = add.delay(4, 4)
-    >>> result.wait() # wait for and return the result
-    8
-
-Simple!
-
-.. _celery-features:
-
-Features
-========
-
-    +-----------------+----------------------------------------------------+
-    | Messaging       | Supported brokers include `RabbitMQ`_, `Redis`_,   |
-    |                 | `MongoDB`_, `Beanstalk`_, SQL databases,           |
-    |                 | Amazon SQS and more.                               |
-    +-----------------+----------------------------------------------------+
-    | Fault-tolerant  | Excellent configurable error recovery when using   |
-    |                 | `RabbitMQ`, ensures your tasks are never lost.     |
-    +-----------------+----------------------------------------------------+
-    | Distributed     | Runs on one or more machines. Supports             |
-    |                 | broker `clustering`_ and `HA`_ when used in        |
-    |                 | combination with `RabbitMQ`_.  You can set up new  |
-    |                 | workers without central configuration (e.g. use    |
-    |                 | your grandma's laptop to help if the queue is      |
-    |                 | temporarily congested).                            |
-    +-----------------+----------------------------------------------------+
-    | Concurrency     | Concurrency is achieved by using multiprocessing,  |
-    |                 | `Eventlet`_, `gevent`_ or a mix of these.          |
-    +-----------------+----------------------------------------------------+
-    | Scheduling      | Supports recurring tasks like cron, or specifying  |
-    |                 | an exact date or countdown for when after the task |
-    |                 | should be executed.                                |
-    +-----------------+----------------------------------------------------+
-    | Latency         | Low latency means you are able to execute tasks    |
-    |                 | *while the user is waiting*.                       |
-    +-----------------+----------------------------------------------------+
-    | Return Values   | Task return values can be saved to the selected    |
-    |                 | result store backend. You can wait for the result, |
-    |                 | retrieve it later, or ignore it.                   |
-    +-----------------+----------------------------------------------------+
-    | Result Stores   | Database, `MongoDB`_, `Redis`_, `Memcached`_,      |
-    |                 | `Cassandra`, or `AMQP`_ (message notification).    |
-    +-----------------+----------------------------------------------------+
-    | Webhooks        | Your tasks can also be HTTP callbacks, enabling    |
-    |                 | cross-language communication.                      |
-    +-----------------+----------------------------------------------------+
-    | Rate limiting   | Supports rate limiting by using the token bucket   |
-    |                 | algorithm, which accounts for bursts of traffic.   |
-    |                 | Rate limits can be set for each task type, or      |
-    |                 | globally for all.                                  |
-    +-----------------+----------------------------------------------------+
-    | Routing         | Using AMQP's flexible routing model you can route  |
-    |                 | tasks to different workers, or select different    |
-    |                 | message topologies, by configuration or even at    |
-    |                 | runtime.                                           |
-    +-----------------+----------------------------------------------------+
-    | Remote-control  | Worker nodes can be controlled from remote by      |
-    |                 | using broadcast messaging.  A range of built-in    |
-    |                 | commands exist in addition to the ability to       |
-    |                 | easily define your own. (AMQP/Redis only)          |
-    +-----------------+----------------------------------------------------+
-    | Monitoring      | You can capture everything happening with the      |
-    |                 | workers in real-time by subscribing to events.     |
-    |                 | A real-time web monitor is in development.         |
-    +-----------------+----------------------------------------------------+
-    | Serialization   | Supports Pickle, JSON, YAML, or easily defined     |
-    |                 | custom schemes. One task invocation can have a     |
-    |                 | different scheme than another.                     |
-    +-----------------+----------------------------------------------------+
-    | Tracebacks      | Errors and tracebacks are stored and can be        |
-    |                 | investigated after the fact.                       |
-    +-----------------+----------------------------------------------------+
-    | UUID            | Every task has a UUID (Universally Unique          |
-    |                 | Identifier), which is the task id used to query    |
-    |                 | task status and return value.                      |
-    +-----------------+----------------------------------------------------+
-    | Retries         | Tasks can be retried if they fail, with            |
-    |                 | configurable maximum number of retries, and delays |
-    |                 | between each retry.                                |
-    +-----------------+----------------------------------------------------+
-    | Task Sets       | A Task set is a task consisting of several         |
-    |                 | sub-tasks. You can find out how many, or if all    |
-    |                 | of the sub-tasks have been executed, and even      |
-    |                 | retrieve the results in order. Progress bars,      |
-    |                 | anyone?                                            |
-    +-----------------+----------------------------------------------------+
-    | Made for Web    | You can query status and results via URLs,         |
-    |                 | enabling the ability to poll task status using     |
-    |                 | Ajax.                                              |
-    +-----------------+----------------------------------------------------+
-    | Error Emails    | Can be configured to send emails to the            |
-    |                 | administrators when tasks fail.                    |
-    +-----------------+----------------------------------------------------+
-    | Message signing | Supports message signing. Messages are signed      |
-    |                 | using public-key cryptography.                     |
-    +-----------------+----------------------------------------------------+
-
-
-.. _`clustering`: http://www.rabbitmq.com/clustering.html
-.. _`HA`: http://www.rabbitmq.com/pacemaker.html
-.. _`AMQP`: http://www.amqp.org/
-.. _`Stomp`: http://stomp.codehaus.org/
-.. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/
 
 
 .. _celery-documentation:
 
@@ -232,9 +200,9 @@ Documentation
 =============
 
 The `latest documentation`_ with user guides, tutorials and API reference
-is hosted at Github.
+is hosted at Read The Docs.
 
 
-.. _`latest documentation`: http://celery.github.com/celery/
+.. _`latest documentation`: http://docs.celeryproject.org/en/latest/
 
 .. _celery-installation:
 
@@ -296,7 +264,10 @@ You can install it by doing the following,::
     $ tar xvfz celery-0.0.0.tar.gz
     $ cd celery-0.0.0
     $ python setup.py build
-    # python setup.py install # as root
+    # python setup.py install
+
+The last command must be executed as a privileged user if
+you are not currently using a virtualenv.
 
 
 .. _celery-installing-from-git:
 

+ 4 - 2
celery/__compat__.py

@@ -14,7 +14,9 @@ from __future__ import absolute_import
 import operator
 import sys
 
+from functools import reduce
 from importlib import import_module
+from itertools import imap
 from types import ModuleType
 
 from .local import Proxy
@@ -167,7 +169,7 @@ def recreate_module(name, compat_modules=(), by_module={}, direct={},
     cattrs = dict(_compat_modules=compat_modules,
                   _all_by_module=by_module, _direct=direct,
                   _object_origins=origins,
-                  __all__=tuple(set(reduce(operator.add, map(tuple, [
+                  __all__=tuple(set(reduce(operator.add, imap(tuple, [
                                 compat_modules, origins, direct, attrs])))))
     new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
     new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
@@ -187,7 +189,7 @@ def get_compat_module(pkg, name):
         fqdn = '.'.join([pkg.__name__, name])
         module = sys.modules[fqdn] = import_module(attrs)
         return module
-    attrs['__all__'] = attrs.keys()
+    attrs['__all__'] = list(attrs)
     return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)
 
 

+ 33 - 8
celery/__init__.py

@@ -7,30 +7,55 @@
 
 
 from __future__ import absolute_import
 
-VERSION = (2, 6, 0, 'rc5')
+SERIES = 'DEVEL'
+VERSION = (3, 1, 0, 'a1')
 __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
 __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
 __homepage__ = 'http://celeryproject.org'
 __docformat__ = 'restructuredtext'
+__all__ = [
+    'celery', 'bugreport', 'shared_task', 'task',
+    'current_app', 'current_task',
+    'chain', 'chord', 'chunks', 'group', 'subtask',
+    'xmap', 'xstarmap', 'uuid', 'version', '__version__',
+]
+VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)
 
 
 # -eof meta-
 
+# This is for static analyzers
+Celery = object
+bugreport = lambda *a, **kw: None
+shared_task = lambda *a, **kw: None
+Task = object
+current_app = object()
+current_task = object()
+chain = lambda *a, **kw: None
+chord = lambda *a, **kw: None
+chunks = lambda *a, **kw: None
+group = lambda *a, **kw: None
+subtask = lambda *a, **kw: None
+xmap = lambda *a, **kw: None
+xstarmap = lambda *a, **kw: None
+uuid = lambda: None
+
 # Lazy loading
 from .__compat__ import recreate_module
 
 old_module, new_module = recreate_module(__name__,  # pragma: no cover
     by_module={
-        'celery.app':       ['Celery', 'bugreport'],
-        'celery.app.task':  ['Task'],
-        'celery._state':    ['current_app', 'current_task'],
-        'celery.canvas':    ['chain', 'chord', 'chunks',
-                             'group', 'subtask', 'xmap', 'xstarmap'],
-        'celery.utils':     ['uuid'],
+        'celery.app':      ['Celery', 'bugreport', 'shared_task'],
+        'celery.app.task': ['Task'],
+        'celery._state':   ['current_app', 'current_task'],
+        'celery.canvas':   ['chain', 'chord', 'chunks', 'group',
+                            'subtask', 'xmap', 'xstarmap'],
+        'celery.utils':    ['uuid'],
     },
     direct={'task': 'celery.task'},
     __package__='celery', __file__=__file__,
     __path__=__path__, __doc__=__doc__, __version__=__version__,
     __author__=__author__, __contact__=__contact__,
-    __homepage__=__homepage__, __docformat__=__docformat__, VERSION=VERSION,
+    __homepage__=__homepage__, __docformat__=__docformat__,
+    VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
 )

+ 24 - 0
celery/__main__.py

@@ -0,0 +1,24 @@
+from __future__ import absolute_import
+
+import sys
+
+
+def maybe_patch_concurrency():
+    from celery.platforms import maybe_patch_concurrency
+    maybe_patch_concurrency(sys.argv, ['-P'], ['--pool'])
+
+
+def main():
+    maybe_patch_concurrency()
+    from celery.bin.celery import main
+    main()
+
+
+def _compat_worker():
+    maybe_patch_concurrency()
+    from celery.bin.celeryd import main
+    main()
+
+
+if __name__ == '__main__':
+    main()

+ 26 - 0
celery/_state.py

@@ -12,12 +12,17 @@
 from __future__ import absolute_import
 
 import threading
+import weakref
 
 from celery.local import Proxy
 from celery.utils.threads import LocalStack
 
+#: Global default app used when no current app.
 default_app = None
 
+#: List of all app instances (weakrefs), must not be used directly.
+_apps = set()
+
 
 
 class _TLS(threading.local):
     #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
@@ -60,5 +65,26 @@ def get_current_worker_task():
             return task
 
 
+#: Proxy to current app.
 current_app = Proxy(get_current_app)
+
+#: Proxy to current task.
 current_task = Proxy(get_current_task)
+
+
+def _register_app(app):
+    _apps.add(weakref.ref(app))
+
+
+def _get_active_apps():
+    dirty = []
+    try:
+        for appref in _apps:
+            app = appref()
+            if app is None:
+                dirty.append(appref)
+            else:
+                yield app
+    finally:
+        while dirty:
+            _apps.discard(dirty.pop())

+ 55 - 0
celery/app/__init__.py

@@ -16,8 +16,11 @@ from celery._state import (  # noqa
         set_default_app,
         get_current_app as current_app,
         get_current_task as current_task,
+        _get_active_apps,
 )
+from celery.utils import gen_task_name
 
+from .builtins import shared_task as _shared_task
 from .base import Celery, AppPickler  # noqa
 
 #: Proxy always returning the app set as default.
@@ -80,3 +83,55 @@ else:
     disable_trace()
 
 App = Celery  # XXX Compat
+
+
+def shared_task(*args, **kwargs):
+    """Task decorator that creates shared tasks,
+    and returns a proxy that always returns the task from the current apps
+    task registry.
+
+    This can be used by library authors to create tasks that will work
+    for any app environment.
+
+    Example:
+
+        >>> from celery import Celery, shared_task
+        >>> @shared_task
+        ... def add(x, y):
+        ...     return x + y
+
+        >>> app1 = Celery(broker='amqp://')
+        >>> add.app is app1
+        True
+
+        >>> app2 = Celery(broker='redis://')
+        >>> add.app is app2
+        True
+
+    """
+
+    def create_shared_task(**options):
+
+        def __inner(fun):
+            name = options.get('name')
+            # Set as shared task so that unfinalized apps,
+            # and future apps will load the task.
+            _shared_task(lambda app: app._task_from_fun(fun, **options))
+
+            # Force all finalized apps to take this task as well.
+            for app in _get_active_apps():
+                if app.finalized:
+                    with app._finalize_mutex:
+                        app._task_from_fun(fun, **options)
+
+            # Returns a proxy that always gets the task from the current
+            # apps task registry.
+            def task_by_cons():
+                app = current_app()
+                return app.tasks[name or gen_task_name(app,
+                            fun.__name__, fun.__module__)]
+            return Proxy(task_by_cons)
+        return __inner
+
+    if len(args) == 1 and callable(args[0]):
+        return create_shared_task(**kwargs)(args[0])
+    return create_shared_task(**kwargs)

+ 1 - 2
celery/app/abstract.py

@@ -60,5 +60,4 @@ class configurated(object):
                 setattr(self, attr_name, attr_value)
 
     def confopts_as_dict(self):
-        return dict((key, getattr(self, key))
-                        for key in self.__confopts__.iterkeys())
+        return dict((key, getattr(self, key)) for key in self.__confopts__)

+ 31 - 13
celery/app/amqp.py

@@ -14,6 +14,7 @@ from weakref import WeakValueDictionary
 from kombu import Connection, Consumer, Exchange, Producer, Queue
 from kombu.common import entry_to_queue
 from kombu.pools import ProducerPool
+from kombu.utils.encoding import safe_repr
 
 from celery import signals
 from celery.utils import cached_property, uuid
@@ -24,7 +25,8 @@ from . import routes as _routes
 
 #: Human readable queue declaration.
 QUEUE_FORMAT = """
-. %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s
+. {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
+key={0.routing_key}
 """
 
 
@@ -91,6 +93,8 @@ class Queues(dict):
     def add_compat(self, name, **options):
         # docs used to use binding_key as routing key
         options.setdefault('routing_key', options.get('binding_key'))
+        if options['routing_key'] is None:
+            options['routing_key'] = name
         q = self[name] = entry_to_queue(name, **options)
         return q
 
@@ -99,16 +103,20 @@ class Queues(dict):
         active = self.consume_from
         if not active:
             return ''
-        info = [QUEUE_FORMAT.strip() % {
-                    'name': (name + ':').ljust(12),
-                    'exchange': q.exchange.name,
-                    'exchange_type': q.exchange.type,
-                    'routing_key': q.routing_key}
-                        for name, q in sorted(active.iteritems())]
+        info = [QUEUE_FORMAT.strip().format(q)
+                    for _, q in sorted(active.iteritems())]
         if indent_first:
             return textindent('\n'.join(info), indent)
         return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
 
+    def select_add(self, queue, **kwargs):
+        """Add new task queue that will be consumed from even when
+        a subset has been selected using the :option:`-Q` option."""
+        q = self.add(queue, **kwargs)
+        if self._consume_from is not None:
+            self._consume_from[q.name] = q
+        return q
+
     def select_subset(self, wanted):
         """Sets :attr:`consume_from` by selecting a subset of the
         currently defined queues.
@@ -120,7 +128,7 @@ class Queues(dict):
 
     def select_remove(self, queue):
         if self._consume_from is None:
-            self.select_subset(k for k in self.keys() if k != queue)
+            self.select_subset(k for k in self if k != queue)
         else:
             self._consume_from.pop(queue, None)
 
@@ -148,7 +156,7 @@ class TaskProducer(Producer):
         self.queues = self.app.amqp.queues  # shortcut
         super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
 
-    def delay_task(self, task_name, task_args=None, task_kwargs=None,
+    def publish_task(self, task_name, task_args=None, task_kwargs=None,
             countdown=None, eta=None, task_id=None, group_id=None,
             taskset_id=None,  # compat alias to group_id
             expires=None, exchange=None, exchange_type=None,
@@ -156,10 +164,12 @@ class TaskProducer(Producer):
             queue=None, now=None, retries=0, chord=None, callbacks=None,
             errbacks=None, mandatory=None, priority=None, immediate=None,
             routing_key=None, serializer=None, delivery_mode=None,
-            compression=None, reply_to=None, **kwargs):
+            compression=None, reply_to=None, timeout=None, soft_timeout=None,
+            **kwargs):
         """Send task message."""
         retry = self.retry if retry is None else retry
         # merge default and custom policy
         _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
                                                        else self.retry_policy)
         task_id = task_id or uuid()
@@ -177,6 +187,7 @@ class TaskProducer(Producer):
             expires = now + timedelta(seconds=expires)
         eta = eta and eta.isoformat()
         expires = expires and expires.isoformat()
+        timeouts = (timeout, soft_timeout)
 
         body = {'task': task_name,
                 'id': task_id,
@@ -189,6 +200,7 @@ class TaskProducer(Producer):
                 'callbacks': callbacks,
                 'errbacks': errbacks,
-                'reply_to': reply_to}
+                'reply_to': reply_to,
+                'timeouts': timeouts}
         group_id = group_id or taskset_id
         if group_id:
             body['taskset'] = group_id
@@ -205,15 +217,21 @@ class TaskProducer(Producer):
 
         signals.task_sent.send(sender=task_name, **body)
         if event_dispatcher:
+            exname = exchange or self.exchange
+            if isinstance(exname, Exchange):
+                exname = exname.name
             event_dispatcher.send('task-sent', uuid=task_id,
                                                name=task_name,
-                                               args=repr(task_args),
-                                               kwargs=repr(task_kwargs),
+                                               args=safe_repr(task_args),
+                                               kwargs=safe_repr(task_kwargs),
                                                retries=retries,
                                                eta=eta,
                                                expires=expires,
-                                               queue=queue)
+                                               queue=queue,
+                                               exchange=exname,
+                                               routing_key=routing_key)
         return task_id
+    delay_task = publish_task   # XXX Compat
 
 
 class TaskPublisher(TaskProducer):
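
Editor's note — the new `QUEUE_FORMAT` uses `str.format` attribute access, so a `kombu.Queue` instance can be passed directly instead of building a dict first; a small sketch:

    from kombu import Exchange, Queue

    q = Queue('video', Exchange('media', type='direct'), routing_key='video')
    fmt = '. {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) key={0.routing_key}'
    print(fmt.format(q))
    # . video            exchange=media(direct) key=video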

+ 2 - 2
celery/app/annotations.py

@@ -20,7 +20,7 @@ _first_match_any = firstmethod('annotate_any')
 
 
 def resolve_all(anno, task):
-    return filter(None, (_first_match(anno, task), _first_match_any(anno)))
+    return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)
 
 
 class MapAnnotation(dict):
@@ -52,4 +52,4 @@ def prepare(annotations):
         return ()
     elif not isinstance(annotations, (list, tuple)):
         annotations = (annotations, )
-    return map(expand_annotation, annotations)
+    return [expand_annotation(a) for a in annotations]

+ 74 - 34
celery/app/base.py

@@ -7,14 +7,15 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
+import threading
 import warnings
 
 from collections import deque
 from contextlib import contextmanager
 from copy import deepcopy
 from functools import wraps
+from operator import attrgetter
 
 from billiard.util import register_after_fork
 from kombu.clocks import LamportClock
@@ -24,7 +25,7 @@ from celery import platforms
 from celery.exceptions import AlwaysEagerIgnored
 from celery.loaders import get_loader_cls
 from celery.local import PromiseProxy, maybe_evaluate
-from celery._state import _task_stack, _tls, get_current_app
+from celery._state import _task_stack, _tls, get_current_app, _register_app
 from celery.utils.functional import first
 from celery.utils.imports import instantiate, symbol_by_name
 
@@ -72,7 +73,11 @@ class Celery(object):
         self.registry_cls = symbol_by_name(self.registry_cls)
         self.accept_magic_kwargs = accept_magic_kwargs
 
+        self.configured = False
+        self._pending_defaults = deque()
+
         self.finalized = False
+        self._finalize_mutex = threading.Lock()
         self._pending = deque()
         self._tasks = tasks
         if not isinstance(self._tasks, TaskRegistry):
@@ -89,10 +94,20 @@ class Celery(object):
         if self.set_as_current:
             self.set_current()
         self.on_init()
+        _register_app(self)
 
     def set_current(self):
         _tls.current_app = self
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
+    def close(self):
+        self._maybe_close_pool()
+
     def on_init(self):
         """Optional callback called at init."""
         pass
@@ -148,16 +163,24 @@ class Celery(object):
         return task
 
     def finalize(self):
-        if not self.finalized:
-            self.finalized = True
-            load_shared_tasks(self)
+        with self._finalize_mutex:
+            if not self.finalized:
+                self.finalized = True
+                load_shared_tasks(self)
 
-            pending = self._pending
-            while pending:
-                maybe_evaluate(pending.pop())
+                pending = self._pending
+                while pending:
+                    maybe_evaluate(pending.popleft())
 
-            for task in self._tasks.itervalues():
-                task.bind(self)
+                for task in self._tasks.itervalues():
+                    task.bind(self)
+
+    def add_defaults(self, fun):
+        if not callable(fun):
+            d, fun = fun, lambda: d
+        if self.configured:
+            return self.conf.add_defaults(fun())
+        self._pending_defaults.append(fun)
 
     def config_from_object(self, obj, silent=False):
         del(self.conf)
@@ -171,8 +194,10 @@ class Celery(object):
         self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
 
     def send_task(self, name, args=None, kwargs=None, countdown=None,
-            eta=None, task_id=None, publisher=None, connection=None,
-            result_cls=None, expires=None, queues=None, **options):
+            eta=None, task_id=None, producer=None, connection=None,
+            result_cls=None, expires=None, queues=None, publisher=None,
+            **options):
+        producer = producer or publisher  # XXX compat
         if self.conf.CELERY_ALWAYS_EAGER:  # pragma: no cover
             warnings.warn(AlwaysEagerIgnored(
                 'CELERY_ALWAYS_EAGER has no effect on send_task'))
@@ -182,16 +207,15 @@ class Celery(object):
         options.setdefault('compression',
                            self.conf.CELERY_MESSAGE_COMPRESSION)
         options = router.route(options, name, args, kwargs)
-        with self.default_producer(publisher) as producer:
-            return result_cls(producer.delay_task(name, args, kwargs,
-                                                  task_id=task_id,
-                                                  countdown=countdown, eta=eta,
-                                                  expires=expires, **options))
-
-    def connection(self, hostname=None, userid=None,
-            password=None, virtual_host=None, port=None, ssl=None,
-            insist=None, connect_timeout=None, transport=None,
-            transport_options=None, **kwargs):
+        with self.producer_or_acquire(producer) as producer:
+            return result_cls(producer.publish_task(name, args, kwargs,
+                        task_id=task_id,
+                        countdown=countdown, eta=eta,
+                        expires=expires, **options))
+
+    def connection(self, hostname=None, userid=None, password=None,
+            virtual_host=None, port=None, ssl=None, connect_timeout=None,
+            transport=None, transport_options=None, heartbeat=None, **kwargs):
         conf = self.conf
         return self.amqp.Connection(
                     hostname or conf.BROKER_HOST,
@@ -200,29 +224,36 @@ class Celery(object):
                     virtual_host or conf.BROKER_VHOST,
                     port or conf.BROKER_PORT,
                     transport=transport or conf.BROKER_TRANSPORT,
-                    insist=self.either('BROKER_INSIST', insist),
                     ssl=self.either('BROKER_USE_SSL', ssl),
                     connect_timeout=self.either(
-                                'BROKER_CONNECTION_TIMEOUT', connect_timeout),
+                        'BROKER_CONNECTION_TIMEOUT', connect_timeout),
+                    heartbeat=heartbeat,
                     transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS,
                                            **transport_options or {}))
     broker_connection = connection
 
     @contextmanager
-    def default_connection(self, connection=None, *args, **kwargs):
+    def connection_or_acquire(self, connection=None, pool=True,
+            *args, **kwargs):
         if connection:
             yield connection
         else:
-            with self.pool.acquire(block=True) as connection:
-                yield connection
+            if pool:
+                with self.pool.acquire(block=True) as connection:
+                    yield connection
+            else:
+                with self.connection() as connection:
+                    yield connection
+    default_connection = connection_or_acquire  # XXX compat
 
     @contextmanager
-    def default_producer(self, producer=None):
+    def producer_or_acquire(self, producer=None):
         if producer:
             yield producer
         else:
             with self.amqp.producer_pool.acquire(block=True) as producer:
                 yield producer
+    default_producer = producer_or_acquire  # XXX compat
 
     def with_default_connection(self, fun):
         """With any function accepting a `connection`
@@ -234,14 +265,14 @@ class Celery(object):
 
         **Deprecated**
 
-        Use ``with app.default_connection(connection)`` instead.
+        Use ``with app.connection_or_acquire(connection)`` instead.
 
         """
         @wraps(fun)
         def _inner(*args, **kwargs):
             connection = kwargs.pop('connection', None)
-            with self.default_connection(connection) as c:
-                return fun(*args, **dict(kwargs, connection=c))
+            with self.connection_or_acquire(connection) as c:
+                return fun(*args, connection=c, **kwargs)
         return _inner
 
     def prepare_config(self, c):
@@ -283,14 +314,23 @@ class Celery(object):
         return backend(app=self, url=url)
 
     def _get_config(self):
+        self.configured = True
         s = Settings({}, [self.prepare_config(self.loader.conf),
                              deepcopy(DEFAULTS)])
+
+        # load lazy config dict initializers.
+        pending = self._pending_defaults
+        while pending:
+            s.add_defaults(pending.popleft()())
         if self._preconf:
             for key, value in self._preconf.iteritems():
                 setattr(s, key, value)
         return s
 
     def _after_fork(self, obj_):
+        self._maybe_close_pool()
+
+    def _maybe_close_pool(self):
         if self._pool:
             self._pool.force_close_all()
             self._pool = None
@@ -328,11 +368,11 @@ class Celery(object):
         return type(name or Class.__name__, (Class, ), attrs)
 
     def _rgetattr(self, path):
-        return reduce(getattr, [self] + path.split('.'))
+        return attrgetter(path)(self)
 
     def __repr__(self):
-        return '<%s %s:0x%x>' % (self.__class__.__name__,
-                                 self.main or '__main__', id(self), )
+        return '<{0} {1}:0x{2:x}>'.format(
+            type(self).__name__, self.main or '__main__', id(self))
 
     def __reduce__(self):
         # Reduce only pickles the configuration changes,
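
Editor's note — two of the additions above in one usage sketch: the app now works as a context manager (closing its pool on exit), and `add_defaults` accepts a callable that is evaluated only when the configuration is first needed (the setting name is real; values are illustrative):

    from celery import Celery

    def extra_conf():
        # evaluated lazily, on first access to app.conf
        return {'CELERY_TASK_RESULT_EXPIRES': 3600}

    with Celery('proj', broker='amqp://') as app:
        app.add_defaults(extra_conf)
        print(app.conf.CELERY_TASK_RESULT_EXPIRES)   # -> 3600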

+ 129 - 71
celery/app/builtins.py

@@ -8,9 +8,9 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
-from itertools import starmap
+from collections import deque
+from itertools import imap, izip, starmap
 
 from celery._state import get_current_worker_task
 from celery.utils import uuid
@@ -69,10 +69,13 @@ def add_unlock_chord_task(app):
     from celery.canvas import subtask
     from celery import result as _res
 
-    @app.task(name='celery.chord_unlock', max_retries=None)
-    def unlock_chord(group_id, callback, interval=1, propagate=False,
-            max_retries=None, result=None):
-        result = _res.GroupResult(group_id, map(_res.AsyncResult, result))
+    @app.task(name='celery.chord_unlock', max_retries=None,
+              default_retry_delay=1, ignore_result=True)
+    def unlock_chord(group_id, callback, interval=None, propagate=False,
+            max_retries=None, result=None, Result=_res.AsyncResult):
+        if interval is None:
+            interval = unlock_chord.default_retry_delay
+        result = _res.GroupResult(group_id, [Result(r) for r in result])
         j = result.join_native if result.supports_native_join else result.join
         if result.ready():
             subtask(callback).delay(j(propagate=propagate))
@@ -88,7 +91,8 @@ def add_map_task(app):
     @app.task(name='celery.map')
     def xmap(task, it):
         task = subtask(task).type
-        return list(map(task, it))
+        return list(imap(task, it))
+    return xmap
 
 
 @shared_task
@@ -99,6 +103,7 @@ def add_starmap_task(app):
     def xstarmap(task, it):
         task = subtask(task).type
         return list(starmap(task, it))
+    return xstarmap
 
 
 @shared_task
@@ -108,12 +113,13 @@ def add_chunk_task(app):
     @app.task(name='celery.chunks')
     def chunks(task, it, n):
         return _chunks.apply_chunks(task, it, n)
+    return chunks
 
 
 @shared_task
 def add_group_task(app):
     _app = app
-    from celery.canvas import subtask
+    from celery.canvas import maybe_subtask, subtask
     from celery.result import from_serializable
 
     class Group(app.Task):
@@ -121,53 +127,64 @@ def add_group_task(app):
         name = 'celery.group'
         accept_magic_kwargs = False
 
-        def run(self, tasks, result, group_id):
+        def run(self, tasks, result, group_id, partial_args):
             app = self.app
             result = from_serializable(result)
+            # any partial args are added to all tasks in the group
+            taskit = (subtask(task).clone(partial_args)
+                        for i, task in enumerate(tasks))
             if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
                 return app.GroupResult(result.id,
-                        [subtask(task).apply(group_id=group_id)
-                            for task in tasks])
-            with app.default_producer() as pub:
-                [subtask(task).apply_async(group_id=group_id, publisher=pub,
-                                           add_to_parent=False)
-                        for task in tasks]
+                        [task.apply(group_id=group_id) for task in taskit])
+            with app.producer_or_acquire() as pub:
+                [task.apply_async(group_id=group_id, publisher=pub,
+                                  add_to_parent=False) for task in taskit]
             parent = get_current_worker_task()
             if parent:
                 parent.request.children.append(result)
             return result
 
-        def prepare(self, options, tasks, **kwargs):
-            r = []
+        def prepare(self, options, tasks, args, **kwargs):
+            AsyncResult = self.AsyncResult
             options['group_id'] = group_id = \
                     options.setdefault('task_id', uuid())
-            for task in tasks:
+
+            def prepare_member(task):
+                task = maybe_subtask(task)
                 opts = task.options
                 opts['group_id'] = group_id
                 try:
                     tid = opts['task_id']
                 except KeyError:
                     tid = opts['task_id'] = uuid()
-                r.append(self.AsyncResult(tid))
-            return tasks, self.app.GroupResult(group_id, r), group_id
+                return task, AsyncResult(tid)
+
+            try:
+                tasks, res = list(izip(*[prepare_member(task)
+                                                for task in tasks]))
+            except ValueError:  # tasks empty
+                tasks, res = [], []
+            return (tasks, self.app.GroupResult(group_id, res), group_id, args)
 
-        def apply_async(self, args=(), kwargs={}, **options):
+        def apply_async(self, partial_args=(), kwargs={}, **options):
             if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            tasks, result, gid = self.prepare(options, **kwargs)
-            super(Group, self).apply_async(
-                    (list(tasks), result, gid), **options)
+                return self.apply(partial_args, kwargs, **options)
+            tasks, result, gid, args = self.prepare(options,
+                                            args=partial_args, **kwargs)
+            super(Group, self).apply_async((list(tasks),
+                result.serializable(), gid, args), **options)
             return result
 
         def apply(self, args=(), kwargs={}, **options):
-            tasks, result, gid = self.prepare(options, **kwargs)
-            return super(Group, self).apply((tasks, result, gid), **options)
+            return super(Group, self).apply(
+                    self.prepare(options, args=args, **kwargs),
+                    **options).get()
     return Group
 
 
 @shared_task
 def add_chain_task(app):
-    from celery.canvas import maybe_subtask
+    from celery.canvas import chord, group, maybe_subtask
     _app = app
 
     class Chain(app.Task):
@@ -175,36 +192,64 @@ def add_chain_task(app):
         name = 'celery.chain'
         accept_magic_kwargs = False
 
-        def apply_async(self, args=(), kwargs={}, **options):
+        def prepare_steps(self, args, tasks):
+            steps = deque(tasks)
+            next_step = prev_task = prev_res = None
+            tasks, results = [], []
+            i = 0
+            while steps:
+                # First task gets partial args from the chain.
+                task = maybe_subtask(steps.popleft())
+                task = task.clone() if i else task.clone(args)
+                i += 1
+                tid = task.options.get('task_id')
+                if tid is None:
+                    tid = task.options['task_id'] = uuid()
+                res = task.type.AsyncResult(tid)
+
+                # automatically upgrade group(..) | s to chord(group, s)
+                if isinstance(task, group):
+                    try:
+                        next_step = steps.popleft()
+                    except IndexError:
+                        next_step = None
+                if next_step is not None:
+                    task = chord(task, body=next_step, task_id=tid)
+                if prev_task:
+                    # link previous task to this task.
+                    prev_task.link(task)
+                    # set the results parent attribute.
+                    res.parent = prev_res
+
+                results.append(res)
+                tasks.append(task)
+                prev_task, prev_res = task, res
+
+            return tasks, results
+
+        def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+                task_id=None, **options):
             if self.app.conf.CELERY_ALWAYS_EAGER:
                 return self.apply(args, kwargs, **options)
             options.pop('publisher', None)
-            group_id = options.pop('group_id', None)
-            chord = options.pop('chord', None)
-            tasks = [maybe_subtask(t).clone(
-                        task_id=options.pop('task_id', uuid()),
-                        **options
-                    )
-                    for t in kwargs['tasks']]
-            reduce(lambda a, b: a.link(b), tasks)
+            tasks, results = self.prepare_steps(args, kwargs['tasks'])
+            result = results[-1]
             if group_id:
                 tasks[-1].set(group_id=group_id)
             if chord:
                 tasks[-1].set(chord=chord)
+            if task_id:
+                tasks[-1].set(task_id=task_id)
+                result = tasks[-1].type.AsyncResult(task_id)
             tasks[0].apply_async()
-            results = [task.type.AsyncResult(task.options['task_id'])
-                            for task in tasks]
-            reduce(lambda a, b: a.set_parent(b), reversed(results))
-            return results[-1]
-
-        def apply(self, args=(), kwargs={}, **options):
-            tasks = [maybe_subtask(task).clone() for task in kwargs['tasks']]
-            res = prev = None
-            for task in tasks:
-                res = task.apply((prev.get(), ) if prev else ())
-                res.parent, prev = prev, res
-            return res
+            return result
 
+        def apply(self, args=(), kwargs={}, subtask=maybe_subtask, **options):
+            last, fargs = None, args  # fargs passed to first task only
+            for task in kwargs['tasks']:
+                res = subtask(task).clone(fargs).apply(last and (last.get(), ))
+                res.parent, last, fargs = last, res, None
+            return last
     return Chain
 
 
@@ -223,43 +268,57 @@ def add_chord_task(app):
         accept_magic_kwargs = False
         ignore_result = False
 
-        def run(self, header, body, interval=1, max_retries=None,
-                propagate=False, eager=False, **kwargs):
-            if not isinstance(header, group):
-                header = group(header)
-            r = []
+        def run(self, header, body, partial_args=(), interval=1,
+                max_retries=None, propagate=False, eager=False, **kwargs):
             group_id = uuid()
-            for task in header.tasks:
-                opts = task.options
-                try:
-                    tid = opts['task_id']
-                except KeyError:
-                    tid = opts['task_id'] = uuid()
-                opts['chord'] = body
-                opts['group_id'] = group_id
-                r.append(app.AsyncResult(tid))
+            AsyncResult = self.app.AsyncResult
+            prepare_member = self._prepare_member
+
+            # - convert back to group if serialized
+            if not isinstance(header, group):
+                header = group([maybe_subtask(t) for t in header])
+            # - eager applies the group inline
             if eager:
-                return header.apply(task_id=group_id)
+                return header.apply(args=partial_args, task_id=group_id)
+
+            results = [AsyncResult(prepare_member(task, body, group_id))
+                            for task in header.tasks]
+
+            # - fallback implementation schedules the chord_unlock task here
             app.backend.on_chord_apply(group_id, body,
                                        interval=interval,
                                        max_retries=max_retries,
                                        propagate=propagate,
-                                       result=r)
-            return header(task_id=group_id)
+                                       result=results)
+            # - call the header group, returning the GroupResult.
+            return header(*partial_args, task_id=group_id)
+
+        def _prepare_member(self, task, body, group_id):
+            opts = task.options
+            # d.setdefault would work but generating uuids is expensive
+            try:
+                task_id = opts['task_id']
+            except KeyError:
+                task_id = opts['task_id'] = uuid()
+            opts.update(chord=body, group_id=group_id)
+            return task_id
 
         def apply_async(self, args=(), kwargs={}, task_id=None, **options):
             if self.app.conf.CELERY_ALWAYS_EAGER:
                 return self.apply(args, kwargs, **options)
             group_id = options.pop('group_id', None)
             chord = options.pop('chord', None)
-            header, body = (list(kwargs['header']),
-                            maybe_subtask(kwargs['body']))
+            header = kwargs.pop('header')
+            body = kwargs.pop('body')
+            header, body = (list(maybe_subtask(header)),
+                            maybe_subtask(body))
             if group_id:
                 body.set(group_id=group_id)
             if chord:
                 body.set(chord=chord)
             callback_id = body.options.setdefault('task_id', task_id or uuid())
-            parent = super(Chord, self).apply_async((header, body), **options)
+            parent = super(Chord, self).apply_async((header, body, args),
+                                                     kwargs, **options)
             body_result = self.AsyncResult(callback_id)
             body_result.parent = parent
             return body_result
@@ -269,6 +328,5 @@ def add_chord_task(app):
             res = super(Chord, self).apply(args, dict(kwargs, eager=True),
                                            **options)
             return maybe_subtask(body).apply(
-                        args=(res.get(propagate=propagate).get().join(), ))
-
+                        args=(res.get(propagate=propagate).get(), ))
     return Chord
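
Editor's note — the `prepare_steps` logic above means a group followed by another task inside a chain is automatically upgraded to a chord, so the callback receives the joined group result; a sketch (assuming `add` and `tsum` are registered tasks):

    from celery import chain, chord, group

    c = chain(add.s(2, 2), group(add.s(i, i) for i in range(4)), tsum.s())
    # is treated the same as:
    c = chain(add.s(2, 2), chord(group(add.s(i, i) for i in range(4)), tsum.s()))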
     return Chord

+ 18 - 11
celery/app/control.py

@@ -8,7 +8,6 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
 from kombu.pidbox import Mailbox
 from kombu.utils import cached_property
@@ -27,12 +26,13 @@ class Inspect(object):
     app = None
 
     def __init__(self, destination=None, timeout=1, callback=None,
-            connection=None, app=None):
+            connection=None, app=None, limit=None):
         self.app = app or self.app
         self.destination = destination
         self.timeout = timeout
         self.callback = callback
         self.connection = connection
+        self.limit = limit
 
     def _prepare(self, reply):
         if not reply:
@@ -49,6 +49,7 @@ class Inspect(object):
                                       destination=self.destination,
                                       callback=self.callback,
                                       connection=self.connection,
+                                      limit=self.limit,
                                       timeout=self.timeout, reply=True))
 
     def report(self):
@@ -69,8 +70,8 @@ class Inspect(object):
     def revoked(self):
         return self._request('dump_revoked')
 
-    def registered(self):
-        return self._request('dump_tasks')
+    def registered(self, *taskinfoitems):
+        return self._request('dump_tasks', taskinfoitems=taskinfoitems)
     registered_tasks = registered
 
     def ping(self):
@@ -79,13 +80,16 @@ class Inspect(object):
     def active_queues(self):
         return self._request('active_queues')
 
+    def conf(self):
+        return self._request('dump_conf')
+
 
 class Control(object):
     Mailbox = Mailbox
 
     def __init__(self, app=None):
         self.app = app_or_default(app)
-        self.mailbox = self.Mailbox('celeryd', type='fanout')
+        self.mailbox = self.Mailbox('celery', type='fanout')
 
     @cached_property
     def inspect(self):
@@ -100,7 +104,7 @@ class Control(object):
         :returns: the number of tasks discarded.
 
         """
-        with self.app.default_connection(connection) as conn:
+        with self.app.connection_or_acquire(connection) as conn:
             return self.app.amqp.TaskConsumer(conn).purge()
     discard_all = purge
 
@@ -155,7 +159,7 @@ class Control(object):
                               **kwargs)
 
     def add_consumer(self, queue, exchange=None, exchange_type='direct',
-            routing_key=None, **options):
+            routing_key=None, options=None, **kwargs):
         """Tell all (or specific) workers to start consuming from a new queue.
 
         Only the queue name is required as if only the queue is specified
@@ -172,14 +176,17 @@ class Control(object):
         :keyword exchange_type: Type of exchange (defaults to 'direct')
             command to, when empty broadcast to all workers.
         :keyword routing_key: Optional routing key.
+        :keyword options: Additional options as supported
+            by :meth:`kombu.entity.Queue.from_dict`.
 
         See :meth:`broadcast` for supported keyword arguments.
 
         """
         return self.broadcast('add_consumer',
-                arguments={'queue': queue, 'exchange': exchange,
-                           'exchange_type': exchange_type,
-                           'routing_key': routing_key}, **options)
+                arguments=dict({'queue': queue, 'exchange': exchange,
+                                'exchange_type': exchange_type,
+                                'routing_key': routing_key}, **options or {}),
+                **kwargs)
 
     def cancel_consumer(self, queue, **kwargs):
         """Tell all (or specific) workers to stop consuming from ``queue``.
@@ -247,7 +254,7 @@ class Control(object):
             received.
 
         """
-        with self.app.default_connection(connection) as conn:
+        with self.app.connection_or_acquire(connection) as conn:
             arguments = dict(arguments or {}, **extra_kwargs)
             return self.mailbox(conn)._broadcast(command, arguments,
                                                  destination, reply, timeout,
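
Editor's note — usage sketch for the new keyword arguments (assumes an `app` with reachable workers): `limit` stops waiting after that many replies, and `options` is forwarded to the queue declaration:

    # gather at most one ping reply instead of waiting out the timeout
    replies = app.control.inspect(limit=1).ping()

    # declare the new queue non-durable via extra Queue.from_dict options
    app.control.add_consumer('video', exchange='media', routing_key='video',
                             options={'durable': False})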

+ 15 - 20
celery/app/defaults.py

@@ -36,8 +36,8 @@ DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
 DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
 DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
 %(task_name)s[%(task_id)s]: %(message)s"""
 %(task_name)s[%(task_id)s]: %(message)s"""
 
 
-_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '3.0', 'alt': 'BROKER_URL'}
-_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '3.0',
+_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', 'alt': 'BROKER_URL'}
+_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
               'alt': 'URL form of CELERY_RESULT_BACKEND'}
               'alt': 'URL form of CELERY_RESULT_BACKEND'}
 
 
 
 
@@ -58,7 +58,8 @@ class Option(object):
         return self.typemap[self.type](value)
 
     def __repr__(self):
-        return '<Option: type->%s default->%r>' % (self.type, self.default)
+        return '<Option: type->{0} default->{1!r}>'.format(self.type,
+                                                           self.default)
 
 
 NAMESPACES = {
@@ -67,9 +68,8 @@ NAMESPACES = {
         'CONNECTION_TIMEOUT': Option(4, type='float'),
         'CONNECTION_RETRY': Option(True, type='bool'),
         'CONNECTION_MAX_RETRIES': Option(100, type='int'),
+        'HEARTBEAT': Option(10, type='int'),
         'POOL_LIMIT': Option(10, type='int'),
-        'INSIST': Option(False, type='bool',
-                         deprecate_by='2.4', remove_by='3.0'),
         'USE_SSL': Option(False, type='bool'),
         'TRANSPORT': Option(type='string'),
         'TRANSPORT_OPTIONS': Option({}, type='dict'),
@@ -90,11 +90,6 @@ NAMESPACES = {
     'CELERY': {
         'ACKS_LATE': Option(False, type='bool'),
         'ALWAYS_EAGER': Option(False, type='bool'),
-        'AMQP_TASK_RESULT_EXPIRES': Option(type='float',
-                deprecate_by='2.5', remove_by='3.0',
-                alt='CELERY_TASK_RESULT_EXPIRES'),
-        'AMQP_TASK_RESULT_CONNECTION_MAX': Option(1, type='int',
-                remove_by='2.5', alt='BROKER_POOL_LIMIT'),
         'ANNOTATIONS': Option(type='any'),
         'BROADCAST_QUEUE': Option('celeryctl'),
         'BROADCAST_EXCHANGE': Option('celeryctl'),
@@ -110,7 +105,7 @@ NAMESPACES = {
         'DEFAULT_EXCHANGE_TYPE': Option('direct'),
         'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
         'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
-        'ENABLE_UTC': Option(False, type='bool'),
+        'ENABLE_UTC': Option(True, type='bool'),
         'EVENT_SERIALIZER': Option('json'),
         'IMPORTS': Option((), type='tuple'),
         'INCLUDE': Option((), type='tuple'),
@@ -136,8 +131,6 @@ NAMESPACES = {
         'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
         'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
         'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
-        'TASK_ERROR_WHITELIST': Option((), type='tuple',
-            deprecate_by='2.5', remove_by='3.0'),
         'TASK_PUBLISH_RETRY': Option(True, type='bool'),
         'TASK_PUBLISH_RETRY_POLICY': Option({
                 'max_retries': 100,
@@ -154,6 +147,7 @@ NAMESPACES = {
         'SECURITY_KEY': Option(type='string'),
         'SECURITY_CERTIFICATE': Option(type='string'),
         'SECURITY_CERT_STORE': Option(type='string'),
+        'WORKER_DIRECT': Option(False, type='bool'),
     },
     'CELERYD': {
         'AUTOSCALER': Option('celery.worker.autoscale.Autoscaler'),
@@ -167,14 +161,15 @@ NAMESPACES = {
         'CONSUMER': Option(type='string'),
         'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
         'LOG_COLOR': Option(type='bool'),
-        'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='3.0',
+        'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
                             alt='--loglevel argument'),
-        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0',
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
                             alt='--logfile argument'),
         'MEDIATOR': Option('celery.worker.mediator.Mediator'),
         'MAX_TASKS_PER_CHILD': Option(type='int'),
         'POOL': Option(DEFAULT_POOL),
         'POOL_PUTLOCKS': Option(True, type='bool'),
+        'POOL_RESTARTS': Option(False, type='bool'),
         'PREFETCH_MULTIPLIER': Option(4, type='int'),
         'STATE_DB': Option(),
         'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
@@ -187,15 +182,15 @@ NAMESPACES = {
         'SCHEDULER': Option('celery.beat.PersistentScheduler'),
         'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
         'MAX_LOOP_INTERVAL': Option(0, type='float'),
-        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='3.0',
+        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
                             alt='--loglevel argument'),
-        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0',
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
                            alt='--logfile argument'),
     },
     'CELERYMON': {
-        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='3.0',
+        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
                             alt='--loglevel argument'),
-        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0',
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
                            alt='--logfile argument'),
         'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
     },
@@ -229,7 +224,7 @@ def find_deprecated_settings(source):
     from celery.utils import warn_deprecated
     for name, opt in flatten(NAMESPACES):
         if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
-            warn_deprecated(description='The %r setting' % (name, ),
+            warn_deprecated(description='The {0!r} setting'.format(name),
                             deprecation=opt.deprecate_by,
                             removal=opt.remove_by,
                             alternative=opt.alt)
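
Editor's note — `Option.to_python` (whose tail, `self.typemap[self.type](value)`, appears as context in the `__repr__` hunk) is what coerces raw values, e.g. strings parsed from the command line, into the declared type; a small sketch:

    from celery.app.defaults import Option

    opt = Option(10, type='int')
    assert opt.to_python('42') == 42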

+ 9 - 7
celery/app/log.py

@@ -16,12 +16,13 @@ import logging
 import os
 import sys
 
+from logging.handlers import WatchedFileHandler
+
 from kombu.log import NullHandler
 
 from celery import signals
 from celery._state import get_current_task
 from celery.utils import isatty
-from celery.utils.compat import WatchedFileHandler
 from celery.utils.log import (
     get_logger, mlevel,
     ColorFormatter, ensure_process_aware_logger,
@@ -94,12 +95,13 @@ class Logging(object):
             if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                 root.handlers = []
 
-            for logger in filter(None, (root, get_multiprocessing_logger())):
-                self.setup_handlers(logger, logfile, format,
-                                    colorize, **kwargs)
-                if loglevel:
-                    logger.setLevel(loglevel)
-                signals.after_setup_logger.send(sender=None, logger=logger,
+            for logger in root, get_multiprocessing_logger():
+                if logger is not None:
+                    self.setup_handlers(logger, logfile, format,
+                                        colorize, **kwargs)
+                    if loglevel:
+                        logger.setLevel(loglevel)
+                    signals.after_setup_logger.send(sender=None, logger=logger,
                                             loglevel=loglevel, logfile=logfile,
                                             format=format, colorize=colorize)
             # then setup the root task logger.

+ 11 - 3
celery/app/registry.py

@@ -10,6 +10,9 @@ from __future__ import absolute_import
 
 import inspect
 
+from importlib import import_module
+
+from celery._state import get_current_app
 from celery.exceptions import NotRegistered
 
 
@@ -43,7 +46,7 @@ class TaskRegistry(dict):
         except KeyError:
             raise self.NotRegistered(name)
 
-    # -- these methods are irrelevant now and will be removed in 3.0
+    # -- these methods are irrelevant now and will be removed in 4.0
     def regular(self):
         return self.filter_types('regular')
 
@@ -56,5 +59,10 @@ class TaskRegistry(dict):
 
 
 def _unpickle_task(name):
-    from celery import current_app
-    return current_app.tasks[name]
+    return get_current_app().tasks[name]
+
+
+def _unpickle_task_v2(name, module=None):
+    if module:
+        import_module(module)
+    return get_current_app().tasks[name]

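Note: a sketch of what _unpickle_task_v2 enables, assuming that creating an app also makes it current; the app and task names here are hypothetical. Task.__reduce__ (changed in celery/app/task.py below) pickles only (name, module), and the receiving side imports the module so its tasks re-register:

    import pickle
    from celery import Celery

    app = Celery('proj')  # hypothetical app

    @app.task
    def add(x, y):
        return x + y

    # round-trips by name via the registry, importing the module if needed
    clone = pickle.loads(pickle.dumps(add))
    assert clone.name == add.name
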
+ 2 - 2
celery/app/routes.py

@@ -64,7 +64,7 @@ class Router(object):
             except KeyError:
                 if not self.create_missing:
                     raise QueueNotFound(
-                        'Queue %r is not defined in CELERY_QUEUES' % queue)
+                        'Queue {0!r} missing from CELERY_QUEUES'.format(queue))
                 for key in 'exchange', 'routing_key':
                     if route.get(key) is None:
                         route[key] = queue
@@ -92,4 +92,4 @@ def prepare(routes):
         return ()
     if not isinstance(routes, (list, tuple)):
         routes = (routes, )
-    return map(expand_route, routes)
+    return [expand_route(route) for route in routes]

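Note: the prepare() change is Python 3 groundwork; map() returns a lazy iterator there, while a list comprehension yields a real list on both versions. An illustrative sketch with example values:

    routes = ('route-a', 'route-b')             # example values
    lengths = [len(route) for route in routes]  # a list on Python 2 and 3 alike
    assert lengths == [7, 7]                    # map() would be lazy on Python 3
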
+ 39 - 57
celery/app/task.py

@@ -7,9 +7,7 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
-import os
 import sys
 
 from celery import current_app
@@ -19,27 +17,22 @@ from celery._state import get_current_worker_task, _task_stack
 from celery.datastructures import ExceptionInfo
 from celery.exceptions import MaxRetriesExceededError, RetryTaskError
 from celery.result import EagerResult
-from celery.utils import fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.mail import ErrorMail
 
 from .annotations import resolve_all as resolve_all_annotations
-from .registry import _unpickle_task
+from .registry import _unpickle_task_v2
 
 #: extracts attributes related to publishing a message from an object.
 extract_exec_options = mattrgetter(
     'queue', 'routing_key', 'exchange',
     'immediate', 'mandatory', 'priority', 'expires',
     'serializer', 'delivery_mode', 'compression',
+    'timeout', 'soft_timeout',
 )
 
-#: Billiard sets this when execv is enabled.
-#: We use it to find out the name of the original ``__main__``
-#: module, so that we can properly rewrite the name of the
-#: task to be that of ``App.main``.
-MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None
-
 
 class Context(object):
     # Default context
@@ -76,7 +69,7 @@ class Context(object):
             return default
 
     def __repr__(self):
-        return '<Context: %r>' % (vars(self, ))
+        return '<Context: {0!r}>'.format(vars(self))
 
     @property
     def children(self):
@@ -112,15 +105,9 @@ class TaskType(type):
         app = attrs['_app'] = _app1 or _app2 or current_app
 
         # - Automatically generate missing/empty name.
-        autoname = False
-        if not attrs.get('name'):
-            try:
-                module_name = sys.modules[task_module].__name__
-            except KeyError:  # pragma: no cover
-                # Fix for manage.py shell_plus (Issue #366).
-                module_name = task_module
-            attrs['name'] = '.'.join(filter(None, [module_name, name]))
-            autoname = True
+        task_name = attrs.get('name')
+        if not task_name:
+            attrs['name'] = task_name = gen_task_name(app, name, task_module)
 
         # - Create and register class.
         # Because of the way import happens (recursively)
@@ -128,17 +115,6 @@ class TaskType(type):
         # with the framework.  There should only be one class for each task
         # name, so we always return the registered version.
         tasks = app._tasks
-
-        # - If the task module is used as the __main__ script
-        # - we need to rewrite the module part of the task name
-        # - to match App.main.
-        if MP_MAIN_FILE and sys.modules[task_module].__file__ == MP_MAIN_FILE:
-            # - see comment about :envvar:`MP_MAIN_FILE` above.
-            task_module = '__main__'
-        if autoname and task_module == '__main__' and app.main:
-            attrs['name'] = '.'.join([app.main, name])
-
-        task_name = attrs['name']
         if task_name not in tasks:
             tasks.register(new(cls, name, bases, attrs))
         instance = tasks[task_name]
@@ -146,9 +122,8 @@ class TaskType(type):
         return instance.__class__
 
     def __repr__(cls):
-        if cls._app:
-            return '<class %s of %s>' % (cls.__name__, cls._app, )
-        return '<unbound %s>' % (cls.__name__, )
+        return ('<class {0.__name__} of {0._app}>' if cls._app
+           else '<unbound {0.__name__}>').format(cls)
 
 
 class Task(object):
@@ -181,8 +156,8 @@ class Task(object):
     abstract = True
 
     #: If disabled the worker will not forward magic keyword arguments.
-    #: Deprecated and scheduled for removal in v3.0.
-    accept_magic_kwargs = None
+    #: Deprecated and scheduled for removal in v4.0.
+    accept_magic_kwargs = False
 
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: it will **never** stop retrying.
@@ -200,15 +175,15 @@ class Task(object):
     #: If enabled the worker will not store task state and return values
     #: for this task.  Defaults to the :setting:`CELERY_IGNORE_RESULT`
     #: setting.
-    ignore_result = False
+    ignore_result = None
 
     #: When enabled errors will be stored even if the task is otherwise
     #: configured to ignore results.
-    store_errors_even_if_ignored = False
+    store_errors_even_if_ignored = None
 
     #: If enabled an email will be sent to :setting:`ADMINS` whenever a task
     #: of this type fails.
-    send_error_emails = False
+    send_error_emails = None
 
     #: The name of a serializer that are registered with
     #: :mod:`kombu.serialization.registry`.  Default is `'pickle'`.
@@ -239,7 +214,7 @@ class Task(object):
     #:
     #: The application default can be overridden using the
     #: :setting:`CELERY_TRACK_STARTED` setting.
-    track_started = False
+    track_started = None
 
     #: When enabled messages for this task will be acknowledged **after**
     #: the task has been executed, and not *just before* which is the
@@ -283,7 +258,6 @@ class Task(object):
         for attr_name, config_name in self.from_config:
             if getattr(self, attr_name, None) is None:
                 setattr(self, attr_name, conf[config_name])
-        self.accept_magic_kwargs = app.accept_magic_kwargs
        if self.accept_magic_kwargs is None:
             self.accept_magic_kwargs = app.accept_magic_kwargs
         if self.backend is None:
@@ -344,10 +318,15 @@ class Task(object):
             self.pop_request()
             _task_stack.pop()
 
-    # - tasks are pickled into the name of the task only, and the reciever
-    # - simply grabs it from the local registry.
     def __reduce__(self):
-        return (_unpickle_task, (self.name, ), None)
+        # - tasks are pickled into the name of the task only, and the receiver
+        # - simply grabs it from the local registry.
+        # - in later versions the module of the task is also included,
+        # - and the receiving side tries to import that module so that
+        # - it will work even if the task has not been registered.
+        mod = type(self).__module__
+        mod = mod if mod and mod in sys.modules else None
+        return (_unpickle_task_v2, (self.name, mod), None)
 
     def run(self, *args, **kwargs):
         """The body of the task executed by workers."""
@@ -483,19 +462,19 @@ class Task(object):
 
         if connection:
             producer = app.amqp.TaskProducer(connection)
-        with app.default_producer(producer) as P:
+        with app.producer_or_acquire(producer) as P:
             evd = None
             if conf.CELERY_SEND_TASK_SENT_EVENT:
                 evd = app.events.Dispatcher(channel=P.channel,
                                             buffer_while_offline=False)
 
             extra_properties = self.backend.on_task_apply(task_id)
-            task_id = P.delay_task(self.name, args, kwargs,
-                                   task_id=task_id,
-                                   event_dispatcher=evd,
-                                   callbacks=maybe_list(link),
-                                   errbacks=maybe_list(link_error),
-                                   **dict(options, **extra_properties))
+            task_id = P.publish_task(self.name, args, kwargs,
+                                     task_id=task_id,
+                                     event_dispatcher=evd,
+                                     callbacks=maybe_list(link),
+                                     errbacks=maybe_list(link_error),
+                                     **dict(options, **extra_properties))
         result = self.AsyncResult(task_id)
         if add_to_parent:
             parent = get_current_worker_task()
@@ -516,6 +495,8 @@ class Task(object):
         :keyword eta: Explicit time and date to run the retry at
                       (must be a :class:`~datetime.datetime` instance).
         :keyword max_retries: If set, overrides the default retry limit.
+        :keyword timeout: If set, overrides the default timeout.
+        :keyword soft_timeout: If set, overrides the default soft timeout.
         :keyword \*\*options: Any extra options to pass on to
                               meth:`apply_async`.
         :keyword throw: If this is :const:`False`, do not raise the
@@ -539,7 +520,7 @@ class Task(object):
             ...     twitter = Twitter(oauth=auth)
             ...     try:
             ...         twitter.post_status_update(message)
-            ...     except twitter.FailWhale, exc:
+            ...     except twitter.FailWhale as exc:
             ...         # Retry in 5 minutes.
             ...         raise tweet.retry(countdown=60 * 5, exc=exc)
 
@@ -570,13 +551,15 @@ class Task(object):
         options.update({'retries': request.retries + 1,
                         'task_id': request.id,
                         'countdown': countdown,
-                        'eta': eta})
+                        'eta': eta,
+                        'link': request.callbacks,
+                        'link_error': request.errbacks})
 
         if max_retries is not None and options['retries'] > max_retries:
             if exc:
                 maybe_reraise()
             raise self.MaxRetriesExceededError(
-                    """Can't retry %s[%s] args:%s kwargs:%s""" % (
+                    "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                         self.name, options['task_id'], args, kwargs))
 
         # If task was executed eagerly using apply(),
@@ -585,8 +568,7 @@ class Task(object):
             self.apply(args=args, kwargs=kwargs, **options).get()
         else:
             self.apply_async(args=args, kwargs=kwargs, **options)
-        ret = RetryTaskError(eta and 'Retry at %s' % eta
-                                  or 'Retry in %s secs.' % countdown, exc)
+        ret = RetryTaskError(exc=exc, when=eta or countdown)
         if throw:
             raise ret
         return ret
@@ -795,7 +777,7 @@ class Task(object):
 
     def __repr__(self):
         """`repr(task)`"""
-        return '<@task: %s>' % (self.name, )
+        return '<@task: {0.name}>'.format(self)
 
     @property
     def request(self):

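Note: a hedged sketch of what retry() looks like from inside a task after these changes; retry() now also forwards the request's callbacks/errbacks and raises RetryTaskError(exc=exc, when=eta or countdown). The app, task body, and download() helper are hypothetical:

    from celery import Celery
    app = Celery('proj')  # hypothetical app

    @app.task(max_retries=3)
    def fetch(url):
        try:
            return download(url)   # hypothetical helper
        except IOError as exc:
            # re-publishes the message with retries+1, then raises RetryTaskError
            raise fetch.retry(exc=exc, countdown=5)
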
+ 33 - 26
celery/app/utils.py

@@ -20,13 +20,13 @@ from .defaults import find
 
 #: Format used to generate bugreport information.
 BUGREPORT_INFO = """
-software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
-            billiard:%(billiard_v)s %(driver_v)s
-platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
-loader   -> %(loader)s
-settings -> transport:%(transport)s results:%(results)s
+software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
+            billiard:{billiard_v} {driver_v}
+platform -> system:{system} arch:{arch} imp:{py_i}
+loader   -> {loader}
+settings -> transport:{transport} results:{results}
 
-%(human_settings)s
+{human_settings}
 """
 
 
@@ -52,6 +52,11 @@ class Settings(datastructures.ConfigurationView):
         return (os.environ.get('CELERY_BROKER_URL') or
                 self.first('BROKER_URL', 'BROKER_HOST'))
 
+    @property
+    def CELERY_TIMEZONE(self):
+        # this way we also support django's time zone.
+        return self.first('CELERY_TIMEZONE', 'TIME_ZONE')
+
     def without_defaults(self):
         """Returns the current configuration, but without defaults."""
         # the last stash is the default settings, so just skip that
@@ -85,12 +90,12 @@ class Settings(datastructures.ConfigurationView):
             False
 
         """
-        return self['_'.join(filter(None, parts))]
+        return self['_'.join(part for part in parts if part)]
 
     def humanize(self):
         """Returns a human readable string showing changes to the
         configuration."""
-        return '\n'.join('%s %s' % (key + ':', pretty(value, width=50))
+        return '\n'.join('{0}: {1}'.format(key, pretty(value, width=50))
                         for key, value in self.without_defaults().iteritems())
 
 
@@ -131,22 +136,24 @@ def bugreport(app):
     import kombu
 
     try:
-        trans = app.connection().transport
-        driver_v = '%s:%s' % (trans.driver_name, trans.driver_version())
+        conn = app.connection()
+        driver_v = '{0}:{1}'.format(conn.transport.driver_name,
+                                    conn.transport.driver_version())
+        transport = conn.transport_cls
     except Exception:
-        driver_v = ''
-
-    return BUGREPORT_INFO % {
-        'system': _platform.system(),
-        'arch': ', '.join(filter(None, _platform.architecture())),
-        'py_i': platforms.pyimplementation(),
-        'celery_v': celery.__version__,
-        'kombu_v': kombu.__version__,
-        'billiard_v': billiard.__version__,
-        'py_v': _platform.python_version(),
-        'driver_v': driver_v,
-        'transport': app.conf.BROKER_TRANSPORT or 'amqp',
-        'results': app.conf.CELERY_RESULT_BACKEND or 'disabled',
-        'human_settings': app.conf.humanize(),
-        'loader': qualname(app.loader.__class__),
-    }
+        transport = driver_v = ''
+
+    return BUGREPORT_INFO.format(
+        system=_platform.system(),
+        arch=', '.join(x for x in _platform.architecture() if x),
+        py_i=platforms.pyimplementation(),
+        celery_v=celery.VERSION_BANNER,
+        kombu_v=kombu.__version__,
+        billiard_v=billiard.__version__,
+        py_v=_platform.python_version(),
+        driver_v=driver_v,
+        transport=transport,
+        results=app.conf.CELERY_RESULT_BACKEND or 'disabled',
+        human_settings=app.conf.humanize(),
+        loader=qualname(app.loader.__class__),
+    )

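Note: the new CELERY_TIMEZONE property relies on Settings.first() returning the first key that has a value, which is what lets a Django-style TIME_ZONE setting work unchanged. A standalone sketch of that lookup order (values are examples):

    conf = {'CELERY_TIMEZONE': None, 'TIME_ZONE': 'Europe/Oslo'}  # example values

    def first(*keys):
        # like Settings.first(): the first key that is set wins
        for key in keys:
            if conf.get(key) is not None:
                return conf[key]

    assert first('CELERY_TIMEZONE', 'TIME_ZONE') == 'Europe/Oslo'
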
+ 19 - 19
celery/apps/beat.py

@@ -15,7 +15,7 @@ from __future__ import absolute_import
 import socket
 import sys
 
-from celery import __version__, platforms, beat
+from celery import VERSION_BANNER, platforms, beat
 from celery.app import app_or_default
 from celery.app.abstract import configurated, from_config
 from celery.utils.imports import qualname
@@ -24,12 +24,12 @@ from celery.utils.timeutils import humanize_seconds
 
 STARTUP_INFO_FMT = """
 Configuration ->
-    . broker -> %(conninfo)s
-    . loader -> %(loader)s
-    . scheduler -> %(scheduler)s
-%(scheduler_info)s
-    . logfile -> %(logfile)s@%(loglevel)s
-    . maxinterval -> %(hmax_interval)s (%(max_interval)ss)
+    . broker -> {conninfo}
+    . loader -> {loader}
+    . scheduler -> {scheduler}
+{scheduler_info}
+    . logfile -> {logfile}@{loglevel}
+    . maxinterval -> {hmax_interval} ({max_interval}s)
 """.strip()
 """.strip()
 
 
 logger = get_logger('celery.beat')
 logger = get_logger('celery.beat')
@@ -62,7 +62,7 @@ class Beat(configurated):
 
 
     def run(self):
     def run(self):
         print(str(self.colored.cyan(
         print(str(self.colored.cyan(
-                    'celerybeat v%s is starting.' % __version__)))
+                    'celerybeat v{0} is starting.'.format(VERSION_BANNER))))
         self.init_loader()
         self.init_loader()
         self.set_process_title()
         self.set_process_title()
         self.start_scheduler()
         self.start_scheduler()
@@ -95,7 +95,7 @@ class Beat(configurated):
         try:
         try:
             self.install_sync_handler(beat)
             self.install_sync_handler(beat)
             beat.start()
             beat.start()
-        except Exception, exc:
+        except Exception as exc:
             logger.critical('celerybeat raised exception %s: %r',
             logger.critical('celerybeat raised exception %s: %r',
                             exc.__class__, exc,
                             exc.__class__, exc,
                             exc_info=True)
                             exc_info=True)
@@ -108,16 +108,16 @@ class Beat(configurated):
 
 
     def startup_info(self, beat):
     def startup_info(self, beat):
         scheduler = beat.get_scheduler(lazy=True)
         scheduler = beat.get_scheduler(lazy=True)
-        return STARTUP_INFO_FMT % {
-            'conninfo': self.app.connection().as_uri(),
-            'logfile': self.logfile or '[stderr]',
-            'loglevel': LOG_LEVELS[self.loglevel],
-            'loader': qualname(self.app.loader),
-            'scheduler': qualname(scheduler),
-            'scheduler_info': scheduler.info,
-            'hmax_interval': humanize_seconds(beat.max_interval),
-            'max_interval': beat.max_interval,
-        }
+        return STARTUP_INFO_FMT.format(
+            conninfo=self.app.connection().as_uri(),
+            logfile=self.logfile or '[stderr]',
+            loglevel=LOG_LEVELS[self.loglevel],
+            loader=qualname(self.app.loader),
+            scheduler=qualname(scheduler),
+            scheduler_info=scheduler.info,
+            hmax_interval=humanize_seconds(beat.max_interval),
+            max_interval=beat.max_interval,
+            )
 
 
     def set_process_title(self):
     def set_process_title(self):
         arg_start = 'manage' in sys.argv[0] and 2 or 1
         arg_start = 'manage' in sys.argv[0] and 2 or 1

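Note: with str.format() the template fields are named, so the startup_info() call site above reads the same as the template. A minimal sketch with example values:

    fmt = '    . logfile -> {logfile}@{loglevel}'
    print(fmt.format(logfile='[stderr]', loglevel='INFO'))
    # -> '    . logfile -> [stderr]@INFO'
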
+ 87 - 157
celery/apps/worker.py

@@ -10,47 +10,39 @@
     platform tweaks, and so on.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 import logging
 import os
-import socket
 import sys
 import warnings
 
 from functools import partial
 
-from billiard import cpu_count, current_process
+from billiard import current_process
 
-from celery import __version__, platforms, signals
-from celery.app import app_or_default
-from celery.app.abstract import configurated, from_config
-from celery.exceptions import ImproperlyConfigured, SystemTerminate
+from celery import VERSION_BANNER, platforms, signals
+from celery.exceptions import SystemTerminate
 from celery.loaders.app import AppLoader
 from celery.utils import cry, isatty
 from celery.utils.imports import qualname
-from celery.utils.log import get_logger, mlevel, set_in_sighandler
+from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
 from celery.utils.text import pluralize
 from celery.worker import WorkController
 
-try:
-    from greenlet import GreenletExit
-    IGNORE_ERRORS = (GreenletExit, )
-except ImportError:  # pragma: no cover
-    IGNORE_ERRORS = ()
-
 logger = get_logger(__name__)
+is_jython = sys.platform.startswith('java')
+is_pypy = hasattr(sys, 'pypy_version_info')
 
 
 def active_thread_count():
     from threading import enumerate
-    # must use .getName on Python 2.5
     return sum(1 for t in enumerate()
-        if not t.getName().startswith('Dummy-'))
+        if not t.name.startswith('Dummy-'))
 
 
 def safe_say(msg):
-    sys.__stderr__.write('\n%s\n' % msg)
+    print('\n{0}'.format(msg), file=sys.__stderr__)
 
 ARTLINES = [
     ' --------------',
@@ -68,86 +60,48 @@ ARTLINES = [
 ]
 
 BANNER = """\
-celery@%(hostname)s v%(version)s
+celery@{hostname} v{version}
 
 [Configuration]
-. broker:      %(conninfo)s
-. app:         %(app)s
-. concurrency: %(concurrency)s
-. events:      %(events)s
+. broker:      {conninfo}
+. app:         {app}
+. concurrency: {concurrency}
+. events:      {events}
 
 [Queues]
-%(queues)s
+{queues}
 """
 
 EXTRA_INFO_FMT = """
 [Tasks]
-%(tasks)s
-"""
-
-UNKNOWN_QUEUE = """\
-Trying to select queue subset of %r, but queue %s is not
-defined in the CELERY_QUEUES setting.
-
-If you want to automatically declare unknown queues you can
-enable the CELERY_CREATE_MISSING_QUEUES setting.
+{tasks}
 """
 
 
-class Worker(configurated):
-    WorkController = WorkController
-
-    app = None
-    inherit_confopts = (WorkController, )
-    loglevel = from_config('log_level')
-    redirect_stdouts = from_config()
-    redirect_stdouts_level = from_config()
-
-    def __init__(self, hostname=None, purge=False, beat=False,
-            queues=None, include=None, app=None, pidfile=None,
-            autoscale=None, autoreload=False, no_execv=False, **kwargs):
-        self.app = app = app_or_default(app or self.app)
-        self.hostname = hostname or socket.gethostname()
+class Worker(WorkController):
 
+    def on_before_init(self, purge=False, redirect_stdouts=None,
+            redirect_stdouts_level=None, **kwargs):
         # this signal can be used to set up configuration for
         # workers by name.
+        conf = self.app.conf
         signals.celeryd_init.send(sender=self.hostname, instance=self,
-                                  conf=self.app.conf)
-
-        self.setup_defaults(kwargs, namespace='celeryd')
-        if not self.concurrency:
-            try:
-                self.concurrency = cpu_count()
-            except NotImplementedError:
-                self.concurrency = 2
+                                  conf=conf)
         self.purge = purge
-        self.beat = beat
-        self.use_queues = [] if queues is None else queues
-        self.queues = None
-        self.include = include
-        self.pidfile = pidfile
-        self.autoscale = None
-        self.autoreload = autoreload
-        self.no_execv = no_execv
-        if autoscale:
-            max_c, _, min_c = autoscale.partition(',')
-            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
         self._isatty = isatty(sys.stdout)
-
-        self.colored = app.log.colored(self.logfile)
-
-        if isinstance(self.use_queues, basestring):
-            self.use_queues = self.use_queues.split(',')
-        if self.include:
-            if isinstance(self.include, basestring):
-                self.include = self.include.split(',')
-            app.conf.CELERY_INCLUDE = (
-                tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
-        self.loglevel = mlevel(self.loglevel)
-
-    def run(self):
-        self.init_queues()
-        self.app.loader.init_worker()
+        self.colored = self.app.log.colored(self.logfile)
+        if redirect_stdouts is None:
+            redirect_stdouts = conf.CELERY_REDIRECT_STDOUTS
+        if redirect_stdouts_level is None:
+            redirect_stdouts_level = conf.CELERY_REDIRECT_STDOUTS_LEVEL
+        self.redirect_stdouts = redirect_stdouts
+        self.redirect_stdouts_level = redirect_stdouts_level
+
+    def on_start(self):
+        # this signal can be used to e.g. change queues after
+        # the -Q option has been applied.
+        signals.celeryd_after_setup.send(sender=self.hostname, instance=self,
+                                         conf=self.app.conf)
 
         if getattr(os, 'getuid', None) and os.getuid() == 0:
             warnings.warn(RuntimeWarning(
@@ -161,22 +115,12 @@ class Worker(configurated):
         print(str(self.colored.cyan(' \n', self.startup_info())) +
               str(self.colored.reset(self.extra_info() or '')))
         self.set_process_status('-active-')
-
         self.redirect_stdouts_to_logger()
-        try:
-            self.run_worker()
-        except IGNORE_ERRORS:
-            pass
+        self.install_platform_tweaks(self)
 
     def on_consumer_ready(self, consumer):
         signals.worker_ready.send(sender=consumer)
-        print('celery@%s has started.' % self.hostname)
-
-    def init_queues(self):
-        try:
-            self.app.select_queues(self.use_queues)
-        except KeyError, exc:
-            raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
+        print('celery@{0.hostname} has started.'.format(self))
 
     def redirect_stdouts_to_logger(self):
         self.app.log.setup(self.loglevel, self.logfile,
@@ -184,49 +128,50 @@ class Worker(configurated):
 
     def purge_messages(self):
         count = self.app.control.purge()
-        print('purge: Erased %d %s from the queue.\n' % (
+        print('purge: Erased {0} {1} from the queue.\n'.format(
                 count, pluralize(count, 'message')))
 
     def tasklist(self, include_builtins=True):
-        tasks = self.app.tasks.keys()
+        tasks = self.app.tasks
         if not include_builtins:
-            tasks = filter(lambda s: not s.startswith('celery.'), tasks)
-        return '\n'.join('  . %s' % task for task in sorted(tasks))
+            tasks = [t for t in tasks if not t.startswith('celery.')]
+        return '\n'.join('  . {0}'.format(task) for task in sorted(tasks))
 
     def extra_info(self):
         if self.loglevel <= logging.INFO:
             include_builtins = self.loglevel <= logging.DEBUG
             tasklist = self.tasklist(include_builtins=include_builtins)
-            return EXTRA_INFO_FMT % {'tasks': tasklist}
+            return EXTRA_INFO_FMT.format(tasks=tasklist)
 
     def startup_info(self):
         app = self.app
         concurrency = unicode(self.concurrency)
-        appr = '%s:0x%x' % (app.main or '__main__', id(app))
+        appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app))
         if not isinstance(app.loader, AppLoader):
             loader = qualname(app.loader)
             if loader.startswith('celery.loaders'):
                 loader = loader[14:]
-            appr += ' (%s)' % loader
+            appr += ' ({0})'.format(loader)
         if self.autoscale:
-            concurrency = '{min=%s, max=%s}' % tuple(self.autoscale)
+            max, min = self.autoscale
+            concurrency = '{{min={0}, max={1}}}'.format(min, max)
         pool = self.pool_cls
         if not isinstance(pool, basestring):
             pool = pool.__module__
-        concurrency += ' (%s)' % pool.split('.')[-1]
+        concurrency += ' ({0})'.format(pool.split('.')[-1])
         events = 'ON'
         if not self.send_events:
             events = 'OFF (enable -E to monitor this worker)'
 
-        banner = (BANNER % {
-            'app': appr,
-            'hostname': self.hostname,
-            'version': __version__,
-            'conninfo': self.app.connection().as_uri(),
-            'concurrency': concurrency,
-            'events': events,
-            'queues': app.amqp.queues.format(indent=0, indent_first=False),
-        }).splitlines()
+        banner = BANNER.format(
+            app=appr,
+            hostname=self.hostname,
+            version=VERSION_BANNER,
+            conninfo=self.app.connection().as_uri(),
+            concurrency=concurrency,
+            events=events,
+            queues=app.amqp.queues.format(indent=0, indent_first=False),
+        ).splitlines()
 
         # integrate the ASCII art.
         for i, x in enumerate(banner):
@@ -236,18 +181,6 @@ class Worker(configurated):
                 banner[i] = ' ' * 16 + banner[i]
         return '\n'.join(banner) + '\n'
 
-    def run_worker(self):
-        worker = self.WorkController(app=self.app,
-                    hostname=self.hostname,
-                    ready_callback=self.on_consumer_ready, beat=self.beat,
-                    autoscale=self.autoscale, autoreload=self.autoreload,
-                    no_execv=self.no_execv,
-                    pidfile=self.pidfile,
-                    **self.confopts_as_dict())
-        self.install_platform_tweaks(worker)
-        signals.worker_init.send(sender=worker)
-        worker.start()
-
     def install_platform_tweaks(self, worker):
         """Install platform specific tweaks and workarounds."""
         if self.app.IS_OSX:
@@ -276,7 +209,7 @@ class Worker(configurated):
 
     def set_process_status(self, info):
         return platforms.set_mp_process_title('celeryd',
-                info='%s (%s)' % (info, platforms.strargv(sys.argv)),
+                info='{0} ({1})'.format(info, platforms.strargv(sys.argv)),
                 hostname=self.hostname)
 
 
@@ -284,47 +217,54 @@ def _shutdown_handler(worker, sig='TERM', how='Warm', exc=SystemExit,
         callback=None):
 
     def _handle_request(signum, frame):
-        set_in_sighandler(True)
-        try:
+        with in_sighandler():
            from celery.worker import state
            if current_process()._name == 'MainProcess':
                if callback:
                    callback(worker)
-                safe_say('celeryd: %s shutdown (MainProcess)' % how)
+                safe_say('celeryd: {0} shutdown (MainProcess)'.format(how))
            if active_thread_count() > 1:
                setattr(state, {'Warm': 'should_stop',
                                'Cold': 'should_terminate'}[how], True)
            else:
                raise exc()
-        finally:
-            set_in_sighandler(False)
    _handle_request.__name__ = 'worker_' + how
    platforms.signals[sig] = _handle_request
install_worker_term_handler = partial(
    _shutdown_handler, sig='SIGTERM', how='Warm', exc=SystemExit,
)
-install_worker_term_hard_handler = partial(
-    _shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate,
-)
+if not is_jython:
+    install_worker_term_hard_handler = partial(
+        _shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate,
+    )
+else:
+    install_worker_term_handler = \
+        install_worker_term_hard_handler = lambda *a, **kw: None
 
 
 def on_SIGINT(worker):
     safe_say('celeryd: Hitting Ctrl+C again will terminate all running tasks!')
     install_worker_term_hard_handler(worker, sig='SIGINT')
-install_worker_int_handler = partial(
-    _shutdown_handler, sig='SIGINT', callback=on_SIGINT
-)
+if not is_jython:
+    install_worker_int_handler = partial(
+        _shutdown_handler, sig='SIGINT', callback=on_SIGINT
+    )
+else:
+    install_worker_int_handler = lambda *a, **kw: None
+
+
+def _clone_current_worker():
+    if os.fork() == 0:
+        os.execv(sys.executable, [sys.executable] + sys.argv)
 
 
 def install_worker_restart_handler(worker, sig='SIGHUP'):
 
-    def restart_worker_sig_handler(signum, frame):
+    def restart_worker_sig_handler(*args):
         """Signal handler restarting the current python program."""
         set_in_sighandler(True)
-        safe_say('Restarting celeryd (%s)' % (' '.join(sys.argv), ))
-        pid = os.fork()
-        if pid == 0:
-            os.execv(sys.executable, [sys.executable] + sys.argv)
+        safe_say('Restarting celeryd ({0})'.format(' '.join(sys.argv)))
+        import atexit
+        atexit.register(_clone_current_worker)
         from celery.worker import state
         state.should_stop = True
     platforms.signals[sig] = restart_worker_sig_handler
@@ -332,32 +272,25 @@ def install_worker_restart_handler(worker, sig='SIGHUP'):
 
 def install_cry_handler():
     # Jython/PyPy does not have sys._current_frames
-    is_jython = sys.platform.startswith('java')
-    is_pypy = hasattr(sys, 'pypy_version_info')
     if is_jython or is_pypy:  # pragma: no cover
         return
 
-    def cry_handler(signum, frame):
+    def cry_handler(*args):
         """Signal handler logging the stacktrace of all active threads."""
-        set_in_sighandler(True)
-        try:
+        with in_sighandler():
             safe_say(cry())
-        finally:
-            set_in_sighandler(False)
     platforms.signals['SIGUSR1'] = cry_handler
 
 
 def install_rdb_handler(envvar='CELERY_RDBSIG',
                         sig='SIGUSR2'):  # pragma: no cover
 
-    def rdb_handler(signum, frame):
+    def rdb_handler(*args):
         """Signal handler setting a rdb breakpoint at the current frame."""
-        set_in_sighandler(True)
-        try:
+        with in_sighandler():
+            _, frame = args
             from celery.contrib import rdb
             rdb.set_trace(frame)
-        finally:
-            set_in_sighandler(False)
     if os.environ.get(envvar):
         platforms.signals[sig] = rdb_handler
 
@@ -365,10 +298,7 @@ def install_rdb_handler(envvar='CELERY_RDBSIG',
 def install_HUP_not_supported_handler(worker, sig='SIGHUP'):
 
     def warn_on_HUP_handler(signum, frame):
-        set_in_sighandler(True)
-        try:
-            safe_say('%(sig)s not supported: Restarting with %(sig)s is '
-                     'unstable on this platform!' % {'sig': sig})
-        finally:
-            set_in_sighandler(False)
+        with in_sighandler():
+            safe_say('{sig} not supported: Restarting with {sig} is '
+                     'unstable on this platform!'.format(sig=sig))
     platforms.signals[sig] = warn_on_HUP_handler

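Note: in_sighandler() replaces the repeated set_in_sighandler(True) ... finally: set_in_sighandler(False) pattern in the handlers above. A sketch of the equivalent context manager, assuming it only toggles that flag (a stand-in flag is used here to keep the sketch self-contained):

    from contextlib import contextmanager

    _in_sighandler = [False]  # stand-in for the real module-level flag

    @contextmanager
    def in_sighandler():
        _in_sighandler[0] = True       # set_in_sighandler(True)
        try:
            yield
        finally:
            _in_sighandler[0] = False  # set_in_sighandler(False)
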
+ 3 - 3
celery/backends/__init__.py

@@ -18,7 +18,7 @@ from celery.utils.imports import symbol_by_name
 from celery.utils.functional import memoize
 
 UNKNOWN_BACKEND = """\
-Unknown result backend: %r.  Did you spell that correctly? (%r)\
+Unknown result backend: {0!r}.  Did you spell that correctly? ({1!r})\
 """
 
 BACKEND_ALIASES = {
@@ -44,8 +44,8 @@ def get_backend_cls(backend=None, loader=None):
     aliases = dict(BACKEND_ALIASES, **loader.override_backends)
     try:
         return symbol_by_name(backend, aliases)
-    except ValueError, exc:
-        raise ValueError, ValueError(UNKNOWN_BACKEND % (
+    except ValueError as exc:
+        raise ValueError, ValueError(UNKNOWN_BACKEND.format(
                     backend, exc)), sys.exc_info()[2]
 
 

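Note: the positional {0!r}/{1!r} fields replace the two %r placeholders, while the three-argument raise (Python 2 syntax) keeps the original traceback. An illustrative call with example values:

    UNKNOWN_BACKEND = 'Unknown result backend: {0!r}.  Did you spell that correctly? ({1!r})'
    msg = UNKNOWN_BACKEND.format('rediss', ValueError('no alias: rediss'))
    # msg names both the bad alias and the underlying error
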
+ 5 - 14
celery/backends/amqp.py

@@ -9,7 +9,6 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
 import socket
 import threading
@@ -24,7 +23,7 @@ from celery import states
 from celery.exceptions import TimeoutError
 from celery.utils.log import get_logger
 
-from .base import BaseDictBackend
+from .base import BaseBackend
 
 logger = get_logger(__name__)
 
@@ -36,11 +35,11 @@ class BacklogLimitExceeded(Exception):
 def repair_uuid(s):
     # Historically the dashes in UUIDS are removed from AMQ entity names,
     # but there is no known reason to.  Hopefully we'll be able to fix
-    # this in v3.0.
+    # this in v4.0.
     return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
 
 
-class AMQPBackend(BaseDictBackend):
+class AMQPBackend(BaseBackend):
     """Publishes results by sending messages."""
     Exchange = Exchange
     Queue = Queue
@@ -74,17 +73,9 @@ class AMQPBackend(BaseDictBackend):
         self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
         self.auto_delete = auto_delete
 
-        # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
-        # removed in version 3.0.
-        dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
-
         self.expires = None
-        if 'expires' in kwargs:
-            if kwargs['expires'] is not None:
-                self.expires = self.prepare_expires(kwargs['expires'])
-        else:
-            self.expires = self.prepare_expires(dexpires)
-
+        if 'expires' not in kwargs or kwargs['expires'] is not None:
+            self.expires = self.prepare_expires(kwargs.get('expires'))
         if self.expires:
             self.queue_arguments['x-expires'] = int(self.expires * 1000)
         self.mutex = threading.Lock()

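Note: RabbitMQ's x-expires queue argument is expressed in milliseconds, hence the * 1000 above. A sketch with an example TTL:

    expires_seconds = 3600                       # example: keep result queues 1h
    queue_arguments = {'x-expires': int(expires_seconds * 1000)}  # ms for RabbitMQ
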
+ 71 - 130
celery/backends/base.py

@@ -7,8 +7,6 @@
 
 
     - :class:`BaseBackend` defines the interface.
     - :class:`BaseBackend` defines the interface.
 
 
-    - :class:`BaseDictBackend` assumes the fields are stored in a dict.
-
     - :class:`KeyValueStoreBackend` is a common base class
     - :class:`KeyValueStoreBackend` is a common base class
       using K/V semantics like _get and _put.
       using K/V semantics like _get and _put.
 
 
@@ -19,6 +17,7 @@ import time
 import sys
 import sys
 
 
 from datetime import timedelta
 from datetime import timedelta
+from itertools import imap
 
 
 from kombu import serialization
 from kombu import serialization
 from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
 from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
@@ -27,7 +26,7 @@ from celery import states
 from celery.app import current_task
 from celery.app import current_task
 from celery.datastructures import LRUCache
 from celery.datastructures import LRUCache
 from celery.exceptions import TimeoutError, TaskRevokedError
 from celery.exceptions import TimeoutError, TaskRevokedError
-from celery.result import from_serializable
+from celery.result import from_serializable, GroupResult
 from celery.utils import timeutils
 from celery.utils import timeutils
 from celery.utils.serialization import (
 from celery.utils.serialization import (
         get_pickled_exception,
         get_pickled_exception,
@@ -45,7 +44,6 @@ def unpickle_backend(cls, args, kwargs):
 
 
 
 
 class BaseBackend(object):
 class BaseBackend(object):
-    """Base backend class."""
     READY_STATES = states.READY_STATES
     READY_STATES = states.READY_STATES
     UNREADY_STATES = states.UNREADY_STATES
     UNREADY_STATES = states.UNREADY_STATES
     EXCEPTION_STATES = states.EXCEPTION_STATES
     EXCEPTION_STATES = states.EXCEPTION_STATES
@@ -60,44 +58,16 @@ class BaseBackend(object):
     #: If true the backend must implement :meth:`get_many`.
     #: If true the backend must implement :meth:`get_many`.
     supports_native_join = False
     supports_native_join = False
 
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, app=None, serializer=None, max_cached_results=None,
+            **kwargs):
         from celery.app import app_or_default
         from celery.app import app_or_default
-        self.app = app_or_default(kwargs.get('app'))
-        self.serializer = kwargs.get('serializer',
-                                     self.app.conf.CELERY_RESULT_SERIALIZER)
+        self.app = app_or_default(app)
+        self.serializer = serializer or self.app.conf.CELERY_RESULT_SERIALIZER
         (self.content_type,
         (self.content_type,
          self.content_encoding,
          self.content_encoding,
          self.encoder) = serialization.registry._encoders[self.serializer]
          self.encoder) = serialization.registry._encoders[self.serializer]
-
-    def encode(self, data):
-        _, _, payload = serialization.encode(data, serializer=self.serializer)
-        return payload
-
-    def decode(self, payload):
-        payload = is_py3k and payload or str(payload)
-        return serialization.decode(payload,
-                                    content_type=self.content_type,
-                                    content_encoding=self.content_encoding)
-
-    def prepare_expires(self, value, type=None):
-        if value is None:
-            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
-        if isinstance(value, timedelta):
-            value = timeutils.timedelta_seconds(value)
-        if value is not None and type:
-            return type(value)
-        return value
-
-    def encode_result(self, result, status):
-        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
-            return self.prepare_exception(result)
-        else:
-            return self.prepare_value(result)
-
-    def store_result(self, task_id, result, status, traceback=None):
-        """Store the result and status of a task."""
-        raise NotImplementedError(
-                'store_result is not supported by this backend.')
+        self._cache = LRUCache(limit=max_cached_results or
+                                      self.app.conf.CELERY_MAX_CACHED_RESULTS)
 
 
     def mark_as_started(self, task_id, **meta):
     def mark_as_started(self, task_id, **meta):
         """Mark a task as started"""
         """Mark a task as started"""
@@ -118,8 +88,8 @@ class BaseBackend(object):
         return self.store_result(task_id, exc, status=states.RETRY,
         return self.store_result(task_id, exc, status=states.RETRY,
                                  traceback=traceback)
                                  traceback=traceback)
 
 
-    def mark_as_revoked(self, task_id):
-        return self.store_result(task_id, TaskRevokedError(),
+    def mark_as_revoked(self, task_id, reason=''):
+        return self.store_result(task_id, TaskRevokedError(reason),
                                  status=states.REVOKED, traceback=None)
                                  status=states.REVOKED, traceback=None)
 
 
     def prepare_exception(self, exc):
     def prepare_exception(self, exc):
@@ -137,11 +107,19 @@ class BaseBackend(object):
 
 
     def prepare_value(self, result):
     def prepare_value(self, result):
         """Prepare value for storage."""
         """Prepare value for storage."""
+        if isinstance(result, GroupResult):
+            return result.serializable()
         return result
         return result
 
 
-    def forget(self, task_id):
-        raise NotImplementedError('%s does not implement forget.' % (
-                    self.__class__))
+    def encode(self, data):
+        _, _, payload = serialization.encode(data, serializer=self.serializer)
+        return payload
+
+    def decode(self, payload):
+        payload = is_py3k and payload or str(payload)
+        return serialization.decode(payload,
+                                    content_type=self.content_type,
+                                    content_encoding=self.content_encoding)
 
 
     def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
     def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
         """Wait for task and return its result.
         """Wait for task and return its result.
@@ -172,98 +150,33 @@ class BaseBackend(object):
             if timeout and time_elapsed >= timeout:
             if timeout and time_elapsed >= timeout:
                 raise TimeoutError('The operation timed out.')
                 raise TimeoutError('The operation timed out.')
 
 
-    def cleanup(self):
-        """Backend cleanup. Is run by
-        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
-        pass
-
-    def process_cleanup(self):
-        """Cleanup actions to do at the end of a task worker process."""
-        pass
-
-    def get_status(self, task_id):
-        """Get the status of a task."""
-        raise NotImplementedError(
-                'get_status is not supported by this backend.')
-
-    def get_result(self, task_id):
-        """Get the result of a task."""
-        raise NotImplementedError(
-                'get_result is not supported by this backend.')
-
-    def get_children(self, task_id):
-        raise NotImplementedError(
-                'get_children is not supported by this backend.')
-
-    def get_traceback(self, task_id):
-        """Get the traceback for a failed task."""
-        raise NotImplementedError(
-                'get_traceback is not supported by this backend.')
-
-    def save_group(self, group_id, result):
-        """Store the result and status of a task."""
-        raise NotImplementedError(
-                'save_group is not supported by this backend.')
-
-    def restore_group(self, group_id, cache=True):
-        """Get the result of a group."""
-        raise NotImplementedError(
-                'restore_group is not supported by this backend.')
-
-    def delete_group(self, group_id):
-        raise NotImplementedError(
-                'delete_group is not supported by this backend.')
-
-    def reload_task_result(self, task_id):
-        """Reload task result, even if it has been previously fetched."""
-        raise NotImplementedError(
-                'reload_task_result is not supported by this backend.')
-
-    def reload_group_result(self, task_id):
-        """Reload group result, even if it has been previously fetched."""
-        raise NotImplementedError(
-                'reload_group_result is not supported by this backend.')
-
-    def on_task_apply(self, task_id):
-        pass
-
-    def on_chord_part_return(self, task, propagate=False):
-        pass
-
-    def fallback_chord_unlock(self, group_id, body, result=None, **kwargs):
-        kwargs['result'] = [r.id for r in result]
-        self.app.tasks['celery.chord_unlock'].apply_async((group_id, body, ),
-                                                          kwargs, countdown=1)
-    on_chord_apply = fallback_chord_unlock
-
-    def current_task_children(self):
-        current = current_task()
-        if current:
-            return [r.serializable() for r in current.request.children]
-
-    def __reduce__(self, args=(), kwargs={}):
-        return (unpickle_backend, (self.__class__, args, kwargs))
-
-
-class BaseDictBackend(BaseBackend):
+    def prepare_expires(self, value, type=None):
+        if value is None:
+            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
+        if isinstance(value, timedelta):
+            value = timeutils.timedelta_seconds(value)
+        if value is not None and type:
+            return type(value)
+        return value
 
-    def __init__(self, *args, **kwargs):
-        super(BaseDictBackend, self).__init__(*args, **kwargs)
-        self._cache = LRUCache(limit=kwargs.get('max_cached_results') or
-                                 self.app.conf.CELERY_MAX_CACHED_RESULTS)
+    def encode_result(self, result, status):
+        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
+            return self.prepare_exception(result)
+        else:
+            return self.prepare_value(result)
 
     def store_result(self, task_id, result, status, traceback=None, **kwargs):
-        """Store task result and status."""
+        """Update task state and result."""
         result = self.encode_result(result, status)
-        return self._store_result(task_id, result, status, traceback, **kwargs)
+        self._store_result(task_id, result, status, traceback, **kwargs)
+        return result
 
     def forget(self, task_id):
         self._cache.pop(task_id, None)
         self._forget(task_id)
 
     def _forget(self, task_id):
-        raise NotImplementedError('%s does not implement forget.' % (
-                    self.__class__))
+        raise NotImplementedError('backend does not implement forget.')
 
     def get_status(self, task_id):
         """Get the status of a task."""
@@ -301,11 +214,12 @@ class BaseDictBackend(BaseBackend):
         return meta
 
     def reload_task_result(self, task_id):
+        """Reload task result, even if it has been previously fetched."""
         self._cache[task_id] = self.get_task_meta(task_id, cache=False)
 
     def reload_group_result(self, group_id):
-        self._cache[group_id] = self.get_group_meta(group_id,
-                                                    cache=False)
+        """Reload group result, even if it has been previously fetched."""
+        self._cache[group_id] = self.get_group_meta(group_id, cache=False)
 
     def get_group_meta(self, group_id, cache=True):
         if cache:
@@ -333,8 +247,35 @@ class BaseDictBackend(BaseBackend):
         self._cache.pop(group_id, None)
         return self._delete_group(group_id)
 
+    def cleanup(self):
+        """Backend cleanup. Is run by
+        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
+        pass
+
+    def process_cleanup(self):
+        """Cleanup actions to do at the end of a task worker process."""
+        pass
+
+    def on_chord_part_return(self, task, propagate=False):
+        pass
+
+    def fallback_chord_unlock(self, group_id, body, result=None, **kwargs):
+        kwargs['result'] = [r.id for r in result]
+        self.app.tasks['celery.chord_unlock'].apply_async((group_id, body, ),
+                                                          kwargs, countdown=1)
+    on_chord_apply = fallback_chord_unlock
+
+    def current_task_children(self):
+        current = current_task()
+        if current:
+            return [r.serializable() for r in current.request.children]
+
+    def __reduce__(self, args=(), kwargs={}):
+        return (unpickle_backend, (self.__class__, args, kwargs))
+BaseDictBackend = BaseBackend  # XXX compat
+
 
-class KeyValueStoreBackend(BaseDictBackend):
+class KeyValueStoreBackend(BaseBackend):
     task_keyprefix = ensure_bytes('celery-task-meta-')
     group_keyprefix = ensure_bytes('celery-taskset-meta-')
     chord_keyprefix = ensure_bytes('chord-unlock-')
@@ -410,11 +351,11 @@ class KeyValueStoreBackend(BaseDictBackend):
             r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                                     for k in keys]), keys)
             self._cache.update(r)
-            ids.difference_update(set(map(bytes_to_str, r)))
+            ids.difference_update(set(imap(bytes_to_str, r)))
             for key, value in r.iteritems():
                 yield bytes_to_str(key), value
             if timeout and iterations * interval >= timeout:
-                raise TimeoutError('Operation timed out (%s)' % (timeout, ))
+                raise TimeoutError('Operation timed out ({0})'.format(timeout))
             time.sleep(interval)  # don't busy loop.
             iterations += 1
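
For reference, the encode/decode pair added above is a thin wrapper over kombu's serialization registry, as the diff itself shows; a minimal sketch of the round-trip (serializer name and payload illustrative):

    from kombu import serialization

    # encode() returns (content_type, content_encoding, payload)
    ctype, cenc, payload = serialization.encode(
        {'status': 'SUCCESS', 'result': 4}, serializer='json')
    # decode() reverses it given the same content metadata
    meta = serialization.decode(payload, content_type=ctype,
                                content_encoding=cenc)
    assert meta == {'status': 'SUCCESS', 'result': 4}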
 
 

+ 14 - 9
celery/backends/cache.py

@@ -16,6 +16,15 @@ from .base import KeyValueStoreBackend
 
 _imp = [None]
 
+REQUIRES_BACKEND = """\
+The memcached backend requires either pylibmc or python-memcached.\
+"""
+
+UNKNOWN_BACKEND = """\
+The cache backend {0!r} is unknown,
+Please use one of the following backends instead: {1}\
+"""
+
 
 def import_best_memcache():
     if _imp[0] is None:
@@ -27,9 +36,7 @@ def import_best_memcache():
             try:
                 import memcache  # noqa
             except ImportError:
-                raise ImproperlyConfigured(
-                    'Memcached backend requires either the pylibmc '
-                    'or memcache library')
+                raise ImproperlyConfigured(REQUIRES_BACKEND)
         _imp[0] = (is_pylibmc, memcache)
     return _imp[0]
 
@@ -77,7 +84,7 @@ class CacheBackend(KeyValueStoreBackend):
     implements_incr = True
 
     def __init__(self, expires=None, backend=None, options={}, **kwargs):
-        super(CacheBackend, self).__init__(self, **kwargs)
+        super(CacheBackend, self).__init__(**kwargs)
 
         self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                             **options)
@@ -90,10 +97,8 @@ class CacheBackend(KeyValueStoreBackend):
         try:
             self.Client = backends[self.backend]()
         except KeyError:
-            raise ImproperlyConfigured(
-                    'Unknown cache backend: %s. Please use one of the '
-                    'following backends: %s' % (self.backend,
-                                                ', '.join(backends.keys())))
+            raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
+                self.backend, ', '.join(backends)))
 
     def get(self, key):
         return self.client.get(key)
@@ -119,7 +124,7 @@ class CacheBackend(KeyValueStoreBackend):
 
     def __reduce__(self, args=(), kwargs={}):
         servers = ';'.join(self.servers)
-        backend = '%s://%s/' % (self.backend, servers)
+        backend = '{0}://{1}/'.format(self.backend, servers)
         kwargs.update(
             dict(backend=backend,
                  expires=self.expires,
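
A hedged configuration sketch for this backend; the URL form mirrors what __reduce__ rebuilds, and the server address is illustrative:

    CELERY_RESULT_BACKEND = 'cache'
    CELERY_CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
    CELERY_CACHE_BACKEND_OPTIONS = {'binary': True}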

+ 4 - 4
celery/backends/cassandra.py

@@ -23,12 +23,12 @@ from celery.exceptions import ImproperlyConfigured
 from celery.utils.log import get_logger
 from celery.utils.timeutils import maybe_timedelta, timedelta_seconds
 
-from .base import BaseDictBackend
+from .base import BaseBackend
 
 logger = get_logger(__name__)
 
 
-class CassandraBackend(BaseDictBackend):
+class CassandraBackend(BaseBackend):
     """Highly fault tolerant Cassandra backend.
 
     .. attribute:: servers
@@ -142,11 +142,11 @@ class CassandraBackend(BaseDictBackend):
             if self.detailed_mode:
                 meta['result'] = result
                 cf.insert(task_id, {date_done: self.encode(meta)},
-                          ttl=timedelta_seconds(self.expires))
+                          ttl=self.expires and timedelta_seconds(self.expires))
             else:
                 meta['result'] = self.encode(result)
                 cf.insert(task_id, meta,
-                          ttl=timedelta_seconds(self.expires))
+                          ttl=self.expires and timedelta_seconds(self.expires))
 
         return self._retry_on_error(_do_store)
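
The new `self.expires and timedelta_seconds(self.expires)` guard short-circuits on a falsy expiry, so no TTL is passed; a small sketch under that reading (helper name illustrative):

    from datetime import timedelta
    from celery.utils.timeutils import timedelta_seconds

    def ttl_for(expires):
        # a falsy expires short-circuits, so no TTL reaches cf.insert()
        return expires and timedelta_seconds(expires)

    assert ttl_for(None) is None
    assert ttl_for(timedelta(minutes=5)) == 300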
 
 

+ 2 - 2
celery/backends/database/__init__.py

@@ -14,7 +14,7 @@ from celery import states
 from celery.exceptions import ImproperlyConfigured
 from celery.utils.timeutils import maybe_timedelta
 
-from celery.backends.base import BaseDictBackend
+from celery.backends.base import BaseBackend
 
 from .models import Task, TaskSet
 from .session import ResultSession
@@ -49,7 +49,7 @@ def retry(fun):
     return _inner
 
 
-class DatabaseBackend(BaseDictBackend):
+class DatabaseBackend(BaseBackend):
     """The database result backend."""
     # ResultSet.iterate should sleep this much between each poll,
     # to not bombard the database with queries.
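
Because BaseDictBackend survives as a plain alias of BaseBackend (the # XXX compat line in backends/base.py above), imports like the one replaced here keep working; a sketch assuming that alias:

    from celery.backends.base import BaseBackend, BaseDictBackend

    assert BaseDictBackend is BaseBackend

    class LegacyBackend(BaseDictBackend):  # hypothetical subclass
        def _store_result(self, task_id, result, status,
                          traceback=None, **kwargs):
            pass  # persist the meta dict somewhere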

+ 0 - 71
celery/backends/database/a805d4bd.py

@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.backends.database.a805d4bd
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    This module fixes a bug with pickling and relative imports in Python < 2.6.
-
-    The problem is with pickling an e.g. `exceptions.KeyError` instance.
-    As SQLAlchemy has its own `exceptions` module, pickle will try to
-    lookup :exc:`KeyError` in the wrong module, resulting in this exception::
-
-        cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
-            attribute lookup exceptions.KeyError failed
-
-    doing `import exceptions` just before the dump in `sqlalchemy.types`
-    reveals the source of the bug::
-
-        EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
-            workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
-
-    Hence the random module name 'a805d5bd' is taken to decrease the chances of
-    a collision.
-
-"""
-from __future__ import absolute_import
-
-from sqlalchemy.types import PickleType as _PickleType
-
-
-class PickleType(_PickleType):  # pragma: no cover
-
-    def bind_processor(self, dialect):
-        impl_processor = self.impl.bind_processor(dialect)
-        dumps = self.pickler.dumps
-        protocol = self.protocol
-        if impl_processor:
-
-            def process(value):
-                if value is not None:
-                    value = dumps(value, protocol)
-                return impl_processor(value)
-
-        else:
-
-            def process(value):  # noqa
-                if value is not None:
-                    value = dumps(value, protocol)
-                return value
-        return process
-
-    def result_processor(self, dialect, coltype):
-        impl_processor = self.impl.result_processor(dialect, coltype)
-        loads = self.pickler.loads
-        if impl_processor:
-
-            def process(value):
-                value = impl_processor(value)
-                if value is not None:
-                    return loads(value)
-        else:
-
-            def process(value):  # noqa
-                if value is not None:
-                    return loads(value)
-        return process
-
-    def copy_value(self, value):
-        if self.mutable:
-            return self.pickler.loads(self.pickler.dumps(value, self.protocol))
-        else:
-            return value

+ 0 - 50
celery/backends/database/dfd042c7.py

@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.backends.database.dfd042c7
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    SQLAlchemy 0.5.8 version of :mod:`~celery.backends.database.a805d4bd`,
-    see the docstring of that module for an explanation of why we need
-    this workaround.
-
-"""
-from __future__ import absolute_import
-
-from sqlalchemy.types import PickleType as _PickleType
-from sqlalchemy import util
-
-
-class PickleType(_PickleType):  # pragma: no cover
-
-    def process_bind_param(self, value, dialect):
-        dumps = self.pickler.dumps
-        protocol = self.protocol
-        if value is not None:
-            return dumps(value, protocol)
-
-    def process_result_value(self, value, dialect):
-        loads = self.pickler.loads
-        if value is not None:
-            return loads(str(value))
-
-    def copy_value(self, value):
-        if self.mutable:
-            return self.pickler.loads(self.pickler.dumps(value, self.protocol))
-        else:
-            return value
-
-    def compare_values(self, x, y):
-        if self.comparator:
-            return self.comparator(x, y)
-        elif self.mutable and not hasattr(x, '__eq__') and x is not None:
-            util.warn_deprecated(
-                    'Objects stored with PickleType when mutable=True '
-                    'must implement __eq__() for reliable comparison.')
-            a = self.pickler.dumps(x, self.protocol)
-            b = self.pickler.dumps(y, self.protocol)
-            return a == b
-        else:
-            return x == y
-
-    def is_mutable(self):
-        return self.mutable

+ 3 - 8
celery/backends/database/models.py

@@ -11,17 +11,12 @@ from __future__ import absolute_import
 from datetime import datetime
 
 import sqlalchemy as sa
+from sqlalchemy.types import PickleType
 
 from celery import states
 
 from .session import ResultModelBase
 
-# See docstring of a805d4bd for an explanation for this workaround ;)
-if sa.__version__.startswith('0.5'):
-    from .dfd042c7 import PickleType
-else:
-    from .a805d4bd import PickleType  # noqa
-
 
 class Task(ResultModelBase):
     """Task result/status."""
@@ -49,7 +44,7 @@ class Task(ResultModelBase):
                 'date_done': self.date_done}
 
     def __repr__(self):
-        return '<Task %s state: %s>' % (self.task_id, self.status)
+        return '<Task {0.task_id} state: {0.status}>'.format(self)
 
 
 class TaskSet(ResultModelBase):
@@ -74,4 +69,4 @@ class TaskSet(ResultModelBase):
                 'date_done': self.date_done}
 
     def __repr__(self):
-        return '<TaskSet: %s>' % (self.taskset_id, )
+        return '<TaskSet: {0.taskset_id}>'.format(self)
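
Both new reprs lean on str.format's attribute lookup on positional arguments; a tiny illustration with a stand-in object:

    class Stub(object):  # illustrative stand-in for a Task row
        task_id, status = '4ax3', 'SUCCESS'

    assert ('<Task {0.task_id} state: {0.status}>'.format(Stub())
            == '<Task 4ax3 state: SUCCESS>')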

+ 2 - 2
celery/backends/mongodb.py

@@ -29,7 +29,7 @@ from celery import states
 from celery.exceptions import ImproperlyConfigured
 from celery.utils.timeutils import maybe_timedelta
 
-from .base import BaseDictBackend
+from .base import BaseBackend
 
 
 class Bunch(object):
@@ -38,7 +38,7 @@ class Bunch(object):
         self.__dict__.update(kw)
 
 
-class MongoBackend(BaseDictBackend):
+class MongoBackend(BaseBackend):
     mongodb_host = 'localhost'
     mongodb_port = 27017
     mongodb_user = None

+ 2 - 2
celery/backends/redis.py

@@ -58,9 +58,9 @@ class RedisBackend(KeyValueStoreBackend):
 
         # For compatibility with the old REDIS_* configuration keys.
         def _get(key):
-            for prefix in 'CELERY_REDIS_%s', 'REDIS_%s':
+            for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
                 try:
-                    return conf[prefix % key]
+                    return conf[prefix.format(key)]
                 except KeyError:
                     pass
         if host and '://' in host:
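
How the _get fallback resolves a setting, preferring the CELERY_REDIS_* key over the legacy REDIS_* one (conf dict illustrative):

    def _get(conf, key):
        for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
            try:
                return conf[prefix.format(key)]
            except KeyError:
                pass

    conf = {'REDIS_HOST': 'localhost', 'CELERY_REDIS_PORT': 6379}
    assert _get(conf, 'HOST') == 'localhost'   # legacy key still honoured
    assert _get(conf, 'PORT') == 6379          # new-style key wins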

+ 40 - 15
celery/beat.py

@@ -15,6 +15,8 @@ import shelve
 import sys
 import traceback
 
+from threading import Event, Thread
+
 from billiard import Process, ensure_multiprocessing
 from kombu.utils import reprcall
 from kombu.utils.functional import maybe_promise
@@ -27,12 +29,12 @@ from .app import app_or_default
 from .schedules import maybe_schedule, crontab
 from .utils import cached_property
 from .utils.imports import instantiate
-from .utils.threads import Event, Thread
 from .utils.timeutils import humanize_seconds
 from .utils.log import get_logger
 
 logger = get_logger(__name__)
-debug, info, error = logger.debug, logger.info, logger.error
+debug, info, error, warning = (logger.debug, logger.info,
+                               logger.error, logger.warning)
 
 DEFAULT_MAX_INTERVAL = 300  # 5 minutes
 
@@ -118,9 +120,8 @@ class ScheduleEntry(object):
         return vars(self).iteritems()
 
     def __repr__(self):
-        return ('<Entry: %s %s {%s}' % (self.name,
-                    reprcall(self.task, self.args or (), self.kwargs or {}),
-                    self.schedule))
+        return '<Entry: {0.name} {call} {0.schedule}'.format(self,
+            call=reprcall(self.task, self.args or (), self.kwargs or {}))
 
 
 class Scheduler(object):
@@ -174,7 +175,7 @@ class Scheduler(object):
             info('Scheduler: Sending due task %s', entry.task)
             try:
                 result = self.apply_async(entry, publisher=publisher)
-            except Exception, exc:
+            except Exception as exc:
                 error('Message Error: %s\n%s',
                       exc, traceback.format_stack(), exc_info=True)
             else:
@@ -203,7 +204,7 @@ class Scheduler(object):
                 (time.time() - self._last_sync) > self.sync_every)
 
     def reserve(self, entry):
-        new_entry = self.schedule[entry.name] = entry.next()
+        new_entry = self.schedule[entry.name] = next(entry)
         return new_entry
 
     def apply_async(self, entry, publisher=None, **kwargs):
@@ -222,10 +223,10 @@ class Scheduler(object):
                 result = self.send_task(entry.task, entry.args, entry.kwargs,
                                         publisher=publisher,
                                         **entry.options)
-        except Exception, exc:
+        except Exception as exc:
             raise SchedulingError, SchedulingError(
-                "Couldn't apply scheduled task %s: %s" % (
-                    entry.name, exc)), sys.exc_info()[2]
+                "Couldn't apply scheduled task {0.name}: {exc}".format(
+                    entry, exc=exc)), sys.exc_info()[2]
         finally:
             if self.should_sync():
                 self._do_sync()
@@ -324,7 +325,7 @@ class PersistentScheduler(Scheduler):
         for suffix in self.known_suffixes:
             try:
                 os.remove(self.schedule_filename + suffix)
-            except OSError, exc:
+            except OSError as exc:
                 if exc.errno != errno.ENOENT:
                     raise
 
@@ -333,7 +334,7 @@ class PersistentScheduler(Scheduler):
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
             entries = self._store.setdefault('entries', {})
-        except Exception, exc:
+        except Exception as exc:
             error('Removing corrupted schedule file %r: %r',
                   self.schedule_filename, exc, exc_info=True)
             self._remove_db()
@@ -341,11 +342,31 @@ class PersistentScheduler(Scheduler):
                                                 writeback=True)
         else:
             if '__version__' not in self._store:
+                warning('Reset: Account for new __version__ field')
                 self._store.clear()   # remove schedule at 2.2.2 upgrade.
+            if 'tz' not in self._store:
+                warning('Reset: Account for new tz field')
+                self._store.clear()   # remove schedule at 3.0.8 upgrade
+            if 'utc_enabled' not in self._store:
+                warning('Reset: Account for new utc_enabled field')
+                self._store.clear()   # remove schedule at 3.0.9 upgrade
+
+        tz = self.app.conf.CELERY_TIMEZONE
+        stored_tz = self._store.get('tz')
+        if stored_tz is not None and stored_tz != tz:
+            warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
+            self._store.clear()   # Timezone changed, reset db!
+        utc = self.app.conf.CELERY_ENABLE_UTC
+        stored_utc = self._store.get('utc_enabled')
+        if stored_utc is not None and stored_utc != utc:
+            choices = {True: 'enabled', False: 'disabled'}
+            warning('Reset: UTC changed from %s to %s',
+                    choices[stored_utc], choices[utc])
+            self._store.clear()   # UTC setting changed, reset db!
         entries = self._store.setdefault('entries', {})
         self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
         self.install_default_entries(self.schedule)
-        self._store['__version__'] = __version__
+        self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
         self.sync()
         debug('Current schedule:\n' + '\n'.join(repr(entry)
                                     for entry in entries.itervalues()))
@@ -367,7 +388,7 @@ class PersistentScheduler(Scheduler):
 
     @property
     def info(self):
-        return '    . db -> %s' % (self.schedule_filename, )
+        return '    . db -> {self.schedule_filename}'.format(self=self)
 
 
 class Service(object):
@@ -385,6 +406,10 @@ class Service(object):
         self._is_shutdown = Event()
         self._is_stopped = Event()
 
+    def __reduce__(self):
+        return self.__class__, (self.max_interval, self.schedule_filename,
+                                self.scheduler_cls, self.app)
+
     def start(self, embedded_process=False):
         info('Celerybeat: Starting...')
         debug('Celerybeat: Ticking with max interval->%s',
@@ -470,7 +495,7 @@ def EmbeddedService(*args, **kwargs):
     """Return embedded clock service.
 
     :keyword thread: Run threaded instead of as a separate process.
-        Default is :const:`False`.
+        Uses :mod:`multiprocessing` by default, if available.
 
     """
     if kwargs.pop('thread', False) or _Process is None:
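
A sketch of the reset rule added above: the shelve store is discarded whenever the recorded timezone or UTC flag disagrees with the current configuration (store contents illustrative):

    def maybe_reset(store, tz, utc):
        stored_tz, stored_utc = store.get('tz'), store.get('utc_enabled')
        if ((stored_tz is not None and stored_tz != tz) or
                (stored_utc is not None and stored_utc != utc)):
            store.clear()   # stale entries would fire at the wrong times
        store.update(tz=tz, utc_enabled=utc)

    store = {'tz': 'UTC', 'utc_enabled': True, 'entries': {'beat': 1}}
    maybe_reset(store, 'Europe/Oslo', True)
    assert 'entries' not in store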

+ 66 - 17
celery/bin/base.py

@@ -56,20 +56,22 @@ Daemon Options
     Optional directory to change to after detaching.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 import os
 import re
+import socket
 import sys
 import warnings
 
 from collections import defaultdict
+from itertools import izip
 from optparse import OptionParser, IndentedHelpFormatter, make_option as Option
 from types import ModuleType
 
 import celery
 from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
-from celery.platforms import EX_FAILURE, EX_USAGE
+from celery.platforms import EX_FAILURE, EX_USAGE, maybe_patch_concurrency
 from celery.utils import text
 from celery.utils.imports import symbol_by_name, import_from_cwd
 
@@ -78,20 +80,21 @@ for warning in (CDeprecationWarning, CPendingDeprecationWarning):
 warnings.simplefilter('once', warning, 0)
 
 ARGV_DISABLED = """
-Unrecognized command line arguments: %s
+Unrecognized command line arguments: {0}
 
 Try --help?
 """
 
 find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)')
 find_rst_ref = re.compile(r':\w+:`(.+?)`')
+find_sformat = re.compile(r'%(\w)')
 
 
 class HelpFormatter(IndentedHelpFormatter):
 
     def format_epilog(self, epilog):
         if epilog:
-            return '\n%s\n\n' % epilog
+            return '\n{0}\n\n'.format(epilog)
         return ''
 
     def format_description(self, description):
@@ -112,7 +115,7 @@ class Command(object):
     args = ''
 
     #: Application version.
-    version = celery.__version__
+    version = celery.VERSION_BANNER
 
     #: If false the parser will raise an exception if positional
     #: args are provided.
@@ -144,6 +147,9 @@ class Command(object):
     #: Text to print in --help before option list.
     description = ''
 
+    #: Set to true if this command doesn't have subcommands
+    leaf = True
+
     def __init__(self, app=None, get_app=None):
         self.app = app
         self.get_app = get_app or self._get_default_app
@@ -161,13 +167,32 @@ class Command(object):
         """
         """
         if argv is None:
             argv = list(sys.argv)
+        # Should we load any special concurrency environment?
+        self.maybe_patch_concurrency(argv)
+        self.on_concurrency_setup()
+
+        # Dump version and exit if '--version' arg set.
+        self.early_version(argv)
         argv = self.setup_app_from_commandline(argv)
         prog_name = os.path.basename(argv[0])
         return self.handle_argv(prog_name, argv[1:])
 
+    def run_from_argv(self, prog_name, argv=None):
+        return self.handle_argv(prog_name, sys.argv if argv is None else argv)
+
+    def maybe_patch_concurrency(self, argv=None):
+        argv = argv or sys.argv
+        pool_option = self.with_pool_option(argv)
+        if pool_option:
+            maybe_patch_concurrency(argv, *pool_option)
+            short_opts, long_opts = pool_option
+
+    def on_concurrency_setup(self):
+        pass
+
     def usage(self, command):
         """Returns the command-line usage string for this app."""
-        return '%%prog [options] %s' % (self.args, )
+        return '%%prog [options] {0.args}'.format(self)
 
     def get_options(self):
         """Get supported command line options."""
@@ -197,25 +222,27 @@ class Command(object):
             options = dict((k, self.expanduser(v))
                             for k, v in vars(options).iteritems()
                                 if not k.startswith('_'))
-        args = map(self.expanduser, args)
+        args = [self.expanduser(arg) for arg in args]
         self.check_args(args)
         return options, args
 
     def check_args(self, args):
         if not self.supports_args and args:
-            self.die(ARGV_DISABLED % (', '.join(args, )), EX_USAGE)
+            self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE)
 
     def die(self, msg, status=EX_FAILURE):
-        sys.stderr.write(msg + '\n')
+        print(msg, file=sys.stderr)
         sys.exit(status)
 
+    def early_version(self, argv):
+        if '--version' in argv:
+            print(self.version)
+            sys.exit(0)
+
     def parse_options(self, prog_name, arguments):
         """Parse the available options."""
         # Don't want to load configuration to just print the version,
         # so we handle --version manually here.
-        if '--version' in arguments:
-            sys.stdout.write('%s\n' % self.version)
-            sys.exit(0)
         parser = self.create_parser(prog_name)
         return parser.parse_args(arguments)
 
@@ -235,7 +262,7 @@ class Command(object):
             for long_opt, help in doc.iteritems():
                 option = parser.get_option(long_opt)
                 if option is not None:
-                    option.help = ' '.join(help) % {'default': option.default}
+                    option.help = ' '.join(help).format(default=option.default)
         return parser
 
     def prepare_preload_options(self, options):
@@ -264,17 +291,22 @@ class Command(object):
             os.environ['CELERY_CONFIG_MODULE'] = config_module
         if app:
             self.app = self.find_app(app)
-        else:
+        elif self.app is None:
             self.app = self.get_app(loader=loader)
         if self.enable_config_from_cmdline:
             argv = self.process_cmdline_config(argv)
         return argv
 
     def find_app(self, app):
-        sym = self.symbol_by_name(app)
+        try:
+            sym = self.symbol_by_name(app)
+        except AttributeError:
+            # last part was not an attribute, but a module
+            sym = import_from_cwd(app)
         if isinstance(sym, ModuleType):
             if getattr(sym, '__path__', None):
-                return self.find_app('%s.celery:' % (app.replace(':', ''), ))
+                return self.find_app('{0}.celery:'.format(
+                            app.replace(':', '')))
             return sym.celery
         return sym
 
@@ -296,7 +328,7 @@ class Command(object):
         opts = {}
         for opt in self.preload_options:
             for t in (opt._long_opts, opt._short_opts):
-                opts.update(dict(zip(t, [opt.dest] * len(t))))
+                opts.update(dict(izip(t, [opt.dest] * len(t))))
         index = 0
         length = len(args)
         while index < length:
@@ -327,6 +359,23 @@ class Command(object):
                     line.strip()).replace('`', ''))
         return options
 
+    def with_pool_option(self, argv):
+        """Returns tuple of ``(short_opts, long_opts)`` if the command
+        supports a pool argument, and used to monkey patch eventlet/gevent
+        environments as early as possible.
+
+        E.g::
+              has_pool_option = (['-P'], ['--pool'])
+        """
+        pass
+
+    def simple_format(self, s, match=find_sformat, expand=r'\1', **keys):
+        if s:
+            host = socket.gethostname()
+            name, _, domain = host.partition('.')
+            keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **keys)
+            return match.sub(lambda m: keys[m.expand(expand)], s)
+
     def _get_default_app(self, *args, **kwargs):
         from celery.app import default_app
         return default_app._get_current_object()  # omit proxy
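
What simple_format expands: %h, %n and %d become the full host name, the short name and the domain; a standalone sketch (input string illustrative, output machine-dependent):

    import re
    import socket

    find_sformat = re.compile(r'%(\w)')
    host = socket.gethostname()
    name, _, domain = host.partition('.')
    keys = {'%': '%', 'h': host, 'n': name, 'd': domain}
    # m.group(1) here plays the role of m.expand(r'\1') above
    print(find_sformat.sub(lambda m: keys[m.group(1)], 'celery@%n'))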

+ 19 - 20
celery/bin/camqadm.py

@@ -5,13 +5,14 @@ The :program:`celery amqp` command.
 .. program:: celery amqp
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 import cmd
 import sys
 import shlex
 import pprint
 
+from functools import partial
 from itertools import count
 
 from amqplib import client_0_8 as amqp
@@ -35,9 +36,7 @@ Example:
     -> queue.delete myqueue yes no
 """
 
-
-def say(m, fh=sys.stderr):
-    fh.write('%s\n' % (m, ))
+say = partial(print, file=sys.stderr)
 
 
 class Spec(object):
@@ -100,11 +99,11 @@ class Spec(object):
             return response
         if callable(self.returns):
             return self.returns(response)
-        return self.returns % (response, )
+        return self.returns.format(response)
 
     def format_arg(self, name, type, default_value=None):
         if default_value is not None:
-            return '%s:%s' % (name, default_value)
+            return '{0}:{1}'.format(name, default_value)
         return name
 
     def format_signature(self):
@@ -121,7 +120,7 @@ def dump_message(message):
 
 
 def format_declare_queue(ret):
-    return 'ok. queue:%s messages:%s consumers:%s.' % ret
+    return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret)
 
 
 class AMQShell(cmd.Cmd):
@@ -145,7 +144,7 @@ class AMQShell(cmd.Cmd):
     """
     """
     conn = None
     chan = None
-    prompt_fmt = '%d> '
+    prompt_fmt = '{self.counter}> '
     identchars = cmd.IDENTCHARS = '.'
     needs_reconnect = False
     counter = 1
@@ -176,9 +175,9 @@ class AMQShell(cmd.Cmd):
         'queue.delete': Spec(('queue', str),
                              ('if_unused', bool, 'no'),
                              ('if_empty', bool, 'no'),
-                             returns='ok. %d messages deleted.'),
+                             returns='ok. {0} messages deleted.'),
         'queue.purge': Spec(('queue', str),
-                            returns='ok. %d messages deleted.'),
+                            returns='ok. {0} messages deleted.'),
         'basic.get': Spec(('queue', str),
                           ('no_ack', bool, 'off'),
                           returns=dump_message),
@@ -200,10 +199,10 @@ class AMQShell(cmd.Cmd):
     def note(self, m):
         """Say something to the user. Disabled if :attr:`silent`."""
         if not self.silent:
-            say(m, fh=self.out)
+            say(m, file=self.out)
 
     def say(self, m):
-        say(m, fh=self.out)
+        say(m, file=self.out)
 
     def get_amqp_api_command(self, cmd, arglist):
         """With a command name and a list of arguments, convert the arguments
@@ -234,19 +233,19 @@ class AMQShell(cmd.Cmd):
 
     def display_command_help(self, cmd, short=False):
         spec = self.amqp[cmd]
-        self.say('%s %s' % (cmd, spec.format_signature()))
+        self.say('{0} {1}'.format(cmd, spec.format_signature()))
 
     def do_help(self, *args):
         if not args:
             self.say(HELP_HEADER)
-            for cmd_name in self.amqp.keys():
+            for cmd_name in self.amqp:
                 self.display_command_help(cmd_name, short=True)
             self.say(EXAMPLE_TEXT)
         else:
             self.display_command_help(args[0])
 
 
     def default(self, line):
-        self.say("unknown syntax: '%s'. how about some 'help'?" % line)
+        self.say("unknown syntax: {0!r}. how about some 'help'?".format(line))
 
     def get_names(self):
         return set(self.builtins) | set(self.amqp)
@@ -304,9 +303,9 @@ class AMQShell(cmd.Cmd):
             self.counter = self.inc_counter()
             try:
                 self.respond(self.dispatch(cmd, arg))
-            except (AttributeError, KeyError), exc:
+            except (AttributeError, KeyError) as exc:
                 self.default(line)
-            except Exception, exc:
+            except Exception as exc:
                 self.say(exc)
                 self.needs_reconnect = True
 
 
@@ -326,7 +325,7 @@ class AMQShell(cmd.Cmd):
 
     @property
     def prompt(self):
-        return self.prompt_fmt % self.counter
+        return self.prompt_fmt.format(self=self)
 
 
 class AMQPAdmin(object):
@@ -343,7 +342,7 @@ class AMQPAdmin(object):
         if conn:
             conn.close()
         conn = self.app.connection()
-        self.note('-> connecting to %s.' % conn.as_uri())
+        self.note('-> connecting to {0}.'.format(conn.as_uri()))
         conn.connect()
         self.note('-> connected.')
         return conn
@@ -360,7 +359,7 @@ class AMQPAdmin(object):
 
     def note(self, m):
         if not self.silent:
-            say(m, fh=self.out)
+            say(m, file=self.out)
 
 
 class AMQPAdminCommand(Command):
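
The rewritten say is simply print bound to stderr, so callers can still pass file= through; roughly equivalent to:

    from __future__ import print_function
    import sys
    from functools import partial

    say = partial(print, file=sys.stderr)
    say('-> connected.')                    # goes to stderr
    say('-> connected.', file=sys.stdout)   # overridable per call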

+ 161 - 99
celery/bin/celery.py

@@ -6,20 +6,21 @@ The :program:`celery` umbrella command.
 .. program:: celery
 
 """
-from __future__ import absolute_import
-from __future__ import with_statement
+from __future__ import absolute_import, print_function
 
 import anyjson
+import heapq
 import sys
+import warnings
 
-from billiard import freeze_support
 from importlib import import_module
+from itertools import imap
 from pprint import pformat
 
-from celery import __version__
 from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
 from celery.utils import term
 from celery.utils import text
+from celery.utils.functional import memoize
 from celery.utils.imports import symbol_by_name
 from celery.utils.timeutils import maybe_iso8601
 
@@ -28,19 +29,31 @@ from celery.bin.base import Command as BaseCommand, Option
 HELP = """
 ---- -- - - ---- Commands- -------------- --- ------------
 
-%(commands)s
+{commands}
 ---- -- - - --------- -- - -------------- --- ------------
 
-Type '%(prog_name)s <command> --help' for help using a specific command.
+Type '{prog_name} <command> --help' for help using a specific command.
+"""
+
+MIGRATE_PROGRESS_FMT = """\
+Migrating task {state.count}/{state.strtotal}: \
+{body[task]}[{body[id]}]\
 """
 
 commands = {}
 
-command_classes = (
+command_classes = [
     ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'),
     ('Remote Control', ['status', 'inspect', 'control'], 'blue'),
-    ('Utils', ['purge', 'list', 'migrate', 'apply', 'result', 'report'], None),
-)
+    ('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None),
+]
+
+
+@memoize()
+def _get_extension_classes():
+    extensions = []
+    command_classes.append(('Extensions', extensions, 'magenta'))
+    return extensions
 
 
 class Error(Exception):
@@ -54,19 +67,38 @@ class Error(Exception):
         return self.reason
 
 
-def command(fun, name=None, sortpri=0):
-    commands[name or fun.__name__] = fun
-    fun.sortpri = sortpri
-    return fun
+def command(*args, **kwargs):
+
+    def _register(fun):
+        commands[kwargs.get('name') or fun.__name__] = fun
+        return fun
+
+    return _register(args[0]) if args else _register
+
+
+def load_extension_commands(namespace='celery.commands'):
+    try:
+        from pkg_resources import iter_entry_points
+    except ImportError:
+        return
+
+    for ep in iter_entry_points(namespace):
+        sym = ':'.join([ep.module_name, ep.attrs[0]])
+        try:
+            cls = symbol_by_name(sym)
+        except (ImportError, SyntaxError) as exc:
+            warnings.warn(
+                'Cannot load extension {0!r}: {1!r}'.format(sym, exc))
+        else:
+            heapq.heappush(_get_extension_classes(), ep.name)
+            command(cls, name=ep.name)
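
For a third-party package, plugging into the celery.commands entry-point namespace consumed above would look roughly like this in its setup.py (project, module and class names hypothetical):

    from setuptools import setup

    setup(
        name='celery-contrib-frobnicate',
        # the entry point name becomes the subcommand: 'celery frobnicate'
        entry_points={
            'celery.commands': [
                'frobnicate = frobmodule.commands:FrobnicateCommand',
            ],
        },
    )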
 
 
 
 
 class Command(BaseCommand):
     help = ''
     args = ''
-    version = __version__
     prog_name = 'celery'
     show_body = True
-    leaf = True
     show_reply = True
 
     option_list = (
@@ -87,8 +119,8 @@ class Command(BaseCommand):
     def __call__(self, *args, **kwargs):
         try:
             ret = self.run(*args, **kwargs)
-        except Error, exc:
-            self.error(self.colored.red('Error: %s' % exc))
+        except Error as exc:
+            self.error(self.colored.red('Error: {0!r}'.format(exc)))
             return exc.status
 
         return ret if ret is not None else EX_OK
@@ -101,10 +133,7 @@ class Command(BaseCommand):
         self.out(s, fh=self.stderr)
 
     def out(self, s, fh=None):
-        s = str(s)
-        if not s.endswith('\n'):
-            s += '\n'
-        (fh or self.stdout).write(s)
+        print(s, file=fh or self.stdout)
 
     def run_from_argv(self, prog_name, argv):
         self.prog_name = prog_name
@@ -119,13 +148,13 @@ class Command(BaseCommand):
         return self(*args, **options)
 
     def usage(self, command):
-        return '%%prog %s [options] %s' % (command, self.args)
+        return '%%prog {0} [options] {self.args}'.format(command, self=self)
 
     def prettify_list(self, n):
         c = self.colored
         if not n:
             return '- empty -'
-        return '\n'.join(str(c.reset(c.white('*'), ' %s' % (item, )))
+        return '\n'.join(str(c.reset(c.white('*'), ' {0}'.format(item)))
                             for item in n)
 
     def prettify_dict_ok_error(self, n):
@@ -140,7 +169,7 @@ class Command(BaseCommand):
 
     def say_remote_command_reply(self, replies):
         c = self.colored
-        node = replies.keys()[0]
+        node = iter(replies).next()  # <-- take first.
         reply = replies[node]
         status, preply = self.prettify(reply)
         self.say_chat('->', c.cyan(node, ': ') + status,
@@ -191,6 +220,7 @@ class Delegate(Command):
         return self.target.run(*args, **kwargs)
 
 
+@command
 class multi(Command):
     """Start multiple worker instances."""
 
@@ -200,9 +230,9 @@ class multi(Command):
     def run_from_argv(self, prog_name, argv):
         from celery.bin.celeryd_multi import MultiTool
         return MultiTool().execute_from_commandline(argv, prog_name)
-multi = command(multi)
 
 
+@command
 class worker(Delegate):
     """Start worker instance.
 
@@ -217,9 +247,13 @@ class worker(Delegate):
         celery worker --autoscale=10,0
     """
     Command = 'celery.bin.celeryd:WorkerCommand'
-worker = command(worker, sortpri=01)
 
 
+    def run_from_argv(self, prog_name, argv):
+        self.target.maybe_detach(argv)
+        super(worker, self).run_from_argv(prog_name, argv)
 
 
+
+@command
 class events(Delegate):
     """Event-stream utilities.
 
@@ -240,9 +274,9 @@ class events(Delegate):
         celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info
     """
     Command = 'celery.bin.celeryev:EvCommand'
-events = command(events, sortpri=10)
 
 
+@command
 class beat(Delegate):
     """Start the celerybeat periodic task scheduler.
 
@@ -254,9 +288,9 @@ class beat(Delegate):
 
 
     """
     """
     Command = 'celery.bin.celerybeat:BeatCommand'
-beat = command(beat, sortpri=20)
 
 
+@command
 class amqp(Delegate):
     """AMQP Administration Shell.
 
@@ -275,9 +309,9 @@ class amqp(Delegate):
 
 
     """
     """
     Command = 'celery.bin.camqadm:AMQPAdminCommand'
-amqp = command(amqp, sortpri=30)
 
 
+@command(name='list')
 class list_(Command):
     """Get info from broker.
 
@@ -295,8 +329,7 @@ class list_(Command):
         except NotImplementedError:
             raise Error('Your transport cannot list bindings.')
 
-        fmt = lambda q, e, r: self.out('%s %s %s' % (q.ljust(28),
-                                                     e.ljust(28), r))
+        fmt = lambda q, e, r: self.out('{0:<28} {1:<28} {2}'.format(q, e, r))
         fmt('Queue', 'Exchange', 'Routing Key')
         fmt('-' * 16, '-' * 16, '-' * 16)
         for b in bindings:
@@ -304,25 +337,25 @@ class list_(Command):
 
 
     def run(self, what=None, *_, **kw):
     def run(self, what=None, *_, **kw):
         topics = {'bindings': self.list_bindings}
         topics = {'bindings': self.list_bindings}
-        available = ', '.join(topics.keys())
+        available = ', '.join(topics)
         if not what:
         if not what:
-            raise Error('You must specify what to list (%s)' % available)
+            raise Error('You must specify one of {0}'.format(available))
         if what not in topics:
         if what not in topics:
-            raise Error('unknown topic %r (choose one of: %s)' % (
+            raise Error('unknown topic {0!r} (choose one of: {1})'.format(
                             what, available))
                             what, available))
         with self.app.connection() as conn:
         with self.app.connection() as conn:
             self.app.amqp.TaskConsumer(conn).declare()
             self.app.amqp.TaskConsumer(conn).declare()
             topics[what](conn.manager)
             topics[what](conn.manager)
-list_ = command(list_, 'list')
 
 
 
 
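The new ``fmt`` lambda uses format-spec alignment: ``{0:<28}`` left-justifies the field in a 28-column slot, matching the old ``ljust(28)`` calls. For example:

.. code-block:: python

    fmt = '{0:<28} {1:<28} {2}'
    line = fmt.format('tasks', 'celery', 'tasks')
    # 'tasks' (5 chars) is left-aligned and padded to 28 columns:
    assert line.startswith('tasks' + ' ' * 23)
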
-class apply(Command):
-    """Apply a task by name.
+@command
+class call(Command):
+    """Call a task by name.

     Examples::

-        celery apply tasks.add --args='[2, 2]'
-        celery apply tasks.add --args='[2, 2]' --countdown=10
+        celery call tasks.add --args='[2, 2]'
+        celery call tasks.add --args='[2, 2]' --countdown=10
     """
     args = '<task_name>'
     option_list = Command.option_list + (
@@ -369,28 +402,29 @@ class apply(Command):
                                  eta=maybe_iso8601(kw.get('eta')),
                                  expires=expires)
         self.out(res.id)
-apply = command(apply)

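The renamed ``call`` command is, roughly, a thin wrapper around ``send_task``. A sketch of the equivalent programmatic call, assuming a JSON-encoded ``--args`` value and an example broker URL:

.. code-block:: python

    import json

    from celery import Celery

    app = Celery(broker='amqp://')  # assumed broker URL

    # roughly: celery call tasks.add --args='[2, 2]' --countdown=10
    res = app.send_task('tasks.add', args=json.loads('[2, 2]'),
                        countdown=10)
    print(res.id)  # the command prints the new task id
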
+@command
 class purge(Command):
     """Erase all messages from all known task queues.

     WARNING: There is no undo operation for this command.

     """
+    fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.'
+    fmt_empty = 'No messages purged from {qnum} {queues}'
+
     def run(self, *args, **kwargs):
-        queues = len(self.app.amqp.queues.keys())
-        messages_removed = self.app.control.purge()
-        if messages_removed:
-            self.out('Purged %s %s from %s known task %s.' % (
-                messages_removed, text.pluralize(messages_removed, 'message'),
-                queues, text.pluralize(queues, 'queue')))
-        else:
-            self.out('No messages purged from %s known %s' % (
-                queues, text.pluralize(queues, 'queue')))
-purge = command(purge)
+        queues = len(self.app.amqp.queues)
+        messages = self.app.control.purge()
+        fmt = self.fmt_purged if messages else self.fmt_empty
+        self.out(fmt.format(
+            mnum=messages, qnum=queues,
+            messages=text.pluralize(messages, 'message'),
+            queues=text.pluralize(queues, 'queue')))

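Keeping the two messages as class-level format templates means subclasses can override the wording without touching ``run()``. A self-contained sketch of the selection logic, with a simplified stand-in for ``celery.utils.text.pluralize``:

.. code-block:: python

    def pluralize(n, text, suffix='s'):
        # simplified stand-in for celery.utils.text.pluralize
        return text + suffix if n != 1 else text

    fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.'
    fmt_empty = 'No messages purged from {qnum} {queues}'

    def report(messages, queues):
        fmt = fmt_purged if messages else fmt_empty
        return fmt.format(mnum=messages, qnum=queues,
                          messages=pluralize(messages, 'message'),
                          queues=pluralize(queues, 'queue'))

    assert report(3, 2) == 'Purged 3 messages from 2 known task queues.'
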
+@command
 class result(Command):
     """Gives the return value for a given task id.

@@ -421,7 +455,6 @@ class result(Command):
         else:
             value = result.get()
         self.out(self.prettify(value)[1])
-result = command(result)

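Programmatically, the same lookup is one ``AsyncResult`` away. A sketch, assuming a configured result backend and a made-up example task id:

.. code-block:: python

    from celery import Celery

    app = Celery(broker='amqp://', backend='amqp')  # assumed URLs

    # roughly: celery result d9078da5-9915-40a0-bfa1-392c7bde42ed
    result = app.AsyncResult('d9078da5-9915-40a0-bfa1-392c7bde42ed')
    print(result.get())
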
 class _RemoteControl(Command):
@@ -445,8 +478,8 @@ class _RemoteControl(Command):
             # see if it uses args.
             meth = getattr(self, command)
             return text.join([
-                '|' + text.indent('%s%s %s' % (prefix, color(command),
-                                               meth.__doc__), indent), help,
+                '|' + text.indent('{0}{1} {2}'.format(prefix, color(command),
+                                                meth.__doc__), indent), help,
             ])

         except AttributeError:
@@ -469,7 +502,7 @@ class _RemoteControl(Command):
         ])

     def usage(self, command):
-        return '%%prog %s [options] %s <command> [arg1 .. argN]' % (
+        return '%%prog {0} [options] {1} <command> [arg1 .. argN]'.format(
                 command, self.args)

     def call(self, *args, **kwargs):
@@ -477,30 +510,29 @@ class _RemoteControl(Command):

     def run(self, *args, **kwargs):
         if not args:
-            raise Error('Missing %s method. See --help' % self.name)
+            raise Error('Missing {0.name} method. See --help'.format(self))
         return self.do_call_method(args, **kwargs)

     def do_call_method(self, args, **kwargs):
         method = args[0]
         if method == 'help':
-            raise Error("Did you mean '%s --help'?" % self.name)
+            raise Error("Did you mean '{0.name} --help'?".format(self))
         if method not in self.choices:
-            raise Error('Unknown %s method %s' % (self.name, method))
+            raise Error('Unknown {0.name} method {1}'.format(self, method))

         destination = kwargs.get('destination')
         timeout = kwargs.get('timeout') or self.choices[method][0]
         if destination and isinstance(destination, basestring):
-            destination = map(str.strip, destination.split(','))
+            destination = list(imap(str.strip, destination.split(',')))

         try:
             handler = getattr(self, method)
         except AttributeError:
             handler = self.call

-        # XXX Python 2.5 does not support X(*args, foo=1)
-        kwargs = {"timeout": timeout, "destination": destination,
-                  "callback": self.say_remote_command_reply}
-        replies = handler(method, *args[1:], **kwargs)
+        replies = handler(method, *args[1:], timeout=timeout,
+                          destination=destination,
+                          callback=self.say_remote_command_reply)
         if not replies:
             raise Error('No nodes replied within time constraint.',
                         status=EX_UNAVAILABLE)
@@ -516,6 +548,7 @@ class _RemoteControl(Command):
             self.out(body)

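The removed workaround existed because Python 2.5 rejects keyword arguments after ``*args`` in a call; 2.6 and later accept them directly. An illustration:

.. code-block:: python

    def handler(method, *args, **kwargs):
        return method, args, kwargs

    rest = ('arg1', 'arg2')

    # Old style, required on Python 2.5:
    handler('ping', *rest, **dict({'destination': None}, timeout=10))

    # Python 2.6+ call syntax, as the new code now uses:
    handler('ping', *rest, timeout=10, destination=None)
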
+@command
 class inspect(_RemoteControl):
     """Inspect the worker at runtime.

@@ -544,9 +577,9 @@ class inspect(_RemoteControl):
     def call(self, method, *args, **options):
         i = self.app.control.inspect(**options)
         return getattr(i, method)(*args)
-inspect = command(inspect)

+@command
 class control(_RemoteControl):
     """Workers remote control.

@@ -580,9 +613,7 @@ class control(_RemoteControl):
     }

     def call(self, method, *args, **options):
-        # XXX Python 2.5 doesn't support X(*args, reply=True, **kwargs)
-        return getattr(self.app.control, method)(
-                *args, **dict(options, retry=True))
+        return getattr(self.app.control, method)(*args, retry=True, **options)

     def pool_grow(self, method, n=1, **kwargs):
         """[N=1]"""
@@ -598,24 +629,24 @@ class control(_RemoteControl):

     def rate_limit(self, method, task_name, rate_limit, **kwargs):
         """<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)>"""
-        return self.call(method, task_name, rate_limit, **kwargs)
+        return self.call(method, task_name, rate_limit, reply=True, **kwargs)

     def time_limit(self, method, task_name, soft, hard=None, **kwargs):
         """<task_name> <soft_secs> [hard_secs]"""
-        return self.call(method, task_name, soft, hard, **kwargs)
+        return self.call(method, task_name, soft, hard, reply=True, **kwargs)

     def add_consumer(self, method, queue, exchange=None,
             exchange_type='direct', routing_key=None, **kwargs):
         """<queue> [exchange [type [routing_key]]]"""
         return self.call(method, queue, exchange,
-                         exchange_type, routing_key, **kwargs)
+                         exchange_type, routing_key, reply=True, **kwargs)

     def cancel_consumer(self, method, queue, **kwargs):
         """<queue>"""
-        return self.call(method, queue, **kwargs)
-control = command(control)
+        return self.call(method, queue, reply=True, **kwargs)

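Passing ``reply=True`` makes the underlying broadcast collect worker replies instead of returning immediately, which is what lets the command line print a result. A sketch of the equivalent direct call, assuming an example broker URL:

.. code-block:: python

    from celery import Celery

    app = Celery(broker='amqp://')  # assumed broker URL

    # without reply=True this returns None; with it, a list of replies
    replies = app.control.rate_limit('tasks.add', '10/m',
                                     reply=True, timeout=1)
    print(replies)  # e.g. [{'w1.example.com': {'ok': 'new rate limit set'}}]
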
+@command
 class status(Command):
     """Show list of workers that are online."""
     option_list = inspect.option_list
@@ -631,11 +662,11 @@ class status(Command):
                         status=EX_UNAVAILABLE)
         nodecount = len(replies)
         if not kwargs.get('quiet', False):
-            self.out('\n%s %s online.' % (nodecount,
-                                          text.pluralize(nodecount, 'node')))
-status = command(status)
+            self.out('\n{0} {1} online.'.format(
+                nodecount, text.pluralize(nodecount, 'node')))

+@command
 class migrate(Command):
     """Migrate tasks from one broker to another.

@@ -647,12 +678,25 @@ class migrate(Command):
     NOTE: This command is experimental, make sure you have
           a backup of the tasks before you continue.
     """
-    def usage(self, command):
-        return '%%prog %s <source_url> <dest_url>' % (command, )
+    args = '<source_url> <dest_url>'
+    option_list = Command.option_list + (
+            Option('--limit', '-n', type='int',
+                    help='Number of tasks to consume (int)'),
+            Option('--timeout', '-t', type='float', default=1.0,
+                    help='Timeout in seconds (float) waiting for tasks'),
+            Option('--ack-messages', '-a', action='store_true',
+                    help='Ack messages from source broker.'),
+            Option('--tasks', '-T',
+                    help='List of task names to filter on.'),
+            Option('--queues', '-Q',
+                    help='List of queues to migrate.'),
+            Option('--forever', '-F', action='store_true',
+                    help='Continually migrate tasks until killed.'),
+    )
+    progress_fmt = MIGRATE_PROGRESS_FMT

     def on_migrate_task(self, state, body, message):
-        self.out('Migrating task %s/%s: %s[%s]' % (
-            state.count, state.strtotal, body['task'], body['id']))
+        self.out(self.progress_fmt.format(state=state, body=body))

     def run(self, *args, **kwargs):
         if len(args) != 2:
@@ -662,21 +706,24 @@ class migrate(Command):

         migrate_tasks(Connection(args[0]),
                       Connection(args[1]),
-                      callback=self.on_migrate_task)
-migrate = command(migrate)
+                      callback=self.on_migrate_task,
+                      **kwargs)

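With the new options forwarded as ``**kwargs``, the command maps directly onto ``celery.contrib.migrate.migrate_tasks``. A sketch of the equivalent call, with assumed example broker URLs:

.. code-block:: python

    from kombu import Connection

    from celery.contrib.migrate import migrate_tasks

    # roughly: celery migrate amqp://guest@localhost// redis://localhost
    migrate_tasks(Connection('amqp://guest@localhost//'),
                  Connection('redis://localhost'))
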
+@command
 class shell(Command):  # pragma: no cover
     """Start shell session with convenient access to celery symbols.

     The following symbols will be added to the main globals:

         - celery:  the current application.
-        - chord, group, chain, chunks, xmap, xstarmap
-          subtask, Task
+        - chord, group, chain, chunks,
+          xmap, xstarmap subtask, Task
         - all registered tasks.

-    Example Session::
+    Example Session:
+
+    .. code-block:: bash

         $ celery shell

@@ -775,31 +822,29 @@ class shell(Command):  # pragma: no cover
         import bpython
         bpython.embed(self.locals)

-shell = command(shell)
-

+@command
 class help(Command):
     """Show help screen and exit."""

     def usage(self, command):
-        return '%%prog <command> [options] %s' % (self.args, )
+        return '%%prog <command> [options] {0.args}'.format(self)

     def run(self, *args, **kwargs):
         self.parser.print_help()
-        self.out(HELP % {'prog_name': self.prog_name,
-                         'commands': CeleryCommand.list_commands()})
+        self.out(HELP.format(prog_name=self.prog_name,
+                             commands=CeleryCommand.list_commands()))

         return EX_USAGE
-help = command(help)

+@command
 class report(Command):
     """Shows information useful to include in bugreports."""

     def run(self, *args, **kwargs):
         self.out(self.app.bugreport())
         return EX_OK
-report = command(report)

 class CeleryCommand(BaseCommand):
@@ -852,12 +897,13 @@ class CeleryCommand(BaseCommand):
     def get_command_info(self, command, indent=0, color=None):
         colored = term.colored().names[color] if color else lambda x: x
         obj = self.commands[command]
+        cmd = 'celery {0}'.format(colored(command))
         if obj.leaf:
-            return '|' + text.indent('celery %s' % colored(command), indent)
+            return '|' + text.indent(cmd, indent)
         return text.join([
             ' ',
-            '|' + text.indent('celery %s --help' % colored(command), indent),
-            obj.list_commands(indent, 'celery %s' % command, colored),
+            '|' + text.indent('{0} --help'.format(cmd), indent),
+            obj.list_commands(indent, 'celery {0}'.format(command), colored),
         ])

     @classmethod
@@ -866,13 +912,22 @@ class CeleryCommand(BaseCommand):
         ret = []
         for cls, commands, color in command_classes:
             ret.extend([
-                text.indent('+ %s: ' % white(cls), indent),
+                text.indent('+ {0}: '.format(white(cls)), indent),
                 '\n'.join(self.get_command_info(command, indent + 4, color)
                             for command in commands),
                 ''
             ])
         return '\n'.join(ret).strip()

+    def with_pool_option(self, argv):
+        if len(argv) > 1 and argv[1] == 'worker':
+            # this command supports custom pools
+            # that may have to be loaded as early as possible.
+            return (['-P'], ['--pool'])
+
+    def on_concurrency_setup(self):
+        load_extension_commands()
+

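``with_pool_option`` tells the base command which arguments select a custom pool, so the startup path can import eventlet/gevent and apply their monkey patches before anything else touches the stdlib. A sketch of the early scan (not the actual helper):

.. code-block:: python

    import sys

    def maybe_patch_concurrency(argv=None):
        # sketch only: find -P/--pool early and load the implementation,
        # which applies eventlet/gevent monkey patches when selected.
        argv = sys.argv if argv is None else argv
        for i, arg in enumerate(argv):
            if arg in ('-P', '--pool') and i + 1 < len(argv):
                pool = argv[i + 1]
            elif arg.startswith('--pool='):
                pool = arg.split('=', 1)[1]
            else:
                continue
            from celery import concurrency
            concurrency.get_implementation(pool)
            break
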
 def determine_exit_status(ret):
     if isinstance(ret, int):
@@ -880,14 +935,21 @@ def determine_exit_status(ret):
     return EX_OK if ret else EX_FAILURE


-def main():
+def main(argv=None):
     # Fix for setuptools generated scripts, so that it will
     # work with multiprocessing fork emulation.
     # (see multiprocessing.forking.get_preparation_data())
-    if __name__ != '__main__':  # pragma: no cover
-        sys.modules['__main__'] = sys.modules[__name__]
-    freeze_support()
-    CeleryCommand().execute_from_commandline()
+    try:
+        if __name__ != '__main__':  # pragma: no cover
+            sys.modules['__main__'] = sys.modules[__name__]
+        cmd = CeleryCommand()
+        cmd.maybe_patch_concurrency()
+        from billiard import freeze_support
+        freeze_support()
+        cmd.execute_from_commandline(argv)
+    except KeyboardInterrupt:
+        pass
+

 if __name__ == '__main__':          # pragma: no cover
     main()

+ 1 - 2
celery/bin/celerybeat.py

@@ -17,7 +17,7 @@ The :program:`celery beat` command.

     Path to the schedule database. Defaults to `celerybeat-schedule`.
     The extension '.db' may be appended to the filename.
-    Default is %(default)s.
+    Default is {default}.

 .. cmdoption:: -S, --scheduler

@@ -38,7 +38,6 @@ The :program:`celery beat` command.
     `ERROR`, `CRITICAL`, or `FATAL`.

 """
-from __future__ import with_statement
 from __future__ import absolute_import

 import os

+ 33 - 26
celery/bin/celeryd.py

@@ -31,7 +31,8 @@ The :program:`celery worker` command (previously known as ``celeryd``)

 .. cmdoption:: -n, --hostname

-    Set custom hostname, e.g. 'foo.example.com'.
+    Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
+    %n (name) and %d, (domain).

 .. cmdoption:: -B, --beat

@@ -62,7 +63,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 .. cmdoption:: -S, --statedb

     Path to the state database. The extension '.db' may
-    be appended to the filename. Default: %(default)s
+    be appended to the filename. Default: {default}

 .. cmdoption:: -E, --events

@@ -117,10 +118,9 @@ from __future__ import absolute_import

 import sys

-from billiard import freeze_support
-
 from celery import concurrency
-from celery.bin.base import Command, Option
+from celery.bin.base import Command, Option, daemon_options
+from celery.bin.celeryd_detach import detached_celeryd
 from celery.utils.log import LOG_LEVELS, mlevel


@@ -131,35 +131,42 @@ class WorkerCommand(Command):
     supports_args = False

     def execute_from_commandline(self, argv=None):
-        if argv is None:
-            argv = list(sys.argv)
-        try:
-            pool = argv[argv.index('-P') + 1]
-        except ValueError:
-            pass
-        else:
-            # set up eventlet/gevent environments ASAP.
-            concurrency.get_implementation(pool)
+        self.maybe_detach(argv)
         return super(WorkerCommand, self).execute_from_commandline(argv)

-    def run(self, *args, **kwargs):
-        kwargs.pop('app', None)
+    def maybe_detach(self, argv, dopts=['-D', '--detach']):
+        argv = list(sys.argv) if argv is None else argv
+        if any(arg in argv for arg in dopts):
+            argv = [arg for arg in argv if arg not in dopts]
+            # never returns
+            detached_celeryd().execute_from_commandline(argv)
+            raise SystemExit(0)
+
+    def run(self, hostname=None, pool_cls=None, loglevel=None,
+            app=None, **kwargs):
         # Pools like eventlet/gevent needs to patch libs as early
         # as possible.
-        kwargs['pool_cls'] = concurrency.get_implementation(
-                    kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
+        pool_cls = (concurrency.get_implementation(pool_cls) or
+                    self.app.conf.CELERYD_POOL)
         if self.app.IS_WINDOWS and kwargs.get('beat'):
             self.die('-B option does not work on Windows.  '
                      'Please run celerybeat as a separate service.')
-        loglevel = kwargs.get('loglevel')
+        hostname = self.simple_format(hostname)
         if loglevel:
             try:
-                kwargs['loglevel'] = mlevel(loglevel)
+                loglevel = mlevel(loglevel)
             except KeyError:  # pragma: no cover
-                self.die('Unknown level %r. Please use one of %s.' % (
-                    loglevel, '|'.join(l for l in LOG_LEVELS.keys()
+                self.die('Unknown level {0!r}. Please use one of {1}.'.format(
+                    loglevel, '|'.join(l for l in LOG_LEVELS
                       if isinstance(l, basestring))))
-        return self.app.Worker(**kwargs).run()
+        return self.app.Worker(
+            hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, **kwargs
+        ).start()
+
+    def with_pool_option(self, argv):
+        # this command support custom pools
+        # that may have to be loaded as early as possible.
+        return (['-P'], ['--pool'])

     def get_options(self):
         conf = self.app.conf
@@ -168,7 +175,6 @@ class WorkerCommand(Command):
                 default=conf.CELERYD_CONCURRENCY, type='int'),
             Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
             Option('--purge', '--discard', default=False, action='store_true'),
-            Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE),
             Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
             Option('-n', '--hostname'),
             Option('-B', '--beat', action='store_true'),
@@ -187,11 +193,11 @@ class WorkerCommand(Command):
                 default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
             Option('--queues', '-Q', default=[]),
             Option('--include', '-I', default=[]),
-            Option('--pidfile'),
             Option('--autoscale'),
             Option('--autoreload', action='store_true'),
             Option('--no-execv', action='store_true', default=False),
-        )
+            Option('-D', '--detach', action='store_true'),
+        ) + daemon_options()


 def main():
@@ -200,6 +206,7 @@ def main():
     # (see multiprocessing.forking.get_preparation_data())
     if __name__ != '__main__':  # pragma: no cover
         sys.modules['__main__'] = sys.modules[__name__]
+    from billiard import freeze_support
     freeze_support()
     worker = WorkerCommand()
     worker.execute_from_commandline()

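``maybe_detach`` hands a ``-D``/``--detach`` invocation over to ``detached_celeryd``, which daemonizes with ``celery.platforms.detached`` before re-running the worker. A sketch of that primitive, with assumed example file locations:

.. code-block:: python

    from celery.platforms import detached

    # what --detach builds on: fork to the background, write the
    # pidfile, redirect output, then start the worker in the child.
    with detached(logfile='/var/log/celeryd.log',
                  pidfile='/var/run/celeryd.pid'):
        pass  # in the daemonized child; exec or start the worker here
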
+ 7 - 8
celery/bin/celeryd_detach.py

@@ -11,14 +11,13 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

+import celery
 import os
 import sys

 from optparse import OptionParser, BadOptionError

-from celery import __version__
 from celery.platforms import EX_FAILURE, detached
 from celery.utils.log import get_logger

@@ -73,9 +72,9 @@ class PartialOptionParser(OptionParser):
                 nargs = option.nargs
                 if len(rargs) < nargs:
                     if nargs == 1:
-                        self.error('%s option requires an argument' % opt)
+                        self.error('{0} requires an argument'.format(opt))
                     else:
-                        self.error('%s option requires %d arguments' % (
+                        self.error('{0} requires {1} arguments'.format(
                                     opt, nargs))
                 elif nargs == 1:
                     value = rargs.pop(0)
@@ -84,7 +83,7 @@ class PartialOptionParser(OptionParser):
                     del rargs[0:nargs]

             elif had_explicit_value:
-                self.error('%s option does not take a value' % opt)
+                self.error('{0} option does not take a value'.format(opt))
             else:
                 value = None
             option.process(opt, value, values, self)
@@ -104,7 +103,7 @@ class PartialOptionParser(OptionParser):
 class detached_celeryd(object):
     option_list = OPTION_LIST
     usage = '%prog [options] [celeryd options]'
-    version = __version__
+    version = celery.VERSION_BANNER
     description = ('Detaches Celery worker nodes.  See `celeryd --help` '
                    'for the list of supported worker arguments.')
     command = sys.executable
@@ -122,9 +121,9 @@ class detached_celeryd(object):
         parser = self.Parser(prog_name)
         options, values = parser.parse_args(argv)
         if options.logfile:
-            parser.leftovers.append('--logfile=%s' % (options.logfile, ))
+            parser.leftovers.append('--logfile={0}'.format(options.logfile))
         if options.pidfile:
-            parser.leftovers.append('--pidfile=%s' % (options.pidfile, ))
+            parser.leftovers.append('--pidfile={0}'.format(options.pidfile))
         return options, values, parser.leftovers

     def execute_from_commandline(self, argv=None):

+ 44 - 42
celery/bin/celeryd_multi.py

@@ -6,7 +6,7 @@
 Examples
 ========

-::
+.. code-block:: bash

     # Single worker with explicit name and events enabled.
     $ celeryd-multi start Leslie -E
@@ -48,12 +48,12 @@ Examples
     #   * Three of the workers processes the images and video queue
     #   * Two of the workers processes the data queue with loglevel DEBUG
     #   * the rest processes the default' queue.
-    $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data
+    $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
         -Q default -L:4,5 DEBUG

     # You can show the commands necessary to start the workers with
     # the 'show' command:
-    $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5:data
+    $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
         -Q default -L:4,5 DEBUG

     # Additional options are added to each celeryd',
@@ -88,7 +88,7 @@ Examples
     celeryd -n xuzzy.myhost -c 3

 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function

 import errno
 import os
@@ -97,13 +97,14 @@ import socket
 import sys

 from collections import defaultdict
+from itertools import imap
 from subprocess import Popen
 from time import sleep

 from kombu.utils import cached_property
 from kombu.utils.encoding import from_utf8

-from celery import __version__
+from celery import VERSION_BANNER
 from celery.platforms import PIDFile, shellsplit
 from celery.utils import term
 from celery.utils.text import pluralize
@@ -113,16 +114,16 @@ SIGNAMES = set(sig for sig in dir(signal)
 SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)

 USAGE = """\
-usage: %(prog_name)s start <node1 node2 nodeN|range> [celeryd options]
-       %(prog_name)s stop <n1 n2 nN|range> [-SIG (default: -TERM)]
-       %(prog_name)s restart <n1 n2 nN|range> [-SIG] [celeryd options]
-       %(prog_name)s kill <n1 n2 nN|range>
+usage: {prog_name} start <node1 node2 nodeN|range> [celeryd options]
+       {prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)]
+       {prog_name} restart <n1 n2 nN|range> [-SIG] [celeryd options]
+       {prog_name} kill <n1 n2 nN|range>

-       %(prog_name)s show <n1 n2 nN|range> [celeryd options]
-       %(prog_name)s get hostname <n1 n2 nN|range> [-qv] [celeryd options]
-       %(prog_name)s names <n1 n2 nN|range>
-       %(prog_name)s expand template <n1 n2 nN|range>
-       %(prog_name)s help
+       {prog_name} show <n1 n2 nN|range> [celeryd options]
+       {prog_name} get hostname <n1 n2 nN|range> [-qv] [celeryd options]
+       {prog_name} names <n1 n2 nN|range>
+       {prog_name} expand template <n1 n2 nN|range>
+       {prog_name} help

 additional options (must appear after command name):

@@ -152,7 +153,8 @@ class MultiTool(object):
         self.commands = {'start': self.start,
                          'show': self.show,
                          'stop': self.stop,
-                         'stop_verify': self.stop_verify,
+                         'stopwait': self.stopwait,
+                         'stop_verify': self.stopwait,
                          'restart': self.restart,
                          'kill': self.kill,
                          'names': self.names,
@@ -182,12 +184,12 @@ class MultiTool(object):
         try:
             self.commands[argv[0]](argv[1:], cmd)
         except KeyError:
-            self.error('Invalid command: %s' % argv[0])
+            self.error('Invalid command: {0}'.format(argv[0]))

         return self.retcode

     def say(self, m, newline=True):
-        self.fh.write('%s%s' % (m, '\n' if newline else ''))
+        print(m, file=self.fh, end='\n' if newline else '')

     def names(self, argv, cmd):
         p = NamespacedOptionParser(argv)
@@ -215,7 +217,7 @@ class MultiTool(object):
         retcodes = []
         self.note('> Starting nodes...')
         for nodename, argv, _ in multi_args(p, cmd):
-            self.note('\t> %s: ' % (nodename, ), newline=False)
+            self.note('\t> {0}: '.format(nodename), newline=False)
             retcode = self.waitexec(argv)
             self.note(retcode and self.FAILED or self.OK)
             retcodes.append(retcode)
@@ -224,15 +226,15 @@ class MultiTool(object):
     def with_detacher_default_options(self, p):
         p.options.setdefault('--pidfile', 'celeryd@%n.pid')
         p.options.setdefault('--logfile', 'celeryd@%n.log')
-        p.options.setdefault('--cmd', '-m celery.bin.celeryd_detach')
+        p.options.setdefault('--cmd', '-m celery worker --detach')

     def signal_node(self, nodename, pid, sig):
         try:
             os.kill(pid, sig)
-        except OSError, exc:
+        except OSError as exc:
             if exc.errno != errno.ESRCH:
                 raise
-            self.note('Could not signal %s (%s): No such process' % (
+            self.note('Could not signal {0} ({1}): No such process'.format(
                         nodename, pid))
             return False
         return True
@@ -240,7 +242,7 @@ class MultiTool(object):
     def node_alive(self, pid):
         try:
             os.kill(pid, 0)
-        except OSError, exc:
+        except OSError as exc:
             if exc.errno == errno.ESRCH:
                 return False
             raise
@@ -261,16 +263,15 @@ class MultiTool(object):
         for node in list(P):
             if node in P:
                 nodename, _, pid = node
-                self.note('\t> %s: %s -> %s' % (nodename,
-                                                SIGMAP[sig][3:],
-                                                pid))
+                self.note('\t> {0}: {1} -> {2}'.format(
+                    nodename, SIGMAP[sig][3:], pid))
                 if not self.signal_node(nodename, pid, sig):
                     on_down(node)

         def note_waiting():
             left = len(P)
             if left:
-                self.note(self.colored.blue('> Waiting for %s %s...' % (
+                self.note(self.colored.blue('> Waiting for {0} {1}...'.format(
                     left, pluralize(left, 'node'))), newline=False)

         if retry:
@@ -282,7 +283,7 @@ class MultiTool(object):
                     self.note('.', newline=False)
                     nodename, _, pid = node
                     if not self.node_alive(pid):
-                        self.note('\n\t> %s: %s' % (nodename, self.OK))
+                        self.note('\n\t> {0}: {1}'.format(nodename, self.OK))
                         on_down(node)
                         note_waiting()
                         break
@@ -304,7 +305,7 @@ class MultiTool(object):
             if pid:
                 nodes.append((nodename, tuple(argv), pid))
             else:
-                self.note('> %s: %s' % (nodename, self.DOWN))
+                self.note('> {0}: {1}'.format(nodename, self.DOWN))
                 if callback:
                     callback(nodename, argv, pid)

@@ -314,7 +315,7 @@ class MultiTool(object):
         self.splash()
         p = NamespacedOptionParser(argv)
         for nodename, _, pid in self.getpids(p, cmd):
-            self.note('Killing node %s (%s)' % (nodename, pid))
+            self.note('Killing node {0} ({1})'.format(nodename, pid))
             self.signal_node(nodename, pid, signal.SIGKILL)

     def stop(self, argv, cmd, retry=None, callback=None):
@@ -337,7 +338,7 @@ class MultiTool(object):

         def on_node_shutdown(nodename, argv, pid):
             self.note(self.colored.blue(
-                '> Restarting node %s: ' % nodename), newline=False)
+                '> Restarting node {0}: '.format(nodename)), newline=False)
             retval = self.waitexec(argv)
             self.note(retval and self.FAILED or self.OK)
             retvals.append(retval)
@@ -345,11 +346,12 @@ class MultiTool(object):
         self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown)
         self.retval = int(any(retvals))

-    def stop_verify(self, argv, cmd):
+    def stopwait(self, argv, cmd):
         self.splash()
         p = NamespacedOptionParser(argv)
         self.with_detacher_default_options(p)
         return self._stop_nodes(p, cmd, retry=2)
+    stop_verify = stopwait  # compat

     def expand(self, argv, cmd=None):
         template = argv[0]
@@ -362,24 +364,24 @@ class MultiTool(object):

     def usage(self):
         self.splash()
-        self.say(USAGE % {'prog_name': self.prog_name})
+        self.say(USAGE.format(prog_name=self.prog_name))

     def splash(self):
         if not self.nosplash:
             c = self.colored
-            self.note(c.cyan('celeryd-multi v%s' % __version__))
+            self.note(c.cyan('celeryd-multi v{0}'.format(VERSION_BANNER)))

     def waitexec(self, argv, path=sys.executable):
         args = ' '.join([path] + list(argv))
         argstr = shellsplit(from_utf8(args))
         pipe = Popen(argstr, env=self.env)
-        self.info('  %s' % ' '.join(argstr))
+        self.info('  {0}'.format(' '.join(argstr)))
         retcode = pipe.wait()
         if retcode < 0:
-            self.note('* Child was terminated by signal %s' % (-retcode, ))
+            self.note('* Child was terminated by signal {0}'.format(-retcode))
             return -retcode
         elif retcode > 0:
-            self.note('* Child terminated with failure code %s' % (retcode, ))
+            self.note('* Child terminated with errorcode {0}'.format(retcode))
         return retcode

     def error(self, msg=None):
@@ -425,7 +427,7 @@ def multi_args(p, cmd='celeryd', append='', prefix='', suffix=''):
         except ValueError:
             pass
         else:
-            names = map(str, range(1, noderange + 1))
+            names = list(imap(str, range(1, noderange + 1)))
             prefix = 'celery'
     cmd = options.pop('--cmd', cmd)
     append = options.pop('--append', append)
@@ -517,8 +519,8 @@ def format_opt(opt, value):
     if not value:
         return opt
     if opt.startswith('--'):
-        return '%s=%s' % (opt, value)
-    return '%s %s' % (opt, value)
+        return '{0}={1}'.format(opt, value)
+    return '{0} {1}'.format(opt, value)


 def parse_ns_range(ns, ranges=False):
@@ -526,19 +528,19 @@ def parse_ns_range(ns, ranges=False):
     for space in ',' in ns and ns.split(',') or [ns]:
         if ranges and '-' in space:
             start, stop = space.split('-')
-            x = map(str, range(int(start), int(stop) + 1))
+            x = list(imap(str, range(int(start), int(stop) + 1)))
             ret.extend(x)
         else:
             ret.append(space)
     return ret


-def abbreviations(map):
+def abbreviations(mapping):

     def expand(S):
         ret = S
         if S is not None:
-            for short, long in map.items():
+            for short, long in mapping.items():
                 ret = ret.replace(short, long)
         return ret

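``say`` now builds on ``print()`` via ``from __future__ import print_function``, using ``file=`` and ``end=`` instead of hand-assembled ``'%s%s'`` writes. A minimal standalone version:

.. code-block:: python

    from __future__ import print_function

    import sys

    def say(m, fh=sys.stderr, newline=True):
        # file=/end= replace the old self.fh.write('%s%s' % ...) pattern
        print(m, file=fh, end='\n' if newline else '')

    say('> Starting nodes...')
    say('\t> Leslie: ', newline=False)
    say('OK')
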
+ 2 - 3
celery/bin/celeryev.py

@@ -36,7 +36,6 @@ The :program:`celery events` command.

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import os
 import sys
@@ -100,8 +99,8 @@ class EvCommand(Command):
             return cam()

     def set_process_status(self, prog, info=''):
-        prog = '%s:%s' % (self.prog_name, prog)
-        info = '%s %s' % (info, strargv(sys.argv))
+        prog = '{0}:{1}'.format(self.prog_name, prog)
+        info = '{0} {1}'.format(info, strargv(sys.argv))
         return set_process_title(prog, info=info)

     def get_options(self):

+ 62 - 37
celery/canvas.py

@@ -3,20 +3,22 @@
     celery.canvas
     celery.canvas
     ~~~~~~~~~~~~~
     ~~~~~~~~~~~~~
 
 
-    Designing task workflows.
+    Composing task workflows.
+
+    Documentation for these functions are in :mod:`celery`.
+    You should not import from this module directly.
 
 
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
 from operator import itemgetter
 from operator import itemgetter
-from itertools import chain as _chain
+from itertools import chain as _chain, imap
 
 
 from kombu.utils import fxrange, kwdict, reprcall
 from kombu.utils import fxrange, kwdict, reprcall
 
 
 from celery import current_app
 from celery import current_app
 from celery.local import Proxy
 from celery.local import Proxy
 from celery.utils import cached_property, uuid
 from celery.utils import cached_property, uuid
-from celery.utils.compat import chain_from_iterable
 from celery.utils.functional import (
 from celery.utils.functional import (
     maybe_list, is_list, regen,
     maybe_list, is_list, regen,
     chunks as _chunks,
     chunks as _chunks,
@@ -73,7 +75,7 @@ class Signature(dict):
     def from_dict(self, d):
     def from_dict(self, d):
         typ = d.get('subtask_type')
         typ = d.get('subtask_type')
         if typ:
         if typ:
-            return self.TYPES[typ].from_dict(d)
+            return self.TYPES[typ].from_dict(kwdict(d))
         return Signature(d)
         return Signature(d)
 
 
     def __init__(self, task=None, args=None, kwargs=None, options=None,
     def __init__(self, task=None, args=None, kwargs=None, options=None,
@@ -158,12 +160,14 @@ class Signature(dict):
         return self.append_to_list_option('link_error', errback)
         return self.append_to_list_option('link_error', errback)
 
 
     def flatten_links(self):
     def flatten_links(self):
-        return list(chain_from_iterable(_chain([[self]],
+        return list(_chain.from_iterable(_chain([[self]],
                 (link.flatten_links()
                 (link.flatten_links()
                     for link in maybe_list(self.options.get('link')) or []))))
                     for link in maybe_list(self.options.get('link')) or []))))
 
 
     def __or__(self, other):
     def __or__(self, other):
-        if isinstance(other, chain):
+        if not isinstance(self, chain) and isinstance(other, chain):
+            return chain((self,) + other.tasks)
+        elif isinstance(other, chain):
             return chain(*self.tasks + other.tasks)
             return chain(*self.tasks + other.tasks)
         elif isinstance(other, Signature):
         elif isinstance(other, Signature):
             if isinstance(self, chain):
             if isinstance(self, chain):
@@ -201,19 +205,20 @@ class chain(Signature):
 
 
     def __init__(self, *tasks, **options):
     def __init__(self, *tasks, **options):
         tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
         tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
-        Signature.__init__(self, 'celery.chain', (), {'tasks': tasks}, options)
+        Signature.__init__(self,
+            'celery.chain', (), {'tasks': tasks}, **options)
         self.tasks = tasks
         self.tasks = tasks
         self.subtask_type = 'chain'
         self.subtask_type = 'chain'
 
 
     def __call__(self, *args, **kwargs):
     def __call__(self, *args, **kwargs):
-        return self.apply_async(*args, **kwargs)
+        return self.apply_async(args, kwargs)
 
 
     @classmethod
     @classmethod
     def from_dict(self, d):
     def from_dict(self, d):
         return chain(*d['kwargs']['tasks'], **kwdict(d['options']))
         return chain(*d['kwargs']['tasks'], **kwdict(d['options']))
 
 
     def __repr__(self):
     def __repr__(self):
-        return ' | '.join(map(repr, self.tasks))
+        return ' | '.join(imap(repr, self.tasks))
 Signature.register_type(chain)
 Signature.register_type(chain)
 
 
 
 
@@ -223,7 +228,7 @@ class _basemap(Signature):
 
 
     def __init__(self, task, it, **options):
     def __init__(self, task, it, **options):
         Signature.__init__(self, self._task_name, (),
         Signature.__init__(self, self._task_name, (),
-                {'task': task, 'it': regen(it)}, **options)
+                {'task': task, 'it': regen(it)}, immutable=True, **options)
 
 
     def apply_async(self, args=(), kwargs={}, **opts):
     def apply_async(self, args=(), kwargs={}, **opts):
         # need to evaluate generators
         # need to evaluate generators
@@ -241,7 +246,8 @@ class xmap(_basemap):
 
 
     def __repr__(self):
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
         task, it = self._unpack_args(self.kwargs)
-        return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100))
+        return '[{0}(x) for x in {1}]'.format(task.task,
+                                              truncate(repr(it), 100))
 Signature.register_type(xmap)
 Signature.register_type(xmap)
 
 
 
 
@@ -250,7 +256,8 @@ class xstarmap(_basemap):
 
 
     def __repr__(self):
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
         task, it = self._unpack_args(self.kwargs)
-        return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100))
+        return '[{0}(*x) for x in {1}]'.format(task.task,
+                                               truncate(repr(it), 100))
 Signature.register_type(xstarmap)
 Signature.register_type(xstarmap)
 
 
 
 
@@ -259,22 +266,21 @@ class chunks(Signature):
 
 
     def __init__(self, task, it, n, **options):
     def __init__(self, task, it, n, **options):
         Signature.__init__(self, 'celery.chunks', (),
         Signature.__init__(self, 'celery.chunks', (),
-                {'task': task, 'it': regen(it), 'n': n}, **options)
+                {'task': task, 'it': regen(it), 'n': n},
+                immutable=True, **options)
 
 
     @classmethod
     @classmethod
     def from_dict(self, d):
     def from_dict(self, d):
         return chunks(*self._unpack_args(d['kwargs']), **d['options'])
         return chunks(*self._unpack_args(d['kwargs']), **d['options'])
 
 
     def apply_async(self, args=(), kwargs={}, **opts):
     def apply_async(self, args=(), kwargs={}, **opts):
-        # need to evaluate generators
-        task, it, n = self._unpack_args(self.kwargs)
-        return self.type.apply_async((),
-                {'task': task, 'it': list(it), 'n': n}, **opts)
+        return self.group().apply_async(args, kwargs, **opts)
 
 
     def __call__(self, **options):
     def __call__(self, **options):
         return self.group()(**options)
         return self.group()(**options)
 
 
     def group(self):
     def group(self):
+        # need to evaluate generators
         task, it, n = self._unpack_args(self.kwargs)
         task, it, n = self._unpack_args(self.kwargs)
         return group(xstarmap(task, part) for part in _chunks(iter(it), n))
         return group(xstarmap(task, part) for part in _chunks(iter(it), n))
 
 
@@ -284,22 +290,31 @@ class chunks(Signature):
 Signature.register_type(chunks)
 Signature.register_type(chunks)
 
 
 
 
+def _maybe_group(tasks):
+    if isinstance(tasks, group):
+        tasks = list(tasks.tasks)
+    else:
+        tasks = regen(tasks if is_list(tasks) else tasks)
+    return tasks
+
+
 class group(Signature):
 class group(Signature):
 
 
     def __init__(self, *tasks, **options):
     def __init__(self, *tasks, **options):
-        tasks = regen(tasks[0] if len(tasks) == 1 and is_list(tasks[0])
-                               else tasks)
-        Signature.__init__(self, 'celery.group', (), {'tasks': tasks}, options)
+        if len(tasks) == 1:
+            tasks = _maybe_group(tasks[0])
+        Signature.__init__(self,
+            'celery.group', (), {'tasks': tasks}, **options)
         self.tasks, self.subtask_type = tasks, 'group'
         self.tasks, self.subtask_type = tasks, 'group'
 
 
     @classmethod
     @classmethod
     def from_dict(self, d):
     def from_dict(self, d):
         return group(d['kwargs']['tasks'], **kwdict(d['options']))
         return group(d['kwargs']['tasks'], **kwdict(d['options']))
 
 
-    def __call__(self, **options):
-        tasks, result, gid = self.type.prepare(options,
-                                map(Signature.clone, self.tasks))
-        return self.type(tasks, result, gid)
+    def __call__(self, *partial_args, **options):
+        tasks, result, gid, args = self.type.prepare(options,
+                    [Signature.clone(t) for t in self.tasks], partial_args)
+        return self.type(tasks, result, gid, args)
 
 
     def skew(self, start=1.0, stop=None, step=1.0):
     def skew(self, start=1.0, stop=None, step=1.0):
         _next_skew = fxrange(start, stop, step, repeatlast=True).next
         _next_skew = fxrange(start, stop, step, repeatlast=True).next
@@ -307,6 +322,9 @@ class group(Signature):
             task.set(countdown=_next_skew())
         return self

+    def __iter__(self):
+        return iter(self.tasks)
+
     def __repr__(self):
         return repr(self.tasks)
 Signature.register_type(group)
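
To make the new group calling convention concrete — a sketch with a hypothetical `add` task; exactly how the partial args are combined with each signature's own args is decided by `self.type.prepare()`:

    from celery import group, task

    @task
    def add(x, y):
        return x + y

    g = group(add.s(i) for i in range(10))   # generators are accepted
    sigs = list(g)                           # new __iter__ yields the tasks

    # __call__ now threads partial args through prepare(), so every
    # signature in the group receives them in addition to its own args:
    result = g(10)
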
@@ -315,25 +333,30 @@ Signature.register_type(group)
 class chord(Signature):
     Chord = Chord

-    def __init__(self, header, body=None, **options):
-        Signature.__init__(self, 'celery.chord', (),
-                         {'header': regen(header),
-                          'body': maybe_subtask(body)}, options)
+    def __init__(self, header, body=None, task='celery.chord',
+            args=(), kwargs={}, **options):
+        Signature.__init__(self, task, args, dict(kwargs,
+            header=_maybe_group(header), body=maybe_subtask(body)), **options)
         self.subtask_type = 'chord'

     @classmethod
     def from_dict(self, d):
-        kwargs = d['kwargs']
-        return chord(kwargs['header'], kwargs.get('body'),
-                     **kwdict(d['options']))
+        args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
+        return self(*args, **kwdict(d))
+
+    @staticmethod
+    def _unpack_args(header=None, body=None, **kwargs):
+        # Python signatures are better at extracting keys from dicts
+        # than manually popping things off.
+        return (header, body), kwargs

-    def __call__(self, body=None, **options):
+    def __call__(self, body=None, **kwargs):
         _chord = self.Chord
-        self.kwargs['body'] = body or self.kwargs['body']
+        body = self.kwargs['body'] = body or self.kwargs['body']
         if _chord.app.conf.CELERY_ALWAYS_EAGER:
-            return self.apply((), {}, **options)
+            return self.apply((), kwargs)
         callback_id = body.options.setdefault('task_id', uuid())
-        _chord(**self.kwargs)
+        _chord(**dict(self.kwargs, **kwargs))
         return _chord.AsyncResult(callback_id)

     def clone(self, *args, **kwargs):
@@ -356,7 +379,7 @@ class chord(Signature):
     def __repr__(self):
         if self.body:
             return self.body.reprcall(self.tasks)
-        return '<chord without body: %r>' % (self.tasks, )
+        return '<chord without body: {0.tasks!r}>'.format(self)

     @property
     def tasks(self):
@@ -377,4 +400,6 @@ def subtask(varies, *args, **kwargs):


 def maybe_subtask(d):
-    return subtask(d) if d is not None and not isinstance(d, Signature) else d
+    if d is not None and isinstance(d, dict) and not isinstance(d, Signature):
+        return subtask(d)
+    return d
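
And the chord side, as a quick sketch (hypothetical `add`/`tsum` tasks; the header may now be a group, a list, or a generator, and call-time keyword arguments are merged into `self.kwargs`):

    from celery import chord, task

    @task
    def add(x, y):
        return x + y

    @task
    def tsum(numbers):
        return sum(numbers)

    result = chord(add.s(i, i) for i in range(10))(tsum.s())
    result.get()  # -> 90
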

+ 1 - 1
celery/concurrency/__init__.py

@@ -8,7 +8,7 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-from celery.utils.imports import symbol_by_name
+from celery.local import symbol_by_name
 
 
 ALIASES = {
 ALIASES = {
     'processes': 'celery.concurrency.processes:TaskPool',
     'processes': 'celery.concurrency.processes:TaskPool',

+ 5 - 3
celery/concurrency/base.py

@@ -56,10 +56,12 @@ class BasePool(object):
     #: only used by multiprocessing pool
     uses_semaphore = False

-    def __init__(self, limit=None, putlocks=True, **options):
+    def __init__(self, limit=None, putlocks=True, forking_enable=True,
+            **options):
         self.limit = limit
         self.putlocks = putlocks
         self.options = options
+        self.forking_enable = forking_enable
         self._does_debug = logger.isEnabledFor(logging.DEBUG)

     def on_start(self):
@@ -91,11 +93,11 @@

     def terminate_job(self, pid):
         raise NotImplementedError(
-                '%s does not implement kill_job' % (self.__class__, ))
+                '{0} does not implement kill_job'.format(type(self)))

     def restart(self):
         raise NotImplementedError(
-                '%s does not implement restart' % (self.__class__, ))
+                '{0} does not implement restart'.format(type(self)))

     def stop(self):
         self.on_stop()
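
For orientation, the pool contract this builds on — a toy synchronous pool, assuming only what `BasePool` itself provides (`apply_target` is the helper the bundled pools use):

    from celery.concurrency.base import BasePool, apply_target

    class SyncPool(BasePool):

        def on_apply(self, target, args=None, kwargs=None,
                     callback=None, accept_callback=None, **_):
            # forking_enable is accepted and stored by BasePool.__init__,
            # but only the multiprocessing pool acts on it (see the
            # processes.py hunk further down).
            return apply_target(target, args or (), kwargs or {},
                                callback, accept_callback)
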

+ 23 - 2
celery/concurrency/eventlet.py

@@ -9,11 +9,32 @@
 from __future__ import absolute_import

 import os
-if not os.environ.get('EVENTLET_NOPATCH'):
+import sys
+
+EVENTLET_NOPATCH = os.environ.get('EVENTLET_NOPATCH', False)
+EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0))
+W_RACE = """\
+Celery module with %s imported before eventlet patched\
+"""
+RACE_MODS = ('billiard.', 'celery.', 'kombu.')
+
+
+#: Warn if we couldn't patch early enough,
+#: and thread/socket-dependent celery modules have already been loaded.
+for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)):
+    for side in ('thread', 'threading', 'socket'):
+        if getattr(sys.modules[mod], side, None):
+            import warnings
+            warnings.warn(RuntimeWarning(W_RACE % side))
+
+
+PATCHED = [0]
+if not EVENTLET_NOPATCH and not PATCHED[0]:
+    PATCHED[0] += 1
     import eventlet
     import eventlet.debug
     eventlet.monkey_patch()
-    eventlet.debug.hub_prevent_multiple_readers(False)
+    eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK)

 from time import time
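
The knobs in context — a sketch (values are illustrative); both variables must be set before this module is imported for the first time:

    import os

    os.environ['EVENTLET_NOBLOCK'] = '5'      # report hub blockage > 5s
    # os.environ['EVENTLET_NOPATCH'] = 'yes'  # ...or skip patching entirely
    import celery.concurrency.eventlet  # noqa: importing applies the patch
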
 
 

+ 34 - 5
celery/concurrency/gevent.py

@@ -9,9 +9,23 @@
 from __future__ import absolute_import

 import os
-if not os.environ.get('GEVENT_NOPATCH'):
-    from gevent import monkey
+
+PATCHED = [0]
+if not os.environ.get('GEVENT_NOPATCH') and not PATCHED[0]:
+    PATCHED[0] += 1
+    from gevent import monkey, version_info
     monkey.patch_all()
+    if version_info[0] == 0:
+        # Signals don't work with gevent versions prior to 1.0,
+        # and they are not monkey-patched by monkey.patch_all().
+        from gevent import signal as _gevent_signal
+        _signal = __import__('signal')
+        _signal.signal = _gevent_signal
+
+try:
+    from gevent import Timeout
+except ImportError:
+    Timeout = None  # noqa

 from time import time

@@ -20,6 +34,17 @@ from celery.utils import timer2
 from .base import apply_target, BasePool


+def apply_timeout(target, args=(), kwargs={}, callback=None,
+                  accept_callback=None, pid=None, timeout=None,
+                  timeout_callback=None, **rest):
+    try:
+        with Timeout(timeout):
+            return apply_target(target, args, kwargs, callback,
+                                accept_callback, pid, **rest)
+    except Timeout:
+        return timeout_callback(False, timeout)
+
+
 class Schedule(timer2.Schedule):

     def __init__(self, *args, **kwargs):
@@ -90,6 +115,7 @@ class TaskPool(BasePool):
         from gevent.pool import Pool
         self.Pool = Pool
         self.spawn_n = spawn_raw
+        self.timeout = kwargs.get('timeout')
         super(TaskPool, self).__init__(*args, **kwargs)

     def on_start(self):
@@ -101,9 +127,12 @@ class TaskPool(BasePool):
             self._pool.join()

     def on_apply(self, target, args=None, kwargs=None, callback=None,
-            accept_callback=None, **_):
-        return self._quick_put(apply_target, target, args, kwargs,
-                               callback, accept_callback)
+            accept_callback=None, timeout=None, timeout_callback=None, **_):
+        timeout = self.timeout if timeout is None else timeout
+        return self._quick_put(apply_timeout if timeout else apply_target,
+                               target, args, kwargs, callback, accept_callback,
+                               timeout=timeout,
+                               timeout_callback=timeout_callback)

     def grow(self, n=1):
         self._pool._semaphore.counter += n
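
The timeout machinery in isolation — the same pattern apply_timeout uses, runnable with any gevent installation:

    import gevent
    from gevent import Timeout

    def bounded(fun, args=(), timeout=None, timeout_callback=None):
        # Abort fun() once `timeout` elapses and report the failure to
        # the timeout callback, just as on_apply arranges for tasks.
        try:
            with Timeout(timeout):
                return fun(*args)
        except Timeout:
            return timeout_callback(False, timeout)

    print(bounded(gevent.sleep, (2, ), timeout=0.1,
                  timeout_callback=lambda ok, t: 'timed out after %ss' % t))
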

+ 5 - 12
celery/concurrency/processes/__init__.py → celery/concurrency/processes.py

@@ -12,23 +12,15 @@
 from __future__ import absolute_import

 import os
-import platform
-import signal as _signal
+
+from billiard import forking_enable
+from billiard.pool import Pool, RUN, CLOSE

 from celery import platforms
 from celery import signals
 from celery._state import set_default_app
 from celery.concurrency.base import BasePool
 from celery.task import trace
-from billiard.pool import Pool, RUN, CLOSE
-
-if platform.system() == 'Windows':  # pragma: no cover
-    # On Windows os.kill calls TerminateProcess which cannot be
-    # handled by # any process, so this is needed to terminate the task
-    # *and its children* (if any).
-    from ._win import kill_processtree as _kill  # noqa
-else:
-    from os import kill as _kill                 # noqa

 #: List of signals to reset when a child process starts.
 WORKER_SIGRESET = frozenset(['SIGTERM',
@@ -79,6 +71,7 @@ class TaskPool(BasePool):
         Will pre-fork all workers so they're ready to accept tasks.

         """
+        forking_enable(self.forking_enable)
         P = self._pool = self.Pool(processes=self.limit,
                                    initializer=process_initializer,
                                    **self.options)
@@ -109,7 +102,7 @@ class TaskPool(BasePool):
             self._pool.close()

     def terminate_job(self, pid, signal=None):
-        _kill(pid, signal or _signal.SIGTERM)
+        return self._pool.terminate_job(pid, signal)

     def grow(self, n=1):
         return self._pool.grow(n)

+ 0 - 116
celery/concurrency/processes/_win.py

@@ -1,116 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.concurrency.processes._win
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    Windows utilities to terminate process groups.
-
-"""
-from __future__ import absolute_import
-
-import os
-
-# psutil is painfully slow in win32. So to avoid adding big
-# dependencies like pywin32 a ctypes based solution is preferred
-
-# Code based on the winappdbg project http://winappdbg.sourceforge.net/
-# (BSD License)
-from ctypes import (
-    byref, sizeof, windll,
-    Structure, WinError, POINTER,
-    c_size_t, c_char, c_void_p,
-)
-from ctypes.wintypes import DWORD, LONG
-
-ERROR_NO_MORE_FILES = 18
-INVALID_HANDLE_VALUE = c_void_p(-1).value
-
-
-class PROCESSENTRY32(Structure):
-    _fields_ = [
-        ('dwSize',              DWORD),
-        ('cntUsage',            DWORD),
-        ('th32ProcessID',       DWORD),
-        ('th32DefaultHeapID',   c_size_t),
-        ('th32ModuleID',        DWORD),
-        ('cntThreads',          DWORD),
-        ('th32ParentProcessID', DWORD),
-        ('pcPriClassBase',      LONG),
-        ('dwFlags',             DWORD),
-        ('szExeFile',           c_char * 260),
-    ]
-LPPROCESSENTRY32 = POINTER(PROCESSENTRY32)
-
-
-def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0):
-    hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags,
-                                                         th32ProcessID)
-    if hSnapshot == INVALID_HANDLE_VALUE:
-        raise WinError()
-    return hSnapshot
-
-
-def Process32First(hSnapshot, pe=None):
-    return _Process32n(windll.kernel32.Process32First, hSnapshot, pe)
-
-
-def Process32Next(hSnapshot, pe=None):
-    return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe)
-
-
-def _Process32n(fun, hSnapshot, pe=None):
-    if pe is None:
-        pe = PROCESSENTRY32()
-    pe.dwSize = sizeof(PROCESSENTRY32)
-    success = fun(hSnapshot, byref(pe))
-    if not success:
-        if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES:
-            return
-        raise WinError()
-    return pe
-
-
-def get_all_processes_pids():
-    """Return a dictionary with all processes pids as keys and their
-       parents as value. Ignore processes with no parents.
-    """
-    h = CreateToolhelp32Snapshot()
-    parents = {}
-    pe = Process32First(h)
-    while pe:
-        if pe.th32ParentProcessID:
-            parents[pe.th32ProcessID] = pe.th32ParentProcessID
-        pe = Process32Next(h, pe)
-
-    return parents
-
-
-def get_processtree_pids(pid, include_parent=True):
-    """Return a list with all the pids of a process tree"""
-    parents = get_all_processes_pids()
-    all_pids = parents.keys()
-    pids = set([pid])
-    while 1:
-        pids_new = pids.copy()
-
-        for _pid in all_pids:
-            if parents[_pid] in pids:
-                pids_new.add(_pid)
-
-        if pids_new == pids:
-            break
-
-        pids = pids_new.copy()
-
-    if not include_parent:
-        pids.remove(pid)
-
-    return list(pids)
-
-
-def kill_processtree(pid, signum):
-    """Kill a process and all its descendants"""
-    family_pids = get_processtree_pids(pid)
-
-    for _pid in family_pids:
-        os.kill(_pid, signum)

+ 6 - 0
celery/concurrency/threads.py

@@ -8,10 +8,16 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
+import os
+
 from celery.utils.compat import UserDict
 from celery.utils.compat import UserDict
 
 
 from .base import apply_target, BasePool
 from .base import apply_target, BasePool
 
 
+#: Makes sure we don't use threading.local for stacks
+#: since apparently they don't work properly.
+os.environ['USE_PURE_LOCALS'] = '1'
+
 
 
 class NullDict(UserDict):
 class NullDict(UserDict):
 
 

+ 3 - 3
celery/contrib/batches.py

@@ -21,7 +21,7 @@ A click counter that flushes the buffer every 100 messages, and every
         from collections import Counter
         count = Counter(request.kwargs['url'] for request in requests)
         for url, count in count.items():
-            print('>>> Clicks: %s -> %s' % (url, count))
+            print('>>> Clicks: {0} -> {1}'.format(url, count))

 Registering the click is done as follows:

@@ -79,7 +79,7 @@ def apply_batches_task(task, args, loglevel, logfile):
     task.push_request(loglevel=loglevel, logfile=logfile)
     try:
         result = task(*args)
-    except Exception, exc:
+    except Exception as exc:
         result = None
         logger.error('Error: %r', exc, exc_info=True)
     finally:
@@ -139,7 +139,7 @@ class Batches(Task):
         self._logging = None

     def run(self, requests):
-        raise NotImplementedError('%r must implement run(requests)' % (self, ))
+        raise NotImplementedError('must implement run(requests)')

     def flush(self, requests):
         return self.apply_buffer(requests, ([SimpleRequest.from_request(r)

+ 292 - 26
celery/contrib/migrate.py

@@ -6,22 +6,35 @@
     Migration tools.

 """
-from __future__ import absolute_import
-from __future__ import with_statement
+from __future__ import absolute_import, print_function

 import socket

 from functools import partial
+from itertools import cycle, islice

-from kombu import eventloop
+from kombu import eventloop, Queue
+from kombu.common import maybe_declare
 from kombu.exceptions import StdChannelError
 from kombu.utils.encoding import ensure_bytes

 from celery.app import app_or_default
+from celery.utils import worker_direct
+
+
+MOVING_PROGRESS_FMT = """\
+Moving task {state.filtered}/{state.strtotal}: \
+{body[task]}[{body[id]}]\
+"""
+
+
+class StopFiltering(Exception):
+    pass


 class State(object):
     count = 0
+    filtered = 0
     total_apx = 0

     @property
@@ -30,15 +43,22 @@ class State(object):
             return u'?'
         return unicode(self.total_apx)

+    def __repr__(self):
+        if self.filtered:
+            return '^{0.filtered}'.format(self)
+        return '{0.count}/{0.strtotal}'.format(self)

-def migrate_task(producer, body_, message,
+
+def republish(producer, message, exchange=None, routing_key=None,
         remove_props=['application_headers',
                       'content_type',
-                      'content_encoding']):
+                      'content_encoding',
+                      'headers']):
     body = ensure_bytes(message.body)  # use raw message body.
     info, headers, props = (message.delivery_info,
-                            message.headers,
-                            message.properties)
+                            message.headers, message.properties)
+    exchange = info['exchange'] if exchange is None else exchange
+    routing_key = info['routing_key'] if routing_key is None else routing_key
     ctype, enc = message.content_type, message.content_encoding
     # remove compression header, as this will be inserted again
     # when the message is recompressed.
@@ -47,47 +67,293 @@ def migrate_task(producer, body_, message,
     for key in remove_props:
         props.pop(key, None)

-    producer.publish(ensure_bytes(body), exchange=info['exchange'],
-                           routing_key=info['routing_key'],
-                           compression=compression,
-                           headers=headers,
-                           content_type=ctype,
-                           content_encoding=enc,
-                           **props)
+    producer.publish(ensure_bytes(body), exchange=exchange,
+                     routing_key=routing_key, compression=compression,
+                     headers=headers, content_type=ctype,
+                     content_encoding=enc, **props)
+
+
+def migrate_task(producer, body_, message, queues=None):
+    info = message.delivery_info
+    queues = {} if queues is None else queues
+    republish(producer, message,
+              exchange=queues.get(info['exchange']),
+              routing_key=queues.get(info['routing_key']))
+
+
+def filter_callback(callback, tasks):
+
+    def filtered(body, message):
+        if tasks and message.payload['task'] not in tasks:
+            return
+
+        return callback(body, message)
+    return filtered
+
+
+def migrate_tasks(source, dest, migrate=migrate_task, app=None,
+        queues=None, **kwargs):
+    app = app_or_default(app)
+    queues = prepare_queues(queues)
+    producer = app.amqp.TaskProducer(dest)
+    migrate = partial(migrate, producer, queues=queues)
+
+    def on_declare_queue(queue):
+        new_queue = queue(producer.channel)
+        new_queue.name = queues.get(queue.name, queue.name)
+        if new_queue.routing_key == queue.name:
+            new_queue.routing_key = queues.get(queue.name,
+                                               new_queue.routing_key)
+        if new_queue.exchange.name == queue.name:
+            new_queue.exchange.name = queues.get(queue.name, queue.name)
+        new_queue.declare()
+
+    return start_filter(app, source, migrate, queues=queues,
+                        on_declare_queue=on_declare_queue, **kwargs)
+
+
+def _maybe_queue(app, q):
+    if isinstance(q, basestring):
+        return app.amqp.queues[q]
+    return q
 
-def migrate_tasks(source, dest, timeout=1.0, app=None,
-        migrate=None, callback=None):
-    state = State()
+
+def move(predicate, connection=None, exchange=None, routing_key=None,
+        source=None, app=None, callback=None, limit=None, transform=None,
+        **kwargs):
+    """Find tasks by filtering them and move the tasks to a new queue.
+
+    :param predicate: Filter function used to decide which messages
+        to move.  Must accept the standard signature of ``(body, message)``
+        used by Kombu consumer callbacks. If the predicate wants the message
+        to be moved it must return either:
+
+            1) a tuple of ``(exchange, routing_key)``, or
+
+            2) a :class:`~kombu.entity.Queue` instance, or
+
+            3) any other true value which means the specified
+               ``exchange`` and ``routing_key`` arguments will be used.
+
+    :keyword connection: Custom connection to use.
+    :keyword source: Optional list of source queues to use instead of the
+        default (which is the queues in :setting:`CELERY_QUEUES`).
+        This list can also contain new :class:`~kombu.entity.Queue` instances.
+    :keyword exchange: Default destination exchange.
+    :keyword routing_key: Default destination routing key.
+    :keyword limit: Limit number of messages to filter.
+    :keyword callback: Callback called after message moved,
+        with signature ``(state, body, message)``.
+    :keyword transform: Optional function to transform the return
+        value (destination) of the filter function.
+
+    Also supports the same keyword arguments as :func:`start_filter`.
+
+    To demonstrate, the :func:`move_task_by_id` operation can be implemented
+    like this:
+
+    .. code-block:: python
+
+        def is_wanted_task(body, message):
+            if body['id'] == wanted_id:
+                return Queue('foo', exchange=Exchange('foo'),
+                             routing_key='foo')
+
+        move(is_wanted_task)
+
+    or with a transform:
+
+    .. code-block:: python
+
+        def transform(value):
+            if isinstance(value, basestring):
+                return Queue(value, Exchange(value), value)
+            return value
+
+        move(is_wanted_task, transform=transform)
+
+    The predicate may also return a tuple of ``(exchange, routing_key)``
+    to specify the destination to where the task should be moved,
+    or a :class:`~kombu.entity.Queue` instance.
+    Any other true value means that the task will be moved to the
+    default exchange/routing_key.
+
+    """
     app = app_or_default(app)
+    queues = [_maybe_queue(app, queue) for queue in source or []] or None
+    with app.connection_or_acquire(connection, pool=False) as conn:
+        producer = app.amqp.TaskProducer(conn)
+        state = State()
+
+        def on_task(body, message):
+            ret = predicate(body, message)
+            if ret:
+                if transform:
+                    ret = transform(ret)
+                if isinstance(ret, Queue):
+                    maybe_declare(ret, conn.default_channel)
+                    ex, rk = ret.exchange.name, ret.routing_key
+                else:
+                    ex, rk = expand_dest(ret, exchange, routing_key)
+                republish(producer, message,
+                        exchange=ex, routing_key=rk)
+                message.ack()
+
+                state.filtered += 1
+                if callback:
+                    callback(state, body, message)
+                if limit and state.filtered >= limit:
+                    raise StopFiltering()
+
+        return start_filter(app, conn, on_task, consume_from=queues, **kwargs)
+
+
+def expand_dest(ret, exchange, routing_key):
+    try:
+        ex, rk = ret
+    except (TypeError, ValueError):
+        ex, rk = exchange, routing_key
+    return ex, rk
+
+
+def task_id_eq(task_id, body, message):
+    return body['id'] == task_id
+
+
+def task_id_in(ids, body, message):
+    return body['id'] in ids
+
+
+def prepare_queues(queues):
+    if isinstance(queues, basestring):
+        queues = queues.split(',')
+    if isinstance(queues, list):
+        queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
+                        for q in queues)
+    if queues is None:
+        queues = {}
+    return queues
+
+
+def start_filter(app, conn, filter, limit=None, timeout=1.0,
+        ack_messages=False, tasks=None, queues=None,
+        callback=None, forever=False, on_declare_queue=None,
+        consume_from=None, state=None, **kwargs):
+    state = state or State()
+    queues = prepare_queues(queues)
+    if isinstance(tasks, basestring):
+        tasks = set(tasks.split(','))
+    if tasks is None:
+        tasks = set([])

     def update_state(body, message):
         state.count += 1
+        if limit and state.count >= limit:
+            raise StopFiltering()

-    producer = app.amqp.TaskProducer(dest)
-    if migrate is None:
-        migrate = partial(migrate_task, producer)
-    consumer = app.amqp.TaskConsumer(source)
+    def ack_message(body, message):
+        message.ack()
+
+    consumer = app.amqp.TaskConsumer(conn, queues=consume_from)
+
+    if tasks:
+        filter = filter_callback(filter, tasks)
+        update_state = filter_callback(update_state, tasks)
+        ack_message = filter_callback(ack_message, tasks)
+
+    consumer.register_callback(filter)
     consumer.register_callback(update_state)
+    if ack_messages:
+        consumer.register_callback(ack_message)
     if callback is not None:
         callback = partial(callback, state)
+        if tasks:
+            callback = filter_callback(callback, tasks)
         consumer.register_callback(callback)
-    consumer.register_callback(migrate)

     # declare all queues on the new broker.
     for queue in consumer.queues:
-        queue(producer.channel).declare()
+        if queues and queue.name not in queues:
+            continue
+        if on_declare_queue is not None:
+            on_declare_queue(queue)
         try:
             _, mcount, _ = queue(consumer.channel).queue_declare(passive=True)
             if mcount:
                 state.total_apx += mcount
-        except source.channel_errors + (StdChannelError, ):
+        except conn.channel_errors + (StdChannelError, ):
             pass

     # start migrating messages.
     with consumer:
         try:
-            for _ in eventloop(source, timeout=timeout):  # pragma: no cover
+            for _ in eventloop(conn,  # pragma: no cover
+                               timeout=timeout, ignore_timeouts=forever):
                 pass
         except socket.timeout:
-            return
+            pass
+        except StopFiltering:
+            pass
+    return state
+
+
+def move_task_by_id(task_id, dest, **kwargs):
+    """Find a task by id and move it to another queue.
+
+    :param task_id: Id of task to move.
+    :param dest: Destination queue.
+
+    Also supports the same keyword arguments as :func:`move`.
+
+    """
+    return move_by_idmap({task_id: dest}, **kwargs)
+
+
+def move_by_idmap(map, **kwargs):
+    """Moves tasks by matching from a ``task_id: queue`` mapping,
+    where ``queue`` is a queue to move the task to.
+
+    Example::
+
+        >>> reroute_idmap({
+        ...     '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue(...),
+        ...     'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue(...),
+        ...     '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue(...)},
+        ...   queues=['hipri'])
+
+    """
+    def task_id_in_map(body, message):
+        return map.get(body['id'])
+
+    # adding the limit means that we don't have to consume any more
+    # when we've found everything.
+    return move(task_id_in_map, limit=len(map), **kwargs)
+
+
+def move_by_taskmap(map, **kwargs):
+    """Moves tasks by matching from a ``task_name: queue`` mapping,
+    where ``queue`` is the queue to move the task to.
+
+    Example::
+
+        >>> reroute_idmap({
+        ...     'tasks.add': Queue(...),
+        ...     'tasks.mul': Queue(...),
+        ... })
+
+    """
+
+    def task_name_in_map(body, message):
+        return map.get(body['task'])  # <- name of task
+
+    return move(task_name_in_map, **kwargs)
+
+
+def filter_status(state, body, message, **kwargs):
+    print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))
+
+
+move_direct = partial(move, transform=worker_direct)
+move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
+move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
+move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)

+ 30 - 15
celery/contrib/rdb.py

@@ -34,13 +34,14 @@ Inspired by http://snippets.dzone.com/posts/show/7248
     base port.  The selected port will be logged by the worker.

 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function

 import errno
 import os
 import socket
 import sys

+from itertools import imap
 from pdb import Pdb

 from billiard import current_process
@@ -55,6 +56,23 @@ _current = [None]

 _frame = getattr(sys, '_getframe')

+NO_AVAILABLE_PORT = """\
+{self.ident}: Couldn't find an available port.
+
+Please specify one using the CELERY_RDB_PORT environment variable.
+"""
+
+BANNER = """\
+{self.ident}: Please telnet into {self.host} {self.port}.
+
+Type `exit` in session to continue.
+
+{self.ident}: Waiting for client...
+"""
+
+SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.'
+SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'


 class Rdb(Pdb):
     me = 'Remote Debugger'
@@ -71,15 +89,14 @@ class Rdb(Pdb):
         self._sock, this_port = self.get_avail_port(host, port,
             port_search_limit, port_skew)
         self._sock.listen(1)
-        me = '%s:%s' % (self.me, this_port)
-        context = self.context = {'me': me, 'host': host, 'port': this_port}
-        self.say('%(me)s: Please telnet %(host)s %(port)s.'
-                 '  Type `exit` in session to continue.' % context)
-        self.say('%(me)s: Waiting for client...' % context)
+        self.ident = '{0}:{1}'.format(self.me, this_port)
+        self.host = host
+        self.port = this_port
+        self.say(BANNER.format(self=self))

         self._client, address = self._sock.accept()
-        context['remote_addr'] = ':'.join(map(str, address))
-        self.say('%(me)s: In session with %(remote_addr)s' % context)
+        self.remote_addr = ':'.join(imap(str, address))
+        self.say(SESSION_STARTED.format(self=self))
         self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
         Pdb.__init__(self, completekey='tab',
                            stdin=self._handle, stdout=self._handle)
@@ -96,19 +113,17 @@ class Rdb(Pdb):
             this_port = port + skew + i
             try:
                 _sock.bind((host, this_port))
-            except socket.error, exc:
+            except socket.error as exc:
                 if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
                     continue
                 raise
             else:
                 return _sock, this_port
         else:
-            raise Exception(
-                '%s: Could not find available port. Please set using '
-                'environment variable CELERY_RDB_PORT' % (self.me, ))
+            raise Exception(NO_AVAILABLE_PORT.format(self=self))

     def say(self, m):
-        self.out.write(m + '\n')
+        print(m, file=self.out)

     def _close_session(self):
         self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
@@ -116,7 +131,7 @@ class Rdb(Pdb):
         self._client.close()
         self._sock.close()
         self.active = False
-        self.say('%(me)s: Session %(remote_addr)s ended.' % self.context)
+        self.say(SESSION_ENDED.format(self=self))

     def do_continue(self, arg):
         self._close_session()
@@ -135,7 +150,7 @@ class Rdb(Pdb):
             frame = _frame().f_back
         try:
             Pdb.set_trace(self, frame)
-        except socket.error, exc:
+        except socket.error as exc:
             # connection reset by peer.
             if exc.errno != errno.ECONNRESET:
                 raise
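
The typical entry point for the debugger, unchanged by this patch — a sketch with a hypothetical task:

    from celery import task
    from celery.contrib import rdb

    @task
    def add(x, y):
        result = x + y
        rdb.set_trace()  # blocks until you telnet into the advertised port
        return result
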

+ 23 - 17
celery/datastructures.py

@@ -6,13 +6,13 @@
     Custom types and data structures.

 """
-from __future__ import absolute_import
-from __future__ import with_statement
+from __future__ import absolute_import, print_function

 import sys
 import time

 from collections import defaultdict
+from functools import partial
 from itertools import chain

 from billiard.einfo import ExceptionInfo  # noqa
@@ -164,16 +164,17 @@ class DependencyGraph(object):
         :param fh: A file, or a file-like object to write the graph to.

         """
-        fh.write('digraph dependencies {\n')
+        P = partial(print, file=fh)
+        P('digraph dependencies {')
         for obj, adjacent in self.iteritems():
             if not adjacent:
-                fh.write(ws + '"%s"\n' % (obj, ))
+                P(ws + '"{0}"'.format(obj))
             for req in adjacent:
-                fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
-        fh.write('}\n')
+                P(ws + '"{0}" -> "{1}"'.format(obj, req))
+        P('}')

     def __iter__(self):
-        return self.adjacent.iterkeys()
+        return iter(self.adjacent)

     def __getitem__(self, node):
         return self.adjacent[node]
@@ -191,11 +192,11 @@ class DependencyGraph(object):
     def __repr__(self):
         return '\n'.join(self.repr_node(N) for N in self)

-    def repr_node(self, obj, level=1):
-        output = ['%s(%s)' % (obj, self.valency_of(obj))]
+    def repr_node(self, obj, level=1, fmt='{0}({1})'):
+        output = [fmt.format(obj, self.valency_of(obj))]
         if obj in self:
             for other in self[obj]:
-                d = '%s(%s)' % (other, self.valency_of(other))
+                d = fmt.format(other, self.valency_of(other))
                 output.append('     ' * level + d)
                 output.extend(self.repr_node(other, level + 1).split('\n')[1:])
         return '\n'.join(output)
@@ -214,7 +215,8 @@ class AttributeDictMixin(object):
             return self[k]
         except KeyError:
             raise AttributeError(
-                "'%s' object has no attribute '%s'" % (type(self).__name__, k))
+                "{0!r} object has no attribute {1!r}".format(
+                    type(self).__name__, k))

     def __setattr__(self, key, value):
         """`d[key] = value -> d.key = value`"""
@@ -262,11 +264,11 @@ class DictAttribute(object):
         return hasattr(self.obj, key)

     def _iterate_keys(self):
-        return vars(self.obj).iterkeys()
+        return iter(vars(self.obj))
     iterkeys = _iterate_keys

     def __iter__(self):
-        return self.iterkeys()
+        return self._iterate_keys()

     def _iterate_items(self):
         return vars(self.obj).iteritems()
@@ -278,7 +280,7 @@ class DictAttribute(object):
     else:

         def keys(self):
-            return list(self._iterate_keys())
+            return list(self)

         def items(self):
             return list(self._iterate_items())
@@ -302,6 +304,10 @@ class ConfigurationView(AttributeDictMixin):
         self.__dict__.update(changes=changes, defaults=defaults,
                              _order=[changes] + defaults)

+    def add_defaults(self, d):
+        self.defaults.insert(0, d)
+        self._order.insert(1, d)
+
     def __getitem__(self, key):
         for d in self._order:
             try:
@@ -342,7 +348,7 @@ class ConfigurationView(AttributeDictMixin):
         return repr(dict(self.iteritems()))

     def __iter__(self):
-        return self.iterkeys()
+        return self._iterate_keys()

     def _iter(self, op):
         # defaults must be first in the stream, so values in
@@ -350,7 +356,7 @@ class ConfigurationView(AttributeDictMixin):
         return chain(*[op(d) for d in reversed(self._order)])

     def _iterate_keys(self):
-        return uniq(self._iter(lambda d: d.iterkeys()))
+        return uniq(self._iter(lambda d: d))
     iterkeys = _iterate_keys

     def _iterate_items(self):
@@ -433,7 +439,7 @@ class LimitedSet(object):
         return iter(self._data)

     def __repr__(self):
-        return 'LimitedSet(%r)' % (self._data.keys(), )
+        return 'LimitedSet({0!r})'.format(list(self._data))

     @property
     def chronologically(self):
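
A quick check of the reworked iteration and dot output — a sketch; the node names are made up:

    import sys

    from celery.datastructures import DependencyGraph

    g = DependencyGraph()
    g.add_arc('pool')
    g.add_arc('consumer')
    g.add_edge('consumer', 'pool')  # consumer depends on pool
    list(g)                         # __iter__ now yields the vertices
    g.to_dot(sys.stdout)            # written via print(..., file=fh)
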

+ 15 - 25
celery/events/__init__.py

@@ -9,7 +9,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import time
 import socket
@@ -19,7 +18,8 @@ from collections import deque
 from contextlib import contextmanager
 from copy import copy

-from kombu import eventloop, Exchange, Queue, Consumer, Producer
+from kombu import Exchange, Queue, Producer
+from kombu.mixins import ConsumerMixin
 from kombu.utils import cached_property

 from celery.app import app_or_default
@@ -132,7 +132,7 @@ class EventDispatcher(object):
                 try:
                     self.publisher.publish(event,
                                            routing_key=type.replace('-', '.'))
-                except Exception, exc:
+                except Exception as exc:
                     if not self.buffer_while_offline:
                         raise
                     self._outbound_buffer.append((type, fields, exc))
@@ -154,7 +154,7 @@ class EventDispatcher(object):
         self.publisher = None


-class EventReceiver(object):
+class EventReceiver(ConsumerMixin):
     """Capture events.

     :param connection: Connection to the broker.
@@ -165,14 +165,12 @@ class EventReceiver(object):
     handler.

     """
-    handlers = {}

     def __init__(self, connection, handlers=None, routing_key='#',
             node_id=None, app=None, queue_prefix='celeryev'):
         self.app = app_or_default(app)
         self.connection = connection
-        if handlers is not None:
-            self.handlers = handlers
+        self.handlers = {} if handlers is None else handlers
         self.routing_key = routing_key
         self.node_id = node_id or uuid()
         self.queue_prefix = queue_prefix
@@ -191,21 +189,17 @@ class EventReceiver(object):
         handler = self.handlers.get(type) or self.handlers.get('*')
         handler and handler(event)

-    @contextmanager
-    def consumer(self, wakeup=True):
-        """Create event consumer."""
-        consumer = Consumer(self.connection,
-                            queues=[self.queue], no_ack=True)
-        consumer.register_callback(self._receive)
-        with consumer:
-            if wakeup:
-                self.wakeup_workers(channel=consumer.channel)
-            yield consumer
+    def get_consumers(self, Consumer, channel):
+        return [Consumer(queues=[self.queue],
+                         callbacks=[self._receive], no_ack=True)]
+
+    def on_consume_ready(self, connection, channel, consumers,
+            wakeup=True, **kwargs):
+        if wakeup:
+            self.wakeup_workers(channel=channel)

     def itercapture(self, limit=None, timeout=None, wakeup=True):
-        with self.consumer(wakeup=wakeup) as consumer:
-            yield consumer
-            self.drain_events(limit=limit, timeout=timeout)
+        return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)

     def capture(self, limit=None, timeout=None, wakeup=True):
         """Open up a consumer capturing events.
@@ -214,17 +208,13 @@ class EventReceiver(object):
         stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.

         """
-        list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup))
+        return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))

     def wakeup_workers(self, channel=None):
         self.app.control.broadcast('heartbeat',
                                    connection=self.connection,
                                    channel=channel)

-    def drain_events(self, **kwargs):
-        for _ in eventloop(self.connection, **kwargs):
-            pass
-
     def _receive(self, body, message):
         type = body.pop('type').lower()
         clock = body.get('clock')
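
With the receiver now built on kombu's ConsumerMixin, draining events reduces to the following — a sketch assuming an existing `app` instance:

    from celery.events import EventReceiver

    def on_event(event):
        print('event: %r' % (event, ))

    with app.connection() as conn:
        recv = EventReceiver(conn, handlers={'*': on_event}, app=app)
        recv.capture(limit=10, timeout=5)  # the mixin drives the loop
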

+ 25 - 23
celery/events/cursesmon.py

@@ -6,8 +6,7 @@
     Graphical monitor of Celery events using curses.

 """
-from __future__ import absolute_import
-from __future__ import with_statement
+from __future__ import absolute_import, print_function

 import curses
 import sys
@@ -19,7 +18,7 @@ from itertools import count
 from textwrap import wrap
 from math import ceil

-from celery import __version__
+from celery import VERSION_BANNER
 from celery import states
 from celery.app import app_or_default
 from celery.utils.text import abbr, abbrtask
@@ -35,6 +34,10 @@ MIN_TASK_WIDTH = 16
 # this module is considered experimental
 # we don't care about coverage.

+STATUS_SCREEN = """\
+events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
+"""
+

 class CursesMonitor(object):  # pragma: no cover
     keymap = {}
@@ -49,7 +52,7 @@ class CursesMonitor(object):  # pragma: no cover
     online_str = 'Workers online: '
     help_title = 'Keys: '
     help = ('j:up k:down i:info t:traceback r:result c:revoke ^c: quit')
-    greet = 'celeryev %s' % __version__
+    greet = 'celeryev {0}'.format(VERSION_BANNER)
     info_str = 'Info: '

     def __init__(self, state, keymap=None, app=None):
@@ -87,7 +90,8 @@ class CursesMonitor(object):  # pragma: no cover
         state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
         timestamp = timestamp.ljust(TIMESTAMP_WIDTH)

-        row = '%s %s %s %s %s ' % (uuid, worker, task, timestamp, state)
+        row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
+                                            timestamp, state)
         if self.screen_width is None:
             self.screen_width = len(row[:mx])
         return row[:mx]
@@ -201,7 +205,7 @@ class CursesMonitor(object):  # pragma: no cover
                 curline = y()

                 host, response = subreply.items()[0]
-                host = '%s: ' % host
+                host = '{0}: '.format(host)
                 self.win.addstr(curline, 3, host, curses.A_BOLD)
                 attr = curses.A_NORMAL
                 text = ''
@@ -275,7 +279,7 @@ class CursesMonitor(object):  # pragma: no cover
                                 curses.A_NORMAL)

         return self.alert(alert_callback,
-                'Task details for %s' % self.selected_task)
+                'Task details for {0.selected_task}'.format(self))

     def selection_traceback(self):
         if not self.selected_task:
@@ -290,7 +294,7 @@ class CursesMonitor(object):  # pragma: no cover
                 self.win.addstr(y(), 3, line)

         return self.alert(alert_callback,
-                'Task Exception Traceback for %s' % self.selected_task)
+                'Task Exception Traceback for {0.selected_task}'.format(self))

     def selection_result(self):
         if not self.selected_task:
@@ -305,7 +309,7 @@ class CursesMonitor(object):  # pragma: no cover
                 self.win.addstr(y(), 3, line)

         return self.alert(alert_callback,
-                'Task Result for %s' % self.selected_task)
+                'Task Result for {0.selected_task}'.format(self))

     def display_task_row(self, lineno, task):
         state_color = self.state_colors.get(task.state)
@@ -364,13 +368,12 @@ class CursesMonitor(object):  # pragma: no cover
             except KeyError:
                 pass
             else:
-                info = selection.info(['args', 'kwargs',
-                                       'result', 'runtime', 'eta'])
+                info = selection.info()
                 if 'runtime' in info:
-                    info['runtime'] = '%.2fs' % info['runtime']
+                    info['runtime'] = '{0:.2f}s'.format(info['runtime'])
                 if 'result' in info:
                     info['result'] = abbr(info['result'], 16)
-                info = ' '.join('%s=%s' % (key, value)
+                info = ' '.join('{0}={1}'.format(key, value)
                             for key, value in info.items())
                 detail = '... -> key i'
             infowin = abbr(info,
@@ -396,11 +399,10 @@ class CursesMonitor(object):  # pragma: no cover
         # Info
         win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
         win.addstr(my - 3, x + len(self.info_str),
-                'events:%s tasks:%s workers:%s/%s' % (
-                    self.state.event_count, self.state.task_count,
-                    len([w for w in self.state.workers.values()
-                            if w.alive]),
-                    len(self.state.workers)),
+                STATUS_SCREEN.format(s=self.state,
+                    w_alive=len([w for w in self.state.workers.values()
+                                    if w.alive]),
+                    w_all=len(self.state.workers)),
                 curses.A_DIM)

         # Help
@@ -475,11 +477,11 @@ class DisplayThread(threading.Thread):  # pragma: no cover
 def capture_events(app, state, display):  # pragma: no cover

     def on_connection_error(exc, interval):
-        sys.stderr.write('Connection Error: %r. Retry in %ss.' % (
-            exc, interval))
+        print('Connection Error: {0!r}. Retry in {1}s.'.format(
+                exc, interval), file=sys.stderr)

     while 1:
-        sys.stderr.write('-> evtop: starting capture...\n')
+        print('-> evtop: starting capture...', file=sys.stderr)
         with app.connection() as conn:
             try:
                 conn.ensure_connection(on_connection_error,
@@ -489,8 +491,8 @@ def capture_events(app, state, display):  # pragma: no cover
                 display.init_screen()
                 with recv.consumer():
                     recv.drain_events(timeout=1, ignore_timeouts=True)
-            except (conn.connection_errors, conn.channel_errors), exc:
-                sys.stderr.write('Connection lost: %r' % (exc, ))
+            except conn.connection_errors + conn.channel_errors as exc:
+                print('Connection lost: {0!r}'.format(exc), file=sys.stderr)


 def evtop(app=None):  # pragma: no cover

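Note: `STATUS_SCREEN` itself is defined outside this hunk. Judging only from the keyword arguments it is given above (`s`, `w_alive`, `w_all`), a template of roughly this shape would satisfy the call; a hedged sketch, not the actual definition:

    # Hedged sketch only -- the real STATUS_SCREEN lives elsewhere in
    # cursesmon.py; this template merely matches the .format() call above.
    STATUS_SCREEN = 'events:{s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}'

    class FakeState(object):   # stand-in for celery.events.state.State
        event_count = 10
        task_count = 4

    print(STATUS_SCREEN.format(s=FakeState(), w_alive=2, w_all=3))
    # -> events:10 tasks:4 workers:2/3
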
+ 11 - 14
celery/events/dumper.py

@@ -7,7 +7,7 @@
     as they happen.  Think of it like a `tcpdump` for Celery events.

 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function

 import sys

@@ -31,17 +31,13 @@ def humanize_type(type):
         return type.lower().replace('-', ' ')


-def say(msg, out=sys.stdout):
-    out.write(msg + '\n')
-
-
 class Dumper(object):

     def __init__(self, out=sys.stdout):
         self.out = out

     def say(self, msg):
-        say(msg, out=self.out)
+        print(msg, file=self.out)

     def on_event(self, event):
         timestamp = datetime.utcfromtimestamp(event.pop('timestamp'))
@@ -50,7 +46,8 @@ class Dumper(object):
         if type.startswith('task-'):
             uuid = event.pop('uuid')
             if type in ('task-received', 'task-sent'):
-                task = TASK_NAMES[uuid] = '%s(%s) args=%s kwargs=%s' % (
+                task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \
+                    .format(
                         event.pop('name'), uuid,
                         event.pop('args'),
                         event.pop('kwargs'))
@@ -58,17 +55,17 @@ class Dumper(object):
                 task = TASK_NAMES.get(uuid, '')
             return self.format_task_event(hostname, timestamp,
                                           type, task, event)
-        fields = ', '.join('%s=%s' % (key, event[key])
-                        for key in sorted(event.keys()))
+        fields = ', '.join('{0}={1}'.format(key, event[key])
+                        for key in sorted(event))
         sep = fields and ':' or ''
-        self.say('%s [%s] %s%s %s' % (hostname, timestamp,
-                                      humanize_type(type), sep, fields))
+        self.say('{0} [{1}] {2}{3} {4}'.format(hostname, timestamp,
+                                            humanize_type(type), sep, fields))

     def format_task_event(self, hostname, timestamp, type, task, event):
-        fields = ', '.join('%s=%s' % (key, event[key])
-                        for key in sorted(event.keys()))
+        fields = ', '.join('{0}={1}'.format(key, event[key])
+                        for key in sorted(event))
         sep = fields and ':' or ''
-        self.say('%s [%s] %s%s %s %s' % (hostname, timestamp,
+        self.say('{0} [{1}] {2}{3} {4} {5}'.format(hostname, timestamp,
                     humanize_type(type), sep, task, fields))



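With the module-level `say()` helper folded into the class, redirecting dumper output is just a matter of passing a file-like object. A small usage sketch (assumes this version of celery is importable; Python 2 era, hence `StringIO`):

    from StringIO import StringIO
    from celery.events.dumper import Dumper

    buf = StringIO()
    Dumper(out=buf).say('hello')      # goes through print(msg, file=self.out)
    assert buf.getvalue() == 'hello\n'
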
+ 2 - 3
celery/events/snapshot.py

@@ -94,9 +94,8 @@ def evcam(camera, freq=1.0, maxrate=None, loglevel=0,

     app.log.setup_logging_subsystem(loglevel, logfile)

-    logger.info(
-        '-> evcam: Taking snapshots with %s (every %s secs.)\n' % (
-            camera, freq))
+    print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format(
+                camera, freq))
     state = app.events.State()
     cam = instantiate(camera, state, app=app, freq=freq,
                       maxrate=maxrate, timer=timer)

+ 36 - 29
celery/events/state.py

@@ -17,11 +17,10 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import heapq
+import threading

-from threading import Lock
 from time import time

 from kombu.utils import kwdict
@@ -55,17 +54,17 @@ class Worker(Element):
         self.heartbeats = []

     def on_online(self, timestamp=None, **kwargs):
-        """Callback for the `worker-online` event."""
+        """Callback for the :event:`worker-online` event."""
         self.update(**kwargs)
         self._heartpush(timestamp)

     def on_offline(self, **kwargs):
-        """Callback for the `worker-offline` event."""
+        """Callback for the :event:`worker-offline` event."""
         self.update(**kwargs)
         self.heartbeats = []

     def on_heartbeat(self, timestamp=None, **kwargs):
-        """Callback for the `worker-heartbeat` event."""
+        """Callback for the :event:`worker-heartbeat` event."""
         self.update(**kwargs)
         self._heartpush(timestamp)

@@ -76,8 +75,11 @@ class Worker(Element):
                 self.heartbeats = self.heartbeats[self.heartbeat_max:]

     def __repr__(self):
-        return '<Worker: %s (%s)' % (self.hostname,
-                                     self.alive and 'ONLINE' or 'OFFLINE')
+        return '<Worker: {0.hostname} ({0.status_string})'.format(self)
+
+    @property
+    def status_string(self):
+        return 'ONLINE' if self.alive else 'OFFLINE'

     @property
     def heartbeat_expires(self):
@@ -93,8 +95,8 @@ class Task(Element):
     """Task State."""
     """Task State."""
 
 
     #: How to merge out of order events.
     #: How to merge out of order events.
-    #: Disorder is detected by logical ordering (e.g. task-received must have
-    #: happened before a task-failed event).
+    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
+    #: must have happened before a :event:`task-failed` event).
     #:
     #:
     #: A merge rule consists of a state and a list of fields to keep from
     #: A merge rule consists of a state and a list of fields to keep from
     #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args
     #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args
@@ -105,7 +107,8 @@ class Task(Element):

     #: :meth:`info` displays these fields by default.
     _info_fields = ('args', 'kwargs', 'retries', 'result',
-                    'eta', 'runtime', 'expires', 'exception')
+                    'eta', 'runtime', 'expires', 'exception',
+                    'exchange', 'routing_key')

     #: Default values.
     _defaults = dict(uuid=None, name=None, state=states.PENDING,
@@ -114,7 +117,7 @@ class Task(Element):
                      revoked=False, args=None, kwargs=None, eta=None,
                      expires=None, retries=None, worker=None, result=None,
                      exception=None, timestamp=None, runtime=None,
-                     traceback=None)
+                     traceback=None, exchange=None, routing_key=None)

     def __init__(self, **fields):
         super(Task, self).__init__(**dict(self._defaults, **fields))
@@ -146,37 +149,37 @@ class Task(Element):
             super(Task, self).update(fields)

     def on_sent(self, timestamp=None, **fields):
-        """Callback for the ``task-sent`` event."""
+        """Callback for the :event:`task-sent` event."""
         self.sent = timestamp
         self.update(states.PENDING, timestamp, fields)

     def on_received(self, timestamp=None, **fields):
-        """Callback for the ``task-received`` event."""
+        """Callback for the :event:`task-received` event."""
         self.received = timestamp
         self.update(states.RECEIVED, timestamp, fields)

     def on_started(self, timestamp=None, **fields):
-        """Callback for the ``task-started`` event."""
+        """Callback for the :event:`task-started` event."""
         self.started = timestamp
         self.update(states.STARTED, timestamp, fields)

     def on_failed(self, timestamp=None, **fields):
-        """Callback for the ``task-failed`` event."""
+        """Callback for the :event:`task-failed` event."""
         self.failed = timestamp
         self.update(states.FAILURE, timestamp, fields)

     def on_retried(self, timestamp=None, **fields):
-        """Callback for the ``task-retried`` event."""
+        """Callback for the :event:`task-retried` event."""
         self.retried = timestamp
         self.update(states.RETRY, timestamp, fields)

     def on_succeeded(self, timestamp=None, **fields):
-        """Callback for the ``task-succeeded`` event."""
+        """Callback for the :event:`task-succeeded` event."""
         self.succeeded = timestamp
         self.update(states.SUCCESS, timestamp, fields)

     def on_revoked(self, timestamp=None, **fields):
-        """Callback for the ``task-revoked`` event."""
+        """Callback for the :event:`task-revoked` event."""
         self.revoked = timestamp
         self.update(states.REVOKED, timestamp, fields)

@@ -185,14 +188,18 @@ class Task(Element):

     def info(self, fields=None, extra=[]):
         """Information about this task suitable for on-screen display."""
-        if fields is None:
-            fields = self._info_fields
-        return dict((key, getattr(self, key, None))
-                        for key in list(fields) + list(extra)
-                            if getattr(self, key, None) is not None)
+        fields = self._info_fields if fields is None else fields
+
+        def _keys():
+            for key in list(fields) + list(extra):
+                value = getattr(self, key, None)
+                if value is not None:
+                    yield key, value
+
+        return dict(_keys())

     def __repr__(self):
-        return '<Task: %s(%s) %s>' % (self.name, self.uuid, self.state)
+        return '<Task: {0.name}({0.uuid}) {0.state}>'.format(self)

     @property
     def ready(self):
@@ -211,7 +218,7 @@ class State(object):
         self.event_callback = callback
         self.group_handlers = {'worker': self.worker_event,
                                'task': self.task_event}
-        self._mutex = Lock()
+        self._mutex = threading.Lock()

     def freeze_while(self, fun, *args, **kwargs):
         clear_after = kwargs.pop('clear_after', False)
@@ -268,7 +275,7 @@ class State(object):
         hostname = fields.pop('hostname', None)
         if hostname:
             worker = self.get_or_create_worker(hostname)
-            handler = getattr(worker, 'on_%s' % type, None)
+            handler = getattr(worker, 'on_' + type, None)
             if handler:
                 handler(**fields)

@@ -278,7 +285,7 @@ class State(object):
         hostname = fields.pop('hostname')
         worker = self.get_or_create_worker(hostname)
         task = self.get_or_create_task(uuid)
-        handler = getattr(task, 'on_%s' % type, None)
+        handler = getattr(task, 'on_' + type, None)
         if type == 'received':
             self.task_count += 1
         if handler:
@@ -347,8 +354,8 @@ class State(object):
         return [w for w in self.workers.values() if w.alive]

     def __repr__(self):
-        return '<ClusterState: events=%s tasks=%s>' % (self.event_count,
-                                                       self.task_count)
+        return '<State: events={0.event_count} tasks={0.task_count}>' \
+                    .format(self)


 state = State()

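The rewritten `Task.info()` keeps the old contract (fields whose value is `None` are dropped) but expresses it with a named generator instead of a nested comprehension. A self-contained sketch of the same pattern, detached from the `Task` class:

    # Minimal sketch of the info() filtering pattern used above.
    class Info(object):
        args = '(2, 2)'
        kwargs = None      # None means "unset": it must not appear in info()
        runtime = 0.31

        def info(self, fields=('args', 'kwargs', 'runtime'), extra=[]):
            def _keys():
                for key in list(fields) + list(extra):
                    value = getattr(self, key, None)
                    if value is not None:
                        yield key, value
            return dict(_keys())

    assert Info().info() == {'args': '(2, 2)', 'runtime': 0.31}
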
+ 26 - 5
celery/exceptions.py

@@ -13,7 +13,7 @@ from billiard.exceptions import (  # noqa
 )

 UNREGISTERED_FMT = """\
-Task of kind %s is not registered, please make sure it's imported.\
+Task of kind {0} is not registered, please make sure it's imported.\
 """


@@ -41,7 +41,7 @@ class NotRegistered(KeyError):
     """The task is not registered."""
     """The task is not registered."""
 
 
     def __repr__(self):
     def __repr__(self):
-        return UNREGISTERED_FMT % str(self)
+        return UNREGISTERED_FMT.format(self)
 
 
 
 
 class AlreadyRegistered(Exception):
 class AlreadyRegistered(Exception):
@@ -59,9 +59,30 @@ class MaxRetriesExceededError(Exception):
 class RetryTaskError(Exception):
     """The task is to be retried later."""

-    def __init__(self, message, exc, *args, **kwargs):
-        self.exc = exc
-        Exception.__init__(self, message, exc, *args, **kwargs)
+    def __init__(self, message=None, exc=None, when=None, **kwargs):
+        from kombu.utils.encoding import safe_repr
+        self.message = message
+        if isinstance(exc, basestring):
+            self.exc, self.excs = None, exc
+        else:
+            self.exc, self.excs = exc, safe_repr(exc) if exc else None
+        self.when = when
+        Exception.__init__(self, exc, when, **kwargs)
+
+    def humanize(self):
+        if isinstance(self.when, int):
+            return 'in {0.when}s'.format(self)
+        return 'at {0.when}'.format(self)
+
+    def __str__(self):
+        if self.message:
+            return self.message
+        if self.excs:
+            return 'Retry {0}: {1!r}'.format(self.humanize(), self.excs)
+        return 'Retry {0}'.format(self.humanize())
+
+    def __reduce__(self):
+        return self.__class__, (self.message, self.excs, self.when)


 class TaskRevokedError(Exception):

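The retry exception now remembers what failed (`exc`), a safe string form of it (`excs`), and when the retry happens (`when`), and renders itself. A hedged usage sketch (the exact string depends on `safe_repr`):

    from celery.exceptions import RetryTaskError

    err = RetryTaskError(exc=KeyError('job'), when=5)
    print(str(err))    # roughly: Retry in 5s: "KeyError('job',)"

    # __reduce__ pickles the repr string instead of the original exception,
    # so the error stays picklable even if the wrapped exception is not:
    import pickle
    err2 = pickle.loads(pickle.dumps(err))
    assert err2.when == 5
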
+ 2 - 2
celery/loaders/__init__.py

@@ -23,13 +23,13 @@ def get_loader_cls(loader):
     return symbol_by_name(loader, LOADER_ALIASES)


-@deprecated(deprecation='2.5', removal='3.0',
+@deprecated(deprecation='2.5', removal='4.0',
         alternative='celery.current_app.loader')
 def current_loader():
     return current_app.loader


-@deprecated(deprecation='2.5', removal='3.0',
+@deprecated(deprecation='2.5', removal='4.0',
             alternative='celery.current_app.conf')
 def load_settings():
     return current_app.conf

+ 18 - 5
celery/loaders/base.py

@@ -14,6 +14,7 @@ import os
 import re

 from datetime import datetime
+from itertools import imap

 from kombu.utils.encoding import safe_str

@@ -26,7 +27,7 @@ from celery.utils.functional import maybe_list
 BUILTIN_MODULES = frozenset()

 ERROR_ENVVAR_NOT_SET = (
-"""The environment variable %r is not set,
+"""The environment variable {0!r} is not set,
 and as such the configuration could not be loaded.
 Please set this variable and make it point to
 a configuration module.""")
@@ -45,6 +46,9 @@ class BaseLoader(object):
         * What happens when the worker starts?
             See :meth:`on_worker_init`.

+        * What happens when the worker shuts down?
+            See :meth:`on_worker_shutdown`.
+
         * What modules are imported to find tasks?

     """
@@ -79,6 +83,11 @@ class BaseLoader(object):
         starts."""
         starts."""
         pass
         pass
 
 
+    def on_worker_shutdown(self):
+        """This method is called when the worker (:program:`celery worker`)
+        shuts down."""
+        pass
+
     def on_worker_process_init(self):
     def on_worker_process_init(self):
         """This method is called when a child process starts."""
         """This method is called when a child process starts."""
         pass
         pass
@@ -107,6 +116,9 @@ class BaseLoader(object):
             self.import_default_modules()
             self.on_worker_init()

+    def shutdown_worker(self):
+        self.on_worker_shutdown()
+
     def init_worker_process(self):
         self.on_worker_process_init()

@@ -115,7 +127,8 @@ class BaseLoader(object):
         if not module_name:
             if silent:
                 return False
-            raise ImproperlyConfigured(self.error_envvar_not_set % module_name)
+            raise ImproperlyConfigured(
+                    self.error_envvar_not_set.format(module_name))
         return self.config_from_object(module_name, silent=silent)

     def config_from_object(self, obj, silent=False):
@@ -173,12 +186,12 @@ class BaseLoader(object):
             else:
                 try:
                     value = NAMESPACES[ns][key].to_python(value)
-                except ValueError, exc:
+                except ValueError as exc:
                     # display key name in error message.
-                    raise ValueError('%r: %s' % (ns_key, exc))
+                    raise ValueError('{0!r}: {1}'.format(ns_key, exc))
             return ns_key, value

-        return dict(map(getarg, args))
+        return dict(imap(getarg, args))

     def mail_admins(self, subject, body, fail_silently=False,
             sender=None, to=None, host=None, port=None,

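Loaders can now hook worker shutdown symmetrically with startup. A hedged sketch of a custom loader using the new hook (the class name and prints are invented for illustration):

    from celery.loaders.base import BaseLoader

    class AuditLoader(BaseLoader):     # hypothetical example loader

        def on_worker_init(self):
            print('worker starting')

        def on_worker_shutdown(self):
            # invoked via shutdown_worker() when `celery worker` exits
            print('worker stopping; flushing audit trail')
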
+ 10 - 10
celery/loaders/default.py

@@ -25,12 +25,12 @@ DEFAULT_CONFIG_MODULE = 'celeryconfig'
 C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False))

 CONFIG_INVALID_NAME = """
-Error: Module '%(module)s' doesn't exist, or it's not a valid \
+Error: Module '{module}' doesn't exist, or it's not a valid \
 Python module name.
 """

 CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """
-Did you mean '%(suggest)s'?
+Did you mean '{suggest}'?
 """


@@ -53,18 +53,18 @@ class Loader(BaseLoader):
         except NotAPackage:
             if configname.endswith('.py'):
                 raise NotAPackage, NotAPackage(
-                        CONFIG_WITH_SUFFIX % {
-                            'module': configname,
-                            'suggest': configname[:-3]}), sys.exc_info()[2]
+                        CONFIG_WITH_SUFFIX.format(
+                            module=configname,
+                            suggest=configname[:-3])), sys.exc_info()[2]
             raise NotAPackage, NotAPackage(
-                    CONFIG_INVALID_NAME % {
-                        'module': configname}), sys.exc_info()[2]
+                    CONFIG_INVALID_NAME.format(
+                        module=configname)), sys.exc_info()[2]
         except ImportError:
             # billiard sets this if forked using execv
             if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'):
                 warnings.warn(NotConfigured(
-                    'No %r module found! Please make sure it exists and '
-                    'is available to Python.' % (configname, )))
+                    'No {module} module found! Please make sure it exists and '
+                    'is available to Python.'.format(module=configname)))
             return self.setup_settings({})
         else:
             celeryconfig = self.import_from_cwd(configname)
@@ -75,4 +75,4 @@ class Loader(BaseLoader):
             return self.setup_settings(usercfg)

     def wanted_module_item(self, item):
-        return item[0].isupper() and not item.startswith('_')
+        return not item.startswith('_')

+ 62 - 223
celery/local.py

@@ -12,27 +12,74 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-# since each thread has its own greenlet we can just use those as identifiers
-# for the context.  If greenlets are not available we fall back to the
-# current thread ident.
-try:
-    from greenlet import getcurrent as get_ident
-except ImportError:  # pragma: no cover
+import importlib
+import sys
+
+
+def symbol_by_name(name, aliases={}, imp=None, package=None,
+        sep='.', default=None, **kwargs):
+    """Get symbol by qualified name.
+
+    The name should be the full dot-separated path to the class::
+
+        modulename.ClassName
+
+    Example::
+
+        celery.concurrency.processes.TaskPool
+                                    ^- class name
+
+    or using ':' to separate module and symbol::
+
+        celery.concurrency.processes:TaskPool
+
+    If `aliases` is provided, a dict containing short name/long name
+    mappings, the name is looked up in the aliases first.
+
+    Examples:
+
+        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        >>> symbol_by_name('default', {
+        ...     'default': 'celery.concurrency.processes.TaskPool'})
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        # Does not try to look up non-string names.
+        >>> from celery.concurrency.processes import TaskPool
+        >>> symbol_by_name(TaskPool) is TaskPool
+        True
+
+    """
+    if imp is None:
+        imp = importlib.import_module
+
+    if not isinstance(name, basestring):
+        return name                                 # already a class
+
+    name = aliases.get(name) or name
+    sep = ':' if ':' in name else sep
+    module_name, _, cls_name = name.rpartition(sep)
+    if not module_name:
+        cls_name, module_name = None, package if package else cls_name
     try:
-        from thread import get_ident  # noqa
-    except ImportError:  # pragma: no cover
         try:
-            from dummy_thread import get_ident  # noqa
-        except ImportError:  # pragma: no cover
-            from _thread import get_ident  # noqa
+            module = imp(module_name, package=package, **kwargs)
+        except ValueError as exc:
+            raise ValueError, ValueError(
+                    "Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
+        return getattr(module, cls_name) if cls_name else module
+    except (ImportError, AttributeError):
+        if default is None:
+            raise
+    return default


 def try_import(module, default=None):
     """Try to import and return module, or return
     None if the module does not exist."""
-    from importlib import import_module
     try:
-        return import_module(module)
+        return importlib.import_module(module)
     except ImportError:
         return default

@@ -82,7 +129,7 @@ class Proxy(object):
         try:
             return getattr(self.__local, self.__name__)
         except AttributeError:
-            raise RuntimeError('no object bound to %s' % self.__name__)
+            raise RuntimeError('no object bound to {0.__name__}'.format(self))

     @property
     def __dict__(self):
@@ -95,7 +142,7 @@ class Proxy(object):
         try:
             obj = self._get_current_object()
         except RuntimeError:  # pragma: no cover
-            return '<%s unbound>' % self.__class__.__name__
+            return '<{0} unbound>'.format(self.__class__.__name__)
         return repr(obj)

     def __nonzero__(self):
@@ -221,211 +268,3 @@ def maybe_evaluate(obj):
         return obj.__maybe_evaluate__()
     except AttributeError:
         return obj
-
-
-def release_local(local):
-    """Releases the contents of the local for the current context.
-    This makes it possible to use locals without a manager.
-
-    Example::
-
-        >>> loc = Local()
-        >>> loc.foo = 42
-        >>> release_local(loc)
-        >>> hasattr(loc, 'foo')
-        False
-
-    With this function one can release :class:`Local` objects as well
-    as :class:`StackLocal` objects.  However it is not possible to
-    release data held by proxies that way, one always has to retain
-    a reference to the underlying local object in order to be able
-    to release it.
-
-    .. versionadded:: 0.6.1
-    """
-    local.__release_local__()
-
-
-class Local(object):
-    __slots__ = ('__storage__', '__ident_func__')
-
-    def __init__(self):
-        object.__setattr__(self, '__storage__', {})
-        object.__setattr__(self, '__ident_func__', get_ident)
-
-    def __iter__(self):
-        return iter(self.__storage__.items())
-
-    def __call__(self, proxy):
-        """Create a proxy for a name."""
-        return Proxy(self, proxy)
-
-    def __release_local__(self):
-        self.__storage__.pop(self.__ident_func__(), None)
-
-    def __getattr__(self, name):
-        try:
-            return self.__storage__[self.__ident_func__()][name]
-        except KeyError:
-            raise AttributeError(name)
-
-    def __setattr__(self, name, value):
-        ident = self.__ident_func__()
-        storage = self.__storage__
-        try:
-            storage[ident][name] = value
-        except KeyError:
-            storage[ident] = {name: value}
-
-    def __delattr__(self, name):
-        try:
-            del self.__storage__[self.__ident_func__()][name]
-        except KeyError:
-            raise AttributeError(name)
-
-
-class LocalStack(object):
-    """This class works similar to a :class:`Local` but keeps a stack
-    of objects instead.  This is best explained with an example::
-
-        >>> ls = LocalStack()
-        >>> ls.push(42)
-        >>> ls.top
-        42
-        >>> ls.push(23)
-        >>> ls.top
-        23
-        >>> ls.pop()
-        23
-        >>> ls.top
-        42
-
-    They can be force released by using a :class:`LocalManager` or with
-    the :func:`release_local` function but the correct way is to pop the
-    item from the stack after using.  When the stack is empty it will
-    no longer be bound to the current context (and as such released).
-
-    By calling the stack without arguments it returns a proxy that resolves to
-    the topmost item on the stack.
-
-    """
-
-    def __init__(self):
-        self._local = Local()
-
-    def __release_local__(self):
-        self._local.__release_local__()
-
-    def _get__ident_func__(self):
-        return self._local.__ident_func__
-
-    def _set__ident_func__(self, value):
-        object.__setattr__(self._local, '__ident_func__', value)
-    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
-    del _get__ident_func__, _set__ident_func__
-
-    def __call__(self):
-        def _lookup():
-            rv = self.top
-            if rv is None:
-                raise RuntimeError('object unbound')
-            return rv
-        return Proxy(_lookup)
-
-    def push(self, obj):
-        """Pushes a new item to the stack"""
-        rv = getattr(self._local, 'stack', None)
-        if rv is None:
-            self._local.stack = rv = []
-        rv.append(obj)
-        return rv
-
-    def pop(self):
-        """Removes the topmost item from the stack, will return the
-        old value or `None` if the stack was already empty.
-        """
-        stack = getattr(self._local, 'stack', None)
-        if stack is None:
-            return None
-        elif len(stack) == 1:
-            release_local(self._local)
-            return stack[-1]
-        else:
-            return stack.pop()
-
-    @property
-    def stack(self):
-        """get_current_worker_task uses this to find
-        the original task that was executed by the worker."""
-        stack = getattr(self._local, 'stack', None)
-        if stack is not None:
-            return stack
-        return []
-
-    @property
-    def top(self):
-        """The topmost item on the stack.  If the stack is empty,
-        `None` is returned.
-        """
-        try:
-            return self._local.stack[-1]
-        except (AttributeError, IndexError):
-            return None
-
-
-class LocalManager(object):
-    """Local objects cannot manage themselves. For that you need a local
-    manager.  You can pass a local manager multiple locals or add them later
-    by appending them to `manager.locals`.  Everytime the manager cleans up
-    it, will clean up all the data left in the locals for this context.
-
-    The `ident_func` parameter can be added to override the default ident
-    function for the wrapped locals.
-
-    .. versionchanged:: 0.6.1
-       Instead of a manager the :func:`release_local` function can be used
-       as well.
-
-    .. versionchanged:: 0.7
-       `ident_func` was added.
-    """
-
-    def __init__(self, locals=None, ident_func=None):
-        if locals is None:
-            self.locals = []
-        elif isinstance(locals, Local):
-            self.locals = [locals]
-        else:
-            self.locals = list(locals)
-        if ident_func is not None:
-            self.ident_func = ident_func
-            for local in self.locals:
-                object.__setattr__(local, '__ident_func__', ident_func)
-        else:
-            self.ident_func = get_ident
-
-    def get_ident(self):
-        """Return the context identifier the local objects use internally for
-        this context.  You cannot override this method to change the behavior
-        but use it to link other context local objects (such as SQLAlchemy's
-        scoped sessions) to the Werkzeug locals.
-
-        .. versionchanged:: 0.7
-           You can pass a different ident function to the local manager that
-           will then be propagated to all the locals passed to the
-           constructor.
-        """
-        return self.ident_func()
-
-    def cleanup(self):
-        """Manually clean up the data in the locals for this context.  Call
-        this at the end of the request or use `make_middleware()`.
-        """
-        for local in self.locals:
-            release_local(local)
-
-    def __repr__(self):
-        return '<%s storages: %d>' % (
-            self.__class__.__name__,
-            len(self.locals)
-        )

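A few usage forms of the newly added `symbol_by_name`, restating the docstring examples plus the `default` escape hatch:

    from celery.local import symbol_by_name

    # dotted and ':'-separated paths resolve to the same symbol:
    Pool = symbol_by_name('celery.concurrency.processes.TaskPool')
    Pool = symbol_by_name('celery.concurrency.processes:TaskPool')

    # aliases map short names to full paths before lookup:
    Pool = symbol_by_name('processes', aliases={
        'processes': 'celery.concurrency.processes:TaskPool'})

    # with a (non-None) default, ImportError/AttributeError are swallowed:
    fallback = symbol_by_name('no.such:thing', default='fallback')
    assert fallback == 'fallback'
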
+ 80 - 44
celery/platforms.py

@@ -7,8 +7,7 @@
     users, groups, and so on.

 """
-from __future__ import absolute_import
-from __future__ import with_statement
+from __future__ import absolute_import, print_function

 import atexit
 import errno
@@ -19,10 +18,10 @@ import signal as _signal
 import sys

 from contextlib import contextmanager
+from itertools import imap

 from .local import try_import

-from billiard import current_process
 from kombu.utils.limits import TokenBucket

 _setproctitle = try_import('setproctitle')
@@ -41,16 +40,14 @@ IS_WINDOWS = SYSTEM == 'Windows'

 DAEMON_UMASK = 0
 DAEMON_WORKDIR = '/'
-DAEMON_REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
-

 PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
 PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))

 _setps_bucket = TokenBucket(0.5)  # 30/m, every 2 seconds

-PIDLOCKED = """ERROR: Pidfile (%s) already exists.
-Seems we're already running? (PID: %s)"""
+PIDLOCKED = """ERROR: Pidfile ({0}) already exists.
+Seems we're already running? (PID: {1})"""


 def pyimplementation():
@@ -59,14 +56,37 @@ def pyimplementation():
     elif sys.platform.startswith('java'):
         return 'Jython ' + sys.platform
     elif hasattr(sys, 'pypy_version_info'):
-        v = '.'.join(map(str, sys.pypy_version_info[:3]))
+        v = '.'.join(imap(str, sys.pypy_version_info[:3]))
         if sys.pypy_version_info[3:]:
-            v += '-' + ''.join(map(str, sys.pypy_version_info[3:]))
+            v += '-' + ''.join(imap(str, sys.pypy_version_info[3:]))
         return 'PyPy ' + v
     else:
         return 'CPython'


+def _find_option_with_arg(argv, short_opts=None, long_opts=None):
+    for i, arg in enumerate(argv):
+        if arg.startswith('-'):
+            if long_opts and arg.startswith('--'):
+                name, _, val = arg.partition('=')
+                if name in long_opts:
+                    return val
+            if short_opts and arg in short_opts:
+                return argv[i + 1]
+    raise KeyError('|'.join((short_opts or []) + (long_opts or [])))
+
+
+def maybe_patch_concurrency(argv, short_opts=None, long_opts=None):
+    try:
+        pool = _find_option_with_arg(argv, short_opts, long_opts)
+    except KeyError:
+        pass
+    else:
+        # set up eventlet/gevent environments ASAP.
+        from celery import concurrency
+        concurrency.get_implementation(pool)
+
+
 class LockFailed(Exception):
     """Raised if a pidlock can't be acquired."""
     pass
@@ -106,7 +126,7 @@ class PIDFile(object):
         """Acquire lock."""
         """Acquire lock."""
         try:
         try:
             self.write_pid()
             self.write_pid()
-        except OSError, exc:
+        except OSError as exc:
             raise LockFailed, LockFailed(str(exc)), sys.exc_info()[2]
             raise LockFailed, LockFailed(str(exc)), sys.exc_info()[2]
         return self
         return self
     __enter__ = acquire
     __enter__ = acquire
@@ -124,7 +144,7 @@ class PIDFile(object):
         """Reads and returns the current pid."""
         """Reads and returns the current pid."""
         try:
         try:
             fh = open(self.path, 'r')
             fh = open(self.path, 'r')
-        except IOError, exc:
+        except IOError as exc:
             if exc.errno == errno.ENOENT:
             if exc.errno == errno.ENOENT:
                 return
                 return
             raise
             raise
@@ -133,20 +153,20 @@ class PIDFile(object):
             line = fh.readline()
             if line.strip() == line:  # must contain '\n'
                 raise ValueError(
-                    'Partially written or invalid pidfile %r' % (self.path))
+                    'Partial or invalid pidfile {0.path}'.format(self))
         finally:
             fh.close()

         try:
             return int(line.strip())
         except ValueError:
-            raise ValueError('PID file %r contents invalid.' % self.path)
+            raise ValueError('PID file {0.path} invalid.'.format(self))

     def remove(self):
         """Removes the lock."""
         try:
             os.unlink(self.path)
-        except OSError, exc:
+        except OSError as exc:
             if exc.errno in (errno.ENOENT, errno.EACCES):
                 return
             raise
@@ -156,8 +176,8 @@ class PIDFile(object):
         (does not respond to signals)."""
         try:
             pid = self.read_pid()
-        except ValueError, exc:
-            sys.stderr.write('Broken pidfile found. Removing it.\n')
+        except ValueError as exc:
+            print('Broken pidfile found. Removing it.', file=sys.stderr)
             self.remove()
             return True
         if not pid:
@@ -166,16 +186,16 @@ class PIDFile(object):

         try:
             os.kill(pid, 0)
-        except os.error, exc:
+        except os.error as exc:
             if exc.errno == errno.ESRCH:
-                sys.stderr.write('Stale pidfile exists. Removing it.\n')
+                print('Stale pidfile exists. Removing it.', file=sys.stderr)
                 self.remove()
                 return True
         return False

     def write_pid(self):
         pid = os.getpid()
-        content = '%d\n' % (pid, )
+        content = '{0}\n'.format(pid)

         pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
         pidfile = os.fdopen(pidfile_fd, 'w')
@@ -218,14 +238,26 @@ def create_pidlock(pidfile):
         pidlock = create_pidlock('/var/run/app.pid')

     """
+    pidlock = _create_pidlock(pidfile)
+    atexit.register(pidlock.release)
+    return pidlock
+
+
+def _create_pidlock(pidfile):
     pidlock = PIDFile(pidfile)
     if pidlock.is_locked() and not pidlock.remove_if_stale():
-        raise SystemExit(PIDLOCKED % (pidfile, pidlock.read_pid()))
+        raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid()))
     pidlock.acquire()
-    atexit.register(pidlock.release)
     return pidlock


+def fileno(f):
+    try:
+        return f.fileno()
+    except AttributeError:
+        pass
+
+
 class DaemonContext(object):
     _is_open = False
     workdir = DAEMON_WORKDIR
@@ -236,6 +268,12 @@ class DaemonContext(object):
         self.workdir = workdir or self.workdir
         self.umask = self.umask if umask is None else umask
         self.fake = fake
+        self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
+
+    def redirect_to_null(self, fd):
+        if fd:
+            dest = os.open(os.devnull, os.O_RDWR)
+            os.dup2(dest, fd)

     def open(self):
         if not self._is_open:
@@ -245,13 +283,14 @@ class DaemonContext(object):
             os.chdir(self.workdir)
             os.umask(self.umask)

+            preserve = [fileno(f) for f in self.stdfds if fileno(f)]
             for fd in reversed(range(get_fdmax(default=2048))):
-                with ignore_EBADF():
-                    os.close(fd)
+                if fd not in preserve:
+                    with ignore_EBADF():
+                        os.close(fd)

-            os.open(DAEMON_REDIRECT_TO, os.O_RDWR)
-            os.dup2(0, 1)
-            os.dup2(0, 2)
+            for fd in self.stdfds:
+                self.redirect_to_null(fileno(fd))

             self._is_open = True
     __enter__ = open
@@ -301,8 +340,7 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
             # Now in detached child process with effective user set to nobody,
             # and we know that our logfile can be written to, and that
             # the pidfile is not locked.
-            pidlock = create_pidlock('/var/run/app.pid').acquire()
-            atexit.register(pidlock.release)
+            pidlock = create_pidlock('/var/run/app.pid')

             # Run the program
             program.run(logfile='/var/log/app.log')
@@ -322,7 +360,8 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
     # we need to know that we have access to the logfile.
     logfile and open(logfile, 'a').close()
     # Doesn't actually create the pidfile, but makes sure it's not stale.
-    pidfile and create_pidlock(pidfile)
+    if pidfile:
+        _create_pidlock(pidfile).release()

     return DaemonContext(umask=umask, workdir=workdir, fake=fake)

@@ -340,7 +379,7 @@ def parse_uid(uid):
         try:
             return pwd.getpwnam(uid).pw_uid
         except (AttributeError, KeyError):
-            raise KeyError('User does not exist: %r' % (uid, ))
+            raise KeyError('User does not exist: {0}'.format(uid))


 def parse_gid(gid):
@@ -356,7 +395,7 @@ def parse_gid(gid):
         try:
             return grp.getgrnam(gid).gr_gid
         except (AttributeError, KeyError):
-            raise KeyError('Group does not exist: %r' % (gid, ))
+            raise KeyError('Group does not exist: {0}'.format(gid))


 def _setgroups_hack(groups):
@@ -372,7 +411,7 @@ def _setgroups_hack(groups):
             if len(groups) <= 1:
                 raise
             groups[:] = groups[:-1]
-        except OSError, exc:  # error from the OS.
+        except OSError as exc:  # error from the OS.
             if exc.errno != errno.EINVAL or len(groups) <= 1:
                 raise
             groups[:] = groups[:-1]
@@ -386,7 +425,7 @@ def setgroups(groups):
         pass
     try:
         return _setgroups_hack(groups[:max_groups])
-    except OSError, exc:
+    except OSError as exc:
         if exc.errno != errno.EPERM:
             raise
         if any(group not in groups for group in os.getgroups()):
@@ -573,8 +612,8 @@ def set_process_title(progname, info=None):
     Only works if :mod:`setproctitle` is installed.

     """
-    proctitle = '[%s]' % progname
-    proctitle = '%s %s' % (proctitle, info) if info else proctitle
+    proctitle = '[{0}]'.format(progname)
+    proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle
     if _setproctitle:
         _setproctitle.setproctitle(proctitle)
     return proctitle
@@ -594,24 +633,21 @@ else:

         """
         if not rate_limit or _setps_bucket.can_consume(1):
+            from billiard import current_process
             if hostname:
-                progname = '%s@%s' % (progname, hostname.split('.')[0])
+                progname = '{0}@{1}'.format(progname, hostname.split('.')[0])
             return set_process_title(
-                '%s:%s' % (progname, current_process().name), info=info)
+                '{0}:{1}'.format(progname, current_process().name), info=info)


-def shellsplit(s, posix=True):
-    # posix= option to shlex.split first available in Python 2.6+
-    lexer = shlex.shlex(s, posix=not IS_WINDOWS)
-    lexer.whitespace_split = True
-    lexer.commenters = ''
-    return list(lexer)
+def shellsplit(s):
+    return shlex.split(s, posix=not IS_WINDOWS)


 @contextmanager
 def ignore_EBADF():
     try:
         yield
-    except OSError, exc:
+    except OSError as exc:
         if exc.errno != errno.EBADF:
             raise

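`maybe_patch_concurrency` exists so entry points can apply the eventlet/gevent monkey-patches before anything imports sockets, by scanning argv ahead of real option parsing. A hedged sketch of the intended call (the `-P`/`--pool` names mirror the worker's pool option):

    import sys
    from celery.platforms import maybe_patch_concurrency

    # With e.g. argv = ['celery', 'worker', '-P', 'eventlet'] this imports
    # the matching concurrency implementation (monkey-patching early); with
    # no -P/--pool argument it is a silent no-op.
    maybe_patch_concurrency(sys.argv, short_opts=['-P'], long_opts=['--pool'])
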
+ 41 - 27
celery/result.py

@@ -7,7 +7,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import time

@@ -75,14 +74,20 @@ class AsyncResult(ResultBase):
         """Forget about (and possibly remove the result of) this task."""
         """Forget about (and possibly remove the result of) this task."""
         self.backend.forget(self.id)
         self.backend.forget(self.id)
 
 
-    def revoke(self, connection=None):
+    def revoke(self, connection=None, terminate=False, signal=None):
         """Send revoke signal to all workers.
         """Send revoke signal to all workers.
 
 
         Any worker receiving the task, or having reserved the
         Any worker receiving the task, or having reserved the
         task, *must* ignore it.
         task, *must* ignore it.
 
 
+        :keyword terminate: Also terminate the process currently working
+            on the task (if any).
+        :keyword signal: Name of signal to send to process if terminate.
+            Default is TERM.
+
         """
         """
-        self.app.control.revoke(self.id, connection=connection)
+        self.app.control.revoke(self.id, connection=connection,
+                                terminate=terminate, signal=signal)
 
 
     def get(self, timeout=None, propagate=True, interval=0.5):
     def get(self, timeout=None, propagate=True, interval=0.5):
         """Wait until task is ready, and return its result.
         """Wait until task is ready, and return its result.
@@ -97,7 +102,7 @@ class AsyncResult(ResultBase):
         :keyword propagate: Re-raise exception if the task failed.
         :keyword interval: Time to wait (in seconds) before retrying to
            retrieve the result.  Note that this does not have any effect
-           when using the AMQP result store backend, as it does not
+           when using the amqp result store backend, as it does not
            use polling.

         :raises celery.exceptions.TimeoutError: if `timeout` is not
@@ -143,7 +148,7 @@ class AsyncResult(ResultBase):
             [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

         """
-        for _, R in self.iterdeps():
+        for _, R in self.iterdeps(intermediate=intermediate):
             yield R, R.get(**kwargs)

     def get_leaf(self):
@@ -198,7 +203,7 @@ class AsyncResult(ResultBase):
         return hash(self.id)

     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, self.id)
+        return '<{0}: {1}>'.format(type(self).__name__, self.id)

     def __eq__(self, other):
         if isinstance(other, AsyncResult):
@@ -217,10 +222,6 @@ class AsyncResult(ResultBase):
     def __reduce_args__(self):
         return self.id, self.backend, self.task_name, self.parent

-    def set_parent(self, parent):
-        self.parent = parent
-        return parent
-
     @cached_property
     def graph(self):
         return self.build_graph()
@@ -233,7 +234,7 @@ class AsyncResult(ResultBase):
     def children(self):
         children = self.backend.get_children(self.id)
         if children:
-            return map(from_serializable, children)
+            return [from_serializable(child) for child in children]

     @property
     def result(self):
@@ -281,12 +282,14 @@ class AsyncResult(ResultBase):
         return self.backend.get_status(self.id)
     status = state

-    def _get_task_id(self):
+    @property
+    def task_id(self):
+        """compat alias to :attr:`id`"""
         return self.id

-    def _set_task_id(self, id):
+    @task_id.setter  # noqa
+    def task_id(self, id):
         self.id = id
-    task_id = property(_get_task_id, _set_task_id)
 BaseAsyncResult = AsyncResult  # for backwards compatibility.


@@ -398,7 +401,7 @@ class ResultSet(ResultBase):

     def revoke(self, connection=None):
         """Revoke all tasks in the set."""
-        with self.app.default_connection(connection) as conn:
+        with self.app.connection_or_acquire(connection) as conn:
             for result in self.results:
                 result.revoke(connection=conn)

@@ -435,7 +438,7 @@ class ResultSet(ResultBase):
             time.sleep(interval)
             elapsed += interval
             if timeout and elapsed >= timeout:
-                raise TimeoutError("The operation timed out")
+                raise TimeoutError('The operation timed out')

     def get(self, timeout=None, propagate=True, interval=0.5):
         """See :meth:`join`
@@ -472,7 +475,7 @@ class ResultSet(ResultBase):

         :keyword interval: Time to wait (in seconds) before retrying to
                            retrieve a result from the set.  Note that this
-                           does not have any effect when using the AMQP
+                           does not have any effect when using the amqp
                            result store backend, as it does not use polling.

         :raises celery.exceptions.TimeoutError: if `timeout` is not
@@ -503,7 +506,7 @@ class ResultSet(ResultBase):
         Note that this does not support collecting the results
         for different task types using different backends.

-        This is currently only supported by the AMQP, Redis and cache
+        This is currently only supported by the amqp, Redis and cache
         result backends.

         """
@@ -519,7 +522,7 @@ class ResultSet(ResultBase):
         Note that this does not support collecting the results
         for different task types using different backends.

-        This is currently only supported by the AMQP, Redis and cache
+        This is currently only supported by the amqp, Redis and cache
         result backends.

         """
@@ -539,8 +542,8 @@ class ResultSet(ResultBase):
         return NotImplemented

     def __repr__(self):
-        return '<%s: %r>' % (self.__class__.__name__,
-                             [r.id for r in self.results])
+        return '<{0}: [{1}]>'.format(type(self).__name__,
+                                     ', '.join(r.id for r in self.results))

     @property
     def subtasks(self):
@@ -603,12 +606,16 @@ class GroupResult(ResultSet):
         return NotImplemented

     def __repr__(self):
-        return '<%s: %s %r>' % (self.__class__.__name__, self.id,
-                                [r.id for r in self.results])
+        return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
+                                         ', '.join(r.id for r in self.results))

     def serializable(self):
         return self.id, [r.serializable() for r in self.results]

+    @property
+    def children(self):
+        return self.results
+
     @classmethod
     def restore(self, id, backend=None):
         """Restore previously saved group result."""
@@ -634,16 +641,19 @@ class TaskSetResult(GroupResult):
         """Deprecated: Use ``len(r)``."""
         """Deprecated: Use ``len(r)``."""
         return len(self)
         return len(self)
 
 
-    def _get_taskset_id(self):
+    @property
+    def taskset_id(self):
+        """compat alias to :attr:`self.id`"""
         return self.id
         return self.id
 
 
-    def _set_taskset_id(self, id):
+    @taskset_id.setter  # noqa
+    def taskset_id(self, id):
         self.id = id
         self.id = id
-    taskset_id = property(_get_taskset_id, _set_taskset_id)
 
 
 
 
 class EagerResult(AsyncResult):
 class EagerResult(AsyncResult):
     """Result that we know has already been executed."""
     """Result that we know has already been executed."""
+    task_name = None
 
 
     def __init__(self, id, ret_value, state, traceback=None):
     def __init__(self, id, ret_value, state, traceback=None):
         self.id = id
         self.id = id
@@ -680,7 +690,7 @@ class EagerResult(AsyncResult):
         self._state = states.REVOKED

     def __repr__(self):
-        return "<EagerResult: %s>" % self.id
+        return '<EagerResult: {0.id}>'.format(self)

     @property
     def result(self):
@@ -697,3 +707,7 @@ class EagerResult(AsyncResult):
     def traceback(self):
         """The traceback if the task failed."""
         return self._traceback
+
+    @property
+    def supports_native_join(self):
+        return False
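
Note: eager results are produced in the calling process, so there is nothing
to join natively across a result backend; a minimal illustration:

    >>> from celery import states
    >>> from celery.result import EagerResult
    >>> EagerResult('some-id', 42, states.SUCCESS).supports_native_join
    False
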

+ 59 - 23
celery/schedules.py

@@ -12,14 +12,28 @@ from __future__ import absolute_import
 import re

 from datetime import datetime, timedelta
+
 from dateutil.relativedelta import relativedelta
+from kombu.utils import cached_property

 from . import current_app
 from .utils import is_iterable
-from .utils.timeutils import (timedelta_seconds, weekday, maybe_timedelta,
-                              remaining, humanize_seconds)
+from .utils.timeutils import (
+    timedelta_seconds, weekday, maybe_timedelta, remaining,
+    humanize_seconds, timezone, maybe_make_aware
+)
 from .datastructures import AttributeDict

+CRON_PATTERN_INVALID = """\
+Invalid crontab pattern. Valid range is {min}-{max}. \
+'{value}' was found.\
+"""
+
+CRON_INVALID_TYPE = """\
+Argument cronspec needs to be of any of the following types: \
+int, basestring, or an iterable type. {type!r} was given.\
+"""
+

 class ParseException(Exception):
     """Raised by crontab_parser when the input can't be parsed."""
@@ -34,12 +48,11 @@ class schedule(object):
         self.nowfun = nowfun

     def now(self):
-        return (self.nowfun or current_app.now)()
+        return (self.nowfun or self.app.now)()

     def remaining_estimate(self, last_run_at):
-        """Returns when the periodic task should run next as a timedelta."""
-        return remaining(last_run_at, self.run_every, relative=self.relative,
-                         now=self.now())
+        return remaining(last_run_at, self.run_every,
+                         self.maybe_make_aware(self.now()), self.relative)

     def is_due(self, last_run_at):
         """Returns tuple of two items `(is_due, next_time_to_run)`,
@@ -69,14 +82,20 @@ class schedule(object):
             the django-celery database scheduler the value is 5 seconds.

         """
+        last_run_at = self.maybe_make_aware(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
         rem = timedelta_seconds(rem_delta)
         if rem == 0:
             return True, self.seconds
         return False, rem

+    def maybe_make_aware(self, dt):
+        if self.utc_enabled:
+            return maybe_make_aware(dt, self.tz)
+        return dt
+
     def __repr__(self):
-        return '<freq: %s>' % self.human_seconds
+        return '<freq: {0.human_seconds}>'.format(self)

     def __eq__(self, other):
         if isinstance(other, schedule):
@@ -91,6 +110,23 @@ class schedule(object):
     def human_seconds(self):
         return humanize_seconds(self.seconds)

+    @cached_property
+    def app(self):
+        return current_app._get_current_object()
+
+    @cached_property
+    def tz(self):
+        return timezone.get_timezone(self.app.conf.CELERY_TIMEZONE)
+
+    @cached_property
+    def utc_enabled(self):
+        return self.app.conf.CELERY_ENABLE_UTC
+
+    @cached_property
+    def to_local(self):
+        return (timezone.to_local if self.utc_enabled
+                                  else timezone.to_local_fallback)
+

 class crontab_parser(object):
     """Parser for crontab expressions. Any expression of the form 'groups'
@@ -191,11 +227,11 @@ class crontab_parser(object):
             try:
                 i = weekday(s)
             except KeyError:
-                raise ValueError("Invalid weekday literal '%s'." % s)
+                raise ValueError('Invalid weekday literal {0!r}.'.format(s))

         if i < self.min_:
-            raise ValueError('Invalid beginning range: %s < %s.' %
-                                                   (i, self.min_))
+            raise ValueError(
+                'Invalid beginning range: {0} < {1}.'.format(i, self.min_))
         return i


@@ -300,19 +336,13 @@ class crontab(schedule):
         elif is_iterable(cronspec):
             result = set(cronspec)
         else:
-            raise TypeError(
-                    'Argument cronspec needs to be of any of the '
-                    'following types: int, basestring, or an iterable type. '
-                    "'%s' was given." % type(cronspec))
+            raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))

         # ensure the result does not precede the min or exceed the max
         for number in result:
             if number >= max_ + min_ or number < min_:
-                raise ValueError(
-                        'Invalid crontab pattern. Valid '
-                        "range is %d-%d. '%d' was found." %
-                        (min_, max_ - 1 + min_, number))
-
+                raise ValueError(CRON_PATTERN_INVALID.format(
+                    min=min_, max=max_ - 1 + min_, value=number))
         return result

     def _delta_to_next(self, last_run_at, next_hour, next_minute):
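
Note: _expand_cronspec turns each crontab field into a set of integers, e.g.:

    >>> from celery.schedules import crontab
    >>> sorted(crontab(minute='*/15').minute)
    [0, 15, 30, 45]
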
@@ -386,7 +416,10 @@ class crontab(schedule):
         self.day_of_week = self._expand_cronspec(day_of_week, 7)
         self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
         self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
-        self.nowfun = nowfun or current_app.now
+        self.nowfun = nowfun
+
+    def now(self):
+        return (self.nowfun or self.app.now)()

     def __repr__(self):
         return ('<crontab: %s %s %s %s %s (m/h/d/dM/MY)>' %
@@ -403,8 +436,10 @@ class crontab(schedule):
                                  self._orig_day_of_month,
                                  self._orig_month_of_year), None)

-    def remaining_estimate(self, last_run_at):
+    def remaining_estimate(self, last_run_at, tz=None):
         """Returns when the periodic task should run next as a timedelta."""
+        tz = tz or self.tz
+        last_run_at = self.maybe_make_aware(last_run_at)
         dow_num = last_run_at.isoweekday() % 7  # Sunday is day 0, not day 7

         execute_this_date = (last_run_at.month in self.month_of_year and
@@ -453,7 +488,8 @@ class crontab(schedule):
                     delta = self._delta_to_next(last_run_at,
                                                 next_hour, next_minute)

-        return remaining(last_run_at, delta, now=self.nowfun())
+        return remaining(self.to_local(last_run_at, tz),
+                         delta, self.to_local(self.now(), tz))

     def is_due(self, last_run_at):
         """Returns tuple of two items `(is_due, next_time_to_run)`,
@@ -466,7 +502,7 @@ class crontab(schedule):
         rem = timedelta_seconds(rem_delta)
         due = rem == 0
         if due:
-            rem_delta = self.remaining_estimate(last_run_at=self.nowfun())
+            rem_delta = self.remaining_estimate(self.now())
             rem = timedelta_seconds(rem_delta)
         return due, rem


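Note: a sketch of the timezone-aware estimate (the exact delta depends on the
current time and the CELERY_TIMEZONE setting):

    >>> from celery.schedules import crontab
    >>> c = crontab(minute=0, hour='*/3')
    >>> c.remaining_estimate(c.now())   # timedelta until the next run
    datetime.timedelta(...)
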
+ 0 - 1
celery/security/__init__.py

@@ -7,7 +7,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 from kombu.serialization import registry


+ 6 - 7
celery/security/certificate.py

@@ -7,7 +7,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import glob
 import os
@@ -22,7 +21,7 @@ class Certificate(object):

     def __init__(self, cert):
         assert crypto is not None
-        with reraise_errors('Invalid certificate: %r'):
+        with reraise_errors('Invalid certificate: {0!r}'):
             self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

     def has_expired(self):
@@ -40,11 +39,11 @@

     def get_id(self):
         """Serial number/issuer pair uniquely identifies a certificate"""
-        return '%s %s' % (self.get_issuer(), self.get_serial_number())
+        return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())

     def verify(self, data, signature, digest):
         """Verifies the signature for string containing data."""
-        with reraise_errors('Bad signature: %r'):
+        with reraise_errors('Bad signature: {0!r}'):
             crypto.verify(self._cert, signature, data, digest)


@@ -64,11 +63,11 @@ class CertStore(object):
         try:
             return self._certs[id]
         except KeyError:
-            raise SecurityError('Unknown certificate: %r' % (id, ))
+            raise SecurityError('Unknown certificate: {0!r}'.format(id))

     def add_cert(self, cert):
         if cert.get_id() in self._certs:
-            raise SecurityError('Duplicate certificate: %r' % (id, ))
+            raise SecurityError('Duplicate certificate: {0!r}'.format(id))
         self._certs[cert.get_id()] = cert


@@ -84,5 +83,5 @@ class FSCertStore(CertStore):
                 cert = Certificate(f.read())
                 if cert.has_expired():
                     raise SecurityError(
-                        'Expired certificate: %r' % (cert.get_id(), ))
+                        'Expired certificate: {0!r}'.format(cert.get_id()))
                 self.add_cert(cert)

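Note: a minimal sketch of the certificate store API (assumes a PEM-encoded
certificate at cert.pem; the path is illustrative):

    from celery.security.certificate import Certificate, CertStore

    with open('cert.pem') as f:
        cert = Certificate(f.read())

    store = CertStore()
    store.add_cert(cert)                   # duplicates raise SecurityError
    assert store[cert.get_id()] is cert    # lookup by 'issuer serial' id
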
+ 2 - 3
celery/security/key.py

@@ -7,7 +7,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 from .utils import crypto, reraise_errors

@@ -15,10 +14,10 @@ from .utils import crypto, reraise_errors
 class PrivateKey(object):

     def __init__(self, key):
-        with reraise_errors('Invalid private key: %r'):
+        with reraise_errors('Invalid private key: {0!r}'):
             self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)

     def sign(self, data, digest):
         """sign string containing data."""
-        with reraise_errors('Unable to sign data: %r'):
+        with reraise_errors('Unable to sign data: {0!r}'):
             return crypto.sign(self._key, data, digest)

+ 4 - 4
celery/security/serialization.py

@@ -7,10 +7,10 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 import base64

+from itertools import izip
 from kombu.serialization import registry, encode, decode
 from kombu.utils.encoding import bytes_to_str, str_to_bytes

@@ -41,7 +41,7 @@ class SecureSerializer(object):
         """serialize data structure into string"""
         """serialize data structure into string"""
         assert self._key is not None
         assert self._key is not None
         assert self._cert is not None
         assert self._cert is not None
-        with reraise_errors('Unable to serialize: %r', (Exception, )):
+        with reraise_errors('Unable to serialize: {0!r}', (Exception, )):
             content_type, content_encoding, body = encode(
             content_type, content_encoding, body = encode(
                     data, serializer=self._serializer)
                     data, serializer=self._serializer)
             # What we sign is the serialized body, not the body itself.
             # What we sign is the serialized body, not the body itself.
@@ -55,7 +55,7 @@ class SecureSerializer(object):
     def deserialize(self, data):
         """deserialize data structure from string"""
         assert self._cert_store is not None
-        with reraise_errors('Unable to deserialize: %r', (Exception, )):
+        with reraise_errors('Unable to deserialize: {0!r}', (Exception, )):
            payload = self._unpack(data)
            signature, signer, body = (payload['signature'],
                                       payload['signer'],
@@ -72,7 +72,7 @@ class SecureSerializer(object):
     def _unpack(self, payload, sep='\x00\x01',
             fields=('signer', 'signature', 'content_type',
                     'content_encoding', 'body')):
-        return dict(zip(fields, b64decode(payload).split(sep)))
+        return dict(izip(fields, b64decode(payload).split(sep)))


 def register_auth(key=None, cert=None, store=None, digest='sha1',

+ 3 - 3
celery/security/utils.py

@@ -21,10 +21,10 @@ except ImportError:  # pragma: no cover


 @contextmanager
-def reraise_errors(msg='%r', errors=None):
+def reraise_errors(msg='{0!r}', errors=None):
     assert crypto is not None
     errors = (crypto.Error, ) if errors is None else errors
     try:
         yield
-    except errors, exc:
-        raise SecurityError, SecurityError(msg % (exc, )), sys.exc_info()[2]
+    except errors as exc:
+        raise SecurityError, SecurityError(msg.format(exc)), sys.exc_info()[2]
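
Note: the context manager now takes a new-style format template; a sketch of
how the security modules wrap pyOpenSSL calls with it (load_pem is a stand-in
name):

    from celery.security.utils import reraise_errors

    def load(pem):
        with reraise_errors('Invalid certificate: {0!r}'):
            return load_pem(pem)   # any crypto.Error becomes a SecurityError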

+ 2 - 1
celery/signals.py

@@ -24,7 +24,8 @@ task_success = Signal(providing_args=['result'])
 task_failure = Signal(providing_args=[
     'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo'])
 task_revoked = Signal(providing_args=['terminated', 'signum', 'expired'])
-celeryd_init = Signal(providing_args=['instance'])
+celeryd_init = Signal(providing_args=['instance', 'conf'])
+celeryd_after_setup = Signal(providing_args=['instance', 'conf'])
 worker_init = Signal(providing_args=[])
 worker_process_init = Signal(providing_args=[])
 worker_ready = Signal(providing_args=[])
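
Note: a sketch of hooking the new signal; sender is the worker nodename (the
queue naming below is illustrative):

    from celery.signals import celeryd_after_setup

    @celeryd_after_setup.connect
    def setup_direct_queue(sender, instance, **kwargs):
        queue_name = '{0}.dq'.format(sender)   # e.g. 'w1@example.com.dq'
        instance.app.amqp.queues.select_add(queue_name)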

+ 7 - 0
celery/states.py

@@ -101,12 +101,19 @@ class state(str):
     def __le__(self, other):
         return self.compare(other, lambda a, b: a >= b, False)

+#: Task state is unknown (assumed pending since you know the id).
 PENDING = 'PENDING'
+#: Task was received by a worker.
 RECEIVED = 'RECEIVED'
+#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`).
 STARTED = 'STARTED'
+#: Task succeeded.
 SUCCESS = 'SUCCESS'
+#: Task failed.
 FAILURE = 'FAILURE'
+#: Task was revoked.
 REVOKED = 'REVOKED'
+#: Task is waiting for retry.
 RETRY = 'RETRY'

 READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED])

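Note: together with the state class above, these constants can be compared by
precedence, e.g.:

    >>> from celery.states import state, PENDING, SUCCESS
    >>> state(PENDING) < state(SUCCESS)
    True
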
+ 21 - 1
celery/task/__init__.py

@@ -3,7 +3,10 @@
     celery.task
     ~~~~~~~~~~~

-    This is the old task module, it should not be used anymore.
+    This is the old task module, it should not be used anymore,
+    import from the main 'celery' module instead.
+    If you're looking for the decorator implementation then that's in
+    ``celery.app.base.Celery.task``.

 """
 from __future__ import absolute_import
@@ -12,6 +15,23 @@ from celery._state import current_app, current_task as current
 from celery.__compat__ import MagicModule, recreate_module
 from celery.local import Proxy

+__all__ = [
+    'BaseTask', 'Task', 'PeriodicTask',
+    'task', 'periodic_task',
+    'group', 'chord', 'subtask', 'TaskSet',
+]
+
+# This is for static analyzers
+BaseTask = object
+Task = object
+PeriodicTask = object
+task = lambda *a, **kw: None
+periodic_task = lambda *a, **kw: None
+group = lambda *a, **kw: None
+chord = lambda *a, **kw: None
+subtask = lambda *a, **kw: None
+TaskSet = object
+

 class module(MagicModule):


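Note: the placeholders above only exist for static analyzers; at runtime the
module is recreated so the old-style imports keep working, e.g.:

    >>> from celery.task import task

    >>> @task
    ... def add(x, y):
    ...     return x + y
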
+ 9 - 11
celery/task/base.py

@@ -21,8 +21,8 @@ from celery.utils.log import get_task_logger

 #: list of methods that must be classmethods in the old API.
 _COMPAT_CLASSMETHODS = (
-    'delay', 'apply_async', 'retry', 'apply',
-    'AsyncResult', 'subtask', 'push_request', 'pop_request')
+    'delay', 'apply_async', 'retry', 'apply', 'AsyncResult', 'subtask',
+)


 class Task(BaseTask):
@@ -45,13 +45,12 @@ class Task(BaseTask):
     immediate = False
     priority = None
     type = 'regular'
-    error_whitelist = ()
     disable_error_emails = False
+    accept_magic_kwargs = False

     from_config = BaseTask.from_config + (
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
         ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'),
-        ('error_whitelist', 'CELERY_TASK_ERROR_WHITELIST'),
     )

     # In old Celery the @task decorator didn't exist, so one would create
@@ -72,7 +71,7 @@ class Task(BaseTask):
         return get_task_logger(self.name)

     @classmethod
-    def establish_connection(self, connect_timeout=None):
+    def establish_connection(self):
         """Deprecated method used to get a broker connection.

         Should be replaced with :meth:`@Celery.connection`
@@ -88,11 +87,10 @@ class Task(BaseTask):
             with celery.connection() as conn:
                 ...
         """
-        return self._get_app().connection(
-                connect_timeout=connect_timeout)
+        return self._get_app().connection()

     def get_publisher(self, connection=None, exchange=None,
-            connect_timeout=None, exchange_type=None, **options):
+            exchange_type=None, **options):
         """Deprecated method to get the task publisher (now called producer).

         Should be replaced with :class:`@amqp.TaskProducer`:
@@ -107,7 +105,7 @@ class Task(BaseTask):
         exchange = self.exchange if exchange is None else exchange
         if exchange_type is None:
             exchange_type = self.exchange_type
-        connection = connection or self.establish_connection(connect_timeout)
+        connection = connection or self.establish_connection()
         return self._get_app().amqp.TaskProducer(connection,
                 exchange=exchange and Exchange(exchange, exchange_type),
                 routing_key=self.routing_key, **options)
@@ -174,7 +172,7 @@ def task(*args, **kwargs):
         def refresh_feed(url):
             try:
                 return Feed.objects.get(url=url).refresh()
-            except socket.error, exc:
+            except socket.error as exc:
                 refresh_feed.retry(exc=exc)

     Calling the resulting task:
@@ -209,7 +207,7 @@ def periodic_task(*args, **options):
                 def refresh_feed(url):
                     try:
                         return Feed.objects.get(url=url).refresh()
-                    except socket.error, exc:
+                    except socket.error as exc:
                         current.retry(exc=exc)

             Calling the resulting task:

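Note: a migration sketch for the deprecated helpers above (MyTask stands for
any old-style Task subclass; the name is illustrative):

    from celery import current_app

    # deprecated, and the connect_timeout argument is gone:
    conn = MyTask.establish_connection()
    conn.close()

    # preferred replacement, per the docstring:
    with current_app.connection() as conn:
        pass  # use the broker connection here
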
+ 7 - 7
celery/task/http.py

@@ -67,7 +67,7 @@ def extract_response(raw_response, loads=anyjson.loads):
         raise InvalidResponseError('Empty response')
     try:
         payload = loads(raw_response)
-    except ValueError, exc:
+    except ValueError as exc:
         raise InvalidResponseError, InvalidResponseError(
                 str(exc)), sys.exc_info()[2]

@@ -108,13 +108,13 @@ class MutableURL(object):
         scheme, netloc, path, params, query, fragment = self.parts
         query = urlencode(utf8dict(self.query.items()))
         components = [scheme + '://', netloc, path or '/',
-                      ';%s' % params   if params   else '',
-                      '?%s' % query    if query    else '',
-                      '#%s' % fragment if fragment else '']
+                      ';{0}'.format(params)   if params   else '',
+                      '?{0}'.format(query)    if query    else '',
+                      '#{0}'.format(fragment) if fragment else '']
         return ''.join(filter(None, components))

     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, str(self))
+        return '<{0}: {1}>'.format(type(self).__name__, self)


 class HttpDispatch(object):
@@ -127,14 +127,14 @@ class HttpDispatch(object):
     :param logger: Logger used for user/system feedback.

     """
-    user_agent = 'celery/%s' % celery_version
+    user_agent = 'celery/{version}'.format(version=celery_version)
     timeout = 5

     def __init__(self, url, method, task_kwargs, **kwargs):
         self.url = url
         self.method = method
         self.task_kwargs = task_kwargs
-        self.logger = kwargs.get("logger") or logger
+        self.logger = kwargs.get('logger') or logger

     def make_request(self, url, method, params):
         """Makes an HTTP request and returns the response."""

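Note: MutableURL round-trips a URL through its parsed parts (this mirrors the
class's docstring example; query ordering may vary):

    >>> from celery.task.http import MutableURL
    >>> url = MutableURL('http://www.google.com:6580/foo/bar?x=3&y=4#foo')
    >>> url.query['x'] = 10
    >>> str(url)
    'http://www.google.com:6580/foo/bar?y=4&x=10#foo'
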
+ 6 - 7
celery/task/sets.py

@@ -8,7 +8,6 @@

 """
 from __future__ import absolute_import
-from __future__ import with_statement

 from celery._state import get_current_worker_task
 from celery.app import app_or_default
@@ -38,15 +37,14 @@ class TaskSet(list):
         self.Publisher = Publisher or self.app.amqp.TaskProducer
         self.total = len(self)  # XXX compat

-    def apply_async(self, connection=None, connect_timeout=None,
-            publisher=None, taskset_id=None):
+    def apply_async(self, connection=None, publisher=None, taskset_id=None):
         """Apply TaskSet."""
         app = self.app

         if app.conf.CELERY_ALWAYS_EAGER:
             return self.apply(taskset_id=taskset_id)

-        with app.default_connection(connection, connect_timeout) as conn:
+        with app.connection_or_acquire(connection) as conn:
             setid = taskset_id or uuid()
             pub = publisher or self.Publisher(conn)
             results = self._async_results(setid, pub)
@@ -69,9 +67,10 @@ class TaskSet(list):
     def _sync_results(self, taskset_id):
         return [task.apply(taskset_id=taskset_id) for task in self]

-    def _get_tasks(self):
+    @property
+    def tasks(self):
         return self

-    def _set_tasks(self, tasks):
+    @tasks.setter  # noqa
+    def tasks(self, tasks):
         self[:] = tasks
-    tasks = property(_get_tasks, _set_tasks)

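Note: since TaskSet subclasses list, the new property exposes the set itself
(add is any task, as in the decorator examples above):

    >>> ts = TaskSet(tasks=[add.subtask((2, 2)), add.subtask((4, 4))])
    >>> ts.tasks is ts                      # the getter returns the list itself
    True
    >>> ts.tasks = [add.subtask((8, 8))]    # the setter replaces the contents
    >>> len(ts)
    1
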
+ 51 - 31
celery/task/trace.py

@@ -53,16 +53,27 @@ except AttributeError:
     pass


-def mro_lookup(cls, attr, stop=()):
+def mro_lookup(cls, attr, stop=(), monkey_patched=[]):
     """Returns the first node by MRO order that defines an attribute.

     :keyword stop: A list of types that if reached will stop the search.
+    :keyword monkey_patched: Use one of the stop classes if the attr's
+        module origin is not in this list; this is used to detect
+        monkey-patched attributes.

     :returns None: if the attribute was not found.

     """
     for node in cls.mro():
         if node in stop:
+            try:
+                attr = node.__dict__[attr]
+                module_origin = attr.__module__
+            except (AttributeError, KeyError):
+                pass
+            else:
+                if module_origin not in monkey_patched:
+                    return node
             return
         if attr in node.__dict__:
             return node
@@ -71,7 +82,8 @@ def mro_lookup(cls, attr, stop=()):
 def task_has_custom(task, attr):
     """Returns true if the task or one of its bases
     defines ``attr`` (excluding the one in BaseTask)."""
-    return mro_lookup(task.__class__, attr, stop=(BaseTask, object))
+    return mro_lookup(task.__class__, attr, stop=(BaseTask, object),
+                      monkey_patched=['celery.app.task'])


 class TraceInfo(object):
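
Note: a toy illustration of mro_lookup (class names are made up):

    class A(object):
        x = 1

    class B(A):
        pass

    assert mro_lookup(B, 'x') is A               # A is the first class defining 'x'
    assert mro_lookup(B, 'x', stop=(A,)) is None  # the search stops at A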
@@ -93,20 +105,16 @@ class TraceInfo(object):

     def handle_retry(self, task, store_errors=True):
         """Handle retry exception."""
-        # Create a simpler version of the RetryTaskError that stringifies
-        # the original exception instead of including the exception instance.
-        # This is for reporting the retry in logs, email etc, while
-        # guaranteeing pickleability.
+        # the exception raised is the RetryTaskError semi-predicate,
+        # and its ``exc`` attribute is the original exception raised (if any).
         req = task.request
         type_, _, tb = sys.exc_info()
         try:
-            exc = self.retval
-            message, orig_exc = exc.args
-            expanded_msg = '%s: %s' % (message, str(orig_exc))
-            einfo = ExceptionInfo((type_, type_(expanded_msg, None), tb))
+            pred = self.retval
+            einfo = ExceptionInfo((type_, pred, tb))
             if store_errors:
-                task.backend.mark_as_retry(req.id, orig_exc, einfo.traceback)
-            task.on_retry(exc, req.id, req.args, req.kwargs, einfo)
+                task.backend.mark_as_retry(req.id, pred.exc, einfo.traceback)
+            task.on_retry(pred.exc, req.id, req.args, req.kwargs, einfo)
             return einfo
         finally:
             del(tb)
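
Note: the "semi-predicate" in the comment above is the RetryTaskError
instance itself; a minimal sketch of how its exc attribute carries the
original error:

    from celery.exceptions import RetryTaskError

    try:
        raise RetryTaskError('retry in 5s', ValueError('boom'))
    except RetryTaskError as pred:
        original = pred.exc   # the exception that caused the retry, if any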
@@ -114,7 +122,7 @@ class TraceInfo(object):
     def handle_failure(self, task, store_errors=True):
         """Handle exception."""
         req = task.request
-        _, type_, tb = sys.exc_info()
+        type_, _, tb = sys.exc_info()
         try:
             exc = self.retval
             einfo = ExceptionInfo((type_, get_pickleable_exception(exc), tb))
@@ -124,7 +132,7 @@ class TraceInfo(object):
             signals.task_failure.send(sender=task, task_id=req.id,
                                       exception=exc, args=req.args,
                                       kwargs=req.kwargs,
-                                      traceback=einfo.traceback,
+                                      traceback=einfo.tb,
                                       einfo=einfo)
             return einfo
         finally:
@@ -133,6 +141,28 @@

 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         Info=TraceInfo, eager=False, propagate=False):
+    """Builds a function that traces the task's execution; catches all
+    exceptions, and saves the state and result of the task execution
+    to the result backend.
+
+    If the call was successful, it saves the result to the task result
+    backend, and sets the task status to `"SUCCESS"`.
+
+    If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts
+    the original exception, uses that as the result and sets the task status
+    to `"RETRY"`.
+
+    If the call results in an exception, it saves the exception as the task
+    result, and sets the task status to `"FAILURE"`.
+
+    Returns a function that takes the following arguments:
+
+        :param uuid: The unique id of the task.
+        :param args: List of positional args to pass on to the function.
+        :param kwargs: Keyword arguments mapping to pass on to the function.
+        :keyword request: Request dict.
+
+    """
     # If the task doesn't define a custom __call__ method
     # we optimize it away by simply calling the run method directly,
     # saving the extra method call and a line less in the stack trace.
@@ -193,11 +223,11 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 try:
                     R = retval = fun(*args, **kwargs)
                     state = SUCCESS
-                except RetryTaskError, exc:
+                except RetryTaskError as exc:
                     I = Info(RETRY, exc)
                     state, retval = I.state, I.retval
                     R = I.handle_error_state(task, eager=eager)
-                except Exception, exc:
+                except Exception as exc:
                     if propagate:
                         raise
                     I = Info(FAILURE, exc)
@@ -205,18 +235,8 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     R = I.handle_error_state(task, eager=eager)
                     [subtask(errback).apply_async((uuid, ))
                         for errback in task_request.errbacks or []]
-                except BaseException, exc:
+                except BaseException as exc:
                     raise
-                except:  # pragma: no cover
-                    # For Python2.5 where raising strings are still allowed
-                    # (but deprecated)
-                    if propagate:
-                        raise
-                    I = Info(FAILURE, None)
-                    state, retval = I.state, I.retval
-                    R = I.handle_error_state(task, eager=eager)
-                    [subtask(errback).apply_async((uuid, ))
-                        for errback in task_request.errbacks or []]
                 else:
                     # callback tasks must be applied before the result is
                     # stored, so that result.children is populated.
@@ -247,10 +267,10 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                         loader_cleanup()
                     except (KeyboardInterrupt, SystemExit, MemoryError):
                         raise
-                    except Exception, exc:
+                    except Exception as exc:
                         _logger.error('Process cleanup failed: %r', exc,
                                       exc_info=True)
-        except Exception, exc:
+        except Exception as exc:
             if eager:
                 raise
             R = report_internal_error(task, exc)
@@ -264,7 +284,7 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
         return task.__trace__(uuid, args, kwargs, request)[0]
-    except Exception, exc:
+    except Exception as exc:
         return report_internal_error(task, exc)


@@ -284,7 +304,7 @@ def report_internal_error(task, exc):
         _value = task.backend.prepare_exception(exc)
         exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
         warn(RuntimeWarning(
-            'Exception raised outside body: %r:\n%s' % (
+            'Exception raised outside body: {0!r}:\n{1}'.format(
                 exc, exc_info.traceback)))
         return exc_info
     finally:

+ 1 - 3
celery/tests/__init__.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import logging
 import os
@@ -80,7 +79,6 @@ def import_all_modules(name=__name__, file=__file__,


 if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv:
-    from celery.tests.utils import catch_warnings
-    with catch_warnings(record=True):
+    with warnings.catch_warnings(record=True):
         import_all_modules()
     warnings.resetwarnings()

+ 8 - 9
celery/tests/app/test_amqp.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from kombu import Exchange, Queue
 from mock import Mock
@@ -25,16 +24,16 @@ class test_TaskProducer(AppCase):
         publisher.declare()

     def test_retry_policy(self):
-        pub = self.app.amqp.TaskProducer(Mock())
-        pub.channel.connection.client.declared_entities = set()
-        pub.delay_task('tasks.add', (2, 2), {},
-                       retry_policy={'frobulate': 32.4})
+        prod = self.app.amqp.TaskProducer(Mock())
+        prod.channel.connection.client.declared_entities = set()
+        prod.publish_task('tasks.add', (2, 2), {},
+                          retry_policy={'frobulate': 32.4})

     def test_publish_no_retry(self):
-        pub = self.app.amqp.TaskProducer(Mock())
-        pub.channel.connection.client.declared_entities = set()
-        pub.delay_task('tasks.add', (2, 2), {}, retry=False, chord=123)
-        self.assertFalse(pub.connection.ensure.call_count)
+        prod = self.app.amqp.TaskProducer(Mock())
+        prod.channel.connection.client.declared_entities = set()
+        prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123)
+        self.assertFalse(prod.connection.ensure.call_count)


 class test_compat_TaskPublisher(AppCase):

+ 25 - 20
celery/tests/app/test_app.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import os

@@ -143,8 +142,8 @@ class test_App(Case):
         check.assert_called_with(foo)

     def test_task_sets_main_name_MP_MAIN_FILE(self):
-        from celery.app import task as _task
-        _task.MP_MAIN_FILE = __file__
+        from celery import utils as _utils
+        _utils.MP_MAIN_FILE = __file__
         try:
             app = Celery('xuzzy', set_as_current=False)

@@ -154,17 +153,26 @@ class test_App(Case):

             self.assertEqual(foo.name, 'xuzzy.foo')
         finally:
-            _task.MP_MAIN_FILE = None
+            _utils.MP_MAIN_FILE = None

     def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.app.task import Task
+        from celery.task import Task as OldTask

-        class timkX(Task):
+        class timkX(OldTask):
             abstract = True

         app = Celery(set_as_current=False, accept_magic_kwargs=True)
         timkX.bind(app)
-        self.assertTrue(timkX.accept_magic_kwargs)
+        # see #918
+        self.assertFalse(timkX.accept_magic_kwargs)
+
+        from celery import Task as NewTask
+
+        class timkY(NewTask):
+            abstract = True
+
+        timkY.bind(app)
+        self.assertFalse(timkY.accept_magic_kwargs)

     def test_annotate_decorator(self):
         from celery.app.task import Task
@@ -205,7 +213,7 @@ class test_App(Case):
         def aawsX():
             pass

-        with patch('celery.app.amqp.TaskProducer.delay_task') as dt:
+        with patch('celery.app.amqp.TaskProducer.publish_task') as dt:
             aawsX.apply_async((4, 5))
             args = dt.call_args[0][1]
             self.assertEqual(args, ('hello', 4, 5))
@@ -298,7 +306,6 @@ class test_App(Case):
     def test_config_from_cmdline(self):
         cmdline = ['.always_eager=no',
                    '.result_backend=/dev/null',
-                   '.task_error_whitelist=(list)["a", "b", "c"]',
                    'celeryd.prefetch_multiplier=368',
                    '.foobarstring=(string)300',
                    '.foobarint=(int)300',
@@ -307,8 +314,6 @@ class test_App(Case):
         self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER)
         self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, '/dev/null')
         self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368)
-        self.assertListEqual(self.app.conf.CELERY_TASK_ERROR_WHITELIST,
-                             ['a', 'b', 'c'])
         self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, '300')
         self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300)
         self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS,
@@ -433,20 +438,20 @@ class test_App(Case):
             chan.close()
         assert conn.transport_cls == 'memory'

-        pub = self.app.amqp.TaskProducer(conn,
+        prod = self.app.amqp.TaskProducer(conn,
                 exchange=Exchange('foo_exchange'))

         dispatcher = Dispatcher()
-        self.assertTrue(pub.delay_task('footask', (), {},
-                                       exchange='moo_exchange',
-                                       routing_key='moo_exchange',
-                                       event_dispatcher=dispatcher))
+        self.assertTrue(prod.publish_task('footask', (), {},
+                                          exchange='moo_exchange',
+                                          routing_key='moo_exchange',
+                                          event_dispatcher=dispatcher))
         self.assertTrue(dispatcher.sent)
         self.assertEqual(dispatcher.sent[0][0], 'task-sent')
-        self.assertTrue(pub.delay_task('footask', (), {},
-                                       event_dispatcher=dispatcher,
-                                       exchange='bar_exchange',
-                                       routing_key='bar_exchange'))
+        self.assertTrue(prod.publish_task('footask', (), {},
+                                          event_dispatcher=dispatcher,
+                                          exchange='bar_exchange',
+                                          routing_key='bar_exchange'))

     def test_error_mail_sender(self):
         x = ErrorMail.subject % {'name': 'task_name',

+ 5 - 6
celery/tests/app/test_beat.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import errno

@@ -62,9 +61,9 @@ class test_ScheduleEntry(Case):
         self.assertEqual(entry.total_run_count, 0)

         next_run_at = entry.last_run_at + timedelta(seconds=10)
-        next = entry.next(next_run_at)
-        self.assertGreaterEqual(next.last_run_at, next_run_at)
-        self.assertEqual(next.total_run_count, 1)
+        next_entry = entry.next(next_run_at)
+        self.assertGreaterEqual(next_entry.last_run_at, next_run_at)
+        self.assertEqual(next_entry.total_run_count, 1)

     def test_is_due(self):
         entry = self.create_entry(schedule=timedelta(seconds=10))
@@ -73,8 +72,8 @@ class test_ScheduleEntry(Case):
         self.assertGreater(next_time_to_run1, 9)

         next_run_at = entry.last_run_at - timedelta(seconds=10)
-        next = entry.next(next_run_at)
-        due2, next_time_to_run2 = next.is_due()
+        next_entry = entry.next(next_run_at)
+        due2, next_time_to_run2 = next_entry.is_due()
         self.assertTrue(due2)
         self.assertGreater(next_time_to_run2, 9)


+ 1 - 1
celery/tests/app/test_builtins.py

@@ -92,7 +92,7 @@ class test_group(Case):
         x = group([add.s(4, 4), add.s(8, 8)])
         x.name = self.task.name
         res = x.apply()
-        self.assertEqual(res.get().join(), [8, 16])
+        self.assertEqual(res.get(), [8, 16])

     def test_apply_async(self):
         x = group([add.s(4, 4), add.s(8, 8)])

+ 0 - 1
celery/tests/app/test_control.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from functools import wraps


+ 1 - 2
celery/tests/app/test_defaults.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import sys

@@ -34,7 +33,7 @@ class test_defaults(Case):

     def test_deprecated(self):
         source = Mock()
-        source.BROKER_INSIST = True
+        source.CELERYD_LOG_LEVEL = 2
         with patch('celery.utils.warn_deprecated') as warn:
             self.defaults.find_deprecated_settings(source)
             self.assertTrue(warn.called)

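The sample key changes because `BROKER_INSIST` is removed outright, while `CELERYD_LOG_LEVEL` remains as a deprecated setting, which is what `find_deprecated_settings` is meant to flag. A rough sketch of the scan-and-warn pattern such a helper implements; the names and structure here are illustrative, not Celery's actual implementation:

    import warnings

    _DEPRECATED = {'CELERYD_LOG_LEVEL', 'CELERYD_LOG_FILE'}  # example keys

    def find_deprecated_settings(source):
        # warn once for every deprecated setting the config object defines
        for key in _DEPRECATED:
            if getattr(source, key, None) is not None:
                warnings.warn(DeprecationWarning(
                    'The %s setting is deprecated' % key))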
+ 3 - 4
celery/tests/app/test_loaders.py

@@ -1,8 +1,8 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import os
 import sys
+import warnings

 from mock import Mock, patch

@@ -20,7 +20,6 @@ from celery.utils.imports import NotAPackage
 from celery.utils.mail import SendmailWarning

 from celery.tests.utils import AppCase, Case
-from celery.tests.compat import catch_warnings


 class ObjectConfig(object):
@@ -160,7 +159,7 @@ class test_DefaultLoader(Case):
         self.assertTrue(l.wanted_module_item('Foo'))
         self.assertFalse(l.wanted_module_item('_FOO'))
         self.assertFalse(l.wanted_module_item('__FOO'))
-        self.assertFalse(l.wanted_module_item('foo'))
+        self.assertTrue(l.wanted_module_item('foo'))

     @patch('celery.loaders.default.find_module')
     def test_read_configuration_not_a_package(self, find_module):
@@ -237,7 +236,7 @@ class test_DefaultLoader(Case):
             def find_module(self, name):
                 raise ImportError(name)

-        with catch_warnings(record=True):
+        with warnings.catch_warnings(record=True):
             l = _Loader()
             self.assertDictEqual(l.conf, {})
             context_executed[0] = True

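The `celery.tests.compat.catch_warnings` shim is dropped in favour of the standard library context manager, available since Python 2.6: `warnings.catch_warnings(record=True)` yields a list that accumulates every warning raised inside the block:

    import warnings

    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter('always')   # record duplicates too
        warnings.warn('configuration module not found')
        assert 'not found' in str(log[0].message)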
+ 0 - 2
celery/tests/app/test_log.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import sys
 import logging
@@ -219,7 +218,6 @@ class test_default_logger(AppCase):
             p.flush()
             p.close()
             self.assertFalse(p.isatty())
-            self.assertIsNone(p.fileno())

     def test_logging_proxy_recurse_protection(self):
         logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,

+ 1 - 2
celery/tests/app/test_routes.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from functools import wraps

@@ -13,7 +12,7 @@ from celery.tests.utils import Case


 def Router(*args, **kwargs):
-    return routes.Router(*args, **dict(kwargs, app=current_app))
+    return routes.Router(*args, app=current_app, **kwargs)


 @task()

+ 6 - 17
celery/tests/backends/test_amqp.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import socket

@@ -64,7 +63,7 @@ class test_AMQPBackend(AppCase):
         tid3 = uuid()
         try:
             raise KeyError('foo')
-        except KeyError, exception:
+        except KeyError as exception:
             einfo = ExceptionInfo()
             tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback)
             self.assertEqual(tb2.get_status(tid3), states.FAILURE)
@@ -77,16 +76,6 @@ class test_AMQPBackend(AppCase):
             tid = uuid()
             self.assertEqual(repair_uuid(tid.replace('-', '')), tid)

-    def test_expires_defaults_to_config_deprecated_setting(self):
-        app = app_or_default()
-        prev = app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES
-        app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = 10
-        try:
-            b = self.create_backend()
-            self.assertEqual(b.queue_arguments.get('x-expires'), 10 * 1000.0)
-        finally:
-            app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = prev
-
     def test_expires_is_int(self):
         b = self.create_backend(expires=48)
         self.assertEqual(b.queue_arguments.get('x-expires'), 48 * 1000.0)
@@ -254,7 +243,7 @@ class test_AMQPBackend(AppCase):

         b = Backend()
         with self.assertRaises(KeyError):
-            b.get_many(['id1']).next()
+            next(b.get_many(['id1']))

     def test_test_get_many_raises_inner_block(self):

@@ -265,19 +254,19 @@ class test_AMQPBackend(AppCase):

         b = Backend()
         with self.assertRaises(KeyError):
-            b.get_many(['id1']).next()
+            next(b.get_many(['id1']))

     def test_no_expires(self):
         b = self.create_backend(expires=None)
         app = app_or_default()
-        prev = app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES
-        app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = None
+        prev = app.conf.CELERY_TASK_RESULT_EXPIRES
+        app.conf.CELERY_TASK_RESULT_EXPIRES = None
         try:
             b = self.create_backend(expires=None)
             with self.assertRaises(KeyError):
                 b.queue_arguments['x-expires']
         finally:
-            app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = prev
+            app.conf.CELERY_TASK_RESULT_EXPIRES = prev

     def test_process_cleanup(self):
         self.create_backend().process_cleanup()

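Two source-compatibility idioms recur through this file and the rest of the diff: `except E as name` (the only spelling Python 3 accepts, valid since 2.6) replaces `except E, name`, and the `next()` builtin replaces the Python 2 only `.next()` iterator method:

    try:
        raise KeyError('foo')
    except KeyError as exc:   # 'except KeyError, exc:' is a SyntaxError on Python 3
        exception = exc

    it = iter(['meta1', 'meta2'])
    first = next(it)          # it.next() does not exist on Python 3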
+ 0 - 1
celery/tests/backends/test_backends.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from mock import patch


+ 12 - 54
celery/tests/backends/test_base.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import sys
 import types
@@ -17,8 +16,11 @@ from celery.utils.serialization import UnpickleableExceptionWrapper
 from celery.utils.serialization import get_pickleable_exception as gpe

 from celery import states
-from celery.backends.base import BaseBackend, KeyValueStoreBackend
-from celery.backends.base import BaseDictBackend, DisabledBackend
+from celery.backends.base import (
+    BaseBackend,
+    KeyValueStoreBackend,
+    DisabledBackend,
+)
 from celery.utils import uuid

 from celery.tests.utils import Case
@@ -49,53 +51,9 @@ class test_serialization(Case):

 class test_BaseBackend_interface(Case):

-    def test_get_status(self):
-        with self.assertRaises(NotImplementedError):
-            b.get_status('SOMExx-N0Nex1stant-IDxx-')
-
     def test__forget(self):
         with self.assertRaises(NotImplementedError):
-            b.forget('SOMExx-N0Nex1stant-IDxx-')
-
-    def test_get_children(self):
-        with self.assertRaises(NotImplementedError):
-            b.get_children('SOMExx-N0Nex1stant-IDxx-')
-
-    def test_store_result(self):
-        with self.assertRaises(NotImplementedError):
-            b.store_result('SOMExx-N0nex1stant-IDxx-', 42, states.SUCCESS)
-
-    def test_mark_as_started(self):
-        with self.assertRaises(NotImplementedError):
-            b.mark_as_started('SOMExx-N0nex1stant-IDxx-')
-
-    def test_reload_task_result(self):
-        with self.assertRaises(NotImplementedError):
-            b.reload_task_result('SOMExx-N0nex1stant-IDxx-')
-
-    def test_reload_group_result(self):
-        with self.assertRaises(NotImplementedError):
-            b.reload_group_result('SOMExx-N0nex1stant-IDxx-')
-
-    def test_get_result(self):
-        with self.assertRaises(NotImplementedError):
-            b.get_result('SOMExx-N0nex1stant-IDxx-')
-
-    def test_restore_group(self):
-        with self.assertRaises(NotImplementedError):
-            b.restore_group('SOMExx-N0nex1stant-IDxx-')
-
-    def test_delete_group(self):
-        with self.assertRaises(NotImplementedError):
-            b.delete_group('SOMExx-N0nex1stant-IDxx-')
-
-    def test_save_group(self):
-        with self.assertRaises(NotImplementedError):
-            b.save_group('SOMExx-N0nex1stant-IDxx-', 'blergh')
-
-    def test_get_traceback(self):
-        with self.assertRaises(NotImplementedError):
-            b.get_traceback('SOMExx-N0nex1stant-IDxx-')
+            b._forget('SOMExx-N0Nex1stant-IDxx-')

     def test_forget(self):
         with self.assertRaises(NotImplementedError):
@@ -164,7 +122,7 @@ class KVBackend(KeyValueStoreBackend):

     def __init__(self, *args, **kwargs):
         self.db = {}
-        super(KVBackend, self).__init__(KeyValueStoreBackend)
+        super(KVBackend, self).__init__()

     def get(self, key):
         return self.db.get(key)
@@ -182,10 +140,10 @@ class KVBackend(KeyValueStoreBackend):
         self.db.pop(key, None)


-class DictBackend(BaseDictBackend):
+class DictBackend(BaseBackend):

     def __init__(self, *args, **kwargs):
-        BaseDictBackend.__init__(self, *args, **kwargs)
+        BaseBackend.__init__(self, *args, **kwargs)
         self._data = {'can-delete': {'result': 'foo'}}

     def _restore_group(self, group_id):
@@ -200,7 +158,7 @@ class DictBackend(BaseDictBackend):
         self._data.pop(group_id, None)


-class test_BaseDictBackend(Case):
+class test_BaseBackend_dict(Case):

     def setUp(self):
         self.b = DictBackend()
@@ -218,13 +176,13 @@ class test_BaseDictBackend(Case):
         self.assertEqual(str(e), "'foo'")

     def test_save_group(self):
-        b = BaseDictBackend()
+        b = BaseBackend()
         b._save_group = Mock()
         b.save_group('foofoo', 'xxx')
         b._save_group.assert_called_with('foofoo', 'xxx')

     def test_forget_interface(self):
-        b = BaseDictBackend()
+        b = BaseBackend()
         with self.assertRaises(NotImplementedError):
             b.forget('foo')


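With `BaseDictBackend` folded into `BaseBackend`, the interface tests now target the private hooks directly: public methods such as `forget()` delegate to overridable `_forget()`-style hooks that raise `NotImplementedError` by default. A hedged sketch of that template-method shape, simplified rather than the actual class:

    class BaseBackend(object):
        def forget(self, task_id):
            # public entry point; concrete backends override _forget()
            return self._forget(task_id)

        def _forget(self, task_id):
            raise NotImplementedError('backend does not support forget')

    class DictBackend(BaseBackend):
        def __init__(self):
            self._data = {}

        def _forget(self, task_id):
            self._data.pop(task_id, None)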
+ 1 - 2
celery/tests/backends/test_cache.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import sys
 import types
@@ -51,7 +50,7 @@ class test_CacheBackend(Case):
     def test_mark_as_failure(self):
         try:
             raise KeyError('foo')
-        except KeyError, exception:
+        except KeyError as exception:
             self.tb.mark_as_failure(self.tid, exception)
             self.assertEqual(self.tb.get_status(self.tid), states.FAILURE)
             self.assertIsInstance(self.tb.get_result(self.tid), KeyError)

+ 0 - 1
celery/tests/backends/test_cassandra.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import socket


+ 2 - 22
celery/tests/backends/test_database.py

@@ -1,7 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement
-
-import sys

 from datetime import datetime

@@ -50,23 +47,6 @@ class test_DatabaseBackend(Case):
             with self.assertRaises(ImproperlyConfigured):
                 _sqlalchemy_installed()

-    def test_pickle_hack_for_sqla_05(self):
-        import sqlalchemy as sa
-        from celery.backends.database import session
-        prev_base = session.ResultModelBase
-        prev_ver, sa.__version__ = sa.__version__, '0.5.0'
-        prev_models = sys.modules.pop('celery.backends.database.models', None)
-        try:
-            from sqlalchemy.ext.declarative import declarative_base
-            session.ResultModelBase = declarative_base()
-            from celery.backends.database.dfd042c7 import PickleType as Type1
-            from celery.backends.database.models import PickleType as Type2
-            self.assertIs(Type1, Type2)
-        finally:
-            sys.modules['celery.backends.database.models'] = prev_models
-            sa.__version__ = prev_ver
-            session.ResultModelBase = prev_base
-
     def test_missing_dburi_raises_ImproperlyConfigured(self):
         conf = app_or_default().conf
         prev, conf.CELERY_RESULT_DBURI = conf.CELERY_RESULT_DBURI, None
@@ -129,7 +109,7 @@ class test_DatabaseBackend(Case):
         tid = uuid()
         try:
             raise KeyError('foo')
-        except KeyError, exception:
+        except KeyError as exception:
             import traceback
             trace = '\n'.join(traceback.format_stack())
             tb.mark_as_retry(tid, exception, traceback=trace)
@@ -143,7 +123,7 @@ class test_DatabaseBackend(Case):
         tid3 = uuid()
         try:
             raise KeyError('foo')
-        except KeyError, exception:
+        except KeyError as exception:
             import traceback
             trace = '\n'.join(traceback.format_stack())
             tb.mark_as_failure(tid3, exception, traceback=trace)

+ 0 - 1
celery/tests/backends/test_mongodb.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import datetime
 import uuid

+ 0 - 1
celery/tests/backends/test_redis.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from datetime import timedelta


+ 2 - 4
celery/tests/bin/test_base.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 import os

@@ -44,11 +43,10 @@ class test_Command(AppCase):
             Command().run()

     @patch('sys.stdout')
-    def test_parse_options_version_only(self, stdout):
+    def test_early_version(self, stdout):
         cmd = Command()
         with self.assertRaises(SystemExit):
-            cmd.parse_options('prog', ['--version'])
-        stdout.write.assert_called_with(cmd.version + '\n')
+            cmd.early_version(['--version'])

     def test_execute_from_commandline(self):
         cmd = MockCommand()

+ 0 - 1
celery/tests/bin/test_camqadm.py

@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 from mock import Mock, patch


Some files were not shown because too many files changed in this diff