
Merge branch 'master' into 5.0-devel

Ask Solem 9 years ago
parent
commit
1a1a99e9b9
100 changed files with 5308 additions and 3334 deletions
  1. 4 0
      .gitignore
  2. 55 0
      .landscape.yml
  3. 16 9
      .travis.yml
  4. 207 141
      CONTRIBUTING.rst
  5. 2 0
      CONTRIBUTORS.txt
  6. 3 0
      MANIFEST.in
  7. 23 10
      Makefile
  8. 60 41
      README.rst
  9. 22 14
      celery/__init__.py
  10. 3 2
      celery/__main__.py
  11. 9 5
      celery/_state.py
  12. 10 7
      celery/app/__init__.py
  13. 23 17
      celery/app/amqp.py
  14. 3 2
      celery/app/annotations.py
  15. 63 0
      celery/app/backends.py
  16. 171 95
      celery/app/base.py
  17. 12 16
      celery/app/builtins.py
  18. 160 47
      celery/app/control.py
  19. 16 9
      celery/app/defaults.py
  20. 39 0
      celery/app/events.py
  21. 13 11
      celery/app/log.py
  22. 2 0
      celery/app/registry.py
  23. 2 2
      celery/app/routes.py
  24. 120 73
      celery/app/task.py
  25. 29 14
      celery/app/trace.py
  26. 28 10
      celery/app/utils.py
  27. 14 10
      celery/apps/beat.py
  28. 483 0
      celery/apps/multi.py
  29. 41 20
      celery/apps/worker.py
  30. 0 59
      celery/backends/__init__.py
  31. 49 17
      celery/backends/async.py
  32. 72 34
      celery/backends/base.py
  33. 3 0
      celery/backends/cache.py
  34. 6 6
      celery/backends/cassandra.py
  35. 2 1
      celery/backends/consul.py
  36. 2 1
      celery/backends/database/__init__.py
  37. 1 1
      celery/backends/database/models.py
  38. 1 0
      celery/backends/database/session.py
  39. 1 1
      celery/backends/filesystem.py
  40. 8 6
      celery/backends/mongodb.py
  41. 24 22
      celery/backends/redis.py
  42. 5 3
      celery/backends/riak.py
  43. 93 67
      celery/backends/rpc.py
  44. 61 48
      celery/beat.py
  45. 11 10
      celery/bin/amqp.py
  46. 166 140
      celery/bin/base.py
  47. 26 14
      celery/bin/beat.py
  48. 292 233
      celery/bin/celery.py
  49. 50 86
      celery/bin/celeryd_detach.py
  50. 28 17
      celery/bin/events.py
  51. 39 22
      celery/bin/graph.py
  52. 7 5
      celery/bin/logtool.py
  53. 258 453
      celery/bin/multi.py
  54. 92 83
      celery/bin/worker.py
  55. 6 14
      celery/bootsteps.py
  56. 349 216
      celery/canvas.py
  57. 2 1
      celery/concurrency/__init__.py
  58. 72 62
      celery/concurrency/asynpool.py
  59. 4 1
      celery/concurrency/base.py
  60. 8 4
      celery/concurrency/eventlet.py
  61. 15 10
      celery/concurrency/gevent.py
  62. 3 2
      celery/concurrency/prefork.py
  63. 8 5
      celery/concurrency/solo.py
  64. 13 8
      celery/contrib/abortable.py
  65. 120 64
      celery/contrib/migrate.py
  66. 166 0
      celery/contrib/pytest.py
  67. 6 5
      celery/contrib/rdb.py
  68. 6 2
      celery/contrib/sphinx.py
  69. 0 0
      celery/contrib/testing/__init__.py
  70. 105 0
      celery/contrib/testing/app.py
  71. 186 0
      celery/contrib/testing/manager.py
  72. 95 0
      celery/contrib/testing/mocks.py
  73. 9 0
      celery/contrib/testing/tasks.py
  74. 154 0
      celery/contrib/testing/worker.py
  75. 8 418
      celery/events/__init__.py
  76. 6 4
      celery/events/cursesmon.py
  77. 227 0
      celery/events/dispatcher.py
  78. 4 2
      celery/events/dumper.py
  79. 58 0
      celery/events/event.py
  80. 134 0
      celery/events/receiver.py
  81. 5 3
      celery/events/snapshot.py
  82. 36 12
      celery/events/state.py
  83. 16 20
      celery/exceptions.py
  84. 0 149
      celery/five.py
  85. 1 0
      celery/fixups/__init__.py
  86. 20 113
      celery/fixups/django.py
  87. 1 1
      celery/loaders/__init__.py
  88. 1 1
      celery/loaders/app.py
  89. 12 17
      celery/loaders/base.py
  90. 1 2
      celery/loaders/default.py
  91. 224 8
      celery/local.py
  92. 106 80
      celery/platforms.py
  93. 57 42
      celery/result.py
  94. 114 75
      celery/schedules.py
  95. 7 7
      celery/security/certificate.py
  96. 2 1
      celery/security/key.py
  97. 4 3
      celery/security/serialization.py
  98. 1 0
      celery/security/utils.py
  99. 6 4
      celery/states.py
  100. 0 94
      celery/tests/__init__.py

+ 4 - 0
.gitignore

@@ -25,3 +25,7 @@ celery/tests/cover/
 .ve*
 .ve*
 cover/
 cover/
 .vagrant/
 .vagrant/
+.cache/
+htmlcov/
+coverage.xml
+test.db

+ 55 - 0
.landscape.yml

@@ -0,0 +1,55 @@
+doc-warnings: false
+test-warnings: false
+max-line-length: 79
+inherits:
+    - strictness_veryhigh
+uses:
+    - celery
+autodetect: true
+requirements:
+    - requirements/default.txt
+    - requirements/test.txt
+ignore-paths:
+    - docs
+    - t
+python-targets:
+    - 2
+    - 3
+pep8:
+    full: true
+    disable:
+        - N806
+        - N802
+        - N801
+        - N803
+pyroma:
+    run: true
+pylint:
+    disable:
+        - missing-docstring
+        - too-many-arguments
+        - too-many-locals
+        - redefined-builtin
+        - not-callable
+        - cyclic-import
+        - expression-not-assigned
+        - lost-exception
+        - dangerous-default-value
+        - unused-argument
+        - protected-access
+        - invalid-name
+        - too-many-instance-attributes
+        - bad-builtin
+        - abstract-method
+        - global-statement
+        - too-many-public-methods
+        - no-self-use
+        - unnecessary-lambda
+        - too-few-public-methods
+        - attribute-defined-outside-init
+        - too-many-ancestors
+        - too-many-return-statements
+        - bad-mcs-classmethod-argument
+        - bad-mcs-method-argument
+    options:
+        exclude-protected: _reader, _writer, _popen, _sentinel_poll, _job, _is_alive, _write_to, _scheduled_for, _terminated, _accepted, _set_terminated, _payload, _cancel
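
Landscape.io runs the profile above automatically on every push; roughly the same checks can be reproduced locally with the ``prospector`` tool that Landscape is built on. The invocation below is an illustrative sketch only (not part of this commit), and the ``--profile`` flag and profile-file handling should be verified against your prospector version:

::

    # hypothetical local equivalent of the Landscape run
    $ pip install prospector
    $ prospector --profile .landscape.yml celery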

+ 16 - 9
.travis.yml

@@ -1,5 +1,5 @@
 language: python
 language: python
-sudo: false
+sudo: required
 cache: false
 cache: false
 python:
 python:
     - '3.5'
     - '3.5'
@@ -9,15 +9,22 @@ env:
   global:
   global:
     PYTHONUNBUFFERED=yes
     PYTHONUNBUFFERED=yes
   matrix:
   matrix:
-    - TOXENV=2.7
-    - TOXENV=3.4
-    - TOXENV=3.5
-    - TOXENV=pypy PYPY_VERSION="5.3"
-    - TOXENV=pypy3
+    - TOXENV=2.7-unit
+    - TOXENV=2.7-integration
+    - TOXENV=3.4-unit
+    - TOXENV=3.4-integration
+    - TOXENV=3.5-unit
+    - TOXENV=3.5-integration
+    - TOXENV=pypy-unit PYPY_VERSION="5.3"
+    - TOXENV=pypy-integration PYPY_VERSION="5.3"
+    - TOXENV=pypy3-unit
+    - TOXENV=pypy3-integration
     - TOXENV=flake8
     - TOXENV=flake8
     - TOXENV=flakeplus
     - TOXENV=flakeplus
     - TOXENV=apicheck
     - TOXENV=apicheck
     - TOXENV=configcheck
     - TOXENV=configcheck
+    - TOXENV=pydocstyle
+    - TOXENV=cov
 before_install:
 before_install:
     - |
     - |
           if [ "$TOXENV" = "pypy" ]; then
           if [ "$TOXENV" = "pypy" ]; then
@@ -33,12 +40,12 @@ before_install:
           fi
           fi
 install: travis_retry pip install -U tox
 install: travis_retry pip install -U tox
 script: tox -v -- -v
 script: tox -v -- -v
-after_success:
-  - .tox/$TRAVIS_PYTHON_VERSION/bin/coverage xml
-  - .tox/$TRAVIS_PYTHON_VERSION/bin/codecov -e TOXENV
 notifications:
 notifications:
   irc:
   irc:
     channels:
     channels:
       - "chat.freenode.net#celery"
       - "chat.freenode.net#celery"
     on_success: change
     on_success: change
     on_failure: change
     on_failure: change
+services:
+    - rabbitmq
+    - redis
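
The new ``TOXENV`` matrix above splits every interpreter into a ``-unit`` and an ``-integration`` run, with RabbitMQ and Redis started as Travis services for the integration half. Assuming the accompanying ``tox.ini`` (not shown in this diff) defines environments with these names, a single entry can be reproduced locally, for example:

::

    $ pip install -U tox
    $ tox -e 3.5-unit            # unit tests only, on Python 3.5
    $ tox -e 3.5-integration     # integration tests; expects a local rabbitmq/redis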

+ 207 - 141
CONTRIBUTING.rst

@@ -6,18 +6,18 @@
 
 
 Welcome!
 Welcome!
 
 
-This document is fairly extensive and you are not really expected
+This document is fairly extensive and you aren't really expected
 to study this in detail for small contributions;
 to study this in detail for small contributions;
 
 
     The most important rule is that contributing must be easy
     The most important rule is that contributing must be easy
-    and that the community is friendly and not nitpicking on details
+    and that the community is friendly and not nitpicking on details,
     such as coding style.
     such as coding style.
 
 
 If you're reporting a bug you should read the Reporting bugs section
 If you're reporting a bug you should read the Reporting bugs section
 below to ensure that your bug report contains enough information
 below to ensure that your bug report contains enough information
 to successfully diagnose the issue, and if you're contributing code
 to successfully diagnose the issue, and if you're contributing code
 you should try to mimic the conventions you see surrounding the code
 you should try to mimic the conventions you see surrounding the code
-you are working on, but in the end all patches will be cleaned up by
+you're working on, but in the end all patches will be cleaned up by
 the person merging the changes so don't worry too much.
 the person merging the changes so don't worry too much.
 
 
 .. contents::
 .. contents::
@@ -28,8 +28,8 @@ the person merging the changes so don't worry too much.
 Community Code of Conduct
 Community Code of Conduct
 =========================
 =========================
 
 
-The goal is to maintain a diverse community that is pleasant for everyone.
-That is why we would greatly appreciate it if everyone contributing to and
+The goal is to maintain a diverse community that's pleasant for everyone.
+That's why we would greatly appreciate it if everyone contributing to and
 interacting with the community also followed this Code of Conduct.
 interacting with the community also followed this Code of Conduct.
 
 
 The Code of Conduct covers our behavior as members of the community,
 The Code of Conduct covers our behavior as members of the community,
@@ -42,72 +42,72 @@ the `Pylons Code of Conduct`_.
 .. _`Ubuntu Code of Conduct`: http://www.ubuntu.com/community/conduct
 .. _`Ubuntu Code of Conduct`: http://www.ubuntu.com/community/conduct
 .. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
 .. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
 
 
-Be considerate.
----------------
+Be considerate
+--------------
 
 
 Your work will be used by other people, and you in turn will depend on the
 Your work will be used by other people, and you in turn will depend on the
-work of others.  Any decision you take will affect users and colleagues, and
+work of others. Any decision you take will affect users and colleagues, and
 we expect you to take those consequences into account when making decisions.
 we expect you to take those consequences into account when making decisions.
 Even if it's not obvious at the time, our contributions to Celery will impact
 Even if it's not obvious at the time, our contributions to Celery will impact
-the work of others.  For example, changes to code, infrastructure, policy,
+the work of others. For example, changes to code, infrastructure, policy,
 documentation and translations during a release may negatively impact
 documentation and translations during a release may negatively impact
 others' work.
 others' work.
 
 
-Be respectful.
---------------
+Be respectful
+-------------
 
 
-The Celery community and its members treat one another with respect.  Everyone
-can make a valuable contribution to Celery.  We may not always agree, but
-disagreement is no excuse for poor behavior and poor manners.  We might all
+The Celery community and its members treat one another with respect. Everyone
+can make a valuable contribution to Celery. We may not always agree, but
+disagreement is no excuse for poor behavior and poor manners. We might all
 experience some frustration now and then, but we cannot allow that frustration
 experience some frustration now and then, but we cannot allow that frustration
-to turn into a personal attack.  It's important to remember that a community
-where people feel uncomfortable or threatened is not a productive one.  We
+to turn into a personal attack. It's important to remember that a community
+where people feel uncomfortable or threatened isn't a productive one. We
 expect members of the Celery community to be respectful when dealing with
 expect members of the Celery community to be respectful when dealing with
 other contributors as well as with people outside the Celery project and with
 other contributors as well as with people outside the Celery project and with
 users of Celery.
 users of Celery.
 
 
-Be collaborative.
------------------
+Be collaborative
+----------------
 
 
 Collaboration is central to Celery and to the larger free software community.
 Collaboration is central to Celery and to the larger free software community.
-We should always be open to collaboration.  Your work should be done
+We should always be open to collaboration. Your work should be done
 transparently and patches from Celery should be given back to the community
 transparently and patches from Celery should be given back to the community
-when they are made, not just when the distribution releases.  If you wish
+when they're made, not just when the distribution releases. If you wish
 to work on new code for existing upstream projects, at least keep those
 to work on new code for existing upstream projects, at least keep those
-projects informed of your ideas and progress.  It may not be possible to
+projects informed of your ideas and progress. It may not be possible to
 get consensus from upstream, or even from your colleagues about the correct
 get consensus from upstream, or even from your colleagues about the correct
 implementation for an idea, so don't feel obliged to have that agreement
 implementation for an idea, so don't feel obliged to have that agreement
 before you begin, but at least keep the outside world informed of your work,
 before you begin, but at least keep the outside world informed of your work,
-and publish your work in a way that allows outsiders to test, discuss and
+and publish your work in a way that allows outsiders to test, discuss, and
 contribute to your efforts.
 contribute to your efforts.
 
 
-When you disagree, consult others.
-----------------------------------
+When you disagree, consult others
+---------------------------------
 
 
 Disagreements, both political and technical, happen all the time and
 Disagreements, both political and technical, happen all the time and
-the Celery community is no exception.  It is important that we resolve
+the Celery community is no exception. It's important that we resolve
 disagreements and differing views constructively and with the help of the
 disagreements and differing views constructively and with the help of the
-community and community process.  If you really want to go a different
+community and community process. If you really want to go a different
 way, then we encourage you to make a derivative distribution or alternate
 way, then we encourage you to make a derivative distribution or alternate
 set of packages that still build on the work we've done to utilize as common
 set of packages that still build on the work we've done to utilize as common
 of a core as possible.
 of a core as possible.
 
 
-When you are unsure, ask for help.
-----------------------------------
+When you're unsure, ask for help
+--------------------------------
 
 
-Nobody knows everything, and nobody is expected to be perfect.  Asking
+Nobody knows everything, and nobody is expected to be perfect. Asking
 questions avoids many problems down the road, and so questions are
 questions avoids many problems down the road, and so questions are
-encouraged.  Those who are asked questions should be responsive and helpful.
+encouraged. Those who are asked questions should be responsive and helpful.
 However, when asking a question, care must be taken to do so in an appropriate
 However, when asking a question, care must be taken to do so in an appropriate
 forum.
 forum.
 
 
-Step down considerately.
-------------------------
+Step down considerately
+-----------------------
 
 
-Developers on every project come and go and Celery is no different.  When you
+Developers on every project come and go and Celery is no different. When you
 leave or disengage from the project, in whole or in part, we ask that you do
 leave or disengage from the project, in whole or in part, we ask that you do
-so in a way that minimizes disruption to the project.  This means you should
-tell people you are leaving and take the proper steps to ensure that others
+so in a way that minimizes disruption to the project. This means you should
+tell people you're leaving and take the proper steps to ensure that others
 can pick up where you leave off.
 can pick up where you leave off.
 
 
 .. _reporting-bugs:
 .. _reporting-bugs:
@@ -165,34 +165,34 @@ Bugs can always be described to the `mailing-list`_, but the best
 way to report an issue and to ensure a timely response is to use the
 way to report an issue and to ensure a timely response is to use the
 issue tracker.
 issue tracker.
 
 
-1) **Create a GitHub account.**
+1) **Create a GitHub account**.
 
 
 You need to `create a GitHub account`_ to be able to create new issues
 You need to `create a GitHub account`_ to be able to create new issues
 and participate in the discussion.
 and participate in the discussion.
 
 
 .. _`create a GitHub account`: https://github.com/signup/free
 .. _`create a GitHub account`: https://github.com/signup/free
 
 
-2) **Determine if your bug is really a bug.**
+2) **Determine if your bug is really a bug**.
 
 
-You should not file a bug if you are requesting support.  For that you can use
+You shouldn't file a bug if you're requesting support. For that you can use
 the `mailing-list`_, or `irc-channel`_.
 the `mailing-list`_, or `irc-channel`_.
 
 
-3) **Make sure your bug hasn't already been reported.**
+3) **Make sure your bug hasn't already been reported**.
 
 
-Search through the appropriate Issue tracker.  If a bug like yours was found,
+Search through the appropriate Issue tracker. If a bug like yours was found,
 check if you have new information that could be reported to help
 check if you have new information that could be reported to help
 the developers fix the bug.
 the developers fix the bug.
 
 
-4) **Check if you're using the latest version.**
+4) **Check if you're using the latest version**.
 
 
 A bug could be fixed by some other improvements and fixes - it might not have an
 A bug could be fixed by some other improvements and fixes - it might not have an
 existing report in the bug tracker. Make sure you're using the latest releases of
 existing report in the bug tracker. Make sure you're using the latest releases of
-celery, billiard, kombu, amqp and vine.
+celery, billiard, kombu, amqp, and vine.
 
 
-5) **Collect information about the bug.**
+5) **Collect information about the bug**.
 
 
 To have the best chance of having a bug fixed, we need to be able to easily
 To have the best chance of having a bug fixed, we need to be able to easily
-reproduce the conditions that caused it.  Most of the time this information
+reproduce the conditions that caused it. Most of the time this information
 will be from a Python traceback message, though some bugs might be in design,
 will be from a Python traceback message, though some bugs might be in design,
 spelling or other errors on the website/docs/code.
 spelling or other errors on the website/docs/code.
 
 
@@ -202,18 +202,19 @@ spelling or other errors on the website/docs/code.
        etc.), the version of your Python interpreter, and the version of Celery,
        etc.), the version of your Python interpreter, and the version of Celery,
        and related packages that you were running when the bug occurred.
        and related packages that you were running when the bug occurred.
 
 
-    C) If you are reporting a race condition or a deadlock, tracebacks can be
+    C) If you're reporting a race condition or a deadlock, tracebacks can be
        hard to get or might not be that useful. Try to inspect the process to
        hard to get or might not be that useful. Try to inspect the process to
        get more diagnostic data. Some ideas:
        get more diagnostic data. Some ideas:
 
 
-       * Enable celery's ``breakpoint_signal`` and use it
-         to inspect the process's state.  This will allow you to open a
+       * Enable Celery's ``breakpoint_signal`` and use it
+         to inspect the process's state. This will allow you to open a
          ``pdb`` session.
          ``pdb`` session.
       * Collect tracing data using `strace`_ (Linux),
       * Collect tracing data using `strace`_ (Linux),
          ``dtruss`` (macOS), and ``ktrace`` (BSD),
          ``dtruss`` (macOS), and ``ktrace`` (BSD),
-         `ltrace`_ and `lsof`_.
+         `ltrace`_, and `lsof`_.
 
 
     D) Include the output from the ``celery report`` command:
     D) Include the output from the ``celery report`` command:
+
         ::
         ::
 
 
             $ celery -A proj report
             $ celery -A proj report
@@ -224,7 +225,7 @@ spelling or other errors on the website/docs/code.
         confidential information like API tokens and authentication
         confidential information like API tokens and authentication
         credentials.
         credentials.
 
 
-6) **Submit the bug.**
+6) **Submit the bug**.
 
 
 By default `GitHub`_ will email you to let you know when new comments have
 By default `GitHub`_ will email you to let you know when new comments have
 been made on your bug. In the event you've turned this feature off, you
 been made on your bug. In the event you've turned this feature off, you
@@ -249,9 +250,10 @@ issue tracker.
 * ``amqp``: https://github.com/celery/py-amqp/issues
 * ``amqp``: https://github.com/celery/py-amqp/issues
 * ``vine``: https://github.com/celery/vine/issues
 * ``vine``: https://github.com/celery/vine/issues
 * ``librabbitmq``: https://github.com/celery/librabbitmq/issues
 * ``librabbitmq``: https://github.com/celery/librabbitmq/issues
-* ``django-celery``: https://github.com/celery/django-celery/issues
+* ``django-celery-beat``: https://github.com/celery/django-celery-beat/issues
+* ``django-celery-results``: https://github.com/celery/django-celery-results/issues
 
 
-If you are unsure of the origin of the bug you can ask the
+If you're unsure of the origin of the bug you can ask the
 `mailing-list`_, or just use the Celery issue tracker.
 `mailing-list`_, or just use the Celery issue tracker.
 
 
 Contributors guide to the code base
 Contributors guide to the code base
@@ -282,7 +284,7 @@ Branches
 
 
 Current active version branches:
 Current active version branches:
 
 
-* master (https://github.com/celery/celery/tree/master)
+* dev (which git calls "master") (https://github.com/celery/celery/tree/master)
 * 3.1 (https://github.com/celery/celery/tree/3.1)
 * 3.1 (https://github.com/celery/celery/tree/3.1)
 * 3.0 (https://github.com/celery/celery/tree/3.0)
 * 3.0 (https://github.com/celery/celery/tree/3.0)
 
 
@@ -292,13 +294,14 @@ You can see the state of any branch by looking at the Changelog:
 
 
 If the branch is in active development the topmost version info should
 If the branch is in active development the topmost version info should
 contain meta-data like:
 contain meta-data like:
+
 ::
 ::
 
 
     2.4.0
     2.4.0
     ======
     ======
     :release-date: TBA
     :release-date: TBA
     :status: DEVELOPMENT
     :status: DEVELOPMENT
-    :branch: master
+    :branch: dev (git calls this master)
 
 
 The ``status`` field can be one of:
 The ``status`` field can be one of:
 
 
@@ -317,17 +320,19 @@ The ``status`` field can be one of:
     When a branch is frozen the focus is on testing the version as much
     When a branch is frozen the focus is on testing the version as much
     as possible before it is released.
     as possible before it is released.
 
 
-``master`` branch
------------------
+dev branch
+----------
 
 
-The master branch is where development of the next version happens.
+The dev branch (called "master" by git), is where development of the next
+version happens.
 
 
 Maintenance branches
 Maintenance branches
 --------------------
 --------------------
 
 
-Maintenance branches are named after the version, e.g. the maintenance branch
-for the 2.2.x series is named ``2.2``.  Previously these were named
-``releaseXX-maint``.
+Maintenance branches are named after the version -- for example,
+the maintenance branch for the 2.2.x series is named ``2.2``.
+
+Previously these were named ``releaseXX-maint``.
 
 
 The versions we currently maintain are:
 The versions we currently maintain are:
 
 
@@ -344,7 +349,7 @@ Archived branches
 
 
 Archived branches are kept for preserving history only,
 Archived branches are kept for preserving history only,
 and theoretically someone could provide patches for these if they depend
 and theoretically someone could provide patches for these if they depend
-on a series that is no longer officially supported.
+on a series that's no longer officially supported.
 
 
 An archived version is named ``X.Y-archived``.
 An archived version is named ``X.Y-archived``.
 
 
@@ -366,17 +371,20 @@ Feature branches
 ----------------
 ----------------
 
 
 Major new features are worked on in dedicated branches.
 Major new features are worked on in dedicated branches.
-There is no strict naming requirement for these branches.
+There's no strict naming requirement for these branches.
 
 
-Feature branches are removed once they have been merged into a release branch.
+Feature branches are removed once they've been merged into a release branch.
 
 
 Tags
 Tags
 ====
 ====
 
 
-Tags are used exclusively for tagging releases.  A release tag is
-named with the format ``vX.Y.Z``, e.g. ``v2.3.1``.
-Experimental releases contain an additional identifier ``vX.Y.Z-id``, e.g.
-``v3.0.0-rc1``.  Experimental tags may be removed after the official release.
+- Tags are used exclusively for tagging releases. A release tag is
+  named with the format ``vX.Y.Z`` -- for example ``v2.3.1``.
+
+- Experimental releases contain an additional identifier ``vX.Y.Z-id`` --
+  for example ``v3.0.0-rc1``.
+
+- Experimental tags may be removed after the official release.
 
 
 .. _contributing-changes:
 .. _contributing-changes:
 
 
@@ -388,7 +396,7 @@ Working on Features & Patches
     Contributing to Celery should be as simple as possible,
     Contributing to Celery should be as simple as possible,
     so none of these steps should be considered mandatory.
     so none of these steps should be considered mandatory.
 
 
-    You can even send in patches by email if that is your preferred
+    You can even send in patches by email if that's your preferred
     work method. We won't like you any less, any contribution you make
     work method. We won't like you any less, any contribution you make
     is always appreciated!
     is always appreciated!
 
 
@@ -403,12 +411,14 @@ is in the GitHub Guide: `Fork a Repo`_.
 
 
 After you have cloned the repository you should checkout your copy
 After you have cloned the repository you should checkout your copy
 to a directory on your machine:
 to a directory on your machine:
+
 ::
 ::
 
 
     $ git clone git@github.com:username/celery.git
     $ git clone git@github.com:username/celery.git
 
 
 When the repository is cloned enter the directory to set up easy access
 When the repository is cloned enter the directory to set up easy access
 to upstream changes:
 to upstream changes:
+
 ::
 ::
 
 
     $ cd celery
     $ cd celery
@@ -417,6 +427,7 @@ to upstream changes:
 
 
 If you need to pull in new changes from upstream you should
 If you need to pull in new changes from upstream you should
 always use the ``--rebase`` option to ``git pull``:
 always use the ``--rebase`` option to ``git pull``:
+
 ::
 ::
 
 
     git pull --rebase upstream master
     git pull --rebase upstream master
@@ -426,7 +437,7 @@ commit notes. See `Rebasing merge commits in git`_.
 If you want to learn more about rebasing see the `Rebase`_
 If you want to learn more about rebasing see the `Rebase`_
 section in the GitHub guides.
 section in the GitHub guides.
 
 
-If you need to work on a different branch than ``master`` you can
+If you need to work on a different branch than the one git calls ``master``, you can
 fetch and checkout a remote branch like this::
 fetch and checkout a remote branch like this::
 
 
     git checkout --track -b 3.0-devel origin/3.0-devel
     git checkout --track -b 3.0-devel origin/3.0-devel
@@ -447,24 +458,27 @@ A complete list of the dependencies needed are located in
 
 
 If you're working on the development version, then you need to
 If you're working on the development version, then you need to
 install the development requirements first:
 install the development requirements first:
+
 ::
 ::
 
 
     $ pip install -U -r requirements/dev.txt
     $ pip install -U -r requirements/dev.txt
 
 
 Both the stable and the development version have testing related
 Both the stable and the development version have testing related
 dependencies, so install these next:
 dependencies, so install these next:
+
 ::
 ::
 
 
     $ pip install -U -r requirements/test.txt
     $ pip install -U -r requirements/test.txt
     $ pip install -U -r requirements/default.txt
     $ pip install -U -r requirements/default.txt
 
 
 After installing the dependencies required, you can now execute
 After installing the dependencies required, you can now execute
-the test suite by calling ``nosetests <nose>``:
+the test suite by calling ``py.test <pytest>``:
+
 ::
 ::
 
 
-    $ nosetests
+    $ py.test
 
 
-Some useful options to ``nosetests`` are:
+Some useful options to ``py.test`` are:
 
 
 * ``-x``
 * ``-x``
 
 
@@ -474,19 +488,16 @@ Some useful options to ``nosetests`` are:
 
 
     Don't capture output
     Don't capture output
 
 
-* ``-nologcapture``
-
-    Don't capture log output.
-
 * ``-v``
 * ``-v``
 
 
     Run with verbose output.
     Run with verbose output.
 
 
 If you want to run the tests for a single test file only
 If you want to run the tests for a single test file only
 you can do so like this:
 you can do so like this:
+
 ::
 ::
 
 
-    $ nosetests celery.tests.test_worker.test_worker_job
+    $ py.test t/unit/worker/test_worker_job.py
 
 
 .. _contributing-pull-requests:
 .. _contributing-pull-requests:
 
 
@@ -497,7 +508,7 @@ When your feature/bugfix is complete you may want to submit
 a pull request so that it can be reviewed by the maintainers.
 a pull request so that it can be reviewed by the maintainers.
 
 
 Creating pull requests is easy, and also lets you track the progress
 Creating pull requests is easy, and also lets you track the progress
-of your contribution.  Read the `Pull Requests`_ section in the GitHub
+of your contribution. Read the `Pull Requests`_ section in the GitHub
 Guide to learn how this is done.
 Guide to learn how this is done.
 
 
 You can also attach pull requests to existing issues by following
 You can also attach pull requests to existing issues by following
@@ -510,42 +521,56 @@ the steps outlined here: http://bit.ly/koJoso
 Calculating test coverage
 Calculating test coverage
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 
-To calculate test coverage you must first install the ``coverage`` module.
+To calculate test coverage you must first install the ``pytest-cov`` module.
+
+Installing the ``pytest-cov`` module:
 
 
-Installing the ``coverage`` module:
 ::
 ::
 
 
-    $ pip install -U coverage
+    $ pip install -U pytest-cov
 
 
-Code coverage in HTML:
-::
+Code coverage in HTML format
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Run ``py.test`` with the ``--cov-report=html`` argument enabled:
+
+    ::
+
+        $ py.test --cov=celery --cov-report=html
+
+#. The coverage output will then be located in the ``htmlcov/`` directory:
+
+    ::
 
 
-    $ nosetests --with-coverage --cover-html
+        $ open htmlcov/index.html
 
 
-The coverage output will then be located at
-``celery/tests/cover/index.html``.
+Code coverage in XML (Cobertura-style)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Run ``py.test`` with the ``--cov-report=xml`` argument enabled:
 
 
-Code coverage in XML (Cobertura-style):
 ::
 ::
 
 
-    $ nosetests --with-coverage --cover-xml --cover-xml-file=coverage.xml
+    $ py.test --cov=celery --cov-report=xml
 
 
-The coverage XML output will then be located at ``coverage.xml``
+#. The coverage XML output will then be located in the ``coverage.xml`` file.
 
 
 .. _contributing-tox:
 .. _contributing-tox:
 
 
 Running the tests on all supported Python versions
 Running the tests on all supported Python versions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 
-There is a ``tox`` configuration file in the top directory of the
+There's a ``tox`` configuration file in the top directory of the
 distribution.
 distribution.
 
 
 To run the tests for all supported Python versions simply execute:
 To run the tests for all supported Python versions simply execute:
+
 ::
 ::
 
 
     $ tox
     $ tox
 
 
 Use the ``tox -e`` option if you only want to test specific Python versions:
 Use the ``tox -e`` option if you only want to test specific Python versions:
+
 ::
 ::
 
 
     $ tox -e 2.7
     $ tox -e 2.7
@@ -555,12 +580,14 @@ Building the documentation
 
 
 To build the documentation you need to install the dependencies
 To build the documentation you need to install the dependencies
 listed in ``requirements/docs.txt``:
 listed in ``requirements/docs.txt``:
+
 ::
 ::
 
 
     $ pip install -U -r requirements/docs.txt
     $ pip install -U -r requirements/docs.txt
 
 
 After these dependencies are installed you should be able to
 After these dependencies are installed you should be able to
 build the docs by running:
 build the docs by running:
+
 ::
 ::
 
 
     $ cd docs
     $ cd docs
@@ -575,25 +602,28 @@ After building succeeds the documentation is available at ``_build/html``.
 Verifying your contribution
 Verifying your contribution
 ---------------------------
 ---------------------------
 
 
-To use these tools you need to install a few dependencies.  These dependencies
+To use these tools you need to install a few dependencies. These dependencies
 can be found in ``requirements/pkgutils.txt``.
 can be found in ``requirements/pkgutils.txt``.
 
 
 Installing the dependencies:
 Installing the dependencies:
+
 ::
 ::
 
 
     $ pip install -U -r requirements/pkgutils.txt
     $ pip install -U -r requirements/pkgutils.txt
 
 
-pyflakes & PEP8
-~~~~~~~~~~~~~~~
+pyflakes & PEP-8
+~~~~~~~~~~~~~~~~
 
 
-To ensure that your changes conform to PEP8 and to run pyflakes
+To ensure that your changes conform to PEP-8 and to run pyflakes
 execute:
 execute:
+
 ::
 ::
 
 
     $ make flakecheck
     $ make flakecheck
 
 
 To not return a negative exit code when this command fails use
 To not return a negative exit code when this command fails use
 the ``flakes`` target instead:
 the ``flakes`` target instead:
+
 ::
 ::
 
 
     $ make flakes
     $ make flakes
@@ -603,6 +633,7 @@ API reference
 
 
 To make sure that all modules have a corresponding section in the API
 To make sure that all modules have a corresponding section in the API
 reference please execute:
 reference please execute:
+
 ::
 ::
 
 
     $ make apicheck
     $ make apicheck
@@ -611,7 +642,7 @@ reference please execute:
 If files are missing you can add them by copying an existing reference file.
 If files are missing you can add them by copying an existing reference file.
 
 
 If the module is internal it should be part of the internal reference
 If the module is internal it should be part of the internal reference
-located in ``docs/internals/reference/``.  If the module is public
+located in ``docs/internals/reference/``. If the module is public
 it should be located in ``docs/reference/``.
 it should be located in ``docs/reference/``.
 
 
 For example if reference is missing for the module ``celery.worker.awesome``
 For example if reference is missing for the module ``celery.worker.awesome``
@@ -619,12 +650,14 @@ and this module is considered part of the public API, use the following steps:
 
 
 
 
 Use an existing file as a template:
 Use an existing file as a template:
+
 ::
 ::
 
 
     $ cd docs/reference/
     $ cd docs/reference/
     $ cp celery.schedules.rst celery.worker.awesome.rst
     $ cp celery.schedules.rst celery.worker.awesome.rst
 
 
 Edit the file using your favorite editor:
 Edit the file using your favorite editor:
+
 ::
 ::
 
 
     $ vim celery.worker.awesome.rst
     $ vim celery.worker.awesome.rst
@@ -634,6 +667,7 @@ Edit the file using your favorite editor:
 
 
 
 
 Edit the index using your favorite editor:
 Edit the index using your favorite editor:
+
 ::
 ::
 
 
     $ vim index.rst
     $ vim index.rst
@@ -642,6 +676,7 @@ Edit the index using your favorite editor:
 
 
 
 
 Commit your changes:
 Commit your changes:
+
 ::
 ::
 
 
     # Add the file to git
     # Add the file to git
@@ -659,18 +694,16 @@ You should probably be able to pick up the coding style
 from surrounding code, but it is a good idea to be aware of the
 from surrounding code, but it is a good idea to be aware of the
 following conventions.
 following conventions.
 
 
-* All Python code must follow the `PEP-8`_ guidelines.
+* All Python code must follow the PEP-8 guidelines.
 
 
-`pep8.py`_ is an utility you can use to verify that your code
+``pep8`` is a utility you can use to verify that your code
 is following the conventions.
 is following the conventions.
 
 
-.. _`PEP-8`: http://www.python.org/dev/peps/pep-0008/
-.. _`pep8.py`: http://pypi.python.org/pypi/pep8
-
-* Docstrings must follow the `PEP-257`_ conventions, and use the following
+* Docstrings must follow the PEP-257 conventions, and use the following
   style.
   style.
 
 
     Do this:
     Do this:
+
     ::
     ::
 
 
         def method(self, arg):
         def method(self, arg):
@@ -681,6 +714,7 @@ is following the conventions.
             """
             """
 
 
     or:
     or:
+
     ::
     ::
 
 
         def method(self, arg):
         def method(self, arg):
@@ -688,6 +722,7 @@ is following the conventions.
 
 
 
 
     but not this:
     but not this:
+
     ::
     ::
 
 
         def method(self, arg):
         def method(self, arg):
@@ -695,17 +730,16 @@ is following the conventions.
             Short description.
             Short description.
             """
             """
 
 
-.. _`PEP-257`: http://www.python.org/dev/peps/pep-0257/
-
-* Lines should not exceed 78 columns.
+* Lines shouldn't exceed 78 columns.
 
 
   You can enforce this in ``vim`` by setting the ``textwidth`` option:
   You can enforce this in ``vim`` by setting the ``textwidth`` option:
+
   ::
   ::
 
 
         set textwidth=78
         set textwidth=78
 
 
   If adhering to this limit makes the code less readable, you have one more
   If adhering to this limit makes the code less readable, you have one more
-  character to go on, which means 78 is a soft limit, and 79 is the hard
+  character to go on. This means 78 is a soft limit, and 79 is the hard
   limit :)
   limit :)
 
 
 * Import order
 * Import order
@@ -726,6 +760,7 @@ is following the conventions.
     Within these sections the imports should be sorted by module name.
     Within these sections the imports should be sorted by module name.
 
 
     Example:
     Example:
+
     ::
     ::
 
 
         import threading
         import threading
@@ -736,7 +771,7 @@ is following the conventions.
 
 
         from .platforms import Pidfile
         from .platforms import Pidfile
         from .five import zip_longest, items, range
         from .five import zip_longest, items, range
-        from .utils import timeutils
+        from .utils.time import maybe_timedelta
 
 
 * Wild-card imports must not be used (`from xxx import *`).
 * Wild-card imports must not be used (`from xxx import *`).
 
 
@@ -748,12 +783,12 @@ is following the conventions.
         from __future__ import absolute_import
         from __future__ import absolute_import
 
 
     * If the module uses the ``with`` statement and must be compatible
     * If the module uses the ``with`` statement and must be compatible
-      with Python 2.5 (celery is not) then it must also enable that::
+      with Python 2.5 (celery isn't) then it must also enable that::
 
 
         from __future__ import with_statement
         from __future__ import with_statement
 
 
     * Every future import must be on its own line, as older Python 2.5
     * Every future import must be on its own line, as older Python 2.5
-      releases did not support importing multiple features on the
+      releases didn't support importing multiple features on the
       same future import line::
       same future import line::
 
 
         # Good
         # Good
@@ -763,14 +798,15 @@ is following the conventions.
         # Bad
         # Bad
         from __future__ import absolute_import, with_statement
         from __future__ import absolute_import, with_statement
 
 
-     (Note that this rule does not apply if the package does not include
+     (Note that this rule doesn't apply if the package doesn't include
      support for Python 2.5)
      support for Python 2.5)
 
 
 
 
 * Note that we use "new-style" relative imports when the distribution
 * Note that we use "new-style" relative imports when the distribution
-  does not support Python versions below 2.5
+  doesn't support Python versions below 2.5
 
 
     This requires Python 2.5 or later:
     This requires Python 2.5 or later:
+
     ::
     ::
 
 
         from . import submodule
         from . import submodule
@@ -789,15 +825,17 @@ that require third-party libraries must be added.
 
 
 1) Add a new requirements file in `requirements/extras`
 1) Add a new requirements file in `requirements/extras`
 
 
-    E.g. for the Cassandra backend this is
+    For the Cassandra backend this is
     ``requirements/extras/cassandra.txt``, and the file looks like this:
     ``requirements/extras/cassandra.txt``, and the file looks like this:
+
     ::
     ::
 
 
         pycassa
         pycassa
 
 
     These are pip requirement files so you can have version specifiers and
     These are pip requirement files so you can have version specifiers and
-    multiple packages are separated by newline.  A more complex example could
+    multiple packages are separated by newline. A more complex example could
     be:
     be:
+
     ::
     ::
 
 
         # pycassa 2.0 breaks Foo
         # pycassa 2.0 breaks Foo
@@ -821,6 +859,7 @@ that require third-party libraries must be added.
 
 
     After you've made changes to this file you need to render
     After you've made changes to this file you need to render
     the distro ``README`` file:
     the distro ``README`` file:
+
     ::
     ::
 
 
         $ pip install -U requirements/pkgutils.txt
         $ pip install -U requirements/pkgutils.txt
@@ -829,7 +868,7 @@ that require third-party libraries must be added.
 
 
 That's all that needs to be done, but remember that if your feature
 That's all that needs to be done, but remember that if your feature
 adds additional configuration options then these needs to be documented
 adds additional configuration options then these needs to be documented
-in ``docs/configuration.rst``.  Also all settings need to be added to the
+in ``docs/configuration.rst``. Also all settings need to be added to the
 ``celery/app/defaults.py`` module.
 ``celery/app/defaults.py`` module.
 
 
 Result backends require a separate section in the ``docs/configuration.rst``
 Result backends require a separate section in the ``docs/configuration.rst``
@@ -844,7 +883,7 @@ This is a list of people that can be contacted for questions
 regarding the official git repositories, PyPI packages
 regarding the official git repositories, PyPI packages
 Read the Docs pages.
 Read the Docs pages.
 
 
-If the issue is not an emergency then it is better
+If the issue isn't an emergency then it's better
 to `report an issue`_.
 to `report an issue`_.
 
 
 
 
@@ -857,6 +896,12 @@ Ask Solem
 :github: https://github.com/ask
 :github: https://github.com/ask
 :twitter: http://twitter.com/#!/asksol
 :twitter: http://twitter.com/#!/asksol
 
 
+Asif Saif Uddin
+~~~~~~~~~~~~~~~
+
+:github: https://github.com/auvipy
+:twitter: https://twitter.com/#!/auvipy
+
 Dmitry Malinovsky
 Dmitry Malinovsky
 ~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~
 
 
@@ -917,7 +962,7 @@ Packages
 :git: https://github.com/celery/celery
 :git: https://github.com/celery/celery
 :CI: http://travis-ci.org/#!/celery/celery
 :CI: http://travis-ci.org/#!/celery/celery
 :Windows-CI: https://ci.appveyor.com/project/ask/celery
 :Windows-CI: https://ci.appveyor.com/project/ask/celery
-:PyPI: http://pypi.python.org/pypi/celery
+:PyPI: ``celery``
 :docs: http://docs.celeryproject.org
 :docs: http://docs.celeryproject.org
 
 
 ``kombu``
 ``kombu``
@@ -928,7 +973,7 @@ Messaging library.
 :git: https://github.com/celery/kombu
 :git: https://github.com/celery/kombu
 :CI: http://travis-ci.org/#!/celery/kombu
 :CI: http://travis-ci.org/#!/celery/kombu
 :Windows-CI: https://ci.appveyor.com/project/ask/kombu
 :Windows-CI: https://ci.appveyor.com/project/ask/kombu
-:PyPI: http://pypi.python.org/pypi/kombu
+:PyPI: ``kombu``
 :docs: https://kombu.readthedocs.io
 :docs: https://kombu.readthedocs.io
 
 
 ``amqp``
 ``amqp``
@@ -939,7 +984,7 @@ Python AMQP 0.9.1 client.
 :git: https://github.com/celery/py-amqp
 :git: https://github.com/celery/py-amqp
 :CI: http://travis-ci.org/#!/celery/py-amqp
 :CI: http://travis-ci.org/#!/celery/py-amqp
 :Windows-CI: https://ci.appveyor.com/project/ask/py-amqp
 :Windows-CI: https://ci.appveyor.com/project/ask/py-amqp
-:PyPI: http://pypi.python.org/pypi/amqp
+:PyPI: ``amqp``
 :docs: https://amqp.readthedocs.io
 :docs: https://amqp.readthedocs.io
 
 
 ``vine``
 ``vine``
@@ -950,19 +995,39 @@ Promise/deferred implementation.
 :git: https://github.com/celery/vine/
 :git: https://github.com/celery/vine/
 :CI: http://travis-ci.org/#!/celery/vine/
 :CI: http://travis-ci.org/#!/celery/vine/
 :Windows-CI: https://ci.appveyor.com/project/ask/vine
 :Windows-CI: https://ci.appveyor.com/project/ask/vine
-:PyPI: http://pypi.python.org/pypi/vine
+:PyPI: ``vine``
 :docs: https://vine.readthedocs.io
 :docs: https://vine.readthedocs.io
 
 
 ``billiard``
 ``billiard``
 ------------
 ------------
 
 
 Fork of multiprocessing containing improvements
 Fork of multiprocessing containing improvements
-that will eventually be merged into the Python stdlib.
+that'll eventually be merged into the Python stdlib.
 
 
 :git: https://github.com/celery/billiard
 :git: https://github.com/celery/billiard
 :CI: http://travis-ci.org/#!/celery/billiard/
 :CI: http://travis-ci.org/#!/celery/billiard/
 :Windows-CI: https://ci.appveyor.com/project/ask/billiard
 :Windows-CI: https://ci.appveyor.com/project/ask/billiard
-:PyPI: http://pypi.python.org/pypi/billiard
+:PyPI: ``billiard``
+
+``django-celery-beat``
+----------------------
+
+Database-backed Periodic Tasks with admin interface using the Django ORM.
+
+:git: https://github.com/celery/django-celery-beat
+:CI: http://travis-ci.org/#!/celery/django-celery-beat
+:Windows-CI: https://ci.appveyor.com/project/ask/django-celery-beat
+:PyPI: ``django-celery-beat``
+
+``django-celery-results``
+-------------------------
+
+Store task results in the Django ORM, or using the Django Cache Framework.
+
+:git: https://github.com/celery/django-celery-results
+:CI: http://travis-ci.org/#!/celery/django-celery-results
+:Windows-CI: https://ci.appveyor.com/project/ask/django-celery-results
+:PyPI: ``django-celery-results``
 
 
 ``librabbitmq``
 ``librabbitmq``
 ---------------
 ---------------
@@ -970,16 +1035,7 @@ that will eventually be merged into the Python stdlib.
 Very fast Python AMQP client written in C.
 Very fast Python AMQP client written in C.
 
 
 :git: https://github.com/celery/librabbitmq
 :git: https://github.com/celery/librabbitmq
-:PyPI: http://pypi.python.org/pypi/librabbitmq
-
-``django-celery``
------------------
-
-Django <-> Celery Integration.
-
-:git: https://github.com/celery/django-celery
-:PyPI: http://pypi.python.org/pypi/django-celery
-:docs: http://docs.celeryproject.org/en/latest/django
+:PyPI: ``librabbitmq``
 
 
 ``cell``
 ``cell``
 --------
 --------
@@ -987,7 +1043,7 @@ Django <-> Celery Integration.
 Actor library.
 Actor library.
 
 
 :git: https://github.com/celery/cell
 :git: https://github.com/celery/cell
-:PyPI: http://pypi.python.org/pypi/cell
+:PyPI: ``cell``
 
 
 ``cyme``
 ``cyme``
 --------
 --------
@@ -995,49 +1051,55 @@ Actor library.
 Distributed Celery Instance manager.
 Distributed Celery Instance manager.
 
 
 :git: https://github.com/celery/cyme
 :git: https://github.com/celery/cyme
-:PyPI: http://pypi.python.org/pypi/cyme
+:PyPI: ``cyme``
 :docs: https://cyme.readthedocs.io/
 :docs: https://cyme.readthedocs.io/
 
 
 
 
 Deprecated
 Deprecated
 ----------
 ----------
 
 
+- ``django-celery``
+
+:git: https://github.com/celery/django-celery
+:PyPI: ``django-celery``
+:docs: http://docs.celeryproject.org/en/latest/django
+
 - ``Flask-Celery``
 - ``Flask-Celery``
 
 
 :git: https://github.com/ask/Flask-Celery
 :git: https://github.com/ask/Flask-Celery
-:PyPI: http://pypi.python.org/pypi/Flask-Celery
+:PyPI: ``Flask-Celery``
 
 
 - ``celerymon``
 - ``celerymon``
 
 
 :git: https://github.com/celery/celerymon
 :git: https://github.com/celery/celerymon
-:PyPI: http://pypi.python.org/pypi/celerymon
+:PyPI: ``celerymon``
 
 
 - ``carrot``
 - ``carrot``
 
 
 :git: https://github.com/ask/carrot
 :git: https://github.com/ask/carrot
-:PyPI: http://pypi.python.org/pypi/carrot
+:PyPI: ``carrot``
 
 
 - ``ghettoq``
 - ``ghettoq``
 
 
 :git: https://github.com/ask/ghettoq
 :git: https://github.com/ask/ghettoq
-:PyPI: http://pypi.python.org/pypi/ghettoq
+:PyPI: ``ghettoq``
 
 
 - ``kombu-sqlalchemy``
 - ``kombu-sqlalchemy``
 
 
 :git: https://github.com/ask/kombu-sqlalchemy
 :git: https://github.com/ask/kombu-sqlalchemy
-:PyPI: http://pypi.python.org/pypi/kombu-sqlalchemy
+:PyPI: ``kombu-sqlalchemy``
 
 
 - ``django-kombu``
 - ``django-kombu``
 
 
 :git: https://github.com/ask/django-kombu
 :git: https://github.com/ask/django-kombu
-:PyPI: http://pypi.python.org/pypi/django-kombu
+:PyPI: ``django-kombu``
 
 
 - ``pylibrabbitmq``
 - ``pylibrabbitmq``
 
 
 Old name for ``librabbitmq``.
 Old name for ``librabbitmq``.
 
 
 :git: ``None``
 :git: ``None``
-:PyPI: http://pypi.python.org/pypi/pylibrabbitmq
+:PyPI: ``pylibrabbitmq``
 
 
 .. _release-procedure:
 .. _release-procedure:
 
 
@@ -1054,19 +1116,22 @@ The version number must be updated two places:
     * ``docs/include/introduction.txt``
     * ``docs/include/introduction.txt``
 
 
 After you have changed these files you must render
 After you have changed these files you must render
-the ``README`` files.  There is a script to convert sphinx syntax
+the ``README`` files. There's a script to convert sphinx syntax
 to generic reStructured Text syntax, and the make target `readme`
 to generic reStructured Text syntax, and the make target `readme`
 does this for you:
 does this for you:
+
 ::
 ::
 
 
     $ make readme
     $ make readme
 
 
 Now commit the changes:
 Now commit the changes:
+
 ::
 ::
 
 
     $ git commit -a -m "Bumps version to X.Y.Z"
     $ git commit -a -m "Bumps version to X.Y.Z"
 
 
 and make a new version tag:
 and make a new version tag:
+
 ::
 ::
 
 
     $ git tag vX.Y.Z
     $ git tag vX.Y.Z
@@ -1076,6 +1141,7 @@ Releasing
 ---------
 ---------
 
 
 Commands to make a new public stable release:
 Commands to make a new public stable release:
+
 ::
 ::
 
 
     $ make distcheck  # checks pep8, autodoc index, runs tests and more
     $ make distcheck  # checks pep8, autodoc index, runs tests and more
@@ -1091,8 +1157,8 @@ following:
 
 
 * Enter "Edit project"
 * Enter "Edit project"
 
 
-    Change default branch to the branch of this series, e.g. ``2.4``
-    for series 2.4.
+    Change default branch to the branch of this series, for example, use
+    the ``2.4`` branch for the 2.4 series.
 
 
 * Also add the previous version under the "versions" tab.
 * Also add the previous version under the "versions" tab.
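
Taken together, the changes in this file move the contributor workflow from ``nosetests`` to ``py.test``. As a quick recap, using only invocations that appear above:

::

    $ pip install -U -r requirements/test.txt -r requirements/default.txt
    $ py.test                                    # run the unit suite
    $ py.test t/unit/worker/test_worker_job.py   # run a single test file
    $ py.test --cov=celery --cov-report=html     # coverage report in htmlcov/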
 
 

+ 2 - 0
CONTRIBUTORS.txt

@@ -217,3 +217,5 @@ Anand Reddy Pandikunta, 2016/06/18
 Adriano Martins de Jesus, 2016/06/22
 Adriano Martins de Jesus, 2016/06/22
 Kevin Richardson, 2016/06/29
 Kevin Richardson, 2016/06/29
 Andrew Stewart, 2016/07/04
 Andrew Stewart, 2016/07/04
+Xin Li, 2016/08/03
+Alli Witheford, 2016/09/29

+ 3 - 0
MANIFEST.in

@@ -7,6 +7,7 @@ include TODO
 include setup.cfg
 include setup.cfg
 include setup.py
 include setup.py
 
 
+recursive-include t *.py
 recursive-include docs *
 recursive-include docs *
 recursive-include extra/bash-completion *
 recursive-include extra/bash-completion *
 recursive-include extra/centos *
 recursive-include extra/centos *
@@ -17,7 +18,9 @@ recursive-include extra/systemd *
 recursive-include extra/zsh-completion *
 recursive-include extra/zsh-completion *
 recursive-include examples *
 recursive-include examples *
 recursive-include requirements *.txt *.rst
 recursive-include requirements *.txt *.rst
+recursive-include celery/utils/static *.png
 
 
+recursive-exclude docs/_build *
 recursive-exclude * __pycache__
 recursive-exclude * __pycache__
 recursive-exclude * *.py[co]
 recursive-exclude * *.py[co]
 recursive-exclude * .*.sw[a-z]
 recursive-exclude * .*.sw[a-z]
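
The MANIFEST.in additions pull the new ``t/`` test tree and the PNGs under ``celery/utils/static`` into source distributions while keeping built docs out of them. A quick, purely illustrative way to confirm what lands in the tarball:

::

    $ python setup.py sdist
    $ tar tzf dist/celery-*.tar.gz | grep -E '/t/|\.png$'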

+ 23 - 10
Makefile

@@ -1,13 +1,19 @@
 PROJ=celery
 PROJ=celery
 PGPIDENT="Celery Security Team"
 PGPIDENT="Celery Security Team"
 PYTHON=python
 PYTHON=python
+PYTEST=py.test
 GIT=git
 GIT=git
 TOX=tox
 TOX=tox
-NOSETESTS=nosetests
 ICONV=iconv
 ICONV=iconv
 FLAKE8=flake8
 FLAKE8=flake8
+PYDOCSTYLE=pydocstyle
+PYROMA=pyroma
 FLAKEPLUS=flakeplus
 FLAKEPLUS=flakeplus
 SPHINX2RST=sphinx2rst
 SPHINX2RST=sphinx2rst
+RST2HTML=rst2html.py
+DEVNULL=/dev/null
+
+TESTDIR=t
 
 
 SPHINX_DIR=docs/
 SPHINX_DIR=docs/
 SPHINX_BUILDDIR="${SPHINX_DIR}/_build"
 SPHINX_BUILDDIR="${SPHINX_DIR}/_build"
@@ -36,6 +42,7 @@ help:
 	@echo "    flakes --------  - Check code for syntax and style errors."
 	@echo "    flakes --------  - Check code for syntax and style errors."
 	@echo "      flakecheck     - Run flake8 on the source code."
 	@echo "      flakecheck     - Run flake8 on the source code."
 	@echo "      flakepluscheck - Run flakeplus on the source code."
 	@echo "      flakepluscheck - Run flakeplus on the source code."
+	@echo "      pep257check    - Run pep257 on the source code."
 	@echo "readme               - Regenerate README.rst file."
 	@echo "readme               - Regenerate README.rst file."
 	@echo "contrib              - Regenerate CONTRIBUTING.rst file"
 	@echo "contrib              - Regenerate CONTRIBUTING.rst file"
 	@echo "clean-dist --------- - Clean all distribution build artifacts."
 	@echo "clean-dist --------- - Clean all distribution build artifacts."
@@ -69,10 +76,10 @@ Documentation:
 	(cd "$(SPHINX_DIR)"; $(MAKE) html)
 	(cd "$(SPHINX_DIR)"; $(MAKE) html)
 	mv "$(SPHINX_HTMLDIR)" $(DOCUMENTATION)
 	mv "$(SPHINX_HTMLDIR)" $(DOCUMENTATION)
 
 
-docs: Documentation
+docs: clean-docs Documentation
 
 
 clean-docs:
 clean-docs:
-	-rm -rf "$(SPHINX_BUILDDIR)"
+	-rm -rf "$(SPHINX_BUILDDIR)" "$(DOCUMENTATION)"
 
 
 lint: flakecheck apicheck configcheck readmecheck
 lint: flakecheck apicheck configcheck readmecheck
 
 
@@ -83,26 +90,33 @@ configcheck:
 	(cd "$(SPHINX_DIR)"; $(MAKE) configcheck)
 	(cd "$(SPHINX_DIR)"; $(MAKE) configcheck)
 
 
 flakecheck:
 flakecheck:
-	# the only way to enable all-1 errors is to ignore one of them.
-	$(FLAKE8) --ignore=X999 "$(PROJ)"
+	$(FLAKE8) "$(PROJ)" "$(TESTDIR)"
+
+pep257check:
+	$(PYDOCSTYLE) "$(PROJ)"
 
 
 flakediag:
 flakediag:
 	-$(MAKE) flakecheck
 	-$(MAKE) flakecheck
 
 
 flakepluscheck:
 flakepluscheck:
-	$(FLAKEPLUS) --$(FLAKEPLUSTARGET) "$(PROJ)"
+	$(FLAKEPLUS) --$(FLAKEPLUSTARGET) "$(PROJ)" "$(TESTDIR)"
 
 
 flakeplusdiag:
 flakeplusdiag:
 	-$(MAKE) flakepluscheck
 	-$(MAKE) flakepluscheck
 
 
-flakes: flakediag flakeplusdiag
+flakes: flakediag flakeplusdiag pep257check
 
 
 clean-readme:
 clean-readme:
 	-rm -f $(README)
 	-rm -f $(README)
 
 
-readmecheck:
+readmecheck-unicode:
 	$(ICONV) -f ascii -t ascii $(README) >/dev/null
 	$(ICONV) -f ascii -t ascii $(README) >/dev/null
 
 
+readmecheck-rst:
+	-$(RST2HTML) $(README) >$(DEVNULL)
+
+readmecheck: readmecheck-unicode readmecheck-rst
+
 $(README):
 $(README):
 	$(SPHINX2RST) "$(README_SRC)" --ascii > $@
 	$(SPHINX2RST) "$(README_SRC)" --ascii > $@
 
 
@@ -138,7 +152,7 @@ test:
 	$(PYTHON) setup.py test
 	$(PYTHON) setup.py test
 
 
 cov:
 cov:
-	$(NOSETESTS) -xv --with-coverage --cover-html --cover-branch
+	$(PYTEST) -x --cov="$(PROJ)" --cov-report=html
 
 
 build:
 build:
 	$(PYTHON) setup.py sdist bdist_wheel
 	$(PYTHON) setup.py sdist bdist_wheel
@@ -158,4 +172,3 @@ graph: clean-graph $(WORKER_GRAPH)
 
 
 authorcheck:
 authorcheck:
 	git shortlog -se | cut -f2 | extra/release/attribution.py
 	git shortlog -se | cut -f2 | extra/release/attribution.py
-

+ 60 - 41
README.rst

@@ -1,12 +1,12 @@
 =================================
 =================================
- celery - Distributed Task Queue
+ Celery - Distributed Task Queue
 =================================
 =================================
 
 
 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
 
 
-|build-status| |coverage| |bitdeli|
+|build-status| |coverage| |license| |wheel| |pyversion| |pyimp|
 
 
-:Version: 4.0.0rc3 (0today8)
+:Version: 4.0.0rc5 (0today8)
 :Web: http://celeryproject.org/
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: https://github.com/celery/celery/
 :Source: https://github.com/celery/celery/
@@ -15,8 +15,8 @@
 
 
 --
 --
 
 
-What is a Task Queue?
-=====================
+What's a Task Queue?
+====================
 
 
 Task queues are used as a mechanism to distribute work across threads or
 Task queues are used as a mechanism to distribute work across threads or
 machines.
 machines.
@@ -25,14 +25,14 @@ A task queue's input is a unit of work, called a task, dedicated worker
 processes then constantly monitor the queue for new work to perform.
 processes then constantly monitor the queue for new work to perform.
 
 
 Celery communicates via messages, usually using a broker
 Celery communicates via messages, usually using a broker
-to mediate between clients and workers.  To initiate a task a client puts a
+to mediate between clients and workers. To initiate a task a client puts a
 message on the queue, the broker then delivers the message to a worker.
 message on the queue, the broker then delivers the message to a worker.
 
 
 A Celery system can consist of multiple workers and brokers, giving way
 A Celery system can consist of multiple workers and brokers, giving way
 to high availability and horizontal scaling.
 to high availability and horizontal scaling.
 
 
 Celery is written in Python, but the protocol can be implemented in any
 Celery is written in Python, but the protocol can be implemented in any
-language.  In addition to Python there's node-celery_ for Node.js,
+language. In addition to Python there's node-celery_ for Node.js,
 and a `PHP client`_.
 and a `PHP client`_.
 
 
 Language interoperability can also be achieved
 Language interoperability can also be achieved
@@ -52,9 +52,9 @@ Celery version 5.0 runs on,
 
 
 
 
 This is the last version to support Python 2.7,
 This is the last version to support Python 2.7,
-and from the next version (Celery 5.x) Python 3.6 or newer is required.
+and from the next version (Celery 5.x) Python 3.5 or newer is required.
 
 
-If you are running an older version of Python, you need to be running
+If you're running an older version of Python, you need to be running
 an older version of Celery:
 an older version of Celery:
 
 
 - Python 2.6: Celery series 3.1 or earlier.
 - Python 2.6: Celery series 3.1 or earlier.
@@ -62,12 +62,13 @@ an older version of Celery:
 - Python 2.4 was Celery series 2.2 or earlier.
 - Python 2.4 was Celery series 2.2 or earlier.
 
 
 Celery is a project with minimal funding,
 Celery is a project with minimal funding,
-so we do not support Microsoft Windows.
-Please do not open any issues related to that platform.
+so we don't support Microsoft Windows.
+Please don't open any issues related to that platform.
 
 
 *Celery* is usually used with a message broker to send and receive messages.
 *Celery* is usually used with a message broker to send and receive messages.
-The RabbitMQ transports is feature complete, but there's also Qpid and Amazon
-SQS broker support.
+The RabbitMQ, Redis transports are feature complete,
+but there's also experimental support for a myriad of other solutions, including
+using SQLite for local development.
 
 
 *Celery* can run on a single machine, on multiple machines, or even
 *Celery* can run on a single machine, on multiple machines, or even
 across datacenters.
 across datacenters.
@@ -75,7 +76,7 @@ across datacenters.
 Get Started
 Get Started
 ===========
 ===========
 
 
-If this is the first time you're trying to use Celery, or you are
+If this is the first time you're trying to use Celery, or you're
 new to Celery 4.0 coming from previous versions then you should read our
 new to Celery 4.0 coming from previous versions then you should read our
 getting started tutorials:
 getting started tutorials:
 
 
@@ -94,14 +95,14 @@ getting started tutorials:
     http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
     http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
 
 
 Celery is...
 Celery is...
-==========
+=============
 
 
 - **Simple**
 - **Simple**
 
 
     Celery is easy to use and maintain, and does *not need configuration files*.
     Celery is easy to use and maintain, and does *not need configuration files*.
 
 
     It has an active, friendly community you can talk to for support,
     It has an active, friendly community you can talk to for support,
-    including a `mailing-list`_ and and an IRC channel.
+    like at our `mailing-list`_, or the IRC channel.
 
 
     Here's one of the simplest applications you can make::
     Here's one of the simplest applications you can make::
 
 
@@ -117,7 +118,7 @@ Celery is...
 
 
     Workers and clients will automatically retry in the event
     Workers and clients will automatically retry in the event
     of connection loss or failure, and some brokers support
     of connection loss or failure, and some brokers support
-    HA in way of *Master/Master* or *Master/Slave* replication.
+    HA in the way of *Primary/Primary* or *Primary/Replica* replication.
 
 
 - **Fast**
 - **Fast**
 
 
@@ -129,14 +130,14 @@ Celery is...
 
 
     Almost every part of *Celery* can be extended or used on its own,
     Almost every part of *Celery* can be extended or used on its own,
     Custom pool implementations, serializers, compression schemes, logging,
     Custom pool implementations, serializers, compression schemes, logging,
-    schedulers, consumers, producers, broker transports and much more.
+    schedulers, consumers, producers, broker transports, and much more.
 
 
 It supports...
 It supports...
-============
+================
 
 
     - **Message Transports**
     - **Message Transports**
 
 
-        - RabbitMQ_, Amazon SQS
+        - RabbitMQ_, Redis_, Amazon SQS
 
 
     - **Concurrency**
     - **Concurrency**
 
 
@@ -182,7 +183,7 @@ integration packages:
     | `Tornado`_         | `tornado-celery`_      |
     | `Tornado`_         | `tornado-celery`_      |
     +--------------------+------------------------+
     +--------------------+------------------------+
 
 
-The integration packages are not strictly necessary, but they can make
+The integration packages aren't strictly necessary, but they can make
 development easier, and sometimes they add important hooks like closing
 development easier, and sometimes they add important hooks like closing
 database connections at ``fork``.
 database connections at ``fork``.
 
 
@@ -193,7 +194,6 @@ database connections at ``fork``.
 .. _`Bottle`: http://bottlepy.org/
 .. _`Bottle`: http://bottlepy.org/
 .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
 .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
 .. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
 .. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
-.. _`django-celery`: http://pypi.python.org/pypi/django-celery
 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
 .. _`web2py-celery`: http://code.google.com/p/web2py-celery/
 .. _`web2py-celery`: http://code.google.com/p/web2py-celery/
 .. _`Tornado`: http://www.tornadoweb.org/
 .. _`Tornado`: http://www.tornadoweb.org/
@@ -204,8 +204,8 @@ database connections at ``fork``.
 Documentation
 Documentation
 =============
 =============
 
 
-The `latest documentation`_ with user guides, tutorials and API reference
-is hosted at Read The Docs.
+The `latest documentation`_ is hosted at Read The Docs, containing user guides,
+tutorials, and an API reference.
 
 
 .. _`latest documentation`: http://docs.celeryproject.org/en/latest/
 .. _`latest documentation`: http://docs.celeryproject.org/en/latest/
 
 
@@ -217,15 +217,12 @@ Installation
 You can install Celery either via the Python Package Index (PyPI)
 You can install Celery either via the Python Package Index (PyPI)
 or from source.
 or from source.
 
 
-To install using `pip`,:
-::
-
-    $ pip install -U Celery
+To install using ``pip``:
 
 
-To install using `easy_install`,:
 ::
 ::
 
 
-    $ easy_install -U Celery
+
+    $ pip install -U Celery
 
 
 .. _bundles:
 .. _bundles:
 
 
@@ -236,13 +233,15 @@ Celery also defines a group of bundles that can be used
 to install Celery and the dependencies for a given feature.
 to install Celery and the dependencies for a given feature.
 
 
 You can specify these in your requirements or on the ``pip``
 You can specify these in your requirements or on the ``pip``
-command-line by using brackets.  Multiple bundles can be specified by
+command-line by using brackets. Multiple bundles can be specified by
 separating them by commas.
 separating them by commas.
+
 ::
 ::
 
 
+
     $ pip install "celery[librabbitmq]"
     $ pip install "celery[librabbitmq]"
 
 
-    $ pip install "celery[librabbitmq,auth,msgpack]"
+    $ pip install "celery[librabbitmq,redis,auth,msgpack]"
 
 
 The following bundles are available:
 The following bundles are available:
 
 
@@ -273,6 +272,9 @@ Transports and Backends
 :``celery[librabbitmq]``:
 :``celery[librabbitmq]``:
     for using the librabbitmq C library.
     for using the librabbitmq C library.
 
 
+:``celery[redis]``:
+    for using Redis as a message transport or as a result backend.
+
 :``celery[sqs]``:
 :``celery[sqs]``:
     for using Amazon SQS as a message transport (*experimental*).
     for using Amazon SQS as a message transport (*experimental*).
 
 
@@ -317,19 +319,22 @@ Transports and Backends
 Downloading and installing from source
 Downloading and installing from source
 --------------------------------------
 --------------------------------------
 
 
-Download the latest version of Celery from
+Download the latest version of Celery from PyPI:
+
 http://pypi.python.org/pypi/celery/
 http://pypi.python.org/pypi/celery/
 
 
 You can install it by doing the following,:
 You can install it by doing the following,:
+
 ::
 ::
 
 
+
     $ tar xvfz celery-0.0.0.tar.gz
     $ tar xvfz celery-0.0.0.tar.gz
     $ cd celery-0.0.0
     $ cd celery-0.0.0
     $ python setup.py build
     $ python setup.py build
     # python setup.py install
     # python setup.py install
 
 
 The last command must be executed as a privileged user if
 The last command must be executed as a privileged user if
-you are not currently using a virtualenv.
+you aren't currently using a virtualenv.
 
 
 .. _celery-installing-from-git:
 .. _celery-installing-from-git:
 
 
@@ -340,12 +345,14 @@ With pip
 ~~~~~~~~
 ~~~~~~~~
 
 
 The Celery development version also requires the development
 The Celery development version also requires the development
-versions of ``kombu``, ``amqp``, ``billiard`` and ``vine``.
+versions of ``kombu``, ``amqp``, ``billiard``, and ``vine``.
 
 
 You can install the latest snapshot of these using the following
 You can install the latest snapshot of these using the following
 pip commands:
 pip commands:
+
 ::
 ::
 
 
+
     $ pip install https://github.com/celery/celery/zipball/master#egg=celery
     $ pip install https://github.com/celery/celery/zipball/master#egg=celery
     $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
     $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
     $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
     $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
@@ -367,7 +374,7 @@ Getting Help
 Mailing list
 Mailing list
 ------------
 ------------
 
 
-For discussions about the usage, development, and future of celery,
+For discussions about the usage, development, and future of Celery,
 please join the `celery-users`_ mailing list.
 please join the `celery-users`_ mailing list.
 
 
 .. _`celery-users`: http://groups.google.com/group/celery-users/
 .. _`celery-users`: http://groups.google.com/group/celery-users/
@@ -387,7 +394,7 @@ network.
 Bug tracker
 Bug tracker
 ===========
 ===========
 
 
-If you have any suggestions, bug reports or annoyances please report them
+If you have any suggestions, bug reports, or annoyances please report them
 to our issue tracker at https://github.com/celery/celery/issues/
 to our issue tracker at https://github.com/celery/celery/issues/
 
 
 .. _wiki:
 .. _wiki:
@@ -404,7 +411,7 @@ Contributing
 
 
 Development of `celery` happens at GitHub: https://github.com/celery/celery
 Development of `celery` happens at GitHub: https://github.com/celery/celery
 
 
-You are highly encouraged to participate in the development
+You're highly encouraged to participate in the development
 of `celery`. If you don't like GitHub (for some reason) you're welcome
 of `celery`. If you don't like GitHub (for some reason) you're welcome
 to send regular patches.
 to send regular patches.
 
 
@@ -431,7 +438,19 @@ file in the top distribution directory for the full license text.
 .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master
 .. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master
     :target: https://codecov.io/github/celery/celery?branch=master
     :target: https://codecov.io/github/celery/celery?branch=master
 
 
-.. |bitdeli| image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png
-    :alt: Bitdeli badge
-    :target: https://bitdeli.com/free
+.. |license| image:: https://img.shields.io/pypi/l/celery.svg
+    :alt: BSD License
+    :target: https://opensource.org/licenses/BSD-3-Clause
+
+.. |wheel| image:: https://img.shields.io/pypi/wheel/celery.svg
+    :alt: Celery can be installed via wheel
+    :target: http://pypi.python.org/pypi/celery/
+
+.. |pyversion| image:: https://img.shields.io/pypi/pyversions/celery.svg
+    :alt: Supported Python versions.
+    :target: http://pypi.python.org/pypi/celery/
+
+.. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg
+    :alt: Supported Python implementations.
+    :target: http://pypi.python.org/pypi/celery/
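
The README above refers to "one of the simplest applications you can make", but the actual code block sits outside this diff's context. For illustration, a minimal sketch of such an app, assuming a placeholder broker URL (the real README example may differ):

    from celery import Celery

    # Broker URL is a placeholder; use your own RabbitMQ/Redis URL.
    app = Celery('hello', broker='amqp://guest@localhost//')

    @app.task
    def hello():
        return 'hello world'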
 
 

+ 22 - 14
celery/__init__.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-"""Distributed Task Queue"""
+"""Distributed Task Queue."""
 # :copyright: (c) 2015-2016 Ask Solem.  All rights reserved.
 # :copyright: (c) 2015-2016 Ask Solem.  All rights reserved.
 # :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
 # :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
 # :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
 # :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
@@ -13,11 +13,12 @@ from collections import namedtuple
 
 
 SERIES = '0today8'
 SERIES = '0today8'
 
 
-__version__ = '4.0.0rc3'
+__version__ = '4.0.0rc5'
 __author__ = 'Ask Solem'
 __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
 __contact__ = 'ask@celeryproject.org'
 __homepage__ = 'http://celeryproject.org'
 __homepage__ = 'http://celeryproject.org'
 __docformat__ = 'restructuredtext'
 __docformat__ = 'restructuredtext'
+__keywords__ = 'task job queue distributed messaging actor'
 
 
 # -eof meta-
 # -eof meta-
 
 
@@ -25,7 +26,7 @@ __all__ = [
     'Celery', 'bugreport', 'shared_task', 'task',
     'Celery', 'bugreport', 'shared_task', 'task',
     'current_app', 'current_task', 'maybe_signature',
     'current_app', 'current_task', 'maybe_signature',
     'chain', 'chord', 'chunks', 'group', 'signature',
     'chain', 'chord', 'chunks', 'group', 'signature',
-    'xmap', 'xstarmap', 'uuid', 'version', '__version__',
+    'xmap', 'xstarmap', 'uuid',
 ]
 ]
 
 
 VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)
 VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)
@@ -40,8 +41,8 @@ _temp = re.match(
     r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
     r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
 VERSION = version_info = version_info_t(
 VERSION = version_info = version_info_t(
     int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
     int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
-del(_temp)
-del(re)
+del _temp
+del re
 
 
 if os.environ.get('C_IMPDEBUG'):  # pragma: no cover
 if os.environ.get('C_IMPDEBUG'):  # pragma: no cover
     import builtins
     import builtins
@@ -105,24 +106,28 @@ def _patch_eventlet():
 
 
 
 
 def _patch_gevent():
 def _patch_gevent():
-    from gevent import monkey, signal as gsignal, version_info
+    import gevent
+    from gevent import monkey, signal as gevent_signal
 
 
     monkey.patch_all()
     monkey.patch_all()
-    if version_info[0] == 0:  # pragma: no cover
+    if gevent.version_info[0] == 0:  # pragma: no cover
         # Signals aren't working in gevent versions <1.0,
         # Signals aren't working in gevent versions <1.0,
-        # and are not monkey patched by patch_all()
+        # and aren't monkey patched by patch_all()
         _signal = __import__('signal')
         _signal = __import__('signal')
-        _signal.signal = gsignal
+        _signal.signal = gevent_signal
 
 
 
 
 def maybe_patch_concurrency(argv=sys.argv,
 def maybe_patch_concurrency(argv=sys.argv,
                             short_opts=['-P'], long_opts=['--pool'],
                             short_opts=['-P'], long_opts=['--pool'],
                             patches={'eventlet': _patch_eventlet,
                             patches={'eventlet': _patch_eventlet,
                                      'gevent': _patch_gevent}):
                                      'gevent': _patch_gevent}):
-    """With short and long opt alternatives that specify the command line
+    """Apply eventlet/gevent monkeypatches.
+
+    With short and long opt alternatives that specify the command line
     option to set the pool, this makes sure that anything that needs
     option to set the pool, this makes sure that anything that needs
     to be patched is completed as early as possible.
     to be patched is completed as early as possible.
-    (e.g. eventlet/gevent monkey patches)."""
+    (e.g., eventlet/gevent monkey patches).
+    """
     try:
     try:
         pool = _find_option_with_arg(argv, short_opts, long_opts)
         pool = _find_option_with_arg(argv, short_opts, long_opts)
     except KeyError:
     except KeyError:
@@ -140,9 +145,12 @@ def maybe_patch_concurrency(argv=sys.argv,
         concurrency.get_implementation(pool)
         concurrency.get_implementation(pool)
 
 
 # Lazy loading
 # Lazy loading
-from celery import five  # noqa
+from . import local  # noqa
+
 
 
-old_module, new_module = five.recreate_module(  # pragma: no cover
+# This just creates a new module that imports stuff on first attribute
+# access.  This makes the library faster to use.
+old_module, new_module = local.recreate_module(  # pragma: no cover
     __name__,
     __name__,
     by_module={
     by_module={
         'celery.app': ['Celery', 'bugreport', 'shared_task'],
         'celery.app': ['Celery', 'bugreport', 'shared_task'],
@@ -159,7 +167,7 @@ old_module, new_module = five.recreate_module(  # pragma: no cover
     __package__='celery', __file__=__file__,
     __package__='celery', __file__=__file__,
     __path__=__path__, __doc__=__doc__, __version__=__version__,
     __path__=__path__, __doc__=__doc__, __version__=__version__,
     __author__=__author__, __contact__=__contact__,
     __author__=__author__, __contact__=__contact__,
-    __homepage__=__homepage__, __docformat__=__docformat__, five=five,
+    __homepage__=__homepage__, __docformat__=__docformat__, local=local,
     VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
     VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
     version_info_t=version_info_t,
     version_info_t=version_info_t,
     version_info=version_info,
     version_info=version_info,
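
The `maybe_patch_concurrency` helper changed above scans the command line for the `-P`/`--pool` option and applies the eventlet/gevent monkey-patches as early as possible. A hedged sketch of how a launcher script might call it (script name and invocation are hypothetical):

    import sys
    from celery import maybe_patch_concurrency

    # Hypothetical launcher: patch before anything imports sockets or threads,
    # e.g. when invoked as `python launch.py -P eventlet`.
    maybe_patch_concurrency(sys.argv)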

+ 3 - 2
celery/__main__.py

@@ -7,10 +7,11 @@ __all__ = ['main']
 
 
 
 
 def main():
 def main():
+    """Entrypoint to the ``celery`` umbrella command."""
     if 'multi' not in sys.argv:
     if 'multi' not in sys.argv:
         maybe_patch_concurrency()
         maybe_patch_concurrency()
-    from celery.bin.celery import main
-    main()
+    from celery.bin.celery import main as _main
+    _main()
 
 
 
 
 if __name__ == '__main__':  # pragma: no cover
 if __name__ == '__main__':  # pragma: no cover

+ 9 - 5
celery/_state.py

@@ -23,18 +23,18 @@ __all__ = [
 #: Global default app used when no current app.
 #: Global default app used when no current app.
 default_app = None
 default_app = None
 
 
-#: List of all app instances (weakrefs), must not be used directly.
+#: List of all app instances (weakrefs), mustn't be used directly.
 _apps = weakref.WeakSet()
 _apps = weakref.WeakSet()
 
 
-#: global set of functions to call whenever a new app is finalized
-#: E.g. Shared tasks, and built-in tasks are created
-#: by adding callbacks here.
+#: Global set of functions to call whenever a new app is finalized.
+#: Shared tasks, and built-in tasks are created by adding callbacks here.
 _on_app_finalizers = set()
 _on_app_finalizers = set()
 
 
 _task_join_will_block = False
 _task_join_will_block = False
 
 
 
 
 def connect_on_app_finalize(callback):
 def connect_on_app_finalize(callback):
+    """Connect callback to be called when any app is finalized."""
     _on_app_finalizers.add(callback)
     _on_app_finalizers.add(callback)
     return callback
     return callback
 
 
@@ -65,6 +65,7 @@ _task_stack = LocalStack()
 
 
 
 
 def set_default_app(app):
 def set_default_app(app):
+    """Set default app."""
     global default_app
     global default_app
     default_app = app
     default_app = app
 
 
@@ -86,7 +87,10 @@ def _set_current_app(app):
 
 
 if os.environ.get('C_STRICT_APP'):  # pragma: no cover
 if os.environ.get('C_STRICT_APP'):  # pragma: no cover
     def get_current_app():
     def get_current_app():
-        raise Exception('USES CURRENT APP')
+        """Return the current app."""
+        raise RuntimeError('USES CURRENT APP')
+elif os.environ.get('C_WARN_APP'):  # pragma: no cover
+    def get_current_app():  # noqa
         import traceback
         import traceback
         print('-- USES CURRENT_APP', file=sys.stderr)  # noqa+
         print('-- USES CURRENT_APP', file=sys.stderr)  # noqa+
         traceback.print_stack(file=sys.stderr)
         traceback.print_stack(file=sys.stderr)
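
`connect_on_app_finalize` above registers a callback run whenever an app is finalized; built-in and shared tasks are created this way. A sketch of a user-defined callback, assuming the callback receives the app being finalized (as the built-in task factories registered through this hook do); the task name is made up:

    from celery._state import connect_on_app_finalize

    @connect_on_app_finalize
    def add_ping_task(app):
        # Hypothetical callback: give every finalized app a trivial task.
        @app.task(name='diagnostics.ping')
        def ping():
            return 'pong'
        return ping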

+ 10 - 7
celery/app/__init__.py

@@ -40,6 +40,7 @@ pop_current_task = _task_stack.pop
 
 
 
 
 def bugreport(app=None):
 def bugreport(app=None):
+    """Return information useful in bug reports."""
     return (app or current_app()).bugreport()
     return (app or current_app()).bugreport()
 
 
 
 
@@ -69,11 +70,13 @@ def _app_or_default_trace(app=None):  # pragma: no cover
 
 
 
 
 def enable_trace():
 def enable_trace():
+    """Enable tracing of app instances."""
     global app_or_default
     global app_or_default
     app_or_default = _app_or_default_trace
     app_or_default = _app_or_default_trace
 
 
 
 
 def disable_trace():
 def disable_trace():
+    """Disable tracing of app instances."""
     global app_or_default
     global app_or_default
     app_or_default = _app_or_default
     app_or_default = _app_or_default
 
 
@@ -84,9 +87,9 @@ else:
 
 
 
 
 def shared_task(*args, **kwargs):
 def shared_task(*args, **kwargs):
-    """Create shared tasks (decorator).
+    """Create shared task (decorator).
 
 
-    This can be used by library authors to create tasks that will work
+    This can be used by library authors to create tasks that'll work
     for any app environment.
     for any app environment.
 
 
     Returns:
     Returns:
@@ -94,19 +97,19 @@ def shared_task(*args, **kwargs):
         current apps task registry.
         current apps task registry.
 
 
     Example:
     Example:
+
         >>> from celery import Celery, shared_task
         >>> from celery import Celery, shared_task
         >>> @shared_task
         >>> @shared_task
         ... def add(x, y):
         ... def add(x, y):
         ...     return x + y
         ...     return x + y
-
-        >>> app1 = Celery(broker='amqp://A.example.com')
+        ...
+        >>> app1 = Celery(broker='amqp://')
         >>> add.app is app1
         >>> add.app is app1
         True
         True
-
-        >>> app2 = Celery(broker='amqp://B.example.com')
+        >>> app2 = Celery(broker='redis://')
         >>> add.app is app2
         >>> add.app is app2
+        True
     """
     """
-
     def create_shared_task(**options):
     def create_shared_task(**options):
 
 
         def __inner(fun):
         def __inner(fun):

+ 23 - 17
celery/app/amqp.py

@@ -16,7 +16,7 @@ from celery import signals
 from celery.utils.nodenames import anon_nodename
 from celery.utils.nodenames import anon_nodename
 from celery.utils.saferepr import saferepr
 from celery.utils.saferepr import saferepr
 from celery.utils.text import indent as textindent
 from celery.utils.text import indent as textindent
-from celery.utils.timeutils import maybe_make_aware, to_utc
+from celery.utils.time import maybe_make_aware, to_utc
 
 
 from . import routes as _routes
 from . import routes as _routes
 
 
@@ -51,6 +51,7 @@ class Queues(dict):
         ha_policy (Sequence, str): Default HA policy for queues with none set.
         ha_policy (Sequence, str): Default HA policy for queues with none set.
         max_priority (int): Default x-max-priority for queues with none set.
         max_priority (int): Default x-max-priority for queues with none set.
     """
     """
+
     #: If set, this is a subset of queues to consume from.
     #: If set, this is a subset of queues to consume from.
     #: The rest of the queues are then used for routing only.
     #: The rest of the queues are then used for routing only.
     _consume_from = None
     _consume_from = None
@@ -153,17 +154,18 @@ class Queues(dict):
         return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
         return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
 
 
     def select_add(self, queue, **kwargs):
     def select_add(self, queue, **kwargs):
-        """Add new task queue that will be consumed from even when
-        a subset has been selected using the
-        :option:`celery worker -Q` option."""
+        """Add new task queue that'll be consumed from.
+
+        The queue will be active even when a subset has been selected
+        using the :option:`celery worker -Q` option.
+        """
         q = self.add(queue, **kwargs)
         q = self.add(queue, **kwargs)
         if self._consume_from is not None:
         if self._consume_from is not None:
             self._consume_from[q.name] = q
             self._consume_from[q.name] = q
         return q
         return q
 
 
     def select(self, include):
     def select(self, include):
-        """Sets :attr:`consume_from` by selecting a subset of the
-        currently defined queues.
+        """Select a subset of currently defined queues to consume from.
 
 
         Arguments:
         Arguments:
             include (Sequence[str], str): Names of queues to consume from.
             include (Sequence[str], str): Names of queues to consume from.
@@ -174,7 +176,7 @@ class Queues(dict):
             }
             }
 
 
     def deselect(self, exclude):
     def deselect(self, exclude):
-        """Deselect queues so that they will not be consumed from.
+        """Deselect queues so that they won't be consumed from.
 
 
         Arguments:
         Arguments:
             exclude (Sequence[str], str): Names of queues to avoid
             exclude (Sequence[str], str): Names of queues to avoid
@@ -200,6 +202,8 @@ class Queues(dict):
 
 
 
 
 class AMQP:
 class AMQP:
+    """App AMQP API: app.amqp."""
+
     Connection = Connection
     Connection = Connection
     Consumer = Consumer
     Consumer = Consumer
     Producer = Producer
     Producer = Producer
@@ -217,8 +221,8 @@ class AMQP:
     _producer_pool = None
     _producer_pool = None
 
 
     # Exchange class/function used when defining automatic queues.
     # Exchange class/function used when defining automatic queues.
-    # E.g. you can use ``autoexchange = lambda n: None`` to use the
-    # AMQP default exchange, which is a shortcut to bypass routing
+    # For example, you can use ``autoexchange = lambda n: None`` to use the
+    # AMQP default exchange: a shortcut to bypass routing
     # and instead send directly to the queue named in the routing key.
     # and instead send directly to the queue named in the routing key.
     autoexchange = None
     autoexchange = None
 
 
@@ -246,8 +250,8 @@ class AMQP:
 
 
     def Queues(self, queues, create_missing=None, ha_policy=None,
     def Queues(self, queues, create_missing=None, ha_policy=None,
                autoexchange=None, max_priority=None):
                autoexchange=None, max_priority=None):
-        """Create new :class:`Queues` instance, using queue defaults
-        from the current configuration."""
+        # Create new :class:`Queues` instance, using queue defaults
+        # from the current configuration.
         conf = self.app.conf
         conf = self.app.conf
         if create_missing is None:
         if create_missing is None:
             create_missing = conf.task_create_missing_queues
             create_missing = conf.task_create_missing_queues
@@ -372,9 +376,9 @@ class AMQP:
         kwargs = kwargs or {}
         kwargs = kwargs or {}
         utc = self.utc
         utc = self.utc
         if not isinstance(args, (list, tuple)):
         if not isinstance(args, (list, tuple)):
-            raise ValueError('task args must be a list or tuple')
+            raise TypeError('task args must be a list or tuple')
         if not isinstance(kwargs, Mapping):
         if not isinstance(kwargs, Mapping):
-            raise ValueError('task keyword arguments must be a mapping')
+            raise TypeError('task keyword arguments must be a mapping')
         if countdown:  # convert countdown to ETA
         if countdown:  # convert countdown to ETA
             self._verify_seconds(countdown, 'countdown')
             self._verify_seconds(countdown, 'countdown')
             now = now or self.app.now()
             now = now or self.app.now()
@@ -484,10 +488,12 @@ class AMQP:
                 except AttributeError:
                 except AttributeError:
                     exchange_type = 'direct'
                     exchange_type = 'direct'
 
 
-            if not exchange and not routing_key and exchange_type == 'direct':
-                exchange, routing_key = '', qname
-            else:
-                exchange = exchange or queue.exchange.name or default_exchange
+            # convert to anon-exchange, when exchange not set and direct ex.
+            if not exchange or not routing_key and exchange_type == 'direct':
+                exchange, routing_key = '', qname
+            elif exchange is None:
+                # not topic exchange, and exchange not undefined
+                exchange = queue.exchange.name or default_exchange
                 routing_key = routing_key or queue.routing_key or default_rkey
                 routing_key = routing_key or queue.routing_key or default_rkey
             if declare is None and queue and not isinstance(queue, Broadcast):
             if declare is None and queue and not isinstance(queue, Broadcast):
                 declare = [queue]
                 declare = [queue]
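
The `Queues` docstrings updated above describe a subset API for choosing which queues a worker consumes from. A hedged sketch of how it is used, assuming an existing `app` instance; the queue names are purely illustrative:

    # Consume only from these two queues, as `celery worker -Q default,images` would.
    app.amqp.queues.select(['default', 'images'])

    # Stop consuming from one of them again.
    app.amqp.queues.deselect('images')

    # Add another queue to the active subset even though a subset was selected.
    app.amqp.queues.select_add('images.thumbnail')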

+ 3 - 2
celery/app/annotations.py

@@ -17,6 +17,7 @@ __all__ = ['MapAnnotation', 'prepare', 'resolve_all']
 
 
 
 
 class MapAnnotation(dict):
 class MapAnnotation(dict):
+    """Annotation map: task_name => attributes."""
 
 
     def annotate_any(self):
     def annotate_any(self):
         try:
         try:
@@ -32,8 +33,7 @@ class MapAnnotation(dict):
 
 
 
 
 def prepare(annotations):
 def prepare(annotations):
-    """Expands the :setting:`task_annotations` setting."""
-
+    """Expand the :setting:`task_annotations` setting."""
     def expand_annotation(annotation):
     def expand_annotation(annotation):
         if isinstance(annotation, dict):
         if isinstance(annotation, dict):
             return MapAnnotation(annotation)
             return MapAnnotation(annotation)
@@ -49,4 +49,5 @@ def prepare(annotations):
 
 
 
 
 def resolve_all(anno, task):
 def resolve_all(anno, task):
+    """Resolve all pending annotations."""
     return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)
     return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)
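
`prepare()` above expands the :setting:`task_annotations` setting: a plain dict becomes a `MapAnnotation` keyed by task name, with `'*'` matching any task. A hedged configuration sketch, assuming an existing `app`; the task name is hypothetical:

    app.conf.task_annotations = {
        'tasks.add': {'rate_limit': '10/s'},  # applies only to this task
        '*': {'time_limit': 30},              # applies to every task
    }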

+ 63 - 0
celery/app/backends.py

@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+"""Backend selection."""
+import sys
+import types
+from celery.exceptions import ImproperlyConfigured
+from celery._state import current_app
+from celery.utils.imports import load_extension_class_names, symbol_by_name
+
+__all__ = ['by_name', 'by_url']
+
+UNKNOWN_BACKEND = """
+Unknown result backend: {0!r}.  Did you spell that correctly? ({1!r})
+"""
+
+BACKEND_ALIASES = {
+    'amqp': 'celery.backends.amqp:AMQPBackend',
+    'rpc': 'celery.backends.rpc.RPCBackend',
+    'cache': 'celery.backends.cache:CacheBackend',
+    'redis': 'celery.backends.redis:RedisBackend',
+    'mongodb': 'celery.backends.mongodb:MongoBackend',
+    'db': 'celery.backends.database:DatabaseBackend',
+    'database': 'celery.backends.database:DatabaseBackend',
+    'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend',
+    'cassandra': 'celery.backends.cassandra:CassandraBackend',
+    'couchbase': 'celery.backends.couchbase:CouchbaseBackend',
+    'couchdb': 'celery.backends.couchdb:CouchBackend',
+    'riak': 'celery.backends.riak:RiakBackend',
+    'file': 'celery.backends.filesystem:FilesystemBackend',
+    'disabled': 'celery.backends.base:DisabledBackend',
+    'consul': 'celery.backends.consul:ConsulBackend'
+}
+
+
+def by_name(backend=None, loader=None,
+            extension_namespace='celery.result_backends'):
+    """Get backend class by name/alias."""
+    backend = backend or 'disabled'
+    loader = loader or current_app.loader
+    aliases = dict(BACKEND_ALIASES, **loader.override_backends)
+    aliases.update(
+        load_extension_class_names(extension_namespace) or {})
+    try:
+        cls = symbol_by_name(backend, aliases)
+    except ValueError as exc:
+        raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
+            backend, exc)).with_traceback(sys.exc_info()[2])
+    if isinstance(cls, types.ModuleType):
+        raise ImproperlyConfigured(UNKNOWN_BACKEND.strip().format(
+            backend, 'is a Python module, not a backend class.'))
+    return cls
+
+
+def by_url(backend=None, loader=None):
+    """Get backend class by URL."""
+    url = None
+    if backend and '://' in backend:
+        url = backend
+        scheme, _, _ = url.partition('://')
+        if '+' in scheme:
+            backend, url = url.split('+', 1)
+        else:
+            backend = scheme
+    return by_name(backend, loader), url
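
Following the code added above, `by_url()` either passes a plain URL through with its scheme as the backend alias, or splits an `alias+scheme://` URL into the backend alias and the remaining connection URL. A small sketch (URLs are placeholders):

    from celery.app.backends import by_url

    # Plain scheme: alias is the scheme, URL is kept as-is.
    cls, url = by_url('redis://localhost/0')
    # -> (RedisBackend class, 'redis://localhost/0')

    # 'alias+scheme' form: the part before '+' picks the backend class,
    # the remainder is handed to it as the connection URL.
    cls, url = by_url('db+postgresql://scott:tiger@localhost/mydb')
    # -> (DatabaseBackend class, 'postgresql://scott:tiger@localhost/mydb')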

+ 171 - 95
celery/app/base.py

@@ -31,17 +31,20 @@ from celery.utils import abstract
 from celery.utils.collections import AttributeDictMixin
 from celery.utils.collections import AttributeDictMixin
 from celery.utils.dispatch import Signal
 from celery.utils.dispatch import Signal
 from celery.utils.functional import first, maybe_list, head_from_fun
 from celery.utils.functional import first, maybe_list, head_from_fun
-from celery.utils.timeutils import timezone
+from celery.utils.time import timezone
 from celery.utils.imports import gen_task_name, instantiate, symbol_by_name
 from celery.utils.imports import gen_task_name, instantiate, symbol_by_name
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.utils.objects import FallbackContext, mro_lookup
 from celery.utils.objects import FallbackContext, mro_lookup
 
 
 from .annotations import prepare as prepare_annotations
 from .annotations import prepare as prepare_annotations
+from . import backends
 from .defaults import find_deprecated_settings
 from .defaults import find_deprecated_settings
 from .registry import TaskRegistry
 from .registry import TaskRegistry
 from .utils import (
 from .utils import (
     AppPickler, Settings,
     AppPickler, Settings,
-    bugreport, _unpickle_app, _unpickle_app_v2, appstr, detect_settings,
+    bugreport, _unpickle_app, _unpickle_app_v2,
+    _old_key_to_new, _new_key_to_old,
+    appstr, detect_settings,
 )
 )
 
 
 # Load all builtin tasks
 # Load all builtin tasks
@@ -51,53 +54,93 @@ __all__ = ['Celery']
 
 
 logger = get_logger(__name__)
 logger = get_logger(__name__)
 
 
-USING_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
 BUILTIN_FIXUPS = {
 BUILTIN_FIXUPS = {
     'celery.fixups.django:fixup',
     'celery.fixups.django:fixup',
 }
 }
+USING_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
 
 
-ERR_ENVVAR_NOT_SET = """\
+ERR_ENVVAR_NOT_SET = """
 The environment variable {0!r} is not set,
 The environment variable {0!r} is not set,
 and as such the configuration could not be loaded.
 and as such the configuration could not be loaded.
-Please set this variable and make it point to
-a configuration module."""
+
+Please set this variable and make sure it points to
+a valid configuration module.
+
+Example:
+    {0}="proj.celeryconfig"
+"""
 
 
 
 
 def app_has_custom(app, attr):
 def app_has_custom(app, attr):
+    """Return true if app has customized method `attr`.
+
+    Note:
+        This is used for optimizations in cases where we know
+        how the default behavior works, but need to account
+        for someone using inheritance to override a method/property.
+    """
     return mro_lookup(app.__class__, attr, stop={Celery, object},
     return mro_lookup(app.__class__, attr, stop={Celery, object},
                       monkey_patched=[__name__])
                       monkey_patched=[__name__])
 
 
 
 
 def _unpickle_appattr(reverse_name, args):
 def _unpickle_appattr(reverse_name, args):
-    """Given an attribute name and a list of args, gets
-    the attribute from the current app and calls it."""
+    """Unpickle app."""
+    # Given an attribute name and a list of args, gets
+    # the attribute from the current app and calls it.
     return get_current_app()._rgetattr(reverse_name)(*args)
     return get_current_app()._rgetattr(reverse_name)(*args)
 
 
 
 
 def _after_fork_cleanup_app(app):
 def _after_fork_cleanup_app(app):
+    # This is used with multiprocessing.register_after_fork,
+    # so it needs to be at module level.
     try:
     try:
         app._after_fork()
         app._after_fork()
-    except Exception as exc:
+    except Exception as exc:  # pylint: disable=broad-except
         logger.info('after forker raised exception: %r', exc, exc_info=1)
         logger.info('after forker raised exception: %r', exc, exc_info=1)
 
 
 
 
 class PendingConfiguration(UserDict, AttributeDictMixin):
 class PendingConfiguration(UserDict, AttributeDictMixin):
     # `app.conf` will be of this type before being explicitly configured,
     # `app.conf` will be of this type before being explicitly configured,
-    # which means the app can keep any configuration set directly
+    # meaning the app can keep any configuration set directly
     # on `app.conf` before the `app.config_from_object` call.
     # on `app.conf` before the `app.config_from_object` call.
     #
     #
     # accessing any key will finalize the configuration,
     # accessing any key will finalize the configuration,
     # replacing `app.conf` with a concrete settings object.
     # replacing `app.conf` with a concrete settings object.
 
 
     callback = None
     callback = None
-    data = None
+    _data = None
 
 
     def __init__(self, conf, callback):
     def __init__(self, conf, callback):
-        object.__setattr__(self, 'data', conf)
+        object.__setattr__(self, '_data', conf)
         object.__setattr__(self, 'callback', callback)
         object.__setattr__(self, 'callback', callback)
 
 
-    def __getitem__(self, key):
-        return self.callback(key)
+    def __setitem__(self, key, value):
+        self._data[key] = value
+
+    def clear(self):
+        self._data.clear()
+
+    def update(self, *args, **kwargs):
+        self._data.update(*args, **kwargs)
+
+    def setdefault(self, *args, **kwargs):
+        return self._data.setdefault(*args, **kwargs)
+
+    def __contains__(self, key):
+        # XXX will not show finalized configuration
+        # setdefault will cause `key in d` to happen,
+        # so for setdefault to be lazy, so does contains.
+        return key in self._data
+
+    def __len__(self):
+        return len(self.data)
+
+    def __repr__(self):
+        return repr(self.data)
+
+    @cached_property
+    def data(self):
+        return self.callback()
 
 
 
 
 @abstract.AbstractApp.register
 @abstract.AbstractApp.register
@@ -108,28 +151,31 @@ class Celery:
         main (str): Name of the main module if running as `__main__`.
         main (str): Name of the main module if running as `__main__`.
             This is used as the prefix for auto-generated task names.
             This is used as the prefix for auto-generated task names.
 
 
+    Keyword Arguments:
         broker (str): URL of the default broker used.
         broker (str): URL of the default broker used.
-        loader (str, type): The loader class, or the name of the loader
-            class to use.  Default is :class:`celery.loaders.app.AppLoader`.
-        backend (str, type): The result store backend class, or the name of the
-            backend class to use. Default is the value of the
-            :setting:`result_backend` setting.
-        amqp (str, type): AMQP object or class name.
-        events (str, type): Events object or class name.
-        log (str, type): Log object or class name.
-        control (str, type): Control object or class name.
-        set_as_current (bool):  Make this the global current app.
-        tasks (str, type): A task registry or the name of a registry class.
-        include (List[str]): List of modules every worker should import.
-        fixups (List[str]): List of fix-up plug-ins (see e.g.
-            :mod:`celery.fixups.django`).
+        backend (Union[str, type]): The result store backend class,
+            or the name of the backend class to use.
+
+            Default is the value of the :setting:`result_backend` setting.
         autofinalize (bool): If set to False a :exc:`RuntimeError`
         autofinalize (bool): If set to False a :exc:`RuntimeError`
             will be raised if the task registry or tasks are used before
             will be raised if the task registry or tasks are used before
             the app is finalized.
             the app is finalized.
-        config_source (str, type): receives a class with class level attributes
-            that allows configurating Celery from a single object.
-            All attributes described in the documentation can be defined.
+        set_as_current (bool):  Make this the global current app.
+        include (List[str]): List of modules every worker should import.
+
+        amqp (Union[str, type]): AMQP object or class name.
+        events (Union[str, type]): Events object or class name.
+        log (Union[str, type]): Log object or class name.
+        control (Union[str, type]): Control object or class name.
+        tasks (Union[str, type]): A task registry, or the name of
+            a registry class.
+        fixups (List[str]): List of fix-up plug-ins (e.g., see
+            :mod:`celery.fixups.django`).
+        config_source (Union[str, type]): Take configuration from a class,
+            or object.  Attributes may include any settings described in
+            the documentation.
     """
     """
+
     #: This is deprecated, use :meth:`reduce_keys` instead
     #: This is deprecated, use :meth:`reduce_keys` instead
     Pickler = AppPickler
     Pickler = AppPickler
 
 
@@ -154,8 +200,8 @@ class Celery:
 
 
     amqp_cls = 'celery.app.amqp:AMQP'
     amqp_cls = 'celery.app.amqp:AMQP'
     backend_cls = None
     backend_cls = None
-    events_cls = 'celery.events:Events'
-    loader_cls = 'celery.loaders.app:AppLoader'
+    events_cls = 'celery.app.events:Events'
+    loader_cls = None
     log_cls = 'celery.app.log:Logging'
     log_cls = 'celery.app.log:Logging'
     control_cls = 'celery.app.control:Control'
     control_cls = 'celery.app.control:Control'
     task_cls = 'celery.app.task:Task'
     task_cls = 'celery.app.task:Task'
@@ -187,7 +233,7 @@ class Celery:
         self.main = main
         self.main = main
         self.amqp_cls = amqp or self.amqp_cls
         self.amqp_cls = amqp or self.amqp_cls
         self.events_cls = events or self.events_cls
         self.events_cls = events or self.events_cls
-        self.loader_cls = loader or self.loader_cls
+        self.loader_cls = loader or self._get_default_loader()
         self.log_cls = log or self.log_cls
         self.log_cls = log or self.log_cls
         self.control_cls = control or self.control_cls
         self.control_cls = control or self.control_cls
         self.task_cls = task_cls or self.task_cls
         self.task_cls = task_cls or self.task_cls
@@ -211,7 +257,7 @@ class Celery:
             self._tasks = TaskRegistry(self._tasks or {})
             self._tasks = TaskRegistry(self._tasks or {})
 
 
         # If the class defines a custom __reduce_args__ we need to use
         # If the class defines a custom __reduce_args__ we need to use
-        # the old way of pickling apps, which is pickling a list of
+        # the old way of pickling apps: pickling a list of
         # args instead of the new way that pickles a dict of keywords.
         # args instead of the new way that pickles a dict of keywords.
         self._using_v1_reduce = app_has_custom(self, '__reduce_args__')
         self._using_v1_reduce = app_has_custom(self, '__reduce_args__')
 
 
@@ -224,8 +270,9 @@ class Celery:
         self.__autoset('include', include)
         self.__autoset('include', include)
         self._conf = Settings(
         self._conf = Settings(
             PendingConfiguration(
             PendingConfiguration(
-                self._preconf, self._get_from_conf_and_finalize),
+                self._preconf, self._finalize_pending_conf),
             prefix=self.namespace,
             prefix=self.namespace,
+            keys=(_old_key_to_new, _new_key_to_old),
         )
         )
 
 
         # - Apply fix-ups.
         # - Apply fix-ups.
@@ -247,6 +294,14 @@ class Celery:
         self.on_init()
         self.on_init()
         _register_app(self)
         _register_app(self)
 
 
+    def _get_default_loader(self):
+        # the --loader command-line argument sets the environment variable.
+        return (
+            os.environ.get('CELERY_LOADER') or
+            self.loader_cls or
+            'celery.loaders.app:AppLoader'
+        )
+
     def on_init(self):
     def on_init(self):
         """Optional callback called at init."""
         """Optional callback called at init."""
         pass
         pass
@@ -257,11 +312,11 @@ class Celery:
             self._preconf_set_by_auto.add(key)
             self._preconf_set_by_auto.add(key)
 
 
     def set_current(self):
     def set_current(self):
-        """Makes this the current app for this thread."""
+        """Make this the current app for this thread."""
         _set_current_app(self)
         _set_current_app(self)
 
 
     def set_default(self):
     def set_default(self):
-        """Makes this the default app for all threads."""
+        """Make this the default app for all threads."""
         set_default_app(self)
         set_default_app(self)
 
 
     def _ensure_after_fork(self):
     def _ensure_after_fork(self):
@@ -270,17 +325,11 @@ class Celery:
             if register_after_fork is not None:
             if register_after_fork is not None:
                 register_after_fork(self, _after_fork_cleanup_app)
                 register_after_fork(self, _after_fork_cleanup_app)
 
 
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *exc_info):
-        self.close()
-
     def close(self):
     def close(self):
         """Clean up after the application.
         """Clean up after the application.
 
 
-        Only necessary for dynamically created apps for which you can
-        use the :keyword:`with` statement instead
+        Only necessary for dynamically created apps, and you should
+        probably use the :keyword:`with` statement instead.
 
 
         Example:
         Example:
             >>> with Celery(set_as_current=False) as app:
             >>> with Celery(set_as_current=False) as app:
@@ -331,7 +380,7 @@ class Celery:
             a proxy object, so that the act of creating the task is not
             a proxy object, so that the act of creating the task is not
             performed until the task is used or the task registry is accessed.
             performed until the task is used or the task registry is accessed.
 
 
-            If you are depending on binding to be deferred, then you must
+            If you're depending on binding to be deferred, then you must
             not access any attributes on the returned object until the
             not access any attributes on the returned object until the
             application is fully set up (finalized).
             application is fully set up (finalized).
         """
         """
@@ -423,8 +472,11 @@ class Celery:
         return gen_task_name(self, name, module)
         return gen_task_name(self, name, module)
 
 
     def finalize(self, auto=False):
     def finalize(self, auto=False):
-        """Finalizes the app by loading built-in tasks,
-        and evaluating pending task decorators."""
+        """Finalize the app.
+
+        This loads built-in tasks, evaluates pending task decorators,
+        reads configuration, etc.
+        """
         with self._finalize_mutex:
         with self._finalize_mutex:
             if not self.finalized:
             if not self.finalized:
                 if auto and not self.autofinalize:
                 if auto and not self.autofinalize:
@@ -467,8 +519,9 @@ class Celery:
 
 
     def config_from_object(self, obj,
     def config_from_object(self, obj,
                            silent=False, force=False, namespace=None):
                            silent=False, force=False, namespace=None):
-        """Reads configuration from object, where object is either
-        an object or the name of a module to import.
+        """Read configuration from object.
+
+        Object is either an actual object or the name of a module to import.
 
 
         Example:
         Example:
             >>> celery.config_from_object('myapp.celeryconfig')
             >>> celery.config_from_object('myapp.celeryconfig')
@@ -503,7 +556,7 @@ class Celery:
             if silent:
             if silent:
                 return False
                 return False
             raise ImproperlyConfigured(
             raise ImproperlyConfigured(
-                ERR_ENVVAR_NOT_SET.format(variable_name))
+                ERR_ENVVAR_NOT_SET.strip().format(variable_name))
         return self.config_from_object(module_name, silent=silent, force=force)
         return self.config_from_object(module_name, silent=silent, force=force)
 
 
     def config_from_cmdline(self, argv, namespace='celery'):
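
The reworded ERR_ENVVAR_NOT_SET message above suggests pointing the environment variable at a configuration module. A sketch of the `config_from_envvar` flow it guards, with a hypothetical variable and module name:

    import os
    from celery import Celery

    os.environ.setdefault('CELERY_CONFIG_MODULE', 'proj.celeryconfig')

    app = Celery()
    app.config_from_envvar('CELERY_CONFIG_MODULE')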
     def config_from_cmdline(self, argv, namespace='celery'):
@@ -533,7 +586,7 @@ class Celery:
             digest (str): Digest algorithm used when signing messages.
             digest (str): Digest algorithm used when signing messages.
                 Default is ``sha1``.
                 Default is ``sha1``.
             serializer (str): Serializer used to encode messages after
             serializer (str): Serializer used to encode messages after
-                they have been signed.  See :setting:`task_serializer` for
+                they've been signed.  See :setting:`task_serializer` for
                 the serializers supported.  Default is ``json``.
                 the serializers supported.  Default is ``json``.
         """
         """
         from celery.security import setup_security
         from celery.security import setup_security
@@ -542,12 +595,14 @@ class Celery:
 
 
     def autodiscover_tasks(self, packages=None,
     def autodiscover_tasks(self, packages=None,
                            related_name='tasks', force=False):
                            related_name='tasks', force=False):
-        """Try to auto-discover and import modules with a specific name (by
-        default 'tasks').
+        """Auto-discover task modules.
 
 
-        If the name is empty, this will be delegated to fix-ups (e.g. Django).
+        Searches a list of packages for a "tasks.py" module (or use
+        related_name argument).
 
 
-        For example if you have an directory layout like this:
+        If the name is empty, this will be delegated to fix-ups (e.g., Django).
+
+        For example if you have a directory layout like this:
 
 
         .. code-block:: text
         .. code-block:: text
 
 
@@ -570,10 +625,10 @@ class Celery:
                 This argument may also be a callable, in which case the
                 This argument may also be a callable, in which case the
                 value returned is used (for lazy evaluation).
                 value returned is used (for lazy evaluation).
             related_name (str): The name of the module to find.  Defaults
             related_name (str): The name of the module to find.  Defaults
-                to "tasks", which means it look for "module.tasks" for every
-                module in ``packages``.
+                to "tasks": meaning "look for 'module.tasks' for every
+                module in ``packages``."
             force (bool): By default this call is lazy so that the actual
             force (bool): By default this call is lazy so that the actual
-                auto-discovery will not happen until an application imports
+                auto-discovery won't happen until an application imports
                 the default modules.  Forcing will cause the auto-discovery
                 the default modules.  Forcing will cause the auto-discovery
                 to happen immediately.
                 to happen immediately.
         """
         """
@@ -614,7 +669,7 @@ class Celery:
         Supports the same arguments as :meth:`@-Task.apply_async`.
         Supports the same arguments as :meth:`@-Task.apply_async`.
 
 
         Arguments:
         Arguments:
-            name (str): Name of task to call (e.g. `"tasks.add"`).
+            name (str): Name of task to call (e.g., `"tasks.add"`).
             result_cls (~@AsyncResult): Specify custom result class.
             result_cls (~@AsyncResult): Specify custom result class.
         """
         """
         parent = have_parent = None
         parent = have_parent = None
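
A hedged sketch of calling `send_task` by name, per the argument description above; the task name and arguments are placeholders:

    # Send a task by name without importing its module; returns an AsyncResult.
    result = app.send_task('tasks.add', args=(2, 2), countdown=10)
    print(result.id)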
@@ -651,8 +706,9 @@ class Celery:
         if connection:
         if connection:
             producer = amqp.Producer(connection)
             producer = amqp.Producer(connection)
         with self.producer_or_acquire(producer) as P:
         with self.producer_or_acquire(producer) as P:
-            self.backend.on_task_call(P, task_id)
-            amqp.send_task_message(P, name, message, **options)
+            with P.connection._reraise_as_library_errors():
+                self.backend.on_task_call(P, task_id)
+                amqp.send_task_message(P, name, message, **options)
         result = (result_cls or self.AsyncResult)(task_id)
         result = (result_cls or self.AsyncResult)(task_id)
         if add_to_parent:
         if add_to_parent:
             if not have_parent:
             if not have_parent:
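
For reference, a hedged sketch of calling a task by name through this method (the task name and arguments are hypothetical):

.. code-block:: python

    # No need to import the task module on the calling side.
    result = app.send_task('tasks.add', args=(2, 2), countdown=10)
    print(result.id)  # an AsyncResult is returned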
@@ -755,7 +811,9 @@ class Celery:
         return self.connection_for_write()
         return self.connection_for_write()
 
 
     def connection_or_acquire(self, connection=None, pool=True, *_, **__):
     def connection_or_acquire(self, connection=None, pool=True, *_, **__):
-        """For use within a :keyword:`with` statement to get a connection
+        """Context used to acquire a connection from the pool.
+
+        For use within a :keyword:`with` statement to get a connection
         from the pool if one is not already provided.
         from the pool if one is not already provided.
 
 
         Arguments:
         Arguments:
@@ -765,7 +823,9 @@ class Celery:
         return FallbackContext(connection, self._acquire_connection, pool=pool)
         return FallbackContext(connection, self._acquire_connection, pool=pool)
 
 
     def producer_or_acquire(self, producer=None):
     def producer_or_acquire(self, producer=None):
-        """For use within a :keyword:`with` statement to get a producer
+        """Context used to acquire a producer from the pool.
+
+        For use within a :keyword:`with` statement to get a producer
         from the pool if one is not already provided
         from the pool if one is not already provided
 
 
         Arguments:
         Arguments:
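
A short sketch of both context helpers, assuming ``app`` is a configured Celery instance:

.. code-block:: python

    # Acquire a broker connection from the pool, or pass an existing one through.
    with app.connection_or_acquire() as connection:
        connection.ensure_connection()

    # Same pattern for producers.
    with app.producer_or_acquire() as producer:
        pass  # publish messages using the kombu producer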
@@ -781,34 +841,47 @@ class Celery:
         return find_deprecated_settings(c)
         return find_deprecated_settings(c)
 
 
     def now(self):
     def now(self):
-        """Return the current time and date as a
-        :class:`~datetime.datetime` object."""
+        """Return the current time and date as a datetime."""
         return self.loader.now(utc=self.conf.enable_utc)
         return self.loader.now(utc=self.conf.enable_utc)
 
 
     def select_queues(self, queues=None):
     def select_queues(self, queues=None):
-        """Select a subset of queues, where queues must be a list of queue
-        names to keep."""
+        """Select subset of queues.
+
+        Arguments:
+            queues (Sequence[str]): a list of queue names to keep.
+        """
         return self.amqp.queues.select(queues)
         return self.amqp.queues.select(queues)
 
 
-    def either(self, default_key, *values):
-        """Fallback to the value of a configuration key if none of the
-        `*values` are true."""
+    def either(self, default_key, *defaults):
+        """Get key from configuration or use default values.
+
+        Fallback to the value of a configuration key if none of the
+        `*defaults` are true.
+        """
         return first(None, [
         return first(None, [
-            first(None, values), starpromise(self.conf.get, default_key),
+            first(None, defaults), starpromise(self.conf.get, default_key),
         ])
         ])
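
As a sketch of how these two helpers are used (the queue names and setting key are examples only):

.. code-block:: python

    # Keep only these queues in app.amqp.queues.
    app.select_queues(['celery', 'images'])

    # Fall back to the ``task_default_queue`` setting when no
    # explicit value is given.
    default_queue = app.either('task_default_queue', None)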
 
 
     def bugreport(self):
     def bugreport(self):
-        """Return a string with information useful for the Celery core
-        developers when reporting a bug."""
+        """Return information useful in bug reports."""
         return bugreport(self)
         return bugreport(self)
 
 
     def _get_backend(self):
     def _get_backend(self):
-        from celery.backends import get_backend_by_url
-        backend, url = get_backend_by_url(
+        backend, url = backends.by_url(
             self.backend_cls or self.conf.result_backend,
             self.backend_cls or self.conf.result_backend,
             self.loader)
             self.loader)
         return backend(app=self, url=url)
         return backend(app=self, url=url)
 
 
+    def _finalize_pending_conf(self):
+        """Get config value by key and finalize loading the configuration.
+
+        Note:
+            This is used by PendingConfiguration:
+            as soon as you access a key, the configuration is read.
+        """
+        conf = self._conf = self._load_config()
+        return conf
+
     def _load_config(self):
     def _load_config(self):
         if isinstance(self.on_configure, Signal):
         if isinstance(self.on_configure, Signal):
             self.on_configure.send(sender=self)
             self.on_configure.send(sender=self)
@@ -854,7 +927,7 @@ class Celery:
     def signature(self, *args, **kwargs):
     def signature(self, *args, **kwargs):
         """Return a new :class:`~celery.Signature` bound to this app."""
         """Return a new :class:`~celery.Signature` bound to this app."""
         kwargs['app'] = self
         kwargs['app'] = self
-        return self.canvas.signature(*args, **kwargs)
+        return self._canvas.signature(*args, **kwargs)
 
 
     def add_periodic_task(self, schedule, sig,
     def add_periodic_task(self, schedule, sig,
                           args=(), kwargs=(), name=None, **opts):
                           args=(), kwargs=(), name=None, **opts):
@@ -883,8 +956,7 @@ class Celery:
         self._conf.beat_schedule[key] = entry
         self._conf.beat_schedule[key] = entry
 
 
     def create_task_cls(self):
     def create_task_cls(self):
-        """Creates a base task class using default configuration
-        taken from this app."""
+        """Create a base task class bound to this app."""
         return self.subclass_with_self(
         return self.subclass_with_self(
             self.task_cls, name='Task', attribute='_app',
             self.task_cls, name='Task', attribute='_app',
             keep_reduce=True, abstract=True,
             keep_reduce=True, abstract=True,
@@ -892,11 +964,10 @@ class Celery:
 
 
     def subclass_with_self(self, Class, name=None, attribute='app',
     def subclass_with_self(self, Class, name=None, attribute='app',
                            reverse=None, keep_reduce=False, **kw):
                            reverse=None, keep_reduce=False, **kw):
-        """Subclass an app-compatible class by setting its app attribute
-        to be this app instance.
+        """Subclass an app-compatible class.
 
 
         App-compatible means that the class has a class attribute that
         App-compatible means that the class has a class attribute that
-        provides the default app it should use, e.g.
+        provides the default app it should use, for example:
         ``class Foo: app = None``.
         ``class Foo: app = None``.
 
 
         Arguments:
         Arguments:
@@ -905,9 +976,10 @@ class Celery:
             attribute (str): Name of the attribute holding the app,
             attribute (str): Name of the attribute holding the app,
                 Default is 'app'.
                 Default is 'app'.
             reverse (str): Reverse path to this object used for pickling
             reverse (str): Reverse path to this object used for pickling
-                purposes.  E.g. for ``app.AsyncResult`` use ``"AsyncResult"``.
+                purposes. For example, to get ``app.AsyncResult``,
+                use ``"AsyncResult"``.
             keep_reduce (bool): If enabled a custom ``__reduce__``
             keep_reduce (bool): If enabled a custom ``__reduce__``
-                implementation will not be provided.
+                implementation won't be provided.
         """
         """
         Class = symbol_by_name(Class)
         Class = symbol_by_name(Class)
         reverse = reverse if reverse else Class.__name__
         reverse = reverse if reverse else Class.__name__
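
A minimal sketch of the app-compatible pattern this method expects (``Foo`` is a made-up example class):

.. code-block:: python

    class Foo:
        app = None   # class attribute providing the default app

    # Returns a Foo subclass whose ``app`` attribute is this app instance.
    BoundFoo = app.subclass_with_self(Foo, name='Foo')
    assert BoundFoo().app is app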
@@ -928,6 +1000,12 @@ class Celery:
     def _rgetattr(self, path):
     def _rgetattr(self, path):
         return attrgetter(path)(self)
         return attrgetter(path)(self)
 
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
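
With ``__enter__``/``__exit__`` defined, the app can now be used as a context manager so that ``close()`` runs automatically; a small sketch (the in-memory broker URL is just for illustration):

.. code-block:: python

    with Celery('hello', broker='memory://') as app:
        ...  # configure and use the app
    # app.close() has been called at this point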
     def __repr__(self):
     def __repr__(self):
         return '<{0} {1}>'.format(type(self).__name__, appstr(self))
         return '<{0} {1}>'.format(type(self).__name__, appstr(self))
 
 
@@ -946,8 +1024,7 @@ class Celery:
         )
         )
 
 
     def __reduce_keys__(self):
     def __reduce_keys__(self):
-        """Return keyword arguments used to reconstruct the object
-        when unpickling."""
+        """Keyword arguments used to reconstruct the object when unpickling."""
         return {
         return {
             'main': self.main,
             'main': self.main,
             'changes':
             'changes':
@@ -1045,8 +1122,7 @@ class Celery:
 
 
     @property
     @property
     def current_task(self):
     def current_task(self):
-        """The instance of the task that is being executed, or
-        :const:`None`."""
+        """Instance of task being executed, or :const:`None`."""
         return _task_stack.top
         return _task_stack.top
 
 
     @property
     @property
@@ -1061,7 +1137,11 @@ class Celery:
     @cached_property
     @cached_property
     def oid(self):
     def oid(self):
         """Universally unique identifier for this app."""
         """Universally unique identifier for this app."""
-        return oid_from(self)
+        # since 4.0: thread.get_ident() is not included when
+        # generating the process id.  This is due to how the RPC
+        # backend now dedicates a single thread to receive results,
+        # which would not work if each thread has a separate id.
+        return oid_from(self, threads=False)
 
 
     @cached_property
     @cached_property
     def amqp(self):
     def amqp(self):
@@ -1080,10 +1160,6 @@ class Celery:
             self._conf = self._load_config()
             self._conf = self._load_config()
         return self._conf
         return self._conf
 
 
-    def _get_from_conf_and_finalize(self, key):
-        conf = self._conf = self._load_config()
-        return conf[key]
-
     @conf.setter
     @conf.setter
     def conf(self, d):  # noqa
     def conf(self, d):  # noqa
         self._conf = d
         self._conf = d
@@ -1109,7 +1185,7 @@ class Celery:
         return instantiate(self.log_cls, app=self)
         return instantiate(self.log_cls, app=self)
 
 
     @cached_property
     @cached_property
-    def canvas(self):
+    def _canvas(self):
         from celery import canvas
         from celery import canvas
         return canvas
         return canvas
 
 

+ 12 - 16
celery/app/builtins.py

@@ -13,8 +13,7 @@ logger = get_logger(__name__)
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_backend_cleanup_task(app):
 def add_backend_cleanup_task(app):
-    """The backend cleanup task can be used to clean up the default result
-    backend.
+    """Task used to clean up expired results.
 
 
     If the configured backend requires periodic cleanup this task is also
     If the configured backend requires periodic cleanup this task is also
     automatically configured to run every day at 4am (requires
     automatically configured to run every day at 4am (requires
@@ -28,19 +27,20 @@ def add_backend_cleanup_task(app):
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_accumulate_task(app):
 def add_accumulate_task(app):
-    """This task is used by Task.replace when replacing a task with
-    a group, to "collect" results."""
+    """Task used by Task.replace when replacing task with group."""
     @app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
     @app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
     def accumulate(self, *args, **kwargs):
     def accumulate(self, *args, **kwargs):
         index = kwargs.get('index')
         index = kwargs.get('index')
         return args[index] if index is not None else args
         return args[index] if index is not None else args
+    return accumulate
 
 
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_unlock_chord_task(app):
 def add_unlock_chord_task(app):
-    """This task is used by result backends without native chord support.
+    """Task used by result backends without native chord support.
 
 
-    It joins chords by creating a task chain polling the header for completion.
+    Joins the chord by creating a task chain that polls the header
+    for completion.
     """
     """
     from celery.canvas import maybe_signature
     from celery.canvas import maybe_signature
     from celery.exceptions import ChordError
     from celery.exceptions import ChordError
@@ -78,22 +78,19 @@ def add_unlock_chord_task(app):
         try:
         try:
             with allow_join_result():
             with allow_join_result():
                 ret = j(timeout=3.0, propagate=True)
                 ret = j(timeout=3.0, propagate=True)
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             try:
             try:
                 culprit = next(deps._failed_join_report())
                 culprit = next(deps._failed_join_report())
-                reason = 'Dependency {0.id} raised {1!r}'.format(
-                    culprit, exc,
-                )
+                reason = 'Dependency {0.id} raised {1!r}'.format(culprit, exc)
             except StopIteration:
             except StopIteration:
                 reason = repr(exc)
                 reason = repr(exc)
-            logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
-            app.backend.chord_error_from_stack(callback,
-                                               ChordError(reason))
+            logger.exception('Chord %r raised: %r', group_id, exc)
+            app.backend.chord_error_from_stack(callback, ChordError(reason))
         else:
         else:
             try:
             try:
                 callback.delay(ret)
                 callback.delay(ret)
-            except Exception as exc:
-                logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
+            except Exception as exc:  # pylint: disable=broad-except
+                logger.exception('Chord %r raised: %r', group_id, exc)
                 app.backend.chord_error_from_stack(
                 app.backend.chord_error_from_stack(
                     callback,
                     callback,
                     exc=ChordError('Callback error: {0!r}'.format(exc)),
                     exc=ChordError('Callback error: {0!r}'.format(exc)),
@@ -159,7 +156,6 @@ def add_group_task(app):
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_chain_task(app):
 def add_chain_task(app):
     """No longer used, but here for backwards compatibility."""
     """No longer used, but here for backwards compatibility."""
-
     @app.task(name='celery.chain', shared=False, lazy=False)
     @app.task(name='celery.chain', shared=False, lazy=False)
     def chain(*args, **kwargs):
     def chain(*args, **kwargs):
         raise NotImplementedError('chain is not a real task')
         raise NotImplementedError('chain is not a real task')

+ 160 - 47
celery/app/control.py

@@ -25,6 +25,18 @@ the celery worker `-n` option.\
 
 
 
 
 def flatten_reply(reply):
 def flatten_reply(reply):
+    """Flatten node replies.
+
+    Convert from a list of replies in this format::
+
+        [{'a@example.com': reply},
+         {'b@example.com': reply}]
+
+    into this format::
+
+        {'a@example.com': reply,
+         'b@example.com': reply}
+    """
     nodes, dupes = {}, set()
     nodes, dupes = {}, set()
     for item in reply:
     for item in reply:
         [dupes.add(name) for name in item if name in nodes]
         [dupes.add(name) for name in item if name in nodes]
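
For example (the node names and reply payloads are placeholders):

.. code-block:: python

    replies = [{'a@example.com': {'ok': 'pong'}},
               {'b@example.com': {'ok': 'pong'}}]
    flatten_reply(replies)
    # -> {'a@example.com': {'ok': 'pong'}, 'b@example.com': {'ok': 'pong'}}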
@@ -39,9 +51,11 @@ def flatten_reply(reply):
 
 
 
 
 class Inspect:
 class Inspect:
+    """API for app.control.inspect."""
+
     app = None
     app = None
 
 
-    def __init__(self, destination=None, timeout=1, callback=None,
+    def __init__(self, destination=None, timeout=1.0, callback=None,
                  connection=None, app=None, limit=None):
                  connection=None, app=None, limit=None):
         self.app = app or self.app
         self.app = app or self.app
         self.destination = destination
         self.destination = destination
@@ -75,36 +89,43 @@ class Inspect:
     def clock(self):
     def clock(self):
         return self._request('clock')
         return self._request('clock')
 
 
-    def active(self, safe=False):
-        return self._request('dump_active', safe=safe)
+    def active(self, safe=None):
+        # safe is ignored since 4.0
+        # as no objects will need serialization now that we
+        # have argsrepr/kwargsrepr.
+        return self._request('active')
 
 
-    def scheduled(self, safe=False):
-        return self._request('dump_schedule', safe=safe)
+    def scheduled(self, safe=None):
+        return self._request('scheduled')
 
 
-    def reserved(self, safe=False):
-        return self._request('dump_reserved', safe=safe)
+    def reserved(self, safe=None):
+        return self._request('reserved')
 
 
     def stats(self):
     def stats(self):
         return self._request('stats')
         return self._request('stats')
 
 
     def revoked(self):
     def revoked(self):
-        return self._request('dump_revoked')
+        return self._request('revoked')
 
 
     def registered(self, *taskinfoitems):
     def registered(self, *taskinfoitems):
-        return self._request('dump_tasks', taskinfoitems=taskinfoitems)
+        return self._request('registered', taskinfoitems=taskinfoitems)
     registered_tasks = registered
     registered_tasks = registered
 
 
-    def ping(self):
+    def ping(self, destination=None):
         return self._request('ping')
         return self._request('ping')
 
 
     def active_queues(self):
     def active_queues(self):
         return self._request('active_queues')
         return self._request('active_queues')
 
 
-    def query_task(self, ids):
+    def query_task(self, *ids):
+        # signature used to be unary: query_task(ids=[id1, id2])
+        # we need this to preserve backward compatibility.
+        if len(ids) == 1 and isinstance(ids[0], (list, tuple)):
+            ids = ids[0]
         return self._request('query_task', ids=ids)
         return self._request('query_task', ids=ids)
 
 
     def conf(self, with_defaults=False):
     def conf(self, with_defaults=False):
-        return self._request('dump_conf', with_defaults=with_defaults)
+        return self._request('conf', with_defaults=with_defaults)
 
 
     def hello(self, from_node, revoked=None):
     def hello(self, from_node, revoked=None):
         return self._request('hello', from_node=from_node, revoked=revoked)
         return self._request('hello', from_node=from_node, revoked=revoked)
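
A hedged overview of how the inspect API is typically used (worker names and task ids are placeholders):

.. code-block:: python

    i = app.control.inspect()                    # all workers
    i = app.control.inspect(['w1@example.com'])  # or specific nodes

    i.active()       # tasks currently executing
    i.scheduled()    # ETA/countdown tasks
    i.reserved()     # prefetched but not yet running
    i.registered()   # task names known to the worker

    # query_task now takes ids as positional arguments,
    # while the old single-list form keeps working:
    i.query_task('id1', 'id2')
    i.query_task(['id1', 'id2'])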
@@ -120,6 +141,8 @@ class Inspect:
 
 
 
 
 class Control:
 class Control:
+    """Worker remote control client."""
+
     Mailbox = Mailbox
     Mailbox = Mailbox
 
 
     def __init__(self, app=None):
     def __init__(self, app=None):
@@ -129,6 +152,10 @@ class Control:
             type='fanout',
             type='fanout',
             accept=['json'],
             accept=['json'],
             producer_pool=lazy(lambda: self.app.amqp.producer_pool),
             producer_pool=lazy(lambda: self.app.amqp.producer_pool),
+            queue_ttl=app.conf.control_queue_ttl,
+            reply_queue_ttl=app.conf.control_queue_ttl,
+            queue_expires=app.conf.control_queue_expires,
+            reply_queue_expires=app.conf.control_queue_expires,
         )
         )
 
 
     @cached_property
     @cached_property
@@ -154,9 +181,12 @@ class Control:
     discard_all = purge
     discard_all = purge
 
 
     def election(self, id, topic, action=None, connection=None):
     def election(self, id, topic, action=None, connection=None):
-        self.broadcast('election', connection=connection, arguments={
-            'id': id, 'topic': topic, 'action': action,
-        })
+        self.broadcast(
+            'election', connection=connection, destination=None,
+            arguments={
+                'id': id, 'topic': topic, 'action': action,
+            },
+        )
 
 
     def revoke(self, task_id, destination=None, terminate=False,
     def revoke(self, task_id, destination=None, terminate=False,
                signal=TERM_SIGNAME, **kwargs):
                signal=TERM_SIGNAME, **kwargs):
@@ -175,12 +205,25 @@ class Control:
         See Also:
         See Also:
             :meth:`broadcast` for supported keyword arguments.
             :meth:`broadcast` for supported keyword arguments.
         """
         """
-        return self.broadcast('revoke', destination=destination,
-                              arguments={'task_id': task_id,
-                                         'terminate': terminate,
-                                         'signal': signal}, **kwargs)
+        return self.broadcast('revoke', destination=destination, arguments={
+            'task_id': task_id,
+            'terminate': terminate,
+            'signal': signal,
+        }, **kwargs)
+
+    def terminate(self, task_id,
+                  destination=None, signal=TERM_SIGNAME, **kwargs):
+        """Tell all (or specific) workers to terminate a task by id.
+
+        See Also:
+            This is just a shortcut to :meth:`revoke` with the terminate
+            argument enabled.
+        """
+        return self.revoke(
+            task_id,
+            destination=destination, terminate=True, signal=signal, **kwargs)
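
Illustrative calls (the task id is a placeholder):

.. code-block:: python

    app.control.revoke('6a1ee587-ab01-4a5d-8f36-b2b4d2a1c54f')

    # terminate() is the new shortcut for revoke(..., terminate=True):
    app.control.terminate('6a1ee587-ab01-4a5d-8f36-b2b4d2a1c54f',
                          signal='SIGKILL')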
 
 
-    def ping(self, destination=None, timeout=1, **kwargs):
+    def ping(self, destination=None, timeout=1.0, **kwargs):
         """Ping all (or specific) workers.
         """Ping all (or specific) workers.
 
 
         Returns:
         Returns:
@@ -189,12 +232,12 @@ class Control:
         See Also:
         See Also:
             :meth:`broadcast` for supported keyword arguments.
             :meth:`broadcast` for supported keyword arguments.
         """
         """
-        return self.broadcast('ping', reply=True, destination=destination,
-                              timeout=timeout, **kwargs)
+        return self.broadcast(
+            'ping', reply=True, arguments={}, destination=destination,
+            timeout=timeout, **kwargs)
 
 
     def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
     def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
-        """Tell all (or specific) workers to set a new rate limit
-        for task by type.
+        """Tell workers to set a new rate limit for task by type.
 
 
         Arguments:
         Arguments:
             task_name (str): Name of task to change rate limit for.
             task_name (str): Name of task to change rate limit for.
@@ -206,13 +249,18 @@ class Control:
         See Also:
         See Also:
             :meth:`broadcast` for supported keyword arguments.
             :meth:`broadcast` for supported keyword arguments.
         """
         """
-        return self.broadcast('rate_limit', destination=destination,
-                              arguments={'task_name': task_name,
-                                         'rate_limit': rate_limit},
-                              **kwargs)
-
-    def add_consumer(self, queue, exchange=None, exchange_type='direct',
-                     routing_key=None, options=None, **kwargs):
+        return self.broadcast(
+            'rate_limit',
+            destination=destination,
+            arguments={
+                'task_name': task_name,
+                'rate_limit': rate_limit,
+            },
+            **kwargs)
+
+    def add_consumer(self, queue,
+                     exchange=None, exchange_type='direct', routing_key=None,
+                     options=None, destination=None, **kwargs):
         """Tell all (or specific) workers to start consuming from a new queue.
         """Tell all (or specific) workers to start consuming from a new queue.
 
 
         Only the queue name is required as if only the queue is specified
         Only the queue name is required as if only the queue is specified
@@ -237,25 +285,29 @@ class Control:
         """
         """
         return self.broadcast(
         return self.broadcast(
             'add_consumer',
             'add_consumer',
-            arguments=dict({'queue': queue, 'exchange': exchange,
-                            'exchange_type': exchange_type,
-                            'routing_key': routing_key}, **options or {}),
+            destination=destination,
+            arguments=dict({
+                'queue': queue,
+                'exchange': exchange,
+                'exchange_type': exchange_type,
+                'routing_key': routing_key,
+            }, **options or {}),
             **kwargs
             **kwargs
         )
         )
 
 
-    def cancel_consumer(self, queue, **kwargs):
+    def cancel_consumer(self, queue, destination=None, **kwargs):
         """Tell all (or specific) workers to stop consuming from ``queue``.
         """Tell all (or specific) workers to stop consuming from ``queue``.
 
 
         See Also:
         See Also:
             Supports the same arguments as :meth:`broadcast`.
             Supports the same arguments as :meth:`broadcast`.
         """
         """
         return self.broadcast(
         return self.broadcast(
-            'cancel_consumer', arguments={'queue': queue}, **kwargs
-        )
+            'cancel_consumer', destination=destination,
+            arguments={'queue': queue}, **kwargs)
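
A small sketch of adding and removing a queue at runtime (queue and worker names are examples):

.. code-block:: python

    # Exchange and routing key default to the queue name.
    app.control.add_consumer('feeds', reply=True)

    # Target specific workers only.
    app.control.add_consumer('feeds', destination=['w1@example.com'])

    # Stop consuming from the queue again.
    app.control.cancel_consumer('feeds', reply=True)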
 
 
-    def time_limit(self, task_name, soft=None, hard=None, **kwargs):
-        """Tell all (or specific) workers to set time limits for
-        a task by type.
+    def time_limit(self, task_name, soft=None, hard=None,
+                   destination=None, **kwargs):
+        """Tell workers to set time limits for a task by type.
 
 
         Arguments:
         Arguments:
             task_name (str): Name of task to change time limits for.
             task_name (str): Name of task to change time limits for.
@@ -265,8 +317,13 @@ class Control:
         """
         """
         return self.broadcast(
         return self.broadcast(
             'time_limit',
             'time_limit',
-            arguments={'task_name': task_name,
-                       'hard': hard, 'soft': soft}, **kwargs)
+            arguments={
+                'task_name': task_name,
+                'hard': hard,
+                'soft': soft,
+            },
+            destination=destination,
+            **kwargs)
 
 
     def enable_events(self, destination=None, **kwargs):
     def enable_events(self, destination=None, **kwargs):
         """Tell all (or specific) workers to enable events.
         """Tell all (or specific) workers to enable events.
@@ -274,7 +331,8 @@ class Control:
         See Also:
         See Also:
             Supports the same arguments as :meth:`broadcast`.
             Supports the same arguments as :meth:`broadcast`.
         """
         """
-        return self.broadcast('enable_events', {}, destination, **kwargs)
+        return self.broadcast(
+            'enable_events', arguments={}, destination=destination, **kwargs)
 
 
     def disable_events(self, destination=None, **kwargs):
     def disable_events(self, destination=None, **kwargs):
         """Tell all (or specific) workers to disable events.
         """Tell all (or specific) workers to disable events.
@@ -282,7 +340,8 @@ class Control:
         See Also:
         See Also:
             Supports the same arguments as :meth:`broadcast`.
             Supports the same arguments as :meth:`broadcast`.
         """
         """
-        return self.broadcast('disable_events', {}, destination, **kwargs)
+        return self.broadcast(
+            'disable_events', arguments={}, destination=destination, **kwargs)
 
 
     def pool_grow(self, n=1, destination=None, **kwargs):
     def pool_grow(self, n=1, destination=None, **kwargs):
         """Tell all (or specific) workers to grow the pool by ``n``.
         """Tell all (or specific) workers to grow the pool by ``n``.
@@ -290,7 +349,8 @@ class Control:
         See Also:
         See Also:
             Supports the same arguments as :meth:`broadcast`.
             Supports the same arguments as :meth:`broadcast`.
         """
         """
-        return self.broadcast('pool_grow', {'n': n}, destination, **kwargs)
+        return self.broadcast(
+            'pool_grow', arguments={'n': n}, destination=destination, **kwargs)
 
 
     def pool_shrink(self, n=1, destination=None, **kwargs):
     def pool_shrink(self, n=1, destination=None, **kwargs):
         """Tell all (or specific) workers to shrink the pool by ``n``.
         """Tell all (or specific) workers to shrink the pool by ``n``.
@@ -298,10 +358,63 @@ class Control:
         See Also:
         See Also:
             Supports the same arguments as :meth:`broadcast`.
             Supports the same arguments as :meth:`broadcast`.
         """
         """
-        return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs)
+        return self.broadcast(
+            'pool_shrink', arguments={'n': n},
+            destination=destination, **kwargs)
+
+    def autoscale(self, max, min, destination=None, **kwargs):
+        """Change worker(s) autoscale setting.
+
+        See Also:
+            Supports the same arguments as :meth:`broadcast`.
+        """
+        return self.broadcast(
+            'autoscale', arguments={'max': max, 'min': min},
+            destination=destination, **kwargs)
+
+    def shutdown(self, destination=None, **kwargs):
+        """Shutdown worker(s).
+
+        See Also:
+            Supports the same arguments as :meth:`broadcast`.
+        """
+        return self.broadcast(
+            'shutdown', arguments={}, destination=destination, **kwargs)
+
+    def pool_restart(self, modules=None, reload=False, reloader=None,
+                     destination=None, **kwargs):
+        """Restart the execution pools of all or specific workers.
+
+        Keyword Arguments:
+            modules (Sequence[str]): List of modules to reload.
+            reload (bool): Flag to enable module reloading.  Default is False.
+            reloader (Any): Function to reload a module.
+            destination (Sequence[str]): List of worker names to send this
+                command to.
+
+        See Also:
+            Supports the same arguments as :meth:`broadcast`.
+        """
+        return self.broadcast(
+            'pool_restart',
+            arguments={
+                'modules': modules,
+                'reload': reload,
+                'reloader': reloader,
+            },
+            destination=destination, **kwargs)
+
+    def heartbeat(self, destination=None, **kwargs):
+        """Tell worker(s) to send a heartbeat immediately.
+
+        See Also:
+            Supports the same arguments as :meth:`broadcast`.
+        """
+        return self.broadcast(
+            'heartbeat', arguments={}, destination=destination, **kwargs)
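
Hedged examples of the pool-management commands added above (the numbers and module names are arbitrary):

.. code-block:: python

    app.control.pool_grow(2)       # add two pool processes
    app.control.pool_shrink(2)     # remove two pool processes
    app.control.autoscale(10, 3)   # max=10, min=3
    app.control.pool_restart(modules=['proj.tasks'], reload=True)
    app.control.heartbeat()        # request an immediate heartbeat
    app.control.shutdown()         # gracefully shut down the workers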
 
 
     def broadcast(self, command, arguments=None, destination=None,
     def broadcast(self, command, arguments=None, destination=None,
-                  connection=None, reply=False, timeout=1, limit=None,
+                  connection=None, reply=False, timeout=1.0, limit=None,
                   callback=None, channel=None, **extra_kwargs):
                   callback=None, channel=None, **extra_kwargs):
         """Broadcast a control command to the celery workers.
         """Broadcast a control command to the celery workers.
 
 

+ 16 - 9
celery/app/defaults.py

@@ -1,10 +1,8 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 """Configuration introspection and defaults."""
 """Configuration introspection and defaults."""
 import sys
 import sys
-
 from collections import deque, namedtuple
 from collections import deque, namedtuple
 from datetime import timedelta
 from datetime import timedelta
-
 from celery.utils.functional import memoize
 from celery.utils.functional import memoize
 from celery.utils.serialization import strtobool
 from celery.utils.serialization import strtobool
 
 
@@ -38,9 +36,9 @@ searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
 
 
 def Namespace(__old__=None, **options):
 def Namespace(__old__=None, **options):
     if __old__ is not None:
     if __old__ is not None:
-        for opt in options.values():
+        for key, opt in options.items():
             if not opt.old:
             if not opt.old:
-                opt.old = __old__
+                opt.old = {o.format(key) for o in __old__}
     return options
     return options
 
 
 
 
@@ -49,6 +47,8 @@ def old_ns(ns):
 
 
 
 
 class Option:
 class Option:
+    """Describes a Celery configuration option."""
+
     alt = None
     alt = None
     deprecate_by = None
     deprecate_by = None
     remove_by = None
     remove_by = None
@@ -123,6 +123,10 @@ NAMESPACES = Namespace(
         auth_provider=Option(type='string'),
         auth_provider=Option(type='string'),
         auth_kwargs=Option(type='string'),
         auth_kwargs=Option(type='string'),
     ),
     ),
+    control=Namespace(
+        queue_ttl=Option(300.0, type='float'),
+        queue_expires=Option(10.0, type='float'),
+    ),
     couchbase=Namespace(
     couchbase=Namespace(
         __old__=old_ns('celery_couchbase'),
         __old__=old_ns('celery_couchbase'),
 
 
@@ -149,7 +153,7 @@ NAMESPACES = Namespace(
         max_connections=Option(type='int'),
         max_connections=Option(type='int'),
         password=Option(type='string'),
         password=Option(type='string'),
         port=Option(type='int'),
         port=Option(type='int'),
-        socket_timeout=Option(5.0, type='float'),
+        socket_timeout=Option(120.0, type='float'),
     ),
     ),
     result=Namespace(
     result=Namespace(
         __old__=old_ns('celery_result'),
         __old__=old_ns('celery_result'),
@@ -199,16 +203,16 @@ NAMESPACES = Namespace(
         compression=Option(type='string', old={'celery_message_compression'}),
         compression=Option(type='string', old={'celery_message_compression'}),
         create_missing_queues=Option(True, type='bool'),
         create_missing_queues=Option(True, type='bool'),
         default_delivery_mode=Option(2, type='string'),
         default_delivery_mode=Option(2, type='string'),
-        default_exchange=Option('celery'),
-        default_exchange_type=Option('direct'),
         default_queue=Option('celery'),
         default_queue=Option('celery'),
+        default_exchange=Option(None, type='string'),  # taken from queue
+        default_exchange_type=Option('direct'),
+        default_routing_key=Option(None, type='string'),  # taken from queue
         default_rate_limit=Option(type='string'),
         default_rate_limit=Option(type='string'),
-        default_routing_key=Option('celery'),
         eager_propagates=Option(
         eager_propagates=Option(
             False, type='bool', old={'celery_eager_propagates_exceptions'},
             False, type='bool', old={'celery_eager_propagates_exceptions'},
         ),
         ),
         ignore_result=Option(False, type='bool'),
         ignore_result=Option(False, type='bool'),
-        protocol=Option(1, type='int', old={'celery_task_protocol'}),
+        protocol=Option(2, type='int', old={'celery_task_protocol'}),
         publish_retry=Option(
         publish_retry=Option(
             True, type='bool', old={'celery_task_publish_retry'},
             True, type='bool', old={'celery_task_publish_retry'},
         ),
         ),
@@ -241,6 +245,7 @@ NAMESPACES = Namespace(
     worker=Namespace(
     worker=Namespace(
         __old__=OLD_NS_WORKER,
         __old__=OLD_NS_WORKER,
         agent=Option(None, type='string'),
         agent=Option(None, type='string'),
+        autoscaler=Option('celery.worker.autoscale:Autoscaler'),
         concurrency=Option(0, type='int'),
         concurrency=Option(0, type='int'),
         consumer=Option('celery.worker.consumer:Consumer', type='string'),
         consumer=Option('celery.worker.consumer:Consumer', type='string'),
         direct=Option(False, type='bool', old={'celery_worker_direct'}),
         direct=Option(False, type='bool', old={'celery_worker_direct'}),
@@ -291,6 +296,7 @@ def _to_compat(ns, key, opt):
 
 
 
 
 def flatten(d, root='', keyfilter=_flatten_keys):
 def flatten(d, root='', keyfilter=_flatten_keys):
+    """Flatten settings."""
     stack = deque([(root, d)])
     stack = deque([(root, d)])
     while stack:
     while stack:
         ns, options = stack.popleft()
         ns, options = stack.popleft()
@@ -326,6 +332,7 @@ def find_deprecated_settings(source):  # pragma: no cover
 
 
 @memoize(maxsize=None)
 @memoize(maxsize=None)
 def find(name, namespace='celery'):
 def find(name, namespace='celery'):
+    """Find setting by name."""
     # - Try specified name-space first.
     # - Try specified name-space first.
     namespace = namespace.lower()
     namespace = namespace.lower()
     try:
     try:

+ 39 - 0
celery/app/events.py

@@ -0,0 +1,39 @@
+"""Implementation for the app.events shortcuts."""
+from contextlib import contextmanager
+from kombu.utils.objects import cached_property
+
+
+class Events(object):
+    """Implements app.events."""
+
+    receiver_cls = 'celery.events.receiver:EventReceiver'
+    dispatcher_cls = 'celery.events.dispatcher:EventDispatcher'
+    state_cls = 'celery.events.state:State'
+
+    def __init__(self, app=None):
+        self.app = app
+
+    @cached_property
+    def Receiver(self):
+        return self.app.subclass_with_self(
+            self.receiver_cls, reverse='events.Receiver')
+
+    @cached_property
+    def Dispatcher(self):
+        return self.app.subclass_with_self(
+            self.dispatcher_cls, reverse='events.Dispatcher')
+
+    @cached_property
+    def State(self):
+        return self.app.subclass_with_self(
+            self.state_cls, reverse='events.State')
+
+    @contextmanager
+    def default_dispatcher(self, hostname=None, enabled=True,
+                           buffer_while_offline=False):
+        with self.app.amqp.producer_pool.acquire(block=True) as prod:
+            # pylint: disable=too-many-function-args
+            # This is a property pylint...
+            with self.Dispatcher(prod.connection, hostname, enabled,
+                                 prod.channel, buffer_while_offline) as d:
+                yield d
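
A brief sketch of the shortcut this class provides (the event type and extra field are hypothetical, and sending events requires a reachable broker):

.. code-block:: python

    with app.events.default_dispatcher() as dispatcher:
        dispatcher.send('task-custom-event', extra_field='value')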

+ 13 - 11
celery/app/log.py

@@ -17,7 +17,7 @@ from kombu.utils.encoding import set_default_encoding_file
 
 
 from celery import signals
 from celery import signals
 from celery._state import get_current_task
 from celery._state import get_current_task
-from celery.five import class_property
+from celery.local import class_property
 from celery.platforms import isatty
 from celery.platforms import isatty
 from celery.utils.log import (
 from celery.utils.log import (
     get_logger, mlevel,
     get_logger, mlevel,
@@ -33,6 +33,7 @@ MP_LOG = os.environ.get('MP_LOG', False)
 
 
 
 
 class TaskFormatter(ColorFormatter):
 class TaskFormatter(ColorFormatter):
+    """Formatter for tasks, adding the task name and id."""
 
 
     def format(self, record):
     def format(self, record):
         task = get_current_task()
         task = get_current_task()
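
TaskFormatter makes the injected record fields available to format strings; a typical hedged setup (the format string is an example):

.. code-block:: python

    import logging
    from celery.app.log import TaskFormatter

    handler = logging.StreamHandler()
    handler.setFormatter(TaskFormatter(
        '%(asctime)s - %(task_id)s - %(task_name)s - %(message)s'))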
@@ -46,6 +47,8 @@ class TaskFormatter(ColorFormatter):
 
 
 
 
 class Logging:
 class Logging:
+    """Application logging setup (app.log)."""
+
     #: The logging subsystem is only configured once per process.
     #: The logging subsystem is only configured once per process.
     #: setup_logging_subsystem sets this flag, and subsequent calls
     #: setup_logging_subsystem sets this flag, and subsequent calls
     #: will do nothing.
     #: will do nothing.
@@ -60,6 +63,7 @@ class Logging:
 
 
     def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
     def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
               redirect_level='WARNING', colorize=None, hostname=None):
               redirect_level='WARNING', colorize=None, hostname=None):
+        loglevel = mlevel(loglevel)
         handled = self.setup_logging_subsystem(
         handled = self.setup_logging_subsystem(
             loglevel, logfile, colorize=colorize, hostname=hostname,
             loglevel, logfile, colorize=colorize, hostname=hostname,
         )
         )
@@ -87,7 +91,7 @@ class Logging:
             return
             return
         if logfile and hostname:
         if logfile and hostname:
             logfile = node_format(logfile, hostname)
             logfile = node_format(logfile, hostname)
-        self.already_setup = True
+        Logging._setup = True
         loglevel = mlevel(loglevel or self.loglevel)
         loglevel = mlevel(loglevel or self.loglevel)
         format = format or self.format
         format = format or self.format
         colorize = self.supports_color(colorize, logfile)
         colorize = self.supports_color(colorize, logfile)
@@ -179,8 +183,7 @@ class Logging:
 
 
     def redirect_stdouts_to_logger(self, logger, loglevel=None,
     def redirect_stdouts_to_logger(self, logger, loglevel=None,
                                    stdout=True, stderr=True):
                                    stdout=True, stderr=True):
-        """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
-        logging instance.
+        """Redirect :class:`sys.stdout` and :class:`sys.stderr` to logger.
 
 
         Arguments:
         Arguments:
             logger (logging.Logger): Logger instance to redirect to.
             logger (logging.Logger): Logger instance to redirect to.
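
For instance (the logger name and level are illustrative):

.. code-block:: python

    import logging

    logger = logging.getLogger('proj')
    app.log.redirect_stdouts_to_logger(logger, loglevel=logging.INFO)
    print('this now goes through the logger')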
@@ -200,7 +203,7 @@ class Logging:
             # Windows does not support ANSI color codes.
             # Windows does not support ANSI color codes.
             return False
             return False
         if colorize or colorize is None:
         if colorize or colorize is None:
-            # Only use color if there is no active log file
+            # Only use color if there's no active log file
             # and stderr is an actual terminal.
             # and stderr is an actual terminal.
             return logfile is None and isatty(sys.stderr)
             return logfile is None and isatty(sys.stderr)
         return colorize
         return colorize
@@ -218,8 +221,7 @@ class Logging:
         return logger
         return logger
 
 
     def _detect_handler(self, logfile=None):
     def _detect_handler(self, logfile=None):
-        """Create log handler with either a filename, an open stream
-        or :const:`None` (stderr)."""
+        """Create handler from filename, an open stream or `None` (stderr)."""
         logfile = sys.__stderr__ if logfile is None else logfile
         logfile = sys.__stderr__ if logfile is None else logfile
         if hasattr(logfile, 'write'):
         if hasattr(logfile, 'write'):
             return logging.StreamHandler(logfile)
             return logging.StreamHandler(logfile)
@@ -239,9 +241,9 @@ class Logging:
         return get_logger(name)
         return get_logger(name)
 
 
     @class_property
     @class_property
-    def already_setup(cls):
-        return cls._setup
+    def already_setup(self):
+        return self._setup
 
 
     @already_setup.setter  # noqa
     @already_setup.setter  # noqa
-    def already_setup(cls, was_setup):
-        cls._setup = was_setup
+    def already_setup(self, was_setup):
+        self._setup = was_setup

+ 2 - 0
celery/app/registry.py

@@ -11,6 +11,8 @@ __all__ = ['TaskRegistry']
 
 
 
 
 class TaskRegistry(dict):
 class TaskRegistry(dict):
+    """Map of registered tasks."""
+
     NotRegistered = NotRegistered
     NotRegistered = NotRegistered
 
 
     def __missing__(self, key):
     def __missing__(self, key):

+ 2 - 2
celery/app/routes.py

@@ -54,6 +54,7 @@ class MapRoute:
 
 
 
 
 class Router:
 class Router:
+    """Route tasks based on the :setting:`task_routes` setting."""
 
 
     def __init__(self, routes=None, queues=None,
     def __init__(self, routes=None, queues=None,
                  create_missing=False, app=None):
                  create_missing=False, app=None):
@@ -118,8 +119,7 @@ def expand_router_string(router):
 
 
 
 
 def prepare(routes):
 def prepare(routes):
-    """Expands the :setting:`task_routes` setting."""
-
+    """Expand the :setting:`task_routes` setting."""
     def expand_route(route):
     def expand_route(route):
         if isinstance(route, (Mapping, list, tuple)):
         if isinstance(route, (Mapping, list, tuple)):
             return MapRoute(route)
             return MapRoute(route)
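
The routes expanded here come from :setting:`task_routes`; a hedged configuration example (task and queue names are placeholders):

.. code-block:: python

    app.conf.task_routes = {
        'proj.tasks.add': {'queue': 'math'},
        'feed.tasks.*': {'queue': 'feeds'},
    }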

+ 120 - 73
celery/app/task.py

@@ -3,6 +3,7 @@
 import sys
 import sys
 
 
 from billiard.einfo import ExceptionInfo
 from billiard.einfo import ExceptionInfo
+from kombu.exceptions import OperationalError
 from kombu.utils.uuid import uuid
 from kombu.utils.uuid import uuid
 
 
 from celery import current_app, group
 from celery import current_app, group
@@ -10,12 +11,12 @@ from celery import states
 from celery._state import _task_stack
 from celery._state import _task_stack
 from celery.canvas import signature
 from celery.canvas import signature
 from celery.exceptions import Ignore, MaxRetriesExceededError, Reject, Retry
 from celery.exceptions import Ignore, MaxRetriesExceededError, Reject, Retry
-from celery.five import class_property
+from celery.local import class_property
 from celery.result import EagerResult
 from celery.result import EagerResult
 from celery.utils import abstract
 from celery.utils import abstract
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.imports import instantiate
-from celery.utils.serialization import maybe_reraise
+from celery.utils.serialization import raise_with_context
 
 
 from .annotations import resolve_all as resolve_all_annotations
 from .annotations import resolve_all as resolve_all_annotations
 from .registry import _unpickle_task_v2
 from .registry import _unpickle_task_v2
@@ -57,6 +58,8 @@ def _reprtask(task, fmt=None, flags=None):
 
 
 
 
 class Context:
 class Context:
+    """Task request variables (Task.request)."""
+
     logfile = None
     logfile = None
     loglevel = None
     loglevel = None
     hostname = None
     hostname = None
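
To illustrate where these request variables surface, a small hedged example of a bound task reading its own request (the task body is hypothetical):

.. code-block:: python

    @app.task(bind=True)
    def dump_context(self):
        # self.request is a Context instance populated per delivery.
        print('id={0.id} retries={0.retries} hostname={0.hostname}'.format(
            self.request))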
@@ -134,14 +137,17 @@ class Context:
 class Task:
 class Task:
     """Task base class.
     """Task base class.
 
 
-    When called tasks apply the :meth:`run` method.  This method must
-    be defined by all tasks (that is unless the :meth:`__call__` method
-    is overridden).
+    Note:
+        When called tasks apply the :meth:`run` method.  This method must
+        be defined by all tasks (that is unless the :meth:`__call__` method
+        is overridden).
     """
     """
+
     __trace__ = None
     __trace__ = None
     __v2_compat__ = False  # set by old base in celery.task.base
     __v2_compat__ = False  # set by old base in celery.task.base
 
 
     MaxRetriesExceededError = MaxRetriesExceededError
     MaxRetriesExceededError = MaxRetriesExceededError
+    OperationalError = OperationalError
 
 
     #: Execution strategy used, or the qualified name of one.
     #: Execution strategy used, or the qualified name of one.
     Strategy = 'celery.worker.strategy:default'
     Strategy = 'celery.worker.strategy:default'
@@ -168,7 +174,7 @@ class Task:
     #: a minute),`'100/h'` (hundred tasks an hour)
     #: a minute),`'100/h'` (hundred tasks an hour)
     rate_limit = None
     rate_limit = None
 
 
-    #: If enabled the worker will not store task state and return values
+    #: If enabled the worker won't store task state and return values
     #: for this task.  Defaults to the :setting:`task_ignore_result`
     #: for this task.  Defaults to the :setting:`task_ignore_result`
     #: setting.
     #: setting.
     ignore_result = None
     ignore_result = None
@@ -178,6 +184,14 @@ class Task:
     #: (``result.children``).
     #: (``result.children``).
     trail = True
     trail = True
 
 
+    #: If enabled the worker will send monitoring events related to
+    #: this task (but only if the worker is configured to send
+    #: task related events).
+    #: Note that this has no effect on the task-failure event case
+    #: where a task is not registered (as it will have no task class
+    #: to check this flag).
+    send_events = True
+
     #: When enabled errors will be stored even if the task is otherwise
     #: When enabled errors will be stored even if the task is otherwise
     #: configured to ignore results.
     #: configured to ignore results.
     store_errors_even_if_ignored = None
     store_errors_even_if_ignored = None
@@ -206,7 +220,7 @@ class Task:
     #: finished, or waiting to be retried.
     #: finished, or waiting to be retried.
     #:
     #:
     #: Having a 'started' status can be useful for when there are long
     #: Having a 'started' status can be useful for when there are long
-    #: running tasks and there is a need to report which task is currently
+    #: running tasks and there's a need to report what task is currently
     #: running.
     #: running.
     #:
     #:
     #: The application default can be overridden using the
     #: The application default can be overridden using the
@@ -214,12 +228,11 @@ class Task:
     track_started = None
     track_started = None
 
 
     #: When enabled messages for this task will be acknowledged **after**
     #: When enabled messages for this task will be acknowledged **after**
-    #: the task has been executed, and not *just before* which is the
-    #: default behavior.
+    #: the task has been executed, and not *just before* (the
+    #: default behavior).
     #:
     #:
     #: Please note that this means the task may be executed twice if the
     #: Please note that this means the task may be executed twice if the
-    #: worker crashes mid execution (which may be acceptable for some
-    #: applications).
+    #: worker crashes mid execution.
     #:
     #:
     #: The application default can be overridden with the
     #: The application default can be overridden with the
     #: :setting:`task_acks_late` setting.
     #: :setting:`task_acks_late` setting.
@@ -227,7 +240,7 @@ class Task:
 
 
     #: Even if :attr:`acks_late` is enabled, the worker will
     #: Even if :attr:`acks_late` is enabled, the worker will
     #: acknowledge tasks when the worker process executing them abruptly
     #: acknowledge tasks when the worker process executing them abruptly
-    #: exits or is signaled (e.g. :sig:`KILL`/:sig:`INT`, etc).
+    #: exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc).
     #:
     #:
     #: Setting this to true allows the message to be re-queued instead,
     #: Setting this to true allows the message to be re-queued instead,
     #: so that the task will execute again by the same worker, or another
     #: so that the task will execute again by the same worker, or another
@@ -240,9 +253,9 @@ class Task:
     #: Tuple of expected exceptions.
     #: Tuple of expected exceptions.
     #:
     #:
     #: These are errors that are expected in normal operation
     #: These are errors that are expected in normal operation
-    #: and that should not be regarded as a real error by the worker.
+    #: and that shouldn't be regarded as a real error by the worker.
     #: Currently this means that the state will be updated to an error
     #: Currently this means that the state will be updated to an error
-    #: state, but the worker will not log the event as an error.
+    #: state, but the worker won't log the event as an error.
     throws = ()
     throws = ()
 
 
     #: Default task expiry time.
     #: Default task expiry time.
@@ -254,7 +267,7 @@ class Task:
     #: Task request stack, the current request will be the topmost.
     #: Task request stack, the current request will be the topmost.
     request_stack = None
     request_stack = None
 
 
-    #: Some may expect a request to exist even if the task has not been
+    #: Some may expect a request to exist even if the task hasn't been
     #: called.  This should probably be deprecated.
     #: called.  This should probably be deprecated.
     _default_request = None
     _default_request = None
 
 
@@ -281,62 +294,66 @@ class Task:
     # - until the task is actually used
     # - until the task is actually used
 
 
     @classmethod
     @classmethod
-    def bind(self, app):
-        was_bound, self.__bound__ = self.__bound__, True
-        self._app = app
+    def bind(cls, app):
+        was_bound, cls.__bound__ = cls.__bound__, True
+        cls._app = app
         conf = app.conf
         conf = app.conf
-        self._exec_options = None  # clear option cache
+        cls._exec_options = None  # clear option cache
 
 
-        for attr_name, config_name in self.from_config:
-            if getattr(self, attr_name, None) is None:
-                setattr(self, attr_name, conf[config_name])
+        for attr_name, config_name in cls.from_config:
+            if getattr(cls, attr_name, None) is None:
+                setattr(cls, attr_name, conf[config_name])
 
 
         # decorate with annotations from config.
         # decorate with annotations from config.
         if not was_bound:
         if not was_bound:
-            self.annotate()
+            cls.annotate()
 
 
             from celery.utils.threads import LocalStack
             from celery.utils.threads import LocalStack
-            self.request_stack = LocalStack()
+            cls.request_stack = LocalStack()
 
 
         # PeriodicTask uses this to add itself to the PeriodicTask schedule.
         # PeriodicTask uses this to add itself to the PeriodicTask schedule.
-        self.on_bound(app)
+        cls.on_bound(app)
 
 
         return app
         return app
 
 
     @classmethod
     @classmethod
-    def on_bound(self, app):
-        """This method can be defined to do additional actions when the
-        task class is bound to an app."""
+    def on_bound(cls, app):
+        """Called when the task is bound to an app.
+
+        Note:
+            This class method can be defined to do additional actions when
+            the task class is bound to an app.
+        """
         pass
         pass
 
 
     @classmethod
     @classmethod
-    def _get_app(self):
-        if self._app is None:
-            self._app = current_app
-        if not self.__bound__:
+    def _get_app(cls):
+        if cls._app is None:
+            cls._app = current_app
+        if not cls.__bound__:
             # The app property's __set__  method is not called
             # The app property's __set__  method is not called
             # if Task.app is set (on the class), so must bind on use.
             # if Task.app is set (on the class), so must bind on use.
-            self.bind(self._app)
-        return self._app
+            cls.bind(cls._app)
+        return cls._app
     app = class_property(_get_app, bind)
     app = class_property(_get_app, bind)
 
 
     @classmethod
     @classmethod
-    def annotate(self):
-        for d in resolve_all_annotations(self.app.annotations, self):
+    def annotate(cls):
+        for d in resolve_all_annotations(cls.app.annotations, cls):
             for key, value in d.items():
             for key, value in d.items():
                 if key.startswith('@'):
                 if key.startswith('@'):
-                    self.add_around(key[1:], value)
+                    cls.add_around(key[1:], value)
                 else:
                 else:
-                    setattr(self, key, value)
+                    setattr(cls, key, value)
 
 
     @classmethod
     @classmethod
-    def add_around(self, attr, around):
-        orig = getattr(self, attr)
+    def add_around(cls, attr, around):
+        orig = getattr(cls, attr)
         if getattr(orig, '__wrapped__', None):
         if getattr(orig, '__wrapped__', None):
             orig = orig.__wrapped__
             orig = orig.__wrapped__
         meth = around(orig)
         meth = around(orig)
         meth.__wrapped__ = orig
         meth.__wrapped__ = orig
-        setattr(self, attr, meth)
+        setattr(cls, attr, meth)
 
 
     def __call__(self, *args, **kwargs):
     def __call__(self, *args, **kwargs):
         _task_stack.push(self)
         _task_stack.push(self)
@@ -355,7 +372,7 @@ class Task:
         # - simply grabs it from the local registry.
         # - simply grabs it from the local registry.
         # - in later versions the module of the task is also included,
         # - in later versions the module of the task is also included,
         # - and the receiving side tries to import that module so that
         # - and the receiving side tries to import that module so that
-        # - it will work even if the task has not been registered.
+        # - it will work even if the task hasn't been registered.
         mod = type(self).__module__
         mod = type(self).__module__
         mod = mod if mod and mod in sys.modules else None
         mod = mod if mod and mod in sys.modules else None
         return (_unpickle_task_v2, (self.name, mod), None)
         return (_unpickle_task_v2, (self.name, mod), None)
@@ -398,7 +415,7 @@ class Task:
 
 
             expires (float, ~datetime.datetime): Datetime or
             expires (float, ~datetime.datetime): Datetime or
                 seconds in the future when the task should expire.
                 seconds in the future when the task should expire.
-                The task will not be executed after the expiration time.
+                The task won't be executed after the expiration time.
 
 
             shadow (str): Override task name used in logs/monitoring.
             shadow (str): Override task name used in logs/monitoring.
                 Default is retrieved from :meth:`shadow_name`.
                 Default is retrieved from :meth:`shadow_name`.
@@ -426,7 +443,7 @@ class Task:
                 argument.
                 argument.
 
 
             routing_key (str): Custom routing key used to route the task to a
             routing_key (str): Custom routing key used to route the task to a
-                worker server. If in combination with a ``queue`` argument
+                worker server.  If in combination with a ``queue`` argument
                 only used to specify custom routing keys to topic exchanges.
                 only used to specify custom routing keys to topic exchanges.
 
 
             priority (int): The task priority, a number between 0 and 9.
             priority (int): The task priority, a number between 0 and 9.
@@ -434,15 +451,15 @@ class Task:
 
 
             serializer (str): Serialization method to use.
             serializer (str): Serialization method to use.
                 Can be `pickle`, `json`, `yaml`, `msgpack` or any custom
                 Can be `pickle`, `json`, `yaml`, `msgpack` or any custom
-                serialization method that has been registered
+                serialization method that's been registered
                 with :mod:`kombu.serialization.registry`.
                 with :mod:`kombu.serialization.registry`.
                 Defaults to the :attr:`serializer` attribute.
                 Defaults to the :attr:`serializer` attribute.
 
 
             compression (str): Optional compression method
             compression (str): Optional compression method
                 to use.  Can be one of ``zlib``, ``bzip2``,
                 to use.  Can be one of ``zlib``, ``bzip2``,
                 or any custom compression methods registered with
                 or any custom compression methods registered with
-                :func:`kombu.compression.register`. Defaults to
-                the :setting:`task_compression` setting.
+                :func:`kombu.compression.register`.
+                Defaults to the :setting:`task_compression` setting.
 
 
             link (~@Signature): A single, or a list of tasks signatures
             link (~@Signature): A single, or a list of tasks signatures
                 to apply if the task returns successfully.
                 to apply if the task returns successfully.
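
For orientation, a minimal sketch of how the options documented above are typically combined in an ``apply_async`` call. The project name, broker URL, and callback task name are invented for this sketch (not taken from this diff), and actually sending the message requires a running broker:

from celery import Celery, signature

app = Celery('proj', broker='amqp://localhost')  # hypothetical broker

@app.task
def add(x, y):
    return x + y

# Deliver after a 10 second countdown, expire after 5 minutes,
# serialize as JSON, and attach a success callback by (made-up) name.
result = add.apply_async(
    (2, 3),
    countdown=10,
    expires=300,
    serializer='json',
    link=signature('proj.log_result'),
)
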
@@ -550,7 +567,7 @@ class Task:
         Note:
         Note:
             Although the task will never return above as `retry` raises an
             Although the task will never return above as `retry` raises an
             exception to notify the worker, we use `raise` in front of the
             exception to notify the worker, we use `raise` in front of the
-            retry to convey that the rest of the block will not be executed.
+            retry to convey that the rest of the block won't be executed.
 
 
         Arguments:
         Arguments:
             args (Tuple): Positional arguments to retry with.
             args (Tuple): Positional arguments to retry with.
@@ -569,15 +586,15 @@ class Task:
             eta (~datetime.datetime): Explicit time and date to run the
             eta (~datetime.datetime): Explicit time and date to run the
                 retry at.
                 retry at.
             max_retries (int): If set, overrides the default retry limit for
             max_retries (int): If set, overrides the default retry limit for
-                this execution. Changes to this parameter do not propagate to
-                subsequent task retry attempts. A value of :const:`None`, means
-                "use the default", so if you want infinite retries you would
+                this execution.  Changes to this parameter don't propagate to
+                subsequent task retry attempts.  A value of :const:`None`
+                means "use the default", so if you want infinite retries you'd
                 have to set the :attr:`max_retries` attribute of the task to
                 have to set the :attr:`max_retries` attribute of the task to
                 :const:`None` first.
                 :const:`None` first.
             time_limit (int): If set, overrides the default time limit.
             time_limit (int): If set, overrides the default time limit.
             soft_time_limit (int): If set, overrides the default soft
             soft_time_limit (int): If set, overrides the default soft
                 time limit.
                 time limit.
-            throw (bool): If this is :const:`False`, do not raise the
+            throw (bool): If this is :const:`False`, don't raise the
                 :exc:`~@Retry` exception that tells the worker to mark
                 :exc:`~@Retry` exception that tells the worker to mark
                 the task as being retried.  Note that this means the task
                 the task as being retried.  Note that this means the task
                 will be marked as failed if the task raises an exception,
                 will be marked as failed if the task raises an exception,
@@ -598,8 +615,9 @@ class Task:
         # Not in worker or emulated by (apply/always_eager),
         # Not in worker or emulated by (apply/always_eager),
         # so just raise the original exception.
         # so just raise the original exception.
         if request.called_directly:
         if request.called_directly:
-            maybe_reraise()  # raise orig stack if PyErr_Occurred
-            raise exc or Retry('Task can be retried', None)
+            # raises orig stack if PyErr_Occurred,
+            # and augments it with exc if that argument is defined.
+            raise_with_context(exc or Retry('Task can be retried', None))
 
 
         if not eta and countdown is None:
         if not eta and countdown is None:
             countdown = self.default_retry_delay
             countdown = self.default_retry_delay
@@ -613,10 +631,9 @@ class Task:
 
 
         if max_retries is not None and retries > max_retries:
         if max_retries is not None and retries > max_retries:
             if exc:
             if exc:
-                # first try to re-raise the original exception
-                maybe_reraise()
-                # or if not in an except block then raise the custom exc.
-                raise exc
+                # On Py3: will augment any current exception with
+                # the exc argument provided (raise exc from orig)
+                raise_with_context(exc)
             raise self.MaxRetriesExceededError(
             raise self.MaxRetriesExceededError(
                 "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                 "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                     self.name, request.id, S.args, S.kwargs))
                     self.name, request.id, S.args, S.kwargs))
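
As a concrete illustration of the retry semantics described above, a hedged sketch of a bound task retrying on a transient error; the task name, helper, and broker URL are assumptions for this example only:

from celery import Celery

app = Celery('proj', broker='amqp://localhost')  # hypothetical broker

def download(url):
    raise ConnectionError('transient failure')   # stand-in for real network I/O

@app.task(bind=True, max_retries=3, default_retry_delay=5)
def fetch(self, url):
    try:
        return download(url)
    except ConnectionError as exc:
        # Re-raise through retry() so the worker reschedules the task;
        # countdown overrides default_retry_delay for this attempt only.
        raise self.retry(exc=exc, countdown=10)
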
@@ -704,44 +721,74 @@ class Task:
             task_id, backend=self.backend, **kwargs)
             task_id, backend=self.backend, **kwargs)
 
 
     def signature(self, args=None, *starargs, **starkwargs):
     def signature(self, args=None, *starargs, **starkwargs):
-        """Return :class:`~celery.signature` object for
-        this task, wrapping arguments and execution options
-        for a single task invocation."""
+        """Create signature.
+
+        Returns:
+            :class:`~celery.signature`:  object for
+                this task, wrapping arguments and execution options
+                for a single task invocation.
+        """
         starkwargs.setdefault('app', self.app)
         starkwargs.setdefault('app', self.app)
         return signature(self, args, *starargs, **starkwargs)
         return signature(self, args, *starargs, **starkwargs)
     subtask = signature
     subtask = signature
 
 
     def s(self, *args, **kwargs):
     def s(self, *args, **kwargs):
-        """``.s(*a, **k) -> .signature(a, k)``"""
+        """Create signature.
+
+        Shortcut for ``.s(*a, **k) -> .signature(a, k)``.
+        """
         return self.signature(args, kwargs)
         return self.signature(args, kwargs)
 
 
     def si(self, *args, **kwargs):
     def si(self, *args, **kwargs):
-        """``.si(*a, **k) -> .signature(a, k, immutable=True)``"""
+        """Create immutable signature.
+
+        Shortcut for ``.si(*a, **k) -> .signature(a, k, immutable=True)``.
+        """
         return self.signature(args, kwargs, immutable=True)
         return self.signature(args, kwargs, immutable=True)
 
 
     def chunks(self, it, n):
     def chunks(self, it, n):
-        """Creates a :class:`~celery.canvas.chunks` task for this task."""
+        """Create a :class:`~celery.canvas.chunks` task for this task."""
         from celery import chunks
         from celery import chunks
         return chunks(self.s(), it, n, app=self.app)
         return chunks(self.s(), it, n, app=self.app)
 
 
     def map(self, it):
     def map(self, it):
-        """Creates a :class:`~celery.canvas.xmap` task from ``it``."""
+        """Create a :class:`~celery.canvas.xmap` task from ``it``."""
         from celery import xmap
         from celery import xmap
         return xmap(self.s(), it, app=self.app)
         return xmap(self.s(), it, app=self.app)
 
 
     def starmap(self, it):
     def starmap(self, it):
-        """Creates a :class:`~celery.canvas.xstarmap` task from ``it``."""
+        """Create a :class:`~celery.canvas.xstarmap` task from ``it``."""
         from celery import xstarmap
         from celery import xstarmap
         return xstarmap(self.s(), it, app=self.app)
         return xstarmap(self.s(), it, app=self.app)
 
 
-    def send_event(self, type_, **fields):
+    def send_event(self, type_, retry=True, retry_policy=None, **fields):
+        """Send monitoring event message.
+
+        This can be used to add custom event types in :pypi:`Flower`
+        and other monitors.
+
+        Arguments:
+            type_ (str):  Type of event, e.g. ``"task-failed"``.
+
+        Keyword Arguments:
+            retry (bool):  Retry sending the message
+                if the connection is lost.  Default is taken from the
+                :setting:`task_publish_retry` setting.
+            retry_policy (Mapping): Retry settings.  Default is taken
+                from the :setting:`task_publish_retry_policy` setting.
+            **fields (**Any): Map containing information about the event.
+                Must be JSON serializable.
+        """
         req = self.request
         req = self.request
+        if retry_policy is None:
+            retry_policy = self.app.conf.task_publish_retry_policy
         with self.app.events.default_dispatcher(hostname=req.hostname) as d:
         with self.app.events.default_dispatcher(hostname=req.hostname) as d:
-            return d.send(type_, uuid=req.id, **fields)
+            return d.send(
+                type_,
+                uuid=req.id, retry=retry, retry_policy=retry_policy, **fields)
 
 
     def replace(self, sig):
     def replace(self, sig):
-        """Replace the current task, with a new task inheriting the
-        same task id.
+        """Replace this task, with a new task inheriting the task id.
 
 
         .. versionadded:: 4.0
         .. versionadded:: 4.0
 
 
@@ -751,7 +798,7 @@ class Task:
         Raises:
         Raises:
             ~@Ignore: This is always raised, so the best practice
             ~@Ignore: This is always raised, so the best practice
             is to always use ``raise self.replace(...)`` to convey
             is to always use ``raise self.replace(...)`` to convey
-            to the reader that the task will not continue after being replaced.
+            to the reader that the task won't continue after being replaced.
         """
         """
         chord = self.request.chord
         chord = self.request.chord
         if 'chord' in sig.options:
         if 'chord' in sig.options:
@@ -789,7 +836,7 @@ class Task:
 
 
         Arguments:
         Arguments:
             sig (~@Signature): Signature to extend chord with.
             sig (~@Signature): Signature to extend chord with.
-            lazy (bool): If enabled the new task will not actually be called,
+            lazy (bool): If enabled the new task won't actually be called,
                 and ``sig.delay()`` must be called manually.
                 and ``sig.delay()`` must be called manually.
         """
         """
         if not self.request.chord:
         if not self.request.chord:
@@ -891,7 +938,7 @@ class Task:
         self.request_stack.pop()
         self.request_stack.pop()
 
 
     def __repr__(self):
     def __repr__(self):
-        """`repr(task)`"""
+        """``repr(task)``."""
         return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE)
         return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE)
 
 
     def _get_request(self):
     def _get_request(self):
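
To show how the new ``retry``/``retry_policy`` arguments to ``send_event`` fit together, a small sketch; the event type, field, and policy values are illustrative, with the policy keys following the usual publish-retry settings:

from celery import Celery

app = Celery('proj', broker='amqp://localhost')  # hypothetical broker

@app.task(bind=True)
def process(self, item_id):
    # ... do the actual work here ...
    self.send_event(
        'task-progress',        # custom event type, invented for this sketch
        progress=100,
        retry=True,
        retry_policy={
            'max_retries': 3,
            'interval_start': 0,
            'interval_step': 0.2,
            'interval_max': 0.5,
        },
    )
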

+ 29 - 14
celery/app/trace.py

@@ -10,6 +10,11 @@ errors are recorded, handlers are applied and so on.
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # so instead we now use one closure per task class.
 # so instead we now use one closure per task class.
 
 
+# pylint: disable=redefined-outer-name
+# We cache globals and attribute lookups, so disable this warning.
+# pylint: disable=broad-except
+# We know what we're doing...
+
 import logging
 import logging
 import os
 import os
 import sys
 import sys
@@ -112,8 +117,7 @@ trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
 
 
 
 
 def task_has_custom(task, attr):
 def task_has_custom(task, attr):
-    """Return true if the task or one of its bases
-    defines ``attr`` (excluding the one in BaseTask)."""
+    """Return true if the task overrides ``attr``."""
     return mro_lookup(task.__class__, attr, stop={BaseTask, object},
     return mro_lookup(task.__class__, attr, stop={BaseTask, object},
                       monkey_patched=['celery.app.task'])
                       monkey_patched=['celery.app.task'])
 
 
@@ -132,6 +136,8 @@ def get_log_policy(task, einfo, exc):
 
 
 
 
 class TraceInfo:
 class TraceInfo:
+    """Information about task execution."""
+
     __slots__ = ('state', 'retval')
     __slots__ = ('state', 'retval')
 
 
     def __init__(self, state, retval=None):
     def __init__(self, state, retval=None):
@@ -172,12 +178,13 @@ class TraceInfo:
             signals.task_retry.send(sender=task, request=req,
             signals.task_retry.send(sender=task, request=req,
                                     reason=reason, einfo=einfo)
                                     reason=reason, einfo=einfo)
             info(LOG_RETRY, {
             info(LOG_RETRY, {
-                'id': req.id, 'name': task.name,
-                'exc': safe_repr(reason.exc),
+                'id': req.id,
+                'name': task.name,
+                'exc': str(reason),
             })
             })
             return einfo
             return einfo
         finally:
         finally:
-            del(tb)
+            del tb
 
 
     def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
     def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
         """Handle exception."""
         """Handle exception."""
@@ -204,7 +211,7 @@ class TraceInfo:
             self._log_error(task, req, einfo)
             self._log_error(task, req, einfo)
             return einfo
             return einfo
         finally:
         finally:
-            del(tb)
+            del tb
 
 
     def _log_error(self, task, req, einfo):
     def _log_error(self, task, req, einfo):
         eobj = einfo.exception = get_pickled_exception(einfo.exception)
         eobj = einfo.exception = get_pickled_exception(einfo.exception)
@@ -238,8 +245,10 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
                  monotonic=monotonic, truncate=truncate,
                  monotonic=monotonic, truncate=truncate,
                  trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
                  trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
-    """Return a function that traces task execution; catches all
-    exceptions and updates result backend with the state and result
+    """Return a function that traces task execution.
+
+    Catches all exceptions and updates result backend with the
+    state and result.
 
 
     If the call was successful, it saves the result to the task result
     If the call was successful, it saves the result to the task result
     backend, and sets the task status to `"SUCCESS"`.
     backend, and sets the task status to `"SUCCESS"`.
@@ -259,6 +268,9 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         :keyword request: Request dict.
         :keyword request: Request dict.
 
 
     """
     """
+    # noqa: C901
+    # pylint: disable=too-many-statements
+
     # If the task doesn't define a custom __call__ method
     # If the task doesn't define a custom __call__ method
     # we optimize it away by simply calling the run method directly,
     # we optimize it away by simply calling the run method directly,
     # saving the extra method call and a line less in the stack trace.
     # saving the extra method call and a line less in the stack trace.
@@ -320,7 +332,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         # retval - is the always unmodified return value.
         # retval - is the always unmodified return value.
         # state  - is the resulting task state.
         # state  - is the resulting task state.
 
 
-        # This function is very long because we have unrolled all the calls
+        # This function is very long because we've unrolled all the calls
         # for performance reasons, and because the function is so long
         # for performance reasons, and because the function is so long
         # we want the main variables (I, and R) to stand out visually from
         # we want the main variables (I, and R) to stand out visually from
         # the rest of the variables, so breaking PEP8 is worth it ;)
         # the rest of the variables, so breaking PEP8 is worth it ;)
@@ -416,13 +428,13 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     except EncodeError as exc:
                     except EncodeError as exc:
                         I, R, state, retval = on_error(task_request, exc, uuid)
                         I, R, state, retval = on_error(task_request, exc, uuid)
                     else:
                     else:
+                        Rstr = saferepr(R, resultrepr_maxsize)
+                        T = monotonic() - time_start
                         if task_on_success:
                         if task_on_success:
                             task_on_success(retval, uuid, args, kwargs)
                             task_on_success(retval, uuid, args, kwargs)
                         if success_receivers:
                         if success_receivers:
                             send_success(sender=task, result=retval)
                             send_success(sender=task, result=retval)
                         if _does_info:
                         if _does_info:
-                            T = monotonic() - time_start
-                            Rstr = saferepr(R, resultrepr_maxsize)
                             info(LOG_SUCCESS, {
                             info(LOG_SUCCESS, {
                                 'id': uuid, 'name': name,
                                 'id': uuid, 'name': name,
                                 'return_value': Rstr, 'runtime': T,
                                 'return_value': Rstr, 'runtime': T,
@@ -466,6 +478,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
 
 
 
 
 def trace_task(task, uuid, args, kwargs, request={}, **opts):
 def trace_task(task, uuid, args, kwargs, request={}, **opts):
+    """Trace task execution."""
     try:
     try:
         if task.__trace__ is None:
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
             task.__trace__ = build_tracer(task.name, task, **opts)
@@ -528,16 +541,17 @@ def report_internal_error(task, exc):
                 exc, exc_info.traceback)))
                 exc, exc_info.traceback)))
         return exc_info
         return exc_info
     finally:
     finally:
-        del(_tb)
+        del _tb
 
 
 
 
 def setup_worker_optimizations(app, hostname=None):
 def setup_worker_optimizations(app, hostname=None):
+    """Setup worker related optimizations."""
     global trace_task_ret
     global trace_task_ret
 
 
     hostname = hostname or gethostname()
     hostname = hostname or gethostname()
 
 
     # make sure custom Task.__call__ methods that call super
     # make sure custom Task.__call__ methods that call super
-    # will not mess up the request/task stack.
+    # won't mess up the request/task stack.
     _install_stack_protection()
     _install_stack_protection()
 
 
     # all new threads start without a current app, so if an app is not
     # all new threads start without a current app, so if an app is not
@@ -566,6 +580,7 @@ def setup_worker_optimizations(app, hostname=None):
 
 
 
 
 def reset_worker_optimizations():
 def reset_worker_optimizations():
+    """Reset previously configured optimizations."""
     global trace_task_ret
     global trace_task_ret
     trace_task_ret = _trace_task_ret
     trace_task_ret = _trace_task_ret
     try:
     try:
@@ -591,7 +606,7 @@ def _install_stack_protection():
     #   they work when tasks are called directly.
     #   they work when tasks are called directly.
     #
     #
     # The worker only optimizes away __call__ in the case
     # The worker only optimizes away __call__ in the case
-    # where it has not been overridden, so the request/task stack
+    # where it hasn't been overridden, so the request/task stack
     # will blow if a custom task class defines __call__ and also
     # will blow if a custom task class defines __call__ and also
     # calls super().
     # calls super().
     if not getattr(BaseTask, '_stackprotected', False):
     if not getattr(BaseTask, '_stackprotected', False):
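
The module comment above explains that one tracer closure is built per task class so that globals and attribute lookups can be cached. A simplified, stand-alone illustration of that pattern follows; it is not Celery's actual tracer, just the same closure idea:

import time

def build_simple_tracer(task_fn, on_success=None, on_failure=None):
    # Bind what the hot path needs to locals once, so each call avoids
    # repeated global and attribute lookups.
    _monotonic = time.monotonic

    def trace(*args, **kwargs):
        start = _monotonic()
        try:
            retval = task_fn(*args, **kwargs)
        except Exception as exc:
            if on_failure is not None:
                on_failure(exc)
            raise
        if on_success is not None:
            on_success(retval, _monotonic() - start)
        return retval

    return trace

def add(x, y):
    return x + y

traced_add = build_simple_tracer(add, on_success=print)
traced_add(2, 3)   # prints the result and the measured runtime
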

+ 28 - 10
celery/app/utils.py

@@ -102,6 +102,20 @@ class Settings(ConfigurationView):
             self.first('broker_url', 'broker_host')
             self.first('broker_url', 'broker_host')
         )
         )
 
 
+    @property
+    def task_default_exchange(self):
+        return self.first(
+            'task_default_exchange',
+            'task_default_queue',
+        )
+
+    @property
+    def task_default_routing_key(self):
+        return self.first(
+            'task_default_routing_key',
+            'task_default_queue',
+        )
+
     @property
     @property
     def timezone(self):
     def timezone(self):
         # this way we also support django's time zone.
         # this way we also support django's time zone.
@@ -133,7 +147,7 @@ class Settings(ConfigurationView):
         return find(name, namespace)
         return find(name, namespace)
 
 
     def find_value_for_key(self, name, namespace='celery'):
     def find_value_for_key(self, name, namespace='celery'):
-        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
+        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``."""
         return self.get_by_parts(*self.find_option(name, namespace)[:-1])
         return self.get_by_parts(*self.find_option(name, namespace)[:-1])
 
 
     def get_by_parts(self, *parts):
     def get_by_parts(self, *parts):
@@ -155,8 +169,7 @@ class Settings(ConfigurationView):
         })
         })
 
 
     def humanize(self, with_defaults=False, censored=True):
     def humanize(self, with_defaults=False, censored=True):
-        """Return a human readable string showing changes to the
-        configuration."""
+        """Return a human readable text showing configuration changes."""
         return '\n'.join(
         return '\n'.join(
             '{0}: {1}'.format(key, pretty(value, width=50))
             '{0}: {1}'.format(key, pretty(value, width=50))
             for key, value in self.table(with_defaults, censored).items())
             for key, value in self.table(with_defaults, censored).items())
@@ -213,8 +226,8 @@ def detect_settings(conf, preconf={}, ignore_keys=set(), prefix=None,
         # always use new format if prefix is used.
         # always use new format if prefix is used.
         info, left = _settings_info, set()
         info, left = _settings_info, set()
 
 
-    # only raise error for keys that the user did not provide two keys
-    # for (e.g. both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
+    # only raise error for keys that the user didn't provide two keys
+    # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
     really_left = {key for key in left if info.convert[key] not in have}
     really_left = {key for key in left if info.convert[key] not in have}
     if really_left:
     if really_left:
         # user is mixing old/new, or new/old settings, give renaming
         # user is mixing old/new, or new/old settings, give renaming
@@ -226,7 +239,11 @@ def detect_settings(conf, preconf={}, ignore_keys=set(), prefix=None,
 
 
     preconf = {info.convert.get(k, k): v for k, v in preconf.items()}
     preconf = {info.convert.get(k, k): v for k, v in preconf.items()}
     defaults = dict(deepcopy(info.defaults), **preconf)
     defaults = dict(deepcopy(info.defaults), **preconf)
-    return Settings(preconf, [conf, defaults], info.key_t, prefix=prefix)
+    return Settings(
+        preconf, [conf, defaults],
+        (_old_key_to_new, _new_key_to_old),
+        prefix=prefix,
+    )
 
 
 
 
 class AppPickler:
 class AppPickler:
@@ -257,18 +274,18 @@ class AppPickler:
 
 
 
 
 def _unpickle_app(cls, pickler, *args):
 def _unpickle_app(cls, pickler, *args):
-    """Rebuild app for versions 2.5+"""
+    """Rebuild app for versions 2.5+."""
     return pickler()(cls, *args)
     return pickler()(cls, *args)
 
 
 
 
 def _unpickle_app_v2(cls, kwargs):
 def _unpickle_app_v2(cls, kwargs):
-    """Rebuild app for versions 3.1+"""
+    """Rebuild app for versions 3.1+."""
     kwargs['set_as_current'] = False
     kwargs['set_as_current'] = False
     return cls(**kwargs)
     return cls(**kwargs)
 
 
 
 
 def filter_hidden_settings(conf):
 def filter_hidden_settings(conf):
-
+    """Filter sensitive settings."""
     def maybe_censor(key, value, mask='*' * 8):
     def maybe_censor(key, value, mask='*' * 8):
         if isinstance(value, Mapping):
         if isinstance(value, Mapping):
             return filter_hidden_settings(value)
             return filter_hidden_settings(value)
@@ -297,7 +314,7 @@ def bugreport(app):
         driver_v = '{0}:{1}'.format(conn.transport.driver_name,
         driver_v = '{0}:{1}'.format(conn.transport.driver_name,
                                     conn.transport.driver_version())
                                     conn.transport.driver_version())
         transport = conn.transport_cls
         transport = conn.transport_cls
-    except Exception:
+    except Exception:  # pylint: disable=broad-except
         transport = driver_v = ''
         transport = driver_v = ''
 
 
     return BUGREPORT_INFO.format(
     return BUGREPORT_INFO.format(
@@ -317,6 +334,7 @@ def bugreport(app):
 
 
 
 
 def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
 def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
+    """Find app by name."""
     from .base import Celery
     from .base import Celery
 
 
     try:
     try:
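
The new ``task_default_exchange`` and ``task_default_routing_key`` properties above fall back to ``task_default_queue`` through ``Settings.first()``. A rough stand-alone sketch of that "first configured value wins" lookup, independent of Celery's ConfigurationView:

class SimpleSettings(dict):
    def first(self, *keys):
        # Return the value of the first key that is actually set,
        # mirroring the exchange/routing-key fallback above.
        for key in keys:
            value = self.get(key)
            if value is not None:
                return value
        return None

conf = SimpleSettings(task_default_queue='celery')
assert conf.first('task_default_exchange', 'task_default_queue') == 'celery'

conf['task_default_exchange'] = 'tasks'
assert conf.first('task_default_exchange', 'task_default_queue') == 'tasks'
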

+ 14 - 10
celery/apps/beat.py

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-"""This module is the 'program-version' of :mod:`celery.beat`.
+"""Beat command-line program.
+
+This module is the 'program-version' of :mod:`celery.beat`.
 
 
 It does everything necessary to run that module
 It does everything necessary to run that module
 as an actual application, like installing signal handlers
 as an actual application, like installing signal handlers
@@ -14,7 +16,7 @@ from datetime import datetime
 from celery import VERSION_BANNER, platforms, beat
 from celery import VERSION_BANNER, platforms, beat
 from celery.utils.imports import qualname
 from celery.utils.imports import qualname
 from celery.utils.log import LOG_LEVELS, get_logger
 from celery.utils.log import LOG_LEVELS, get_logger
-from celery.utils.timeutils import humanize_seconds
+from celery.utils.time import humanize_seconds
 
 
 __all__ = ['Beat']
 __all__ = ['Beat']
 
 
@@ -33,6 +35,8 @@ logger = get_logger('celery.beat')
 
 
 
 
 class Beat:
 class Beat:
+    """Beat as a service."""
+
     Service = beat.Service
     Service = beat.Service
 
 
     app = None
     app = None
@@ -40,14 +44,17 @@ class Beat:
     def __init__(self, max_interval=None, app=None,
     def __init__(self, max_interval=None, app=None,
                  socket_timeout=30, pidfile=None, no_color=None,
                  socket_timeout=30, pidfile=None, no_color=None,
                  loglevel='WARN', logfile=None, schedule=None,
                  loglevel='WARN', logfile=None, schedule=None,
-                 scheduler_cls=None, redirect_stdouts=None,
+                 scheduler=None,
+                 scheduler_cls=None,  # XXX use scheduler
+                 redirect_stdouts=None,
                  redirect_stdouts_level=None, **kwargs):
                  redirect_stdouts_level=None, **kwargs):
         self.app = app = app or self.app
         self.app = app = app or self.app
         either = self.app.either
         either = self.app.either
         self.loglevel = loglevel
         self.loglevel = loglevel
         self.logfile = logfile
         self.logfile = logfile
         self.schedule = either('beat_schedule_filename', schedule)
         self.schedule = either('beat_schedule_filename', schedule)
-        self.scheduler_cls = either('beat_scheduler', scheduler_cls)
+        self.scheduler_cls = either(
+            'beat_scheduler', scheduler, scheduler_cls)
         self.redirect_stdouts = either(
         self.redirect_stdouts = either(
             'worker_redirect_stdouts', redirect_stdouts)
             'worker_redirect_stdouts', redirect_stdouts)
         self.redirect_stdouts_level = either(
         self.redirect_stdouts_level = either(
@@ -130,8 +137,8 @@ class Beat:
             loader=qualname(self.app.loader),
             loader=qualname(self.app.loader),
             scheduler=qualname(scheduler),
             scheduler=qualname(scheduler),
             scheduler_info=scheduler.info,
             scheduler_info=scheduler.info,
-            hmax_interval=humanize_seconds(service.max_interval),
-            max_interval=service.max_interval,
+            hmax_interval=humanize_seconds(scheduler.max_interval),
+            max_interval=scheduler.max_interval,
         )
         )
 
 
     def set_process_title(self):
     def set_process_title(self):
@@ -141,11 +148,8 @@ class Beat:
         )
         )
 
 
     def install_sync_handler(self, service):
     def install_sync_handler(self, service):
-        """Install a `SIGTERM` + `SIGINT` handler that saves
-        the beat schedule."""
-
+        """Install a `SIGTERM` + `SIGINT` handler saving the schedule."""
         def _sync(signum, frame):
         def _sync(signum, frame):
             service.sync()
             service.sync()
             raise SystemExit()
             raise SystemExit()
-
         platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)
         platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)
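
``install_sync_handler`` above ties SIGTERM and SIGINT to persisting the schedule before exit. A generic standard-library sketch of the same pattern, with a stand-in service object:

import signal
import sys

class FakeService:
    def sync(self):
        print('schedule saved')   # stand-in for writing the beat schedule

def install_sync_handler(service):
    def _sync(signum, frame):
        service.sync()
        sys.exit(0)
    signal.signal(signal.SIGTERM, _sync)
    signal.signal(signal.SIGINT, _sync)

install_sync_handler(FakeService())
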

+ 483 - 0
celery/apps/multi.py

@@ -0,0 +1,483 @@
+"""Start/stop/manage workers."""
+import errno
+import os
+import shlex
+import signal
+import sys
+
+from collections import OrderedDict, UserList, defaultdict
+from functools import partial
+from subprocess import Popen
+from time import sleep
+
+from kombu.utils.encoding import from_utf8
+from kombu.utils.objects import cached_property
+
+from celery.platforms import IS_WINDOWS, Pidfile, signal_name
+from celery.utils.nodenames import (
+    gethostname, host_format, node_format, nodesplit,
+)
+from celery.utils.saferepr import saferepr
+
+__all__ = ['Cluster', 'Node']
+
+CELERY_EXE = 'celery'
+
+
+def celery_exe(*args):
+    return ' '.join((CELERY_EXE,) + args)
+
+
+def build_nodename(name, prefix, suffix):
+    hostname = suffix
+    if '@' in name:
+        nodename = host_format(name)
+        shortname, hostname = nodesplit(nodename)
+        name = shortname
+    else:
+        shortname = '%s%s' % (prefix, name)
+        nodename = host_format(
+            '{0}@{1}'.format(shortname, hostname),
+        )
+    return name, nodename, hostname
+
+
+def build_expander(nodename, shortname, hostname):
+    return partial(
+        node_format,
+        name=nodename,
+        N=shortname,
+        d=hostname,
+        h=nodename,
+        i='%i',
+        I='%I',
+    )
+
+
+def format_opt(opt, value):
+    if not value:
+        return opt
+    if opt.startswith('--'):
+        return '{0}={1}'.format(opt, value)
+    return '{0} {1}'.format(opt, value)
+
+
+def _kwargs_to_command_line(kwargs):
+    return {
+        ('--{0}'.format(k.replace('_', '-'))
+         if len(k) > 1 else '-{0}'.format(k)): '{0}'.format(v)
+        for k, v in kwargs.items()
+    }
+
+
+class NamespacedOptionParser(object):
+
+    def __init__(self, args):
+        self.args = args
+        self.options = OrderedDict()
+        self.values = []
+        self.passthrough = ''
+        self.namespaces = defaultdict(lambda: OrderedDict())
+
+    def parse(self):
+        rargs = list(self.args)
+        pos = 0
+        while pos < len(rargs):
+            arg = rargs[pos]
+            if arg == '--':
+                self.passthrough = ' '.join(rargs[pos:])
+                break
+            elif arg[0] == '-':
+                if arg[1] == '-':
+                    self.process_long_opt(arg[2:])
+                else:
+                    value = None
+                    if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
+                        value = rargs[pos + 1]
+                        pos += 1
+                    self.process_short_opt(arg[1:], value)
+            else:
+                self.values.append(arg)
+            pos += 1
+
+    def process_long_opt(self, arg, value=None):
+        if '=' in arg:
+            arg, value = arg.split('=', 1)
+        self.add_option(arg, value, short=False)
+
+    def process_short_opt(self, arg, value=None):
+        self.add_option(arg, value, short=True)
+
+    def optmerge(self, ns, defaults=None):
+        if defaults is None:
+            defaults = self.options
+        return OrderedDict(defaults, **self.namespaces[ns])
+
+    def add_option(self, name, value, short=False, ns=None):
+        prefix = short and '-' or '--'
+        dest = self.options
+        if ':' in name:
+            name, ns = name.split(':')
+            dest = self.namespaces[ns]
+        dest[prefix + name] = value
+
+
+class Node(object):
+    """Represents a node in a cluster."""
+
+    def __init__(self, name,
+                 cmd=None, append=None, options=None, extra_args=None):
+        self.name = name
+        self.cmd = cmd or '-m {0}'.format(celery_exe('worker', '--detach'))
+        self.append = append
+        self.extra_args = extra_args or ''
+        self.options = self._annotate_with_default_opts(
+            options or OrderedDict())
+        self.expander = self._prepare_expander()
+        self.argv = self._prepare_argv()
+        self._pid = None
+
+    def _annotate_with_default_opts(self, options):
+        options['-n'] = self.name
+        self._setdefaultopt(options, ['--pidfile', '-p'], '%n.pid')
+        self._setdefaultopt(options, ['--logfile', '-f'], '%n%I.log')
+        self._setdefaultopt(options, ['--executable'], sys.executable)
+        return options
+
+    def _setdefaultopt(self, d, alt, value):
+        for opt in alt[1:]:
+            try:
+                return d[opt]
+            except KeyError:
+                pass
+        return d.setdefault(alt[0], value)
+
+    def _prepare_expander(self):
+        shortname, hostname = self.name.split('@', 1)
+        return build_expander(
+            self.name, shortname, hostname)
+
+    def _prepare_argv(self):
+        argv = tuple(
+            [self.expander(self.cmd)] +
+            [format_opt(opt, self.expander(value))
+                for opt, value in self.options.items()] +
+            [self.extra_args]
+        )
+        if self.append:
+            argv += (self.expander(self.append),)
+        return argv
+
+    def alive(self):
+        return self.send(0)
+
+    def send(self, sig, on_error=None):
+        pid = self.pid
+        if pid:
+            try:
+                os.kill(pid, sig)
+            except ProcessLookupError:
+                maybe_call(on_error, self)
+                return False
+            return True
+        maybe_call(on_error, self)
+
+    def start(self, env=None, **kwargs):
+        return self._waitexec(
+            self.argv, path=self.executable, env=env, **kwargs)
+
+    def _waitexec(self, argv, path=sys.executable, env=None,
+                  on_spawn=None, on_signalled=None, on_failure=None):
+        argstr = self.prepare_argv(argv, path)
+        maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)
+        pipe = Popen(argstr, env=env)
+        return self.handle_process_exit(
+            pipe.wait(),
+            on_signalled=on_signalled,
+            on_failure=on_failure,
+        )
+
+    def handle_process_exit(self, retcode, on_signalled=None, on_failure=None):
+        if retcode < 0:
+            maybe_call(on_signalled, self, -retcode)
+            return -retcode
+        elif retcode > 0:
+            maybe_call(on_failure, self, retcode)
+        return retcode
+
+    def prepare_argv(self, argv, path):
+        args = ' '.join([path] + list(argv))
+        return shlex.split(from_utf8(args), posix=not IS_WINDOWS)
+
+    def getopt(self, *alt):
+        for opt in alt:
+            try:
+                return self.options[opt]
+            except KeyError:
+                pass
+        raise KeyError(alt[0])
+
+    def __repr__(self):
+        return '<{name}: {0.name}>'.format(self, name=type(self).__name__)
+
+    @cached_property
+    def pidfile(self):
+        return self.expander(self.getopt('--pidfile', '-p'))
+
+    @cached_property
+    def logfile(self):
+        return self.expander(self.getopt('--logfile', '-f'))
+
+    @property
+    def pid(self):
+        if self._pid is not None:
+            return self._pid
+        try:
+            return Pidfile(self.pidfile).read_pid()
+        except ValueError:
+            pass
+
+    @pid.setter
+    def pid(self, value):
+        self._pid = value
+
+    @cached_property
+    def executable(self):
+        return self.options['--executable']
+
+    @cached_property
+    def argv_with_executable(self):
+        return (self.executable,) + self.argv
+
+    @classmethod
+    def from_kwargs(cls, name, **kwargs):
+        return cls(name, options=_kwargs_to_command_line(kwargs))
+
+
+def maybe_call(fun, *args, **kwargs):
+    if fun is not None:
+        fun(*args, **kwargs)
+
+
+class MultiParser(object):
+    Node = Node
+
+    def __init__(self, cmd='celery worker',
+                 append='', prefix='', suffix='',
+                 range_prefix='celery'):
+        self.cmd = cmd
+        self.append = append
+        self.prefix = prefix
+        self.suffix = suffix
+        self.range_prefix = range_prefix
+
+    def parse(self, p):
+        names = p.values
+        options = dict(p.options)
+        ranges = len(names) == 1
+        prefix = self.prefix
+        cmd = options.pop('--cmd', self.cmd)
+        append = options.pop('--append', self.append)
+        hostname = options.pop('--hostname', options.pop('-n', gethostname()))
+        prefix = options.pop('--prefix', prefix) or ''
+        suffix = options.pop('--suffix', self.suffix) or hostname
+        suffix = '' if suffix in ('""', "''") else suffix
+
+        if ranges:
+            try:
+                names, prefix = self._get_ranges(names), self.range_prefix
+            except ValueError:
+                pass
+        self._update_ns_opts(p, names)
+        self._update_ns_ranges(p, ranges)
+
+        return (
+            self._node_from_options(
+                p, name, prefix, suffix, cmd, append, options)
+            for name in names
+        )
+
+    def _node_from_options(self, p, name, prefix,
+                           suffix, cmd, append, options):
+        namespace, nodename, _ = build_nodename(name, prefix, suffix)
+        namespace = nodename if nodename in p.namespaces else namespace
+        return Node(nodename, cmd, append,
+                    p.optmerge(namespace, options), p.passthrough)
+
+    def _get_ranges(self, names):
+        noderange = int(names[0])
+        return [str(n) for n in range(1, noderange + 1)]
+
+    def _update_ns_opts(self, p, names):
+        # Numbers in args always refer to the index in the list of names.
+        # (e.g., `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
+        for ns_name, ns_opts in list(p.namespaces.items()):
+            if ns_name.isdigit():
+                ns_index = int(ns_name) - 1
+                if ns_index < 0:
+                    raise KeyError('Indexes start at 1 got: %r' % (ns_name,))
+                try:
+                    p.namespaces[names[ns_index]].update(ns_opts)
+                except IndexError:
+                    raise KeyError('No node at index %r' % (ns_name,))
+
+    def _update_ns_ranges(self, p, ranges):
+        for ns_name, ns_opts in list(p.namespaces.items()):
+            if ',' in ns_name or (ranges and '-' in ns_name):
+                for subns in self._parse_ns_range(ns_name, ranges):
+                    p.namespaces[subns].update(ns_opts)
+                p.namespaces.pop(ns_name)
+
+    def _parse_ns_range(self, ns, ranges=False):
+        ret = []
+        for space in ',' in ns and ns.split(',') or [ns]:
+            if ranges and '-' in space:
+                start, stop = space.split('-')
+                ret.extend(
+                    str(n) for n in range(int(start), int(stop) + 1)
+                )
+            else:
+                ret.append(space)
+        return ret
+
+
+class Cluster(UserList):
+    """Represent a cluster of workers."""
+
+    def __init__(self, nodes, cmd=None, env=None,
+                 on_stopping_preamble=None,
+                 on_send_signal=None,
+                 on_still_waiting_for=None,
+                 on_still_waiting_progress=None,
+                 on_still_waiting_end=None,
+                 on_node_start=None,
+                 on_node_restart=None,
+                 on_node_shutdown_ok=None,
+                 on_node_status=None,
+                 on_node_signal=None,
+                 on_node_signal_dead=None,
+                 on_node_down=None,
+                 on_child_spawn=None,
+                 on_child_signalled=None,
+                 on_child_failure=None):
+        self.nodes = nodes
+        self.cmd = cmd or celery_exe('worker')
+        self.env = env
+
+        self.on_stopping_preamble = on_stopping_preamble
+        self.on_send_signal = on_send_signal
+        self.on_still_waiting_for = on_still_waiting_for
+        self.on_still_waiting_progress = on_still_waiting_progress
+        self.on_still_waiting_end = on_still_waiting_end
+        self.on_node_start = on_node_start
+        self.on_node_restart = on_node_restart
+        self.on_node_shutdown_ok = on_node_shutdown_ok
+        self.on_node_status = on_node_status
+        self.on_node_signal = on_node_signal
+        self.on_node_signal_dead = on_node_signal_dead
+        self.on_node_down = on_node_down
+        self.on_child_spawn = on_child_spawn
+        self.on_child_signalled = on_child_signalled
+        self.on_child_failure = on_child_failure
+
+    def start(self):
+        return [self.start_node(node) for node in self]
+
+    def start_node(self, node):
+        maybe_call(self.on_node_start, node)
+        retcode = self._start_node(node)
+        maybe_call(self.on_node_status, node, retcode)
+        return retcode
+
+    def _start_node(self, node):
+        return node.start(
+            self.env,
+            on_spawn=self.on_child_spawn,
+            on_signalled=self.on_child_signalled,
+            on_failure=self.on_child_failure,
+        )
+
+    def send_all(self, sig):
+        for node in self.getpids(on_down=self.on_node_down):
+            maybe_call(self.on_node_signal, node, signal_name(sig))
+            node.send(sig, self.on_node_signal_dead)
+
+    def kill(self):
+        return self.send_all(signal.SIGKILL)
+
+    def restart(self, sig=signal.SIGTERM):
+        retvals = []
+
+        def restart_on_down(node):
+            maybe_call(self.on_node_restart, node)
+            retval = self._start_node(node)
+            maybe_call(self.on_node_status, node, retval)
+            retvals.append(retval)
+
+        self._stop_nodes(retry=2, on_down=restart_on_down, sig=sig)
+        return retvals
+
+    def stop(self, retry=None, callback=None, sig=signal.SIGTERM):
+        return self._stop_nodes(retry=retry, on_down=callback, sig=sig)
+
+    def stopwait(self, retry=2, callback=None, sig=signal.SIGTERM):
+        return self._stop_nodes(retry=retry, on_down=callback, sig=sig)
+
+    def _stop_nodes(self, retry=None, on_down=None, sig=signal.SIGTERM):
+        on_down = on_down if on_down is not None else self.on_node_down
+        nodes = list(self.getpids(on_down=on_down))
+        if nodes:
+            for node in self.shutdown_nodes(nodes, sig=sig, retry=retry):
+                maybe_call(on_down, node)
+
+    def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None):
+        P = set(nodes)
+        maybe_call(self.on_stopping_preamble, nodes)
+        to_remove = set()
+        for node in P:
+            maybe_call(self.on_send_signal, node, signal_name(sig))
+            if not node.send(sig, self.on_node_signal_dead):
+                to_remove.add(node)
+                yield node
+        P -= to_remove
+        if retry:
+            maybe_call(self.on_still_waiting_for, P)
+            its = 0
+            while P:
+                to_remove = set()
+                for node in P:
+                    its += 1
+                    maybe_call(self.on_still_waiting_progress, P)
+                    if not node.alive():
+                        maybe_call(self.on_node_shutdown_ok, node)
+                        to_remove.add(node)
+                        yield node
+                        maybe_call(self.on_still_waiting_for, P)
+                        break
+                P -= to_remove
+                if P and not its % len(P):
+                    sleep(float(retry))
+            maybe_call(self.on_still_waiting_end)
+
+    def find(self, name):
+        for node in self:
+            if node.name == name:
+                return node
+        raise KeyError(name)
+
+    def getpids(self, on_down=None):
+        for node in self:
+            if node.pid:
+                yield node
+            else:
+                maybe_call(on_down, node)
+
+    def __repr__(self):
+        return '<{name}({0}): {1}>'.format(
+            len(self), saferepr([n.name for n in self]),
+            name=type(self).__name__,
+        )
+
+    @property
+    def data(self):
+        return self.nodes
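
For orientation, the Node/Cluster API added above can also be driven programmatically. A hedged sketch; the node names and worker options are invented, and ``start()``/``stopwait()`` would actually spawn and signal worker processes:

from celery.apps.multi import Cluster, Node

nodes = [
    Node.from_kwargs('w1@example.com', queues='default', concurrency=2),
    Node.from_kwargs('w2@example.com', queues='images', concurrency=4),
]
cluster = Cluster(nodes, on_node_start=lambda node: print('starting', node.name))

print([node.argv for node in nodes])   # inspect the generated command lines
# cluster.start()                      # would spawn the worker processes
# cluster.stopwait(retry=2)            # ...and this would stop them again
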

+ 41 - 20
celery/apps/worker.py

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-"""This module is the 'program-version' of :mod:`celery.worker`.
+"""Worker command-line program.
+
+This module is the 'program-version' of :mod:`celery.worker`.
 
 
 It does everything necessary to run that module
 It does everything necessary to run that module
 as an actual application, like installing signal handlers,
 as an actual application, like installing signal handlers,
@@ -16,11 +18,15 @@ from functools import partial
 from billiard.process import current_process
 from billiard.process import current_process
 from kombu.utils.encoding import safe_str
 from kombu.utils.encoding import safe_str
 
 
-from celery import VERSION_BANNER, platforms, signals
+from celery import VERSION_BANNER
+from celery import platforms
+from celery import signals
 from celery.app import trace
 from celery.app import trace
 from celery.exceptions import WorkerShutdown, WorkerTerminate
 from celery.exceptions import WorkerShutdown, WorkerTerminate
 from celery.loaders.app import AppLoader
 from celery.loaders.app import AppLoader
 from celery.platforms import EX_FAILURE, EX_OK, check_privileges, isatty
 from celery.platforms import EX_FAILURE, EX_OK, check_privileges, isatty
+from celery.utils import static
+from celery.utils import term
 from celery.utils.debug import cry
 from celery.utils.debug import cry
 from celery.utils.imports import qualname
 from celery.utils.imports import qualname
 from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
 from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
@@ -68,6 +74,7 @@ BANNER = """\
 .> transport:   {conninfo}
 .> transport:   {conninfo}
 .> results:     {results}
 .> results:     {results}
 .> concurrency: {concurrency}
 .> concurrency: {concurrency}
+.> task events: {events}
 
 
 [queues]
 [queues]
 {queues}
 {queues}
@@ -80,8 +87,10 @@ EXTRA_INFO_FMT = """
 
 
 
 
 class Worker(WorkController):
 class Worker(WorkController):
+    """Worker as a program."""
 
 
-    def on_before_init(self, **kwargs):
+    def on_before_init(self, quiet=False, **kwargs):
+        self.quiet = quiet
         trace.setup_worker_optimizations(self.app, self.hostname)
         trace.setup_worker_optimizations(self.app, self.hostname)
 
 
         # this signal can be used to set up configuration for
         # this signal can be used to set up configuration for
@@ -116,12 +125,9 @@ class Worker(WorkController):
 
 
     def on_start(self):
     def on_start(self):
         app = self.app
         app = self.app
-        if not self._custom_logging and self.redirect_stdouts:
-            app.log.redirect_stdouts(self.redirect_stdouts_level)
-
         WorkController.on_start(self)
         WorkController.on_start(self)
 
 
-        # this signal can be used to e.g. change queues after
+        # this signal can be used to, for example, change queues after
         # the -Q option has been applied.
         # the -Q option has been applied.
         signals.celeryd_after_setup.send(
         signals.celeryd_after_setup.send(
             sender=self.hostname, instance=self, conf=app.conf,
             sender=self.hostname, instance=self, conf=app.conf,
@@ -130,18 +136,29 @@ class Worker(WorkController):
         if self.purge:
         if self.purge:
             self.purge_messages()
             self.purge_messages()
 
 
+        if not self.quiet:
+            self.emit_banner()
+
+        self.set_process_status('-active-')
+        self.install_platform_tweaks(self)
+        if not self._custom_logging and self.redirect_stdouts:
+            app.log.redirect_stdouts(self.redirect_stdouts_level)
+
+    def emit_banner(self):
         # Dump configuration to screen so we have some basic information
         # Dump configuration to screen so we have some basic information
         # for when users send bug reports.
         # for when users send bug reports.
+        use_image = term.supports_images()
+        if use_image:
+            print(term.imgcat(static.logo()))
         print(safe_str(''.join([
         print(safe_str(''.join([
-            str(self.colored.cyan(' \n', self.startup_info())),
+            str(self.colored.cyan(
+                ' \n', self.startup_info(artlines=not use_image))),
             str(self.colored.reset(self.extra_info() or '')),
             str(self.colored.reset(self.extra_info() or '')),
         ])), file=sys.__stdout__)
         ])), file=sys.__stdout__)
-        self.set_process_status('-active-')
-        self.install_platform_tweaks(self)
 
 
     def on_consumer_ready(self, consumer):
     def on_consumer_ready(self, consumer):
         signals.worker_ready.send(sender=consumer)
         signals.worker_ready.send(sender=consumer)
-        print('{0} ready.'.format(safe_str(self.hostname),))
+        logger.info('%s ready.', safe_str(self.hostname))
 
 
     def setup_logging(self, colorize=None):
     def setup_logging(self, colorize=None):
         if colorize is None and self.no_color is not None:
         if colorize is None and self.no_color is not None:
@@ -170,7 +187,7 @@ class Worker(WorkController):
             tasklist = self.tasklist(include_builtins=include_builtins)
             return EXTRA_INFO_FMT.format(tasks=tasklist)
 
-    def startup_info(self):
+    def startup_info(self, artlines=True):
         app = self.app
         concurrency = str(self.concurrency)
         appr = '{0}:{1:#x}'.format(app.main or '__main__', id(app))
@@ -179,13 +196,16 @@ class Worker(WorkController):
             if loader.startswith('celery.loaders'):  # pragma: no cover
                 loader = loader[14:]
             appr += ' ({0})'.format(loader)
+        if self.autoscale:
+            max, min = self.autoscale
+            concurrency = '{{min={0}, max={1}}}'.format(min, max)
         pool = self.pool_cls
         if not isinstance(pool, str):
             pool = pool.__module__
         concurrency += ' ({0})'.format(pool.split('.')[-1])
         events = 'ON'
-        if not self.send_events:
-            events = 'OFF (enable -E to monitor this worker)'
+        if not self.task_events:
+            events = 'OFF (enable -E to monitor tasks in this worker)'
 
         banner = BANNER.format(
             app=appr,
@@ -201,11 +221,12 @@ class Worker(WorkController):
         ).splitlines()
 
         # integrate the ASCII art.
-        for i, x in enumerate(banner):
-            try:
-                banner[i] = ' '.join([ARTLINES[i], banner[i]])
-            except IndexError:
-                banner[i] = ' ' * 16 + banner[i]
+        if artlines:
+            for i, _ in enumerate(banner):
+                try:
+                    banner[i] = ' '.join([ARTLINES[i], banner[i]])
+                except IndexError:
+                    banner[i] = ' ' * 16 + banner[i]
         return '\n'.join(banner) + '\n'
 
     def install_platform_tweaks(self, worker):
@@ -231,7 +252,7 @@ class Worker(WorkController):
         install_rdb_handler()
 
     def macOS_proxy_detection_workaround(self):
-        """See https://github.com/celery/celery/issues#issue/161"""
+        """See https://github.com/celery/celery/issues#issue/161."""
         os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')
 
     def set_process_status(self, info):
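
A minimal standalone sketch of the art-merge step in startup_info() above. The ARTLINES and banner contents here are hypothetical placeholders, not the real Celery artwork:

```python
# Hypothetical stand-ins; the real ARTLINES/BANNER live in celery/apps/worker.py.
ARTLINES = ['  .> ', ' (o) ', '  `- ']
banner_lines = [
    'app:         proj:0x7f00deadbeef',
    'transport:   amqp://guest@localhost//',
    'results:     rpc://',
    'concurrency: 8 (prefork)',
]

merged = []
for i, line in enumerate(banner_lines):
    try:
        # pair each banner line with the matching art line ...
        merged.append(' '.join([ARTLINES[i], line]))
    except IndexError:
        # ... and pad with spaces once the art runs out.
        merged.append(' ' * 16 + line)
print('\n'.join(merged))
```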

+ 0 - 59
celery/backends/__init__.py

@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Backend abstract factory (...did I just say that?) and alias definitions."""
-import sys
-import types
-
-from celery.exceptions import ImproperlyConfigured
-from celery._state import current_app
-from celery.utils.imports import symbol_by_name
-
-__all__ = ['get_backend_cls', 'get_backend_by_url']
-
-UNKNOWN_BACKEND = """\
-Unknown result backend: {0!r}.  Did you spell that correctly? ({1!r})\
-"""
-
-BACKEND_ALIASES = {
-    'rpc': 'celery.backends.rpc.RPCBackend',
-    'cache': 'celery.backends.cache:CacheBackend',
-    'redis': 'celery.backends.redis:RedisBackend',
-    'mongodb': 'celery.backends.mongodb:MongoBackend',
-    'db': 'celery.backends.database:DatabaseBackend',
-    'database': 'celery.backends.database:DatabaseBackend',
-    'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend',
-    'cassandra': 'celery.backends.cassandra:CassandraBackend',
-    'couchbase': 'celery.backends.couchbase:CouchbaseBackend',
-    'couchdb': 'celery.backends.couchdb:CouchBackend',
-    'riak': 'celery.backends.riak:RiakBackend',
-    'file': 'celery.backends.filesystem:FilesystemBackend',
-    'disabled': 'celery.backends.base:DisabledBackend',
-    'consul': 'celery.backends.consul:ConsulBackend'
-}
-
-
-def get_backend_cls(backend=None, loader=None):
-    """Get backend class by name/alias"""
-    backend = backend or 'disabled'
-    loader = loader or current_app.loader
-    aliases = dict(BACKEND_ALIASES, **loader.override_backends)
-    try:
-        cls = symbol_by_name(backend, aliases)
-    except ValueError as exc:
-        raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
-            backend, exc)).with_traceback(sys.exc_info()[2])
-    if isinstance(cls, types.ModuleType):
-        raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
-            backend, 'is a Python module, not a backend class.'))
-    return cls
-
-
-def get_backend_by_url(backend=None, loader=None):
-    url = None
-    if backend and '://' in backend:
-        url = backend
-        scheme, _, _ = url.partition('://')
-        if '+' in scheme:
-            backend, url = url.split('+', 1)
-        else:
-            backend = scheme
-    return get_backend_cls(backend, loader), url
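
The scheme handling in the removed get_backend_by_url() helper is easiest to see in isolation. A hedged, self-contained sketch (alias table abbreviated, class resolution replaced by plain string handling):

```python
# Minimal sketch of the URL splitting done by get_backend_by_url().
BACKEND_ALIASES = {
    'rpc': 'celery.backends.rpc.RPCBackend',
    'redis': 'celery.backends.redis:RedisBackend',
    'db': 'celery.backends.database:DatabaseBackend',
}

def split_backend_url(backend):
    url = None
    if backend and '://' in backend:
        url = backend
        scheme, _, _ = url.partition('://')
        if '+' in scheme:
            # e.g. 'db+postgresql://...' -> alias 'db', url 'postgresql://...'
            backend, url = url.split('+', 1)
        else:
            backend = scheme
    return backend, url

print(split_backend_url('redis://localhost:6379/1'))
# ('redis', 'redis://localhost:6379/1')
print(split_backend_url('db+postgresql://scott:tiger@localhost/mydb'))
# ('db', 'postgresql://scott:tiger@localhost/mydb')
```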

+ 49 - 17
celery/backends/async.py

@@ -1,22 +1,29 @@
 """Async I/O backend support utilities."""
 """Async I/O backend support utilities."""
 import socket
 import socket
+import threading
 
 
 from collections import deque
 from collections import deque
 from time import monotonic, sleep
 from time import monotonic, sleep
 from weakref import WeakKeyDictionary
 from weakref import WeakKeyDictionary
 from queue import Empty
 from queue import Empty
 
 
-from kombu.syn import detect_environment
+from kombu.utils.compat import detect_environment
 from kombu.utils.objects import cached_property
 from kombu.utils.objects import cached_property
 
 
 from celery import states
 from celery import states
 from celery.exceptions import TimeoutError
 from celery.exceptions import TimeoutError
+from celery.utils.threads import THREAD_TIMEOUT_MAX
+
+__all__ = [
+    'AsyncBackendMixin', 'BaseResultConsumer', 'Drainer',
+    'register_drainer',
+]
 
 
 drainers = {}
 drainers = {}
 
 
 
 
 def register_drainer(name):
 def register_drainer(name):
-
+    """Decorator used to register a new result drainer type."""
     def _inner(cls):
     def _inner(cls):
         drainers[name] = cls
         drainers[name] = cls
         return cls
         return cls
@@ -25,12 +32,18 @@ def register_drainer(name):
 
 
 @register_drainer('default')
 @register_drainer('default')
 class Drainer:
 class Drainer:
+    """Result draining service."""
 
 
     def __init__(self, result_consumer):
     def __init__(self, result_consumer):
         self.result_consumer = result_consumer
         self.result_consumer = result_consumer
 
 
-    def drain_events_until(self, p, timeout=None, on_interval=None,
-                           monotonic=monotonic, wait=None):
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+    def drain_events_until(self, p, timeout=None, on_interval=None, wait=None):
         wait = wait or self.result_consumer.drain_events
         wait = wait or self.result_consumer.drain_events
         time_start = monotonic()
         time_start = monotonic()
 
 
@@ -54,25 +67,33 @@ class Drainer:
 class greenletDrainer(Drainer):
 class greenletDrainer(Drainer):
     spawn = None
     spawn = None
     _g = None
     _g = None
-    _stopped = False
+
+    def __init__(self, *args, **kwargs):
+        super(greenletDrainer, self).__init__(*args, **kwargs)
+        self._started = threading.Event()
+        self._stopped = threading.Event()
+        self._shutdown = threading.Event()
 
 
     def run(self):
     def run(self):
-        while not self._stopped:
+        self._started.set()
+        while not self._stopped.is_set():
             try:
             try:
                 self.result_consumer.drain_events(timeout=1)
                 self.result_consumer.drain_events(timeout=1)
             except socket.timeout:
             except socket.timeout:
                 pass
                 pass
+        self._shutdown.set()
 
 
     def start(self):
     def start(self):
-        if self._g is None:
+        if not self._started.is_set():
             self._g = self.spawn(self.run)
             self._g = self.spawn(self.run)
+            self._started.wait()
 
 
     def stop(self):
     def stop(self):
-        self._stopped = True
+        self._stopped.set()
+        self._shutdown.wait(THREAD_TIMEOUT_MAX)
 
 
     def wait_for(self, p, wait, timeout=None):
     def wait_for(self, p, wait, timeout=None):
-        if self._g is None:
-            self.start()
+        self.start()
         if not p.ready:
         if not p.ready:
             sleep(0)
             sleep(0)
 
 
@@ -96,6 +117,7 @@ class geventDrainer(greenletDrainer):
 
 
 
 
 class AsyncBackendMixin:
 class AsyncBackendMixin:
+    """Mixin for backends that enables the async API."""
 
 
     def _collect_into(self, result, bucket):
     def _collect_into(self, result, bucket):
         self.result_consumer.buckets[result] = bucket
         self.result_consumer.buckets[result] = bucket
@@ -107,6 +129,8 @@ class AsyncBackendMixin:
         if not results:
         if not results:
             raise StopIteration()
             raise StopIteration()
 
 
+        # we tell the result consumer to put consumed results
+        # into these buckets.
         bucket = deque()
         bucket = deque()
         for node in results:
         for node in results:
             if node._cache:
             if node._cache:
@@ -122,7 +146,9 @@ class AsyncBackendMixin:
             node = bucket.popleft()
             node = bucket.popleft()
             yield node.id, node._cache
             yield node.id, node._cache
 
 
-    def add_pending_result(self, result, weak=False):
+    def add_pending_result(self, result, weak=False, start_drainer=True):
+        if start_drainer:
+            self.result_consumer.drainer.start()
         try:
         try:
             self._maybe_resolve_from_buffer(result)
             self._maybe_resolve_from_buffer(result)
         except Empty:
         except Empty:
@@ -133,13 +159,14 @@ class AsyncBackendMixin:
         result._maybe_set_cache(self._pending_messages.take(result.id))
         result._maybe_set_cache(self._pending_messages.take(result.id))
 
 
     def _add_pending_result(self, task_id, result, weak=False):
     def _add_pending_result(self, task_id, result, weak=False):
-        weak, concrete = self._pending_results
-        if task_id not in weak and result.id not in concrete:
-            (weak if weak else concrete)[task_id] = result
+        concrete, weak_ = self._pending_results
+        if task_id not in weak_ and result.id not in concrete:
+            (weak_ if weak else concrete)[task_id] = result
             self.result_consumer.consume_from(task_id)
             self.result_consumer.consume_from(task_id)
 
 
     def add_pending_results(self, results, weak=False):
     def add_pending_results(self, results, weak=False):
-        return [self.add_pending_result(result, weak=weak)
+        self.result_consumer.drainer.start()
+        return [self.add_pending_result(result, weak=weak, start_drainer=False)
                 for result in results]
                 for result in results]
 
 
     def remove_pending_result(self, result):
     def remove_pending_result(self, result):
@@ -175,6 +202,7 @@ class AsyncBackendMixin:
 
 
 
 
 class BaseResultConsumer:
 class BaseResultConsumer:
+    """Manager responsible for consuming result messages."""
 
 
     def __init__(self, backend, app, accept,
     def __init__(self, backend, app, accept,
                  pending_results, pending_messages):
                  pending_results, pending_messages):
@@ -187,7 +215,7 @@ class BaseResultConsumer:
         self.buckets = WeakKeyDictionary()
         self.buckets = WeakKeyDictionary()
         self.drainer = drainers[detect_environment()](self)
         self.drainer = drainers[detect_environment()](self)
 
 
-    def start(self):
+    def start(self, initial_task_id, **kwargs):
         raise NotImplementedError()
         raise NotImplementedError()
 
 
     def stop(self):
     def stop(self):
@@ -260,7 +288,11 @@ class BaseResultConsumer:
                 result._maybe_set_cache(meta)
                 result._maybe_set_cache(meta)
                 buckets = self.buckets
                 buckets = self.buckets
                 try:
                 try:
-                    buckets.pop(result)
+                    # remove bucket for this result, since it's fulfilled
+                    bucket = buckets.pop(result)
                 except KeyError:
                 except KeyError:
                     pass
                     pass
+                else:
+                    # send to waiter via bucket
+                    bucket.append(result)
         sleep(0)
         sleep(0)
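
For orientation, a rough sketch of how the drainer registry above is meant to be used, mirroring the eventlet/gevent drainers. The environment name and class are made up, and note the module name `async` predates Python 3.7, where it became a keyword:

```python
from celery.backends.async import Drainer, drainers, register_drainer

@register_drainer('my-loop')   # 'my-loop' is a hypothetical environment name
class MyLoopDrainer(Drainer):
    def start(self):
        pass  # hook the result consumer into your own event loop here

    def stop(self):
        pass

# register_drainer() returns the class unchanged and files it under the name.
assert drainers['my-loop'] is MyLoopDrainer
```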

+ 72 - 34
celery/backends/base.py

@@ -24,7 +24,9 @@ from kombu.utils.url import maybe_sanitize_url
 from celery import states
 from celery import states
 from celery import current_app, group, maybe_signature
 from celery import current_app, group, maybe_signature
 from celery.app import current_task
 from celery.app import current_task
-from celery.exceptions import ChordError, TimeoutError, TaskRevokedError
+from celery.exceptions import (
+    ChordError, TimeoutError, TaskRevokedError, ImproperlyConfigured,
+)
 from celery.result import (
 from celery.result import (
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
 )
 )
@@ -49,6 +51,20 @@ pending_results_t = namedtuple('pending_results_t', (
     'concrete', 'weak',
     'concrete', 'weak',
 ))
 ))
 
 
+E_NO_BACKEND = """
+No result backend is configured.
+Please see the documentation for more information.
+"""
+
+E_CHORD_NO_BACKEND = """
+Starting chords requires a result backend to be configured.
+
+Note that a group chained with a task is also upgraded to be a chord,
+as this pattern requires synchronization.
+
+Result backends that support chords: Redis, Database, Memcached, and more.
+"""
+
 
 
 def unpickle_backend(cls, args, kwargs):
 def unpickle_backend(cls, args, kwargs):
     """Return an unpickled backend."""
     """Return an unpickled backend."""
@@ -79,7 +95,7 @@ class Backend:
     supports_native_join = False
     supports_native_join = False
 
 
     #: If true the backend must automatically expire results.
     #: If true the backend must automatically expire results.
-    #: The daily backend_cleanup periodic task will not be triggered
+    #: The daily backend_cleanup periodic task won't be triggered
     #: in this case.
     #: in this case.
     supports_autoexpire = False
     supports_autoexpire = False
 
 
@@ -113,7 +129,7 @@ class Backend:
         self.url = url
         self.url = url
 
 
     def as_uri(self, include_password=False):
     def as_uri(self, include_password=False):
-        """Return the backend as an URI, sanitizing the password or not"""
+        """Return the backend as an URI, sanitizing the password or not."""
         # when using maybe_sanitize_url(), "/" is added
         # when using maybe_sanitize_url(), "/" is added
         # we're stripping it for consistency
         # we're stripping it for consistency
         if include_password:
         if include_password:
@@ -122,7 +138,7 @@ class Backend:
         return url[:-1] if url.endswith(':///') else url
         return url[:-1] if url.endswith(':///') else url
 
 
     def mark_as_started(self, task_id, **meta):
     def mark_as_started(self, task_id, **meta):
-        """Mark a task as started"""
+        """Mark a task as started."""
         return self.store_result(task_id, meta, states.STARTED)
         return self.store_result(task_id, meta, states.STARTED)
 
 
     def mark_as_done(self, task_id, result,
     def mark_as_done(self, task_id, result,
@@ -137,7 +153,7 @@ class Backend:
                         traceback=None, request=None,
                         traceback=None, request=None,
                         store_result=True, call_errbacks=True,
                         store_result=True, call_errbacks=True,
                         state=states.FAILURE):
                         state=states.FAILURE):
-        """Mark task as executed with failure. Stores the exception."""
+        """Mark task as executed with failure."""
         if store_result:
         if store_result:
             self.store_result(task_id, exc, state,
             self.store_result(task_id, exc, state,
                               traceback=traceback, request=request)
                               traceback=traceback, request=request)
@@ -175,22 +191,29 @@ class Backend:
 
 
     def mark_as_retry(self, task_id, exc, traceback=None,
     def mark_as_retry(self, task_id, exc, traceback=None,
                       request=None, store_result=True, state=states.RETRY):
                       request=None, store_result=True, state=states.RETRY):
-        """Mark task as being retries. Stores the current
-        exception (if any)."""
+        """Mark task as being retries.
+
+        Note:
+            Stores the current exception (if any).
+        """
         return self.store_result(task_id, exc, state,
         return self.store_result(task_id, exc, state,
                                  traceback=traceback, request=request)
                                  traceback=traceback, request=request)
 
 
     def chord_error_from_stack(self, callback, exc=None):
     def chord_error_from_stack(self, callback, exc=None):
-        from celery import group
+        # need below import for test for some crazy reason
+        from celery import group  # pylint: disable
         app = self.app
         app = self.app
-        backend = app._tasks[callback.task].backend
+        try:
+            backend = app._tasks[callback.task].backend
+        except KeyError:
+            backend = self
         try:
         try:
             group(
             group(
                 [app.signature(errback)
                 [app.signature(errback)
                  for errback in callback.options.get('link_error') or []],
                  for errback in callback.options.get('link_error') or []],
                 app=app,
                 app=app,
             ).apply_async((callback.id,))
             ).apply_async((callback.id,))
-        except Exception as eb_exc:
+        except Exception as eb_exc:  # pylint: disable=broad-except
             return backend.fail_from_current_stack(callback.id, exc=eb_exc)
             return backend.fail_from_current_stack(callback.id, exc=eb_exc)
         else:
         else:
             return backend.fail_from_current_stack(callback.id, exc=exc)
             return backend.fail_from_current_stack(callback.id, exc=exc)
@@ -203,7 +226,7 @@ class Backend:
             self.mark_as_failure(task_id, exc, ei.traceback)
             self.mark_as_failure(task_id, exc, ei.traceback)
             return ei
             return ei
         finally:
         finally:
-            del(tb)
+            del tb
 
 
     def prepare_exception(self, exc, serializer=None):
     def prepare_exception(self, exc, serializer=None):
         """Prepare exception for serialization."""
         """Prepare exception for serialization."""
@@ -229,9 +252,12 @@ class Backend:
         return result
         return result
 
 
     def encode(self, data):
     def encode(self, data):
-        _, _, payload = dumps(data, serializer=self.serializer)
+        _, _, payload = self._encode(data)
         return payload
         return payload
 
 
+    def _encode(self, data):
+        return dumps(data, serializer=self.serializer)
+
     def meta_from_decoded(self, meta):
     def meta_from_decoded(self, meta):
         if meta['status'] in self.EXCEPTION_STATES:
         if meta['status'] in self.EXCEPTION_STATES:
             meta['result'] = self.exception_to_python(meta['result'])
             meta['result'] = self.exception_to_python(meta['result'])
@@ -358,8 +384,11 @@ class Backend:
         return self._delete_group(group_id)
         return self._delete_group(group_id)
 
 
     def cleanup(self):
     def cleanup(self):
-        """Backend cleanup. Is run by
-        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
+        """Backend cleanup.
+
+        Note:
+            This is run by :class:`celery.task.DeleteExpiredTaskMetaTask`.
+        """
         pass
         pass
 
 
     def process_cleanup(self):
     def process_cleanup(self):
@@ -413,9 +442,13 @@ class SyncBackendMixin:
         )
         )
 
 
     def wait_for_pending(self, result, timeout=None, interval=0.5,
     def wait_for_pending(self, result, timeout=None, interval=0.5,
-                         no_ack=True, on_interval=None, callback=None,
-                         propagate=True):
+                         no_ack=True, on_message=None, on_interval=None,
+                         callback=None, propagate=True):
         self._ensure_not_eager()
         self._ensure_not_eager()
+        if on_message is not None:
+            raise ImproperlyConfigured(
+                'Backend does not support on_message callback')
+
         meta = self.wait_for(
         meta = self.wait_for(
             result.id, timeout=timeout,
             result.id, timeout=timeout,
             interval=interval,
             interval=interval,
@@ -466,7 +499,8 @@ class SyncBackendMixin:
 
 
 
 
 class BaseBackend(Backend, SyncBackendMixin):
 class BaseBackend(Backend, SyncBackendMixin):
-    pass
+    """Base (synchronous) result backend."""
+BaseDictBackend = BaseBackend  # XXX compat
 
 
 
 
 class BaseKeyValueStoreBackend(Backend):
 class BaseKeyValueStoreBackend(Backend):
@@ -529,7 +563,7 @@ class BaseKeyValueStoreBackend(Backend):
         ])
         ])
 
 
     def _strip_prefix(self, key):
     def _strip_prefix(self, key):
-        """Takes bytes, emits string."""
+        """Take bytes: emit string."""
         key = self.key_t(key)
         key = self.key_t(key)
         for prefix in self.task_keyprefix, self.group_keyprefix:
         for prefix in self.task_keyprefix, self.group_keyprefix:
             if key.startswith(prefix):
             if key.startswith(prefix):
@@ -600,9 +634,11 @@ class BaseKeyValueStoreBackend(Backend):
 
 
     def _store_result(self, task_id, result, state,
     def _store_result(self, task_id, result, state,
                       traceback=None, request=None, **kwargs):
                       traceback=None, request=None, **kwargs):
-        meta = {'status': state, 'result': result, 'traceback': traceback,
-                'children': self.current_task_children(request),
-                'task_id': bytes_to_str(task_id)}
+        meta = {
+            'status': state, 'result': result, 'traceback': traceback,
+            'children': self.current_task_children(request),
+            'task_id': bytes_to_str(task_id),
+        }
         self.set(self.get_key_for_task(task_id), self.encode(meta))
         self.set(self.get_key_for_task(task_id), self.encode(meta))
         return result
         return result
 
 
@@ -651,9 +687,9 @@ class BaseKeyValueStoreBackend(Backend):
         key = self.get_key_for_chord(gid)
         key = self.get_key_for_chord(gid)
         try:
         try:
             deps = GroupResult.restore(gid, backend=self)
             deps = GroupResult.restore(gid, backend=self)
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             callback = maybe_signature(request.chord, app=app)
             callback = maybe_signature(request.chord, app=app)
-            logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+            logger.exception('Chord %r raised: %r', gid, exc)
             return self.chord_error_from_stack(
             return self.chord_error_from_stack(
                 callback,
                 callback,
                 ChordError('Cannot restore group: {0!r}'.format(exc)),
                 ChordError('Cannot restore group: {0!r}'.format(exc)),
@@ -663,8 +699,7 @@ class BaseKeyValueStoreBackend(Backend):
                 raise ValueError(gid)
                 raise ValueError(gid)
             except ValueError as exc:
             except ValueError as exc:
                 callback = maybe_signature(request.chord, app=app)
                 callback = maybe_signature(request.chord, app=app)
-                logger.error('Chord callback %r raised: %r', gid, exc,
-                             exc_info=1)
+                logger.exception('Chord callback %r raised: %r', gid, exc)
                 return self.chord_error_from_stack(
                 return self.chord_error_from_stack(
                     callback,
                     callback,
                     ChordError('GroupResult {0} no longer exists'.format(gid)),
                     ChordError('GroupResult {0} no longer exists'.format(gid)),
@@ -680,7 +715,7 @@ class BaseKeyValueStoreBackend(Backend):
             try:
             try:
                 with allow_join_result():
                 with allow_join_result():
                     ret = j(timeout=3.0, propagate=True)
                     ret = j(timeout=3.0, propagate=True)
-            except Exception as exc:
+            except Exception as exc:  # pylint: disable=broad-except
                 try:
                 try:
                     culprit = next(deps._failed_join_report())
                     culprit = next(deps._failed_join_report())
                     reason = 'Dependency {0.id} raised {1!r}'.format(
                     reason = 'Dependency {0.id} raised {1!r}'.format(
@@ -689,13 +724,13 @@ class BaseKeyValueStoreBackend(Backend):
                 except StopIteration:
                 except StopIteration:
                     reason = repr(exc)
                     reason = repr(exc)
 
 
-                logger.error('Chord %r raised: %r', gid, reason, exc_info=1)
+                logger.exception('Chord %r raised: %r', gid, reason)
                 self.chord_error_from_stack(callback, ChordError(reason))
                 self.chord_error_from_stack(callback, ChordError(reason))
             else:
             else:
                 try:
                 try:
                     callback.delay(ret)
                     callback.delay(ret)
-                except Exception as exc:
-                    logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+                except Exception as exc:  # pylint: disable=broad-except
+                    logger.exception('Chord %r raised: %r', gid, exc)
                     self.chord_error_from_stack(
                     self.chord_error_from_stack(
                         callback,
                         callback,
                         ChordError('Callback error: {0!r}'.format(exc)),
                         ChordError('Callback error: {0!r}'.format(exc)),
@@ -708,22 +743,25 @@ class BaseKeyValueStoreBackend(Backend):
 
 
 
 
 class KeyValueStoreBackend(BaseKeyValueStoreBackend, SyncBackendMixin):
 class KeyValueStoreBackend(BaseKeyValueStoreBackend, SyncBackendMixin):
-    pass
+    """Result backend base class for key/value stores."""
 
 
 
 
 class DisabledBackend(BaseBackend):
 class DisabledBackend(BaseBackend):
+    """Dummy result backend."""
+
     _cache = {}   # need this attribute to reset cache in tests.
     _cache = {}   # need this attribute to reset cache in tests.
 
 
     def store_result(self, *args, **kwargs):
     def store_result(self, *args, **kwargs):
         pass
         pass
 
 
+    def apply_chord(self, *args, **kwargs):
+        raise NotImplementedError(E_CHORD_NO_BACKEND.strip())
+
     def _is_disabled(self, *args, **kwargs):
     def _is_disabled(self, *args, **kwargs):
-        raise NotImplementedError(
-            'No result backend configured.  '
-            'Please see the documentation for more information.')
+        raise NotImplementedError(E_NO_BACKEND.strip())
 
 
     def as_uri(self, *args, **kwargs):
     def as_uri(self, *args, **kwargs):
         return 'disabled://'
         return 'disabled://'
 
 
     get_state = get_result = get_traceback = _is_disabled
     get_state = get_result = get_traceback = _is_disabled
-    wait_for = get_many = _is_disabled
+    get_task_meta_for = wait_for = get_many = _is_disabled
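
A hedged illustration of the situation E_CHORD_NO_BACKEND warns about: with no result backend configured, both an explicit chord and a group chained into a task end up in DisabledBackend.apply_chord() and raise. The app and task names below are hypothetical:

```python
from celery import Celery, chord, group

app = Celery('proj', broker='pyamqp://guest@localhost//')  # note: no result backend

@app.task
def add(x, y):
    return x + y

@app.task
def tsum(numbers):
    return sum(numbers)

# Both forms need a backend that supports chords (Redis, database, ...),
# so with DisabledBackend they are expected to raise NotImplementedError:
chord([add.s(2, 2), add.s(4, 4)])(tsum.s())            # explicit chord
(group(add.s(2, 2), add.s(4, 4)) | tsum.s()).delay()   # group | task upgrade
```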

+ 3 - 0
celery/backends/cache.py

@@ -38,6 +38,8 @@ def import_best_memcache():
 
 
 
 
 def get_best_memcache(*args, **kwargs):
 def get_best_memcache(*args, **kwargs):
+    # pylint: disable=unpacking-non-sequence
+    #   This is most definitely a sequence, but pylint thinks it's not.
     is_pylibmc, memcache, key_t = import_best_memcache()
     is_pylibmc, memcache, key_t = import_best_memcache()
     Client = _Client = memcache.Client
     Client = _Client = memcache.Client
 
 
@@ -80,6 +82,7 @@ backends = {
 
 
 
 
 class CacheBackend(KeyValueStoreBackend):
 class CacheBackend(KeyValueStoreBackend):
+    """Cache result backend."""
 
 
     servers = None
     servers = None
     supports_autoexpire = True
     supports_autoexpire = True
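
For context, the usual way this backend gets selected (the memcached URL form; the server address is illustrative, and pylibmc-before-python-memcached is what import_best_memcache() above appears to prefer):

```python
from celery import Celery

app = Celery('proj', broker='pyamqp://guest@localhost//')
# 'cache+memcached://host:port/' selects CacheBackend with a memcached client.
app.conf.result_backend = 'cache+memcached://127.0.0.1:11211/'
```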

+ 6 - 6
celery/backends/cassandra.py

@@ -19,7 +19,7 @@ logger = get_logger(__name__)
 
 
 E_NO_CASSANDRA = """
 E_NO_CASSANDRA = """
 You need to install the cassandra-driver library to
 You need to install the cassandra-driver library to
-use the Cassandra backend. See https://github.com/datastax/python-driver
+use the Cassandra backend.  See https://github.com/datastax/python-driver
 """
 """
 
 
 E_NO_SUCH_CASSANDRA_AUTH_PROVIDER = """
 E_NO_SUCH_CASSANDRA_AUTH_PROVIDER = """
@@ -62,7 +62,7 @@ def buf_t(x):
 
 
 
 
 class CassandraBackend(BaseBackend):
 class CassandraBackend(BaseBackend):
-    """Cassandra backend utilizing DataStax driver
+    """Cassandra backend utilizing DataStax driver.
 
 
     Raises:
     Raises:
         celery.exceptions.ImproperlyConfigured:
         celery.exceptions.ImproperlyConfigured:
@@ -128,7 +128,7 @@ class CassandraBackend(BaseBackend):
         self._session = None
         self._session = None
 
 
     def _get_connection(self, write=False):
     def _get_connection(self, write=False):
-        """Prepare the connection for action
+        """Prepare the connection for action.
 
 
         Arguments:
         Arguments:
             write (bool): are we a writer?
             write (bool): are we a writer?
@@ -141,8 +141,8 @@ class CassandraBackend(BaseBackend):
                 auth_provider=self.auth_provider)
                 auth_provider=self.auth_provider)
             self._session = self._connection.connect(self.keyspace)
             self._session = self._connection.connect(self.keyspace)
 
 
-            # We are forced to do concatenation below, as formatting would
-            # blow up on superficial %s that will be processed by Cassandra
+            # We're forced to do concatenation below, as formatting would
+            # blow up on superficial %s that'll be processed by Cassandra
             self._write_stmt = cassandra.query.SimpleStatement(
             self._write_stmt = cassandra.query.SimpleStatement(
                 Q_INSERT_RESULT.format(
                 Q_INSERT_RESULT.format(
                     table=self.table, expires=self.cqlexpires),
                     table=self.table, expires=self.cqlexpires),
@@ -156,7 +156,7 @@ class CassandraBackend(BaseBackend):
 
 
             if write:
             if write:
                 # Only possible writers "workers" are allowed to issue
                 # Only possible writers "workers" are allowed to issue
-                # CREATE TABLE. This is to prevent conflicting situations
+                # CREATE TABLE.  This is to prevent conflicting situations
                 # where both task-creator and task-executor would issue it
                 # where both task-creator and task-executor would issue it
                 # at the same time.
                 # at the same time.
 
 

+ 2 - 1
celery/backends/consul.py

@@ -26,6 +26,7 @@ the Consul result store backend."""
 
 
 class ConsulBackend(KeyValueStoreBackend):
 class ConsulBackend(KeyValueStoreBackend):
     """Consul.io K/V store backend for Celery."""
     """Consul.io K/V store backend for Celery."""
+
     consul = consul
     consul = consul
 
 
     supports_autoexpire = True
     supports_autoexpire = True
@@ -67,7 +68,7 @@ class ConsulBackend(KeyValueStoreBackend):
             yield self.get(key)
             yield self.get(key)
 
 
     def set(self, key, value):
     def set(self, key, value):
-        """Set a key in Consul
+        """Set a key in Consul.
 
 
         Before creating the key it will create a session inside Consul
         Before creating the key it will create a session inside Consul
         where it creates a session with a TTL
         where it creates a session with a TTL

+ 2 - 1
celery/backends/database/__init__.py

@@ -50,7 +50,7 @@ def retry(fun):
                 return fun(*args, **kwargs)
                 return fun(*args, **kwargs)
             except (DatabaseError, InvalidRequestError, StaleDataError):
             except (DatabaseError, InvalidRequestError, StaleDataError):
                 logger.warning(
                 logger.warning(
-                    'Failed operation %s. Retrying %s more times.',
+                    'Failed operation %s.  Retrying %s more times.',
                     fun.__name__, max_retries - retries - 1,
                     fun.__name__, max_retries - retries - 1,
                     exc_info=True)
                     exc_info=True)
                 if retries + 1 >= max_retries:
                 if retries + 1 >= max_retries:
@@ -61,6 +61,7 @@ def retry(fun):
 
 
 class DatabaseBackend(BaseBackend):
 class DatabaseBackend(BaseBackend):
     """The database result backend."""
     """The database result backend."""
+
     # ResultSet.iterate should sleep this much between each pool,
     # ResultSet.iterate should sleep this much between each pool,
     # to not bombard the database with queries.
     # to not bombard the database with queries.
     subpolling_interval = 0.5
     subpolling_interval = 0.5

+ 1 - 1
celery/backends/database/models.py

@@ -44,7 +44,7 @@ class Task(ResultModelBase):
 
 
 
 
 class TaskSet(ResultModelBase):
 class TaskSet(ResultModelBase):
-    """TaskSet result"""
+    """TaskSet result."""
 
 
     __tablename__ = 'celery_tasksetmeta'
     __tablename__ = 'celery_tasksetmeta'
     __table_args__ = {'sqlite_autoincrement': True}
     __table_args__ = {'sqlite_autoincrement': True}

+ 1 - 0
celery/backends/database/session.py

@@ -17,6 +17,7 @@ def _after_fork_cleanup_session(session):
 
 
 
 
 class SessionManager:
 class SessionManager:
+    """Manage SQLAlchemy sessions."""
 
 
     def __init__(self):
     def __init__(self):
         self._engines = {}
         self._engines = {}

+ 1 - 1
celery/backends/filesystem.py

@@ -48,7 +48,7 @@ class FilesystemBackend(KeyValueStoreBackend):
         self.open = open
         self.open = open
         self.unlink = unlink
         self.unlink = unlink
 
 
-        # Lets verify that we have everything setup right
+        # Let's verify that we have everything set up right
         self._do_directory_test(b'.fs-backend-' + uuid().encode(encoding))
         self._do_directory_test(b'.fs-backend-' + uuid().encode(encoding))
 
 
     def _find_path(self, url):
     def _find_path(self, url):

+ 8 - 6
celery/backends/mongodb.py

@@ -93,7 +93,7 @@ class MongoBackend(BaseBackend):
             if not isinstance(config, dict):
             if not isinstance(config, dict):
                 raise ImproperlyConfigured(
                 raise ImproperlyConfigured(
                     'MongoDB backend settings should be grouped in a dict')
                     'MongoDB backend settings should be grouped in a dict')
-            config = dict(config)  # do not modify original
+            config = dict(config)  # don't modify original
 
 
             if 'host' in config or 'port' in config:
             if 'host' in config or 'port' in config:
                 # these should take over uri conf
                 # these should take over uri conf
@@ -131,7 +131,7 @@ class MongoBackend(BaseBackend):
             if not host:
             if not host:
                 # The first pymongo.Connection() argument (host) can be
                 # The first pymongo.Connection() argument (host) can be
                 # a list of ['host:port'] elements or a mongodb connection
                 # a list of ['host:port'] elements or a mongodb connection
-                # URI. If this is the case, don't use self.port
+                # URI.  If this is the case, don't use self.port
                 # but let pymongo get the port(s) from the URI instead.
                 # but let pymongo get the port(s) from the URI instead.
                 # This enables the use of replica sets and sharding.
                 # This enables the use of replica sets and sharding.
                 # See pymongo.Connection() for more info.
                 # See pymongo.Connection() for more info.
@@ -255,8 +255,10 @@ class MongoBackend(BaseBackend):
 
 
     @cached_property
     @cached_property
     def database(self):
     def database(self):
-        """Get database from MongoDB connection and perform authentication
-        if necessary."""
+        """Get database from MongoDB connection.
+
+        Performs authentication if necessary.
+        """
         return self._get_database()
         return self._get_database()
 
 
     @cached_property
     @cached_property
@@ -265,7 +267,7 @@ class MongoBackend(BaseBackend):
         collection = self.database[self.taskmeta_collection]
         collection = self.database[self.taskmeta_collection]
 
 
         # Ensure an index on date_done is there, if not process the index
         # Ensure an index on date_done is there, if not process the index
-        # in the background. Once completed cleanup will be much faster
+        # in the background.  Once completed cleanup will be much faster
         collection.ensure_index('date_done', background='true')
         collection.ensure_index('date_done', background='true')
         return collection
         return collection
 
 
@@ -275,7 +277,7 @@ class MongoBackend(BaseBackend):
         collection = self.database[self.groupmeta_collection]
         collection = self.database[self.groupmeta_collection]
 
 
         # Ensure an index on date_done is there, if not process the index
         # Ensure an index on date_done is there, if not process the index
-        # in the background. Once completed cleanup will be much faster
+        # in the background.  Once completed cleanup will be much faster
         collection.ensure_index('date_done', background='true')
         collection.ensure_index('date_done', background='true')
         return collection
         return collection
 
 

+ 24 - 22
celery/backends/redis.py

@@ -12,32 +12,28 @@ from celery.canvas import maybe_signature
 from celery.exceptions import ChordError, ImproperlyConfigured
 from celery.exceptions import ChordError, ImproperlyConfigured
 from celery.utils.functional import dictfilter
 from celery.utils.functional import dictfilter
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
-from celery.utils.timeutils import humanize_seconds
+from celery.utils.time import humanize_seconds
 
 
 from . import async
 from . import async
 from . import base
 from . import base
 
 
 try:
 try:
     import redis
     import redis
-    from redis.exceptions import ConnectionError
     from kombu.transport.redis import get_redis_error_classes
     from kombu.transport.redis import get_redis_error_classes
 except ImportError:                 # pragma: no cover
 except ImportError:                 # pragma: no cover
     redis = None                    # noqa
     redis = None                    # noqa
-    ConnectionError = None          # noqa
     get_redis_error_classes = None  # noqa
     get_redis_error_classes = None  # noqa
 
 
 __all__ = ['RedisBackend']
 __all__ = ['RedisBackend']
 
 
-REDIS_MISSING = """\
+E_REDIS_MISSING = """
 You need to install the redis library in order to use \
 You need to install the redis library in order to use \
-the Redis result store backend."""
-
-E_LOST = """\
-Connection to Redis lost: Retry (%s/%s) %s.\
+the Redis result store backend.
 """
 """
 
 
+E_LOST = 'Connection to Redis lost: Retry (%s/%s) %s.'
+
 logger = get_logger(__name__)
 logger = get_logger(__name__)
-error = logger.error
 
 
 
 
 class ResultConsumer(async.BaseResultConsumer):
 class ResultConsumer(async.BaseResultConsumer):
@@ -50,7 +46,7 @@ class ResultConsumer(async.BaseResultConsumer):
         self._decode_result = self.backend.decode_result
         self._decode_result = self.backend.decode_result
         self.subscribed_to = set()
         self.subscribed_to = set()
 
 
-    def start(self, initial_task_id):
+    def start(self, initial_task_id, **kwargs):
         self._pubsub = self.backend.client.pubsub(
         self._pubsub = self.backend.client.pubsub(
             ignore_subscribe_messages=True,
             ignore_subscribe_messages=True,
         )
         )
@@ -108,7 +104,7 @@ class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin):
         super().__init__(expires_type=int, **kwargs)
         super().__init__(expires_type=int, **kwargs)
         _get = self.app.conf.get
         _get = self.app.conf.get
         if self.redis is None:
         if self.redis is None:
-            raise ImproperlyConfigured(REDIS_MISSING)
+            raise ImproperlyConfigured(E_REDIS_MISSING.strip())
 
 
         if host and '://' in host:
         if host and '://' in host:
             url, host = host, None
             url, host = host, None
@@ -140,7 +136,7 @@ class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin):
         )
         )
 
 
     def _params_from_url(self, url, defaults):
     def _params_from_url(self, url, defaults):
-        scheme, host, port, user, password, path, query = _parse_url(url)
+        scheme, host, port, _, password, path, query = _parse_url(url)
         connparams = dict(
         connparams = dict(
             defaults, **dictfilter({
             defaults, **dictfilter({
                 'host': host, 'port': port, 'password': password,
                 'host': host, 'port': port, 'password': password,
@@ -189,8 +185,9 @@ class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin):
 
 
     def on_connection_error(self, max_retries, exc, intervals, retries):
     def on_connection_error(self, max_retries, exc, intervals, retries):
         tts = next(intervals)
         tts = next(intervals)
-        error(E_LOST, retries, max_retries or 'Inf',
-              humanize_seconds(tts, 'in '))
+        logger.error(
+            E_LOST.strip(),
+            retries, max_retries or 'Inf', humanize_seconds(tts, 'in '))
         return tts
         return tts
 
 
     def set(self, key, value, **retry_policy):
     def set(self, key, value, **retry_policy):
@@ -229,11 +226,16 @@ class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin):
 
 
     def apply_chord(self, header, partial_args, group_id, body,
     def apply_chord(self, header, partial_args, group_id, body,
                     result=None, options={}, **kwargs):
                     result=None, options={}, **kwargs):
-        # avoids saving the group in the redis db.
+        # Overrides this to avoid calling GroupResult.save
+        # pylint: disable=method-hidden
+        # Note that KeyValueStoreBackend.__init__ sets self.apply_chord
+        # if the implements_incr attr is set.  Redis backend doesn't set
+        # this flag.
         options['task_id'] = group_id
         options['task_id'] = group_id
         return header(*partial_args, **options or {})
         return header(*partial_args, **options or {})
 
 
-    def on_chord_part_return(self, request, state, result, propagate=None):
+    def on_chord_part_return(self, request, state, result,
+                             propagate=None, **kwargs):
         app = self.app
         app = self.app
         tid, gid = request.id, request.group
         tid, gid = request.id, request.group
         if not gid or not tid:
         if not gid or not tid:
@@ -267,18 +269,18 @@ class RedisBackend(base.BaseKeyValueStoreBackend, async.AsyncBackendMixin):
                         .execute()
                         .execute()
                 try:
                 try:
                     callback.delay([unpack(tup, decode) for tup in resl])
                     callback.delay([unpack(tup, decode) for tup in resl])
-                except Exception as exc:
-                    error('Chord callback for %r raised: %r',
-                          request.group, exc, exc_info=1)
+                except Exception as exc:  # pylint: disable=broad-except
+                    logger.exception(
+                        'Chord callback for %r raised: %r', request.group, exc)
                     return self.chord_error_from_stack(
                     return self.chord_error_from_stack(
                         callback,
                         callback,
                         ChordError('Callback error: {0!r}'.format(exc)),
                         ChordError('Callback error: {0!r}'.format(exc)),
                     )
                     )
         except ChordError as exc:
         except ChordError as exc:
-            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+            logger.exception('Chord %r raised: %r', request.group, exc)
             return self.chord_error_from_stack(callback, exc)
             return self.chord_error_from_stack(callback, exc)
-        except Exception as exc:
-            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+        except Exception as exc:  # pylint: disable=broad-except
+            logger.exception('Chord %r raised: %r', request.group, exc)
             return self.chord_error_from_stack(
             return self.chord_error_from_stack(
                 callback,
                 callback,
                 ChordError('Join error: {0!r}'.format(exc)),
                 ChordError('Join error: {0!r}'.format(exc)),
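
For orientation, the URL form that _params_from_url() above picks apart, shown in a typical configuration (values are illustrative):

```python
from celery import Celery

# 'redis://[:password@]host:port/db' -- host, port, password and the
# database number are what _params_from_url() extracts into connparams.
app = Celery(
    'proj',
    broker='pyamqp://guest@localhost//',
    backend='redis://:s3cret@localhost:6379/1',
)
```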

+ 5 - 3
celery/backends/riak.py

@@ -13,6 +13,8 @@ from celery.exceptions import ImproperlyConfigured
 
 
 from .base import KeyValueStoreBackend
 from .base import KeyValueStoreBackend
 
 
+__all__ = ['RiakBackend']
+
 E_BUCKET_NAME = """\
 E_BUCKET_NAME = """\
 Riak bucket names must be composed of ASCII characters only, not: {0!r}\
 Riak bucket names must be composed of ASCII characters only, not: {0!r}\
 """
 """
@@ -55,7 +57,7 @@ class RiakBackend(KeyValueStoreBackend):
     #: default Riak server port (8087)
     #: default Riak server port (8087)
     port = 8087
     port = 8087
 
 
-    # supports_autoexpire = False
+    _bucket = None
 
 
     def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
     def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
                  url=None, *args, **kwargs):
                  url=None, *args, **kwargs):
@@ -67,9 +69,9 @@ class RiakBackend(KeyValueStoreBackend):
                 'You need to install the riak library to use the '
                 'You need to install the riak library to use the '
                 'Riak backend.')
                 'Riak backend.')
 
 
-        uhost = uport = uname = upass = ubucket = None
+        uhost = uport = upass = ubucket = None
         if url:
         if url:
-            uprot, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
+            _, uhost, uport, _, upass, ubucket, _ = _parse_url(url)
             if ubucket:
             if ubucket:
                 ubucket = ubucket.strip('/')
                 ubucket = ubucket.strip('/')
 
 

+ 93 - 67
celery/backends/rpc.py

@@ -3,7 +3,9 @@
 
 
 RPC-style result backend, using reply-to and one queue per client.
 RPC-style result backend, using reply-to and one queue per client.
 """
 """
-from kombu import Consumer, Exchange, Producer, Queue
+import kombu
+import time
+
 from kombu.common import maybe_declare
 from kombu.common import maybe_declare
 from kombu.utils.compat import register_after_fork
 from kombu.utils.compat import register_after_fork
 from kombu.utils.objects import cached_property
 from kombu.utils.objects import cached_property
@@ -11,29 +13,23 @@ from kombu.utils.objects import cached_property
 from celery import current_task
 from celery import current_task
 from celery import states
 from celery import states
 from celery._state import task_join_will_block
 from celery._state import task_join_will_block
-from celery.utils.functional import dictfilter
-from celery.utils.timeutils import maybe_s_to_ms
 
 
 from . import base
 from . import base
 from .async import AsyncBackendMixin, BaseResultConsumer
 from .async import AsyncBackendMixin, BaseResultConsumer
 
 
-__all__ = ['BacklogLimitExceeded', 'BaseRPCBackend', 'RPCBackend']
+__all__ = ['BacklogLimitExceeded', 'RPCBackend']
 
 
 
 
 class BacklogLimitExceeded(Exception):
 class BacklogLimitExceeded(Exception):
     """Too much state history to fast-forward."""
     """Too much state history to fast-forward."""
 
 
 
 
-class NoCacheQueue(Queue):
-    can_cache_declaration = False
-
-
 def _on_after_fork_cleanup_backend(backend):
 def _on_after_fork_cleanup_backend(backend):
     backend._after_fork()
     backend._after_fork()
 
 
 
 
 class ResultConsumer(BaseResultConsumer):
 class ResultConsumer(BaseResultConsumer):
-    Consumer = Consumer
+    Consumer = kombu.Consumer
 
 
     _connection = None
     _connection = None
     _consumer = None
     _consumer = None
@@ -42,7 +38,7 @@ class ResultConsumer(BaseResultConsumer):
         super().__init__(*args, **kwargs)
         super().__init__(*args, **kwargs)
         self._create_binding = self.backend._create_binding
         self._create_binding = self.backend._create_binding
 
 
-    def start(self, initial_task_id, no_ack=True):
+    def start(self, initial_task_id, no_ack=True, **kwargs):
         self._connection = self.app.connection()
         self._connection = self.app.connection()
         initial_queue = self._create_binding(initial_task_id)
         initial_queue = self._create_binding(initial_task_id)
         self._consumer = self.Consumer(
         self._consumer = self.Consumer(
@@ -52,7 +48,10 @@ class ResultConsumer(BaseResultConsumer):
         self._consumer.consume()
         self._consumer.consume()
 
 
     def drain_events(self, timeout=None):
     def drain_events(self, timeout=None):
-        return self._connection.drain_events(timeout=timeout)
+        if self._connection:
+            return self._connection.drain_events(timeout=timeout)
+        elif timeout:
+            time.sleep(timeout)
 
 
     def stop(self):
     def stop(self):
         try:
         try:
@@ -79,17 +78,17 @@ class ResultConsumer(BaseResultConsumer):
             self._consumer.cancel_by_queue(self._create_binding(task_id).name)
             self._consumer.cancel_by_queue(self._create_binding(task_id).name)
 
 
 
 
-class BaseRPCBackend(base.Backend, AsyncBackendMixin):
+class RPCBackend(base.Backend, AsyncBackendMixin):
+    """Base class for the RPC result backend."""
 
 
-    Exchange = Exchange
-    Queue = NoCacheQueue
-    Consumer = Consumer
-    Producer = Producer
+    Exchange = kombu.Exchange
+    Producer = kombu.Producer
     ResultConsumer = ResultConsumer
     ResultConsumer = ResultConsumer
 
 
+    #: Exception raised when there are too many messages for a task id.
     BacklogLimitExceeded = BacklogLimitExceeded
     BacklogLimitExceeded = BacklogLimitExceeded
 
 
-    persistent = True
+    persistent = False
     supports_autoexpire = True
     supports_autoexpire = True
     supports_native_join = True
     supports_native_join = True
 
 
@@ -100,6 +99,16 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
         'interval_max': 1,
         'interval_max': 1,
     }
     }
 
 
+    class Consumer(kombu.Consumer):
+        """Consumer that requires manual declaration of queues."""
+
+        auto_declare = False
+
+    class Queue(kombu.Queue):
+        """Queue that never caches declaration."""
+
+        can_cache_declaration = False
+
     def __init__(self, app, connection=None, exchange=None, exchange_type=None,
     def __init__(self, app, connection=None, exchange=None, exchange_type=None,
                  persistent=None, serializer=None, auto_delete=True, **kwargs):
                  persistent=None, serializer=None, auto_delete=True, **kwargs):
         super().__init__(app, **kwargs)
         super().__init__(app, **kwargs)
@@ -115,9 +124,6 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
         )
         )
         self.serializer = serializer or conf.result_serializer
         self.serializer = serializer or conf.result_serializer
         self.auto_delete = auto_delete
         self.auto_delete = auto_delete
-        self.queue_arguments = dictfilter({
-            'x-expires': maybe_s_to_ms(self.expires),
-        })
         self.result_consumer = self.ResultConsumer(
         self.result_consumer = self.ResultConsumer(
             self, self.app, self.accept,
             self, self.app, self.accept,
             self._pending_results, self._pending_messages,
             self._pending_results, self._pending_messages,
@@ -126,9 +132,56 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             register_after_fork(self, _on_after_fork_cleanup_backend)
             register_after_fork(self, _on_after_fork_cleanup_backend)
 
 
     def _after_fork(self):
     def _after_fork(self):
+        # clear state for child processes.
         self._pending_results.clear()
         self._pending_results.clear()
         self.result_consumer._after_fork()
         self.result_consumer._after_fork()
 
 
+    def _create_exchange(self, name, type='direct', delivery_mode=2):
+        # uses direct to queue routing (anon exchange).
+        return self.Exchange(None)
+
+    def _create_binding(self, task_id):
+        """Create new binding for task with id."""
+        # RPC backend caches the binding, as one queue is used for all tasks.
+        return self.binding
+
+    def on_task_call(self, producer, task_id):
+        # Called every time a task is sent when using this backend.
+        # We declare the queue we receive replies on in advance of sending
+        # the message, but we skip this if running in the prefork pool
+        # (task_join_will_block), as we know the queue is already declared.
+        if not task_join_will_block():
+            maybe_declare(self.binding(producer.channel), retry=True)
+
+    def destination_for(self, task_id, request):
+        """Get the destination for result by task id.
+
+        Returns:
+            Tuple[str, str]: tuple of ``(reply_to, correlation_id)``.
+        """
+        # Backends didn't always receive the `request`, so we must still
+        # support old code that relies on current_task.
+        try:
+            request = request or current_task.request
+        except AttributeError:
+            raise RuntimeError(
+                'RPC backend missing task request for {0!r}'.format(task_id))
+        return request.reply_to, request.correlation_id or task_id
+
+    def on_reply_declare(self, task_id):
+        # Return value here is used as the `declare=` argument
+        # for Producer.publish.
+        # By default we don't have to declare anything when sending a result.
+        pass
+
+    def on_result_fulfilled(self, result):
+        # This usually cancels the queue after the result is received,
+        # but we don't have to cancel since we have one queue per process.
+        pass
+
+    def as_uri(self, include_password=True):
+        return 'rpc://'
+
     def store_result(self, task_id, result, state,
     def store_result(self, task_id, result, state,
                      traceback=None, request=None, **kwargs):
                      traceback=None, request=None, **kwargs):
         """Send task return value and state."""
         """Send task return value and state."""
@@ -137,10 +190,7 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             return
             return
         with self.app.amqp.producer_pool.acquire(block=True) as producer:
         with self.app.amqp.producer_pool.acquire(block=True) as producer:
             producer.publish(
             producer.publish(
-                {'task_id': task_id, 'status': state,
-                 'result': self.encode_result(result, state),
-                 'traceback': traceback,
-                 'children': self.current_task_children(request)},
+                self._to_result(task_id, state, result, traceback, request),
                 exchange=self.exchange,
                 exchange=self.exchange,
                 routing_key=routing_key,
                 routing_key=routing_key,
                 correlation_id=correlation_id,
                 correlation_id=correlation_id,
@@ -151,7 +201,20 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             )
             )
         return result
         return result
 
 
+    def _to_result(self, task_id, state, result, traceback, request):
+        return {
+            'task_id': task_id,
+            'status': state,
+            'result': self.encode_result(result, state),
+            'traceback': traceback,
+            'children': self.current_task_children(request),
+        }
+
     def on_out_of_band_result(self, task_id, message):
     def on_out_of_band_result(self, task_id, message):
+        # Callback called when a reply for a task is received,
+        # but we have no idea what to do with it.
+        # Since the result is not pending, we put it in a separate
+        # buffer: probably it will become pending later.
         if self.result_consumer:
         if self.result_consumer:
             self.result_consumer.on_out_of_band_result(message)
             self.result_consumer.on_out_of_band_result(message)
         self._out_of_band[task_id] = message
         self._out_of_band[task_id] = message
@@ -168,7 +231,7 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             tid = self._get_message_task_id(acc)
             tid = self._get_message_task_id(acc)
             prev, latest_by_id[tid] = latest_by_id.get(tid), acc
             prev, latest_by_id[tid] = latest_by_id.get(tid), acc
             if prev:
             if prev:
-                # backends are not expected to keep history,
+                # backends aren't expected to keep history,
                 # so we delete everything except the most recent state.
                 # so we delete everything except the most recent state.
                 prev.ack()
                 prev.ack()
                 prev = None
                 prev = None
@@ -199,7 +262,7 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             binding = self._create_binding(task_id)(channel)
             binding = self._create_binding(task_id)(channel)
             binding.declare()
             binding.declare()
 
 
-            for i in range(limit):
+            for _ in range(limit):
                 msg = binding.get(accept=accept, no_ack=no_ack)
                 msg = binding.get(accept=accept, no_ack=no_ack)
                 if not msg:
                 if not msg:
                     break
                     break
@@ -252,53 +315,16 @@ class BaseRPCBackend(base.Backend, AsyncBackendMixin):
             expires=self.expires,
             expires=self.expires,
         ))
         ))
 
 
-
-class RPCBackend(BaseRPCBackend):
-    persistent = False
-
-    class Consumer(Consumer):
-        auto_declare = False
-
-    def _create_exchange(self, name, type='direct', delivery_mode=2):
-        # uses direct to queue routing (anon exchange).
-        return Exchange(None)
-
-    def _create_binding(self, task_id):
-        return self.binding
-
-    def on_task_call(self, producer, task_id):
-        if not task_join_will_block():
-            maybe_declare(self.binding(producer.channel), retry=True)
-
-    def rkey(self, task_id):
-        return task_id
-
-    def destination_for(self, task_id, request):
-        # Request is a new argument for backends, so must still support
-        # old code that rely on current_task
-        try:
-            request = request or current_task.request
-        except AttributeError:
-            raise RuntimeError(
-                'RPC backend missing task request for {0!r}'.format(task_id))
-        return request.reply_to, request.correlation_id or task_id
-
-    def on_reply_declare(self, task_id):
-        pass
-
-    def on_result_fulfilled(self, result):
-        pass
-
-    def as_uri(self, include_password=True):
-        return 'rpc://'
-
     @property
     @property
     def binding(self):
     def binding(self):
         return self.Queue(
         return self.Queue(
             self.oid, self.exchange, self.oid,
             self.oid, self.exchange, self.oid,
-            durable=False, auto_delete=True
+            durable=False,
+            auto_delete=True,
+            expires=self.expires,
         )
         )
 
 
     @cached_property
     @cached_property
     def oid(self):
     def oid(self):
+        # cached here is the app OID: the name of the queue we receive results on.
         return self.app.oid
         return self.app.oid
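
The consolidated RPC backend above publishes every result straight to the
calling client's reply queue (named after ``app.oid``), using the task id as
correlation id.  A minimal usage sketch, assuming a reachable AMQP broker and
a running worker; the project and task names are illustrative only:

    from celery import Celery

    app = Celery('proj', broker='amqp://', backend='rpc://')

    @app.task
    def add(x, y):
        return x + y

    # destination_for() above resolves to (request.reply_to, correlation_id),
    # so the worker publishes the return value to this client's own queue.
    result = add.delay(2, 2)
    print(result.get(timeout=10))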

+ 61 - 48
celery/beat.py

@@ -23,8 +23,8 @@ from . import __version__
 from . import platforms
 from . import platforms
 from . import signals
 from . import signals
 from .schedules import maybe_schedule, crontab
 from .schedules import maybe_schedule, crontab
-from .utils.imports import instantiate
-from .utils.timeutils import humanize_seconds
+from .utils.imports import load_extension_class_names, symbol_by_name
+from .utils.time import humanize_seconds
 from .utils.log import get_logger, iter_open_logger_fds
 from .utils.log import get_logger, iter_open_logger_fds
 
 
 __all__ = [
 __all__ = [
@@ -98,8 +98,7 @@ class ScheduleEntry:
         return self.schedule.now() if self.schedule else self.app.now()
         return self.schedule.now() if self.schedule else self.app.now()
 
 
     def _next_instance(self, last_run_at=None):
     def _next_instance(self, last_run_at=None):
-        """Return a new instance of the same class, but with
-        its date and count fields updated."""
+        """Return new instance, with date and count fields updated."""
         return self.__class__(**dict(
         return self.__class__(**dict(
             self,
             self,
             last_run_at=last_run_at or self._default_now(),
             last_run_at=last_run_at or self._default_now(),
@@ -145,7 +144,7 @@ class ScheduleEntry:
             # in the scheduler heap, the order is decided by the
             # in the scheduler heap, the order is decided by the
             # preceding members of the tuple ``(time, priority, entry)``.
             # preceding members of the tuple ``(time, priority, entry)``.
             #
             #
-            # If all that is left to order on is the entry then it can
+            # If all that's left to order on is the entry then it can
             # just as well be random.
             # just as well be random.
             return id(self) < id(other)
             return id(self) < id(other)
         return NotImplemented
         return NotImplemented
@@ -156,13 +155,13 @@ class Scheduler:
 
 
     The :program:`celery beat` program may instantiate this class
     The :program:`celery beat` program may instantiate this class
     multiple times for introspection purposes, but then with the
     multiple times for introspection purposes, but then with the
-    ``lazy`` argument set.  It is important for subclasses to
+    ``lazy`` argument set.  It's important for subclasses to
     be idempotent when this argument is set.
     be idempotent when this argument is set.
 
 
     Arguments:
     Arguments:
         schedule (~celery.schedules.schedule): see :attr:`schedule`.
         schedule (~celery.schedules.schedule): see :attr:`schedule`.
         max_interval (int): see :attr:`max_interval`.
         max_interval (int): see :attr:`max_interval`.
-        lazy (bool): Do not set up the schedule.
+        lazy (bool): Don't set up the schedule.
     """
     """
 
 
     Entry = ScheduleEntry
     Entry = ScheduleEntry
@@ -214,7 +213,7 @@ class Scheduler:
         info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
         info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
         try:
         try:
             result = self.apply_async(entry, producer=producer, advance=False)
             result = self.apply_async(entry, producer=producer, advance=False)
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             error('Message Error: %s\n%s',
             error('Message Error: %s\n%s',
                   exc, traceback.format_stack(), exc_info=True)
                   exc, traceback.format_stack(), exc_info=True)
         else:
         else:
@@ -228,17 +227,17 @@ class Scheduler:
     def is_due(self, entry):
     def is_due(self, entry):
         return entry.is_due()
         return entry.is_due()
 
 
+    # pylint: disable=redefined-outer-name
     def tick(self, event_t=event_t, min=min,
     def tick(self, event_t=event_t, min=min,
              heappop=heapq.heappop, heappush=heapq.heappush,
              heappop=heapq.heappop, heappush=heapq.heappush,
              heapify=heapq.heapify, mktime=time.mktime):
              heapify=heapq.heapify, mktime=time.mktime):
-        """Run a tick, that is one iteration of the scheduler.
+        """Run a tick - one iteration of the scheduler.
 
 
         Executes one due task per call.
         Executes one due task per call.
 
 
         Returns:
         Returns:
             float: preferred delay in seconds for next call.
             float: preferred delay in seconds for next call.
         """
         """
-
         def _when(entry, next_time_to_run):
         def _when(entry, next_time_to_run):
             return (mktime(entry.schedule.now().timetuple()) +
             return (mktime(entry.schedule.now().timetuple()) +
                     (adjust(next_time_to_run) or 0))
                     (adjust(next_time_to_run) or 0))
@@ -297,7 +296,7 @@ class Scheduler:
                 return self.send_task(entry.task, entry.args, entry.kwargs,
                 return self.send_task(entry.task, entry.args, entry.kwargs,
                                       producer=producer,
                                       producer=producer,
                                       **entry.options)
                                       **entry.options)
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             raise SchedulingError(
             raise SchedulingError(
                 "Couldn't apply scheduled task {0.name}: {exc}".format(
                 "Couldn't apply scheduled task {0.name}: {exc}".format(
                     entry, exc=exc)).with_traceback(sys.exc_info()[2])
                     entry, exc=exc)).with_traceback(sys.exc_info()[2])
@@ -391,6 +390,8 @@ class Scheduler:
 
 
 
 
 class PersistentScheduler(Scheduler):
 class PersistentScheduler(Scheduler):
+    """Scheduler backed by :mod:`shelve` database."""
+
     persistence = shelve
     persistence = shelve
     known_suffixes = ('', '.db', '.dat', '.bak', '.dir')
     known_suffixes = ('', '.db', '.dat', '.bak', '.dir')
 
 
@@ -418,61 +419,68 @@ class PersistentScheduler(Scheduler):
         try:
         try:
             self._store = self._open_schedule()
             self._store = self._open_schedule()
             # In some cases there may be different errors from a storage
             # In some cases there may be different errors from a storage
-            # backend for corrupted files. Example - DBPageNotFoundError
-            # exception from bsddb. In such case the file will be
+            # backend for corrupted files.  Example - DBPageNotFoundError
+            # exception from bsddb.  In such case the file will be
             # successfully opened but the error will be raised on first key
             # successfully opened but the error will be raised on first key
             # retrieving.
             # retrieving.
             self._store.keys()
             self._store.keys()
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             self._store = self._destroy_open_corrupted_schedule(exc)
             self._store = self._destroy_open_corrupted_schedule(exc)
 
 
-        for _ in (1, 2):
-            try:
-                self._store['entries']
-            except KeyError:
-                # new schedule db
-                try:
-                    self._store['entries'] = {}
-                except KeyError as exc:
-                    self._store = self._destroy_open_corrupted_schedule(exc)
-                    continue
-            else:
-                if '__version__' not in self._store:
-                    warning('DB Reset: Account for new __version__ field')
-                    self._store.clear()   # remove schedule at 2.2.2 upgrade.
-                elif 'tz' not in self._store:
-                    warning('DB Reset: Account for new tz field')
-                    self._store.clear()   # remove schedule at 3.0.8 upgrade
-                elif 'utc_enabled' not in self._store:
-                    warning('DB Reset: Account for new utc_enabled field')
-                    self._store.clear()   # remove schedule at 3.0.9 upgrade
-            break
+        self._create_schedule()
 
 
         tz = self.app.conf.timezone
         tz = self.app.conf.timezone
-        stored_tz = self._store.get('tz')
+        stored_tz = self._store.get(str(b'tz'))
         if stored_tz is not None and stored_tz != tz:
         if stored_tz is not None and stored_tz != tz:
             warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
             warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
             self._store.clear()   # Timezone changed, reset db!
             self._store.clear()   # Timezone changed, reset db!
         utc = self.app.conf.enable_utc
         utc = self.app.conf.enable_utc
-        stored_utc = self._store.get('utc_enabled')
+        stored_utc = self._store.get(str(b'utc_enabled'))
         if stored_utc is not None and stored_utc != utc:
         if stored_utc is not None and stored_utc != utc:
             choices = {True: 'enabled', False: 'disabled'}
             choices = {True: 'enabled', False: 'disabled'}
             warning('Reset: UTC changed from %s to %s',
             warning('Reset: UTC changed from %s to %s',
                     choices[stored_utc], choices[utc])
                     choices[stored_utc], choices[utc])
             self._store.clear()   # UTC setting changed, reset db!
             self._store.clear()   # UTC setting changed, reset db!
-        entries = self._store.setdefault('entries', {})
+        entries = self._store.setdefault(str(b'entries'), {})
         self.merge_inplace(self.app.conf.beat_schedule)
         self.merge_inplace(self.app.conf.beat_schedule)
         self.install_default_entries(self.schedule)
         self.install_default_entries(self.schedule)
-        self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
+        self._store.update({
+            str(b'__version__'): __version__,
+            str(b'tz'): tz,
+            str(b'utc_enabled'): utc,
+        })
         self.sync()
         self.sync()
         debug('Current schedule:\n' + '\n'.join(
         debug('Current schedule:\n' + '\n'.join(
             repr(entry) for entry in entries.values()))
             repr(entry) for entry in entries.values()))
 
 
+    def _create_schedule(self):
+        for _ in (1, 2):
+            try:
+                self._store[str(b'entries')]
+            except KeyError:
+                # new schedule db
+                try:
+                    self._store[str(b'entries')] = {}
+                except KeyError as exc:
+                    self._store = self._destroy_open_corrupted_schedule(exc)
+                    continue
+            else:
+                if str(b'__version__') not in self._store:
+                    warning('DB Reset: Account for new __version__ field')
+                    self._store.clear()   # remove schedule at 2.2.2 upgrade.
+                elif str(b'tz') not in self._store:
+                    warning('DB Reset: Account for new tz field')
+                    self._store.clear()   # remove schedule at 3.0.8 upgrade
+                elif str(b'utc_enabled') not in self._store:
+                    warning('DB Reset: Account for new utc_enabled field')
+                    self._store.clear()   # remove schedule at 3.0.9 upgrade
+            break
+
     def get_schedule(self):
     def get_schedule(self):
-        return self._store['entries']
+        return self._store[str(b'entries')]
 
 
     def set_schedule(self, schedule):
     def set_schedule(self, schedule):
-        self._store['entries'] = schedule
+        self._store[str(b'entries')] = schedule
     schedule = property(get_schedule, set_schedule)
     schedule = property(get_schedule, set_schedule)
 
 
     def sync(self):
     def sync(self):
@@ -489,6 +497,8 @@ class PersistentScheduler(Scheduler):
 
 
 
 
 class Service:
 class Service:
+    """Celery periodic task service."""
+
     scheduler_cls = PersistentScheduler
     scheduler_cls = PersistentScheduler
 
 
     def __init__(self, app, max_interval=None, schedule_filename=None,
     def __init__(self, app, max_interval=None, schedule_filename=None,
@@ -540,14 +550,17 @@ class Service:
         self._is_shutdown.set()
         self._is_shutdown.set()
         wait and self._is_stopped.wait()  # block until shutdown done.
         wait and self._is_stopped.wait()  # block until shutdown done.
 
 
-    def get_scheduler(self, lazy=False):
+    def get_scheduler(self, lazy=False,
+                      extension_namespace='celery.beat_schedulers'):
         filename = self.schedule_filename
         filename = self.schedule_filename
-        scheduler = instantiate(self.scheduler_cls,
-                                app=self.app,
-                                schedule_filename=filename,
-                                max_interval=self.max_interval,
-                                lazy=lazy)
-        return scheduler
+        aliases = dict(
+            load_extension_class_names(extension_namespace) or {})
+        return symbol_by_name(self.scheduler_cls, aliases=aliases)(
+            app=self.app,
+            schedule_filename=filename,
+            max_interval=self.max_interval,
+            lazy=lazy,
+        )
 
 
     @cached_property
     @cached_property
     def scheduler(self):
     def scheduler(self):
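
With the new ``get_scheduler()`` above, the ``-S``/``--scheduler`` value is
resolved through setuptools entry points in the ``celery.beat_schedulers``
namespace before falling back to a dotted path.  A rough sketch of that
resolution; the ``django`` alias only resolves if :pypi:`django-celery-beat`
(or another extension) registers the entry point:

    from celery.utils.imports import load_extension_class_names, symbol_by_name

    aliases = dict(load_extension_class_names('celery.beat_schedulers') or {})
    # 'celery.beat.PersistentScheduler' is the built-in default; an alias such
    # as 'django' maps to whatever class the extension package registered.
    Scheduler = symbol_by_name('celery.beat.PersistentScheduler', aliases=aliases)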

+ 11 - 10
celery/bin/amqp.py

@@ -3,7 +3,7 @@
 
 
 .. program:: celery amqp
 .. program:: celery amqp
 """
 """
-import cmd
+import cmd as _cmd
 import sys
 import sys
 import shlex
 import shlex
 import pprint
 import pprint
@@ -70,8 +70,7 @@ class Spec:
     def str_args_to_python(self, arglist):
     def str_args_to_python(self, arglist):
         """Process list of string arguments to values according to spec.
         """Process list of string arguments to values according to spec.
 
 
-        e.g::
-
+        Example:
             >>> spec = Spec([('queue', str), ('if_unused', bool)])
             >>> spec = Spec([('queue', str), ('if_unused', bool)])
             >>> spec.str_args_to_python('pobox', 'true')
             >>> spec.str_args_to_python('pobox', 'true')
             ('pobox', True)
             ('pobox', True)
@@ -109,7 +108,7 @@ def format_declare_queue(ret):
     return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret)
     return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret)
 
 
 
 
-class AMQShell(cmd.Cmd):
+class AMQShell(_cmd.Cmd):
     """AMQP API Shell.
     """AMQP API Shell.
 
 
     Arguments:
     Arguments:
@@ -118,10 +117,11 @@ class AMQShell(cmd.Cmd):
         silent (bool): If enabled, the commands won't have annoying
         silent (bool): If enabled, the commands won't have annoying
             output not relevant when running in non-shell mode.
             output not relevant when running in non-shell mode.
     """
     """
+
     conn = None
     conn = None
     chan = None
     chan = None
     prompt_fmt = '{self.counter}> '
     prompt_fmt = '{self.counter}> '
-    identchars = cmd.IDENTCHARS = '.'
+    identchars = _cmd.IDENTCHARS = '.'
     needs_reconnect = False
     needs_reconnect = False
     counter = 1
     counter = 1
     inc_counter = count(2)
     inc_counter = count(2)
@@ -183,11 +183,11 @@ class AMQShell(cmd.Cmd):
         self.connect = kwargs.pop('connect')
         self.connect = kwargs.pop('connect')
         self.silent = kwargs.pop('silent', False)
         self.silent = kwargs.pop('silent', False)
         self.out = kwargs.pop('out', sys.stderr)
         self.out = kwargs.pop('out', sys.stderr)
-        cmd.Cmd.__init__(self, *args, **kwargs)
+        _cmd.Cmd.__init__(self, *args, **kwargs)
         self._reconnect()
         self._reconnect()
 
 
     def note(self, m):
     def note(self, m):
-        """Say something to the user. Disabled if :attr:`silent`."""
+        """Say something to the user.  Disabled if :attr:`silent`."""
         if not self.silent:
         if not self.silent:
             say(m, file=self.out)
             say(m, file=self.out)
 
 
@@ -195,7 +195,9 @@ class AMQShell(cmd.Cmd):
         say(m, file=self.out)
         say(m, file=self.out)
 
 
     def get_amqp_api_command(self, cmd, arglist):
     def get_amqp_api_command(self, cmd, arglist):
-        """With a command name and a list of arguments, convert the arguments
+        """Get AMQP command wrapper.
+
+        With a command name and a list of arguments, convert the arguments
         to Python values and find the corresponding method on the AMQP channel
         to Python values and find the corresponding method on the AMQP channel
         object.
         object.
 
 
@@ -279,7 +281,7 @@ class AMQShell(cmd.Cmd):
             self.respond(self.dispatch(cmd, arg))
             self.respond(self.dispatch(cmd, arg))
         except (AttributeError, KeyError) as exc:
         except (AttributeError, KeyError) as exc:
             self.default(line)
             self.default(line)
-        except Exception as exc:
+        except Exception as exc:  # pylint: disable=broad-except
             self.say(exc)
             self.say(exc)
             self.needs_reconnect = True
             self.needs_reconnect = True
 
 
@@ -331,7 +333,6 @@ class AMQPAdmin:
             return shell.cmdloop()
             return shell.cmdloop()
         except KeyboardInterrupt:
         except KeyboardInterrupt:
             self.note('(bibi)')
             self.note('(bibi)')
-            pass
 
 
     def note(self, m):
     def note(self, m):
         if not self.silent:
         if not self.silent:
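
``AMQShell`` above is a thin subclass of the standard library's ``cmd.Cmd``
(imported as ``_cmd`` so the name doesn't shadow the ``cmd`` arguments used by
several methods).  For readers unfamiliar with that pattern, a stand-alone
stdlib sketch, unrelated to the AMQP commands themselves:

    import cmd

    class EchoShell(cmd.Cmd):
        prompt = '1> '

        def do_echo(self, line):
            """echo <text> -- print <text> back."""
            print(line)

        def do_exit(self, _line):
            """exit -- leave the shell."""
            return True  # a true return value stops cmdloop()

    if __name__ == '__main__':
        EchoShell().cmdloop()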

+ 166 - 140
celery/bin/base.py

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 """Base command-line interface."""
 """Base command-line interface."""
+import argparse
+import inspect
 import os
 import os
 import random
 import random
 import re
 import re
@@ -9,20 +11,28 @@ import json
 
 
 from collections import defaultdict
 from collections import defaultdict
 from heapq import heappush
 from heapq import heappush
-from inspect import getfullargspec
-from optparse import (
-    OptionParser, OptionGroup, IndentedHelpFormatter, make_option as Option,
-)
 from pprint import pformat
 from pprint import pformat
 
 
 from celery import VERSION_BANNER, Celery, maybe_patch_concurrency
 from celery import VERSION_BANNER, Celery, maybe_patch_concurrency
 from celery import signals
 from celery import signals
-from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
-from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
+from celery.exceptions import (
+    CDeprecationWarning, CPendingDeprecationWarning, ImproperlyConfigured,
+)
+from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE, isatty
+from celery.utils import imports
 from celery.utils import term
 from celery.utils import term
 from celery.utils import text
 from celery.utils import text
+from celery.utils.functional import dictfilter
 from celery.utils.nodenames import node_format, host_format
 from celery.utils.nodenames import node_format, host_format
-from celery.utils.imports import symbol_by_name, import_from_cwd
+from celery.utils.objects import Bunch
+
+
+# Option is here for backwards compatibility, as third-party commands
+# may import it from here.
+try:
+    from optparse import Option  # pylint: disable=deprecated-module
+except ImportError:  # pragma: no cover
+    Option = None  # noqa
 
 
 try:
 try:
     input = raw_input
     input = raw_input
@@ -30,8 +40,7 @@ except NameError:  # pragma: no cover
     pass
     pass
 
 
 __all__ = [
 __all__ = [
-    'Error', 'UsageError', 'Extensions',
-    'HelpFormatter', 'Command', 'Option', 'daemon_options',
+    'Error', 'UsageError', 'Extensions', 'Command', 'Option', 'daemon_options',
 ]
 ]
 
 
 # always enable DeprecationWarnings, so our users can see them.
 # always enable DeprecationWarnings, so our users can see them.
@@ -49,7 +58,55 @@ find_rst_ref = re.compile(r':\w+:`(.+?)`')
 find_rst_decl = re.compile(r'^\s*\.\. .+?::.+$')
 find_rst_decl = re.compile(r'^\s*\.\. .+?::.+$')
 
 
 
 
+def _optparse_callback_to_type(option, callback):
+    parser = Bunch(values=Bunch())
+
+    def _on_arg(value):
+        callback(option, None, value, parser)
+        return getattr(parser.values, option.dest)
+    return _on_arg
+
+
+def _add_optparse_argument(parser, opt, typemap={
+        'string': str,
+        'int': int,
+        'long': int,
+        'float': float,
+        'complex': complex,
+        'choice': None}):
+    if opt.callback:
+        opt.type = _optparse_callback_to_type(opt, opt.type)
+    # argparse checks for existence of this kwarg
+    if opt.action == 'callback':
+        opt.action = None
+    # store_true options end up with the value "('NO', 'DEFAULT')" for
+    # some reason, so we set a sane default of False here.
+    if opt.action == 'store_true' and opt.default is None:
+        opt.default = False
+    parser.add_argument(
+        *opt._long_opts + opt._short_opts,
+        **dictfilter(dict(
+            action=opt.action,
+            type=typemap.get(opt.type, opt.type),
+            dest=opt.dest,
+            nargs=opt.nargs,
+            choices=opt.choices,
+            help=opt.help,
+            metavar=opt.metavar,
+            default=opt.default)))
+
+
+def _add_compat_options(parser, options):
+    for option in options or ():
+        if callable(option):
+            option(parser)
+        else:
+            _add_optparse_argument(parser, option)
+
+
 class Error(Exception):
 class Error(Exception):
+    """Exception raised by commands."""
+
     status = EX_FAILURE
     status = EX_FAILURE
 
 
     def __init__(self, reason, status=None):
     def __init__(self, reason, status=None):
@@ -62,10 +119,13 @@ class Error(Exception):
 
 
 
 
 class UsageError(Error):
 class UsageError(Error):
+    """Exception raised for malformed arguments."""
+
     status = EX_USAGE
     status = EX_USAGE
 
 
 
 
 class Extensions:
 class Extensions:
+    """Loads extensions from setuptools entrypoints."""
 
 
     def __init__(self, namespace, register):
     def __init__(self, namespace, register):
         self.names = []
         self.names = []
@@ -77,35 +137,11 @@ class Extensions:
         self.register(cls, name=name)
         self.register(cls, name=name)
 
 
     def load(self):
     def load(self):
-        try:
-            from pkg_resources import iter_entry_points
-        except ImportError:  # pragma: no cover
-            return
-
-        for ep in iter_entry_points(self.namespace):
-            sym = ':'.join([ep.module_name, ep.attrs[0]])
-            try:
-                cls = symbol_by_name(sym)
-            except (ImportError, SyntaxError) as exc:
-                warnings.warn(
-                    'Cannot load extension {0!r}: {1!r}'.format(sym, exc))
-            else:
-                self.add(cls, ep.name)
+        for name, cls in imports.load_extension_classes(self.namespace):
+            self.add(cls, name)
         return self.names
         return self.names
 
 
 
 
-class HelpFormatter(IndentedHelpFormatter):
-
-    def format_epilog(self, epilog):
-        if epilog:
-            return '\n{0}\n\n'.format(epilog)
-        return ''
-
-    def format_description(self, description):
-        return text.ensure_newlines(
-            text.fill_paragraphs(text.dedent(description), self.width))
-
-
 class Command:
 class Command:
     """Base class for command-line applications.
     """Base class for command-line applications.
 
 
@@ -117,7 +153,7 @@ class Command:
 
 
     Error = Error
     Error = Error
     UsageError = UsageError
     UsageError = UsageError
-    Parser = OptionParser
+    Parser = argparse.ArgumentParser
 
 
     #: Arg list used in help.
     #: Arg list used in help.
     args = ''
     args = ''
@@ -130,7 +166,7 @@ class Command:
     supports_args = True
     supports_args = True
 
 
     #: List of options (without preload options).
     #: List of options (without preload options).
-    option_list = ()
+    option_list = None
 
 
     # module Rst documentation to parse help from (if any)
     # module Rst documentation to parse help from (if any)
     doc = None
     doc = None
@@ -142,17 +178,6 @@ class Command:
     # Some programs (multi) does not want to set up fixups etc.
     # Some programs (multi) does not want to set up fixups etc.
     fake_app = False
     fake_app = False
 
 
-    #: List of options to parse before parsing other options.
-    preload_options = (
-        Option('-A', '--app', default=None),
-        Option('-b', '--broker', default=None),
-        Option('--loader', default=None),
-        Option('--config', default=None),
-        Option('--workdir', default=None, dest='working_directory'),
-        Option('--no-color', '-C', action='store_true', default=None),
-        Option('--quiet', '-q', action='store_true'),
-    )
-
     #: Enable if the application should support config from the cmdline.
     #: Enable if the application should support config from the cmdline.
     enable_config_from_cmdline = False
     enable_config_from_cmdline = False
 
 
@@ -193,13 +218,16 @@ class Command:
             self.on_usage_error = on_usage_error
             self.on_usage_error = on_usage_error
 
 
     def run(self, *args, **options):
     def run(self, *args, **options):
-        """This is the body of the command called by :meth:`handle_argv`."""
         raise NotImplementedError('subclass responsibility')
         raise NotImplementedError('subclass responsibility')
 
 
     def on_error(self, exc):
     def on_error(self, exc):
+        # pylint: disable=method-hidden
+        #   on_error argument to __init__ may override this method.
         self.error(self.colored.red('Error: {0}'.format(exc)))
         self.error(self.colored.red('Error: {0}'.format(exc)))
 
 
     def on_usage_error(self, exc):
     def on_usage_error(self, exc):
+        # pylint: disable=method-hidden
+        #   on_usage_error argument to __init__ may override this method.
         self.handle_error(exc)
         self.handle_error(exc)
 
 
     def on_concurrency_setup(self):
     def on_concurrency_setup(self):
@@ -219,7 +247,7 @@ class Command:
             return exc.status
             return exc.status
 
 
     def verify_args(self, given, _index=0):
     def verify_args(self, given, _index=0):
-        S = getfullargspec(self.run)
+        S = inspect.getfullargspec(self.run)
         _index = 1 if S.args and S.args[0] == 'self' else _index
         _index = 1 if S.args and S.args[0] == 'self' else _index
         required = S.args[_index:-len(S.defaults) if S.defaults else None]
         required = S.args[_index:-len(S.defaults) if S.defaults else None]
         missing = required[len(given):]
         missing = required[len(given):]
@@ -257,15 +285,33 @@ class Command:
         pool_option = self.with_pool_option(argv)
         pool_option = self.with_pool_option(argv)
         if pool_option:
         if pool_option:
             maybe_patch_concurrency(argv, *pool_option)
             maybe_patch_concurrency(argv, *pool_option)
-            short_opts, long_opts = pool_option
 
 
     def usage(self, command):
     def usage(self, command):
-        return '%prog {0} [options] {self.args}'.format(command, self=self)
+        return '%(prog)s {0} [options] {self.args}'.format(command, self=self)
+
+    def add_arguments(self, parser):
+        pass
 
 
     def get_options(self):
     def get_options(self):
-        """Get supported command-line options."""
+        # This is for optparse options; please use add_arguments instead.
         return self.option_list
         return self.option_list
 
 
+    def add_preload_arguments(self, parser):
+        group = parser.add_argument_group('Global Options')
+        group.add_argument('-A', '--app', default=None)
+        group.add_argument('-b', '--broker', default=None)
+        group.add_argument('--loader', default=None)
+        group.add_argument('--config', default=None)
+        group.add_argument('--workdir', default=None)
+        group.add_argument(
+            '--no-color', '-C', action='store_true', default=None)
+        group.add_argument('--quiet', '-q', action='store_true')
+
+    def _add_version_argument(self, parser):
+        parser.add_argument(
+            '--version', action='version', version=self.version,
+        )
+
     def prepare_arguments(self, parser):
     def prepare_arguments(self, parser):
         pass
         pass
 
 
@@ -283,7 +329,7 @@ class Command:
         Matching is case insensitive.
         Matching is case insensitive.
 
 
         Arguments:
         Arguments:
-            q (str): the question to ask (do not include questionark)
+            q (str): the question to ask (don't include a question mark)
             choice (Tuple[str]): tuple of possible choices, must be lowercase.
             choice (Tuple[str]): tuple of possible choices, must be lowercase.
             default (Any): Default value if any.
             default (Any): Default value if any.
         """
         """
@@ -303,8 +349,7 @@ class Command:
         return default
         return default
 
 
     def handle_argv(self, prog_name, argv, command=None):
     def handle_argv(self, prog_name, argv, command=None):
-        """Parse command-line arguments from ``argv`` and dispatch
-        to :meth:`run`.
+        """Parse arguments from argv and dispatch to :meth:`run`.
 
 
         Warning:
         Warning:
             Exits with an error message if :attr:`supports_args` is disabled
             Exits with an error message if :attr:`supports_args` is disabled
@@ -322,7 +367,7 @@ class Command:
         if options:
         if options:
             options = {
             options = {
                 k: self.expanduser(v)
                 k: self.expanduser(v)
-                for k, v in vars(options).items() if not k.startswith('_')
+                for k, v in options.items() if not k.startswith('_')
             }
             }
         args = [self.expanduser(arg) for arg in args]
         args = [self.expanduser(arg) for arg in args]
         self.check_args(args)
         self.check_args(args)
@@ -352,33 +397,49 @@ class Command:
         # Don't want to load configuration to just print the version,
         # Don't want to load configuration to just print the version,
         # so we handle --version manually here.
         # so we handle --version manually here.
         self.parser = self.create_parser(prog_name, command)
         self.parser = self.create_parser(prog_name, command)
-        return self.parser.parse_args(arguments)
+        options = vars(self.parser.parse_args(arguments))
+        return options, options.pop('args', None) or []
 
 
     def create_parser(self, prog_name, command=None):
     def create_parser(self, prog_name, command=None):
+        # for compatibility with optparse usage.
+        usage = self.usage(command).replace('%prog', '%(prog)s')
         parser = self.Parser(
         parser = self.Parser(
             prog=prog_name,
             prog=prog_name,
-            usage=self.usage(command),
-            version=self.version,
-            epilog=self.epilog,
-            formatter=HelpFormatter(),
-            description=self.description,
+            usage=usage,
+            epilog=self._format_epilog(self.epilog),
+            formatter_class=argparse.RawDescriptionHelpFormatter,
+            description=self._format_description(self.description),
         )
         )
-        parser.add_options(self.preload_options)
-        for typ_ in reversed(type(self).mro()):
-            try:
-                prepare_arguments = typ_.prepare_arguments
-            except AttributeError:
-                continue
-            prepare_arguments(self, parser)
-        parser.add_options(self.get_options() or ())
-        parser.add_options(self.app.user_options['preload'])
+        self._add_version_argument(parser)
+        self.add_preload_arguments(parser)
+        self.add_arguments(parser)
+        self.add_compat_options(parser, self.get_options())
+        self.add_compat_options(parser, self.app.user_options['preload'])
+
+        if self.supports_args:
+            # for backward compatibility with optparse, we automatically
+            # add arbitrary positional args.
+            parser.add_argument('args', nargs='*')
         return self.prepare_parser(parser)
         return self.prepare_parser(parser)
 
 
+    def _format_epilog(self, epilog):
+        if epilog:
+            return '\n{0}\n\n'.format(epilog)
+        return ''
+
+    def _format_description(self, description):
+        width = argparse.HelpFormatter('prog')._width
+        return text.ensure_newlines(
+            text.fill_paragraphs(text.dedent(description), width))
+
+    def add_compat_options(self, parser, options):
+        _add_compat_options(parser, options)
+
     def prepare_parser(self, parser):
     def prepare_parser(self, parser):
         docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc]
         docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc]
         for doc in docs:
         for doc in docs:
             for long_opt, help in doc.items():
             for long_opt, help in doc.items():
-                option = parser.get_option(long_opt)
+                option = parser._option_string_actions[long_opt]
                 if option is not None:
                 if option is not None:
                     option.help = ' '.join(help).format(default=option.default)
                     option.help = ' '.join(help).format(default=option.default)
         return parser
         return parser
@@ -392,7 +453,7 @@ class Command:
             self.no_color = preload_options['no_color']
             self.no_color = preload_options['no_color']
         except KeyError:
         except KeyError:
             pass
             pass
-        workdir = preload_options.get('working_directory')
+        workdir = preload_options.get('workdir')
         if workdir:
         if workdir:
             os.chdir(workdir)
             os.chdir(workdir)
         app = (preload_options.get('app') or
         app = (preload_options.get('app') or
@@ -414,15 +475,17 @@ class Command:
 
 
         self.initialize_app(app, loader)
         self.initialize_app(app, loader)
 
 
+        self._handle_user_preload_options(argv)
+
+        return argv
+
+    def _handle_user_preload_options(self, argv):
         user_preload = tuple(self.app.user_options['preload'] or ())
         user_preload = tuple(self.app.user_options['preload'] or ())
         if user_preload:
         if user_preload:
-            user_options = self.preparse_options(argv, user_preload)
-            for user_option in user_preload:
-                user_options.setdefault(user_option.dest, user_option.default)
+            user_options = self._parse_preload_options(argv, user_preload)
             signals.user_preload_options.send(
             signals.user_preload_options.send(
                 sender=self, app=self.app, options=user_options,
                 sender=self, app=self.app, options=user_options,
             )
             )
-        return argv
 
 
     def initialize_app(self, app=None, loader=None):
     def initialize_app(self, app=None, loader=None):
         if self.requires_app:
         if self.requires_app:
@@ -447,8 +510,8 @@ class Command:
         from celery.app.utils import find_app
         from celery.app.utils import find_app
         return find_app(app, symbol_by_name=self.symbol_by_name)
         return find_app(app, symbol_by_name=self.symbol_by_name)
 
 
-    def symbol_by_name(self, name, imp=import_from_cwd):
-        return symbol_by_name(name, imp=imp)
+    def symbol_by_name(self, name, imp=imports.import_from_cwd):
+        return imports.symbol_by_name(name, imp=imp)
 
 
     def process_cmdline_config(self, argv):
     def process_cmdline_config(self, argv):
         try:
         try:
@@ -460,7 +523,14 @@ class Command:
         return argv
         return argv
 
 
     def parse_preload_options(self, args):
     def parse_preload_options(self, args):
-        return self.preparse_options(args, self.preload_options)
+        return self._parse_preload_options(args, [self.add_preload_arguments])
+
+    def _parse_preload_options(self, args, options):
+        args = [arg for arg in args if arg not in ('-h', '--help')]
+        parser = self.Parser()
+        self.add_compat_options(parser, options)
+        namespace, _ = parser.parse_known_args(args)
+        return vars(namespace)
 
 
     def add_append_opt(self, acc, opt, value):
     def add_append_opt(self, acc, opt, value):
         default = opt.default or []
         default = opt.default or []
@@ -470,53 +540,6 @@ class Command:
 
 
         acc[opt.dest].append(value)
         acc[opt.dest].append(value)
 
 
-    def preparse_options(self, args, options):
-        acc = {}
-        opts = {}
-        for opt in options:
-            for t in (opt._long_opts, opt._short_opts):
-                opts.update(dict(zip(t, [opt] * len(t))))
-        index = 0
-        length = len(args)
-        while index < length:
-            arg = args[index]
-            if arg.startswith('--'):
-                if '=' in arg:
-                    key, value = arg.split('=', 1)
-                    opt = opts.get(key)
-                    if opt:
-                        if opt.action == 'append':
-                            self.add_append_opt(acc, opt, value)
-                        else:
-                            acc[opt.dest] = value
-                else:
-                    opt = opts.get(arg)
-                    if opt and opt.takes_value():
-                        # optparse also supports ['--opt', 'value']
-                        # (Issue #1668)
-                        if opt.action == 'append':
-                            self.add_append_opt(acc, opt, args[index + 1])
-                        else:
-                            acc[opt.dest] = args[index + 1]
-                        index += 1
-                    elif opt and opt.action == 'store_true':
-                        acc[opt.dest] = True
-            elif arg.startswith('-'):
-                opt = opts.get(arg)
-                if opt:
-                    if opt.takes_value():
-                        try:
-                            acc[opt.dest] = args[index + 1]
-                        except IndexError:
-                            raise ValueError(
-                                'Missing required argument for {0}'.format(
-                                    arg))
-                        index += 1
-                    elif opt.action == 'store_true':
-                        acc[opt.dest] = True
-            index += 1
-        return acc
-
     def parse_doc(self, doc):
     def parse_doc(self, doc):
         options, in_option = defaultdict(list), None
         options, in_option = defaultdict(list), None
         for line in doc.splitlines():
         for line in doc.splitlines():
@@ -540,12 +563,14 @@ class Command:
         )
         )
 
 
     def with_pool_option(self, argv):
     def with_pool_option(self, argv):
-        """Return tuple of ``(short_opts, long_opts)`` if the command
+        """Return tuple of ``(short_opts, long_opts)``.
+
+        Returns only if the command
         supports a pool argument, and used to monkey patch eventlet/gevent
         supports a pool argument, and used to monkey patch eventlet/gevent
         environments as early as possible.
         environments as early as possible.
 
 
-        E.g::
-              has_pool_option = (['-P'], ['--pool'])
+        Example:
+              >>> has_pool_option = (['-P'], ['--pool'])
         """
         """
         pass
         pass
 
 
@@ -610,7 +635,8 @@ class Command:
     @property
     @property
     def colored(self):
     def colored(self):
         if self._colored is None:
         if self._colored is None:
-            self._colored = term.colored(enabled=not self.no_color)
+            self._colored = term.colored(
+                enabled=isatty(self.stdout) and not self.no_color)
         return self._colored
         return self._colored
 
 
     @colored.setter
     @colored.setter
@@ -629,11 +655,11 @@ class Command:
 
 
 
 
 def daemon_options(parser, default_pidfile=None, default_logfile=None):
 def daemon_options(parser, default_pidfile=None, default_logfile=None):
-    group = OptionGroup(parser, 'Daemonization Options')
-    group.add_option('-f', '--logfile', default=default_logfile),
-    group.add_option('--pidfile', default=default_pidfile),
-    group.add_option('--uid', default=None),
-    group.add_option('--gid', default=None),
-    group.add_option('--umask', default=None),
-    group.add_option('--executable', default=None),
-    parser.add_option_group(group)
+    """Add daemon options to argparse parser."""
+    group = parser.add_argument_group('Daemonization Options')
+    group.add_argument('-f', '--logfile', default=default_logfile),
+    group.add_argument('--pidfile', default=default_pidfile),
+    group.add_argument('--uid', default=None),
+    group.add_argument('--gid', default=None),
+    group.add_argument('--umask', default=None),
+    group.add_argument('--executable', default=None),
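
Under the argparse port above, new-style commands declare their options in
``add_arguments(parser)`` instead of ``option_list``; old optparse objects are
still translated by ``add_compat_options()``.  A hypothetical subclass, shown
only to illustrate the hook:

    from celery.bin.base import Command

    class hello(Command):
        """Say hello to someone."""

        def add_arguments(self, parser):
            group = parser.add_argument_group('Hello Options')
            group.add_argument('--name', default='world',
                               help='name to greet.')

        def run(self, name='world', **options):
            self.out('Hello, {0}!'.format(name))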

+ 26 - 14
celery/bin/beat.py

@@ -13,7 +13,7 @@
 
 
 .. cmdoption:: -s, --schedule
 .. cmdoption:: -s, --schedule
 
 
-    Path to the schedule database. Defaults to `celerybeat-schedule`.
+    Path to the schedule database.  Defaults to `celerybeat-schedule`.
     The extension '.db' may be appended to the filename.
     The extension '.db' may be appended to the filename.
     Default is {default}.
     Default is {default}.
 
 
@@ -28,7 +28,7 @@
 
 
 .. cmdoption:: -f, --logfile
 .. cmdoption:: -f, --logfile
 
 
-    Path to log file. If no logfile is specified, `stderr` is used.
+    Path to log file.  If no logfile is specified, `stderr` is used.
 
 
 .. cmdoption:: -l, --loglevel
 .. cmdoption:: -l, --loglevel
 
 
@@ -39,7 +39,7 @@
 
 
     Optional file used to store the process pid.
     Optional file used to store the process pid.
 
 
-    The program will not start if this file already exists
+    The program won't start if this file already exists
     and the pid is still alive.
     and the pid is still alive.
 
 
 .. cmdoption:: --uid
 .. cmdoption:: --uid
@@ -72,6 +72,8 @@ from celery.bin.base import Command, daemon_options
 
 
 __all__ = ['beat']
 __all__ = ['beat']
 
 
+HELP = __doc__
+
 
 
 class beat(Command):
 class beat(Command):
     """Start the beat periodic task scheduler.
     """Start the beat periodic task scheduler.
@@ -81,17 +83,20 @@ class beat(Command):
 
 
             $ celery beat -l info
             $ celery beat -l info
             $ celery beat -s /var/run/celery/beat-schedule --detach
             $ celery beat -s /var/run/celery/beat-schedule --detach
-            $ celery beat -S djcelery.schedulers.DatabaseScheduler
+            $ celery beat -S django
+
+    The last example requires the :pypi:`django-celery-beat` extension
+    package found on PyPI.
     """
     """
-    doc = __doc__
+
+    doc = HELP
     enable_config_from_cmdline = True
     enable_config_from_cmdline = True
     supports_args = False
     supports_args = False
 
 
     def run(self, detach=False, logfile=None, pidfile=None, uid=None,
     def run(self, detach=False, logfile=None, pidfile=None, uid=None,
-            gid=None, umask=None, working_directory=None, **kwargs):
+            gid=None, umask=None, workdir=None, **kwargs):
         if not detach:
         if not detach:
             maybe_drop_privileges(uid=uid, gid=gid)
             maybe_drop_privileges(uid=uid, gid=gid)
-        workdir = working_directory
         kwargs.pop('app', None)
         kwargs.pop('app', None)
         beat = partial(self.app.Beat,
         beat = partial(self.app.Beat,
                        logfile=logfile, pidfile=pidfile, **kwargs)
                        logfile=logfile, pidfile=pidfile, **kwargs)
@@ -102,15 +107,22 @@ class beat(Command):
         else:
         else:
             return beat().run()
             return beat().run()
 
 
-    def prepare_arguments(self, parser):
+    def add_arguments(self, parser):
         c = self.app.conf
         c = self.app.conf
-        parser.add_option('--detach', action='store_true')
-        parser.add_option('-s', '--schedule', default=c.beat_schedule_filename)
-        parser.add_option('--max-interval', type='float')
-        parser.add_option('-S', '--scheduler', dest='scheduler_cls')
-        parser.add_option('-l', '--loglevel', default='WARN')
+        bopts = parser.add_argument_group('Beat Options')
+        bopts.add_argument('--detach', action='store_true', default=False)
+        bopts.add_argument(
+            '-s', '--schedule', default=c.beat_schedule_filename)
+        bopts.add_argument('--max-interval', type=float)
+        bopts.add_argument('-S', '--scheduler')
+        bopts.add_argument('-l', '--loglevel', default='WARN')
+
         daemon_options(parser, default_pidfile='celerybeat.pid')
         daemon_options(parser, default_pidfile='celerybeat.pid')
-        parser.add_options(self.app.user_options['beat'])
+
+        user_options = self.app.user_options['beat']
+        if user_options:
+            uopts = parser.add_argument_group('User Options')
+            self.add_compat_options(uopts, user_options)
 
 
 
 
 def main(app=None):
 def main(app=None):
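
The 'User Options' group added above is filled from
``app.user_options['beat']``, so applications can attach their own arguments
to the beat command.  A sketch with a made-up option name:

    from celery import Celery

    app = Celery('proj')

    def add_beat_arguments(parser):
        # '--flush-every' is hypothetical, shown only to illustrate the hook.
        parser.add_argument('--flush-every', type=float, default=5.0,
                            help='example user-defined beat option.')

    app.user_options['beat'].add(add_beat_arguments)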

+ 292 - 233
celery/bin/celery.py

@@ -13,7 +13,7 @@ and usually parsed before command-specific arguments.
 
 
 .. cmdoption:: -A, --app
 .. cmdoption:: -A, --app
 
 
-    app instance to use (e.g. ``module.attr_name``)
+    app instance to use (e.g., ``module.attr_name``)
 
 
 .. cmdoption:: -b, --broker
 .. cmdoption:: -b, --broker
 
 
@@ -50,13 +50,13 @@ in any command that also has a `--detach` option.
 
 
 .. cmdoption:: -f, --logfile
 .. cmdoption:: -f, --logfile
 
 
-    Path to log file. If no logfile is specified, `stderr` is used.
+    Path to log file.  If no logfile is specified, `stderr` is used.
 
 
 .. cmdoption:: --pidfile
 .. cmdoption:: --pidfile
 
 
     Optional file used to store the process pid.
     Optional file used to store the process pid.
 
 
-    The program will not start if this file already exists
+    The program won't start if this file already exists
     and the pid is still alive.
     and the pid is still alive.
 
 
 .. cmdoption:: --uid
 .. cmdoption:: --uid
@@ -262,7 +262,8 @@ import sys
 from functools import partial
 from functools import partial
 from importlib import import_module
 from importlib import import_module
 
 
-from kombu.utils import json
+from kombu.utils.json import dumps, loads
+from kombu.utils.objects import cached_property
 
 
 from celery.app import defaults
 from celery.app import defaults
 from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
 from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
@@ -270,10 +271,10 @@ from celery.utils import term
 from celery.utils import text
 from celery.utils import text
 from celery.utils.functional import pass1
 from celery.utils.functional import pass1
 from celery.utils.text import str_to_list
 from celery.utils.text import str_to_list
-from celery.utils.timeutils import maybe_iso8601
+from celery.utils.time import maybe_iso8601
 
 
 # Cannot use relative imports here due to a Windows issue (#1111).
 # Cannot use relative imports here due to a Windows issue (#1111).
-from celery.bin.base import Command, Option, Extensions
+from celery.bin.base import Command, Extensions
 
 
 # Import commands from other modules
 # Import commands from other modules
 from celery.bin.amqp import amqp
 from celery.bin.amqp import amqp
@@ -316,6 +317,7 @@ def determine_exit_status(ret):
 
 
 
 
 def main(argv=None):
 def main(argv=None):
+    """Start celery umbrella command."""
     # Fix for setuptools generated scripts, so that it will
     # Fix for setuptools generated scripts, so that it will
     # work with multiprocessing fork emulation.
     # work with multiprocessing fork emulation.
     # (see multiprocessing.forking.get_preparation_data())
     # (see multiprocessing.forking.get_preparation_data())
@@ -336,15 +338,10 @@ class multi(Command):
     fake_app = True
     fake_app = True
     requires_app = False
     requires_app = False
 
 
-    def get_options(self):
-        pass
-
     def run_from_argv(self, prog_name, argv, command=None):
     def run_from_argv(self, prog_name, argv, command=None):
         from celery.bin.multi import MultiTool
         from celery.bin.multi import MultiTool
-        multi = MultiTool(quiet=self.quiet, no_color=self.no_color)
-        return multi.execute_from_commandline(
-            [command] + argv, prog_name,
-        )
+        cmd = MultiTool(quiet=self.quiet, no_color=self.no_color)
+        return cmd.execute_from_commandline([command] + argv)
 
 
 
 
 class list_(Command):
 class list_(Command):
@@ -379,7 +376,7 @@ class list_(Command):
         available = ', '.join(topics)
         available = ', '.join(topics)
         if not what:
         if not what:
             raise self.UsageError(
             raise self.UsageError(
-                'You must specify one of {0}'.format(available))
+                'Missing argument, specify one of: {0}'.format(available))
         if what not in topics:
         if what not in topics:
             raise self.UsageError(
             raise self.UsageError(
                 'unknown topic {0!r} (choose one of: {1})'.format(
                 'unknown topic {0!r} (choose one of: {1})'.format(
@@ -401,18 +398,30 @@ class call(Command):
 
 
     args = '<task_name>'
     args = '<task_name>'
 
 
-    option_list = Command.option_list + (
-        Option('--args', '-a', help='positional arguments (json).'),
-        Option('--kwargs', '-k', help='keyword arguments (json).'),
-        Option('--eta', help='scheduled time (ISO-8601).'),
-        Option('--countdown', type='float',
-               help='eta in seconds from now (float/int).'),
-        Option('--expires', help='expiry time (ISO-8601/float/int).'),
-        Option('--serializer', default='json', help='defaults to json.'),
-        Option('--queue', help='custom queue name.'),
-        Option('--exchange', help='custom exchange name.'),
-        Option('--routing-key', help='custom routing key.'),
-    )
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Calling Options')
+        group.add_argument('--args', '-a',
+                           help='positional arguments (json).')
+        group.add_argument('--kwargs', '-k',
+                           help='keyword arguments (json).')
+        group.add_argument('--eta',
+                           help='scheduled time (ISO-8601).')
+        group.add_argument(
+            '--countdown', type=float,
+            help='eta in seconds from now (float/int).',
+        )
+        group.add_argument(
+            '--expires',
+            help='expiry time (ISO-8601/float/int).',
+        ),
+        group.add_argument(
+            '--serializer', default='json',
+            help='defaults to json.'),
+
+        ropts = parser.add_argument_group('Routing Options')
+        ropts.add_argument('--queue', help='custom queue name.')
+        ropts.add_argument('--exchange', help='custom exchange name.')
+        ropts.add_argument('--routing-key', help='custom routing key.')
 
 
     def run(self, name, *_, **kwargs):
     def run(self, name, *_, **kwargs):
         self._send_task(name, **kwargs)
         self._send_task(name, **kwargs)
@@ -420,10 +429,10 @@ class call(Command):
     def _send_task(self, name, args=None, kwargs=None,
     def _send_task(self, name, args=None, kwargs=None,
                    countdown=None, serializer=None,
                    countdown=None, serializer=None,
                    queue=None, exchange=None, routing_key=None,
                    queue=None, exchange=None, routing_key=None,
-                   eta=None, expires=None):
+                   eta=None, expires=None, **_):
         # arguments
         # arguments
-        args = json.loads(args) if isinstance(args, str) else args
-        kwargs = json.loads(kwargs) if isinstance(kwargs, str) else kwargs
+        args = loads(args) if isinstance(args, str) else args
+        kwargs = loads(kwargs) if isinstance(kwargs, str) else kwargs
 
 
         # expires can be int/float.
         # expires can be int/float.
         try:
         try:
@@ -466,14 +475,20 @@ class purge(Command):
     fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.'
     fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.'
     fmt_empty = 'No messages purged from {qnum} {queues}'
     fmt_empty = 'No messages purged from {qnum} {queues}'
 
 
-    option_list = Command.option_list + (
-        Option('--force', '-f', action='store_true',
-               help='Do not prompt for verification'),
-        Option('--queues', '-Q', default=[],
-               help='Comma separated list of queue names to purge.'),
-        Option('--exclude-queues', '-X', default=[],
-               help='Comma separated list of queues names not to purge.')
-    )
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Purging Options')
+        group.add_argument(
+            '--force', '-f', action='store_true', default=False,
+            help="Don't prompt for verification",
+        )
+        group.add_argument(
+            '--queues', '-Q', default=[],
+            help='Comma separated list of queue names to purge.',
+        )
+        group.add_argument(
+            '--exclude-queues', '-X', default=[],
+            help='Comma separated list of queue names not to purge.',
+        )
 
 
     def run(self, force=False, queues=None, exclude_queues=None, **kwargs):
     def run(self, force=False, queues=None, exclude_queues=None, **kwargs):
         queues = set(str_to_list(queues or []))
         queues = set(str_to_list(queues or []))
@@ -519,11 +534,15 @@ class result(Command):
 
 
     args = '<task_id>'
     args = '<task_id>'
 
 
-    option_list = Command.option_list + (
-        Option('--task', '-t', help='name of task (if custom backend)'),
-        Option('--traceback', action='store_true',
-               help='show traceback instead'),
-    )
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Result Options')
+        group.add_argument(
+            '--task', '-t', help='name of task (if custom backend)',
+        )
+        group.add_argument(
+            '--traceback', action='store_true', default=False,
+            help='show traceback instead',
+        )
 
 
     def run(self, task_id, *args, **kwargs):
     def run(self, task_id, *args, **kwargs):
         result_cls = self.app.AsyncResult
         result_cls = self.app.AsyncResult
@@ -532,71 +551,69 @@ class result(Command):
 
 
         if task:
         if task:
             result_cls = self.app.tasks[task].AsyncResult
             result_cls = self.app.tasks[task].AsyncResult
-        result = result_cls(task_id)
+        task_result = result_cls(task_id)
         if traceback:
         if traceback:
-            value = result.traceback
+            value = task_result.traceback
         else:
         else:
-            value = result.get()
+            value = task_result.get()
         self.out(self.pretty(value)[1])
         self.out(self.pretty(value)[1])
 
 
 
 
 class _RemoteControl(Command):
 class _RemoteControl(Command):
 
 
     name = None
     name = None
-    choices = None
     leaf = False
     leaf = False
-
-    option_list = Command.option_list + (
-        Option('--timeout', '-t', type='float',
-               help='Timeout in seconds (float) waiting for reply'),
-        Option('--destination', '-d',
-               help='Comma separated list of destination node names.'),
-        Option('--json', '-j', action='store_true',
-               help='Use json as output format.'),
-    )
+    control_group = None
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
         self.show_body = kwargs.pop('show_body', True)
         self.show_body = kwargs.pop('show_body', True)
         self.show_reply = kwargs.pop('show_reply', True)
         self.show_reply = kwargs.pop('show_reply', True)
         super().__init__(*args, **kwargs)
         super().__init__(*args, **kwargs)
 
 
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Remote Control Options')
+        group.add_argument(
+            '--timeout', '-t', type=float,
+            help='Timeout in seconds (float) waiting for reply',
+        )
+        group.add_argument(
+            '--destination', '-d',
+            help='Comma separated list of destination node names.')
+        group.add_argument(
+            '--json', '-j', action='store_true', default=False,
+            help='Use json as output format.',
+        )
+
     @classmethod
     @classmethod
-    def get_command_info(self, command,
-                         indent=0, prefix='', color=None, help=False):
+    def get_command_info(cls, command,
+                         indent=0, prefix='', color=None,
+                         help=False, app=None, choices=None):
+        if choices is None:
+            choices = cls._choices_by_group(app)
+        meta = choices[command]
         if help:
         if help:
-            help = '|' + text.indent(self.choices[command][1], indent + 4)
+            help = '|' + text.indent(meta.help, indent + 4)
         else:
         else:
             help = None
             help = None
-        try:
-            # see if it uses args.
-            meth = getattr(self, command)
-            return text.join([
-                '|' + text.indent('{0}{1} {2}'.format(
-                    prefix, color(command), meth.__doc__), indent),
-                help,
-            ])
-
-        except AttributeError:
-            return text.join([
-                '|' + text.indent(prefix + str(color(command)), indent), help,
-            ])
+        return text.join([
+            '|' + text.indent('{0}{1} {2}'.format(
+                prefix, color(command), meta.signature or ''), indent),
+            help,
+        ])
 
 
     @classmethod
     @classmethod
-    def list_commands(self, indent=0, prefix='', color=None, help=False):
+    def list_commands(cls, indent=0, prefix='',
+                      color=None, help=False, app=None):
+        choices = cls._choices_by_group(app)
         color = color if color else lambda x: x
         color = color if color else lambda x: x
         prefix = prefix + ' ' if prefix else ''
         prefix = prefix + ' ' if prefix else ''
-        return '\n'.join(self.get_command_info(c, indent, prefix, color, help)
-                         for c in sorted(self.choices))
-
-    @property
-    def epilog(self):
-        return '\n'.join([
-            '[Commands]',
-            self.list_commands(indent=4, help=True)
-        ])
+        return '\n'.join(
+            cls.get_command_info(c, indent, prefix, color, help,
+                                 app=app, choices=choices)
+            for c in sorted(choices))
 
 
     def usage(self, command):
     def usage(self, command):
-        return '%prog {0} [options] {1} <command> [arg1 .. argN]'.format(
+        return '%(prog)s {0} [options] {1} <command> [arg1 .. argN]'.format(
             command, self.args)
             command, self.args)
 
 
     def call(self, *args, **kwargs):
     def call(self, *args, **kwargs):
@@ -605,45 +622,107 @@ class _RemoteControl(Command):
     def run(self, *args, **kwargs):
     def run(self, *args, **kwargs):
         if not args:
         if not args:
             raise self.UsageError(
             raise self.UsageError(
-                'Missing {0.name} method. See --help'.format(self))
+                'Missing {0.name} method.  See --help'.format(self))
         return self.do_call_method(args, **kwargs)
         return self.do_call_method(args, **kwargs)
 
 
-    def do_call_method(self, args, **kwargs):
+    def _ensure_fanout_supported(self):
+        with self.app.connection_for_write() as conn:
+            if not conn.supports_exchange_type('fanout'):
+                raise self.Error(
+                    'Broadcast not supported by transport {0!r}'.format(
+                        conn.info()['transport']))
+
+    def do_call_method(self, args,
+                       timeout=None, destination=None, json=False, **kwargs):
         method = args[0]
         method = args[0]
         if method == 'help':
         if method == 'help':
             raise self.Error("Did you mean '{0.name} --help'?".format(self))
             raise self.Error("Did you mean '{0.name} --help'?".format(self))
-        if method not in self.choices:
+        try:
+            meta = self.choices[method]
+        except KeyError:
             raise self.UsageError(
             raise self.UsageError(
                 'Unknown {0.name} method {1}'.format(self, method))
                 'Unknown {0.name} method {1}'.format(self, method))
 
 
-        if self.app.connection_for_write().transport.driver_type == 'sql':
-            raise self.Error('Broadcast not supported by SQL broker transport')
+        self._ensure_fanout_supported()
 
 
-        output_json = kwargs.get('json')
-        destination = kwargs.get('destination')
-        timeout = kwargs.get('timeout') or self.choices[method][0]
+        timeout = timeout or meta.default_timeout
         if destination and isinstance(destination, str):
         if destination and isinstance(destination, str):
             destination = [dest.strip() for dest in destination.split(',')]
             destination = [dest.strip() for dest in destination.split(',')]
 
 
-        handler = getattr(self, method, self.call)
-
-        callback = None if output_json else self.say_remote_command_reply
-
-        replies = handler(method, *args[1:], timeout=timeout,
-                          destination=destination,
-                          callback=callback)
+        replies = self.call(
+            method,
+            arguments=self.compile_arguments(meta, method, args[1:]),
+            timeout=timeout,
+            destination=destination,
+            callback=None if json else self.say_remote_command_reply,
+        )
         if not replies:
         if not replies:
             raise self.Error('No nodes replied within time constraint.',
             raise self.Error('No nodes replied within time constraint.',
                              status=EX_UNAVAILABLE)
                              status=EX_UNAVAILABLE)
-        if output_json:
-            self.out(json.dumps(replies))
+        if json:
+            self.out(dumps(replies))
         return replies
         return replies
 
 
+    def compile_arguments(self, meta, method, args):
+        args = list(args)
+        kw = {}
+        if meta.args:
+            kw.update({
+                k: v for k, v in self._consume_args(meta, method, args)
+            })
+        if meta.variadic:
+            kw.update({meta.variadic: args})
+        if not kw and args:
+            raise self.Error(
+                'Command {0!r} takes no arguments.'.format(method),
+                status=EX_USAGE)
+        return kw or {}
+
+    def _consume_args(self, meta, method, args):
+        i = 0
+        try:
+            for i, arg in enumerate(args):
+                try:
+                    name, typ = meta.args[i]
+                except IndexError:
+                    if meta.variadic:
+                        break
+                    raise self.Error(
+                        'Command {0!r} takes arguments: {1}'.format(
+                            method, meta.signature),
+                        status=EX_USAGE)
+                else:
+                    yield name, typ(arg) if typ is not None else arg
+        finally:
+            args[:] = args[i:]
+
+    @classmethod
+    def _choices_by_group(cls, app):
+        from celery.worker.control import Panel
+        # need to import task modules for custom user-remote control commands.
+        app.loader.import_default_modules()
+
+        return {
+            name: info for name, info in Panel.meta.items()
+            if info.type == cls.control_group and info.visible
+        }
+
+    @cached_property
+    def choices(self):
+        return self._choices_by_group(self.app)
+
+    @property
+    def epilog(self):
+        return '\n'.join([
+            '[Commands]',
+            self.list_commands(indent=4, help=True, app=self.app)
+        ])
+
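
The rewritten ``_RemoteControl`` drops the per-method wrappers (``rate_limit``, ``pool_grow``, and friends) and instead looks each command up in the worker's ``Panel.meta`` registry, turning positional command-line arguments into keyword arguments from the declared ``(name, type)`` pairs. The standalone sketch below only illustrates that conversion; the ``controller_meta`` tuple and the ``rate_limit`` spec are made up here, the real metadata comes from ``celery.worker.control.Panel.meta``:

.. code-block:: python

    # Hedged sketch of the idea behind compile_arguments()/_consume_args().
    from collections import namedtuple

    controller_meta = namedtuple('controller_meta', ('args', 'variadic'))
    rate_limit_meta = controller_meta(
        args=[('task_name', str), ('rate_limit', str)], variadic=None)

    def compile_arguments(meta, argv):
        # Pair each positional argument with its declared (name, type).
        kwargs = {}
        for (name, typ), value in zip(meta.args, argv):
            kwargs[name] = typ(value) if typ is not None else value
        return kwargs

    print(compile_arguments(rate_limit_meta, ['proj.add', '10/m']))
    # -> {'task_name': 'proj.add', 'rate_limit': '10/m'}

The real ``compile_arguments()`` above additionally funnels any remaining arguments into the command's ``variadic`` parameter and rejects unexpected extras with ``EX_USAGE``.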
 
 
 class inspect(_RemoteControl):
 class inspect(_RemoteControl):
     """Inspect the worker at runtime.
     """Inspect the worker at runtime.
 
 
-    Availability: RabbitMQ (AMQP) transport.
+    Availability: RabbitMQ (AMQP) and Redis transports.
 
 
     Examples:
     Examples:
         .. code-block:: console
         .. code-block:: console
@@ -654,43 +733,17 @@ class inspect(_RemoteControl):
     """
     """
 
 
     name = 'inspect'
     name = 'inspect'
+    control_group = 'inspect'
 
 
-    choices = {
-        'active': (1.0, 'dump active tasks (being processed)'),
-        'active_queues': (1.0, 'dump queues being consumed from'),
-        'clock': (1.0, 'get value of logical clock'),
-        'conf': (1.0, 'dump worker configuration'),
-        'memdump': (1.0, 'dump memory samples (requires psutil)'),
-        'memsample': (1.0, 'sample memory (requires psutil)'),
-        'objgraph': (60.0, 'create object graph (requires objgraph)'),
-        'ping': (0.2, 'ping worker(s)'),
-        'query_task': (1.0, 'query for task information by id'),
-        'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'),
-        'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'),
-        'stats': (1.0, 'dump worker statistics'),
-        'registered': (1.0, 'dump of registered tasks'),
-        'report': (1.0, 'get bugreport info'),
-        'revoked': (1.0, 'dump of revoked task ids'),
-    }
-
-    def call(self, method, *args, **options):
-        i = self.app.control.inspect(**options)
-        return getattr(i, method)(*args)
-
-    def objgraph(self, type_='Request', *args, **kwargs):
-        return self.call('objgraph', type_, **kwargs)
-
-    def conf(self, with_defaults=False, *args, **kwargs):
-        return self.call('conf', with_defaults, **kwargs)
-
-    def query_task(self, *ids, **options):
-        return self.call('query_task', ids, **options)
+    def call(self, method, arguments, **options):
+        return self.app.control.inspect(**options)._request(
+            method, **arguments)
 
 
 
 
 class control(_RemoteControl):
 class control(_RemoteControl):
     """Workers remote control.
     """Workers remote control.
 
 
-    Availability: RabbitMQ (AMQP) transport.
+    Availability: RabbitMQ (AMQP), Redis, and MongoDB transports.
 
 
     Examples:
     Examples:
         .. code-block:: console
         .. code-block:: console
@@ -706,49 +759,11 @@ class control(_RemoteControl):
     """
     """
 
 
     name = 'control'
     name = 'control'
+    control_group = 'control'
 
 
-    choices = {
-        'enable_events': (1.0, 'tell worker(s) to enable events'),
-        'disable_events': (1.0, 'tell worker(s) to disable events'),
-        'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'),
-        'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'),
-        'rate_limit': (
-            1.0, 'tell worker(s) to modify the rate limit for a task type'),
-        'time_limit': (
-            1.0, 'tell worker(s) to modify the time limit for a task type.'),
-        'pool_grow': (1.0, 'start more pool processes'),
-        'pool_shrink': (1.0, 'use less pool processes'),
-    }
-
-    def call(self, method, *args, **options):
-        return getattr(self.app.control, method)(*args, reply=True, **options)
-
-    def pool_grow(self, method, n=1, **kwargs):
-        """[N=1]"""
-        return self.call(method, int(n), **kwargs)
-
-    def pool_shrink(self, method, n=1, **kwargs):
-        """[N=1]"""
-        return self.call(method, int(n), **kwargs)
-
-    def rate_limit(self, method, task_name, rate_limit, **kwargs):
-        """<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)>"""
-        return self.call(method, task_name, rate_limit, **kwargs)
-
-    def time_limit(self, method, task_name, soft, hard=None, **kwargs):
-        """<task_name> <soft_secs> [hard_secs]"""
-        return self.call(method, task_name,
-                         float(soft), float(hard), **kwargs)
-
-    def add_consumer(self, method, queue, exchange=None,
-                     exchange_type='direct', routing_key=None, **kwargs):
-        """<queue> [exchange [type [routing_key]]]"""
-        return self.call(method, queue, exchange,
-                         exchange_type, routing_key, **kwargs)
-
-    def cancel_consumer(self, method, queue, **kwargs):
-        """<queue>"""
-        return self.call(method, queue, **kwargs)
+    def call(self, method, arguments, **options):
+        return self.app.control.broadcast(
+            method, arguments=arguments, reply=True, **options)
 
 
 
 
 class status(Command):
 class status(Command):
@@ -784,27 +799,39 @@ class migrate(Command):
         .. code-block:: console
         .. code-block:: console
 
 
             $ celery migrate amqp://A.example.com amqp://guest@B.example.com//
             $ celery migrate amqp://A.example.com amqp://guest@B.example.com//
+            $ celery migrate redis://localhost amqp://guest@localhost//
     """
     """
 
 
     args = '<source_url> <dest_url>'
     args = '<source_url> <dest_url>'
-
-    option_list = Command.option_list + (
-        Option('--limit', '-n', type='int',
-               help='Number of tasks to consume (int)'),
-        Option('--timeout', '-t', type='float', default=1.0,
-               help='Timeout in seconds (float) waiting for tasks'),
-        Option('--ack-messages', '-a', action='store_true',
-               help='Ack messages from source broker.'),
-        Option('--tasks', '-T',
-               help='List of task names to filter on.'),
-        Option('--queues', '-Q',
-               help='List of queues to migrate.'),
-        Option('--forever', '-F', action='store_true',
-               help='Continually migrate tasks until killed.'),
-    )
-
     progress_fmt = MIGRATE_PROGRESS_FMT
     progress_fmt = MIGRATE_PROGRESS_FMT
 
 
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Migration Options')
+        group.add_argument(
+            '--limit', '-n', type=int,
+            help='Number of tasks to consume (int)',
+        )
+        group.add_argument(
+            '--timeout', '-t', type=float, default=1.0,
+            help='Timeout in seconds (float) waiting for tasks',
+        )
+        group.add_argument(
+            '--ack-messages', '-a', action='store_true', default=False,
+            help='Ack messages from source broker.',
+        )
+        group.add_argument(
+            '--tasks', '-T',
+            help='List of task names to filter on.',
+        )
+        group.add_argument(
+            '--queues', '-Q',
+            help='List of queues to migrate.',
+        )
+        group.add_argument(
+            '--forever', '-F', action='store_true', default=False,
+            help='Continually migrate tasks until killed.',
+        )
+
     def on_migrate_task(self, state, body, message):
     def on_migrate_task(self, state, body, message):
         self.out(self.progress_fmt.format(state=state, body=body))
         self.out(self.progress_fmt.format(state=state, body=body))
 
 
@@ -829,26 +856,45 @@ class shell(Command):  # pragma: no cover
         - all registered tasks.
         - all registered tasks.
     """
     """
 
 
-    option_list = Command.option_list + (
-        Option('--ipython', '-I',
-               action='store_true', dest='force_ipython',
-               help='force iPython.'),
-        Option('--bpython', '-B',
-               action='store_true', dest='force_bpython',
-               help='force bpython.'),
-        Option('--python', '-P',
-               action='store_true', dest='force_python',
-               help='force default Python shell.'),
-        Option('--without-tasks', '-T', action='store_true',
-               help="don't add tasks to locals."),
-        Option('--eventlet', action='store_true',
-               help='use eventlet.'),
-        Option('--gevent', action='store_true', help='use gevent.'),
-    )
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Shell Options')
+        group.add_argument(
+            '--ipython', '-I',
+            action='store_true', help='force IPython.', default=False,
+        )
+        group.add_argument(
+            '--bpython', '-B',
+            action='store_true', help='force bpython.', default=False,
+        )
+        group.add_argument(
+            '--python',
+            action='store_true', default=False,
+            help='force default Python shell.',
+        )
+        group.add_argument(
+            '--without-tasks', '-T',
+            action='store_true', default=False,
+            help="don't add tasks to locals.",
+        )
+        group.add_argument(
+            '--eventlet',
+            action='store_true', default=False,
+            help='use eventlet.',
+        )
+        group.add_argument(
+            '--gevent', action='store_true', default=False,
+            help='use gevent.',
+        )
+
+    def run(self, *args, **kwargs):
+        if args:
+            raise self.UsageError(
+                'shell command does not take arguments: {0}'.format(args))
+        return self._run(**kwargs)
 
 
-    def run(self, force_ipython=False, force_bpython=False,
-            force_python=False, without_tasks=False, eventlet=False,
-            gevent=False, **kwargs):
+    def _run(self, ipython=False, bpython=False,
+             python=False, without_tasks=False, eventlet=False,
+             gevent=False, **kwargs):
         sys.path.insert(0, os.getcwd())
         sys.path.insert(0, os.getcwd())
         if eventlet:
         if eventlet:
             import_module('celery.concurrency.eventlet')
             import_module('celery.concurrency.eventlet')
@@ -857,6 +903,8 @@ class shell(Command):  # pragma: no cover
         import celery
         import celery
         import celery.task.base
         import celery.task.base
         self.app.loader.import_default_modules()
         self.app.loader.import_default_modules()
+
+        # pylint: disable=attribute-defined-outside-init
         self.locals = {
         self.locals = {
             'app': self.app,
             'app': self.app,
             'celery': self.app,
             'celery': self.app,
@@ -877,11 +925,11 @@ class shell(Command):  # pragma: no cover
                 if not task.name.startswith('celery.')
                 if not task.name.startswith('celery.')
             })
             })
 
 
-        if force_python:
+        if python:
             return self.invoke_fallback_shell()
             return self.invoke_fallback_shell()
-        elif force_bpython:
+        elif bpython:
             return self.invoke_bpython_shell()
             return self.invoke_bpython_shell()
-        elif force_ipython:
+        elif ipython:
             return self.invoke_ipython_shell()
             return self.invoke_ipython_shell()
         return self.invoke_default_shell()
         return self.invoke_default_shell()
 
 
@@ -950,19 +998,25 @@ class shell(Command):  # pragma: no cover
 class upgrade(Command):
 class upgrade(Command):
     """Perform upgrade between versions."""
     """Perform upgrade between versions."""
 
 
-    option_list = Command.option_list + (
-        Option('--django', action='store_true',
-               help='Upgrade Django project'),
-        Option('--compat', action='store_true',
-               help='Maintain backwards compatibility'),
-        Option('--no-backup', action='store_true',
-               help='Dont backup original files'),
-    )
-
     choices = {'settings'}
     choices = {'settings'}
 
 
+    def add_arguments(self, parser):
+        group = parser.add_argument_group('Upgrading Options')
+        group.add_argument(
+            '--django', action='store_true', default=False,
+            help='Upgrade Django project',
+        )
+        group.add_argument(
+            '--compat', action='store_true', default=False,
+            help='Maintain backwards compatibility',
+        )
+        group.add_argument(
+            '--no-backup', action='store_true', default=False,
+            help="Don't backup original files",
+        )
+
     def usage(self, command):
     def usage(self, command):
-        return '%prog <command> settings [filename] [options]'
+        return '%(prog)s <command> settings [filename] [options]'
 
 
     def run(self, *args, **kwargs):
     def run(self, *args, **kwargs):
         try:
         try:
@@ -999,7 +1053,7 @@ class upgrade(Command):
         return lines
         return lines
 
 
     def _to_new_key(self, line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
     def _to_new_key(self, line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
-        # sort by length to avoid e.g. broker_transport overriding
+        # sort by length to avoid, for example, broker_transport overriding
         # broker_transport_options.
         # broker_transport_options.
         for old_key in reversed(sorted(source, key=lambda x: len(x))):
         for old_key in reversed(sorted(source, key=lambda x: len(x))):
             new_line = line.replace(old_key, keyfilter(source[old_key]))
             new_line = line.replace(old_key, keyfilter(source[old_key]))
@@ -1018,13 +1072,14 @@ class help(Command):
     """Show help screen and exit."""
     """Show help screen and exit."""
 
 
     def usage(self, command):
     def usage(self, command):
-        return '%prog <command> [options] {0.args}'.format(self)
+        return '%(prog)s <command> [options] {0.args}'.format(self)
 
 
     def run(self, *args, **kwargs):
     def run(self, *args, **kwargs):
         self.parser.print_help()
         self.parser.print_help()
         self.out(HELP.format(
         self.out(HELP.format(
             prog_name=self.prog_name,
             prog_name=self.prog_name,
-            commands=CeleryCommand.list_commands(colored=self.colored),
+            commands=CeleryCommand.list_commands(
+                colored=self.colored, app=self.app),
         ))
         ))
 
 
         return EX_USAGE
         return EX_USAGE
@@ -1039,6 +1094,7 @@ class report(Command):
 
 
 
 
 class CeleryCommand(Command):
 class CeleryCommand(Command):
+    """The ``celery`` umbrella command."""
 
 
     commands = {
     commands = {
         'amqp': amqp,
         'amqp': amqp,
@@ -1110,7 +1166,7 @@ class CeleryCommand(Command):
                 elif value.startswith('-'):
                 elif value.startswith('-'):
                     # we eat the next argument even though we don't know
                     # we eat the next argument even though we don't know
                     # if this option takes an argument or not.
                     # if this option takes an argument or not.
-                    # instead we will assume what is the command name in the
+                    # instead we'll guess what the command name is in the
                     # return statements below.
                     # return statements below.
                     try:
                     try:
                         nxt = argv[index + 1]
                         nxt = argv[index + 1]
@@ -1142,7 +1198,7 @@ class CeleryCommand(Command):
             return sys.modules['__main__'].__file__
             return sys.modules['__main__'].__file__
         return name
         return name
 
 
-    def handle_argv(self, prog_name, argv):
+    def handle_argv(self, prog_name, argv, **kwargs):
         self.prog_name = self.prepare_prog_name(prog_name)
         self.prog_name = self.prepare_prog_name(prog_name)
         argv = self._relocate_args_from_start(argv)
         argv = self._relocate_args_from_start(argv)
         _, argv = self.prepare_args(None, argv)
         _, argv = self.prepare_args(None, argv)
@@ -1164,29 +1220,32 @@ class CeleryCommand(Command):
             sys.exit(EX_FAILURE)
             sys.exit(EX_FAILURE)
 
 
     @classmethod
     @classmethod
-    def get_command_info(self, command, indent=0, color=None, colored=None):
+    def get_command_info(cls, command, indent=0,
+                         color=None, colored=None, app=None):
         colored = term.colored() if colored is None else colored
         colored = term.colored() if colored is None else colored
         colored = colored.names[color] if color else lambda x: x
         colored = colored.names[color] if color else lambda x: x
-        obj = self.commands[command]
+        obj = cls.commands[command]
         cmd = 'celery {0}'.format(colored(command))
         cmd = 'celery {0}'.format(colored(command))
         if obj.leaf:
         if obj.leaf:
             return '|' + text.indent(cmd, indent)
             return '|' + text.indent(cmd, indent)
         return text.join([
         return text.join([
             ' ',
             ' ',
             '|' + text.indent('{0} --help'.format(cmd), indent),
             '|' + text.indent('{0} --help'.format(cmd), indent),
-            obj.list_commands(indent, 'celery {0}'.format(command), colored),
+            obj.list_commands(indent, 'celery {0}'.format(command), colored,
+                              app=app),
         ])
         ])
 
 
     @classmethod
     @classmethod
-    def list_commands(self, indent=0, colored=None):
+    def list_commands(cls, indent=0, colored=None, app=None):
         colored = term.colored() if colored is None else colored
         colored = term.colored() if colored is None else colored
         white = colored.white
         white = colored.white
         ret = []
         ret = []
-        for cls, commands, color in command_classes:
+        for command_cls, commands, color in command_classes:
             ret.extend([
             ret.extend([
-                text.indent('+ {0}: '.format(white(cls)), indent),
+                text.indent('+ {0}: '.format(white(command_cls)), indent),
                 '\n'.join(
                 '\n'.join(
-                    self.get_command_info(command, indent + 4, color, colored)
+                    cls.get_command_info(
+                        command, indent + 4, color, colored, app=app)
                     for command in commands),
                     for command in commands),
                 ''
                 ''
             ])
             ])
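
The change above converts every subcommand in ``celery/bin/celery.py`` from optparse-style ``option_list`` tuples to an ``add_arguments(self, parser)`` hook that receives an argparse parser and groups related flags with ``parser.add_argument_group()``. A minimal sketch of the pattern using only the standard library; the command class and options here are hypothetical stand-ins for Celery's actual ``Command`` machinery:

.. code-block:: python

    # Standalone sketch; nothing beyond the standard library is assumed.
    import argparse

    class example_command:
        """Hypothetical subcommand following the add_arguments() convention."""

        def add_arguments(self, parser):
            group = parser.add_argument_group('Calling Options')
            group.add_argument('--args', '-a',
                               help='positional arguments (json).')
            group.add_argument('--countdown', type=float,
                               help='eta in seconds from now (float/int).')

    parser = argparse.ArgumentParser(prog='celery call')
    example_command().add_arguments(parser)
    print(parser.parse_args(['-a', '[2, 2]', '--countdown', '10']))

In Celery itself the parser is created by the ``Command`` base class; each subcommand only fills in ``add_arguments()``.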

+ 50 - 86
celery/bin/celeryd_detach.py

@@ -5,12 +5,11 @@ Using :func:`os.execv` as forking and multiprocessing
 leads to weird issues (it was a long time ago now, but it
 leads to weird issues (it was a long time ago now, but it
 could have something to do with the threading mutex bug)
 could have something to do with the threading mutex bug)
 """
 """
+import argparse
 import celery
 import celery
 import os
 import os
 import sys
 import sys
 
 
-from optparse import OptionParser, BadOptionError
-
 from celery.platforms import EX_FAILURE, detached
 from celery.platforms import EX_FAILURE, detached
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.utils.nodenames import default_nodename, node_format
 from celery.utils.nodenames import default_nodename, node_format
@@ -25,19 +24,20 @@ C_FAKEFORK = os.environ.get('C_FAKEFORK')
 
 
 
 
 def detach(path, argv, logfile=None, pidfile=None, uid=None,
 def detach(path, argv, logfile=None, pidfile=None, uid=None,
-           gid=None, umask=None, working_directory=None, fake=False, app=None,
+           gid=None, umask=None, workdir=None, fake=False, app=None,
            executable=None, hostname=None):
            executable=None, hostname=None):
+    """Detach program by argv."""
     hostname = default_nodename(hostname)
     hostname = default_nodename(hostname)
     logfile = node_format(logfile, hostname)
     logfile = node_format(logfile, hostname)
     pidfile = node_format(pidfile, hostname)
     pidfile = node_format(pidfile, hostname)
     fake = 1 if C_FAKEFORK else fake
     fake = 1 if C_FAKEFORK else fake
-    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
+    with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
                   after_forkers=False):
                   after_forkers=False):
         try:
         try:
             if executable is not None:
             if executable is not None:
                 path = executable
                 path = executable
             os.execv(path, [path] + argv)
             os.execv(path, [path] + argv)
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             if app is None:
             if app is None:
                 from celery import current_app
                 from celery import current_app
                 app = current_app
                 app = current_app
@@ -48,64 +48,10 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
         return EX_FAILURE
         return EX_FAILURE
 
 
 
 
-class PartialOptionParser(OptionParser):
-
-    def __init__(self, *args, **kwargs):
-        self.leftovers = []
-        OptionParser.__init__(self, *args, **kwargs)
-
-    def _process_long_opt(self, rargs, values):
-        arg = rargs.pop(0)
-
-        if '=' in arg:
-            opt, next_arg = arg.split('=', 1)
-            rargs.insert(0, next_arg)
-            had_explicit_value = True
-        else:
-            opt = arg
-            had_explicit_value = False
-
-        try:
-            opt = self._match_long_opt(opt)
-            option = self._long_opt.get(opt)
-        except BadOptionError:
-            option = None
-
-        if option:
-            if option.takes_value():
-                nargs = option.nargs
-                if len(rargs) < nargs:
-                    if nargs == 1:
-                        self.error('{0} requires an argument'.format(opt))
-                    else:
-                        self.error('{0} requires {1} arguments'.format(
-                            opt, nargs))
-                elif nargs == 1:
-                    value = rargs.pop(0)
-                else:
-                    value = tuple(rargs[0:nargs])
-                    del rargs[0:nargs]
-
-            elif had_explicit_value:
-                self.error('{0} option does not take a value'.format(opt))
-            else:
-                value = None
-            option.process(opt, value, values, self)
-        else:
-            self.leftovers.append(arg)
-
-    def _process_short_opts(self, rargs, values):
-        arg = rargs[0]
-        try:
-            OptionParser._process_short_opts(self, rargs, values)
-        except BadOptionError:
-            self.leftovers.append(arg)
-            if rargs and not rargs[0][0] == '-':
-                self.leftovers.append(rargs.pop(0))
-
-
 class detached_celeryd:
 class detached_celeryd:
-    usage = '%prog [options] [celeryd options]'
+    """Daemonize the celery worker process."""
+
+    usage = '%(prog)s [options] [celeryd options]'
     version = celery.VERSION_BANNER
     version = celery.VERSION_BANNER
     description = ('Detaches Celery worker nodes.  See `celery worker --help` '
     description = ('Detaches Celery worker nodes.  See `celery worker --help` '
                    'for the list of supported worker arguments.')
                    'for the list of supported worker arguments.')
@@ -117,52 +63,70 @@ class detached_celeryd:
         self.app = app
         self.app = app
 
 
     def create_parser(self, prog_name):
     def create_parser(self, prog_name):
-        p = PartialOptionParser(
+        parser = argparse.ArgumentParser(
             prog=prog_name,
             prog=prog_name,
             usage=self.usage,
             usage=self.usage,
             description=self.description,
             description=self.description,
-            version=self.version,
         )
         )
-        self.prepare_arguments(p)
-        return p
+        self._add_version_argument(parser)
+        self.add_arguments(parser)
+        return parser
+
+    def _add_version_argument(self, parser):
+        parser.add_argument(
+            '--version', action='version', version=self.version,
+        )
 
 
     def parse_options(self, prog_name, argv):
     def parse_options(self, prog_name, argv):
         parser = self.create_parser(prog_name)
         parser = self.create_parser(prog_name)
-        options, values = parser.parse_args(argv)
+        options, leftovers = parser.parse_known_args(argv)
         if options.logfile:
         if options.logfile:
-            parser.leftovers.append('--logfile={0}'.format(options.logfile))
+            leftovers.append('--logfile={0}'.format(options.logfile))
         if options.pidfile:
         if options.pidfile:
-            parser.leftovers.append('--pidfile={0}'.format(options.pidfile))
+            leftovers.append('--pidfile={0}'.format(options.pidfile))
         if options.hostname:
         if options.hostname:
-            parser.leftovers.append('--hostname={0}'.format(options.hostname))
-        return options, values, parser.leftovers
+            leftovers.append('--hostname={0}'.format(options.hostname))
+        return options, leftovers
 
 
     def execute_from_commandline(self, argv=None):
     def execute_from_commandline(self, argv=None):
         argv = sys.argv if argv is None else argv
         argv = sys.argv if argv is None else argv
-        config = []
-        seen_cargs = 0
-        for arg in argv:
-            if seen_cargs:
-                config.append(arg)
-            else:
-                if arg == '--':
-                    seen_cargs = 1
-                    config.append(arg)
         prog_name = os.path.basename(argv[0])
         prog_name = os.path.basename(argv[0])
-        options, values, leftovers = self.parse_options(prog_name, argv[1:])
+        config, argv = self._split_command_line_config(argv)
+        options, leftovers = self.parse_options(prog_name, argv[1:])
         sys.exit(detach(
         sys.exit(detach(
             app=self.app, path=self.execv_path,
             app=self.app, path=self.execv_path,
             argv=self.execv_argv + leftovers + config,
             argv=self.execv_argv + leftovers + config,
             **vars(options)
             **vars(options)
         ))
         ))
 
 
-    def prepare_arguments(self, parser):
+    def _split_command_line_config(self, argv):
+        config = list(self._extract_command_line_config(argv))
+        try:
+            argv = argv[:argv.index('--')]
+        except ValueError:
+            pass
+        return config, argv
+
+    def _extract_command_line_config(self, argv):
+        # Extracts command-line config appearing after '--':
+        #    celery worker -l info -- worker.prefetch_multiplier=10
+        # This is to make sure argparse doesn't gobble it up.
+        seen_cargs = 0
+        for arg in argv:
+            if seen_cargs:
+                yield arg
+            else:
+                if arg == '--':
+                    seen_cargs = 1
+                    yield arg
+
+    def add_arguments(self, parser):
         daemon_options(parser, default_pidfile='celeryd.pid')
         daemon_options(parser, default_pidfile='celeryd.pid')
-        parser.add_option('--workdir', default=None, dest='working_directory')
-        parser.add_option('-n', '--hostname')
-        parser.add_option(
+        parser.add_argument('--workdir', default=None)
+        parser.add_argument('-n', '--hostname')
+        parser.add_argument(
             '--fake',
             '--fake',
-            default=False, action='store_true', dest='fake',
+            action='store_true', default=False,
             help="Don't fork (for debugging purposes)",
             help="Don't fork (for debugging purposes)",
         )
         )
 
 

+ 28 - 17
celery/bin/events.py

@@ -25,7 +25,7 @@
 
 
 .. cmdoption:: -r, --maxrate
 .. cmdoption:: -r, --maxrate
 
 
-    Camera: Optional shutter rate limit (e.g. 10/m).
+    Camera: Optional shutter rate limit (e.g., 10/m).
 
 
 .. cmdoption:: -l, --loglevel
 .. cmdoption:: -l, --loglevel
 
 
@@ -34,13 +34,13 @@
 
 
 .. cmdoption:: -f, --logfile
 .. cmdoption:: -f, --logfile
 
 
-    Path to log file. If no logfile is specified, `stderr` is used.
+    Path to log file.  If no logfile is specified, `stderr` is used.
 
 
 .. cmdoption:: --pidfile
 .. cmdoption:: --pidfile
 
 
     Optional file used to store the process pid.
     Optional file used to store the process pid.
 
 
-    The program will not start if this file already exists
+    The program won't start if this file already exists
     and the pid is still alive.
     and the pid is still alive.
 
 
 .. cmdoption:: --uid
 .. cmdoption:: --uid
@@ -74,6 +74,8 @@ from celery.bin.base import Command, daemon_options
 
 
 __all__ = ['events']
 __all__ = ['events']
 
 
+HELP = __doc__
+
 
 
 class events(Command):
 class events(Command):
     """Event-stream utilities.
     """Event-stream utilities.
@@ -96,13 +98,14 @@ class events(Command):
             $ celery events -d
             $ celery events -d
             $ celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info
             $ celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info
     """
     """
-    doc = __doc__
+
+    doc = HELP
     supports_args = False
     supports_args = False
 
 
     def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
     def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
             loglevel='INFO', logfile=None, prog_name='celery events',
             loglevel='INFO', logfile=None, prog_name='celery events',
             pidfile=None, uid=None, gid=None, umask=None,
             pidfile=None, uid=None, gid=None, umask=None,
-            working_directory=None, detach=False, **kwargs):
+            workdir=None, detach=False, **kwargs):
         self.prog_name = prog_name
         self.prog_name = prog_name
 
 
         if dump:
         if dump:
@@ -112,7 +115,7 @@ class events(Command):
                                   loglevel=loglevel, logfile=logfile,
                                   loglevel=loglevel, logfile=logfile,
                                   pidfile=pidfile, uid=uid, gid=gid,
                                   pidfile=pidfile, uid=uid, gid=gid,
                                   umask=umask,
                                   umask=umask,
-                                  working_directory=working_directory,
+                                  workdir=workdir,
                                   detach=detach)
                                   detach=detach)
         return self.run_evtop()
         return self.run_evtop()
 
 
@@ -127,10 +130,9 @@ class events(Command):
         return evtop(app=self.app)
         return evtop(app=self.app)
 
 
     def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
     def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
-                  gid=None, umask=None, working_directory=None,
+                  gid=None, umask=None, workdir=None,
                   detach=False, **kwargs):
                   detach=False, **kwargs):
         from celery.events.snapshot import evcam
         from celery.events.snapshot import evcam
-        workdir = working_directory
         self.set_process_status('cam')
         self.set_process_status('cam')
         kwargs['app'] = self.app
         kwargs['app'] = self.app
         cam = partial(evcam, camera,
         cam = partial(evcam, camera,
@@ -147,16 +149,25 @@ class events(Command):
         info = '{0} {1}'.format(info, strargv(sys.argv))
         info = '{0} {1}'.format(info, strargv(sys.argv))
         return set_process_title(prog, info=info)
         return set_process_title(prog, info=info)
 
 
-    def prepare_arguments(self, parser):
-        parser.add_option('-d', '--dump', action='store_true')
-        parser.add_option('-c', '--camera')
-        parser.add_option('--detach', action='store_true')
-        parser.add_option('-F', '--frequency', '--freq',
-                          type='float', default=1.0)
-        parser.add_option('-r', '--maxrate')
-        parser.add_option('-l', '--loglevel', default='INFO')
+    def add_arguments(self, parser):
+        dopts = parser.add_argument_group('Dumper')
+        dopts.add_argument('-d', '--dump', action='store_true', default=False)
+
+        copts = parser.add_argument_group('Snapshot')
+        copts.add_argument('-c', '--camera')
+        copts.add_argument('--detach', action='store_true', default=False)
+        copts.add_argument('-F', '--frequency', '--freq',
+                           type=float, default=1.0)
+        copts.add_argument('-r', '--maxrate')
+        copts.add_argument('-l', '--loglevel', default='INFO')
+
         daemon_options(parser, default_pidfile='celeryev.pid')
         daemon_options(parser, default_pidfile='celeryev.pid')
-        parser.add_options(self.app.user_options['events'])
+
+        user_options = self.app.user_options['events']
+        if user_options:
+            self.add_compat_options(
+                parser.add_argument_group('User Options'),
+                user_options)
 
 
 
 
 def main():
 def main():
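
One easy-to-miss detail in the conversion above: optparse accepted the type as a string (``type='float'``) while argparse expects a one-argument callable, so the shutter-frequency option now passes the ``float`` builtin directly. A quick sketch using the same option strings as the ``-F`` flag above:

.. code-block:: python

    # argparse's type= is a converter callable; the first long option string
    # ('--frequency') decides the attribute name on the parsed namespace.
    import argparse

    parser = argparse.ArgumentParser(prog='celery events')
    parser.add_argument('-F', '--frequency', '--freq', type=float, default=1.0)

    args = parser.parse_args(['--freq', '0.5'])
    assert args.frequency == 0.5 and isinstance(args.frequency, float)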

+ 39 - 22
celery/bin/graph.py

@@ -13,6 +13,8 @@ __all__ = ['graph']
 
 
 
 
 class graph(Command):
 class graph(Command):
+    """The ``celery graph`` command."""
+
     args = """<TYPE> [arguments]
     args = """<TYPE> [arguments]
             .....  bootsteps [worker] [consumer]
             .....  bootsteps [worker] [consumer]
             .....  workers   [enumerate]
             .....  workers   [enumerate]
@@ -30,12 +32,12 @@ class graph(Command):
         worker = self.app.WorkController()
         worker = self.app.WorkController()
         include = {arg.lower() for arg in args or ['worker', 'consumer']}
         include = {arg.lower() for arg in args or ['worker', 'consumer']}
         if 'worker' in include:
         if 'worker' in include:
-            graph = worker.blueprint.graph
+            worker_graph = worker.blueprint.graph
             if 'consumer' in include:
             if 'consumer' in include:
                 worker.blueprint.connect_with(worker.consumer.blueprint)
                 worker.blueprint.connect_with(worker.consumer.blueprint)
         else:
         else:
-            graph = worker.consumer.blueprint.graph
-        graph.to_dot(self.stdout)
+            worker_graph = worker.consumer.blueprint.graph
+        worker_graph.to_dot(self.stdout)
 
 
     def workers(self, *args, **kwargs):
     def workers(self, *args, **kwargs):
 
 
@@ -67,14 +69,21 @@ class graph(Command):
                 return self.label()
                 return self.label()
 
 
         class Thread(Node):
         class Thread(Node):
-            scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow',
-                      'shape': 'oval', 'fontsize': 10, 'width': 0.3,
-                      'color': 'black'}
+            scheme = {
+                'fillcolor': 'lightcyan4',
+                'fontcolor': 'yellow',
+                'shape': 'oval',
+                'fontsize': 10,
+                'width': 0.3,
+                'color': 'black',
+            }
 
 
             def __init__(self, label, **kwargs):
             def __init__(self, label, **kwargs):
-                self._label = 'thr-{0}'.format(next(tids))
                 self.real_label = label
                 self.real_label = label
-                self.pos = 0
+                super(Thread, self).__init__(
+                    label='thr-{0}'.format(next(tids)),
+                    pos=0,
+                )
 
 
         class Formatter(GraphFormatter):
         class Formatter(GraphFormatter):
 
 
@@ -108,16 +117,24 @@ class graph(Command):
             pass
             pass
 
 
         class Backend(Node):
         class Backend(Node):
-            scheme = {'shape': 'folder', 'width': 2,
-                      'height': 1, 'color': 'black',
-                      'fillcolor': 'peachpuff3', 'color': 'peachpuff4'}
+            scheme = {
+                'shape': 'folder',
+                'width': 2,
+                'height': 1,
+                'color': 'black',
+                'fillcolor': 'peachpuff3',
+            }
 
 
             def label(self):
             def label(self):
                 return generic_label(self) if generic else self._label
                 return generic_label(self) if generic else self._label
 
 
         class Broker(Node):
         class Broker(Node):
-            scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3',
-                      'color': 'cadetblue4', 'height': 1}
+            scheme = {
+                'shape': 'circle',
+                'fillcolor': 'cadetblue3',
+                'color': 'cadetblue4',
+                'height': 1,
+            }
 
 
             def label(self):
             def label(self):
                 return generic_label(self) if generic else self._label
                 return generic_label(self) if generic else self._label
@@ -163,24 +180,24 @@ class graph(Command):
         broker = Broker(args.get(
         broker = Broker(args.get(
             'broker', self.app.connection_for_read().as_uri()))
             'broker', self.app.connection_for_read().as_uri()))
         backend = Backend(backend) if backend else None
         backend = Backend(backend) if backend else None
-        graph = DependencyGraph(formatter=Formatter())
-        graph.add_arc(broker)
+        deps = DependencyGraph(formatter=Formatter())
+        deps.add_arc(broker)
         if backend:
         if backend:
-            graph.add_arc(backend)
+            deps.add_arc(backend)
         curworker = [0]
         curworker = [0]
         for i, worker in enumerate(workers):
         for i, worker in enumerate(workers):
             worker = Worker(worker, pos=i)
             worker = Worker(worker, pos=i)
-            graph.add_arc(worker)
-            graph.add_edge(worker, broker)
+            deps.add_arc(worker)
+            deps.add_edge(worker, broker)
             if backend:
             if backend:
-                graph.add_edge(worker, backend)
+                deps.add_edge(worker, backend)
             threads = threads_for.get(worker._label)
             threads = threads_for.get(worker._label)
             if threads:
             if threads:
                 for thread in threads:
                 for thread in threads:
                     thread = Thread(thread)
                     thread = Thread(thread)
-                    graph.add_arc(thread)
-                    graph.add_edge(thread, worker)
+                    deps.add_arc(thread)
+                    deps.add_edge(thread, worker)
 
 
             curworker[0] += 1
             curworker[0] += 1
 
 
-        graph.to_dot(self.stdout)
+        deps.to_dot(self.stdout)
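
The renames above (``worker_graph``, ``deps``) only avoid shadowing the ``graph`` command class; the flow itself is unchanged: build a ``DependencyGraph``, register vertices with ``add_arc()``, connect them with ``add_edge()`` and emit DOT. A hedged sketch, assuming the ``DependencyGraph`` API from ``celery.utils.graph`` and that the default formatter accepts plain string nodes (the real command wraps its nodes in small ``Node`` classes):

.. code-block:: python

    # Rough illustration only; the node names are made up.
    import sys

    from celery.utils.graph import DependencyGraph

    deps = DependencyGraph()
    deps.add_arc('broker')                          # vertex with no edges yet
    deps.add_arc('worker1@example.com')
    deps.add_edge('worker1@example.com', 'broker')  # worker depends on broker
    deps.to_dot(sys.stdout)                         # writes graphviz DOT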

+ 7 - 5
celery/bin/logtool.py

@@ -12,11 +12,11 @@ from .base import Command
 
 
 __all__ = ['logtool']
 __all__ = ['logtool']
 
 
-RE_LOG_START = re.compile('^\[\d\d\d\d\-\d\d-\d\d ')
-RE_TASK_RECEIVED = re.compile('.+?\] Received')
-RE_TASK_READY = re.compile('.+?\] Task')
-RE_TASK_INFO = re.compile('.+?([\w\.]+)\[(.+?)\].+')
-RE_TASK_RESULT = re.compile('.+?[\w\.]+\[.+?\] (.+)')
+RE_LOG_START = re.compile(r'^\[\d\d\d\d\-\d\d-\d\d ')
+RE_TASK_RECEIVED = re.compile(r'.+?\] Received')
+RE_TASK_READY = re.compile(r'.+?\] Task')
+RE_TASK_INFO = re.compile(r'.+?([\w\.]+)\[(.+?)\].+')
+RE_TASK_RESULT = re.compile(r'.+?[\w\.]+\[.+?\] (.+)')
 
 
 REPORT_FORMAT = """
 REPORT_FORMAT = """
 Report
 Report
@@ -116,6 +116,8 @@ class Audit:
 
 
 
 
 class logtool(Command):
 class logtool(Command):
+    """The ``celery logtool`` command."""
+
     args = """<action> [arguments]
     args = """<action> [arguments]
             .....  stats      [file1|- [file2 [...]]]
             .....  stats      [file1|- [file2 [...]]]
             .....  traces     [file1|- [file2 [...]]]
             .....  traces     [file1|- [file2 [...]]]
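
Apart from the new docstring, the ``logtool.py`` change only adds the ``r''`` prefix to the patterns: the regular expressions themselves are identical, but in a plain string literal ``'\d'`` is an invalid escape sequence that triggers a deprecation warning on recent Pythons (and is slated to become an error), whereas a raw string hands the backslashes to ``re`` untouched. A quick check against a made-up log line:

.. code-block:: python

    # Same pattern as RE_LOG_START above, now spelled as a raw string.
    import re

    RE_LOG_START = re.compile(r'^\[\d\d\d\d\-\d\d-\d\d ')

    line = '[2016-06-20 13:37:00,000: INFO/MainProcess] Received task: proj.add[42]'
    assert RE_LOG_START.match(line)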

+ 258 - 453
celery/bin/multi.py

@@ -20,7 +20,7 @@ Examples
 
 
 
 
     $ # You need to add the same arguments when you restart,
     $ # You need to add the same arguments when you restart,
-    $ # as these are not persisted anywhere.
+    $ # as these aren't persisted anywhere.
     $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
     $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
                                      --logfile=/var/run/celery/%n%I.log
                                      --logfile=/var/run/celery/%n%I.log
 
 
@@ -94,32 +94,21 @@ Examples
     celery worker -n xuzzy@myhost -c 3
     celery worker -n xuzzy@myhost -c 3
 """
 """
 import os
 import os
-import shlex
 import signal
 import signal
 import sys
 import sys
 
 
-from collections import OrderedDict, defaultdict, namedtuple
-from functools import partial
-from subprocess import Popen
-from time import sleep
+from functools import wraps
 
 
-from kombu.utils.encoding import from_utf8
 from kombu.utils.objects import cached_property
 from kombu.utils.objects import cached_property
 
 
 from celery import VERSION_BANNER
 from celery import VERSION_BANNER
-from celery.platforms import Pidfile, IS_WINDOWS
+from celery.apps.multi import Cluster, MultiParser, NamespacedOptionParser
+from celery.platforms import EX_FAILURE, EX_OK, signals
 from celery.utils import term
 from celery.utils import term
-from celery.utils.nodenames import (
-    gethostname, host_format, node_format, nodesplit,
-)
 from celery.utils.text import pluralize
 from celery.utils.text import pluralize
 
 
 __all__ = ['MultiTool']
 __all__ = ['MultiTool']
 
 
-SIGNAMES = {sig for sig in dir(signal)
-            if sig.startswith('SIG') and '_' not in sig}
-SIGMAP = {getattr(signal, name): name for name in SIGNAMES}
-
 USAGE = """\
 USAGE = """\
 usage: {prog_name} start <node1 node2 nodeN|range> [worker options]
 usage: {prog_name} start <node1 node2 nodeN|range> [worker options]
        {prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)]
        {prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)]
@@ -139,34 +128,116 @@ additional options (must appear after command name):
     * --verbose:    Show more output.
     * --verbose:    Show more output.
     * --no-color:   Don't display colors.
     * --no-color:   Don't display colors.
 """
 """
-CELERY_EXE = 'celery'
-
-multi_args_t = namedtuple(
-    'multi_args_t', ('name', 'argv', 'expander', 'namespace'),
-)
 
 
 
 
 def main():
 def main():
     sys.exit(MultiTool().execute_from_commandline(sys.argv))
     sys.exit(MultiTool().execute_from_commandline(sys.argv))
 
 
 
 
-def celery_exe(*args):
-    return ' '.join((CELERY_EXE,) + args)
+def splash(fun):
+
+    @wraps(fun)
+    def _inner(self, *args, **kwargs):
+        self.splash()
+        return fun(self, *args, **kwargs)
+    return _inner
+
+
+def using_cluster(fun):
+
+    @wraps(fun)
+    def _inner(self, *argv, **kwargs):
+        return fun(self, self.cluster_from_argv(argv), **kwargs)
+    return _inner
+
 
 
+def using_cluster_and_sig(fun):
 
 
-class MultiTool:
-    retcode = 0  # Final exit code.
+    @wraps(fun)
+    def _inner(self, *argv, **kwargs):
+        p, cluster = self._cluster_from_argv(argv)
+        sig = self._find_sig_argument(p)
+        return fun(self, cluster, sig, **kwargs)
+    return _inner
 
 
-    def __init__(self, env=None, fh=None, quiet=False, verbose=False,
-                 no_color=False, nosplash=False, stdout=None, stderr=None):
-        """fh is an old alias to stdout."""
-        self.stdout = self.fh = stdout or fh or sys.stdout
+
+class TermLogger:
+
+    splash_text = 'celery multi v{version}'
+    splash_context = {'version': VERSION_BANNER}
+
+    #: Final exit code.
+    retcode = 0
+
+    def setup_terminal(self, stdout, stderr,
+                       nosplash=False, quiet=False, verbose=False,
+                       no_color=False, **kwargs):
+        self.stdout = stdout or sys.stdout
         self.stderr = stderr or sys.stderr
         self.stderr = stderr or sys.stderr
-        self.env = env
         self.nosplash = nosplash
         self.nosplash = nosplash
         self.quiet = quiet
         self.quiet = quiet
         self.verbose = verbose
         self.verbose = verbose
         self.no_color = no_color
         self.no_color = no_color
+
+    def ok(self, m, newline=True, file=None):
+        self.say(m, newline=newline, file=file)
+        return EX_OK
+
+    def say(self, m, newline=True, file=None):
+        print(m, file=file or self.stdout, end='\n' if newline else '')
+
+    def carp(self, m, newline=True, file=None):
+        return self.say(m, newline, file or self.stderr)
+
+    def error(self, msg=None):
+        if msg:
+            self.carp(msg)
+        self.usage()
+        return EX_FAILURE
+
+    def info(self, msg, newline=True):
+        if self.verbose:
+            self.note(msg, newline=newline)
+
+    def note(self, msg, newline=True):
+        if not self.quiet:
+            self.say(str(msg), newline=newline)
+
+    @splash
+    def usage(self):
+        self.say(USAGE.format(prog_name=self.prog_name))
+
+    def splash(self):
+        if not self.nosplash:
+            self.note(self.colored.cyan(
+                self.splash_text.format(**self.splash_context)))
+
+    @cached_property
+    def colored(self):
+        return term.colored(enabled=not self.no_color)
+
+
+class MultiTool(TermLogger):
+    """The ``celery multi`` program."""
+
+    MultiParser = MultiParser
+    OptionParser = NamespacedOptionParser
+
+    reserved_options = [
+        ('--nosplash', 'nosplash'),
+        ('--quiet', 'quiet'),
+        ('-q', 'quiet'),
+        ('--verbose', 'verbose'),
+        ('--no-color', 'no_color'),
+    ]
+
+    def __init__(self, env=None, cmd=None,
+                 fh=None, stdout=None, stderr=None, **kwargs):
+        # fh is an old alias to stdout.
+        self.env = env
+        self.cmd = cmd
+        self.setup_terminal(stdout or fh, stderr, **kwargs)
+        self.fh = self.stdout
         self.prog_name = 'celery multi'
         self.commands = {
             'start': self.start,
@@ -182,259 +253,193 @@ class MultiTool:
             'help': self.help,
         }

-    def execute_from_commandline(self, argv, cmd='celery worker'):
-        argv = list(argv)   # don't modify callers argv.
-
+    def execute_from_commandline(self, argv, cmd=None):
         # Reserve the --nosplash|--quiet|-q/--verbose options.
-        if '--nosplash' in argv:
-            self.nosplash = argv.pop(argv.index('--nosplash'))
-        if '--quiet' in argv:
-            self.quiet = argv.pop(argv.index('--quiet'))
-        if '-q' in argv:
-            self.quiet = argv.pop(argv.index('-q'))
-        if '--verbose' in argv:
-            self.verbose = argv.pop(argv.index('--verbose'))
-        if '--no-color' in argv:
-            self.no_color = argv.pop(argv.index('--no-color'))
-
+        argv = self._handle_reserved_options(argv)
+        self.cmd = cmd if cmd is not None else self.cmd
         self.prog_name = os.path.basename(argv.pop(0))
-        if not argv or argv[0][0] == '-':
+
+        if not self.validate_arguments(argv):
             return self.error()
 
 
+        return self.call_command(argv[0], argv[1:])
+
+    def validate_arguments(self, argv):
+        return argv and argv[0][0] != '-'
+
+    def call_command(self, command, argv):
         try:
-            self.commands[argv[0]](argv[1:], cmd)
+            return self.commands[command](*argv) or EX_OK
         except KeyError:
-            self.error('Invalid command: {0}'.format(argv[0]))
+            return self.error('Invalid command: {0}'.format(command))
+
+    def _handle_reserved_options(self, argv):
+        argv = list(argv)  # don't modify callers argv.
+        for arg, attr in self.reserved_options:
+            if arg in argv:
+                setattr(self, attr, bool(argv.pop(argv.index(arg))))
+        return argv
+
+    @splash
+    @using_cluster
+    def start(self, cluster):
+        self.note('> Starting nodes...')
+        return int(any(cluster.start()))
 
 
-        return self.retcode
+    @splash
+    @using_cluster_and_sig
+    def stop(self, cluster, sig, **kwargs):
+        return cluster.stop(sig=sig, **kwargs)
 
 
-    def say(self, m, newline=True, file=None):
-        print(m, file=file or self.stdout, end='\n' if newline else '')
+    @splash
+    @using_cluster_and_sig
+    def stopwait(self, cluster, sig, **kwargs):
+        return cluster.stopwait(sig=sig, **kwargs)
+    stop_verify = stopwait  # compat
 
 
-    def carp(self, m, newline=True, file=None):
-        return self.say(m, newline, file or self.stderr)
+    @splash
+    @using_cluster_and_sig
+    def restart(self, cluster, sig, **kwargs):
+        return int(any(cluster.restart(sig=sig, **kwargs)))
 
 
-    def names(self, argv, cmd):
-        p = NamespacedOptionParser(argv)
-        self.say('\n'.join(
-            n.name for n in multi_args(p, cmd)),
-        )
+    @using_cluster
+    def names(self, cluster):
+        self.say('\n'.join(n.name for n in cluster))
 
 
-    def get(self, argv, cmd):
-        wanted = argv[0]
-        p = NamespacedOptionParser(argv[1:])
-        for node in multi_args(p, cmd):
-            if node.name == wanted:
-                self.say(' '.join(node.argv))
-                return
-
-    def show(self, argv, cmd):
-        p = NamespacedOptionParser(argv)
-        self.with_detacher_default_options(p)
-        self.say('\n'.join(
-            ' '.join([sys.executable] + n.argv) for n in multi_args(p, cmd)),
-        )
+    def get(self, wanted, *argv):
+        try:
+            node = self.cluster_from_argv(argv).find(wanted)
+        except KeyError:
+            return EX_FAILURE
+        else:
+            return self.ok(' '.join(node.argv))
+
+    @using_cluster
+    def show(self, cluster):
+        return self.ok('\n'.join(
+            ' '.join(node.argv_with_executable)
+            for node in cluster
+        ))
+
+    @splash
+    @using_cluster
+    def kill(self, cluster):
+        return cluster.kill()
+
+    def expand(self, template, *argv):
+        return self.ok('\n'.join(
+            node.expander(template)
+            for node in self.cluster_from_argv(argv)
+        ))
+
+    def help(self, *argv):
+        self.say(__doc__)
 
 
-    def start(self, argv, cmd):
-        self.splash()
-        p = NamespacedOptionParser(argv)
-        self.with_detacher_default_options(p)
-        retcodes = []
-        self.note('> Starting nodes...')
-        for node in multi_args(p, cmd):
-            self.note('\t> {0}: '.format(node.name), newline=False)
-            retcode = self.waitexec(node.argv, path=p.options['--executable'])
-            self.note(retcode and self.FAILED or self.OK)
-            retcodes.append(retcode)
-        self.retcode = int(any(retcodes))
-
-    def with_detacher_default_options(self, p):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
-        _setdefaultopt(p.options, ['--logfile', '-f'], '%n%I.log')
-        p.options.setdefault(
-            '--cmd',
-            '-m {0}'.format(celery_exe('worker', '--detach')),
+    def _find_sig_argument(self, p, default=signal.SIGTERM):
+        args = p.args[len(p.values):]
+        for arg in reversed(args):
+            if len(arg) == 2 and arg[0] == '-':
+                try:
+                    return int(arg[1])
+                except ValueError:
+                    pass
+            if arg[0] == '-':
+                try:
+                    return signals.signum(arg[1:])
+                except (AttributeError, TypeError):
+                    pass
+        return default
+
+    def _nodes_from_argv(self, argv, cmd=None):
+        cmd = cmd if cmd is not None else self.cmd
+        p = self.OptionParser(argv)
+        p.parse()
+        return p, self.MultiParser(cmd=cmd).parse(p)
+
+    def cluster_from_argv(self, argv, cmd=None):
+        _, cluster = self._cluster_from_argv(argv, cmd=cmd)
+        return cluster
+
+    def _cluster_from_argv(self, argv, cmd=None):
+        p, nodes = self._nodes_from_argv(argv, cmd=cmd)
+        return p, self.Cluster(list(nodes), cmd=cmd)
+
+    def Cluster(self, nodes, cmd=None):
+        return Cluster(
+            nodes,
+            cmd=cmd,
+            env=self.env,
+            on_stopping_preamble=self.on_stopping_preamble,
+            on_send_signal=self.on_send_signal,
+            on_still_waiting_for=self.on_still_waiting_for,
+            on_still_waiting_progress=self.on_still_waiting_progress,
+            on_still_waiting_end=self.on_still_waiting_end,
+            on_node_start=self.on_node_start,
+            on_node_restart=self.on_node_restart,
+            on_node_shutdown_ok=self.on_node_shutdown_ok,
+            on_node_status=self.on_node_status,
+            on_node_signal_dead=self.on_node_signal_dead,
+            on_node_signal=self.on_node_signal,
+            on_node_down=self.on_node_down,
+            on_child_spawn=self.on_child_spawn,
+            on_child_signalled=self.on_child_signalled,
+            on_child_failure=self.on_child_failure,
         )
-        _setdefaultopt(p.options, ['--executable'], sys.executable)
-
-    def signal_node(self, nodename, pid, sig):
-        try:
-            os.kill(pid, sig)
-        except ProcessLookupError:
-            self.note('Could not signal {0} ({1}): No such process'.format(
-                nodename, pid))
-            return False
-        return True
-
-    def node_alive(self, pid):
-        try:
-            os.kill(pid, 0)
-        except ProcessLookupError:
-            return False
-        return True
-
-    def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None,
-                       callback=None):
-        if not nodes:
-            return
-        P = set(nodes)
-
-        def on_down(node):
-            P.discard(node)
-            if callback:
-                callback(*node)
 
 
+    def on_stopping_preamble(self, nodes):
         self.note(self.colored.blue('> Stopping nodes...'))
-        for node in list(P):
-            if node in P:
-                nodename, _, pid = node
-                self.note('\t> {0}: {1} -> {2}'.format(
-                    nodename, SIGMAP[sig][3:], pid))
-                if not self.signal_node(nodename, pid, sig):
-                    on_down(node)
-
-        def note_waiting():
-            left = len(P)
-            if left:
-                pids = ', '.join(str(pid) for _, _, pid in P)
-                self.note(self.colored.blue(
-                    '> Waiting for {0} {1} -> {2}...'.format(
-                        left, pluralize(left, 'node'), pids)), newline=False)
-
-        if retry:
-            note_waiting()
-            its = 0
-            while P:
-                for node in P:
-                    its += 1
-                    self.note('.', newline=False)
-                    nodename, _, pid = node
-                    if not self.node_alive(pid):
-                        self.note('\n\t> {0}: {1}'.format(nodename, self.OK))
-                        on_down(node)
-                        note_waiting()
-                        break
-                if P and not its % len(P):
-                    sleep(float(retry))
-            self.note('')
-
-    def getpids(self, p, cmd, callback=None):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
-
-        nodes = []
-        for node in multi_args(p, cmd):
-            try:
-                pidfile_template = _getopt(
-                    p.namespaces[node.namespace], ['--pidfile', '-p'],
-                )
-            except KeyError:
-                pidfile_template = _getopt(p.options, ['--pidfile', '-p'])
-            pid = None
-            pidfile = node.expander(pidfile_template)
-            try:
-                pid = Pidfile(pidfile).read_pid()
-            except ValueError:
-                pass
-            if pid:
-                nodes.append((node.name, tuple(node.argv), pid))
-            else:
-                self.note('> {0.name}: {1}'.format(node, self.DOWN))
-                if callback:
-                    callback(node.name, node.argv, pid)
-
-        return nodes
-
-    def kill(self, argv, cmd):
-        self.splash()
-        p = NamespacedOptionParser(argv)
-        for nodename, _, pid in self.getpids(p, cmd):
-            self.note('Killing node {0} ({1})'.format(nodename, pid))
-            self.signal_node(nodename, pid, signal.SIGKILL)
 
 
-    def stop(self, argv, cmd, retry=None, callback=None):
-        self.splash()
-        p = NamespacedOptionParser(argv)
-        return self._stop_nodes(p, cmd, retry=retry, callback=callback)
+    def on_send_signal(self, node, sig):
+        self.note('\t> {0.name}: {1} -> {0.pid}'.format(node, sig))
 
 
-    def _stop_nodes(self, p, cmd, retry=None, callback=None):
-        restargs = p.args[len(p.values):]
-        self.shutdown_nodes(self.getpids(p, cmd, callback=callback),
-                            sig=findsig(restargs),
-                            retry=retry,
-                            callback=callback)
+    def on_still_waiting_for(self, nodes):
+        num_left = len(nodes)
+        if num_left:
+            self.note(self.colored.blue(
+                '> Waiting for {0} {1} -> {2}...'.format(
+                    num_left, pluralize(num_left, 'node'),
+                    ', '.join(str(node.pid) for node in nodes)),
+            ), newline=False)
 
 
-    def restart(self, argv, cmd):
-        self.splash()
-        p = NamespacedOptionParser(argv)
-        self.with_detacher_default_options(p)
-        retvals = []
+    def on_still_waiting_progress(self, nodes):
+        self.note('.', newline=False)
 
 
-        def on_node_shutdown(nodename, argv, pid):
-            self.note(self.colored.blue(
-                '> Restarting node {0}: '.format(nodename)), newline=False)
-            retval = self.waitexec(argv, path=p.options['--executable'])
-            self.note(retval and self.FAILED or self.OK)
-            retvals.append(retval)
+    def on_still_waiting_end(self):
+        self.note('')
 
 
-        self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown)
-        self.retval = int(any(retvals))
+    def on_node_signal_dead(self, node):
+        self.note(
+            'Could not signal {0.name} ({0.pid}): No such process'.format(
+                node))
 
 
-    def stopwait(self, argv, cmd):
-        self.splash()
-        p = NamespacedOptionParser(argv)
-        self.with_detacher_default_options(p)
-        return self._stop_nodes(p, cmd, retry=2)
-    stop_verify = stopwait  # compat
+    def on_node_start(self, node):
+        self.note('\t> {0.name}: '.format(node), newline=False)
 
 
-    def expand(self, argv, cmd=None):
-        template = argv[0]
-        p = NamespacedOptionParser(argv[1:])
-        for node in multi_args(p, cmd):
-            self.say(node.expander(template))
+    def on_node_restart(self, node):
+        self.note(self.colored.blue(
+            '> Restarting node {0.name}: '.format(node)), newline=False)
 
 
-    def help(self, argv, cmd=None):
-        self.say(__doc__)
+    def on_node_down(self, node):
+        self.note('> {0.name}: {1.DOWN}'.format(node, self))
 
 
-    def usage(self):
-        self.splash()
-        self.say(USAGE.format(prog_name=self.prog_name))
+    def on_node_shutdown_ok(self, node):
+        self.note('\n\t> {0.name}: {1.OK}'.format(node, self))
 
 
-    def splash(self):
-        if not self.nosplash:
-            c = self.colored
-            self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER)))
-
-    def waitexec(self, argv, path=sys.executable):
-        args = ' '.join([path] + list(argv))
-        argstr = shlex.split(from_utf8(args), posix=not IS_WINDOWS)
-        pipe = Popen(argstr, env=self.env)
-        self.info('  {0}'.format(' '.join(argstr)))
-        retcode = pipe.wait()
-        if retcode < 0:
-            self.note('* Child was terminated by signal {0}'.format(-retcode))
-            return -retcode
-        elif retcode > 0:
-            self.note('* Child terminated with errorcode {0}'.format(retcode))
-        return retcode
+    def on_node_status(self, node, retval):
+        self.note(retval and self.FAILED or self.OK)
 
 
-    def error(self, msg=None):
-        if msg:
-            self.carp(msg)
-        self.usage()
-        self.retcode = 1
-        return 1
+    def on_node_signal(self, node, sig):
+        self.note('Sending {sig} to node {0.name} ({0.pid})'.format(
+            node, sig=sig))
 
 
-    def info(self, msg, newline=True):
-        if self.verbose:
-            self.note(msg, newline=newline)
+    def on_child_spawn(self, node, argstr, env):
+        self.info('  {0}'.format(argstr))
 
 
-    def note(self, msg, newline=True):
-        if not self.quiet:
-            self.say(str(msg), newline=newline)
+    def on_child_signalled(self, node, signum):
+        self.note('* Child was terminated by signal {0}'.format(signum))
 
 
-    @cached_property
-    def colored(self):
-        return term.colored(enabled=not self.no_color)
+    def on_child_failure(self, node, retcode):
+        self.note('* Child terminated with exit code {0}'.format(retcode))
 
 
     @cached_property
     def OK(self):
@@ -448,205 +453,5 @@ class MultiTool:
     def DOWN(self):
         return str(self.colored.magenta('DOWN'))
 
 
-
-def _args_for_node(p, name, prefix, suffix, cmd, append, options):
-    name, nodename, expand = _get_nodename(
-        name, prefix, suffix, options)
-
-    if nodename in p.namespaces:
-        ns = nodename
-    else:
-        ns = name
-
-    argv = ([expand(cmd)] +
-            [format_opt(opt, expand(value))
-                for opt, value in p.optmerge(ns, options).items()] +
-            [p.passthrough])
-    if append:
-        argv.append(expand(append))
-    return multi_args_t(nodename, argv, expand, name)
-
-
-def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
-    names = p.values
-    options = dict(p.options)
-    ranges = len(names) == 1
-    if ranges:
-        try:
-            names, prefix = _get_ranges(names)
-        except ValueError:
-            pass
-    cmd = options.pop('--cmd', cmd)
-    append = options.pop('--append', append)
-    hostname = options.pop('--hostname',
-                           options.pop('-n', gethostname()))
-    prefix = options.pop('--prefix', prefix) or ''
-    suffix = options.pop('--suffix', suffix) or hostname
-    suffix = '' if suffix in ('""', "''") else suffix
-
-    _update_ns_opts(p, names)
-    _update_ns_ranges(p, ranges)
-    return (_args_for_node(p, name, prefix, suffix, cmd, append, options)
-            for name in names)
-
-
-def _get_ranges(names):
-    noderange = int(names[0])
-    names = [str(n) for n in range(1, noderange + 1)]
-    prefix = 'celery'
-    return names, prefix
-
-
-def _update_ns_opts(p, names):
-    # Numbers in args always refers to the index in the list of names.
-    # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
-    for ns_name, ns_opts in list(p.namespaces.items()):
-        if ns_name.isdigit():
-            ns_index = int(ns_name) - 1
-            if ns_index < 0:
-                raise KeyError('Indexes start at 1 got: %r' % (ns_name,))
-            try:
-                p.namespaces[names[ns_index]].update(ns_opts)
-            except IndexError:
-                raise KeyError('No node at index %r' % (ns_name,))
-
-
-def _update_ns_ranges(p, ranges):
-    for ns_name, ns_opts in list(p.namespaces.items()):
-        if ',' in ns_name or (ranges and '-' in ns_name):
-            for subns in parse_ns_range(ns_name, ranges):
-                p.namespaces[subns].update(ns_opts)
-            p.namespaces.pop(ns_name)
-
-
-def _get_nodename(name, prefix, suffix, options):
-        hostname = suffix
-        if '@' in name:
-            nodename = options['-n'] = host_format(name)
-            shortname, hostname = nodesplit(nodename)
-            name = shortname
-        else:
-            shortname = '%s%s' % (prefix, name)
-            nodename = options['-n'] = host_format(
-                '{0}@{1}'.format(shortname, hostname),
-            )
-        expand = partial(
-            node_format, nodename=nodename, N=shortname, d=hostname,
-            h=nodename, i='%i', I='%I',
-        )
-        return name, nodename, expand
-
-
-class NamespacedOptionParser:
-
-    def __init__(self, args):
-        self.args = args
-        self.options = OrderedDict()
-        self.values = []
-        self.passthrough = ''
-        self.namespaces = defaultdict(lambda: OrderedDict())
-
-        self.parse()
-
-    def parse(self):
-        rargs = list(self.args)
-        pos = 0
-        while pos < len(rargs):
-            arg = rargs[pos]
-            if arg == '--':
-                self.passthrough = ' '.join(rargs[pos:])
-                break
-            elif arg[0] == '-':
-                if arg[1] == '-':
-                    self.process_long_opt(arg[2:])
-                else:
-                    value = None
-                    if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
-                        value = rargs[pos + 1]
-                        pos += 1
-                    self.process_short_opt(arg[1:], value)
-            else:
-                self.values.append(arg)
-            pos += 1
-
-    def process_long_opt(self, arg, value=None):
-        if '=' in arg:
-            arg, value = arg.split('=', 1)
-        self.add_option(arg, value, short=False)
-
-    def process_short_opt(self, arg, value=None):
-        self.add_option(arg, value, short=True)
-
-    def optmerge(self, ns, defaults=None):
-        if defaults is None:
-            defaults = self.options
-        return OrderedDict(defaults, **self.namespaces[ns])
-
-    def add_option(self, name, value, short=False, ns=None):
-        prefix = short and '-' or '--'
-        dest = self.options
-        if ':' in name:
-            name, ns = name.split(':')
-            dest = self.namespaces[ns]
-        dest[prefix + name] = value
-
-
-def quote(v):
-    return "\\'".join("'" + p + "'" for p in v.split("'"))
-
-
-def format_opt(opt, value):
-    if not value:
-        return opt
-    if opt.startswith('--'):
-        return '{0}={1}'.format(opt, value)
-    return '{0} {1}'.format(opt, value)
-
-
-def parse_ns_range(ns, ranges=False):
-    ret = []
-    for space in ',' in ns and ns.split(',') or [ns]:
-        if ranges and '-' in space:
-            start, stop = space.split('-')
-            ret.extend(
-                str(n) for n in range(int(start), int(stop) + 1)
-            )
-        else:
-            ret.append(space)
-    return ret
-
-
-def findsig(args, default=signal.SIGTERM):
-    for arg in reversed(args):
-        if len(arg) == 2 and arg[0] == '-':
-            try:
-                return int(arg[1])
-            except ValueError:
-                pass
-        if arg[0] == '-':
-            maybe_sig = 'SIG' + arg[1:]
-            if maybe_sig in SIGNAMES:
-                return getattr(signal, maybe_sig)
-    return default
-
-
-def _getopt(d, alt):
-    for opt in alt:
-        try:
-            return d[opt]
-        except KeyError:
-            pass
-    raise KeyError(alt[0])
-
-
-def _setdefaultopt(d, alt, value):
-    for opt in alt[1:]:
-        try:
-            return d[opt]
-        except KeyError:
-            pass
-    return d.setdefault(alt[0], value)
-
-
 if __name__ == '__main__':              # pragma: no cover
     main()

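For orientation, a brief command-line sketch of the subcommands wired up in ``self.commands`` above; the ``proj`` app and the node count are placeholders, not part of this patch:

    $ celery multi start 3 -A proj -l info      # start celery1..celery3 nodes
    $ celery multi names 3 -A proj              # print the expanded node names
    $ celery multi stopwait 3 -A proj -l info   # stop nodes and wait for shutdown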
+ 92 - 83
celery/bin/worker.py

@@ -11,7 +11,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
 .. cmdoption:: -c, --concurrency

-    Number of child processes processing the queue. The default
+    Number of child processes processing the queue.  The default
     is the number of CPUs available on your system.

 .. cmdoption:: -P, --pool
@@ -22,14 +22,19 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
 .. cmdoption:: -n, --hostname

-    Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
-    %n (name) and %d, (domain).
+    Set custom hostname (e.g., 'w1@%%h').  Expands: %%h (hostname),
+    %%n (name) and %%d, (domain).

 .. cmdoption:: -B, --beat

-    Also run the `celery beat` periodic task scheduler. Please note that
+    Also run the `celery beat` periodic task scheduler.  Please note that
     there must only be one instance of this service.

+    .. note::
+
+        ``-B`` is meant to be used for development purposes. For production
+        environment, you need to start :program:`celery beat` separately.
+
 .. cmdoption:: -Q, --queues

     List of queues to enable for this worker, separated by comma.
@@ -50,7 +55,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 .. cmdoption:: -s, --schedule

     Path to the schedule database if running with the `-B` option.
-    Defaults to `celerybeat-schedule`. The extension ".db" may be
+    Defaults to `celerybeat-schedule`.  The extension ".db" may be
     appended to the filename.

 .. cmdoption:: -O
@@ -63,30 +68,30 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
 .. cmdoption:: --scheduler

-    Scheduler class to use. Default is
+    Scheduler class to use.  Default is
     :class:`celery.beat.PersistentScheduler`

 .. cmdoption:: -S, --statedb

-    Path to the state database. The extension '.db' may
-    be appended to the filename. Default: {default}
+    Path to the state database.  The extension '.db' may
+    be appended to the filename.  Default: {default}

-.. cmdoption:: -E, --events
+.. cmdoption:: -E, --task-events

     Send task-related events that can be captured by monitors like
     :program:`celery events`, :pypi:`flower` and others.

 .. cmdoption:: --without-gossip

-    Do not subscribe to other workers events.
+    Don't subscribe to other workers events.

 .. cmdoption:: --without-mingle

-    Do not synchronize with other workers at start-up.
+    Don't synchronize with other workers at start-up.

 .. cmdoption:: --without-heartbeat

-    Do not send event heartbeats.
+    Don't send event heartbeats.

 .. cmdoption:: --heartbeat-interval

@@ -106,26 +111,35 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
     Enables a soft time limit (in seconds int/float) for tasks.

-.. cmdoption:: --maxtasksperchild
+.. cmdoption:: --max-tasks-per-child

     Maximum number of tasks a pool worker can execute before it's
     terminated and replaced by a new worker.

-.. cmdoption:: --maxmemperchild
+.. cmdoption:: --max-memory-per-child

     Maximum amount of resident memory, in KiB, that may be consumed by a
-    child process before it will be replaced by a new one. If a single
+    child process before it will be replaced by a new one.  If a single
     task causes a child process to exceed this limit, the task will be
     completed and the child process will be replaced afterwards.
     Default: no limit.

+.. cmdoption:: --autoscale
+
+    Enable autoscaling by providing
+    max_concurrency, min_concurrency. Example::
+
+        --autoscale=10,3
+
+    (always keep 3 processes, but grow to 10 if necessary)
+
 .. cmdoption:: --detach

     Start worker as a background process.

 .. cmdoption:: -f, --logfile

-    Path to log file. If no logfile is specified, `stderr` is used.
+    Path to log file.  If no logfile is specified, `stderr` is used.

 .. cmdoption:: -l, --loglevel

@@ -136,7 +150,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
     Optional file used to store the process pid.

-    The program will not start if this file already exists
+    The program won't start if this file already exists
     and the pid is still alive.

 .. cmdoption:: --uid
@@ -163,8 +177,6 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 """
 """
 import sys
 import sys
 
 
-from optparse import OptionGroup
-
 from celery import concurrency
 from celery import concurrency
 from celery.bin.base import Command, daemon_options
 from celery.bin.base import Command, daemon_options
 from celery.bin.celeryd_detach import detached_celeryd
 from celery.bin.celeryd_detach import detached_celeryd
@@ -174,7 +186,7 @@ from celery.utils.nodenames import default_nodename
 
 
 __all__ = ['worker', 'main']

-__MODULE_DOC__ = __doc__
+HELP = __doc__


 class worker(Command):
@@ -188,13 +200,17 @@ class worker(Command):
 
 
             $ celery worker -A proj --concurrency=4
             $ celery worker -A proj --concurrency=1000 -P eventlet
+            $ celery worker --autoscale=10,0
     """
-    doc = __MODULE_DOC__  # parse help from this too
+
+    doc = HELP  # parse help from this too
     namespace = 'worker'
     enable_config_from_cmdline = True
     supports_args = False
+    removed_flags = {'--no-execv', '--force-execv'}

     def run_from_argv(self, prog_name, argv=None, command=None):
+        argv = [x for x in argv if x not in self.removed_flags]
         command = sys.argv[0] if command is None else command
         argv = sys.argv[1:] if argv is None else argv
         # parse options before detaching so errors can be handled.
@@ -211,7 +227,7 @@ class worker(Command):
             raise SystemExit(0)

     def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
-            loglevel=None, logfile=None, pidfile=None, state_db=None,
+            loglevel=None, logfile=None, pidfile=None, statedb=None,
             **kwargs):
         maybe_drop_privileges(uid=uid, gid=gid)
         # Pools like eventlet/gevent needs to patch libs as early
@@ -226,7 +242,7 @@ class worker(Command):
             try:
                 loglevel = mlevel(loglevel)
             except KeyError:  # pragma: no cover
-                self.die('Unknown level {0!r}. Please use one of {1}.'.format(
+                self.die('Unknown level {0!r}.  Please use one of {1}.'.format(
                     loglevel, '|'.join(
                         l for l in LOG_LEVELS if isinstance(l, str))))

@@ -234,8 +250,8 @@ class worker(Command):
             hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
             logfile=logfile,  # node format handled by celery.app.log.setup
             pidfile=self.node_format(pidfile, hostname),
-            state_db=self.node_format(state_db, hostname), **kwargs
-        )
+            statedb=self.node_format(statedb, hostname),
+            **kwargs)
         worker.start()
         return worker.exitcode

@@ -244,103 +260,96 @@ class worker(Command):
         # that may have to be loaded as early as possible.
         return (['-P'], ['--pool'])

-    def prepare_arguments(self, parser):
+    def add_arguments(self, parser):
         conf = self.app.conf

-        wopts = OptionGroup(parser, 'Worker Options')
-        wopts.add_option('-n', '--hostname')
-        wopts.add_option('-D', '--detach', action='store_true')
-        wopts.add_option(
+        wopts = parser.add_argument_group('Worker Options')
+        wopts.add_argument('-n', '--hostname')
+        wopts.add_argument(
+            '-D', '--detach',
+            action='store_true', default=False,
+        )
+        wopts.add_argument(
             '-S', '--statedb',
-            default=conf.worker_state_db, dest='state_db',
+            default=conf.worker_state_db,
         )
-        wopts.add_option('-l', '--loglevel', default='WARN')
-        wopts.add_option('-O', dest='optimization')
-        wopts.add_option(
+        wopts.add_argument('-l', '--loglevel', default='WARN')
+        wopts.add_argument('-O', dest='optimization')
+        wopts.add_argument(
             '--prefetch-multiplier',
-            dest='prefetch_multiplier', type='int',
-            default=conf.worker_prefetch_multiplier,
+            type=int, default=conf.worker_prefetch_multiplier,
         )
-        parser.add_option_group(wopts)

-        topts = OptionGroup(parser, 'Pool Options')
-        topts.add_option(
+        topts = parser.add_argument_group('Pool Options')
+        topts.add_argument(
             '-c', '--concurrency',
-            default=conf.worker_concurrency, type='int',
+            default=conf.worker_concurrency, type=int,
         )
-        topts.add_option(
+        topts.add_argument(
             '-P', '--pool',
-            default=conf.worker_pool, dest='pool_cls',
+            default=conf.worker_pool,
         )
-        topts.add_option(
-            '-E', '--events',
-            default=conf.worker_send_task_events,
-            action='store_true', dest='send_events',
+        topts.add_argument(
+            '-E', '--task-events', '--events',
+            action='store_true', default=conf.worker_send_task_events,
         )
-        topts.add_option(
+        topts.add_argument(
            '--time-limit',
-            type='float', dest='task_time_limit',
-            default=conf.task_time_limit,
+            type=float, default=conf.task_time_limit,
         )
-        topts.add_option(
+        topts.add_argument(
             '--soft-time-limit',
-            dest='task_soft_time_limit', type='float',
-            default=conf.task_soft_time_limit,
+            type=float, default=conf.task_soft_time_limit,
         )
-        topts.add_option(
-            '--maxtasksperchild',
-            dest='max_tasks_per_child', type='int',
-            default=conf.worker_max_tasks_per_child,
+        topts.add_argument(
+            '--max-tasks-per-child', '--maxtasksperchild',
+            type=int, default=conf.worker_max_tasks_per_child,
        )
-        topts.add_option(
-            '--maxmemperchild',
-            dest='max_memory_per_child', type='int',
-            default=conf.worker_max_memory_per_child,
+        topts.add_argument(
+            '--max-memory-per-child', '--maxmemperchild',
+            type=int, default=conf.worker_max_memory_per_child,
         )
-        parser.add_option_group(topts)

-        qopts = OptionGroup(parser, 'Queue Options')
-        qopts.add_option(
+        qopts = parser.add_argument_group('Queue Options')
+        qopts.add_argument(
             '--purge', '--discard',
-            default=False, action='store_true',
+            action='store_true', default=False,
         )
-        qopts.add_option('--queues', '-Q', default=[])
-        qopts.add_option('--exclude-queues', '-X', default=[])
-        qopts.add_option('--include', '-I', default=[])
-        parser.add_option_group(qopts)
+        qopts.add_argument('--queues', '-Q', default=[])
+        qopts.add_argument('--exclude-queues', '-X', default=[])
+        qopts.add_argument('--include', '-I', default=[])

-        fopts = OptionGroup(parser, 'Features')
-        fopts.add_option(
+        fopts = parser.add_argument_group('Features')
+        fopts.add_argument(
             '--without-gossip', action='store_true', default=False,
         )
-        fopts.add_option(
+        fopts.add_argument(
             '--without-mingle', action='store_true', default=False,
         )
-        fopts.add_option(
+        fopts.add_argument(
             '--without-heartbeat', action='store_true', default=False,
         )
-        fopts.add_option('--heartbeat-interval', type='int')
-        parser.add_option_group(fopts)
+        fopts.add_argument('--heartbeat-interval', type=int)
+        fopts.add_argument('--autoscale')

         daemon_options(parser)

-        bopts = OptionGroup(parser, 'Embedded Beat Options')
-        bopts.add_option('-B', '--beat', action='store_true')
-        bopts.add_option(
-            '-s', '--schedule', dest='schedule_filename',
+        bopts = parser.add_argument_group('Embedded Beat Options')
+        bopts.add_argument('-B', '--beat', action='store_true', default=False)
+        bopts.add_argument(
+            '-s', '--schedule-filename', '--schedule',
             default=conf.beat_schedule_filename,
         )
-        bopts.add_option('--scheduler', dest='scheduler_cls')
-        parser.add_option_group(bopts)
+        bopts.add_argument('--scheduler')

         user_options = self.app.user_options['worker']
         if user_options:
-            uopts = OptionGroup(parser, 'User Options')
-            uopts.option_list.extend(user_options)
-            parser.add_option_group(uopts)
+            uopts = parser.add_argument_group('User Options')
+            self.add_compat_options(uopts, user_options)


 def main(app=None):
+    """Start worker."""
     # Fix for setuptools generated scripts, so that it will
     # work with multiprocessing fork emulation.
     # (see multiprocessing.forking.get_preparation_data())

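The option handling above moves from optparse ``OptionGroup`` to argparse argument groups. A standalone sketch of the same pattern, not the actual ``worker`` command class (prog name and defaults are illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='worker-demo')
    wopts = parser.add_argument_group('Worker Options')
    wopts.add_argument('-n', '--hostname')
    wopts.add_argument('--prefetch-multiplier', type=int, default=4)

    # argparse derives the attribute name from the long option automatically
    ns = parser.parse_args(['-n', 'w1@example.com', '--prefetch-multiplier', '8'])
    print(ns.hostname, ns.prefetch_multiplier)  # w1@example.com 8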
+ 6 - 14
celery/bootsteps.py

@@ -2,12 +2,11 @@
 """A directed acyclic graph of reusable components."""
 """A directed acyclic graph of reusable components."""
 from collections import deque
 from collections import deque
 from threading import Event
 from threading import Event
-from typing import Any, Callable, Mapping, Optional, Set, Sequence, Tuple
+from typing import Any, Callable, Mapping, Optional, Sequence, Tuple
 
 
 from kombu.common import ignore_errors
 from kombu.common import ignore_errors
 from kombu.utils.imports import symbol_by_name
 from kombu.utils.imports import symbol_by_name
 
 
-from .utils.abstract import AbstractApp
 from .utils.graph import DependencyGraph, GraphFormatter
 from .utils.graph import DependencyGraph, GraphFormatter
 from .utils.imports import instantiate, qualname
 from .utils.imports import instantiate, qualname
 from .utils.log import get_logger
 from .utils.log import get_logger
@@ -222,12 +221,12 @@ class Blueprint:
     Arguments:
         steps Sequence[Union[str, Step]]: List of steps.
         name (str): Set explicit name for this blueprint.
-        app (~@Celery): Set the Celery app for this blueprint.
         on_start (Callable): Optional callback applied after blueprint start.
         on_close (Callable): Optional callback applied before blueprint close.
         on_stopped (Callable): Optional callback applied after
             blueprint stopped.
     """
+
     GraphFormatter = StepFormatter

     name = None                        # type: Optional[str]
@@ -243,11 +242,9 @@ class Blueprint:
 
 
     def __init__(self, steps: Optional[Sequence]=None,
                  name: Optional[str]=None,
-                 app: Optional[AbstractApp]=None,
                  on_start: Optional[Callable[[], None]]=None,
                  on_close: Optional[Callable[[], None]]=None,
                  on_stopped: Optional[Callable[[], None]]=None) -> None:
-        self.app = app
         self.name = name or self.name or qualname(type(self))
         self.types = set(steps or []) | set(self.default_steps)
         self.on_start = on_start
@@ -298,13 +295,11 @@ class Blueprint:
                                 description.capitalize(), step.alias)
                     try:
                         fun(parent, *args)
-                    except Exception as exc:
+                    except Exception as exc:  # pylint: disable=broad-except
                         if propagate:
                             raise
-                        logger.error(
-                            'Error on %s %s: %r',
-                            description, step.alias, exc, exc_info=1,
-                        )
+                        logger.exception(
+                            'Error on %s %s: %r', description, step.alias, exc)

     def stop(self, parent: Any,
              close: bool=True, terminate: bool=False) -> None:
@@ -404,10 +399,7 @@ class Blueprint:
             raise KeyError('unknown bootstep: %s' % exc)

     def claim_steps(self) -> Mapping[str, Step]:
-        return dict(self.load_step(step) for step in self._all_steps())
-
-    def _all_steps(self) -> Set:
-        return self.types | self.app.steps[self.name.lower()]
+        return dict(self.load_step(step) for step in self.types)

     def load_step(self, step: Step) -> Tuple[str, Step]:
         step = symbol_by_name(step)

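The ``Blueprint`` changes above drop the ``app`` argument from ``__init__``. A minimal construction sketch against the new signature (the step class and names are hypothetical; illustration only, nothing here is started):

    from celery import bootsteps

    class ExampleStep(bootsteps.Step):
        def create(self, parent):
            # called when the blueprint is applied to its parent object
            print('creating step for', parent)

    blueprint = bootsteps.Blueprint(
        steps=[ExampleStep],
        name='ExampleBlueprint',
        on_start=lambda: print('blueprint started'),
    )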
+ 349 - 216
celery/canvas.py

@@ -107,9 +107,39 @@ def _upgrade(fields, sig):
     return sig
 
 
+def _seq_concat_item(seq, item):
+    """Return copy of sequence seq with item added.
+
+    Returns:
+        Sequence: if seq is a tuple, the result will be a tuple,
+           otherwise it depends on the implementation of ``__add__``.
+    """
+    return seq + (item,) if isinstance(seq, tuple) else seq + [item]
+
+
+def _seq_concat_seq(a, b):
+    """Concatenate two sequences: ``a + b``.
+
+    Returns:
+        Sequence: The return value will depend on the largest sequence
+            - if b is larger and is a tuple, the return value will be a tuple.
+            - if a is larger and is a list, the return value will be a list,
+    """
+    # find the type of the largest sequence
+    prefer = type(max([a, b], key=len))
+    # convert the smallest list to the type of the largest sequence.
+    if not isinstance(a, prefer):
+        a = prefer(a)
+    if not isinstance(b, prefer):
+        b = prefer(b)
+    return a + b
+
+
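An editorial illustration (not part of the patch) of what the two helpers above return, assuming they are in scope; the result's type follows the input sequences:

    assert _seq_concat_item((1, 2), 3) == (1, 2, 3)    # tuples stay tuples
    assert _seq_concat_item([1, 2], 3) == [1, 2, 3]    # lists stay lists
    assert _seq_concat_seq((1,), [2, 3]) == [1, 2, 3]  # the larger sequence's type wins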
 @abstract.CallableSignature.register
 class Signature(dict):
-    """Class that wraps the arguments and execution options
+    """Task Signature.
+
+    Class that wraps the arguments and execution options
     for a single task invocation.

     Used as the parts in a :class:`group` and other constructs,
@@ -118,7 +148,7 @@ class Signature(dict):
 
 
     Signatures can also be created from tasks:

-    - Using the ``.signature()`` method which has the same signature
+    - Using the ``.signature()`` method that has the same signature
       as ``Task.apply_async``:

         .. code-block:: pycon
@@ -183,26 +213,26 @@ class Signature(dict):
                  type=None, subtask_type=None, immutable=False,
                  app=None, **ex):
         self._app = app
-        init = dict.__init__

         if isinstance(task, dict):
-            return init(self, task)  # works like dict(d)
-
-        # Also supports using task class/instance instead of string name.
-        try:
-            task_name = task.name
-        except AttributeError:
-            task_name = task
+            super(Signature, self).__init__(task)  # works like dict(d)
         else:
-            self._type = task
-
-        init(self,
-             task=task_name, args=tuple(args or ()),
-             kwargs=kwargs or {},
-             options=dict(options or {}, **ex),
-             subtask_type=subtask_type,
-             immutable=immutable,
-             chord_size=None)
+            # Also supports using task class/instance instead of string name.
+            try:
+                task_name = task.name
+            except AttributeError:
+                task_name = task
+            else:
+                self._type = task
+
+            super(Signature, self).__init__(
+                task=task_name, args=tuple(args or ()),
+                kwargs=kwargs or {},
+                options=dict(options or {}, **ex),
+                subtask_type=subtask_type,
+                immutable=immutable,
+                chord_size=None,
+            )
 
 
     def __call__(self, *partial_args, **partial_kwargs):
         """Call the task directly (in the current process)."""
@@ -214,8 +244,11 @@ class Signature(dict):
         return self.apply_async(partial_args, partial_kwargs)

     def apply(self, args=(), kwargs={}, **options):
-        """Same as :meth:`apply_async` but executed the task inline instead
-        of sending a task message."""
+        """Call task locally.
+
+        Same as :meth:`apply_async` but executed the task inline instead
+        of sending a task message.
+        """
         # For callbacks: extra args are prepended to the stored args.
         args, kwargs, options = self._merge(args, kwargs, options)
         return self.type.apply(args, kwargs, **options)
@@ -245,6 +278,8 @@ class Signature(dict):
             args, kwargs, options = self._merge(args, kwargs, options)
         else:
             args, kwargs, options = self.args, self.kwargs, self.options
+        # pylint: disable=too-many-function-args
+        #   Borks on this, as it's a property
         return _apply(args, kwargs, **options)

     def _merge(self, args=(), kwargs={}, options={}, force=False):
@@ -282,13 +317,15 @@ class Signature(dict):
                root_id=None, parent_id=None):
         """Finalize the signature by adding a concrete task id.

-        The task will not be called and you should not call the signature
-        twice after freezing it as that will result in two task messages
+        The task won't be called and you shouldn't call the signature
+        twice after freezing it as that'll result in two task messages
         using the same task id.

         Returns:
             ~@AsyncResult: promise of future evaluation.
         """
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         opts = self.options
         try:
             tid = opts['task_id']
@@ -304,13 +341,17 @@ class Signature(dict):
             opts['group_id'] = group_id
         if chord:
             opts['chord'] = chord
+        # pylint: disable=too-many-function-args
+        #   Borks on this, as it's a property.
         return self.AsyncResult(tid)
     _freeze = freeze

     def replace(self, args=None, kwargs=None, options=None):
         """Replace the args, kwargs or options set for this signature.
+
         These are only replaced if the argument for the section is
-        not :const:`None`."""
+        not :const:`None`.
+        """
         s = self.clone()
         if args is not None:
             s.args = args
@@ -325,7 +366,7 @@ class Signature(dict):
 
 
         Returns:
             Signature: This is a chaining method call
-                (i.e. it will return ``self``).
+                (i.e., it will return ``self``).
         """
         if immutable is not None:
             self.set_immutable(immutable)
@@ -355,8 +396,7 @@ class Signature(dict):
         items.extend(maybe_list(value))

     def link(self, callback):
-        """Add a callback task to be applied if this task
-        executes successfully.
+        """Add callback task to be applied if this task succeeds.

         Returns:
             Signature: the argument passed, for chaining
@@ -365,8 +405,7 @@ class Signature(dict):
         return self.append_to_list_option('link', callback)

     def link_error(self, errback):
-        """Add a callback task to be applied if an error occurs
-        while executing this task.
+        """Add callback task to be applied on error in task execution.

         Returns:
             Signature: the argument passed, for chaining
@@ -388,8 +427,10 @@ class Signature(dict):
         return self

     def flatten_links(self):
-        """Return a recursive list of dependencies (unchain if you will,
-        but with links intact)."""
+        """Return a recursive list of dependencies.
+
+        "unchain" if you will, but with links intact.
+        """
         return list(_chain.from_iterable(_chain(
             [[self]],
             (link.flatten_links()
@@ -399,21 +440,50 @@ class Signature(dict):
     def __or__(self, other):
         if isinstance(self, group):
             if isinstance(other, group):
+                # group() | group() -> single group
                 return group(_chain(self.tasks, other.tasks), app=self.app)
+            # group() | task -> chord
             return chord(self, body=other, app=self._app)
         elif isinstance(other, group):
+            # task | group() -> unroll group with one member
             other = maybe_unroll_group(other)
-
+            return chain(self, other, app=self._app)
         if not isinstance(self, chain) and isinstance(other, chain):
-            return chain((self,) + other.tasks, app=self._app)
+            # task | chain -> chain
+            return chain(
+                _seq_concat_seq((self,), other.tasks), app=self._app)
         elif isinstance(other, chain):
-            return chain(*self.tasks + other.tasks, app=self._app)
+            # chain | chain -> chain
+            return chain(
+                _seq_concat_seq(self.tasks, other.tasks), app=self._app)
+        elif isinstance(self, chord):
+            sig = self.clone()
+            sig.body = sig.body | other
+            return sig
         elif isinstance(other, Signature):
         elif isinstance(other, Signature):
             if isinstance(self, chain):
             if isinstance(self, chain):
-                return chain(*self.tasks + (other,), app=self._app)
+                # chain | task -> chain
+                return chain(
+                    _seq_concat_item(self.tasks, other), app=self._app)
+            # task | task -> chain
             return chain(self, other, app=self._app)
             return chain(self, other, app=self._app)
         return NotImplemented
         return NotImplemented
 
 
+    def election(self):
+        type = self.type
+        app = type.app
+        tid = self.options.get('task_id') or uuid()
+
+        with app.producer_or_acquire(None) as P:
+            props = type.backend.on_task_call(P, tid)
+            app.control.election(tid, 'task', self.clone(task_id=tid, **props),
+                                 connection=P.connection)
+            return type.AsyncResult(tid)
+
+    def reprcall(self, *args, **kwargs):
+        args, kwargs, _ = self._merge(args, kwargs, {}, force=True)
+        return reprcall(self['task'], args, kwargs)
+
     def __deepcopy__(self, memo):
     def __deepcopy__(self, memo):
         memo[id(self)] = self
         memo[id(self)] = self
         return dict(self)
         return dict(self)
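
The comments added to ``__or__`` above spell out the combination rules for the pipe operator. Below is a minimal sketch of how they play out, assuming only a task named ``tasks.add``; the ``add()`` helper is illustrative, and name-only signatures like these can be composed without a worker or broker:

    from celery import group, signature

    def add(*args):
        # hypothetical helper: a name-only signature for a task called 'tasks.add'
        return signature('tasks.add', args=args)

    c1 = add(2, 2) | add(4)                              # task  | task  -> chain
    c2 = group(add(i, i) for i in range(3)) | add(10)    # group | task  -> chord
    c3 = c1 | add(8)                                     # chain | task  -> longer chain
    c4 = group([add(1, 1)]) | group([add(2, 2)])         # group | group -> single group

    print(type(c1).__name__, type(c2).__name__,
          type(c3).__name__, type(c4).__name__)          # expected: chain chord chain group

Note that composing does not mutate the operands: ``c1 | add(8)`` builds a new chain object rather than extending ``c1`` in place.
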
@@ -429,21 +499,6 @@ class Signature(dict):
     def __json__(self):
     def __json__(self):
         return dict(self)
         return dict(self)
 
 
-    def reprcall(self, *args, **kwargs):
-        args, kwargs, _ = self._merge(args, kwargs, {}, force=True)
-        return reprcall(self['task'], args, kwargs)
-
-    def election(self):
-        type = self.type
-        app = type.app
-        tid = self.options.get('task_id') or uuid()
-
-        with app.producer_or_acquire(None) as P:
-            props = type.backend.on_task_call(P, tid)
-            app.control.election(tid, 'task', self.clone(task_id=tid, **props),
-                                 connection=P.connection)
-            return type.AsyncResult(tid)
-
     def __repr__(self):
     def __repr__(self):
         return self.reprcall()
         return self.reprcall()
 
 
@@ -489,13 +544,15 @@ class Signature(dict):
 
 
 @Signature.register_type
 @Signature.register_type
 class chain(Signature):
 class chain(Signature):
-    """Chains tasks together, so that each tasks follows each other
+    """Chain tasks together.
+
+    Tasks follow one another,
     by being applied as a callback of the previous task.
     by being applied as a callback of the previous task.
 
 
     Note:
     Note:
         If called with only one argument, then that argument must
         If called with only one argument, then that argument must
-        be an iterable of tasks to chain, which means you can
-        use this with a generator expression.
+        be an iterable of tasks to chain: this allows us
+        to use generator expressions.
 
 
     Example:
     Example:
         This is effectively :math:`((2 + 2) + 4)`:
         This is effectively :math:`((2 + 2) + 4)`:
@@ -524,7 +581,7 @@ class chain(Signature):
     Arguments:
     Arguments:
         *tasks (Signature): List of task signatures to chain.
         *tasks (Signature): List of task signatures to chain.
             If only one argument is passed and that argument is
             If only one argument is passed and that argument is
-            an iterable, then that will be used as the list of signatures
+            an iterable, then that'll be used as the list of signatures
             to chain instead.  This means that you can use a generator
             to chain instead.  This means that you can use a generator
             expression.
             expression.
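
A quick illustration of the single-iterable form described above; only signature construction happens here, so nothing needs to be running, and the task name is illustrative:

    from celery import chain, signature

    # a generator expression as the one-and-only argument
    sigs = (signature('tasks.add', args=(i, i)) for i in range(4))
    c = chain(sigs)
    print(c)   # roughly: tasks.add(0, 0) | tasks.add(1, 1) | tasks.add(2, 2) | tasks.add(3, 3)
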
 
 
@@ -533,8 +590,19 @@ class chain(Signature):
             task in the chain.  When that task succeeds, the next task in the
             task in the chain.  When that task succeeds, the next task in the
             chain is applied, and so on.
             chain is applied, and so on.
     """
     """
+
     tasks = _getitem_property('kwargs.tasks', 'Tasks in chain.')
     tasks = _getitem_property('kwargs.tasks', 'Tasks in chain.')
 
 
+    @classmethod
+    def from_dict(cls, d, app=None):
+        tasks = d['kwargs']['tasks']
+        if tasks:
+            if isinstance(tasks, tuple):  # aaaargh
+                tasks = d['kwargs']['tasks'] = list(tasks)
+            # First task must be signature object to get app
+            tasks[0] = maybe_signature(tasks[0], app=app)
+        return _upgrade(d, chain(tasks, app=app, **d['options']))
+
     def __init__(self, *tasks, **options):
     def __init__(self, *tasks, **options):
         tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
         tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
                  else tasks)
                  else tasks)
@@ -550,8 +618,12 @@ class chain(Signature):
             return self.apply_async(args, kwargs)
             return self.apply_async(args, kwargs)
 
 
     def clone(self, *args, **kwargs):
     def clone(self, *args, **kwargs):
+        to_signature = maybe_signature
         s = Signature.clone(self, *args, **kwargs)
         s = Signature.clone(self, *args, **kwargs)
-        s.kwargs['tasks'] = [sig.clone() for sig in s.kwargs['tasks']]
+        s.kwargs['tasks'] = [
+            to_signature(sig, app=self._app, clone=True)
+            for sig in s.kwargs['tasks']
+        ]
         return s
         return s
 
 
     def apply_async(self, args=(), kwargs={}, **options):
     def apply_async(self, args=(), kwargs={}, **options):
@@ -565,6 +637,8 @@ class chain(Signature):
     def run(self, args=(), kwargs={}, group_id=None, chord=None,
     def run(self, args=(), kwargs={}, group_id=None, chord=None,
             task_id=None, link=None, link_error=None,
             task_id=None, link=None, link_error=None,
             producer=None, root_id=None, parent_id=None, app=None, **options):
             producer=None, root_id=None, parent_id=None, app=None, **options):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         app = app or self.app
         app = app or self.app
         use_link = self._use_link
         use_link = self._use_link
         if use_link is None and app.conf.task_protocol == 1:
         if use_link is None and app.conf.task_protocol == 1:
@@ -584,12 +658,17 @@ class chain(Signature):
             if link:
             if link:
                 tasks[0].extend_list_option('link', link)
                 tasks[0].extend_list_option('link', link)
             first_task = tasks.pop()
             first_task = tasks.pop()
-            first_task.apply_async(
-                chain=tasks if not use_link else None, **options)
+            # chain option may already be set, resulting in
+            # "multiple values for keyword argument 'chain'" error.
+            # Issue #3379.
+            options['chain'] = tasks if not use_link else None
+            first_task.apply_async(**options)
             return results[0]
             return results[0]
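
The comment above refers to issue #3379: when ``options`` already carries a ``chain`` key, also passing ``chain=...`` explicitly would collide. A plain-Python sketch of that failure mode, and of the workaround of merging into the options dict before expanding it once:

    def apply_async(chain=None, **options):
        # stand-in for Signature.apply_async(), which also accepts a `chain` option
        return chain

    options = {'chain': ['sig2', 'sig3']}        # option already present
    try:
        apply_async(chain=['sig2', 'sig3'], **options)
    except TypeError as exc:
        print(exc)      # ...got multiple values for keyword argument 'chain'

    options['chain'] = ['sig2', 'sig3']          # the fix: merge into options...
    print(apply_async(**options))                # ...and expand the dict only once
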
 
 
     def freeze(self, _id=None, group_id=None, chord=None,
     def freeze(self, _id=None, group_id=None, chord=None,
                root_id=None, parent_id=None):
                root_id=None, parent_id=None):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         _, results = self._frozen = self.prepare_steps(
         _, results = self._frozen = self.prepare_steps(
             self.args, self.tasks, root_id, parent_id, None,
             self.args, self.tasks, root_id, parent_id, None,
             self.app, _id, group_id, chord, clone=False,
             self.app, _id, group_id, chord, clone=False,
@@ -649,6 +728,7 @@ class chain(Signature):
                     task_id=prev_res.id, root_id=root_id, app=app,
                     task_id=prev_res.id, root_id=root_id, app=app,
                 )
                 )
                 prev_res = prev_prev_res
                 prev_res = prev_prev_res
+
             if is_last_task:
             if is_last_task:
                 # chain(task_id=id) means task id is set for the last task
                 # chain(task_id=id) means task id is set for the last task
                 # in the chain.  If the chord is part of a chord/group
                 # in the chain.  If the chord is part of a chord/group
@@ -672,7 +752,17 @@ class chain(Signature):
                     task.link(prev_task)
                     task.link(prev_task)
 
 
                 if prev_res:
                 if prev_res:
-                    prev_res.parent = res
+                    if isinstance(prev_task, chord):
+                        # If the previous task was a chord,
+                        # the freeze above would have set a parent for
+                        # us, but we'd be overwriting it here.
+
+                        # Fix the relationship so that it reads:
+                        #     chord body -> group -> THIS RES
+                        assert isinstance(prev_res.parent, GroupResult)
+                        prev_res.parent.parent = res
+                    else:
+                        prev_res.parent = res
 
 
             if is_first_task and parent_id is not None:
             if is_first_task and parent_id is not None:
                 task.set_parent_id(parent_id)
                 task.set_parent_id(parent_id)
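
The ``prev_res.parent`` bookkeeping above is what lets callers walk backwards through a finished chain. A minimal sketch, assuming eager execution (``task_always_eager``) so no broker or result backend is involved; the ``add`` task is illustrative:

    from celery import Celery, chain

    app = Celery('parent_demo')
    app.conf.task_always_eager = True            # run in-process for this sketch

    @app.task
    def add(x, y):
        return x + y

    res = chain(add.s(2, 2), add.s(4), add.s(8)).apply_async()
    node = res
    while node is not None:                      # newest result first: 16, 8, 4
        print(node.result)
        node = node.parent
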
@@ -702,16 +792,6 @@ class chain(Signature):
             res.parent, last, fargs = last, res, None
             res.parent, last, fargs = last, res, None
         return last
         return last
 
 
-    @classmethod
-    def from_dict(self, d, app=None):
-        tasks = d['kwargs']['tasks']
-        if tasks:
-            if isinstance(tasks, tuple):  # aaaargh
-                tasks = d['kwargs']['tasks'] = list(tasks)
-            # First task must be signature object to get app
-            tasks[0] = maybe_signature(tasks[0], app=app)
-        return _upgrade(d, chain(*tasks, app=app, **d['options']))
-
     @property
     @property
     def app(self):
     def app(self):
         app = self._app
         app = self._app
@@ -723,6 +803,9 @@ class chain(Signature):
         return app or current_app
         return app or current_app
 
 
     def __repr__(self):
     def __repr__(self):
+        if not self.tasks:
+            return '<{0}@{1:#x}: empty>'.format(
+                type(self).__name__, id(self))
         return ' | '.join(repr(t) for t in self.tasks)
         return ' | '.join(repr(t) for t in self.tasks)
 
 
 
 
@@ -730,6 +813,12 @@ class _basemap(Signature):
     _task_name = None
     _task_name = None
     _unpack_args = itemgetter('task', 'it')
     _unpack_args = itemgetter('task', 'it')
 
 
+    @classmethod
+    def from_dict(cls, d, app=None):
+        return _upgrade(
+            d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']),
+        )
+
     def __init__(self, task, it, **options):
     def __init__(self, task, it, **options):
         Signature.__init__(
         Signature.__init__(
             self, self._task_name, (),
             self, self._task_name, (),
@@ -744,37 +833,49 @@ class _basemap(Signature):
             route_name=task_name_from(self.kwargs.get('task')), **opts
             route_name=task_name_from(self.kwargs.get('task')), **opts
         )
         )
 
 
-    @classmethod
-    def from_dict(cls, d, app=None):
-        return _upgrade(
-            d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']),
-        )
-
 
 
 @Signature.register_type
 @Signature.register_type
 class xmap(_basemap):
 class xmap(_basemap):
+    """Map operation for tasks.
+
+    Note:
+        Tasks are executed sequentially in-process; this isn't a
+        parallel operation like :class:`group`.
+    """
+
     _task_name = 'celery.map'
     _task_name = 'celery.map'
 
 
     def __repr__(self):
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
         task, it = self._unpack_args(self.kwargs)
-        return '[{0}(x) for x in {1}]'.format(task.task,
-                                              truncate(repr(it), 100))
+        return '[{0}(x) for x in {1}]'.format(
+            task.task, truncate(repr(it), 100))
 
 
 
 
 @Signature.register_type
 @Signature.register_type
 class xstarmap(_basemap):
 class xstarmap(_basemap):
+    """Map operation for tasks, using star arguments."""
+
     _task_name = 'celery.starmap'
     _task_name = 'celery.starmap'
 
 
     def __repr__(self):
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
         task, it = self._unpack_args(self.kwargs)
-        return '[{0}(*x) for x in {1}]'.format(task.task,
-                                               truncate(repr(it), 100))
+        return '[{0}(*x) for x in {1}]'.format(
+            task.task, truncate(repr(it), 100))
 
 
 
 
 @Signature.register_type
 @Signature.register_type
 class chunks(Signature):
 class chunks(Signature):
+    """Partition of tasks in n chunks."""
+
     _unpack_args = itemgetter('task', 'it', 'n')
     _unpack_args = itemgetter('task', 'it', 'n')
 
 
+    @classmethod
+    def from_dict(cls, d, app=None):
+        return _upgrade(
+            d, chunks(*cls._unpack_args(
+                d['kwargs']), app=app, **d['options']),
+        )
+
     def __init__(self, task, it, n, **options):
     def __init__(self, task, it, n, **options):
         Signature.__init__(
         Signature.__init__(
             self, 'celery.chunks', (),
             self, 'celery.chunks', (),
@@ -782,12 +883,8 @@ class chunks(Signature):
             immutable=True, **options
             immutable=True, **options
         )
         )
 
 
-    @classmethod
-    def from_dict(self, d, app=None):
-        return _upgrade(
-            d, chunks(*self._unpack_args(
-                d['kwargs']), app=app, **d['options']),
-        )
+    def __call__(self, **options):
+        return self.apply_async(**options)
 
 
     def apply_async(self, args=(), kwargs={}, **opts):
     def apply_async(self, args=(), kwargs={}, **opts):
         return self.group().apply_async(
         return self.group().apply_async(
@@ -795,9 +892,6 @@ class chunks(Signature):
             route_name=task_name_from(self.kwargs.get('task')), **opts
             route_name=task_name_from(self.kwargs.get('task')), **opts
         )
         )
 
 
-    def __call__(self, **options):
-        return self.apply_async(**options)
-
     def group(self):
     def group(self):
         # need to evaluate generators
         # need to evaluate generators
         task, it, n = self._unpack_args(self.kwargs)
         task, it, n = self._unpack_args(self.kwargs)
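
Putting the three helpers above together, a hedged sketch run eagerly; the ``add`` task and the expected outputs are illustrative. ``starmap`` evaluates ``add(*x)`` for every item inside a single ``celery.starmap`` task, while ``chunks`` splits the same work into pieces of ``n`` items, one starmap task per piece:

    from celery import Celery

    app = Celery('chunks_demo')
    app.conf.task_always_eager = True            # run in-process for this sketch

    @app.task
    def add(x, y):
        return x + y

    # starmap: "[add(*x) for x in it]" evaluated inside one celery.starmap task
    print(add.starmap(zip(range(3), range(3))).apply_async().get())    # [0, 2, 4]

    # chunks: the same work split into pieces of 3 items, one starmap task per piece
    print(add.chunks(zip(range(6), range(6)), 3).apply_async().get())  # [[0, 2, 4], [6, 8, 10]]
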
@@ -814,7 +908,7 @@ def _maybe_group(tasks, app):
     if isinstance(tasks, dict):
     if isinstance(tasks, dict):
         tasks = signature(tasks, app=app)
         tasks = signature(tasks, app=app)
 
 
-    if isinstance(tasks, group):
+    if isinstance(tasks, (group, chain)):
         tasks = tasks.tasks
         tasks = tasks.tasks
     elif isinstance(tasks, abstract.CallableSignature):
     elif isinstance(tasks, abstract.CallableSignature):
         tasks = [tasks]
         tasks = [tasks]
@@ -832,8 +926,8 @@ class group(Signature):
 
 
     Note:
     Note:
         If only one argument is passed, and that argument is an iterable
         If only one argument is passed, and that argument is an iterable
-        then that will be used as the list of tasks instead, which
-        means you can use ``group`` with generator expressions.
+        then that'll be used as the list of tasks instead: this
+        allows us to use ``group`` with generator expressions.
 
 
     Example:
     Example:
         >>> lazy_group = group([add.s(2, 2), add.s(4, 4)])
         >>> lazy_group = group([add.s(2, 2), add.s(4, 4)])
@@ -843,8 +937,8 @@ class group(Signature):
 
 
     Arguments:
     Arguments:
         *tasks (Signature): A list of signatures that this group will call.
         *tasks (Signature): A list of signatures that this group will call.
-            If there is only one argument, and that argument is an iterable,
-            then that will define the list of signatures instead.
+            If there's only one argument, and that argument is an iterable,
+            then that'll define the list of signatures instead.
         **options (Any): Execution options applied to all tasks
         **options (Any): Execution options applied to all tasks
             in the group.
             in the group.
 
 
@@ -853,8 +947,15 @@ class group(Signature):
             tasks in the group (and return a :class:`GroupResult` instance
             tasks in the group (and return a :class:`GroupResult` instance
             that can be used to inspect the state of the group).
             that can be used to inspect the state of the group).
     """
     """
+
     tasks = _getitem_property('kwargs.tasks', 'Tasks in group.')
     tasks = _getitem_property('kwargs.tasks', 'Tasks in group.')
 
 
+    @classmethod
+    def from_dict(cls, d, app=None):
+        return _upgrade(
+            d, group(d['kwargs']['tasks'], app=app, **d['options']),
+        )
+
     def __init__(self, *tasks, **options):
     def __init__(self, *tasks, **options):
         if len(tasks) == 1:
         if len(tasks) == 1:
             tasks = tasks[0]
             tasks = tasks[0]
@@ -867,14 +968,72 @@ class group(Signature):
         )
         )
         self.subtask_type = 'group'
         self.subtask_type = 'group'
 
 
-    @classmethod
-    def from_dict(self, d, app=None):
-        return _upgrade(
-            d, group(d['kwargs']['tasks'], app=app, **d['options']),
-        )
+    def __call__(self, *partial_args, **options):
+        return self.apply_async(partial_args, **options)
 
 
-    def __len__(self):
-        return len(self.tasks)
+    def skew(self, start=1.0, stop=None, step=1.0):
+        it = fxrange(start, stop, step, repeatlast=True)
+        for task in self.tasks:
+            task.set(countdown=next(it))
+        return self
+
+    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+                    producer=None, link=None, link_error=None, **options):
+        if link is not None:
+            raise TypeError('Cannot add link to group: use a chord')
+        if link_error is not None:
+            raise TypeError(
+                'Cannot add link to group: do that on individual tasks')
+        app = self.app
+        if app.conf.task_always_eager:
+            return self.apply(args, kwargs, **options)
+        if not self.tasks:
+            return self.freeze()
+
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, [], group_id, root_id, app)
+        p = barrier()
+        results = list(self._apply_tasks(tasks, producer, app, p,
+                                         args=args, kwargs=kwargs, **options))
+        result = self.app.GroupResult(group_id, results, ready_barrier=p)
+        p.finalize()
+
+        # - Special case of group(A.s() | group(B.s(), C.s()))
+        # That is, group with single item that's a chain but the
+        # last task in that chain is a group.
+        #
+        # We cannot actually support arbitrary GroupResults in chains,
+        # but this special case we can.
+        if len(result) == 1 and isinstance(result[0], GroupResult):
+            result = result[0]
+
+        parent_task = app.current_worker_task
+        if add_to_parent and parent_task:
+            parent_task.add_trail(result)
+        return result
+
+    def apply(self, args=(), kwargs={}, **options):
+        app = self.app
+        if not self.tasks:
+            return self.freeze()  # empty group returns GroupResult
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, [], group_id, root_id, app)
+        return app.GroupResult(group_id, [
+            sig.apply(args=args, kwargs=kwargs, **options) for sig, _ in tasks
+        ])
+
+    def set_immutable(self, immutable):
+        for task in self.tasks:
+            task.set_immutable(immutable)
+
+    def link(self, sig):
+        # Simply link to first task
+        sig = sig.clone().set(immutable=True)
+        return self.tasks[0].link(sig)
+
+    def link_error(self, sig):
+        sig = sig.clone().set(immutable=True)
+        return self.tasks[0].link_error(sig)
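
The new guards above reject ``link``/``link_error`` on a group and point at chords instead. A small sketch; the tasks are illustrative, and since the ``TypeError`` is raised before anything is sent, no broker is needed to observe it:

    from celery import Celery, chord, group

    app = Celery('link_demo')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    header = group(add.s(i, i) for i in range(3))
    try:
        header.apply_async(link=tsum.s())        # rejected before anything is sent
    except TypeError as exc:
        print(exc)                               # Cannot add link to group: use a chord

    callback = chord(header, tsum.s())           # the supported spelling
    print(callback)
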
 
 
     def _prepared(self, tasks, partial_args, group_id, root_id, app,
     def _prepared(self, tasks, partial_args, group_id, root_id, app,
                   CallableSignature=abstract.CallableSignature,
                   CallableSignature=abstract.CallableSignature,
@@ -883,7 +1042,7 @@ class group(Signature):
         for task in tasks:
         for task in tasks:
             if isinstance(task, CallableSignature):
             if isinstance(task, CallableSignature):
                 # local sigs are always of type Signature, and we
                 # local sigs are always of type Signature, and we
-                # clone them to make sure we do not modify the originals.
+                # clone them to make sure we don't modify the originals.
                 task = task.clone()
                 task = task.clone()
             else:
             else:
                 # serialized sigs must be converted to Signature.
                 # serialized sigs must be converted to Signature.
@@ -901,12 +1060,16 @@ class group(Signature):
                 yield task, task.freeze(group_id=group_id, root_id=root_id)
                 yield task, task.freeze(group_id=group_id, root_id=root_id)
 
 
     def _apply_tasks(self, tasks, producer=None, app=None, p=None,
     def _apply_tasks(self, tasks, producer=None, app=None, p=None,
-                     add_to_parent=None, chord=None, **options):
+                     add_to_parent=None, chord=None,
+                     args=None, kwargs=None, **options):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         app = app or self.app
         app = app or self.app
         with app.producer_or_acquire(producer) as producer:
         with app.producer_or_acquire(producer) as producer:
             for sig, res in tasks:
             for sig, res in tasks:
                 sig.apply_async(producer=producer, add_to_parent=False,
                 sig.apply_async(producer=producer, add_to_parent=False,
                                 chord=sig.options.get('chord') or chord,
                                 chord=sig.options.get('chord') or chord,
+                                args=args, kwargs=kwargs,
                                 **options)
                                 **options)
 
 
                 # adding callback to result, such that it will gradually
                 # adding callback to result, such that it will gradually
@@ -932,75 +1095,10 @@ class group(Signature):
         for task in self.tasks:
         for task in self.tasks:
             task.set_parent_id(parent_id)
             task.set_parent_id(parent_id)
 
 
-    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
-                    producer=None, **options):
-        app = self.app
-        if app.conf.task_always_eager:
-            return self.apply(args, kwargs, **options)
-        if not self.tasks:
-            return self.freeze()
-
-        options, group_id, root_id = self._freeze_gid(options)
-        tasks = self._prepared(self.tasks, args, group_id, root_id, app)
-        p = barrier()
-        results = list(self._apply_tasks(tasks, producer, app, p, **options))
-        result = self.app.GroupResult(group_id, results, ready_barrier=p)
-        p.finalize()
-
-        # - Special case of group(A.s() | group(B.s(), C.s()))
-        # That is, group with single item that is a chain but the
-        # last task in that chain is a group.
-        #
-        # We cannot actually support arbitrary GroupResults in chains,
-        # but this special case we can.
-        if len(result) == 1 and isinstance(result[0], GroupResult):
-            result = result[0]
-
-        parent_task = app.current_worker_task
-        if add_to_parent and parent_task:
-            parent_task.add_trail(result)
-        return result
-
-    def apply(self, args=(), kwargs={}, **options):
-        app = self.app
-        if not self.tasks:
-            return self.freeze()  # empty group returns GroupResult
-        options, group_id, root_id = self._freeze_gid(options)
-        tasks = self._prepared(self.tasks, args, group_id, root_id, app)
-        return app.GroupResult(group_id, [
-            sig.apply(**options) for sig, _ in tasks
-        ])
-
-    def set_immutable(self, immutable):
-        for task in self.tasks:
-            task.set_immutable(immutable)
-
-    def link(self, sig):
-        # Simply link to first task
-        sig = sig.clone().set(immutable=True)
-        return self.tasks[0].link(sig)
-
-    def link_error(self, sig):
-        sig = sig.clone().set(immutable=True)
-        return self.tasks[0].link_error(sig)
-
-    def __call__(self, *partial_args, **options):
-        return self.apply_async(partial_args, **options)
-
-    def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id):
-        stack = deque(self.tasks)
-        while stack:
-            task = maybe_signature(stack.popleft(), app=self._app).clone()
-            if isinstance(task, group):
-                stack.extendleft(task.tasks)
-            else:
-                new_tasks.append(task)
-                yield task.freeze(group_id=group_id,
-                                  chord=chord, root_id=root_id,
-                                  parent_id=parent_id)
-
     def freeze(self, _id=None, group_id=None, chord=None,
     def freeze(self, _id=None, group_id=None, chord=None,
                root_id=None, parent_id=None):
                root_id=None, parent_id=None):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         opts = self.options
         opts = self.options
         try:
         try:
             gid = opts['task_id']
             gid = opts['task_id']
@@ -1025,11 +1123,19 @@ class group(Signature):
         return self.app.GroupResult(gid, results)
         return self.app.GroupResult(gid, results)
     _freeze = freeze
     _freeze = freeze
 
 
-    def skew(self, start=1.0, stop=None, step=1.0):
-        it = fxrange(start, stop, step, repeatlast=True)
-        for task in self.tasks:
-            task.set(countdown=next(it))
-        return self
+    def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
+        stack = deque(self.tasks)
+        while stack:
+            task = maybe_signature(stack.popleft(), app=self._app).clone()
+            if isinstance(task, group):
+                stack.extendleft(task.tasks)
+            else:
+                new_tasks.append(task)
+                yield task.freeze(group_id=group_id,
+                                  chord=chord, root_id=root_id,
+                                  parent_id=parent_id)
 
 
     def __iter__(self):
     def __iter__(self):
         return iter(self.tasks)
         return iter(self.tasks)
@@ -1037,6 +1143,9 @@ class group(Signature):
     def __repr__(self):
     def __repr__(self):
         return 'group({0.tasks!r})'.format(self)
         return 'group({0.tasks!r})'.format(self)
 
 
+    def __len__(self):
+        return len(self.tasks)
+
     @property
     @property
     def app(self):
     def app(self):
         app = self._app
         app = self._app
@@ -1050,7 +1159,7 @@ class group(Signature):
 
 
 @Signature.register_type
 @Signature.register_type
 class chord(Signature):
 class chord(Signature):
-    """Barrier synchronization primitive.
+    r"""Barrier synchronization primitive.
 
 
     A chord consists of a header and a body.
     A chord consists of a header and a body.
 
 
@@ -1075,22 +1184,41 @@ class chord(Signature):
             >>> res.get()
             >>> res.get()
             12
             12
     """
     """
+
+    @classmethod
+    def from_dict(cls, d, app=None):
+        args, d['kwargs'] = cls._unpack_args(**d['kwargs'])
+        return _upgrade(d, cls(*args, app=app, **d))
+
+    @staticmethod
+    def _unpack_args(header=None, body=None, **kwargs):
+        # Python signatures are better at extracting keys from dicts
+        # than manually popping things off.
+        return (header, body), kwargs
+
     def __init__(self, header, body=None, task='celery.chord',
     def __init__(self, header, body=None, task='celery.chord',
                  args=(), kwargs={}, app=None, **options):
                  args=(), kwargs={}, app=None, **options):
         Signature.__init__(
         Signature.__init__(
             self, task, args,
             self, task, args,
-            dict(kwargs, header=_maybe_group(header, app),
+            dict(kwargs=kwargs, header=_maybe_group(header, app),
                  body=maybe_signature(body, app=app)), app=app, **options
                  body=maybe_signature(body, app=app)), app=app, **options
         )
         )
         self.subtask_type = 'chord'
         self.subtask_type = 'chord'
 
 
+    def __call__(self, body=None, **options):
+        return self.apply_async((), {'body': body} if body else {}, **options)
+
     def freeze(self, _id=None, group_id=None, chord=None,
     def freeze(self, _id=None, group_id=None, chord=None,
                root_id=None, parent_id=None):
                root_id=None, parent_id=None):
+        # pylint: disable=redefined-outer-name
+        #   XXX chord is also a class in outer scope.
         if not isinstance(self.tasks, group):
         if not isinstance(self.tasks, group):
             self.tasks = group(self.tasks, app=self.app)
             self.tasks = group(self.tasks, app=self.app)
-        bodyres = self.body.freeze(_id, parent_id=self.id, root_id=root_id)
-        self.tasks.freeze(
+        header_result = self.tasks.freeze(
             parent_id=parent_id, root_id=root_id, chord=self.body)
             parent_id=parent_id, root_id=root_id, chord=self.body)
+        bodyres = self.body.freeze(
+            _id, parent_id=header_result.id, root_id=root_id)
+        bodyres.parent = header_result
         self.id = self.tasks.id
         self.id = self.tasks.id
         self.body.set_parent_id(self.id)
         self.body.set_parent_id(self.id)
         return bodyres
         return bodyres
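
For reference, the header/body split described in the chord docstring above, run eagerly so the whole thing executes in-process; this mirrors the ``res.get() == 12`` example, and ``add``/``tsum`` are illustrative:

    from celery import Celery, chord

    app = Celery('chord_demo')
    app.conf.task_always_eager = True            # run in-process for this sketch

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    res = chord(add.s(i, i) for i in range(4))(tsum.s())
    print(res.get())                             # expected: 12  (0 + 2 + 4 + 6)
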
@@ -1103,40 +1231,14 @@ class chord(Signature):
             task.set_parent_id(parent_id)
             task.set_parent_id(parent_id)
         self.parent_id = parent_id
         self.parent_id = parent_id
 
 
-    @classmethod
-    def from_dict(self, d, app=None):
-        args, d['kwargs'] = self._unpack_args(**d['kwargs'])
-        return _upgrade(d, self(*args, app=app, **d))
-
-    @staticmethod
-    def _unpack_args(header=None, body=None, **kwargs):
-        # Python signatures are better at extracting keys from dicts
-        # than manually popping things off.
-        return (header, body), kwargs
-
-    @cached_property
-    def app(self):
-        return self._get_app(self.body)
-
-    def _get_app(self, body=None):
-        app = self._app
-        if app is None:
-            try:
-                tasks = self.tasks.tasks  # is a group
-            except AttributeError:
-                tasks = self.tasks
-            app = tasks[0]._app
-            if app is None and body is not None:
-                app = body._app
-        return app if app is not None else current_app
-
     def apply_async(self, args=(), kwargs={}, task_id=None,
     def apply_async(self, args=(), kwargs={}, task_id=None,
                     producer=None, connection=None,
                     producer=None, connection=None,
                     router=None, result_cls=None, **options):
                     router=None, result_cls=None, **options):
+        kwargs = kwargs or {}
         args = (tuple(args) + tuple(self.args)
         args = (tuple(args) + tuple(self.args)
                 if args and not self.immutable else self.args)
                 if args and not self.immutable else self.args)
-        body = kwargs.get('body') or self.kwargs['body']
-        kwargs = dict(self.kwargs, **kwargs)
+        body = kwargs.pop('body', None) or self.kwargs['body']
+        kwargs = dict(self.kwargs['kwargs'], **kwargs)
         body = body.clone(**options)
         body = body.clone(**options)
         app = self._get_app(body)
         app = self._get_app(body)
         tasks = (self.tasks.clone() if isinstance(self.tasks, group)
         tasks = (self.tasks.clone() if isinstance(self.tasks, group)
@@ -1170,7 +1272,7 @@ class chord(Signature):
             countdown=1, max_retries=None, eager=False,
             countdown=1, max_retries=None, eager=False,
             task_id=None, **options):
             task_id=None, **options):
         app = app or self._get_app(body)
         app = app or self._get_app(body)
-        group_id = uuid()
+        group_id = header.options.get('task_id') or uuid()
         root_id = body.options.get('root_id')
         root_id = body.options.get('root_id')
         body.chord_size = self.__length_hint__()
         body.chord_size = self.__length_hint__()
         options = dict(self.options, **options) if options else self.options
         options = dict(self.options, **options) if options else self.options
@@ -1180,6 +1282,7 @@ class chord(Signature):
 
 
         results = header.freeze(
         results = header.freeze(
             group_id=group_id, chord=body, root_id=root_id).results
             group_id=group_id, chord=body, root_id=root_id).results
+        body.set_parent_id(group_id)
         bodyres = body.freeze(task_id, root_id=root_id)
         bodyres = body.freeze(task_id, root_id=root_id)
 
 
         parent = app.backend.apply_chord(
         parent = app.backend.apply_chord(
@@ -1190,14 +1293,11 @@ class chord(Signature):
         bodyres.parent = parent
         bodyres.parent = parent
         return bodyres
         return bodyres
 
 
-    def __call__(self, body=None, **options):
-        return self.apply_async((), {'body': body} if body else {}, **options)
-
     def clone(self, *args, **kwargs):
     def clone(self, *args, **kwargs):
         s = Signature.clone(self, *args, **kwargs)
         s = Signature.clone(self, *args, **kwargs)
         # need to make copy of body
         # need to make copy of body
         try:
         try:
-            s.kwargs['body'] = s.kwargs['body'].clone()
+            s.kwargs['body'] = maybe_signature(s.kwargs['body'], clone=True)
         except (AttributeError, KeyError):
         except (AttributeError, KeyError):
             pass
             pass
         return s
         return s
@@ -1220,12 +1320,28 @@ class chord(Signature):
             return self.body.reprcall(self.tasks)
             return self.body.reprcall(self.tasks)
         return '<chord without body: {0.tasks!r}>'.format(self)
         return '<chord without body: {0.tasks!r}>'.format(self)
 
 
+    @cached_property
+    def app(self):
+        return self._get_app(self.body)
+
+    def _get_app(self, body=None):
+        app = self._app
+        if app is None:
+            try:
+                tasks = self.tasks.tasks  # is a group
+            except AttributeError:
+                tasks = self.tasks
+            app = tasks[0]._app
+            if app is None and body is not None:
+                app = body._app
+        return app if app is not None else current_app
+
     tasks = _getitem_property('kwargs.header', 'Tasks in chord header.')
     tasks = _getitem_property('kwargs.header', 'Tasks in chord header.')
     body = _getitem_property('kwargs.body', 'Body task of chord.')
     body = _getitem_property('kwargs.body', 'Body task of chord.')
 
 
 
 
 def signature(varies, *args, **kwargs):
 def signature(varies, *args, **kwargs):
-    """Create new signature
+    """Create new signature.
 
 
     - if the first argument is a signature already then it's cloned.
     - if the first argument is a signature already then it's cloned.
     - if the first argument is a dict, then a Signature version is returned.
     - if the first argument is a dict, then a Signature version is returned.
@@ -1241,11 +1357,28 @@ def signature(varies, *args, **kwargs):
     return Signature(varies, *args, **kwargs)
     return Signature(varies, *args, **kwargs)
 
 
 
 
-def maybe_signature(d, app=None):
+def maybe_signature(d, app=None, clone=False):
+    """Ensure obj is a signature, or None.
+
+    Arguments:
+        d (Optional[Union[abstract.CallableSignature, Mapping]]):
+            Signature or dict-serialized signature.
+        app (celery.Celery):
+            App to bind signature to.
+        clone (bool):
+            If d is already a signature, the signature
+            will be cloned when this flag is enabled.
+
+    Returns:
+        Optional[abstract.CallableSignature]
+    """
     if d is not None:
     if d is not None:
-        if (isinstance(d, dict) and
-                not isinstance(d, abstract.CallableSignature)):
+        if isinstance(d, abstract.CallableSignature):
+            if clone:
+                d = d.clone()
+        elif isinstance(d, dict):
             d = signature(d)
             d = signature(d)
+
         if app is not None:
         if app is not None:
             d._app = app
             d._app = app
-        return d
+    return d
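
A short sketch of the three input forms listed in the ``signature()`` docstring above, plus ``maybe_signature`` round-tripping a dict-serialized signature; the task name is illustrative and no broker is needed:

    from celery import signature
    from celery.canvas import maybe_signature

    s1 = signature('tasks.add', args=(2, 2), countdown=10)   # from a task name
    s2 = signature(s1)                                        # from a signature: cloned
    s3 = signature(dict(s1))                                  # from a dict: upgraded back

    print(s2)                            # tasks.add(2, 2)
    print(s3.options)                    # {'countdown': 10}
    print(maybe_signature(dict(s1)))     # tasks.add(2, 2): a Signature again
    print(maybe_signature(None))         # None passes straight through
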

+ 2 - 1
celery/concurrency/__init__.py

@@ -2,7 +2,7 @@
 """Pool implementation abstract factory, and alias definitions."""
 """Pool implementation abstract factory, and alias definitions."""
 # Import from kombu directly as it's used
 # Import from kombu directly as it's used
 # early in the import stage, where celery.utils loads
 # early in the import stage, where celery.utils loads
-# too much (e.g. for eventlet patching)
+# too much (e.g., for eventlet patching)
 from kombu.utils.imports import symbol_by_name
 from kombu.utils.imports import symbol_by_name
 
 
 __all__ = ['get_implementation']
 __all__ = ['get_implementation']
@@ -17,4 +17,5 @@ ALIASES = {
 
 
 
 
 def get_implementation(cls):
 def get_implementation(cls):
+    """Return pool implementation by name."""
     return symbol_by_name(cls, ALIASES)
     return symbol_by_name(cls, ALIASES)

+ 72 - 62
celery/concurrency/asynpool.py

@@ -34,7 +34,7 @@ from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard import pool as _pool
 from billiard import pool as _pool
 from billiard.compat import buf_t, setblocking, isblocking
 from billiard.compat import buf_t, setblocking, isblocking
 from billiard.queues import _SimpleQueue
 from billiard.queues import _SimpleQueue
-from kombu.async import READ, WRITE, ERR
+from kombu.async import WRITE, ERR
 from kombu.serialization import pickle as _pickle
 from kombu.serialization import pickle as _pickle
 from kombu.utils.eventio import SELECT_BAD_FD
 from kombu.utils.eventio import SELECT_BAD_FD
 from kombu.utils.functional import fxrange
 from kombu.utils.functional import fxrange
@@ -44,6 +44,9 @@ from celery.utils.functional import noop
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.worker import state as worker_state
 from celery.worker import state as worker_state
 
 
+# pylint: disable=redefined-outer-name
+# We cache globals and attribute lookups, so disable this warning.
+
 try:
 try:
     from _billiard import read as __read__
     from _billiard import read as __read__
     from struct import unpack_from as _unpack_from
     from struct import unpack_from as _unpack_from
@@ -64,6 +67,7 @@ except (ImportError, NameError):  # pragma: no cover
     def unpack_from(fmt, iobuf, unpack=struct.unpack):  # noqa
     def unpack_from(fmt, iobuf, unpack=struct.unpack):  # noqa
         return unpack(fmt, iobuf.getvalue())  # <-- BytesIO
         return unpack(fmt, iobuf.getvalue())  # <-- BytesIO
 
 
+__all__ = ['AsynPool']
 
 
 logger = get_logger(__name__)
 logger = get_logger(__name__)
 error, debug = logger.error, logger.debug
 error, debug = logger.error, logger.debug
@@ -73,21 +77,25 @@ UNAVAIL = frozenset({errno.EAGAIN, errno.EINTR})
 #: Constant sent by child process when started (ready to accept work)
 #: Constant sent by child process when started (ready to accept work)
 WORKER_UP = 15
 WORKER_UP = 15
 
 
-#: A process must have started before this timeout (in secs.) expires.
+#: A process must've started before this timeout (in secs.) expires.
 PROC_ALIVE_TIMEOUT = 4.0
 PROC_ALIVE_TIMEOUT = 4.0
 
 
-SCHED_STRATEGY_PREFETCH = 1
+SCHED_STRATEGY_FCFS = 1
 SCHED_STRATEGY_FAIR = 4
 SCHED_STRATEGY_FAIR = 4
 
 
 SCHED_STRATEGIES = {
 SCHED_STRATEGIES = {
-    None: SCHED_STRATEGY_PREFETCH,
+    None: SCHED_STRATEGY_FAIR,
+    'fast': SCHED_STRATEGY_FCFS,
+    'fcfs': SCHED_STRATEGY_FCFS,
     'fair': SCHED_STRATEGY_FAIR,
     'fair': SCHED_STRATEGY_FAIR,
 }
 }
+SCHED_STRATEGY_TO_NAME = {v: k for k, v in SCHED_STRATEGIES.items()}
 
 
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 
 
 
 
 def gen_not_started(gen):
 def gen_not_started(gen):
+    """Return true if generator is not started."""
     # gi_frame is None when generator stopped.
     # gi_frame is None when generator stopped.
     return gen.gi_frame and gen.gi_frame.f_lasti == -1
     return gen.gi_frame and gen.gi_frame.f_lasti == -1
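
``gen_not_started()`` above is, in spirit, the ``GEN_CREATED`` check from the stdlib ``inspect`` module, expressed through frame internals to keep it cheap. An illustration using the stdlib equivalent:

    import inspect

    def numbers():
        yield 1
        yield 2

    gen = numbers()
    print(inspect.getgeneratorstate(gen))    # GEN_CREATED: gen_not_started() would be true
    next(gen)
    print(inspect.getgeneratorstate(gen))    # GEN_SUSPENDED: started, so false
    for _ in gen:                            # exhaust it; gi_frame becomes None
        pass
    print(inspect.getgeneratorstate(gen))    # GEN_CLOSED
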
 
 
@@ -138,8 +146,7 @@ else:
 
 
 def _select(readers=None, writers=None, err=None, timeout=0,
 def _select(readers=None, writers=None, err=None, timeout=0,
             poll=_select_imp):
             poll=_select_imp):
-    """Simple wrapper to :class:`~select.select`, using :`~select.poll`
-    as the implementation.
+    """Simple wrapper to :class:`~select.select`, using :`~select.poll`.
 
 
     Arguments:
     Arguments:
         readers (Set[Fd]): Set of reader fds to test if readable.
         readers (Set[Fd]): Set of reader fds to test if readable.
@@ -154,7 +161,7 @@ def _select(readers=None, writers=None, err=None, timeout=0,
     Returns:
     Returns:
         Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where
         Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where
         ``readable`` is a set of fds that have data available for read,
         ``readable`` is a set of fds that have data available for read,
-        ``writable`` is a set of fds that is ready to be written to
+        ``writable`` is a set of fds that's ready to be written to
         and ``again`` is a flag that if set means the caller must
         and ``again`` is a flag that if set means the caller must
         throw away the result and call us again.
         throw away the result and call us again.
     """
     """
@@ -255,8 +262,7 @@ class ResultHandler(_pool.ResultHandler):
             callback(message)
             callback(message)
 
 
     def _make_process_result(self, hub):
     def _make_process_result(self, hub):
-        """Coroutine that reads messages from the pool processes
-        and calls the appropriate handler."""
+        """Coroutine reading messages from the pool processes."""
         fileno_to_outq = self.fileno_to_outq
         fileno_to_outq = self.fileno_to_outq
         on_state_change = self.on_state_change
         on_state_change = self.on_state_change
         add_reader = hub.add_reader
         add_reader = hub.add_reader
@@ -282,19 +288,20 @@ class ResultHandler(_pool.ResultHandler):
     def register_with_event_loop(self, hub):
     def register_with_event_loop(self, hub):
         self.handle_event = self._make_process_result(hub)
         self.handle_event = self._make_process_result(hub)
 
 
-    def handle_event(self, fileno):
+    def handle_event(self, *args):
+        # pylint: disable=method-hidden
+        #   register_with_event_loop overrides this
         raise RuntimeError('Not registered with event loop')
         raise RuntimeError('Not registered with event loop')
 
 
     def on_stop_not_started(self):
     def on_stop_not_started(self):
-        """This method is always used to stop when the helper thread is not
-        started."""
+        # This is always used, since we do not start any threads.
         cache = self.cache
         cache = self.cache
         check_timeouts = self.check_timeouts
         check_timeouts = self.check_timeouts
         fileno_to_outq = self.fileno_to_outq
         fileno_to_outq = self.fileno_to_outq
         on_state_change = self.on_state_change
         on_state_change = self.on_state_change
         join_exited_workers = self.join_exited_workers
         join_exited_workers = self.join_exited_workers
 
 
-        # flush the processes outqueues until they have all terminated.
+        # flush the processes outqueues until they've all terminated.
         outqueues = set(fileno_to_outq)
         outqueues = set(fileno_to_outq)
         while cache and outqueues and self._state != TERMINATE:
         while cache and outqueues and self._state != TERMINATE:
             if check_timeouts is not None:
             if check_timeouts is not None:
@@ -318,7 +325,7 @@ class ResultHandler(_pool.ResultHandler):
             proc = process_index[fd]
             proc = process_index[fd]
         except KeyError:
         except KeyError:
             # process already found terminated
             # process already found terminated
-            # which means its outqueue has already been processed
+            # this means its outqueue has already been processed
             # by the worker lost handler.
             # by the worker lost handler.
             return remove(fd)
             return remove(fd)
 
 
@@ -346,7 +353,8 @@ class ResultHandler(_pool.ResultHandler):
 
 
 
 
 class AsynPool(_pool.Pool):
 class AsynPool(_pool.Pool):
-    """Pool version that uses AIO instead of helper threads."""
+    """AsyncIO Pool (no threads)."""
+
     ResultHandler = ResultHandler
     ResultHandler = ResultHandler
     Worker = Worker
     Worker = Worker
 
 
@@ -373,7 +381,7 @@ class AsynPool(_pool.Pool):
         # synqueue fileno -> process mapping
         # synqueue fileno -> process mapping
         self._fileno_to_synq = {}
         self._fileno_to_synq = {}
 
 
-        # We keep track of processes that have not yet
+        # We keep track of processes that haven't yet
         # sent a WORKER_UP message.  If a process fails to send
         # sent a WORKER_UP message.  If a process fails to send
         # this message within proc_up_timeout we terminate it
         # this message within proc_up_timeout we terminate it
         # and hope the next process will recover.
         # and hope the next process will recover.
@@ -440,7 +448,7 @@ class AsynPool(_pool.Pool):
             os.close(fd)
             os.close(fd)
 
 
     def register_with_event_loop(self, hub):
     def register_with_event_loop(self, hub):
-        """Registers the async pool with the current event loop."""
+        """Register the async pool with the current event loop."""
         self._result_handler.register_with_event_loop(hub)
         self._result_handler.register_with_event_loop(hub)
         self.handle_result_event = self._result_handler.handle_event
         self.handle_result_event = self._result_handler.handle_event
         self._create_timelimit_handlers(hub)
         self._create_timelimit_handlers(hub)
@@ -461,9 +469,8 @@ class AsynPool(_pool.Pool):
 
 
         hub.on_tick.add(self.on_poll_start)
         hub.on_tick.add(self.on_poll_start)
 
 
-    def _create_timelimit_handlers(self, hub, now=time.time):
-        """For async pool this sets up the handlers used
-        to implement time limits."""
+    def _create_timelimit_handlers(self, hub):
+        """Create handlers used to implement time limits."""
         call_later = hub.call_later
         call_later = hub.call_later
         trefs = self._tref_for_id = WeakValueDictionary()
         trefs = self._tref_for_id = WeakValueDictionary()
 
 
@@ -482,7 +489,7 @@ class AsynPool(_pool.Pool):
             try:
             try:
                 tref = trefs.pop(job)
                 tref = trefs.pop(job)
                 tref.cancel()
                 tref.cancel()
-                del(tref)
+                del tref
             except (KeyError, AttributeError):
             except (KeyError, AttributeError):
                 pass  # out of scope
                 pass  # out of scope
         self._discard_tref = _discard_tref
         self._discard_tref = _discard_tref
@@ -523,9 +530,8 @@ class AsynPool(_pool.Pool):
     def on_job_ready(self, job, i, obj, inqW_fd):
     def on_job_ready(self, job, i, obj, inqW_fd):
         self._mark_worker_as_available(inqW_fd)
         self._mark_worker_as_available(inqW_fd)
 
 
-    def _create_process_handlers(self, hub, READ=READ, ERR=ERR):
-        """For async pool this will create the handlers called
-        when a process is up/down and etc."""
+    def _create_process_handlers(self, hub):
+        """Create handlers called on process up/down, etc."""
         add_reader, remove_reader, remove_writer = (
         add_reader, remove_reader, remove_writer = (
             hub.add_reader, hub.remove_reader, hub.remove_writer,
             hub.add_reader, hub.remove_reader, hub.remove_writer,
         )
         )
@@ -551,7 +557,7 @@ class AsynPool(_pool.Pool):
 
 
         def on_process_up(proc):
         def on_process_up(proc):
             """Called when a process has started."""
             """Called when a process has started."""
-            # If we got the same fd as a previous process then we will also
+            # If we got the same fd as a previous process then we'll also
             # receive jobs in the old buffer, so we need to reset the
             # receive jobs in the old buffer, so we need to reset the
             # job._write_to and job._scheduled_for attributes used to recover
             # job._write_to and job._scheduled_for attributes used to recover
             # message boundaries when processes exit.
             # message boundaries when processes exit.
@@ -590,7 +596,7 @@ class AsynPool(_pool.Pool):
 
 
             try:
             try:
                 if index[fd] is proc:
                 if index[fd] is proc:
-                    # fd has not been reused so we can remove it from index.
+                    # fd hasn't been reused so we can remove it from index.
                     index.pop(fd, None)
                     index.pop(fd, None)
             except KeyError:
             except KeyError:
                 pass
                 pass
@@ -633,8 +639,7 @@ class AsynPool(_pool.Pool):
     def _create_write_handlers(self, hub,
     def _create_write_handlers(self, hub,
                                pack=struct.pack, dumps=_pickle.dumps,
                                pack=struct.pack, dumps=_pickle.dumps,
                                protocol=HIGHEST_PROTOCOL):
                                protocol=HIGHEST_PROTOCOL):
-        """For async pool this creates the handlers used to write data to
-        child processes."""
+        """Create handlers used to write data to child processes."""
         fileno_to_inq = self._fileno_to_inq
         fileno_to_inq = self._fileno_to_inq
         fileno_to_synq = self._fileno_to_synq
         fileno_to_synq = self._fileno_to_synq
         outbound = self.outbound_buffer
         outbound = self.outbound_buffer
@@ -731,11 +736,11 @@ class AsynPool(_pool.Pool):
             # with many processes, and also leans more towards fairness
             # with many processes, and also leans more towards fairness
             # in write stats when used with many processes
             # in write stats when used with many processes
             # [XXX On macOS, this may vary depending
             # [XXX On macOS, this may vary depending
-            # on event loop implementation (i.e select vs epoll), so
+            # on event loop implementation (i.e., select/poll vs epoll), so
             # have to test further]
             # have to test further]
             num_ready = len(ready_fds)
             num_ready = len(ready_fds)
 
 
-            for i in range(num_ready):
+            for _ in range(num_ready):
                 ready_fd = ready_fds[total_write_count[0] % num_ready]
                 ready_fd = ready_fds[total_write_count[0] % num_ready]
                 total_write_count[0] += 1
                 total_write_count[0] += 1
                 if ready_fd in active_writes:
                 if ready_fd in active_writes:
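
The loop above spreads writes over the ready fds by indexing with a global write counter modulo the number of ready fds. A toy sketch of just that selection rule; the values are illustrative:

    ready_fds = [3, 7, 9]                # pretend these pipe fds are writable right now
    total_write_count = [0]              # shared counter, as in the pool

    picked = []
    for _ in range(2 * len(ready_fds)):
        picked.append(ready_fds[total_write_count[0] % len(ready_fds)])
        total_write_count[0] += 1

    print(picked)                        # [3, 7, 9, 3, 7, 9]: round-robin across processes
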
@@ -802,8 +807,8 @@ class AsynPool(_pool.Pool):
         self._quick_put = send_job
         self._quick_put = send_job
 
 
         def on_not_recovering(proc, fd, job, exc):
         def on_not_recovering(proc, fd, job, exc):
-            error('Process inqueue damaged: %r %r: %r',
-                  proc, proc.exitcode, exc, exc_info=1)
+            logger.exception(
+                'Process inqueue damaged: %r %r: %r', proc, proc.exitcode, exc)
             if proc._is_alive():
             if proc._is_alive():
                 proc.terminate()
                 proc.terminate()
             hub.remove(fd)
             hub.remove(fd)
@@ -858,7 +863,7 @@ class AsynPool(_pool.Pool):
                 active_writes.discard(fd)
                 active_writes.discard(fd)
                 write_generator_done(job._writer())  # is a weakref
                 write_generator_done(job._writer())  # is a weakref
 
 
-        def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR):
+        def send_ack(response, pid, job, fd):
             # Only used when synack is enabled.
             # Only used when synack is enabled.
             # Schedule writing ack response for when the fd is writable.
             # Schedule writing ack response for when the fd is writable.
             msg = Ack(job, fd, precalc[response])
             msg = Ack(job, fd, precalc[response])
@@ -908,7 +913,7 @@ class AsynPool(_pool.Pool):
     def flush(self):
     def flush(self):
         if self._state == TERMINATE:
         if self._state == TERMINATE:
             return
             return
-        # cancel all tasks that have not been accepted so that NACK is sent.
+        # cancel all tasks that haven't been accepted so that NACK is sent.
         for job in self._cache.values():
         for job in self._cache.values():
             if not job._accepted:
             if not job._accepted:
                 job._cancel()
                 job._cancel()
@@ -938,7 +943,7 @@ class AsynPool(_pool.Pool):
                     for gen in writers:
                     for gen in writers:
                         if (gen.__name__ == '_write_job' and
                         if (gen.__name__ == '_write_job' and
                                 gen_not_started(gen)):
                                 gen_not_started(gen)):
-                            # has not started writing the job so can
+                            # hasn't started writing the job so can
                             # discard the task, but we must also remove
                             # discard the task, but we must also remove
                             # it from the Pool._cache.
                             # it from the Pool._cache.
                             try:
                             try:
@@ -987,7 +992,7 @@ class AsynPool(_pool.Pool):
     def get_process_queues(self):
     def get_process_queues(self):
         """Get queues for a new process.
         """Get queues for a new process.
 
 
-        Here we will find an unused slot, as there should always
+        Here we'll find an unused slot, as there should always
         be one available when we start a new process.
         be one available when we start a new process.
         """
         """
         return next(q for q, owner in self._queues.items()
         return next(q for q, owner in self._queues.items()
@@ -1006,11 +1011,10 @@ class AsynPool(_pool.Pool):
         pass
         pass
 
 
     def create_process_queues(self):
     def create_process_queues(self):
-        """Creates new in, out (and optionally syn) queues,
-        returned as a tuple."""
+        """Create new in, out, etc. queues, returned as a tuple."""
         # NOTE: Pipes must be set O_NONBLOCK at creation time (the original
         # NOTE: Pipes must be set O_NONBLOCK at creation time (the original
-        # fd), otherwise it will not be possible to change the flags until
-        # there is an actual reader/writer on the other side.
+        # fd), otherwise it won't be possible to change the flags until
+        # there's an actual reader/writer on the other side.
         inq = _SimpleQueue(wnonblock=True)
         inq = _SimpleQueue(wnonblock=True)
         outq = _SimpleQueue(rnonblock=True)
         outq = _SimpleQueue(rnonblock=True)
         synq = None
         synq = None
@@ -1025,9 +1029,10 @@ class AsynPool(_pool.Pool):
         return inq, outq, synq
         return inq, outq, synq
 
 
     def on_process_alive(self, pid):
     def on_process_alive(self, pid):
-        """Handler called when the :const:`WORKER_UP` message is received
-        from a child process, which marks the process as ready
-        to receive work."""
+        """Called when reciving the :const:`WORKER_UP` message.
+
+        Marks the process as ready to receive work.
+        """
         try:
         try:
             proc = next(w for w in self._pool if w.pid == pid)
             proc = next(w for w in self._pool if w.pid == pid)
         except StopIteration:
         except StopIteration:
@@ -1040,8 +1045,7 @@ class AsynPool(_pool.Pool):
         self._all_inqueues.add(proc.inqW_fd)
         self._all_inqueues.add(proc.inqW_fd)
 
 
     def on_job_process_down(self, job, pid_gone):
     def on_job_process_down(self, job, pid_gone):
-        """Handler called for each job when the process it was assigned to
-        exits."""
+        """Called for each job when the process assigned to it exits."""
         if job._write_to and not job._write_to._is_alive():
         if job._write_to and not job._write_to._is_alive():
             # job was partially written
             # job was partially written
             self.on_partial_read(job, job._write_to)
             self.on_partial_read(job, job._write_to)
@@ -1051,9 +1055,12 @@ class AsynPool(_pool.Pool):
             self._put_back(job)
             self._put_back(job)
 
 
     def on_job_process_lost(self, job, pid, exitcode):
     def on_job_process_lost(self, job, pid, exitcode):
-        """Handler called for each *started* job when the process it
+        """Called when the process executing job' exits.
+
+        This happens when the process job'
         was assigned to exited by mysterious means (error exitcodes and
         was assigned to exited by mysterious means (error exitcodes and
-        signals)"""
+        signals).
+        """
         self.mark_as_worker_lost(job, exitcode)
         self.mark_as_worker_lost(job, exitcode)
 
 
     def human_write_stats(self):
     def human_write_stats(self):
@@ -1070,6 +1077,7 @@ class AsynPool(_pool.Pool):
             'avg': per(total / len(self.write_stats) if total else 0, total),
             'avg': per(total / len(self.write_stats) if total else 0, total),
             'all': ', '.join(per(v, total) for v in vals),
             'all': ', '.join(per(v, total) for v in vals),
             'raw': ', '.join(map(str, vals)),
             'raw': ', '.join(map(str, vals)),
+            'strategy': SCHED_STRATEGY_TO_NAME[self.sched_strategy],
             'inqueues': {
             'inqueues': {
                 'total': len(self._all_inqueues),
                 'total': len(self._all_inqueues),
                 'active': len(self._active_writes),
                 'active': len(self._active_writes),
@@ -1077,8 +1085,7 @@ class AsynPool(_pool.Pool):
         }
         }
 
 
     def _process_cleanup_queues(self, proc):
     def _process_cleanup_queues(self, proc):
-        """Handler called to clean up a processes queues after process
-        exit."""
+        """Called to clean up queues after process exit."""
         if not proc.dead:
         if not proc.dead:
             try:
             try:
                 self._queues[self._find_worker_queues(proc)] = None
                 self._queues[self._find_worker_queues(proc)] = None
@@ -1087,7 +1094,7 @@ class AsynPool(_pool.Pool):
 
 
     @staticmethod
     @staticmethod
     def _stop_task_handler(task_handler):
     def _stop_task_handler(task_handler):
-        """Called at shutdown to tell processes that we are shutting down."""
+        """Called at shutdown to tell processes that we're shutting down."""
         for proc in task_handler.pool:
         for proc in task_handler.pool:
             try:
             try:
                 setblocking(proc.inq._writer, 1)
                 setblocking(proc.inq._writer, 1)
@@ -1107,8 +1114,7 @@ class AsynPool(_pool.Pool):
         )
         )
 
 
     def _process_register_queues(self, proc, queues):
     def _process_register_queues(self, proc, queues):
-        """Marks new ownership for ``queues`` so that the fileno indices are
-        updated."""
+        """Mark new ownership for ``queues`` to update fileno indices."""
         assert queues in self._queues
         assert queues in self._queues
         b = len(self._queues)
         b = len(self._queues)
         self._queues[queues] = proc
         self._queues[queues] = proc
@@ -1123,17 +1129,19 @@ class AsynPool(_pool.Pool):
             raise ValueError(proc)
             raise ValueError(proc)
 
 
     def _setup_queues(self):
     def _setup_queues(self):
-        # this is only used by the original pool which uses a shared
+        # this is only used by the original pool that used a shared
         # queue for all processes.
         # queue for all processes.
 
 
-        # these attributes makes no sense for us, but we will still
+        # these attributes make no sense for us, but we'll still
         # have to initialize them.
         # have to initialize them.
         self._inqueue = self._outqueue = \
         self._inqueue = self._outqueue = \
             self._quick_put = self._quick_get = self._poll_result = None
             self._quick_put = self._quick_get = self._poll_result = None
 
 
     def process_flush_queues(self, proc):
     def process_flush_queues(self, proc):
-        """Flushes all queues, including the outbound buffer, so that
-        all tasks that have not been started will be discarded.
+        """Flush all queues.
+
+        This includes the outbound buffer, so that
+        all tasks that haven't been started will be discarded.
 
 
         In Celery this is called whenever the transport connection is lost
         In Celery this is called whenever the transport connection is lost
         (consumer restart), and when a process is terminated.
         (consumer restart), and when a process is terminated.
@@ -1142,7 +1150,7 @@ class AsynPool(_pool.Pool):
         on_state_change = self._result_handler.on_state_change
         on_state_change = self._result_handler.on_state_change
         fds = {resq}
         fds = {resq}
         while fds and not resq.closed and self._state != TERMINATE:
         while fds and not resq.closed and self._state != TERMINATE:
-            readable, _, again = _select(fds, None, fds, timeout=0.01)
+            readable, _, _ = _select(fds, None, fds, timeout=0.01)
             if readable:
             if readable:
                 try:
                 try:
                     task = resq.recv()
                     task = resq.recv()
@@ -1166,8 +1174,7 @@ class AsynPool(_pool.Pool):
                 break
                 break
 
 
     def on_partial_read(self, job, proc):
     def on_partial_read(self, job, proc):
-        """Called when a job was only partially written to a child process
-        and it exited."""
+        """Called when a job was partially written to exited child."""
         # worker terminated by signal:
         # worker terminated by signal:
         # we cannot reuse the sockets again, because we don't know if
         # we cannot reuse the sockets again, because we don't know if
         # the process wrote/read anything from them, and if so we cannot
         # the process wrote/read anything from them, and if so we cannot
@@ -1178,7 +1185,7 @@ class AsynPool(_pool.Pool):
         writer = _get_job_writer(job)
         writer = _get_job_writer(job)
         if writer:
         if writer:
             self._active_writers.discard(writer)
             self._active_writers.discard(writer)
-            del(writer)
+            del writer
 
 
         if not proc.dead:
         if not proc.dead:
             proc.dead = True
             proc.dead = True
@@ -1193,8 +1200,10 @@ class AsynPool(_pool.Pool):
             assert len(self._queues) == before
             assert len(self._queues) == before
 
 
     def destroy_queues(self, queues, proc):
     def destroy_queues(self, queues, proc):
-        """Destroy queues that can no longer be used, so that they
-        be replaced by new sockets."""
+        """Destroy queues that can no longer be used.
+
+        This way they can be replaced by new usable sockets.
+        """
         assert not proc._is_alive()
         assert not proc._is_alive()
         self._waiting_to_start.discard(proc)
         self._waiting_to_start.discard(proc)
         removed = 1
         removed = 1
@@ -1236,6 +1245,7 @@ class AsynPool(_pool.Pool):
 
 
     @classmethod
     @classmethod
     def _help_stuff_finish(cls, pool):
     def _help_stuff_finish(cls, pool):
+        # pylint: disable=arguments-differ
         debug(
         debug(
             'removing tasks from inqueue until task handler finished',
             'removing tasks from inqueue until task handler finished',
         )
         )

+ 4 - 1
celery/concurrency/base.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-"""Base Execution Pool"""
+"""Base Execution Pool."""
 import logging
 import logging
 import os
 import os
 import sys
 import sys
@@ -23,6 +23,7 @@ logger = get_logger('celery.pool')
 def apply_target(target, args=(), kwargs={}, callback=None,
 def apply_target(target, args=(), kwargs={}, callback=None,
                  accept_callback=None, pid=None, getpid=os.getpid,
                  accept_callback=None, pid=None, getpid=os.getpid,
                  propagate=(), monotonic=monotonic, **_):
                  propagate=(), monotonic=monotonic, **_):
+    """Apply function within pool context."""
     if accept_callback:
     if accept_callback:
         accept_callback(pid or getpid(), monotonic())
         accept_callback(pid or getpid(), monotonic())
     try:
     try:
@@ -43,6 +44,8 @@ def apply_target(target, args=(), kwargs={}, callback=None,
 
 
 
 
 class BasePool:
 class BasePool:
+    """Task pool."""
+
     RUN = 0x1
     RUN = 0x1
     CLOSE = 0x2
     CLOSE = 0x2
     TERMINATE = 0x3
     TERMINATE = 0x3

+ 8 - 4
celery/concurrency/eventlet.py

@@ -1,8 +1,7 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 """Eventlet execution pool."""
 """Eventlet execution pool."""
 import sys
 import sys
-
-from time import time
+from time import monotonic
 
 
 __all__ = ['TaskPool']
 __all__ = ['TaskPool']
 
 
@@ -36,6 +35,7 @@ def apply_target(target, args=(), kwargs={}, callback=None,
 
 
 
 
 class Timer(_timer.Timer):
 class Timer(_timer.Timer):
+    """Eventlet Timer."""
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
         from eventlet.greenthread import spawn_after
         from eventlet.greenthread import spawn_after
@@ -46,8 +46,8 @@ class Timer(_timer.Timer):
         self._spawn_after = spawn_after
         self._spawn_after = spawn_after
         self._queue = set()
         self._queue = set()
 
 
-    def _enter(self, eta, priority, entry):
-        secs = max(eta - time(), 0)
+    def _enter(self, eta, priority, entry, **kwargs):
+        secs = max(eta - monotonic(), 0)
         g = self._spawn_after(secs, entry)
         g = self._spawn_after(secs, entry)
         self._queue.add(g)
         self._queue.add(g)
         g.link(self._entry_exit, entry)
         g.link(self._entry_exit, entry)
@@ -87,11 +87,15 @@ class Timer(_timer.Timer):
 
 
 
 
 class TaskPool(base.BasePool):
 class TaskPool(base.BasePool):
+    """Eventlet Task Pool."""
+
     Timer = Timer
     Timer = Timer
 
 
     signal_safe = False
     signal_safe = False
     is_green = True
     is_green = True
     task_join_will_block = False
     task_join_will_block = False
+    _pool = None
+    _quick_put = None
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
         from eventlet import greenthread
         from eventlet import greenthread

+ 15 - 10
celery/concurrency/gevent.py

@@ -1,23 +1,24 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 """Gevent execution pool."""
 """Gevent execution pool."""
-from time import time
+from kombu.async import timer as _timer
+from time import monotonic
+from . import base
 
 
 try:
 try:
     from gevent import Timeout
     from gevent import Timeout
 except ImportError:  # pragma: no cover
 except ImportError:  # pragma: no cover
     Timeout = None  # noqa
     Timeout = None  # noqa
 
 
-from kombu.async import timer as _timer
-
-from .base import apply_target, BasePool
-
 __all__ = ['TaskPool']
 __all__ = ['TaskPool']
 
 
+# pylint: disable=redefined-outer-name
+# We cache globals and attribute lookups, so disable this warning.
+
 
 
 def apply_timeout(target, args=(), kwargs={}, callback=None,
 def apply_timeout(target, args=(), kwargs={}, callback=None,
                   accept_callback=None, pid=None, timeout=None,
                   accept_callback=None, pid=None, timeout=None,
                   timeout_callback=None, Timeout=Timeout,
                   timeout_callback=None, Timeout=Timeout,
-                  apply_target=apply_target, **rest):
+                  apply_target=base.apply_target, **rest):
     try:
     try:
         with Timeout(timeout):
         with Timeout(timeout):
             return apply_target(target, args, kwargs, callback,
             return apply_target(target, args, kwargs, callback,
@@ -40,8 +41,8 @@ class Timer(_timer.Timer):
         super().__init__(*args, **kwargs)
         super().__init__(*args, **kwargs)
         self._queue = set()
         self._queue = set()
 
 
-    def _enter(self, eta, priority, entry):
-        secs = max(eta - time(), 0)
+    def _enter(self, eta, priority, entry, **kwargs):
+        secs = max(eta - monotonic(), 0)
         g = self._Greenlet.spawn_later(secs, entry)
         g = self._Greenlet.spawn_later(secs, entry)
         self._queue.add(g)
         self._queue.add(g)
         g.link(self._entry_exit)
         g.link(self._entry_exit)
@@ -70,12 +71,16 @@ class Timer(_timer.Timer):
         return self._queue
         return self._queue
 
 
 
 
-class TaskPool(BasePool):
+class TaskPool(base.BasePool):
+    """GEvent Pool."""
+
     Timer = Timer
     Timer = Timer
 
 
     signal_safe = False
     signal_safe = False
     is_green = True
     is_green = True
     task_join_will_block = False
     task_join_will_block = False
+    _pool = None
+    _quick_put = None
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
         from gevent import spawn_raw
         from gevent import spawn_raw
@@ -95,7 +100,7 @@ class TaskPool(BasePool):
 
 
     def on_apply(self, target, args=None, kwargs=None, callback=None,
     def on_apply(self, target, args=None, kwargs=None, callback=None,
                  accept_callback=None, timeout=None,
                  accept_callback=None, timeout=None,
-                 timeout_callback=None, **_):
+                 timeout_callback=None, apply_target=base.apply_target, **_):
         timeout = self.timeout if timeout is None else timeout
         timeout = self.timeout if timeout is None else timeout
         return self._quick_put(apply_timeout if timeout else apply_target,
         return self._quick_put(apply_timeout if timeout else apply_target,
                                target, args, kwargs, callback, accept_callback,
                                target, args, kwargs, callback, accept_callback,

+ 3 - 2
celery/concurrency/prefork.py

@@ -47,7 +47,7 @@ def process_initializer(app, hostname):
     platforms.signals.ignore(*WORKER_SIGIGNORE)
     platforms.signals.ignore(*WORKER_SIGIGNORE)
     platforms.set_mp_process_title('celeryd', hostname=hostname)
     platforms.set_mp_process_title('celeryd', hostname=hostname)
     # This is for Windows and other platforms not supporting
     # This is for Windows and other platforms not supporting
-    # fork(). Note that init_worker makes sure it's only
+    # fork().  Note that init_worker makes sure it's only
     # run once per process.
     # run once per process.
     app.loader.init_worker()
     app.loader.init_worker()
     app.loader.init_worker_process()
     app.loader.init_worker_process()
@@ -79,7 +79,7 @@ def process_initializer(app, hostname):
 
 
 
 
 def process_destructor(pid, exitcode):
 def process_destructor(pid, exitcode):
-    """Pool child process destructor
+    """Pool child process destructor.
 
 
     Dispatch the :signal:`worker_process_shutdown` signal.
     Dispatch the :signal:`worker_process_shutdown` signal.
     """
     """
@@ -90,6 +90,7 @@ def process_destructor(pid, exitcode):
 
 
 class TaskPool(BasePool):
 class TaskPool(BasePool):
     """Multiprocessing Pool implementation."""
     """Multiprocessing Pool implementation."""
+
     Pool = AsynPool
     Pool = AsynPool
     BlockingPool = BlockingPool
     BlockingPool = BlockingPool
 
 

+ 8 - 5
celery/concurrency/solo.py

@@ -9,6 +9,7 @@ __all__ = ['TaskPool']
 
 
 class TaskPool(BasePool):
 class TaskPool(BasePool):
     """Solo task pool (blocking, inline, fast)."""
     """Solo task pool (blocking, inline, fast)."""
+
     body_can_be_buffer = True
     body_can_be_buffer = True
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
@@ -17,8 +18,10 @@ class TaskPool(BasePool):
         self.limit = 1
         self.limit = 1
 
 
     def _get_info(self):
     def _get_info(self):
-        return {'max-concurrency': 1,
-                'processes': [os.getpid()],
-                'max-tasks-per-child': None,
-                'put-guarded-by-semaphore': True,
-                'timeouts': ()}
+        return {
+            'max-concurrency': 1,
+            'processes': [os.getpid()],
+            'max-tasks-per-child': None,
+            'put-guarded-by-semaphore': True,
+            'timeouts': (),
+        }

+ 13 - 8
celery/contrib/abortable.py

@@ -5,7 +5,7 @@ Abortable tasks overview
 =========================
 =========================
 
 
 For long-running :class:`Task`'s, it can be desirable to support
 For long-running :class:`Task`'s, it can be desirable to support
-aborting during execution. Of course, these tasks should be built to
+aborting during execution.  Of course, these tasks should be built to
 support abortion specifically.
 support abortion specifically.
 
 
 The :class:`AbortableTask` serves as a base class for all :class:`Task`
 The :class:`AbortableTask` serves as a base class for all :class:`Task`
@@ -16,7 +16,7 @@ objects that should support abortion by producers.
 
 
 * Consumers (workers) should periodically check (and honor!) the
 * Consumers (workers) should periodically check (and honor!) the
   :meth:`is_aborted` method at controlled points in their task's
   :meth:`is_aborted` method at controlled points in their task's
-  :meth:`run` method. The more often, the better.
+  :meth:`run` method.  The more often, the better.
 
 
 The necessary intermediate communication is dealt with by the
 The necessary intermediate communication is dealt with by the
 :class:`AbortableTask` implementation.
 :class:`AbortableTask` implementation.
@@ -67,9 +67,9 @@ In the producer:
         time.sleep(10)
         time.sleep(10)
         result.abort()
         result.abort()
 
 
-After the `result.abort()` call, the task execution is not
-aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking `result.state` status, or call `result.get(timeout=)` to
+After the `result.abort()` call, the task execution isn't
+aborted immediately.  In fact, it's not guaranteed to abort at all.
+Keep checking `result.state` status, or call `result.get(timeout=)` to
 have it block until the task is finished.
 have it block until the task is finished.
 
 
 .. note::
 .. note::
@@ -105,7 +105,7 @@ class AbortableAsyncResult(AsyncResult):
     """Represents a abortable result.
     """Represents a abortable result.
 
 
     Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
     Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
-    which sets the state of the underlying Task to `'ABORTED'`.
+    that sets the state of the underlying Task to `'ABORTED'`.
     """
     """
 
 
     def is_aborted(self):
     def is_aborted(self):
@@ -129,13 +129,16 @@ class AbortableAsyncResult(AsyncResult):
 
 
 
 
 class AbortableTask(Task):
 class AbortableTask(Task):
-    """A celery task that serves as a base class for all :class:`Task`'s
+    """Task that can be aborted.
+
+    This serves as a base class for all :class:`Task`'s
     that support aborting during execution.
     that support aborting during execution.
 
 
     All subclasses of :class:`AbortableTask` must call the
     All subclasses of :class:`AbortableTask` must call the
     :meth:`is_aborted` method periodically and act accordingly when
     :meth:`is_aborted` method periodically and act accordingly when
     the call evaluates to :const:`True`.
     the call evaluates to :const:`True`.
     """
     """
+
     abstract = True
     abstract = True
 
 
     def AsyncResult(self, task_id):
     def AsyncResult(self, task_id):
@@ -143,7 +146,9 @@ class AbortableTask(Task):
         return AbortableAsyncResult(task_id, backend=self.backend)
         return AbortableAsyncResult(task_id, backend=self.backend)
 
 
     def is_aborted(self, **kwargs):
     def is_aborted(self, **kwargs):
-        """Checks against the backend whether this
+        """Return true if task is aborted.
+
+        Checks against the backend whether this
         :class:`AbortableAsyncResult` is :const:`ABORTED`.
         :class:`AbortableAsyncResult` is :const:`ABORTED`.
 
 
         Always return :const:`False` in case the `task_id` parameter
         Always return :const:`False` in case the `task_id` parameter
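The pattern described above can be summed up in a short sketch; the app, broker URL, task name, and process() helper below are illustrative assumptions, not part of this change:

from celery import Celery
from celery.contrib.abortable import AbortableTask

app = Celery('proj', broker='amqp://')  # hypothetical app/broker


@app.task(bind=True, base=AbortableTask)
def long_running(self, items):
    for item in items:
        # Honor abort requests at a safe checkpoint.
        if self.is_aborted():
            return 'aborted'
        process(item)  # hypothetical per-item work


# Producer side: request abortion, then keep polling the result state.
result = long_running.delay(list(range(1000)))
result.abort()       # marks the backend state as 'ABORTED'
print(result.state)  # the task only stops once it checks is_aborted()

As the docstring notes, abort() only flips a flag in the result backend; the task keeps running until it reaches its next is_aborted() check.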

+ 120 - 64
celery/contrib/migrate.py

@@ -11,6 +11,7 @@ from kombu.utils.encoding import ensure_bytes
 
 
 from celery.app import app_or_default
 from celery.app import app_or_default
 from celery.utils.nodenames import worker_direct
 from celery.utils.nodenames import worker_direct
+from celery.utils.text import str_to_list
 
 
 __all__ = [
 __all__ = [
     'StopFiltering', 'State', 'republish', 'migrate_task',
     'StopFiltering', 'State', 'republish', 'migrate_task',
@@ -26,10 +27,12 @@ Moving task {state.filtered}/{state.strtotal}: \
 
 
 
 
 class StopFiltering(Exception):
 class StopFiltering(Exception):
-    pass
+    """Semi-predicate used to signal filter stop."""
 
 
 
 
 class State:
 class State:
+    """Migration progress state."""
+
     count = 0
     count = 0
     filtered = 0
     filtered = 0
     total_apx = 0
     total_apx = 0
@@ -51,6 +54,7 @@ def republish(producer, message, exchange=None, routing_key=None,
                             'content_type',
                             'content_type',
                             'content_encoding',
                             'content_encoding',
                             'headers']):
                             'headers']):
+    """Republish message."""
     body = ensure_bytes(message.body)  # use raw message body.
     body = ensure_bytes(message.body)  # use raw message body.
     info, headers, props = (message.delivery_info,
     info, headers, props = (message.delivery_info,
                             message.headers, message.properties)
                             message.headers, message.properties)
@@ -71,6 +75,7 @@ def republish(producer, message, exchange=None, routing_key=None,
 
 
 
 
 def migrate_task(producer, body_, message, queues=None):
 def migrate_task(producer, body_, message, queues=None):
+    """Migrate single task message."""
     info = message.delivery_info
     info = message.delivery_info
     queues = {} if queues is None else queues
     queues = {} if queues is None else queues
     republish(producer, message,
     republish(producer, message,
@@ -90,6 +95,7 @@ def filter_callback(callback, tasks):
 
 
 def migrate_tasks(source, dest, migrate=migrate_task, app=None,
 def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                   queues=None, **kwargs):
                   queues=None, **kwargs):
+    """Migrate tasks from one broker to another."""
     app = app_or_default(app)
     app = app_or_default(app)
     queues = prepare_queues(queues)
     queues = prepare_queues(queues)
     producer = app.amqp.Producer(dest)
     producer = app.amqp.Producer(dest)
@@ -121,20 +127,20 @@ def move(predicate, connection=None, exchange=None, routing_key=None,
     """Find tasks by filtering them and move the tasks to a new queue.
     """Find tasks by filtering them and move the tasks to a new queue.
 
 
     Arguments:
     Arguments:
-        predicate (Callable): Filter function used to decide which messages
+        predicate (Callable): Filter function used to decide the messages
             to move.  Must accept the standard signature of ``(body, message)``
             to move.  Must accept the standard signature of ``(body, message)``
-            used by Kombu consumer callbacks. If the predicate wants the
+            used by Kombu consumer callbacks.  If the predicate wants the
             message to be moved it must return either:
             message to be moved it must return either:
 
 
                 1) a tuple of ``(exchange, routing_key)``, or
                 1) a tuple of ``(exchange, routing_key)``, or
 
 
                 2) a :class:`~kombu.entity.Queue` instance, or
                 2) a :class:`~kombu.entity.Queue` instance, or
 
 
-                3) any other true value which means the specified
+                3) any other true value, meaning the specified
                     ``exchange`` and ``routing_key`` arguments will be used.
                     ``exchange`` and ``routing_key`` arguments will be used.
         connection (kombu.Connection): Custom connection to use.
         connection (kombu.Connection): Custom connection to use.
         source: List[Union[str, kombu.Queue]]: Optional list of source
         source: List[Union[str, kombu.Queue]]: Optional list of source
-            queues to use instead of the default (which is the queues
+            queues to use instead of the default (queues
             in :setting:`task_queues`).  This list can also contain
             in :setting:`task_queues`).  This list can also contain
             :class:`~kombu.entity.Queue` instances.
             :class:`~kombu.entity.Queue` instances.
         exchange (str, kombu.Exchange): Default destination exchange.
         exchange (str, kombu.Exchange): Default destination exchange.
@@ -215,10 +221,12 @@ def expand_dest(ret, exchange, routing_key):
 
 
 
 
 def task_id_eq(task_id, body, message):
 def task_id_eq(task_id, body, message):
+    """Return true if task id equals task_id'."""
     return body['id'] == task_id
     return body['id'] == task_id
 
 
 
 
 def task_id_in(ids, body, message):
 def task_id_in(ids, body, message):
+    """Return true if task id is member of set ids'."""
     return body['id'] in ids
     return body['id'] in ids
 
 
 
 
@@ -233,68 +241,115 @@ def prepare_queues(queues):
     return queues
     return queues
 
 
 
 
-def start_filter(app, conn, filter, limit=None, timeout=1.0,
+class Filterer(object):
+
+    def __init__(self, app, conn, filter,
+                 limit=None, timeout=1.0,
                  ack_messages=False, tasks=None, queues=None,
                  ack_messages=False, tasks=None, queues=None,
                  callback=None, forever=False, on_declare_queue=None,
                  callback=None, forever=False, on_declare_queue=None,
                  consume_from=None, state=None, accept=None, **kwargs):
                  consume_from=None, state=None, accept=None, **kwargs):
-    state = state or State()
-    queues = prepare_queues(queues)
-    consume_from = [_maybe_queue(app, q)
-                    for q in consume_from or list(queues)]
-    if isinstance(tasks, str):
-        tasks = set(tasks.split(','))
-    if tasks is None:
-        tasks = set()
-
-    def update_state(body, message):
-        state.count += 1
-        if limit and state.count >= limit:
+        self.app = app
+        self.conn = conn
+        self.filter = filter
+        self.limit = limit
+        self.timeout = timeout
+        self.ack_messages = ack_messages
+        self.tasks = set(str_to_list(tasks) or [])
+        self.queues = prepare_queues(queues)
+        self.callback = callback
+        self.forever = forever
+        self.on_declare_queue = on_declare_queue
+        self.consume_from = [
+            _maybe_queue(self.app, q)
+            for q in consume_from or list(self.queues)
+        ]
+        self.state = state or State()
+        self.accept = accept
+
+    def start(self):
+        # start migrating messages.
+        with self.prepare_consumer(self.create_consumer()):
+            try:
+                for _ in eventloop(self.conn,  # pragma: no cover
+                                   timeout=self.timeout,
+                                   ignore_timeouts=self.forever):
+                    pass
+            except socket.timeout:
+                pass
+            except StopFiltering:
+                pass
+        return self.state
+
+    def update_state(self, body, message):
+        self.state.count += 1
+        if self.limit and self.state.count >= self.limit:
             raise StopFiltering()
             raise StopFiltering()
 
 
-    def ack_message(body, message):
+    def ack_message(self, body, message):
         message.ack()
         message.ack()
 
 
-    consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept)
-
-    if tasks:
-        filter = filter_callback(filter, tasks)
-        update_state = filter_callback(update_state, tasks)
-        ack_message = filter_callback(ack_message, tasks)
-
-    consumer.register_callback(filter)
-    consumer.register_callback(update_state)
-    if ack_messages:
-        consumer.register_callback(ack_message)
-    if callback is not None:
-        callback = partial(callback, state)
-        if tasks:
-            callback = filter_callback(callback, tasks)
-        consumer.register_callback(callback)
-
-    # declare all queues on the new broker.
-    for queue in consumer.queues:
-        if queues and queue.name not in queues:
-            continue
-        if on_declare_queue is not None:
-            on_declare_queue(queue)
-        try:
-            _, mcount, _ = queue(consumer.channel).queue_declare(passive=True)
-            if mcount:
-                state.total_apx += mcount
-        except conn.channel_errors:
-            pass
-
-    # start migrating messages.
-    with consumer:
-        try:
-            for _ in eventloop(conn,  # pragma: no cover
-                               timeout=timeout, ignore_timeouts=forever):
+    def create_consumer(self):
+        return self.app.amqp.TaskConsumer(
+            self.conn,
+            queues=self.consume_from,
+            accept=self.accept,
+        )
+
+    def prepare_consumer(self, consumer):
+        filter = self.filter
+        update_state = self.update_state
+        ack_message = self.ack_message
+        if self.tasks:
+            filter = filter_callback(filter, self.tasks)
+            update_state = filter_callback(update_state, self.tasks)
+            ack_message = filter_callback(ack_message, self.tasks)
+        consumer.register_callback(filter)
+        consumer.register_callback(update_state)
+        if self.ack_messages:
+            consumer.register_callback(self.ack_message)
+        if self.callback is not None:
+            callback = partial(self.callback, self.state)
+            if self.tasks:
+                callback = filter_callback(callback, self.tasks)
+            consumer.register_callback(callback)
+        self.declare_queues(consumer)
+        return consumer
+
+    def declare_queues(self, consumer):
+        # declare all queues on the new broker.
+        for queue in consumer.queues:
+            if self.queues and queue.name not in self.queues:
+                continue
+            if self.on_declare_queue is not None:
+                self.on_declare_queue(queue)
+            try:
+                _, mcount, _ = queue(
+                    consumer.channel).queue_declare(passive=True)
+                if mcount:
+                    self.state.total_apx += mcount
+            except self.conn.channel_errors:
                 pass
                 pass
-        except socket.timeout:
-            pass
-        except StopFiltering:
-            pass
-    return state
+
+
+def start_filter(app, conn, filter, limit=None, timeout=1.0,
+                 ack_messages=False, tasks=None, queues=None,
+                 callback=None, forever=False, on_declare_queue=None,
+                 consume_from=None, state=None, accept=None, **kwargs):
+    """Filter tasks."""
+    return Filterer(
+        app, conn, filter,
+        limit=limit,
+        timeout=timeout,
+        ack_messages=ack_messages,
+        tasks=tasks,
+        queues=queues,
+        callback=callback,
+        forever=forever,
+        on_declare_queue=on_declare_queue,
+        consume_from=consume_from,
+        state=state,
+        accept=accept,
+        **kwargs).start()
 
 
 
 
 def move_task_by_id(task_id, dest, **kwargs):
 def move_task_by_id(task_id, dest, **kwargs):
@@ -310,8 +365,9 @@ def move_task_by_id(task_id, dest, **kwargs):
 
 
 
 
 def move_by_idmap(map, **kwargs):
 def move_by_idmap(map, **kwargs):
-    """Moves tasks by matching from a ``task_id: queue`` mapping,
-    where ``queue`` is a queue to move the task to.
+    """Move tasks by matching from a ``task_id: queue`` mapping.
+
+    ``queue`` is the queue to move the task to.
 
 
     Example:
     Example:
         >>> move_by_idmap({
         >>> move_by_idmap({
@@ -329,8 +385,9 @@ def move_by_idmap(map, **kwargs):
 
 
 
 
 def move_by_taskmap(map, **kwargs):
 def move_by_taskmap(map, **kwargs):
-    """Moves tasks by matching from a ``task_name: queue`` mapping,
-    where ``queue`` is the queue to move the task to.
+    """Move tasks by matching from a ``task_name: queue`` mapping.
+
+    ``queue`` is the queue to move the task to.
 
 
     Example:
     Example:
         >>> move_by_taskmap({
         >>> move_by_taskmap({
@@ -338,7 +395,6 @@ def move_by_taskmap(map, **kwargs):
         ...     'tasks.mul': Queue('name'),
         ...     'tasks.mul': Queue('name'),
         ... })
         ... })
     """
     """
-
     def task_name_in_map(body, message):
     def task_name_in_map(body, message):
         return map.get(body['task'])  # <- name of task
         return map.get(body['task'])  # <- name of task
 
 

+ 166 - 0
celery/contrib/pytest.py

@@ -0,0 +1,166 @@
+"""Fixtures and testing utilities for :pypi:`py.test <pytest>`."""
+import os
+import pytest
+
+from contextlib import contextmanager
+
+from celery.backends.cache import CacheBackend, DummyClient
+
+from .testing import worker
+from .testing.app import TestApp, setup_default_app
+
+NO_WORKER = os.environ.get('NO_WORKER')
+
+# pylint: disable=redefined-outer-name
+# Well, they're called fixtures....
+
+
+@contextmanager
+def _create_app(request, enable_logging=False, use_trap=False, **config):
+    # type: (Any, **Any) -> Celery
+    """Utility context used to setup Celery app for pytest fixtures."""
+    test_app = TestApp(
+        set_as_current=False,
+        enable_logging=enable_logging,
+        config=config,
+    )
+    # request.module is not defined for session
+    _module = getattr(request, 'module', None)
+    _cls = getattr(request, 'cls', None)
+    _function = getattr(request, 'function', None)
+    with setup_default_app(test_app, use_trap=use_trap):
+        is_not_contained = any([
+            not getattr(_module, 'app_contained', True),
+            not getattr(_cls, 'app_contained', True),
+            not getattr(_function, 'app_contained', True)
+        ])
+        if is_not_contained:
+            test_app.set_current()
+        yield test_app
+
+
+@pytest.fixture(scope='session')
+def use_celery_app_trap():
+    # type: () -> bool
+    """You can override this fixture to enable the app trap.
+
+    The app trap raises an exception whenever something attempts
+    to use the current or default apps.
+    """
+    return False
+
+
+@pytest.fixture(scope='session')
+def celery_session_app(request,
+                       celery_config,
+                       celery_enable_logging,
+                       use_celery_app_trap):
+    # type: (Any) -> Celery
+    """Session Fixture: Return app for session fixtures."""
+    mark = request.node.get_marker('celery')
+    config = dict(celery_config, **mark.kwargs if mark else {})
+    with _create_app(request,
+                     enable_logging=celery_enable_logging,
+                     use_trap=use_celery_app_trap,
+                     **config) as app:
+        if not use_celery_app_trap:
+            app.set_default()
+            app.set_current()
+        yield app
+
+
+@pytest.fixture(scope='session')
+def celery_session_worker(request, celery_session_app,
+                          celery_includes, celery_worker_pool):
+    # type: (Any, Celery, Sequence[str], str) -> WorkController
+    """Session Fixture: Start worker that lives throughout test suite."""
+    if not NO_WORKER:
+        for module in celery_includes:
+            celery_session_app.loader.import_task_module(module)
+        with worker.start_worker(celery_session_app,
+                                 pool=celery_worker_pool) as w:
+            yield w
+
+
+@pytest.fixture(scope='session')
+def celery_enable_logging():
+    # type: () -> bool
+    """You can override this fixture to enable logging."""
+    return False
+
+
+@pytest.fixture(scope='session')
+def celery_includes():
+    # type: () -> Sequence[str]
+    """You can override this include modules when a worker start.
+
+    You can have this return a list of module names to import,
+    these can be task modules, modules registering signals, and so on.
+    """
+    return ()
+
+
+@pytest.fixture(scope='session')
+def celery_worker_pool():
+    # type: () -> Union[str, Any]
+    """You can override this fixture to set the worker pool.
+
+    The "solo" pool is used by default, but you can set this to
+    return e.g. "prefork".
+    """
+    return 'solo'
+
+
+@pytest.fixture(scope='session')
+def celery_config():
+    # type: () -> Mapping[str, Any]
+    """Redefine this fixture to configure the test Celery app.
+
+    The config returned by your fixture will then be used
+    to configure the :func:`celery_app` fixture.
+    """
+    return {}
+
+
+@pytest.fixture()
+def celery_app(request,
+               celery_config,
+               celery_enable_logging,
+               use_celery_app_trap):
+    """Fixture creating a Celery application instance."""
+    mark = request.node.get_marker('celery')
+    config = dict(celery_config, **mark.kwargs if mark else {})
+    with _create_app(request,
+                     enable_logging=celery_enable_logging,
+                     use_trap=use_celery_app_trap,
+                     **config) as app:
+        yield app
+
+
+@pytest.fixture()
+def celery_worker(request, celery_app, celery_includes, celery_worker_pool):
+    # type: (Any, Celery, Sequence[str], str) -> WorkController
+    """Fixture: Start worker in a thread, stop it when the test returns."""
+    if not NO_WORKER:
+        for module in celery_includes:
+            celery_app.loader.import_task_module(module)
+        with worker.start_worker(celery_app, pool=celery_worker_pool) as w:
+            yield w
+
+
+@pytest.fixture()
+def depends_on_current_app(celery_app):
+    """Fixture that sets app as current."""
+    celery_app.set_current()
+
+
+@pytest.fixture(autouse=True)
+def reset_cache_backend_state(celery_app):
+    """Fixture that resets the internal state of the cache result backend."""
+    yield
+    backend = celery_app.__dict__.get('backend')
+    if backend is not None:
+        if isinstance(backend, CacheBackend):
+            if isinstance(backend.client, DummyClient):
+                backend.client.cache.clear()
+            backend._cache.clear()
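A sketch of how a test suite would consume these fixtures; the configuration values and the inline task are illustrative, and the example assumes the embedded worker started by celery_worker shares the app the task is registered on:

# conftest.py
import pytest


@pytest.fixture(scope='session')
def celery_config():
    # Overrides the plugin's celery_config fixture above.
    return {
        'broker_url': 'memory://',
        'result_backend': 'cache+memory://',
    }


# test_tasks.py
def test_mul(celery_app, celery_worker):
    @celery_app.task
    def mul(x, y):
        return x * y

    assert mul.delay(4, 4).get(timeout=10) == 16

The session-scoped variants (celery_session_app, celery_session_worker) work the same way but keep a single worker alive for the whole test run.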

+ 6 - 5
celery/contrib/rdb.py

@@ -29,8 +29,8 @@ Environment Variables
 ``CELERY_RDB_HOST``
 ``CELERY_RDB_HOST``
 -------------------
 -------------------
 
 
-    Hostname to bind to.  Default is '127.0.01', which means the socket
-    will only be accessible from the local host.
+    Hostname to bind to.  Default is '127.0.0.1' (only accessible from
+    localhost).
 
 
 .. envvar:: CELERY_RDB_PORT
 .. envvar:: CELERY_RDB_PORT
 
 
@@ -84,6 +84,8 @@ SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'
 
 
 
 
 class Rdb(Pdb):
 class Rdb(Pdb):
+    """Remote debugger."""
+
     me = 'Remote Debugger'
     me = 'Remote Debugger'
     _prev_outs = None
     _prev_outs = None
     _sock = None
     _sock = None
@@ -168,13 +170,12 @@ class Rdb(Pdb):
     do_q = do_exit = do_quit
     do_q = do_exit = do_quit
 
 
     def set_quit(self):
     def set_quit(self):
-        # this raises a BdbQuit exception that we are unable to catch.
+        # this raises a BdbQuit exception that we're unable to catch.
         sys.settrace(None)
         sys.settrace(None)
 
 
 
 
 def debugger():
 def debugger():
-    """Return the current debugger instance (if any),
-    or creates a new one."""
+    """Return the current debugger instance, or create if none."""
     rdb = _current[0]
     rdb = _current[0]
     if rdb is None or not rdb.active:
     if rdb is None or not rdb.active:
         rdb = _current[0] = Rdb()
         rdb = _current[0] = Rdb()
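For reference, the debugger documented by these environment variables is normally invoked from inside a task; a minimal sketch, assuming a hypothetical app and task:

from celery import Celery
from celery.contrib import rdb

app = Celery('proj', broker='amqp://')  # hypothetical app/broker


@app.task
def add(x, y):
    result = x + y
    rdb.set_trace()  # blocks here until a client connects
    return result

The worker log prints the address to connect to; CELERY_RDB_HOST and CELERY_RDB_PORT control where the debugger binds.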

+ 6 - 2
celery/contrib/sphinx.py

@@ -14,7 +14,7 @@ Add the extension to your :file:`docs/conf.py` configuration module:
     extensions = (...,
     extensions = (...,
                   'celery.contrib.sphinx')
                   'celery.contrib.sphinx')
 
 
-If you would like to change the prefix for tasks in reference documentation
+If you'd like to change the prefix for tasks in reference documentation
 then you can change the ``celery_task_prefix`` configuration value:
 then you can change the ``celery_task_prefix`` configuration value:
 
 
 .. code-block:: python
 .. code-block:: python
@@ -37,6 +37,8 @@ from celery.app.task import BaseTask
 
 
 
 
 class TaskDocumenter(FunctionDocumenter):
 class TaskDocumenter(FunctionDocumenter):
+    """Document task definitions."""
+
     objtype = 'task'
     objtype = 'task'
     member_order = 11
     member_order = 11
 
 
@@ -45,7 +47,7 @@ class TaskDocumenter(FunctionDocumenter):
         return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
         return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
 
 
     def format_args(self):
     def format_args(self):
-        wrapped = getattr(self.object, '__wrapped__')
+        wrapped = getattr(self.object, '__wrapped__', None)
         if wrapped is not None:
         if wrapped is not None:
             argspec = getfullargspec(wrapped)
             argspec = getfullargspec(wrapped)
             fmt = formatargspec(*argspec)
             fmt = formatargspec(*argspec)
@@ -58,12 +60,14 @@ class TaskDocumenter(FunctionDocumenter):
 
 
 
 
 class TaskDirective(PyModulelevel):
 class TaskDirective(PyModulelevel):
+    """Sphinx task directive."""
 
 
     def get_signature_prefix(self, sig):
     def get_signature_prefix(self, sig):
         return self.env.config.celery_task_prefix
         return self.env.config.celery_task_prefix
 
 
 
 
 def setup(app):
 def setup(app):
+    """Setup Sphinx extension."""
     app.add_autodocumenter(TaskDocumenter)
     app.add_autodocumenter(TaskDocumenter)
     app.domains['py'].directives['task'] = TaskDirective
     app.domains['py'].directives['task'] = TaskDirective
     app.add_config_value('celery_task_prefix', '(task)', True)
     app.add_config_value('celery_task_prefix', '(task)', True)
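The configuration value registered above is a plain string set in the Sphinx conf.py; a minimal sketch:

# docs/conf.py
extensions = ['celery.contrib.sphinx']

# Prefix shown before task signatures in the rendered docs.
celery_task_prefix = '(task)'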

+ 0 - 0
celery/tests/app/__init__.py → celery/contrib/testing/__init__.py


+ 105 - 0
celery/contrib/testing/app.py

@@ -0,0 +1,105 @@
+"""Create Celery app instances used for testing."""
+import weakref
+
+from contextlib import contextmanager
+from copy import deepcopy
+
+from kombu.utils.imports import symbol_by_name
+
+from celery import Celery
+from celery import _state
+
+#: Contains the default configuration values for the test app.
+DEFAULT_TEST_CONFIG = {
+    'worker_hijack_root_logger': False,
+    'worker_log_color': False,
+    'accept_content': {'json'},
+    'enable_utc': True,
+    'timezone': 'UTC',
+    'broker_url': 'memory://',
+    'result_backend': 'cache+memory://'
+}
+
+
+class Trap(object):
+    """Trap that pretends to be an app but raises an exception instead.
+
+    This is here to protect against code that doesn't properly pass app
+    instances, and instead falls back to the current_app.
+    """
+
+    def __getattr__(self, name):
+        raise RuntimeError('Test depends on current_app')
+
+
+class UnitLogging(symbol_by_name(Celery.log_cls)):
+    """Sets up logging for the test application."""
+
+    def __init__(self, *args, **kwargs):
+        super(UnitLogging, self).__init__(*args, **kwargs)
+        self.already_setup = True
+
+
+def TestApp(name=None, config=None, enable_logging=False, set_as_current=False,
+            log=UnitLogging, backend=None, broker=None, **kwargs):
+    """App used for testing."""
+    from . import tasks  # noqa
+    config = dict(deepcopy(DEFAULT_TEST_CONFIG), **config or {})
+    if broker is not None:
+        config.pop('broker_url', None)
+    if backend is not None:
+        config.pop('result_backend', None)
+    log = None if enable_logging else log
+    test_app = Celery(
+        name or 'celery.tests',
+        set_as_current=set_as_current,
+        log=log,
+        broker=broker,
+        backend=backend,
+        **kwargs)
+    test_app.add_defaults(config)
+    return test_app
+
+
+@contextmanager
+def set_trap(app):
+    """Contextmanager that installs the trap app.
+
+    The trap means that anything trying to use the current or default app
+    will raise an exception.
+    """
+    trap = Trap()
+    prev_tls = _state._tls
+    _state.set_default_app(trap)
+
+    class NonTLS(object):
+        current_app = trap
+    _state._tls = NonTLS()
+
+    yield
+    _state._tls = prev_tls
+
+
+@contextmanager
+def setup_default_app(app, use_trap=False):
+    """Setup default app for testing.
+
+    Ensures state is clean after the test returns.
+    """
+    prev_current_app = _state.get_current_app()
+    prev_default_app = _state.default_app
+    prev_finalizers = set(_state._on_app_finalizers)
+    prev_apps = weakref.WeakSet(_state._apps)
+
+    if use_trap:
+        with set_trap(app):
+            yield
+    else:
+        yield
+
+    _state.set_default_app(prev_default_app)
+    _state._tls.current_app = prev_current_app
+    if app is not prev_current_app:
+        app.close()
+    _state._on_app_finalizers = prev_finalizers
+    _state._apps = prev_apps
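A minimal sketch of using these helpers directly, outside the pytest fixtures; the config key and inline task are illustrative:

from celery.contrib.testing.app import TestApp, setup_default_app

app = TestApp(config={'task_default_queue': 'testcelery'})

with setup_default_app(app, use_trap=True):
    # Inside the block the current/default app is the Trap, so code that
    # silently falls back to current_app raises RuntimeError instead.
    @app.task
    def add(x, y):
        return x + y

    assert add.apply(args=(2, 2)).get() == 4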

+ 186 - 0
celery/contrib/testing/manager.py

@@ -0,0 +1,186 @@
+"""Integration testing utilities."""
+import socket
+import sys
+
+from collections import defaultdict
+from functools import partial
+from itertools import count
+
+from kombu.utils.functional import retry_over_time
+
+from celery.exceptions import TimeoutError
+from celery.result import ResultSet
+from celery.utils.text import truncate
+from celery.utils.time import humanize_seconds as _humanize_seconds
+
+E_STILL_WAITING = 'Still waiting for {0}.  Trying again {when}: {exc!r}'
+
+
+class Sentinel(Exception):
+    """Signifies the end of something."""
+
+
+def humanize_seconds(secs, prefix='', sep='', now='now', **kwargs):
+    # type: (float, str, str, str, **Any) -> str
+    """Represent seconds in a human readable way."""
+    s = _humanize_seconds(secs, prefix, sep, now, **kwargs)
+    if s == now and secs > 0:
+        return '{prefix}{sep}{0:.2f} seconds'.format(
+            float(secs), prefix=prefix, sep=sep)
+    return s
+
+
+class ManagerMixin(object):
+    """Mixin that adds :class:`Manager` capabilities."""
+
+    def _init_manager(self,
+                      block_timeout=30 * 60.0, no_join=False,
+                      stdout=None, stderr=None):
+        # type: (float, bool, TextIO, TextIO) -> None
+        self.stdout = sys.stdout if stdout is None else stdout
+        self.stderr = sys.stderr if stderr is None else stderr
+        self.connerrors = self.app.connection().recoverable_connection_errors
+        self.block_timeout = block_timeout
+        self.no_join = no_join
+
+    def remark(self, s, sep='-'):
+        # type: (str, str) -> None
+        print('{0}{1}'.format(sep, s), file=self.stdout)
+
+    def missing_results(self, r):
+        # type: (Sequence[AsyncResult]) -> Sequence[str]
+        return [res.id for res in r if res.id not in res.backend._cache]
+
+    def wait_for(self, fun, catch,
+                 desc='thing', args=(), kwargs={}, errback=None,
+                 max_retries=10, interval_start=0.1, interval_step=0.5,
+                 interval_max=5.0, emit_warning=False, **options):
+        # type: (Callable, Sequence[Any], str, Tuple, Dict, Callable,
+        #        int, float, float, float, bool, **Any) -> Any
+        """Wait for event to happen.
+
+        The `catch` argument specifies the exception that means the event
+        has not happened yet.
+        """
+        def on_error(exc, intervals, retries):
+            interval = next(intervals)
+            if emit_warning:
+                self.warn(E_STILL_WAITING.format(
+                    desc, when=humanize_seconds(interval, 'in', ' '), exc=exc,
+                ))
+            if errback:
+                errback(exc, interval, retries)
+            return interval
+
+        return self.retry_over_time(
+            fun, catch,
+            args=args, kwargs=kwargs,
+            errback=on_error, max_retries=max_retries,
+            interval_start=interval_start, interval_step=interval_step,
+            **options
+        )
+
+    def ensure_not_for_a_while(self, fun, catch,
+                               desc='thing', max_retries=20,
+                               interval_start=0.1, interval_step=0.02,
+                               interval_max=1.0, emit_warning=False,
+                               **options):
+        """Make sure something does not happen (at least for a while)."""
+        try:
+            return self.wait_for(
+                fun, catch, desc=desc, max_retries=max_retries,
+                interval_start=interval_start, interval_step=interval_step,
+                interval_max=interval_max, emit_warning=emit_warning,
+            )
+        except catch:
+            pass
+        else:
+            raise AssertionError('Should not have happened: {0}'.format(desc))
+
+    def retry_over_time(self, *args, **kwargs):
+        return retry_over_time(*args, **kwargs)
+
+    def join(self, r, propagate=False, max_retries=10, **kwargs):
+        if self.no_join:
+            return
+        if not isinstance(r, ResultSet):
+            r = self.app.ResultSet([r])
+        received = []
+
+        def on_result(task_id, value):
+            received.append(task_id)
+
+        for i in range(max_retries) if max_retries else count(0):
+            received[:] = []
+            try:
+                return r.get(callback=on_result, propagate=propagate, **kwargs)
+            except (socket.timeout, TimeoutError) as exc:
+                waiting_for = self.missing_results(r)
+                self.remark(
+                    'Still waiting for {0}/{1}: [{2}]: {3!r}'.format(
+                        len(r) - len(received), len(r),
+                        truncate(', '.join(waiting_for)), exc), '!',
+                )
+            except self.connerrors as exc:
+                self.remark('join: connection lost: {0!r}'.format(exc), '!')
+        raise AssertionError('Test failed: Missing task results')
+
+    def inspect(self, timeout=3.0):
+        return self.app.control.inspect(timeout=timeout)
+
+    def query_tasks(self, ids, timeout=0.5):
+        for reply in (self.inspect(timeout).query_task(*ids) or {}).items():
+            yield reply
+
+    def query_task_states(self, ids, timeout=0.5):
+        states = defaultdict(set)
+        for hostname, reply in self.query_tasks(ids, timeout=timeout):
+            for task_id, (state, _) in reply.items():
+                states[state].add(task_id)
+        return states
+
+    def assert_accepted(self, ids, interval=0.5,
+                        desc='waiting for tasks to be accepted', **policy):
+        return self.assert_task_worker_state(
+            self.is_accepted, ids, interval=interval, desc=desc, **policy
+        )
+
+    def assert_received(self, ids, interval=0.5,
+                        desc='waiting for tasks to be received', **policy):
+        return self.assert_task_worker_state(
+            self.is_accepted, ids, interval=interval, desc=desc, **policy
+        )
+
+    def assert_task_worker_state(self, fun, ids, interval=0.5, **policy):
+        return self.wait_for(
+            partial(self.true_or_raise, fun, ids, timeout=interval),
+            (Sentinel,), **policy
+        )
+
+    def is_received(self, ids, **kwargs):
+        return self._ids_matches_state(
+            ['reserved', 'active', 'ready'], ids, **kwargs)
+
+    def is_accepted(self, ids, **kwargs):
+        return self._ids_matches_state(['active', 'ready'], ids, **kwargs)
+
+    def _ids_matches_state(self, expected_states, ids, timeout=0.5):
+        states = self.query_task_states(ids, timeout=timeout)
+        return all(
+            any(t in s for s in [states[k] for k in expected_states])
+            for t in ids
+        )
+
+    def true_or_raise(self, fun, *args, **kwargs):
+        res = fun(*args, **kwargs)
+        if not res:
+            raise Sentinel()
+        return res
+
+
+class Manager(ManagerMixin):
+    """Test helpers for task integration tests."""
+
+    def __init__(self, app, **kwargs):
+        self.app = app
+        self._init_manager(**kwargs)
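A brief sketch of driving these helpers from an integration test; the project app and task imports are assumptions:

from celery.contrib.testing.manager import Manager

from proj.celery import app  # hypothetical project app
from proj.tasks import add   # hypothetical task

manager = Manager(app)
result = add.delay(2, 2)

# Block until the result arrives, retrying over transient broker errors.
manager.join(result, timeout=10)

# Ask the workers (via the inspect API) whether the task was accepted.
manager.assert_accepted([result.id])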

+ 95 - 0
celery/contrib/testing/mocks.py

@@ -0,0 +1,95 @@
+"""Useful mocks for unit testing."""
+import numbers
+from datetime import datetime, timedelta
+
+try:
+    from case import Mock
+except ImportError:
+    try:
+        from unittest.mock import Mock
+    except ImportError:
+        from mock import Mock
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, callbacks=None,
+                errbacks=None, chain=None, shadow=None, utc=None, **options):
+    # type: (str, str, Sequence, Mapping, Sequence[Signature],
+    #        Sequence[Signature], Sequence[Signature],
+    #        str, bool, **Any) -> Any
+    """Create task message in protocol 2 format."""
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {
+        'id': id,
+        'task': name,
+        'shadow': shadow,
+    }
+    embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain}
+    message.headers.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        (args, kwargs, embed), serializer='json',
+    )
+    message.payload = (args, kwargs, embed)
+    return message
+
+
+def TaskMessage1(name, id=None, args=(), kwargs={}, callbacks=None,
+                 errbacks=None, chain=None, **options):
+    # type: (str, str, Sequence, Mapping, Sequence[Signature],
+    #        Sequence[Signature], Sequence[Signature]) -> Any
+    """Create task message in protocol 1 format."""
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {}
+    message.payload = {
+        'task': name,
+        'id': id,
+        'args': args,
+        'kwargs': kwargs,
+        'callbacks': callbacks,
+        'errbacks': errbacks,
+    }
+    message.payload.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        message.payload,
+    )
+    return message
+
+
+def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
+    # type: (Celery, Signature, bool, Any) -> Any
+    """Create task message from :class:`celery.Signature`.
+
+    Example:
+        >>> m = task_message_from_sig(app, add.s(2, 2))
+        >>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey')
+    """
+    sig.freeze()
+    callbacks = sig.options.pop('link', None)
+    errbacks = sig.options.pop('link_error', None)
+    countdown = sig.options.pop('countdown', None)
+    if countdown:
+        eta = app.now() + timedelta(seconds=countdown)
+    else:
+        eta = sig.options.pop('eta', None)
+    if eta and isinstance(eta, datetime):
+        eta = eta.isoformat()
+    expires = sig.options.pop('expires', None)
+    if expires and isinstance(expires, numbers.Real):
+        expires = app.now() + timedelta(seconds=expires)
+    if expires and isinstance(expires, datetime):
+        expires = expires.isoformat()
+    return TaskMessage(
+        sig.task, id=sig.id, args=sig.args,
+        kwargs=sig.kwargs,
+        callbacks=[dict(s) for s in callbacks] if callbacks else None,
+        errbacks=[dict(s) for s in errbacks] if errbacks else None,
+        eta=eta,
+        expires=expires,
+        utc=utc,
+        **sig.options
+    )

+ 9 - 0
celery/contrib/testing/tasks.py

@@ -0,0 +1,9 @@
+"""Helper tasks for integration tests."""
+from celery import shared_task
+
+
+@shared_task(name='celery.ping')
+def ping():
+    # type: () -> str
+    """Simple task that just returns 'pong'."""
+    return 'pong'
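This task backs the ping check used by the embedded worker in celery/contrib/testing/worker.py below; against a running worker it simply round-trips:

    >>> from celery.contrib.testing.tasks import ping
    >>> ping.delay().get(timeout=10)   # requires a running worker
    'pong'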

+ 154 - 0
celery/contrib/testing/worker.py

@@ -0,0 +1,154 @@
+"""Embedded workers for integration tests."""
+import os
+import threading
+
+from contextlib import contextmanager
+
+from celery import worker
+from celery.result import allow_join_result, _set_task_join_will_block
+from celery.utils.dispatch import Signal
+from celery.utils.nodenames import anon_nodename
+
+test_worker_starting = Signal(providing_args=[])
+test_worker_started = Signal(providing_args=['worker', 'consumer'])
+test_worker_stopped = Signal(providing_args=['worker'])
+
+WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error')
+
+
+class TestWorkController(worker.WorkController):
+    """Worker that can synchronize on being fully started."""
+
+    def __init__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        self._on_started = threading.Event()
+        super(TestWorkController, self).__init__(*args, **kwargs)
+
+    def on_consumer_ready(self, consumer):
+        # type: (celery.worker.consumer.Consumer) -> None
+        """Callback called when the Consumer blueprint is fully started."""
+        self._on_started.set()
+        test_worker_started.send(
+            sender=self.app, worker=self, consumer=consumer)
+
+    def ensure_started(self):
+        # type: () -> None
+        """Wait for worker to be fully up and running.
+
+        Warning:
+            Worker must be started within a thread for this to work,
+            or it will block forever.
+        """
+        self._on_started.wait()
+
+
+@contextmanager
+def start_worker(app,
+                 concurrency=1,
+                 pool='solo',
+                 loglevel=WORKER_LOGLEVEL,
+                 logfile=None,
+                 perform_ping_check=True,
+                 ping_task_timeout=10.0,
+                 **kwargs):
+    # type: (Celery, int, str, Union[str, int],
+    #        str, bool, float, **Any) -> Iterable
+    """Start embedded worker.
+
+    Yields:
+        celery.app.worker.Worker: worker instance.
+    """
+    test_worker_starting.send(sender=app)
+
+    with _start_worker_thread(app,
+                              concurrency=concurrency,
+                              pool=pool,
+                              loglevel=loglevel,
+                              logfile=logfile,
+                              **kwargs) as worker:
+        if perform_ping_check:
+            from .tasks import ping
+            with allow_join_result():
+                assert ping.delay().get(timeout=ping_task_timeout) == 'pong'
+
+        yield worker
+    test_worker_stopped.send(sender=app, worker=worker)
+
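A minimal usage sketch, assuming `app` is already configured with a broker and result backend, that `celery.contrib.testing.tasks` has been imported (so the ping check can pass), and that a hypothetical `add` task is registered on it:

    from celery.contrib.testing.worker import start_worker

    with start_worker(app, loglevel='info') as worker:
        # the embedded worker consumes tasks in a background thread
        result = add.delay(2, 2)
        assert result.get(timeout=10) == 4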
+
+@contextmanager
+def _start_worker_thread(app,
+                         concurrency=1,
+                         pool='solo',
+                         loglevel=WORKER_LOGLEVEL,
+                         logfile=None,
+                         WorkController=TestWorkController,
+                         **kwargs):
+    # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
+    """Start Celery worker in a thread.
+
+    Yields:
+        celery.worker.Worker: worker instance.
+    """
+    setup_app_for_worker(app, loglevel, logfile)
+    assert 'celery.ping' in app.tasks
+    # Make sure we can connect to the broker
+    with app.connection() as conn:
+        conn.default_channel.queue_declare
+
+    worker = WorkController(
+        app=app,
+        concurrency=concurrency,
+        hostname=anon_nodename(),
+        pool=pool,
+        loglevel=loglevel,
+        logfile=logfile,
+        # not allowed to override TestWorkController.on_consumer_ready
+        ready_callback=None,
+        without_heartbeat=True,
+        without_mingle=True,
+        without_gossip=True,
+        **kwargs)
+
+    t = threading.Thread(target=worker.start)
+    t.start()
+    worker.ensure_started()
+    _set_task_join_will_block(False)
+
+    yield worker
+
+    from celery.worker import state
+    state.should_terminate = 0
+    t.join(10)
+    state.should_terminate = None
+
+
+@contextmanager
+def _start_worker_process(app,
+                          concurrency=1,
+                          pool='solo',
+                          loglevel=WORKER_LOGLEVEL,
+                          logfile=None,
+                          **kwargs):
+    # type: (Celery, int, str, Union[int, str], str, **Any) -> Iterable
+    """Start worker in separate process.
+
+    Yields:
+        celery.app.worker.Worker: worker instance.
+    """
+    from celery.apps.multi import Cluster, Node
+
+    app.set_current()
+    cluster = Cluster([Node('testworker1@%h')])
+    cluster.start()
+    yield
+    cluster.stopwait()
+
+
+def setup_app_for_worker(app, loglevel, logfile):
+    # type: (Celery, Union[str, int], str) -> None
+    """Setup the app to be used for starting an embedded worker."""
+    app.finalize()
+    app.set_current()
+    app.set_default()
+    type(app.log)._setup = False
+    app.log.setup(loglevel=loglevel, logfile=logfile)

+ 8 - 418
celery/events/__init__.py

@@ -5,421 +5,11 @@ Events is a stream of messages sent for certain actions occurring
 in the worker (and clients if :setting:`task_send_sent_event`
 is enabled), used for monitoring purposes.
 """
-import os
-import time
-import threading
-
-from collections import defaultdict, deque
-from contextlib import contextmanager
-from copy import copy
-from operator import itemgetter
-
-from kombu import Exchange, Queue, Producer
-from kombu.connection import maybe_channel
-from kombu.mixins import ConsumerMixin
-from kombu.utils.objects import cached_property
-
-from celery import uuid
-from celery.app import app_or_default
-from celery.utils.functional import dictfilter
-from celery.utils.nodenames import anon_nodename
-from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms
-
-__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver']
-
-event_exchange = Exchange('celeryev', type='topic')
-
-_TZGETTER = itemgetter('utcoffset', 'timestamp')
-
-CLIENT_CLOCK_SKEW = -1
-
-
-def get_exchange(conn):
-    ex = copy(event_exchange)
-    if conn.transport.driver_type == 'redis':
-        # quick hack for Issue #436
-        ex.type = 'fanout'
-    return ex
-
-
-def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
-    """Create an event.
-
-    An event is a dictionary, the only required field is ``type``.
-    A ``timestamp`` field will be set to the current time if not provided.
-    """
-    event = __dict__(_fields, **fields) if _fields else fields
-    if 'timestamp' not in event:
-        event.update(timestamp=__now__(), type=type)
-    else:
-        event['type'] = type
-    return event
-
-
-def group_from(type):
-    """Get the group part of an event type name.
-
-    E.g.::
-
-        >>> group_from('task-sent')
-        'task'
-
-        >>> group_from('custom-my-event')
-        'custom'
-    """
-    return type.split('-', 1)[0]
-
-
-class EventDispatcher:
-    """Dispatches event messages.
-
-    Arguments:
-        connection (kombu.Connection): Connection to the broker.
-
-        hostname (str): Hostname to identify ourselves as,
-            by default uses the hostname returned by
-            :func:`~celery.utils.anon_nodename`.
-
-        groups (Sequence[str]): List of groups to send events for.
-            :meth:`send` will ignore send requests to groups not in this list.
-            If this is :const:`None`, all events will be sent. Example groups
-            include ``"task"`` and ``"worker"``.
-
-        enabled (bool): Set to :const:`False` to not actually publish any
-            events, making :meth:`send` a no-op.
-
-        channel (kombu.Channel): Can be used instead of `connection` to specify
-            an exact channel to use when sending events.
-
-        buffer_while_offline (bool): If enabled events will be buffered
-            while the connection is down. :meth:`flush` must be called
-            as soon as the connection is re-established.
-
-    Note:
-        You need to :meth:`close` this after use.
-    """
-    DISABLED_TRANSPORTS = {'sql'}
-
-    app = None
-
-    # set of callbacks to be called when :meth:`enabled`.
-    on_enabled = None
-
-    # set of callbacks to be called when :meth:`disabled`.
-    on_disabled = None
-
-    def __init__(self, connection=None, hostname=None, enabled=True,
-                 channel=None, buffer_while_offline=True, app=None,
-                 serializer=None, groups=None, delivery_mode=1,
-                 buffer_group=None, buffer_limit=24, on_send_buffered=None):
-        self.app = app_or_default(app or self.app)
-        self.connection = connection
-        self.channel = channel
-        self.hostname = hostname or anon_nodename()
-        self.buffer_while_offline = buffer_while_offline
-        self.buffer_group = buffer_group or frozenset()
-        self.buffer_limit = buffer_limit
-        self.on_send_buffered = on_send_buffered
-        self._group_buffer = defaultdict(list)
-        self.mutex = threading.Lock()
-        self.producer = None
-        self._outbound_buffer = deque()
-        self.serializer = serializer or self.app.conf.event_serializer
-        self.on_enabled = set()
-        self.on_disabled = set()
-        self.groups = set(groups or [])
-        self.tzoffset = [-time.timezone, -time.altzone]
-        self.clock = self.app.clock
-        self.delivery_mode = delivery_mode
-        if not connection and channel:
-            self.connection = channel.connection.client
-        self.enabled = enabled
-        conninfo = self.connection or self.app.connection_for_write()
-        self.exchange = get_exchange(conninfo)
-        if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
-            self.enabled = False
-        if self.enabled:
-            self.enable()
-        self.headers = {'hostname': self.hostname}
-        self.pid = os.getpid()
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *exc_info):
-        self.close()
-
-    def enable(self):
-        self.producer = Producer(self.channel or self.connection,
-                                 exchange=self.exchange,
-                                 serializer=self.serializer)
-        self.enabled = True
-        for callback in self.on_enabled:
-            callback()
-
-    def disable(self):
-        if self.enabled:
-            self.enabled = False
-            self.close()
-            for callback in self.on_disabled:
-                callback()
-
-    def publish(self, type, fields, producer,
-                blind=False, Event=Event, **kwargs):
-        """Publish event using a custom :class:`~kombu.Producer`
-        instance.
-
-        Arguments:
-            type (str): Event type name, with group separated by dash (`-`).
-                fields: Dictionary of event fields, must be json serializable.
-            producer (kombu.Producer): Producer instance to use:
-                only the ``publish`` method will be called.
-            retry (bool): Retry in the event of connection failure.
-            retry_policy (Mapping): Map of custom retry policy options.
-                See :meth:`~kombu.Connection.ensure`.
-            blind (bool): Don't set logical clock value (also do not forward
-                the internal logical clock).
-            Event (Callable): Event type used to create event.
-                Defaults to :func:`Event`.
-            utcoffset (Callable): Function returning the current
-                utc offset in hours.
-        """
-        clock = None if blind else self.clock.forward()
-        event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
-                      pid=self.pid, clock=clock, **fields)
-        with self.mutex:
-            return self._publish(event, producer,
-                                 routing_key=type.replace('-', '.'), **kwargs)
-
-    def _publish(self, event, producer, routing_key, retry=False,
-                 retry_policy=None, utcoffset=utcoffset):
-        exchange = self.exchange
-        try:
-            producer.publish(
-                event,
-                routing_key=routing_key,
-                exchange=exchange.name,
-                retry=retry,
-                retry_policy=retry_policy,
-                declare=[exchange],
-                serializer=self.serializer,
-                headers=self.headers,
-                delivery_mode=self.delivery_mode,
-            )
-        except Exception as exc:
-            if not self.buffer_while_offline:
-                raise
-            self._outbound_buffer.append((event, routing_key, exc))
-
-    def send(self, type, blind=False, utcoffset=utcoffset, retry=False,
-             retry_policy=None, Event=Event, **fields):
-        """Send event.
-
-        Arguments:
-            type (str): Event type name, with group separated by dash (`-`).
-            retry (bool): Retry in the event of connection failure.
-            retry_policy (Mapping): Map of custom retry policy options.
-                See :meth:`~kombu.Connection.ensure`.
-            blind (bool): Don't set logical clock value (also do not forward
-                the internal logical clock).
-            Event (Callable): Event type used to create event,
-                defaults to :func:`Event`.
-            utcoffset (Callable): unction returning the current utc offset
-                in hours.
-            **fields (Any): Event fields -- must be json serializable.
-        """
-        if self.enabled:
-            groups, group = self.groups, group_from(type)
-            if groups and group not in groups:
-                return
-            if group in self.buffer_group:
-                clock = self.clock.forward()
-                event = Event(type, hostname=self.hostname,
-                              utcoffset=utcoffset(),
-                              pid=self.pid, clock=clock, **fields)
-                buf = self._group_buffer[group]
-                buf.append(event)
-                if len(buf) >= self.buffer_limit:
-                    self.flush()
-                elif self.on_send_buffered:
-                    self.on_send_buffered()
-            else:
-                return self.publish(type, fields, self.producer, blind=blind,
-                                    Event=Event, retry=retry,
-                                    retry_policy=retry_policy)
-
-    def flush(self, errors=True, groups=True):
-        """Flushes the outbound buffer."""
-        if errors:
-            buf = list(self._outbound_buffer)
-            try:
-                with self.mutex:
-                    for event, routing_key, _ in buf:
-                        self._publish(event, self.producer, routing_key)
-            finally:
-                self._outbound_buffer.clear()
-        if groups:
-            with self.mutex:
-                for group, events in self._group_buffer.items():
-                    self._publish(events, self.producer, '%s.multi' % group)
-                    events[:] = []  # list.clear
-
-    def extend_buffer(self, other):
-        """Copies the outbound buffer of another instance."""
-        self._outbound_buffer.extend(other._outbound_buffer)
-
-    def close(self):
-        """Close the event dispatcher."""
-        self.mutex.locked() and self.mutex.release()
-        self.producer = None
-
-
-class EventReceiver(ConsumerMixin):
-    """Capture events.
-
-    Arguments:
-        connection (kombu.Connection): Connection to the broker.
-        handlers (Mapping[Callable]): Event handlers.
-            This is  a map of event type names and their handlers.
-            The special handler `"*"` captures all events that don't have a
-            handler.
-    """
-    app = None
-
-    def __init__(self, channel, handlers=None, routing_key='#',
-                 node_id=None, app=None, queue_prefix=None,
-                 accept=None, queue_ttl=None, queue_expires=None):
-        self.app = app_or_default(app or self.app)
-        self.channel = maybe_channel(channel)
-        self.handlers = {} if handlers is None else handlers
-        self.routing_key = routing_key
-        self.node_id = node_id or uuid()
-        self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix
-        self.exchange = get_exchange(
-            self.connection or self.app.connection_for_write())
-        self.queue = Queue(
-            '.'.join([self.queue_prefix, self.node_id]),
-            exchange=self.exchange,
-            routing_key=self.routing_key,
-            auto_delete=True, durable=False,
-            queue_arguments=self._get_queue_arguments(
-                ttl=queue_ttl, expires=queue_expires,
-            ),
-        )
-        self.clock = self.app.clock
-        self.adjust_clock = self.clock.adjust
-        self.forward_clock = self.clock.forward
-        if accept is None:
-            accept = {self.app.conf.event_serializer, 'json'}
-        self.accept = accept
-
-    def _get_queue_arguments(self, ttl=None, expires=None):
-        conf = self.app.conf
-        return dictfilter({
-            'x-message-ttl': maybe_s_to_ms(
-                ttl if ttl is not None else conf.event_queue_ttl,
-            ),
-            'x-expires': maybe_s_to_ms(
-                expires if expires is not None else conf.event_queue_expires,
-            ),
-        })
-
-    def process(self, type, event):
-        """Process the received event by dispatching it to the appropriate
-        handler."""
-        handler = self.handlers.get(type) or self.handlers.get('*')
-        handler and handler(event)
-
-    def get_consumers(self, Consumer, channel):
-        return [Consumer(queues=[self.queue],
-                         callbacks=[self._receive], no_ack=True,
-                         accept=self.accept)]
-
-    def on_consume_ready(self, connection, channel, consumers,
-                         wakeup=True, **kwargs):
-        if wakeup:
-            self.wakeup_workers(channel=channel)
-
-    def itercapture(self, limit=None, timeout=None, wakeup=True):
-        return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)
-
-    def capture(self, limit=None, timeout=None, wakeup=True):
-        """Open up a consumer capturing events.
-
-        This has to run in the main process, and it will never stop
-        unless :attr:`EventDispatcher.should_stop` is set to True, or
-        forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
-        """
-        return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
-
-    def wakeup_workers(self, channel=None):
-        self.app.control.broadcast('heartbeat',
-                                   connection=self.connection,
-                                   channel=channel)
-
-    def event_from_message(self, body, localize=True,
-                           now=time.time, tzfields=_TZGETTER,
-                           adjust_timestamp=adjust_timestamp,
-                           CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
-        type = body['type']
-        if type == 'task-sent':
-            # clients never sync so cannot use their clock value
-            _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
-            self.adjust_clock(_c)
-        else:
-            try:
-                clock = body['clock']
-            except KeyError:
-                body['clock'] = self.forward_clock()
-            else:
-                self.adjust_clock(clock)
-
-        if localize:
-            try:
-                offset, timestamp = tzfields(body)
-            except KeyError:
-                pass
-            else:
-                body['timestamp'] = adjust_timestamp(timestamp, offset)
-        body['local_received'] = now()
-        return type, body
-
-    def _receive(self, body, message, list=list, isinstance=isinstance):
-        if isinstance(body, list):  # celery 4.0: List of events
-            process, from_message = self.process, self.event_from_message
-            [process(*from_message(event)) for event in body]
-        else:
-            self.process(*self.event_from_message(body))
-
-    @property
-    def connection(self):
-        return self.channel.connection.client if self.channel else None
-
-
-class Events:
-
-    def __init__(self, app=None):
-        self.app = app
-
-    @cached_property
-    def Receiver(self):
-        return self.app.subclass_with_self(EventReceiver,
-                                           reverse='events.Receiver')
-
-    @cached_property
-    def Dispatcher(self):
-        return self.app.subclass_with_self(EventDispatcher,
-                                           reverse='events.Dispatcher')
-
-    @cached_property
-    def State(self):
-        return self.app.subclass_with_self('celery.events.state:State',
-                                           reverse='events.State')
-
-    @contextmanager
-    def default_dispatcher(self, hostname=None, enabled=True,
-                           buffer_while_offline=False):
-        with self.app.amqp.producer_pool.acquire(block=True) as prod:
-            with self.Dispatcher(prod.connection, hostname, enabled,
-                                 prod.channel, buffer_while_offline) as d:
-                yield d
+from .dispatcher import EventDispatcher
+from .event import Event, event_exchange, get_exchange, group_from
+from .receiver import EventReceiver
+
+__all__ = [
+    'Event', 'EventDispatcher', 'EventReceiver',
+    'event_exchange', 'get_exchange', 'group_from',
+]
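The public names are re-exported here, so existing imports keep working while the implementation moves to the new submodules:

    >>> from celery.events import EventDispatcher, EventReceiver   # old path, still valid
    >>> from celery.events.dispatcher import EventDispatcher       # new location
    >>> from celery.events.receiver import EventReceiver           # new location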

+ 6 - 4
celery/events/cursesmon.py

@@ -34,9 +34,10 @@ events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
 
 
 class CursesMonitor:  # pragma: no cover
+    """A curses based Celery task monitor."""
+
     keymap = {}
     win = None
-    screen_width = None
     screen_delay = 10
     selected_task = None
     selected_position = 0
@@ -147,7 +148,7 @@ class CursesMonitor:  # pragma: no cover
     def handle_keypress(self):
         try:
             key = self.win.getkey().upper()
-        except:
+        except Exception:  # pylint: disable=broad-except
             return
         key = self.keyalias.get(key) or key
         handler = self.keymap.get(key)
@@ -169,7 +170,7 @@ class CursesMonitor:  # pragma: no cover
         while 1:
             try:
                 return self.win.getkey().upper()
-            except:
+            except Exception:  # pylint: disable=broad-except
                 pass
 
     def selection_rate_limit(self):
@@ -498,7 +499,7 @@ class DisplayThread(threading.Thread):  # pragma: no cover
 def capture_events(app, state, display):  # pragma: no cover
 
     def on_connection_error(exc, interval):
-        print('Connection Error: {0!r}. Retry in {1}s.'.format(
+        print('Connection Error: {0!r}.  Retry in {1}s.'.format(
             exc, interval), file=sys.stderr)
 
     while 1:
@@ -516,6 +517,7 @@ def capture_events(app, state, display):  # pragma: no cover
 
 
 def evtop(app=None):  # pragma: no cover
+    """Start curses monitor."""
     app = app_or_default(app)
     state = app.events.State()
     display = CursesMonitor(state, app)

+ 227 - 0
celery/events/dispatcher.py

@@ -0,0 +1,227 @@
+"""Event dispatcher sends events."""
+import os
+import threading
+import time
+
+from collections import defaultdict, deque
+
+from kombu import Producer
+
+from celery.app import app_or_default
+from celery.utils.nodenames import anon_nodename
+from celery.utils.time import utcoffset
+
+from .event import Event, get_exchange, group_from
+
+__all__ = ['EventDispatcher']
+
+
+class EventDispatcher(object):
+    """Dispatches event messages.
+
+    Arguments:
+        connection (kombu.Connection): Connection to the broker.
+
+        hostname (str): Hostname to identify ourselves as,
+            by default uses the hostname returned by
+            :func:`~celery.utils.anon_nodename`.
+
+        groups (Sequence[str]): List of groups to send events for.
+            :meth:`send` will ignore send requests to groups not in this list.
+            If this is :const:`None`, all events will be sent.
+            Example groups include ``"task"`` and ``"worker"``.
+
+        enabled (bool): Set to :const:`False` to not actually publish any
+            events, making :meth:`send` a no-op.
+
+        channel (kombu.Channel): Can be used instead of `connection` to specify
+            an exact channel to use when sending events.
+
+        buffer_while_offline (bool): If enabled events will be buffered
+            while the connection is down. :meth:`flush` must be called
+            as soon as the connection is re-established.
+
+    Note:
+        You need to :meth:`close` this after use.
+    """
+
+    DISABLED_TRANSPORTS = {'sql'}
+
+    app = None
+
+    # set of callbacks to be called when :meth:`enabled`.
+    on_enabled = None
+
+    # set of callbacks to be called when :meth:`disabled`.
+    on_disabled = None
+
+    def __init__(self, connection=None, hostname=None, enabled=True,
+                 channel=None, buffer_while_offline=True, app=None,
+                 serializer=None, groups=None, delivery_mode=1,
+                 buffer_group=None, buffer_limit=24, on_send_buffered=None):
+        self.app = app_or_default(app or self.app)
+        self.connection = connection
+        self.channel = channel
+        self.hostname = hostname or anon_nodename()
+        self.buffer_while_offline = buffer_while_offline
+        self.buffer_group = buffer_group or frozenset()
+        self.buffer_limit = buffer_limit
+        self.on_send_buffered = on_send_buffered
+        self._group_buffer = defaultdict(list)
+        self.mutex = threading.Lock()
+        self.producer = None
+        self._outbound_buffer = deque()
+        self.serializer = serializer or self.app.conf.event_serializer
+        self.on_enabled = set()
+        self.on_disabled = set()
+        self.groups = set(groups or [])
+        self.tzoffset = [-time.timezone, -time.altzone]
+        self.clock = self.app.clock
+        self.delivery_mode = delivery_mode
+        if not connection and channel:
+            self.connection = channel.connection.client
+        self.enabled = enabled
+        conninfo = self.connection or self.app.connection_for_write()
+        self.exchange = get_exchange(conninfo)
+        if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
+            self.enabled = False
+        if self.enabled:
+            self.enable()
+        self.headers = {'hostname': self.hostname}
+        self.pid = os.getpid()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
+    def enable(self):
+        self.producer = Producer(self.channel or self.connection,
+                                 exchange=self.exchange,
+                                 serializer=self.serializer)
+        self.enabled = True
+        for callback in self.on_enabled:
+            callback()
+
+    def disable(self):
+        if self.enabled:
+            self.enabled = False
+            self.close()
+            for callback in self.on_disabled:
+                callback()
+
+    def publish(self, type, fields, producer,
+                blind=False, Event=Event, **kwargs):
+        """Publish event using custom :class:`~kombu.Producer`.
+
+        Arguments:
+            type (str): Event type name, with group separated by dash (`-`).
+            fields: Dictionary of event fields, must be json serializable.
+            producer (kombu.Producer): Producer instance to use:
+                only the ``publish`` method will be called.
+            retry (bool): Retry in the event of connection failure.
+            retry_policy (Mapping): Map of custom retry policy options.
+                See :meth:`~kombu.Connection.ensure`.
+            blind (bool): Don't set logical clock value (also don't forward
+                the internal logical clock).
+            Event (Callable): Event type used to create event.
+                Defaults to :func:`Event`.
+            utcoffset (Callable): Function returning the current
+                utc offset in hours.
+        """
+        clock = None if blind else self.clock.forward()
+        event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
+                      pid=self.pid, clock=clock, **fields)
+        with self.mutex:
+            return self._publish(event, producer,
+                                 routing_key=type.replace('-', '.'), **kwargs)
+
+    def _publish(self, event, producer, routing_key, retry=False,
+                 retry_policy=None, utcoffset=utcoffset):
+        exchange = self.exchange
+        try:
+            producer.publish(
+                event,
+                routing_key=routing_key,
+                exchange=exchange.name,
+                retry=retry,
+                retry_policy=retry_policy,
+                declare=[exchange],
+                serializer=self.serializer,
+                headers=self.headers,
+                delivery_mode=self.delivery_mode,
+            )
+        except Exception as exc:  # pylint: disable=broad-except
+            if not self.buffer_while_offline:
+                raise
+            self._outbound_buffer.append((event, routing_key, exc))
+
+    def send(self, type, blind=False, utcoffset=utcoffset, retry=False,
+             retry_policy=None, Event=Event, **fields):
+        """Send event.
+
+        Arguments:
+            type (str): Event type name, with group separated by dash (`-`).
+            retry (bool): Retry in the event of connection failure.
+            retry_policy (Mapping): Map of custom retry policy options.
+                See :meth:`~kombu.Connection.ensure`.
+            blind (bool): Don't set logical clock value (also don't forward
+                the internal logical clock).
+            Event (Callable): Event type used to create event,
+                defaults to :func:`Event`.
+            utcoffset (Callable): Function returning the current utc offset
+                in hours.
+            **fields (Any): Event fields -- must be json serializable.
+        """
+        if self.enabled:
+            groups, group = self.groups, group_from(type)
+            if groups and group not in groups:
+                return
+            if group in self.buffer_group:
+                clock = self.clock.forward()
+                event = Event(type, hostname=self.hostname,
+                              utcoffset=utcoffset(),
+                              pid=self.pid, clock=clock, **fields)
+                buf = self._group_buffer[group]
+                buf.append(event)
+                if len(buf) >= self.buffer_limit:
+                    self.flush()
+                elif self.on_send_buffered:
+                    self.on_send_buffered()
+            else:
+                return self.publish(type, fields, self.producer, blind=blind,
+                                    Event=Event, retry=retry,
+                                    retry_policy=retry_policy)
+
+    def flush(self, errors=True, groups=True):
+        """Flush the outbound buffer."""
+        if errors:
+            buf = list(self._outbound_buffer)
+            try:
+                with self.mutex:
+                    for event, routing_key, _ in buf:
+                        self._publish(event, self.producer, routing_key)
+            finally:
+                self._outbound_buffer.clear()
+        if groups:
+            with self.mutex:
+                for group, events in self._group_buffer.items():
+                    self._publish(events, self.producer, '%s.multi' % group)
+                    events[:] = []  # list.clear
+
+    def extend_buffer(self, other):
+        """Copy the outbound buffer of another instance."""
+        self._outbound_buffer.extend(other._outbound_buffer)
+
+    def close(self):
+        """Close the event dispatcher."""
+        self.mutex.locked() and self.mutex.release()
+        self.producer = None
+
+    def _get_publisher(self):
+        return self.producer
+
+    def _set_publisher(self, producer):
+        self.producer = producer
+    publisher = property(_get_publisher, _set_publisher)  # XXX compat
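A minimal usage sketch, assuming `app` is a configured Celery application (the custom event type below is made up):

    from celery.events.dispatcher import EventDispatcher

    with app.connection_for_write() as connection:
        # the dispatcher is a context manager, so close() is called on exit
        with EventDispatcher(connection, app=app) as dispatcher:
            dispatcher.send('worker-custom-event', value=42)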

+ 4 - 2
celery/events/dumper.py

@@ -2,7 +2,7 @@
 """Utility to dump events to screen.
 
 This is a simple program that dumps events to the console
-as they happen. Think of it like a `tcpdump` for Celery events.
+as they happen.  Think of it like a `tcpdump` for Celery events.
 """
 import sys
 
@@ -10,7 +10,7 @@ from datetime import datetime
 
 from celery.app import app_or_default
 from celery.utils.functional import LRUCache
-from celery.utils.timeutils import humanize_seconds
+from celery.utils.time import humanize_seconds
 
 __all__ = ['Dumper', 'evdump']
 
@@ -36,6 +36,7 @@ def humanize_type(type):
 
 
 class Dumper:
+    """Monitor events."""
 
     def __init__(self, out=sys.stdout):
         self.out = out
@@ -82,6 +83,7 @@ class Dumper:
 
 
 def evdump(app=None, out=sys.stdout):
+    """Start event dump."""
     app = app_or_default(app)
     dumper = Dumper(out=out)
     dumper.say('-> evdump: starting capture...')

+ 58 - 0
celery/events/event.py

@@ -0,0 +1,58 @@
+"""Creating events, and event exchange definition."""
+import time
+from copy import copy
+from kombu import Exchange
+
+__all__ = [
+    'Event', 'event_exchange', 'get_exchange', 'group_from',
+]
+
+#: Exchange used to send events on.
+#: Note: Use :func:`get_exchange` instead, as the type of
+#: exchange will vary depending on the broker connection.
+event_exchange = Exchange('celeryev', type='topic')
+
+
+def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
+    """Create an event.
+
+    Notes:
+        An event is simply a dictionary: the only required field is ``type``.
+        A ``timestamp`` field will be set to the current time if not provided.
+    """
+    event = __dict__(_fields, **fields) if _fields else fields
+    if 'timestamp' not in event:
+        event.update(timestamp=__now__(), type=type)
+    else:
+        event['type'] = type
+    return event
+
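For example (the ``timestamp`` value is whatever ``time.time()`` returns at call time):

    >>> Event('worker-online', hostname='worker1@example.com')
    {'hostname': 'worker1@example.com', 'timestamp': 1472951642.22, 'type': 'worker-online'}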
+
+def group_from(type):
+    """Get the group part of an event type name.
+
+    Example:
+        >>> group_from('task-sent')
+        'task'
+
+        >>> group_from('custom-my-event')
+        'custom'
+    """
+    return type.split('-', 1)[0]
+
+
+def get_exchange(conn):
+    """Get exchange used for sending events.
+
+    Arguments:
+        conn (kombu.Connection): Connection used for sending/receiving events.
+
+    Note:
+        The exchange type changes from topic to fanout if Redis
+        is used as the transport.
+    """
+    ex = copy(event_exchange)
+    if conn.transport.driver_type == 'redis':
+        # quick hack for Issue #436
+        ex.type = 'fanout'
+    return ex

+ 134 - 0
celery/events/receiver.py

@@ -0,0 +1,134 @@
+"""Event receiver implementation."""
+import time
+
+from operator import itemgetter
+
+from kombu import Queue
+from kombu.connection import maybe_channel
+from kombu.mixins import ConsumerMixin
+
+from celery import uuid
+from celery.app import app_or_default
+from celery.utils.time import adjust_timestamp
+
+from .event import get_exchange
+
+__all__ = ['EventReceiver']
+
+CLIENT_CLOCK_SKEW = -1
+
+_TZGETTER = itemgetter('utcoffset', 'timestamp')
+
+
+class EventReceiver(ConsumerMixin):
+    """Capture events.
+
+    Arguments:
+        connection (kombu.Connection): Connection to the broker.
+        handlers (Mapping[Callable]): Event handlers.
+            This is a map of event type names and their handlers.
+            The special handler `"*"` captures all events that don't have a
+            handler.
+    """
+
+    app = None
+
+    def __init__(self, channel, handlers=None, routing_key='#',
+                 node_id=None, app=None, queue_prefix=None,
+                 accept=None, queue_ttl=None, queue_expires=None):
+        self.app = app_or_default(app or self.app)
+        self.channel = maybe_channel(channel)
+        self.handlers = {} if handlers is None else handlers
+        self.routing_key = routing_key
+        self.node_id = node_id or uuid()
+        self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix
+        self.exchange = get_exchange(
+            self.connection or self.app.connection_for_write())
+        if queue_ttl is None:
+            queue_ttl = self.app.conf.event_queue_ttl
+        if queue_expires is None:
+            queue_expires = self.app.conf.event_queue_expires
+        self.queue = Queue(
+            '.'.join([self.queue_prefix, self.node_id]),
+            exchange=self.exchange,
+            routing_key=self.routing_key,
+            auto_delete=True, durable=False,
+            message_ttl=queue_ttl,
+            expires=queue_expires,
+        )
+        self.clock = self.app.clock
+        self.adjust_clock = self.clock.adjust
+        self.forward_clock = self.clock.forward
+        if accept is None:
+            accept = {self.app.conf.event_serializer, 'json'}
+        self.accept = accept
+
+    def process(self, type, event):
+        """Process event by dispatching to configured handler."""
+        handler = self.handlers.get(type) or self.handlers.get('*')
+        handler and handler(event)
+
+    def get_consumers(self, Consumer, channel):
+        return [Consumer(queues=[self.queue],
+                         callbacks=[self._receive], no_ack=True,
+                         accept=self.accept)]
+
+    def on_consume_ready(self, connection, channel, consumers,
+                         wakeup=True, **kwargs):
+        if wakeup:
+            self.wakeup_workers(channel=channel)
+
+    def itercapture(self, limit=None, timeout=None, wakeup=True):
+        return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)
+
+    def capture(self, limit=None, timeout=None, wakeup=True):
+        """Open up a consumer capturing events.
+
+        This has to run in the main process, and it will never stop
+        unless :attr:`EventReceiver.should_stop` is set to True, or
+        forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
+        """
+        return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
+
+    def wakeup_workers(self, channel=None):
+        self.app.control.broadcast('heartbeat',
+                                   connection=self.connection,
+                                   channel=channel)
+
+    def event_from_message(self, body, localize=True,
+                           now=time.time, tzfields=_TZGETTER,
+                           adjust_timestamp=adjust_timestamp,
+                           CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
+        type = body['type']
+        if type == 'task-sent':
+            # clients never sync so cannot use their clock value
+            _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
+            self.adjust_clock(_c)
+        else:
+            try:
+                clock = body['clock']
+            except KeyError:
+                body['clock'] = self.forward_clock()
+            else:
+                self.adjust_clock(clock)
+
+        if localize:
+            try:
+                offset, timestamp = tzfields(body)
+            except KeyError:
+                pass
+            else:
+                body['timestamp'] = adjust_timestamp(timestamp, offset)
+        body['local_received'] = now()
+        return type, body
+
+    def _receive(self, body, message, list=list, isinstance=isinstance):
+        if isinstance(body, list):  # celery 4.0: List of events
+            process, from_message = self.process, self.event_from_message
+            [process(*from_message(event)) for event in body]
+        else:
+            self.process(*self.event_from_message(body))
+
+    @property
+    def connection(self):
+        return self.channel.connection.client if self.channel else None
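A minimal capture loop, assuming `app` is a configured Celery application; the handler is only an example:

    from celery.events.receiver import EventReceiver

    def on_task_failed(event):
        print('task failed: %r' % (event,))

    with app.connection() as connection:
        receiver = EventReceiver(connection, app=app, handlers={
            'task-failed': on_task_failed,
            '*': lambda event: None,    # ignore everything else
        })
        receiver.capture(limit=None, timeout=None, wakeup=True)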

+ 5 - 3
celery/events/snapshot.py

@@ -1,9 +1,9 @@
 # -*- coding: utf-8 -*-
 """Periodically store events in a database.
 
-Consuming the events as a stream is not always suitable
+Consuming the events as a stream isn't always suitable
 so this module implements a system to take snapshots of the
-state of a cluster at regular intervals.  There is a full
+state of a cluster at regular intervals.  There's a full
 implementation of this writing the snapshots to a database
 in :mod:`djcelery.snapshots` in the `django-celery` distribution.
 """
@@ -15,7 +15,7 @@ from celery.utils.timer2 import Timer
 from celery.utils.dispatch import Signal
 from celery.utils.imports import instantiate
 from celery.utils.log import get_logger
-from celery.utils.timeutils import rate
+from celery.utils.time import rate
 
 __all__ = ['Polaroid', 'evcam']
 
@@ -23,6 +23,7 @@ logger = get_logger('celery.evcam')
 
 
 class Polaroid:
+    """Record event snapshots."""
 
     timer = None
     shutter_signal = Signal(providing_args=('state',))
@@ -85,6 +86,7 @@ class Polaroid:
 
 def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
           logfile=None, pidfile=None, timer=None, app=None):
+    """Start snapshot recorder."""
     app = app_or_default(app)
 
     if pidfile:
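A custom camera is a Polaroid subclass overriding ``on_shutter()``; a minimal sketch (class and module names are made up), which could then be started with ``evcam('myapp.DumpCam', freq=2.0, app=app)``:

    from pprint import pformat

    from celery.events.snapshot import Polaroid

    class DumpCam(Polaroid):
        clear_after = True   # clear state between shots

        def on_shutter(self, state):
            # `state` is the events State snapshot for this interval
            print('Workers: {0}'.format(pformat(state.workers, indent=4)))
            print('Tasks: {0}'.format(pformat(state.tasks, indent=4)))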

+ 36 - 12
celery/events/state.py

@@ -11,7 +11,7 @@ at the time of the last event.
 
 Snapshots (:mod:`celery.events.snapshot`) can be used to
 take "pictures" of this state at regular intervals
-to e.g. store that in a database.
+to, for example, store that in a database.
 """
 import bisect
 import sys
@@ -34,15 +34,21 @@ from celery.utils.log import get_logger
 
 __all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']
 
+# pylint: disable=redefined-outer-name
+# We cache globals and attribute lookups, so disable this warning.
+# pylint: disable=too-many-function-args
+# For some reason pylint thinks ._event is a method, when it's a property.
+
+#: Set if running PyPy
 PYPY = hasattr(sys, 'pypy_version_info')
 
-# The window (in percentage) is added to the workers heartbeat
-# frequency.  If the time between updates exceeds this window,
-# then the worker is considered to be offline.
+#: The window (in percentage) is added to the workers heartbeat
+#: frequency.  If the time between updates exceeds this window,
+#: then the worker is considered to be offline.
 HEARTBEAT_EXPIRE_WINDOW = 200
 
-# Max drift between event timestamp and time of event received
-# before we alert that clocks may be unsynchronized.
+#: Max drift between event timestamp and time of event received
+#: before we alert that clocks may be unsynchronized.
 HEARTBEAT_DRIFT_MAX = 16
 
 DRIFT_WARNING = """\
@@ -106,8 +112,9 @@ def _warn_drift(hostname, drift, local_received, timestamp):
 def heartbeat_expires(timestamp, freq=60,
                       expire_window=HEARTBEAT_EXPIRE_WINDOW,
                       Decimal=Decimal, float=float, isinstance=isinstance):
+    """Return time when heartbeat expires."""
     # some json implementations returns decimal.Decimal objects,
-    # which are not compatible with float.
+    # which aren't compatible with float.
     freq = float(freq) if isinstance(freq, Decimal) else freq
     if isinstance(timestamp, Decimal):
         timestamp = float(timestamp)
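Given the comment on HEARTBEAT_EXPIRE_WINDOW above, the expiry works out to ``timestamp + freq * (expire_window / 100)``, so with the default 200% window a worker heartbeating every 60 seconds is considered offline two missed beats after the last one:

    >>> heartbeat_expires(timestamp=1470000000.0, freq=60)
    1470000120.0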
@@ -144,6 +151,7 @@ def with_unique_field(attr):
 @with_unique_field('hostname')
 class Worker:
     """Worker State."""
+
     heartbeat_max = 4
     expire_window = HEARTBEAT_EXPIRE_WINDOW
 
@@ -236,6 +244,7 @@ class Worker:
 @with_unique_field('uuid')
 class Task:
     """Task State."""
+
     name = received = sent = started = succeeded = failed = retried = \
         revoked = rejected = args = kwargs = eta = expires = retries = \
         worker = result = exception = timestamp = runtime = traceback = \
@@ -255,8 +264,8 @@ class Task:
         __slots__ = ('__dict__', '__weakref__')
 
     #: How to merge out of order events.
-    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
-    #: must have happened before a :event:`task-failed` event).
+    #: Disorder is detected by logical ordering (e.g., :event:`task-received`
+    #: must've happened before a :event:`task-failed` event).
     #:
     #: A merge rule consists of a state and a list of fields to keep from
     #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args
@@ -286,6 +295,8 @@ class Task:
         )
         self._serializer_handlers = {
             'children': self._serializable_children,
+            'root': self._serializable_root,
+            'parent': self._serializable_parent,
         }
         if kwargs:
             self.__dict__.update(kwargs)
@@ -299,7 +310,7 @@ class Task:
         # using .get is faster than catching KeyError in this case.
         state = task_event_to_state(type_)
         if state is not None:
-            # sets e.g. self.succeeded to the timestamp.
+            # sets, for example, self.succeeded to the timestamp.
             setattr(self, type_, timestamp)
         else:
             state = type_.upper()  # custom state
@@ -345,6 +356,12 @@ class Task:
     def _serializable_children(self, value):
         return [task.id for task in self.children]
 
+    def _serializable_root(self, value):
+        return self.root_id
+
+    def _serializable_parent(self, value):
+        return self.parent_id
+
     def __reduce__(self):
         return _depickle_task, (self.__class__, self.as_dict())
 
@@ -371,6 +388,7 @@ class Task:
 
 class State:
     """Records clusters state."""
+
     Worker = Worker
     Task = Task
     event_count = 0
@@ -478,6 +496,9 @@ class State:
             return self._event(event)
 
     def _create_dispatcher(self):
+        # noqa: C901
+        # pylint: disable=too-many-statements
+        # This code is highly optimized, but not for reusability.
         get_handler = self.handlers.__getitem__
         event_callback = self.event_callback
         wfields = itemgetter('hostname', 'timestamp', 'local_received')
@@ -618,8 +639,11 @@ class State:
                 break
 
     def tasks_by_time(self, limit=None, reverse=True):
-        """Generator giving tasks ordered by time,
-        in ``(uuid, Task)`` tuples."""
+        """Generator yielding tasks ordered by time.
+
+        Yields:
+            Tuples of ``(uuid, Task)``.
+        """
         _heap = self._taskheap
         if reverse:
             _heap = reversed(_heap)

+ 16 - 20
celery/exceptions.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-"""This module contains all exceptions used by the Celery API."""
+"""Celery Exceptions."""
 import numbers
 
 from billiard.exceptions import (  # noqa
@@ -20,28 +20,24 @@ __all__ = [
 ]
 
 UNREGISTERED_FMT = """\
-Task of kind {0} is not registered, please make sure it's imported.\
+Task of kind {0} never registered, please make sure it's imported.\
 """
 
 
 class CeleryError(Exception):
-    pass
+    """Base class for all Celery errors."""
 
 
 class CeleryWarning(UserWarning):
-    pass
+    """Base class for all Celery warnings."""
 
 
 class SecurityError(CeleryError):
-    """Security related exceptions.
-
-    Handle with care.
-    """
-    pass
+    """Security related exception."""
 
 
 class TaskPredicate(CeleryError):
-    pass
+    """Base class for task-related semi-predicates."""
 
 
 class Retry(TaskPredicate):
@@ -65,10 +61,10 @@ class Retry(TaskPredicate):
         else:
             self.exc, self.excs = exc, safe_repr(exc) if exc else None
         self.when = when
-        Exception.__init__(self, exc, when, **kwargs)
+        super(Retry, self).__init__(self, exc, when, **kwargs)
 
     def humanize(self):
-        if isinstance(self.when, numbers.Real):
+        if isinstance(self.when, numbers.Number):
             return 'in {0.when}s'.format(self)
         return 'at {0.when}'.format(self)
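A quick illustration of the two branches, assuming the usual ``Retry(message=None, exc=None, when=None)`` signature (the values below are arbitrary):

    >>> from datetime import datetime
    >>> Retry(when=5).humanize()
    'in 5s'
    >>> Retry(when=datetime(2016, 9, 1, 10, 0)).humanize()
    'at 2016-09-01 10:00:00'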
 
 
@@ -116,7 +112,7 @@ class ImproperlyConfigured(ImportError):
 
 
 class NotRegistered(KeyError, CeleryError):
-    """The task is not registered."""
+    """The task ain't registered."""
 
 
     def __repr__(self):
@@ -139,19 +135,19 @@ class TaskRevokedError(CeleryError):
 
 
 class NotConfigured(CeleryWarning):
-    """Celery has not been configured, as no config module has been found."""
+    """Celery hasn't been configured, as no config module has been found."""
 
 
 class AlwaysEagerIgnored(CeleryWarning):
-    """send_task ignores :setting:`task_always_eager` option"""
+    """send_task ignores :setting:`task_always_eager` option."""
 
 
 class InvalidTaskError(CeleryError):
-    """The task has invalid data or is not properly constructed."""
+    """The task has invalid data or ain't properly constructed."""
 
 
 class IncompleteStream(CeleryError):
-    """Found the end of a stream of data, but the data is not yet complete."""
+    """Found the end of a stream of data, but the data isn't complete."""
 
 
 class ChordError(CeleryError):
@@ -159,15 +155,15 @@ class ChordError(CeleryError):
 
 
 
 
 class CPendingDeprecationWarning(PendingDeprecationWarning):
 class CPendingDeprecationWarning(PendingDeprecationWarning):
-    pass
+    """Warning of pending deprecation."""
 
 
 
 
 class CDeprecationWarning(DeprecationWarning):
 class CDeprecationWarning(DeprecationWarning):
-    pass
+    """Warning of deprecation."""
 
 
 
 
 class FixupWarning(CeleryWarning):
 class FixupWarning(CeleryWarning):
-    pass
+    """Fixup related warning."""
 
 
 
 
 class DuplicateNodenameWarning(CeleryWarning):
 class DuplicateNodenameWarning(CeleryWarning):

+ 0 - 149
celery/five.py

@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Python 2/3 compatibility utilities."""
-import operator
-import sys
-
-from importlib import import_module
-from types import ModuleType
-
-# extends vine.five
-from vine import five
-from vine.five import *  # noqa
-from vine.five import __all__ as _all_five
-
-# bloody flake8
-items = five.items
-string_t = five.string_t
-
-try:
-    from functools import reduce
-except ImportError:
-    pass
-
-__all__ = [
-    'class_property', 'reclassmethod', 'create_module', 'recreate_module',
-]
-__all__ += _all_five
-
-#  ############# Module Generation ##########################
-
-# Utilities to dynamically
-# recreate modules, either for lazy loading or
-# to create old modules at runtime instead of
-# having them litter the source tree.
-
-# import fails in python 2.5. fallback to reduce in stdlib
-
-DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
-
-
-def getappattr(path):
-    """Gets attribute from the current_app recursively,
-    e.g. getappattr('amqp.get_task_consumer')``."""
-    from celery import current_app
-    return current_app._rgetattr(path)
-
-
-def _compat_periodic_task_decorator(*args, **kwargs):
-    from celery.task import periodic_task
-    return periodic_task(*args, **kwargs)
-
-
-class class_property:
-
-    def __init__(self, getter=None, setter=None):
-        if getter is not None and not isinstance(getter, classmethod):
-            getter = classmethod(getter)
-        if setter is not None and not isinstance(setter, classmethod):
-            setter = classmethod(setter)
-        self.__get = getter
-        self.__set = setter
-
-        info = getter.__get__(object)  # just need the info attrs.
-        self.__doc__ = info.__doc__
-        self.__name__ = info.__name__
-        self.__module__ = info.__module__
-
-    def __get__(self, obj, type=None):
-        if obj and type is None:
-            type = obj.__class__
-        return self.__get.__get__(obj, type)()
-
-    def __set__(self, obj, value):
-        if obj is None:
-            return self
-        return self.__set.__get__(obj)(value)
-
-    def setter(self, setter):
-        return self.__class__(self.__get, setter)
-
-
-def reclassmethod(method):
-    return classmethod(method)
-
-
-class LazyModule(ModuleType):
-    _compat_modules = ()
-    _all_by_module = {}
-    _direct = {}
-    _object_origins = {}
-
-    def __getattr__(self, name):
-        if name in self._object_origins:
-            module = __import__(self._object_origins[name], None, None, [name])
-            for item in self._all_by_module[module.__name__]:
-                setattr(self, item, getattr(module, item))
-            return getattr(module, name)
-        elif name in self._direct:  # pragma: no cover
-            module = __import__(self._direct[name], None, None, [name])
-            setattr(self, name, module)
-            return module
-        return ModuleType.__getattribute__(self, name)
-
-    def __dir__(self):
-        return list(set(self.__all__) | DEFAULT_ATTRS)
-
-    def __reduce__(self):
-        return import_module, (self.__name__,)
-
-
-def create_module(name, attrs, cls_attrs=None, pkg=None,
-                  base=LazyModule, prepare_attr=None):
-    fqdn = '.'.join([pkg.__name__, name]) if pkg else name
-    cls_attrs = {} if cls_attrs is None else cls_attrs
-    pkg, _, modname = name.rpartition('.')
-    cls_attrs['__module__'] = pkg
-
-    attrs = {
-        attr_name: (prepare_attr(attr) if prepare_attr else attr)
-        for attr_name, attr in items(attrs)
-    }
-    module = sys.modules[fqdn] = type(modname, (base,), cls_attrs)(name)
-    module.__dict__.update(attrs)
-    return module
-
-
-def recreate_module(name, compat_modules=(), by_module={}, direct={},
-                    base=LazyModule, **attrs):
-    old_module = sys.modules[name]
-    origins = get_origins(by_module)
-
-    _all = tuple(set(reduce(
-        operator.add,
-        [tuple(v) for v in [compat_modules, origins, direct, attrs]],
-    )))
-    cattrs = dict(
-        _compat_modules=compat_modules,
-        _all_by_module=by_module, _direct=direct,
-        _object_origins=origins,
-        __all__=_all,
-    )
-    new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
-    return old_module, new_module
-
-
-def get_origins(defs):
-    origins = {}
-    for module, attrs in items(defs):
-        origins.update({attr: module for attr in attrs})
-    return origins
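
With ``celery.five`` deleted, the Python 2/3 shims are gone and the
module-generation helpers it carried (``class_property``, ``LazyModule``,
``create_module``, ``recreate_module``, ``getappattr``) move into
``celery.local`` (see that diff further down).  A minimal sketch of the
updated import, assuming code previously pulled these names from
``celery.five``:

    # before: from celery.five import class_property, recreate_module
    from celery.local import class_property, recreate_module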

+ 1 - 0
celery/fixups/__init__.py

@@ -0,0 +1 @@
+"""Fixups."""

+ 20 - 113
celery/fixups/django.py

@@ -8,17 +8,16 @@ from kombu.utils.objects import cached_property
 
 
 from datetime import datetime
 from datetime import datetime
 from importlib import import_module
 from importlib import import_module
-from io import StringIO
 
 
+from celery import _state
 from celery import signals
 from celery import signals
-from celery.app import default_app
 from celery.exceptions import FixupWarning
 from celery.exceptions import FixupWarning
 
 
 __all__ = ['DjangoFixup', 'fixup']
 __all__ = ['DjangoFixup', 'fixup']
 
 
 ERR_NOT_INSTALLED = """\
 ERR_NOT_INSTALLED = """\
 Environment variable DJANGO_SETTINGS_MODULE is defined
 Environment variable DJANGO_SETTINGS_MODULE is defined
-but Django is not installed.  Will not apply Django fix-ups!
+but Django isn't installed.  Won't apply Django fix-ups!
 """
 """
 
 
 
 
@@ -31,6 +30,7 @@ def _maybe_close_fd(fh):
 
 
 
 
 def fixup(app, env='DJANGO_SETTINGS_MODULE'):
 def fixup(app, env='DJANGO_SETTINGS_MODULE'):
+    """Install Django fixup if settings module environment is set."""
     SETTINGS_MODULE = os.environ.get(env)
     SETTINGS_MODULE = os.environ.get(env)
     if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
     if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
         try:
         try:
@@ -42,10 +42,11 @@ def fixup(app, env='DJANGO_SETTINGS_MODULE'):
 
 
 
 
 class DjangoFixup:
 class DjangoFixup:
+    """Fixup installed when using Django."""
 
 
     def __init__(self, app):
     def __init__(self, app):
         self.app = app
         self.app = app
-        if default_app is None:
+        if _state.default_app is None:
             self.app.set_default()
             self.app.set_default()
         self._worker_fixup = None
         self._worker_fixup = None
 
 
@@ -81,19 +82,12 @@ class DjangoFixup:
         return datetime.utcnow() if utc else self._now()
         return datetime.utcnow() if utc else self._now()
 
 
     def autodiscover_tasks(self):
     def autodiscover_tasks(self):
-        try:
-            from django.apps import apps
-        except ImportError:
-            return self._settings.INSTALLED_APPS
-        else:
-            return [config.name for config in apps.get_app_configs()]
+        from django.apps import apps
+        return [config.name for config in apps.get_app_configs()]
 
 
     @cached_property
     @cached_property
     def _now(self):
     def _now(self):
-        try:
-            return symbol_by_name('django.utils.timezone:now')
-        except (AttributeError, ImportError):  # pre django-1.4
-            return datetime.now
+        return symbol_by_name('django.utils.timezone:now')
 
 
 
 
 class DjangoWorkerFixup:
 class DjangoWorkerFixup:
@@ -106,91 +100,19 @@ class DjangoWorkerFixup:
         self._cache = import_module('django.core.cache')
         self._cache = import_module('django.core.cache')
         self._settings = symbol_by_name('django.conf:settings')
         self._settings = symbol_by_name('django.conf:settings')
 
 
-        try:
-            self.interface_errors = (
-                symbol_by_name('django.db.utils.InterfaceError'),
-            )
-        except (ImportError, AttributeError):
-            self._interface_errors = ()
-
-        # Database-related exceptions.
-        DatabaseError = symbol_by_name('django.db:DatabaseError')
-        try:
-            import MySQLdb as mysql
-            _my_database_errors = (mysql.DatabaseError,
-                                   mysql.InterfaceError,
-                                   mysql.OperationalError)
-        except ImportError:
-            _my_database_errors = ()      # noqa
-        try:
-            import psycopg2 as pg
-            _pg_database_errors = (pg.DatabaseError,
-                                   pg.InterfaceError,
-                                   pg.OperationalError)
-        except ImportError:
-            _pg_database_errors = ()      # noqa
-        try:
-            import sqlite3
-            _lite_database_errors = (sqlite3.DatabaseError,
-                                     sqlite3.InterfaceError,
-                                     sqlite3.OperationalError)
-        except ImportError:
-            _lite_database_errors = ()    # noqa
-        try:
-            import cx_Oracle as oracle
-            _oracle_database_errors = (oracle.DatabaseError,
-                                       oracle.InterfaceError,
-                                       oracle.OperationalError)
-        except ImportError:
-            _oracle_database_errors = ()  # noqa
-
-        try:
-            self._close_old_connections = symbol_by_name(
-                'django.db:close_old_connections',
-            )
-        except (ImportError, AttributeError):
-            self._close_old_connections = None
-        self.database_errors = (
-            (DatabaseError,) +
-            _my_database_errors +
-            _pg_database_errors +
-            _lite_database_errors +
-            _oracle_database_errors
+        self.interface_errors = (
+            symbol_by_name('django.db.utils.InterfaceError'),
         )
         )
+        self.DatabaseError = symbol_by_name('django.db:DatabaseError')
 
 
     def django_setup(self):
     def django_setup(self):
         import django
         import django
-        try:
-            django_setup = django.setup
-        except AttributeError:  # pragma: no cover
-            pass
-        else:
-            django_setup()
+        django.setup()
 
 
     def validate_models(self):
     def validate_models(self):
+        from django.core.checks import run_checks
         self.django_setup()
         self.django_setup()
-        try:
-            from django.core.management.validation import get_validation_errors
-        except ImportError:
-            self._validate_models_django17()
-        else:
-            s = StringIO()
-            num_errors = get_validation_errors(s, None)
-            if num_errors:
-                raise RuntimeError(
-                    'One or more Django models did not validate:\n{0}'.format(
-                        s.getvalue()))
-
-    def _validate_models_django17(self):
-        from django.core.management import base
-        print(base)
-        cmd = base.BaseCommand()
-        try:
-            cmd.stdout = base.OutputWrapper(sys.stdout)
-            cmd.stderr = base.OutputWrapper(sys.stderr)
-        except ImportError:  # before django 1.5
-            cmd.stdout, cmd.stderr = sys.stdout, sys.stderr
-        cmd.check()
+        run_checks()
 
 
     def install(self):
     def install(self):
         signals.beat_embedded_init.connect(self.close_database)
         signals.beat_embedded_init.connect(self.close_database)
@@ -216,13 +138,9 @@ class DjangoWorkerFixup:
         # the inherited DB conn to also get broken in the parent
         # the inherited DB conn to also get broken in the parent
         # process so we need to remove it without triggering any
         # process so we need to remove it without triggering any
         # network IO that close() might cause.
         # network IO that close() might cause.
-        try:
-            for c in self._db.connections.all():
-                if c and c.connection:
-                    self._maybe_close_db_fd(c.connection)
-        except AttributeError:
-            if self._db.connection and self._db.connection.connection:
-                self._maybe_close_db_fd(self._db.connection.connection)
+        for c in self._db.connections.all():
+            if c and c.connection:
+                self._maybe_close_db_fd(c.connection)
 
 
         # use the _ version to avoid DB_REUSE preventing the conn.close() call
         # use the _ version to avoid DB_REUSE preventing the conn.close() call
         self._close_database()
         self._close_database()
@@ -247,8 +165,6 @@ class DjangoWorkerFixup:
             self.close_cache()
             self.close_cache()
 
 
     def close_database(self, **kwargs):
     def close_database(self, **kwargs):
-        if self._close_old_connections:
-            return self._close_old_connections()  # Django 1.6
         if not self.db_reuse_max:
         if not self.db_reuse_max:
             return self._close_database()
             return self._close_database()
         if self._db_recycles >= self.db_reuse_max * 2:
         if self._db_recycles >= self.db_reuse_max * 2:
@@ -257,21 +173,12 @@ class DjangoWorkerFixup:
         self._db_recycles += 1
         self._db_recycles += 1
 
 
     def _close_database(self):
     def _close_database(self):
-        try:
-            funs = [conn.close for conn in self._db.connections.all()]
-        except AttributeError:
-            if hasattr(self._db, 'close_old_connections'):  # django 1.6
-                funs = [self._db.close_old_connections]
-            else:
-                # pre multidb, pending deprication in django 1.6
-                funs = [self._db.close_connection]
-
-        for close in funs:
+        for conn in self._db.connections.all():
             try:
             try:
-                close()
+                conn.close()
             except self.interface_errors:
             except self.interface_errors:
                 pass
                 pass
-            except self.database_errors as exc:
+            except self.DatabaseError as exc:
                 str_exc = str(exc)
                 str_exc = str(exc)
                 if 'closed' not in str_exc and 'not connected' not in str_exc:
                 if 'closed' not in str_exc and 'not connected' not in str_exc:
                     raise
                     raise
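
The fixup is still installed automatically whenever ``DJANGO_SETTINGS_MODULE``
is set, but the compatibility branches are gone: it now assumes a Django that
provides ``django.setup()``, ``django.core.checks.run_checks()`` and
``django.apps``.  The usual integration is unchanged (module names below are
an assumption):

    import os
    from celery import Celery

    # must be set before the app is created so fixup() can see it
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')

    app = Celery('proj')
    app.config_from_object('django.conf:settings')
    app.autodiscover_tasks()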

+ 1 - 1
celery/loaders/__init__.py

@@ -16,5 +16,5 @@ LOADER_ALIASES = {
 
 
 
 
 def get_loader_cls(loader):
 def get_loader_cls(loader):
-    """Get loader class by name/alias"""
+    """Get loader class by name/alias."""
     return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd)
     return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd)

+ 1 - 1
celery/loaders/app.py

@@ -6,4 +6,4 @@ __all__ = ['AppLoader']
 
 
 
 
 class AppLoader(BaseLoader):
 class AppLoader(BaseLoader):
-    pass
+    """Default loader used when an app is specified."""

+ 12 - 17
celery/loaders/base.py

@@ -34,7 +34,7 @@ unconfigured = object()
 
 
 
 
 class BaseLoader:
 class BaseLoader:
-    """The base class for loaders.
+    """Base class for loaders.
 
 
     Loaders handles,
     Loaders handles,
 
 
@@ -51,6 +51,7 @@ class BaseLoader:
 
 
         * What modules are imported to find tasks?
         * What modules are imported to find tasks?
     """
     """
+
     builtin_modules = frozenset()
     builtin_modules = frozenset()
     configured = False
     configured = False
     override_backends = {}
     override_backends = {}
@@ -68,25 +69,23 @@ class BaseLoader:
         return datetime.now()
         return datetime.now()
 
 
     def on_task_init(self, task_id, task):
     def on_task_init(self, task_id, task):
-        """This method is called before a task is executed."""
+        """Called before a task is executed."""
         pass
         pass
 
 
     def on_process_cleanup(self):
     def on_process_cleanup(self):
-        """This method is called after a task is executed."""
+        """Called after a task is executed."""
         pass
         pass
 
 
     def on_worker_init(self):
     def on_worker_init(self):
-        """This method is called when the worker (:program:`celery worker`)
-        starts."""
+        """Called when the worker (:program:`celery worker`) starts."""
         pass
         pass
 
 
     def on_worker_shutdown(self):
     def on_worker_shutdown(self):
-        """This method is called when the worker (:program:`celery worker`)
-        shuts down."""
+        """Called when the worker (:program:`celery worker`) shuts down."""
         pass
         pass
 
 
     def on_worker_process_init(self):
     def on_worker_process_init(self):
-        """This method is called when a child process starts."""
+        """Called when a child process starts."""
         pass
         pass
 
 
     def import_task_module(self, module):
     def import_task_module(self, module):
@@ -139,12 +138,12 @@ class BaseLoader:
     def _smart_import(self, path, imp=None):
     def _smart_import(self, path, imp=None):
         imp = self.import_module if imp is None else imp
         imp = self.import_module if imp is None else imp
         if ':' in path:
         if ':' in path:
-            # Path includes attribute so can just jump here.
-            # e.g. ``os.path:abspath``.
+            # Path includes attribute so can just jump
+            # here (e.g., ``os.path:abspath``).
             return symbol_by_name(path, imp=imp)
             return symbol_by_name(path, imp=imp)
 
 
         # Not sure if path is just a module name or if it includes an
         # Not sure if path is just a module name or if it includes an
-        # attribute name (e.g. ``os.path``, vs, ``os.path.abspath``).
+        # attribute name (e.g., ``os.path``, vs, ``os.path.abspath``).
         try:
         try:
             return imp(path)
             return imp(path)
         except ImportError:
         except ImportError:
@@ -179,9 +178,7 @@ class BaseLoader:
         typemap = dict(Option.typemap, **extra_types)
         typemap = dict(Option.typemap, **extra_types)
 
 
         def getarg(arg):
         def getarg(arg):
-            """Parse a single configuration definition from
-            the command-line."""
-
+            """Parse single configuration from command-line."""
             # ## find key/value
             # ## find key/value
             # ns.key=value|ns_key=value (case insensitive)
             # ns.key=value|ns_key=value (case insensitive)
             key, value = arg.split('=', 1)
             key, value = arg.split('=', 1)
@@ -249,9 +246,7 @@ def autodiscover_tasks(packages, related_name='tasks'):
 
 
 
 
 def find_related_module(package, related_name):
 def find_related_module(package, related_name):
-    """Given a package name and a module name, tries to find that
-    module."""
-
+    """Find module in package."""
     # Django 1.7 allows for specifying a class name in INSTALLED_APPS.
     # Django 1.7 allows for speciying a class name in INSTALLED_APPS.
     # (Issue #2248).
     # (Issue #2248).
     try:
     try:

+ 1 - 2
celery/loaders/default.py

@@ -24,8 +24,7 @@ class Loader(BaseLoader):
         return DictAttribute(settingsdict)
         return DictAttribute(settingsdict)
 
 
     def read_configuration(self, fail_silently=True):
     def read_configuration(self, fail_silently=True):
-        """Read configuration from :file:`celeryconfig.py` and configure
-        celery and Django so it can be used by regular Python."""
+        """Read configuration from :file:`celeryconfig.py`."""
         configname = os.environ.get('CELERY_CONFIG_MODULE',
         configname = os.environ.get('CELERY_CONFIG_MODULE',
                                     DEFAULT_CONFIG_MODULE)
                                     DEFAULT_CONFIG_MODULE)
         try:
         try:

+ 224 - 8
celery/local.py

@@ -6,7 +6,11 @@ soon as possible, and that shall not load any third party modules.
 
 
 Parts of this module is Copyright by Werkzeug Team.
 Parts of this module is Copyright by Werkzeug Team.
 """
 """
-import importlib
+import operator
+import sys
+from functools import reduce
+from importlib import import_module
+from types import ModuleType
 
 
 __all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate']
 __all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate']
 
 
@@ -34,10 +38,12 @@ def _default_cls_attr(name, type_, cls_value):
 
 
 
 
 def try_import(module, default=None):
 def try_import(module, default=None):
-    """Try to import and return module, or return
-    None if the module does not exist."""
+    """Try to import and return module.
+
+    Returns None if the module does not exist.
+    """
     try:
     try:
-        return importlib.import_module(module)
+        return import_module(module)
     except ImportError:
     except ImportError:
         return default
         return default
 
 
@@ -88,9 +94,12 @@ class Proxy:
         return self._get_class()
         return self._get_class()
 
 
     def _get_current_object(self):
     def _get_current_object(self):
-        """Return the current object.  This is useful if you want the real
+        """Get current object.
+
+        This is useful if you want the real
         object behind the proxy at a time for performance reasons or because
         object behind the proxy at a time for performance reasons or because
-        you want to pass the object into a different context."""
+        you want to pass the object into a different context.
+        """
         loc = object.__getattribute__(self, '_Proxy__local')
         loc = object.__getattribute__(self, '_Proxy__local')
         if not hasattr(loc, '__release_local__'):
         if not hasattr(loc, '__release_local__'):
             return loc(*self.__args, **self.__kwargs)
             return loc(*self.__args, **self.__kwargs)
@@ -277,7 +286,7 @@ class Proxy:
 
 
 
 
 class PromiseProxy(Proxy):
 class PromiseProxy(Proxy):
-    """This is a proxy to an object that has not yet been evaulated.
+    """Proxy that evaluates object once.
 
 
     :class:`Proxy` will evaluate the object each time, while the
     :class:`Proxy` will evaluate the object each time, while the
     promise will only evaluate it once.
     promise will only evaluate it once.
@@ -320,7 +329,7 @@ class PromiseProxy(Proxy):
                              '_Proxy__kwargs')):
                              '_Proxy__kwargs')):
         try:
         try:
             thing = Proxy._get_current_object(self)
             thing = Proxy._get_current_object(self)
-        except:
+        except Exception:
             raise
             raise
         else:
         else:
             object.__setattr__(self, '__thing', thing)
             object.__setattr__(self, '__thing', thing)
@@ -348,7 +357,214 @@ class PromiseProxy(Proxy):
 
 
 
 
 def maybe_evaluate(obj):
 def maybe_evaluate(obj):
+    """Attempt to evaluate promise, even if obj is not a promise."""
     try:
     try:
         return obj.__maybe_evaluate__()
         return obj.__maybe_evaluate__()
     except AttributeError:
     except AttributeError:
         return obj
         return obj
+
+#  ############# Module Generation ##########################
+
+# Utilities to dynamically
+# recreate modules, either for lazy loading or
+# to create old modules at runtime instead of
+# having them litter the source tree.
+
+# import fails in python 2.5. fallback to reduce in stdlib
+
+MODULE_DEPRECATED = """
+The module %s is deprecated and will be removed in a future version.
+"""
+
+DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
+
+# im_func is no longer available in Py3.
+# instead the unbound method itself can be used.
+if sys.version_info[0] == 3:  # pragma: no cover
+    def fun_of_method(method):
+        return method
+else:
+    def fun_of_method(method):  # noqa
+        return method.im_func
+
+
+def getappattr(path):
+    """Get attribute from current_app recursively.
+
+    Example: ``getappattr('amqp.get_task_consumer')``.
+
+    """
+    from celery import current_app
+    return current_app._rgetattr(path)
+
+
+def _compat_periodic_task_decorator(*args, **kwargs):
+    from celery.task import periodic_task
+    return periodic_task(*args, **kwargs)
+
+COMPAT_MODULES = {
+    'celery': {
+        'execute': {
+            'send_task': 'send_task',
+        },
+        'decorators': {
+            'task': 'task',
+            'periodic_task': _compat_periodic_task_decorator,
+        },
+        'log': {
+            'get_default_logger': 'log.get_default_logger',
+            'setup_logger': 'log.setup_logger',
+            'setup_logging_subsystem': 'log.setup_logging_subsystem',
+            'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
+        },
+        'messaging': {
+            'TaskConsumer': 'amqp.TaskConsumer',
+            'establish_connection': 'connection',
+            'get_consumer_set': 'amqp.TaskConsumer',
+        },
+        'registry': {
+            'tasks': 'tasks',
+        },
+    },
+    'celery.task': {
+        'control': {
+            'broadcast': 'control.broadcast',
+            'rate_limit': 'control.rate_limit',
+            'time_limit': 'control.time_limit',
+            'ping': 'control.ping',
+            'revoke': 'control.revoke',
+            'discard_all': 'control.purge',
+            'inspect': 'control.inspect',
+        },
+        'schedules': 'celery.schedules',
+        'chords': 'celery.canvas',
+    }
+}
+
+#: We exclude these from dir(celery)
+DEPRECATED_ATTRS = set(COMPAT_MODULES['celery'].keys()) | {'subtask'}
+
+
+class class_property(object):
+
+    def __init__(self, getter=None, setter=None):
+        if getter is not None and not isinstance(getter, classmethod):
+            getter = classmethod(getter)
+        if setter is not None and not isinstance(setter, classmethod):
+            setter = classmethod(setter)
+        self.__get = getter
+        self.__set = setter
+
+        info = getter.__get__(object)  # just need the info attrs.
+        self.__doc__ = info.__doc__
+        self.__name__ = info.__name__
+        self.__module__ = info.__module__
+
+    def __get__(self, obj, type=None):
+        if obj and type is None:
+            type = obj.__class__
+        return self.__get.__get__(obj, type)()
+
+    def __set__(self, obj, value):
+        if obj is None:
+            return self
+        return self.__set.__get__(obj)(value)
+
+    def setter(self, setter):
+        return self.__class__(self.__get, setter)
+
+
+def reclassmethod(method):
+    return classmethod(fun_of_method(method))
+
+
+class LazyModule(ModuleType):
+    _compat_modules = ()
+    _all_by_module = {}
+    _direct = {}
+    _object_origins = {}
+
+    def __getattr__(self, name):
+        if name in self._object_origins:
+            module = __import__(self._object_origins[name], None, None, [name])
+            for item in self._all_by_module[module.__name__]:
+                setattr(self, item, getattr(module, item))
+            return getattr(module, name)
+        elif name in self._direct:  # pragma: no cover
+            module = __import__(self._direct[name], None, None, [name])
+            setattr(self, name, module)
+            return module
+        return ModuleType.__getattribute__(self, name)
+
+    def __dir__(self):
+        return [
+            attr for attr in set(self.__all__) | DEFAULT_ATTRS
+            if attr not in DEPRECATED_ATTRS
+        ]
+
+    def __reduce__(self):
+        return import_module, (self.__name__,)
+
+
+def create_module(name, attrs, cls_attrs=None, pkg=None,
+                  base=LazyModule, prepare_attr=None):
+    fqdn = '.'.join([pkg.__name__, name]) if pkg else name
+    cls_attrs = {} if cls_attrs is None else cls_attrs
+    pkg, _, modname = name.rpartition('.')
+    cls_attrs['__module__'] = pkg
+
+    attrs = {
+        attr_name: (prepare_attr(attr) if prepare_attr else attr)
+        for attr_name, attr in attrs.items()
+    }
+    module = sys.modules[fqdn] = type(
+        modname, (base,), cls_attrs)(name)
+    module.__dict__.update(attrs)
+    return module
+
+
+def recreate_module(name, compat_modules=(), by_module={}, direct={},
+                    base=LazyModule, **attrs):
+    old_module = sys.modules[name]
+    origins = get_origins(by_module)
+    compat_modules = COMPAT_MODULES.get(name, ())
+
+    _all = tuple(set(reduce(
+        operator.add,
+        [tuple(v) for v in [compat_modules, origins, direct, attrs]],
+    )))
+    if sys.version_info[0] < 3:
+        _all = [s.encode() for s in _all]
+    cattrs = dict(
+        _compat_modules=compat_modules,
+        _all_by_module=by_module, _direct=direct,
+        _object_origins=origins,
+        __all__=_all,
+    )
+    new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
+    new_module.__dict__.update({
+        mod: get_compat_module(new_module, mod) for mod in compat_modules
+    })
+    return old_module, new_module
+
+
+def get_compat_module(pkg, name):
+    def prepare(attr):
+        if isinstance(attr, str):
+            return Proxy(getappattr, (attr,))
+        return attr
+
+    attrs = COMPAT_MODULES[pkg.__name__][name]
+    if isinstance(attrs, str):
+        fqdn = '.'.join([pkg.__name__, name])
+        module = sys.modules[fqdn] = import_module(attrs)
+        return module
+    attrs['__all__'] = list(attrs)
+    return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)
+
+
+def get_origins(defs):
+    origins = {}
+    for module, attrs in defs.items():
+        origins.update({attr: module for attr in attrs})
+    return origins
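
A short sketch of the lazy primitives this module now hosts alongside the
relocated module-generation helpers (the ``build_settings`` factory is
hypothetical):

    from types import SimpleNamespace
    from celery.local import Proxy, PromiseProxy

    def build_settings():
        print('building settings')        # runs when the proxy is evaluated
        return SimpleNamespace(timezone='UTC')

    lazy = Proxy(build_settings)          # re-evaluates on every access
    once = PromiseProxy(build_settings)   # evaluates once, then caches

    print(lazy.timezone, once.timezone)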

+ 106 - 80
celery/platforms.py

@@ -1,6 +1,9 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-"""Utilities dealing with platform specifics: signals, daemonization,
-users, groups, and so on."""
+"""Platforms.
+
+Utilities dealing with platform specifics: signals, daemonization,
+users, groups, and so on.
+"""
 import atexit
 import atexit
 import errno
 import errno
 import math
 import math
@@ -19,6 +22,7 @@ from kombu.utils.compat import maybe_fileno
 from kombu.utils.encoding import safe_str
 from kombu.utils.encoding import safe_str
 from contextlib import contextmanager
 from contextlib import contextmanager
 
 
+from .exceptions import SecurityError
 from .local import try_import
 from .local import try_import
 
 
 try:
 try:
@@ -34,13 +38,12 @@ mputil = try_import('multiprocessing.util')
 
 
 __all__ = [
 __all__ = [
     'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
     'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
-    'IS_macOS', 'IS_WINDOWS', 'pyimplementation', 'LockFailed',
-    'get_fdmax', 'Pidfile', 'create_pidlock',
-    'close_open_fds', 'DaemonContext', 'detached', 'parse_uid',
-    'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid',
-    'maybe_drop_privileges', 'signals', 'set_process_title',
-    'set_mp_process_title', 'get_errno_name', 'ignore_errno',
-    'fd_by_path', 'isatty',
+    'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed',
+    'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds',
+    'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups',
+    'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals',
+    'signal_name', 'set_process_title', 'set_mp_process_title',
+    'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty',
 ]
 ]
 
 
 # exitcodes
 # exitcodes
@@ -77,7 +80,7 @@ User information: uid={uid} euid={euid} gid={gid} egid={egid}
 """
 """
 
 
 ROOT_DISCOURAGED = """\
 ROOT_DISCOURAGED = """\
-You are running the worker with superuser privileges, which is
+You're running the worker with superuser privileges: this is
 absolutely not recommended!
 absolutely not recommended!
 
 
 Please specify a different user using the -u option.
 Please specify a different user using the -u option.
@@ -85,8 +88,15 @@ Please specify a different user using the -u option.
 User information: uid={uid} euid={euid} gid={gid} egid={egid}
 User information: uid={uid} euid={euid} gid={gid} egid={egid}
 """
 """
 
 
+SIGNAMES = {
+    sig for sig in dir(_signal)
+    if sig.startswith('SIG') and '_' not in sig
+}
+SIGMAP = {getattr(_signal, name): name for name in SIGNAMES}
+
 
 
 def isatty(fh):
 def isatty(fh):
+    """Return true if the process has a controlling terminal."""
     try:
     try:
         return fh.isatty()
         return fh.isatty()
     except AttributeError:
     except AttributeError:
@@ -113,14 +123,14 @@ class LockFailed(Exception):
 
 
 
 
 class Pidfile:
 class Pidfile:
-    """Pidfile
+    """Pidfile.
 
 
     This is the type returned by :func:`create_pidlock`.
     This is the type returned by :func:`create_pidlock`.
 
 
     See Also:
     See Also:
         Best practice is to not use this directly but rather use
         Best practice is to not use this directly but rather use
-        the :func:`create_pidlock` function instead,
-        which is more convenient and also removes stale pidfiles (when
+        the :func:`create_pidlock` function instead:
+        more convenient and also removes stale pidfiles (when
         the process holding the lock is no longer running).
         the process holding the lock is no longer running).
     """
     """
 
 
@@ -169,12 +179,14 @@ class Pidfile:
             os.unlink(self.path)
             os.unlink(self.path)
 
 
     def remove_if_stale(self):
     def remove_if_stale(self):
-        """Remove the lock if the process is not running.
-        (does not respond to signals)."""
+        """Remove the lock if the process isn't running.
+
+        I.e. process does not respond to signals.
+        """
         try:
         try:
             pid = self.read_pid()
             pid = self.read_pid()
         except ValueError as exc:
         except ValueError as exc:
-            print('Broken pidfile found. Removing it.', file=sys.stderr)
+            print('Broken pidfile found - Removing it.', file=sys.stderr)
             self.remove()
             self.remove()
             return True
             return True
         if not pid:
         if not pid:
@@ -185,7 +197,7 @@ class Pidfile:
             os.kill(pid, 0)
             os.kill(pid, 0)
         except os.error as exc:
         except os.error as exc:
             if exc.errno == errno.ESRCH:
             if exc.errno == errno.ESRCH:
-                print('Stale pidfile exists. Removing it.', file=sys.stderr)
+                print('Stale pidfile exists - Removing it.', file=sys.stderr)
                 self.remove()
                 self.remove()
                 return True
                 return True
         return False
         return False
@@ -221,7 +233,7 @@ def create_pidlock(pidfile):
     """Create and verify pidfile.
     """Create and verify pidfile.
 
 
     If the pidfile already exists the program exits with an error message,
     If the pidfile already exists the program exits with an error message,
-    however if the process it refers to is not running anymore, the pidfile
+    however if the process it refers to isn't running anymore, the pidfile
     is deleted and the program continues.
     is deleted and the program continues.
 
 
     This function will automatically install an :mod:`atexit` handler
     This function will automatically install an :mod:`atexit` handler
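
Usage stays the same; a small sketch (the pidfile path is an assumption):

    from celery.platforms import create_pidlock

    # exits the program if another live process holds the lock; stale
    # pidfiles are removed, and the returned Pidfile is released at exit
    # by the atexit handler mentioned above.
    pidlock = create_pidlock('/var/run/myworker.pid')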
@@ -284,6 +296,7 @@ def fd_by_path(paths):
 
 
 
 
 class DaemonContext:
 class DaemonContext:
+    """Context manager daemonizing the process."""
 
 
     _is_open = False
     _is_open = False
 
 
@@ -355,14 +368,14 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
             The ability to write to this file
             The ability to write to this file
             will be verified before the process is detached.
             will be verified before the process is detached.
         pidfile (str): Optional pid file.
         pidfile (str): Optional pid file.
-            The pidfile will not be created,
+            The pidfile won't be created,
             as this is the responsibility of the child.  But the process will
             as this is the responsibility of the child.  But the process will
             exit if the pid lock exists and the pid written is still running.
             exit if the pid lock exists and the pid written is still running.
         uid (int, str): Optional user id or user name to change
         uid (int, str): Optional user id or user name to change
             effective privileges to.
             effective privileges to.
         gid (int, str): Optional group id or group name to change
         gid (int, str): Optional group id or group name to change
             effective privileges to.
             effective privileges to.
-        umask (str, int): Optional umask that will be effective in
+        umask (str, int): Optional umask that'll be effective in
             the child process.
             the child process.
         workdir (str): Optional new working directory.
         workdir (str): Optional new working directory.
         fake (bool): Don't actually detach, intended for debugging purposes.
         fake (bool): Don't actually detach, intended for debugging purposes.
@@ -376,7 +389,7 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
         ...           uid='nobody'):
         ...           uid='nobody'):
         ... # Now in detached child process with effective user set to nobody,
         ... # Now in detached child process with effective user set to nobody,
         ... # and we know that our logfile can be written to, and that
         ... # and we know that our logfile can be written to, and that
-        ... # the pidfile is not locked.
+        ... # the pidfile isn't locked.
         ... pidlock = create_pidlock('/var/run/app.pid')
         ... pidlock = create_pidlock('/var/run/app.pid')
         ...
         ...
         ... # Run the program
         ... # Run the program
@@ -437,9 +450,9 @@ def parse_gid(gid):
 
 
 
 
 def _setgroups_hack(groups):
 def _setgroups_hack(groups):
-    """:fun:`setgroups` may have a platform-dependent limit,
-    and it is not always possible to know in advance what this limit
-    is, so we use this ugly hack stolen from glibc."""
+    # :fun:`setgroups` may have a platform-dependent limit,
+    # and it's not always possible to know in advance what this limit
+    # is, so we use this ugly hack stolen from glibc.
     groups = groups[:]
     groups = groups[:]
 
 
     while 1:
     while 1:
@@ -460,7 +473,7 @@ def setgroups(groups):
     max_groups = None
     max_groups = None
     try:
     try:
         max_groups = os.sysconf('SC_NGROUPS_MAX')
         max_groups = os.sysconf('SC_NGROUPS_MAX')
-    except Exception:
+    except Exception:  # pylint: disable=broad-except
         pass
         pass
     try:
     try:
         return _setgroups_hack(groups[:max_groups])
         return _setgroups_hack(groups[:max_groups])
@@ -471,8 +484,11 @@ def setgroups(groups):
 
 
 
 
 def initgroups(uid, gid):
 def initgroups(uid, gid):
-    """Compat version of :func:`os.initgroups` which was first
-    added to Python 2.7."""
+    """Init process group permissions.
+
+    Compat version of :func:`os.initgroups` that was first
+    added to Python 2.7.
+    """
     if not pwd:  # pragma: no cover
     if not pwd:  # pragma: no cover
         return
         return
     username = pwd.getpwuid(uid)[0]
     username = pwd.getpwuid(uid)[0]
@@ -508,46 +524,52 @@ def maybe_drop_privileges(uid=None, gid=None):
     if os.geteuid():
     if os.geteuid():
         # no point trying to setuid unless we're root.
         # no point trying to setuid unless we're root.
         if not os.getuid():
         if not os.getuid():
-            raise AssertionError('contact support')
+            raise SecurityError('contact support')
     uid = uid and parse_uid(uid)
     uid = uid and parse_uid(uid)
     gid = gid and parse_gid(gid)
     gid = gid and parse_gid(gid)
 
 
     if uid:
     if uid:
-        # If GID isn't defined, get the primary GID of the user.
-        if not gid and pwd:
-            gid = pwd.getpwuid(uid).pw_gid
-        # Must set the GID before initgroups(), as setgid()
-        # is known to zap the group list on some platforms.
-
-        # setgid must happen before setuid (otherwise the setgid operation
-        # may fail because of insufficient privileges and possibly stay
-        # in a privileged group).
-        setgid(gid)
-        initgroups(uid, gid)
-
-        # at last:
-        setuid(uid)
-        # ... and make sure privileges cannot be restored:
-        try:
-            setuid(0)
-        except PermissionError:
-            pass  # Good: cannot restore privileges.
-        else:
-            raise RuntimeError(
-                'non-root user able to restore privileges after setuid.')
+        _setuid(uid, gid)
     else:
     else:
         gid and setgid(gid)
         gid and setgid(gid)
 
 
-    if uid and (not os.getuid()) and not (os.geteuid()):
-        raise AssertionError('Still root uid after drop privileges!')
-    if gid and (not os.getgid()) and not (os.getegid()):
-        raise AssertionError('Still root gid after drop privileges!')
+    if uid and not os.getuid() and not os.geteuid():
+        raise SecurityError('Still root uid after drop privileges!')
+    if gid and not os.getgid() and not os.getegid():
+        raise SecurityError('Still root gid after drop privileges!')
+
+
+def _setuid(uid, gid):
+    # If GID isn't defined, get the primary GID of the user.
+    if not gid and pwd:
+        gid = pwd.getpwuid(uid).pw_gid
+    # Must set the GID before initgroups(), as setgid()
+    # is known to zap the group list on some platforms.
+
+    # setgid must happen before setuid (otherwise the setgid operation
+    # may fail because of insufficient privileges and possibly stay
+    # in a privileged group).
+    setgid(gid)
+    initgroups(uid, gid)
+
+    # at last:
+    setuid(uid)
+    # ... and make sure privileges cannot be restored:
+    try:
+        setuid(0)
+    except PermissionError:
+        # we should get here: cannot restore privileges,
+        # everything was fine.
+        pass
+    else:
+        raise SecurityError(
+            'non-root user able to restore privileges after setuid.')
 
 
 
 
 class Signals:
 class Signals:
     """Convenience interface to :mod:`signals`.
     """Convenience interface to :mod:`signals`.
 
 
-    If the requested signal is not supported on the current platform,
+    If the requested signal isn't supported on the current platform,
     the operation will be ignored.
     the operation will be ignored.
 
 
     Example:
     Example:
@@ -601,23 +623,22 @@ class Signals:
     def reset_alarm(self):
     def reset_alarm(self):
         return _signal.alarm(0)
         return _signal.alarm(0)
 
 
-    def supported(self, signal_name):
-        """Return true value if ``signal_name`` exists on this platform."""
+    def supported(self, name):
+        """Return true value if signal by ``name`` exists on this platform."""
         try:
         try:
-            return self.signum(signal_name)
+            return self.signum(name)
         except AttributeError:
         except AttributeError:
             pass
             pass
 
 
-    def signum(self, signal_name):
-        """Get signal number from signal name."""
-        if isinstance(signal_name, numbers.Integral):
-            return signal_name
-        if (not isinstance(signal_name, str) or
-                not signal_name.isupper()):
+    def signum(self, name):
+        """Get signal number by name."""
+        if isinstance(name, numbers.Integral):
+            return name
+        if not isinstance(name, str) or not name.isupper():
             raise TypeError('signal name must be uppercase string.')
             raise TypeError('signal name must be uppercase string.')
-        if not signal_name.startswith('SIG'):
-            signal_name = 'SIG' + signal_name
-        return getattr(_signal, signal_name)
+        if not name.startswith('SIG'):
+            name = 'SIG' + name
+        return getattr(_signal, name)
 
 
     def reset(self, *signal_names):
     def reset(self, *signal_names):
         """Reset signals to the default signal handler.
         """Reset signals to the default signal handler.
@@ -627,32 +648,32 @@ class Signals:
         """
         """
         self.update((sig, self.default) for sig in signal_names)
         self.update((sig, self.default) for sig in signal_names)
 
 
-    def ignore(self, *signal_names):
+    def ignore(self, *names):
         """Ignore signal using :const:`SIG_IGN`.
         """Ignore signal using :const:`SIG_IGN`.
 
 
         Does nothing if the platform has no support for signals,
         Does nothing if the platform has no support for signals,
         or the specified signal in particular.
         or the specified signal in particular.
         """
         """
-        self.update((sig, self.ignored) for sig in signal_names)
+        self.update((sig, self.ignored) for sig in names)
 
 
-    def __getitem__(self, signal_name):
-        return _signal.getsignal(self.signum(signal_name))
+    def __getitem__(self, name):
+        return _signal.getsignal(self.signum(name))
 
 
-    def __setitem__(self, signal_name, handler):
+    def __setitem__(self, name, handler):
         """Install signal handler.
         """Install signal handler.
 
 
         Does nothing if the current platform has no support for signals,
         Does nothing if the current platform has no support for signals,
         or the specified signal in particular.
         or the specified signal in particular.
         """
         """
         try:
         try:
-            _signal.signal(self.signum(signal_name), handler)
+            _signal.signal(self.signum(name), handler)
         except (AttributeError, ValueError):
         except (AttributeError, ValueError):
             pass
             pass
 
 
     def update(self, _d_=None, **sigmap):
     def update(self, _d_=None, **sigmap):
         """Set signal handlers from a mapping."""
         """Set signal handlers from a mapping."""
-        for signal_name, handler in dict(_d_ or {}, **sigmap).items():
-            self[signal_name] = handler
+        for name, handler in dict(_d_ or {}, **sigmap).items():
+            self[name] = handler
 
 
 signals = Signals()
 signals = Signals()
 get_signal = signals.signum                   # compat
 get_signal = signals.signum                   # compat
@@ -661,6 +682,11 @@ reset_signal = signals.reset                  # compat
 ignore_signal = signals.ignore                # compat
 ignore_signal = signals.ignore                # compat
 
 
 
 
+def signal_name(signum):
+    """Return name of signal from signal number."""
+    return SIGMAP[signum][3:]
+
+
 def strargv(argv):
 def strargv(argv):
     arg_start = 2 if 'manage' in argv[0] else 1
     arg_start = 2 if 'manage' in argv[0] else 1
     if len(argv) > arg_start:
     if len(argv) > arg_start:
@@ -683,12 +709,12 @@ def set_process_title(progname, info=None):
 if os.environ.get('NOSETPS'):  # pragma: no cover
 if os.environ.get('NOSETPS'):  # pragma: no cover
 
 
     def set_mp_process_title(*a, **k):
     def set_mp_process_title(*a, **k):
+        """Disabled feature."""
         pass
         pass
 else:
 else:
 
 
     def set_mp_process_title(progname, info=None, hostname=None):  # noqa
     def set_mp_process_title(progname, info=None, hostname=None):  # noqa
-        """Set the :command:`ps` name using the :mod:`multiprocessing`
-        process name.
+        """Set the :command:`ps` name from the current process name.
 
 
         Only works if :pypi:`setproctitle` is installed.
         Only works if :pypi:`setproctitle` is installed.
         """
         """
@@ -699,7 +725,7 @@ else:
 
 
 
 
 def get_errno_name(n):
 def get_errno_name(n):
-    """Get errno for string, e.g. ``ENOENT``."""
+    """Get errno for string (e.g., ``ENOENT``)."""
     if isinstance(n, str):
     if isinstance(n, str):
         return getattr(errno, n)
         return getattr(errno, n)
     return n
     return n
@@ -709,7 +735,7 @@ def get_errno_name(n):
 def ignore_errno(*errnos, **kwargs):
 def ignore_errno(*errnos, **kwargs):
     """Context manager to ignore specific POSIX error codes.
     """Context manager to ignore specific POSIX error codes.
 
 
-    Takes a list of error codes to ignore, which can be either
+    Takes a list of error codes to ignore: this can be either
     the name of the code, or the code integer itself::
     the name of the code, or the code integer itself::
 
 
         >>> with ignore_errno('ENOENT'):
         >>> with ignore_errno('ENOENT'):
@@ -743,7 +769,7 @@ def check_privileges(accept_content):
     if hasattr(os, 'fchown'):
     if hasattr(os, 'fchown'):
         if not all(hasattr(os, attr)
         if not all(hasattr(os, attr)
                    for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):
                    for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):
-            raise AssertionError('suspicious platform, contact support')
+            raise SecurityError('suspicious platform, contact support')
 
 
     if not uid or not gid or not euid or not egid:
     if not uid or not gid or not euid or not egid:
         if ('pickle' in accept_content or
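
To illustrate the new ``SIGMAP``/``signal_name`` helpers together with the
``Signals`` mapping whose arguments were renamed above (the handler is
hypothetical):

    from celery.platforms import signals, signal_name

    def on_term(signum, frame):
        print('received', signal_name(signum))   # e.g. 'TERM'

    signals['TERM'] = on_term    # no-op if the platform lacks the signal
    signals.ignore('USR2')       # signum() adds the 'SIG' prefix itself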
         if ('pickle' in accept_content or

+ 57 - 42
celery/result.py

@@ -48,7 +48,7 @@ def allow_join_result():
 
 
 
 
 class ResultBase:
 class ResultBase:
-    """Base class for all results"""
+    """Base class for results."""
 
 
     #: Parent result (if part of a chain)
     #: Parent result (if part of a chain)
     parent = None
     parent = None
@@ -63,6 +63,7 @@ class AsyncResult(ResultBase):
         id (str): See :attr:`id`.
         id (str): See :attr:`id`.
         backend (Backend): See :attr:`backend`.
         backend (Backend): See :attr:`backend`.
     """
     """
+
     app = None
     app = None
 
 
     #: Error raised for timeouts.
     #: Error raised for timeouts.
@@ -84,7 +85,7 @@ class AsyncResult(ResultBase):
         self.id = id
         self.id = id
         self.backend = backend or self.app.backend
         self.backend = backend or self.app.backend
         self.parent = parent
         self.parent = parent
-        self.on_ready = promise(self._on_fulfilled)
+        self.on_ready = promise(self._on_fulfilled, weak=True)
         self._cache = None
         self._cache = None
 
 
     def then(self, callback, on_error=None, weak=False):
     def then(self, callback, on_error=None, weak=False):
@@ -127,8 +128,8 @@ class AsyncResult(ResultBase):
                                 reply=wait, timeout=timeout)
                                 reply=wait, timeout=timeout)
 
 
     def get(self, timeout=None, propagate=True, interval=0.5,
     def get(self, timeout=None, propagate=True, interval=0.5,
-            no_ack=True, follow_parents=True, callback=None, on_interval=None,
-            EXCEPTION_STATES=states.EXCEPTION_STATES,
+            no_ack=True, follow_parents=True, callback=None, on_message=None,
+            on_interval=None, EXCEPTION_STATES=states.EXCEPTION_STATES,
             PROPAGATE_STATES=states.PROPAGATE_STATES):
             PROPAGATE_STATES=states.PROPAGATE_STATES):
         """Wait until task is ready, and return its result.
         """Wait until task is ready, and return its result.
 
 
@@ -142,7 +143,7 @@ class AsyncResult(ResultBase):
             propagate (bool): Re-raise exception if the task failed.
             propagate (bool): Re-raise exception if the task failed.
             interval (float): Time to wait (in seconds) before retrying to
             interval (float): Time to wait (in seconds) before retrying to
                 retrieve the result.  Note that this does not have any effect
                 retrieve the result.  Note that this does not have any effect
-                when using the RPC/redis result store backends, as they do not
+                when using the RPC/redis result store backends, as they don't
                 use polling.
                 use polling.
             no_ack (bool): Enable amqp no ack (automatically acknowledge
             no_ack (bool): Enable amqp no ack (automatically acknowledge
                 message).  If this is :const:`False` then the message will
                 message).  If this is :const:`False` then the message will
@@ -151,7 +152,7 @@ class AsyncResult(ResultBase):
                 parent tasks.
                 parent tasks.
 
 
         Raises:
         Raises:
-            celery.exceptions.TimeoutError: if `timeout` is not
+            celery.exceptions.TimeoutError: if `timeout` isn't
                 :const:`None` and the result does not arrive within
                 :const:`None` and the result does not arrive within
                 `timeout` seconds.
                 `timeout` seconds.
             Exception: If the remote call raised an exception then that
             Exception: If the remote call raised an exception then that
@@ -160,7 +161,7 @@ class AsyncResult(ResultBase):
         assert_will_not_block()
         assert_will_not_block()
         _on_interval = promise()
         _on_interval = promise()
         if follow_parents and propagate and self.parent:
         if follow_parents and propagate and self.parent:
-            on_interval = promise(self._maybe_reraise_parent_error)
+            on_interval = promise(self._maybe_reraise_parent_error, weak=True)
             self._maybe_reraise_parent_error()
             self._maybe_reraise_parent_error()
         if on_interval:
         if on_interval:
             _on_interval.then(on_interval)
             _on_interval.then(on_interval)
@@ -178,6 +179,7 @@ class AsyncResult(ResultBase):
             no_ack=no_ack,
             no_ack=no_ack,
             propagate=propagate,
             propagate=propagate,
             callback=callback,
             callback=callback,
+            on_message=on_message,
         )
         )
 
 
     def _maybe_reraise_parent_error(self):
     def _maybe_reraise_parent_error(self):
@@ -191,7 +193,9 @@ class AsyncResult(ResultBase):
             node = node.parent
             node = node.parent
 
 
     def collect(self, intermediate=False, **kwargs):
     def collect(self, intermediate=False, **kwargs):
-        """Iterator, like :meth:`get` will wait for the task to complete,
+        """Collect results as they return.
+
+        Iterator, like :meth:`get` will wait for the task to complete,
         but will also follow :class:`AsyncResult` and :class:`ResultSet`
         but will also follow :class:`AsyncResult` and :class:`ResultSet`
         returned by the task, yielding ``(result, value)`` tuples for each
         returned by the task, yielding ``(result, value)`` tuples for each
         result in the tree.
         result in the tree.
@@ -256,7 +260,7 @@ class AsyncResult(ResultBase):
                     raise IncompleteStream()
                     raise IncompleteStream()
 
 
     def ready(self):
     def ready(self):
-        """Returns :const:`True` if the task has been executed.
+        """Return :const:`True` if the task started executing.
 
 
         If the task is still running, pending, or is waiting
         If the task is still running, pending, or is waiting
         for retry then :const:`False` is returned.
         for retry then :const:`False` is returned.
@@ -264,11 +268,11 @@ class AsyncResult(ResultBase):
         return self.state in self.backend.READY_STATES
         return self.state in self.backend.READY_STATES
 
 
     def successful(self):
     def successful(self):
-        """Returns :const:`True` if the task executed successfully."""
+        """Return :const:`True` if the task executed successfully."""
         return self.state == states.SUCCESS
         return self.state == states.SUCCESS
 
 
     def failed(self):
     def failed(self):
-        """Returns :const:`True` if the task failed."""
+        """Return :const:`True` if the task failed."""
         return self.state == states.FAILURE
         return self.state == states.FAILURE
 
 
     def throw(self, *args, **kwargs):
     def throw(self, *args, **kwargs):
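
The three predicates above combine as in this small sketch (an ``add`` task on a running worker is assumed for illustration):

    res = add.delay(2, 2)
    if res.ready():                 # reached a state in READY_STATES
        print(res.successful(), res.failed(), res.state)
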
@@ -283,6 +287,7 @@ class AsyncResult(ResultBase):
         if callback is not None:
         if callback is not None:
             callback(self.id, value)
             callback(self.id, value)
         return value
         return value
+    maybe_reraise = maybe_throw   # XXX compat alias
 
 
     def _to_remote_traceback(self, tb):
     def _to_remote_traceback(self, tb):
         if tb and tblib is not None and self.app.conf.task_remote_tracebacks:
         if tb and tblib is not None and self.app.conf.task_remote_tracebacks:
@@ -299,11 +304,11 @@ class AsyncResult(ResultBase):
         return graph
         return graph
 
 
     def __str__(self):
     def __str__(self):
-        """`str(self) -> self.id`"""
+        """`str(self) -> self.id`."""
         return str(self.id)
         return str(self.id)
 
 
     def __hash__(self):
     def __hash__(self):
-        """`hash(self) -> hash(self.id)`"""
+        """`hash(self) -> hash(self.id)`."""
         return hash(self.id)
         return hash(self.id)
 
 
     def __repr__(self):
     def __repr__(self):
@@ -371,9 +376,13 @@ class AsyncResult(ResultBase):
 
 
     @property
     @property
     def result(self):
     def result(self):
-        """When the task has been executed, this contains the return value.
-        If the task raised an exception, this will be the exception
-        instance."""
+        """Task return value.
+
+        Note:
+            When the task has been executed, this contains the return value.
+            If the task raised an exception, this will be the exception
+            instance.
+        """
         return self._get_task_meta()['result']
         return self._get_task_meta()['result']
     info = result
     info = result
 
 
@@ -408,7 +417,7 @@ class AsyncResult(ResultBase):
 
 
             *SUCCESS*
             *SUCCESS*
 
 
-                The task executed successfully. The :attr:`result` attribute
+                The task executed successfully.  The :attr:`result` attribute
                 then contains the tasks return value.
                 then contains the tasks return value.
         """
         """
         return self._get_task_meta()['status']
         return self._get_task_meta()['status']
@@ -417,11 +426,12 @@ class AsyncResult(ResultBase):
 @Thenable.register
 @Thenable.register
 @AbstractResult.register
 @AbstractResult.register
 class ResultSet(ResultBase):
 class ResultSet(ResultBase):
-    """Working with more than one result.
+    """A collection of results.
 
 
     Arguments:
     Arguments:
         results (Sequence[AsyncResult]): List of result instances.
         results (Sequence[AsyncResult]): List of result instances.
     """
     """
+
     _app = None
     _app = None
 
 
     #: List of results in in the set.
     #: List of results in in the set.
@@ -434,7 +444,7 @@ class ResultSet(ResultBase):
         self.on_ready = promise(args=(self,))
         self.on_ready = promise(args=(self,))
         self._on_full = ready_barrier or barrier(results)
         self._on_full = ready_barrier or barrier(results)
         if self._on_full:
         if self._on_full:
-            self._on_full.then(promise(self.on_ready))
+            self._on_full.then(promise(self.on_ready, weak=True))
 
 
     def add(self, result):
     def add(self, result):
         """Add :class:`AsyncResult` as a new member of the set.
         """Add :class:`AsyncResult` as a new member of the set.
@@ -456,7 +466,7 @@ class ResultSet(ResultBase):
         """Remove result from the set; it must be a member.
         """Remove result from the set; it must be a member.
 
 
         Raises:
         Raises:
-            KeyError: if the result is not a member.
+            KeyError: if the result isn't a member.
         """
         """
         if isinstance(result, str):
         if isinstance(result, str):
             result = self.app.AsyncResult(result)
             result = self.app.AsyncResult(result)
@@ -466,16 +476,17 @@ class ResultSet(ResultBase):
             raise KeyError(result)
             raise KeyError(result)
 
 
     def discard(self, result):
     def discard(self, result):
-        """Remove result from the set if it is a member,
-        or do nothing if it's not."""
+        """Remove result from the set if it is a member.
+
+        Does nothing if it's not a member.
+        """
         try:
         try:
             self.remove(result)
             self.remove(result)
         except KeyError:
         except KeyError:
             pass
             pass
 
 
     def update(self, results):
     def update(self, results):
-        """Update set with the union of itself and an iterable with
-        results."""
+        """Extend from iterable of results."""
         self.results.extend(r for r in results if r not in self.results)
         self.results.extend(r for r in results if r not in self.results)
 
 
     def clear(self):
     def clear(self):
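
A minimal sketch of the membership methods above (the ``add`` task is an assumption, not part of this changeset):

    from celery.result import ResultSet

    rs = ResultSet([])
    rs.add(add.delay(2, 2))
    rs.update([add.delay(4, 4), add.delay(8, 8)])
    rs.discard('not-a-member-id')   # silently ignored (no KeyError)
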
@@ -483,16 +494,16 @@ class ResultSet(ResultBase):
         self.results[:] = []  # don't create new list.
         self.results[:] = []  # don't create new list.
 
 
     def successful(self):
     def successful(self):
-        """Was all of the tasks successful?
+        """Return true if all tasks successful.
 
 
         Returns:
         Returns:
             bool: true if all of the tasks finished
             bool: true if all of the tasks finished
-                successfully (i.e. did not raise an exception).
+                successfully (i.e. didn't raise an exception).
         """
         """
         return all(result.successful() for result in self.results)
         return all(result.successful() for result in self.results)
 
 
     def failed(self):
     def failed(self):
-        """Did any of the tasks fail?
+        """Return true if any of the tasks failed.
 
 
         Returns:
         Returns:
             bool: true if one of the tasks failed.
             bool: true if one of the tasks failed.
@@ -503,9 +514,10 @@ class ResultSet(ResultBase):
     def maybe_throw(self, callback=None, propagate=True):
     def maybe_throw(self, callback=None, propagate=True):
         for result in self.results:
         for result in self.results:
             result.maybe_throw(callback=callback, propagate=propagate)
             result.maybe_throw(callback=callback, propagate=propagate)
+    maybe_reraise = maybe_throw  # XXX compat alias.
 
 
     def waiting(self):
     def waiting(self):
-        """Are any of the tasks incomplete?
+        """Return true if any of the tasks are incomplate.
 
 
         Returns:
         Returns:
             bool: true if one of the tasks are still
             bool: true if one of the tasks are still
@@ -557,12 +569,12 @@ class ResultSet(ResultBase):
         return iter(self.results)
         return iter(self.results)
 
 
     def __getitem__(self, index):
     def __getitem__(self, index):
-        """`res[i] -> res.results[i]`"""
+        """`res[i] -> res.results[i]`."""
         return self.results[index]
         return self.results[index]
 
 
     def get(self, timeout=None, propagate=True, interval=0.5,
     def get(self, timeout=None, propagate=True, interval=0.5,
             callback=None, no_ack=True, on_message=None):
             callback=None, no_ack=True, on_message=None):
-        """See :meth:`join`
+        """See :meth:`join`.
 
 
         This is here for API compatibility with :class:`AsyncResult`,
         This is here for API compatibility with :class:`AsyncResult`,
         in addition it uses :meth:`join_native` if available for the
         in addition it uses :meth:`join_native` if available for the
@@ -578,11 +590,11 @@ class ResultSet(ResultBase):
 
 
     def join(self, timeout=None, propagate=True, interval=0.5,
     def join(self, timeout=None, propagate=True, interval=0.5,
              callback=None, no_ack=True, on_message=None, on_interval=None):
              callback=None, no_ack=True, on_message=None, on_interval=None):
-        """Gathers the results of all tasks as a list in order.
+        """Gather the results of all tasks as a list in order.
 
 
         Note:
         Note:
             This can be an expensive operation for result store
             This can be an expensive operation for result store
-            backends that must resort to polling (e.g. database).
+            backends that must resort to polling (e.g., database).
 
 
             You should consider using :meth:`join_native` if your backend
             You should consider using :meth:`join_native` if your backend
             supports it.
             supports it.
@@ -605,7 +617,7 @@ class ResultSet(ResultBase):
                 No results will be returned by this function if a callback
                 No results will be returned by this function if a callback
                 is specified.  The order of results is also arbitrary when a
                 is specified.  The order of results is also arbitrary when a
                 callback is used.  To get access to the result object for
                 callback is used.  To get access to the result object for
-                a particular id you will have to generate an index first:
+                a particular id you'll have to generate an index first:
                 ``index = {r.id: r for r in gres.results.values()}``
                 ``index = {r.id: r for r in gres.results.values()}``
                 Or you can create new result objects on the fly:
                 Or you can create new result objects on the fly:
                 ``result = app.AsyncResult(task_id)`` (both will
                 ``result = app.AsyncResult(task_id)`` (both will
@@ -615,7 +627,7 @@ class ResultSet(ResultBase):
                 *will not be acknowledged*).
                 *will not be acknowledged*).
 
 
         Raises:
         Raises:
-            celery.exceptions.TimeoutError: if ``timeout`` is not
+            celery.exceptions.TimeoutError: if ``timeout`` isn't
                 :const:`None` and the operation takes longer than ``timeout``
                 :const:`None` and the operation takes longer than ``timeout``
                 seconds.
                 seconds.
         """
         """
@@ -644,7 +656,7 @@ class ResultSet(ResultBase):
                 results.append(value)
                 results.append(value)
         return results
         return results
 
 
-    def then(self, callback, on_error=None):
+    def then(self, callback, on_error=None, weak=False):
         return self.on_ready.then(callback, on_error)
         return self.on_ready.then(callback, on_error)
 
 
     def iter_native(self, timeout=None, interval=0.5, no_ack=True,
     def iter_native(self, timeout=None, interval=0.5, no_ack=True,
@@ -812,10 +824,10 @@ class GroupResult(ResultSet):
         return self.results
         return self.results
 
 
     @classmethod
     @classmethod
-    def restore(self, id, backend=None):
+    def restore(cls, id, backend=None):
         """Restore previously saved group result."""
         """Restore previously saved group result."""
         return (
         return (
-            backend or (self.app.backend if self.app else current_app.backend)
+            backend or (cls.app.backend if cls.app else current_app.backend)
         ).restore_group(id)
         ).restore_group(id)
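
A small sketch of how ``restore`` pairs with ``save`` (assumes an ``add`` task and a result backend that stores group metadata):

    from celery import group
    from celery.result import GroupResult

    res = group(add.s(i, i) for i in range(4))()
    res.save()                           # persist the group's ids
    restored = GroupResult.restore(res.id)
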
 
 
 
 
@@ -825,14 +837,16 @@ class EagerResult(AsyncResult):
     """Result that we know has already been executed."""
     """Result that we know has already been executed."""
 
 
     def __init__(self, id, ret_value, state, traceback=None):
     def __init__(self, id, ret_value, state, traceback=None):
+        # pylint: disable=super-init-not-called
+        # XXX should really not be inheriting from AsyncResult
         self.id = id
         self.id = id
         self._result = ret_value
         self._result = ret_value
         self._state = state
         self._state = state
         self._traceback = traceback
         self._traceback = traceback
-        self.on_ready = promise(args=(self,))
-        self.on_ready()
+        self.on_ready = promise()
+        self.on_ready(self)
 
 
-    def then(self, callback, on_error=None):
+    def then(self, callback, on_error=None, weak=False):
         return self.on_ready.then(callback, on_error)
         return self.on_ready.then(callback, on_error)
 
 
     def _get_task_meta(self):
     def _get_task_meta(self):
@@ -879,7 +893,7 @@ class EagerResult(AsyncResult):
 
 
     @property
     @property
     def result(self):
     def result(self):
-        """The tasks return value"""
+        """The tasks return value."""
         return self._result
         return self._result
 
 
     @property
     @property
@@ -899,6 +913,7 @@ class EagerResult(AsyncResult):
 
 
 
 
 def result_from_tuple(r, app=None):
 def result_from_tuple(r, app=None):
+    """Deserialize result from tuple."""
     # earlier backends may just pickle, so check if
     # earlier backends may just pickle, so check if
     # result is already prepared.
     # result is already prepared.
     app = app_or_default(app)
     app = app_or_default(app)
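
A minimal round-trip sketch (an ``add`` task and an ``app`` instance are assumed for illustration):

    res = add.delay(2, 2)
    tup = res.as_tuple()                 # serializable form
    again = result_from_tuple(tup, app=app)
    assert again.id == res.id
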
@@ -909,7 +924,7 @@ def result_from_tuple(r, app=None):
             return app.GroupResult(
             return app.GroupResult(
                 res, [result_from_tuple(child, app) for child in nodes],
                 res, [result_from_tuple(child, app) for child in nodes],
             )
             )
-        # previously did not include parent
+        # previously didn't include parent
         id, parent = res if isinstance(res, (list, tuple)) else (res, None)
         id, parent = res if isinstance(res, (list, tuple)) else (res, None)
         if parent:
         if parent:
             parent = result_from_tuple(parent, app)
             parent = result_from_tuple(parent, app)

+ 114 - 75
celery/schedules.py

@@ -11,7 +11,7 @@ from kombu.utils.objects import cached_property
 
 
 from . import current_app
 from . import current_app
 from .utils.collections import AttributeDict
 from .utils.collections import AttributeDict
-from .utils.timeutils import (
+from .utils.time import (
     weekday, maybe_timedelta, remaining, humanize_seconds,
     weekday, maybe_timedelta, remaining, humanize_seconds,
     timezone, maybe_make_aware, ffwd, localize
     timezone, maybe_make_aware, ffwd, localize
 )
 )
@@ -24,7 +24,7 @@ __all__ = [
 schedstate = namedtuple('schedstate', ('is_due', 'next'))
 schedstate = namedtuple('schedstate', ('is_due', 'next'))
 
 
 CRON_PATTERN_INVALID = """\
 CRON_PATTERN_INVALID = """\
-Invalid crontab pattern. Valid range is {min}-{max}. \
+Invalid crontab pattern.  Valid range is {min}-{max}. \
 '{value}' was found.\
 '{value}' was found.\
 """
 """
 
 
@@ -59,7 +59,47 @@ class ParseException(Exception):
     """Raised by :class:`crontab_parser` when the input can't be parsed."""
     """Raised by :class:`crontab_parser` when the input can't be parsed."""
 
 
 
 
-class schedule:
+class BaseSchedule(object):
+
+    def __init__(self, nowfun=None, app=None):
+        self.nowfun = nowfun
+        self._app = app
+
+    def now(self):
+        return (self.nowfun or self.app.now)()
+
+    def remaining_estimate(self, last_run_at):
+        raise NotImplementedError()
+
+    def is_due(self, last_run_at):
+        raise NotImplementedError()
+
+    def maybe_make_aware(self, dt):
+        return maybe_make_aware(dt, self.tz)
+
+    @property
+    def app(self):
+        return self._app or current_app._get_current_object()
+
+    @app.setter  # noqa
+    def app(self, app):
+        self._app = app
+
+    @cached_property
+    def tz(self):
+        return self.app.timezone
+
+    @cached_property
+    def utc_enabled(self):
+        return self.app.conf.enable_utc
+
+    def to_local(self, dt):
+        if not self.utc_enabled:
+            return timezone.to_local_fallback(dt)
+        return dt
+
+
+class schedule(BaseSchedule):
     """Schedule for periodic task.
     """Schedule for periodic task.
 
 
     Arguments:
     Arguments:
@@ -70,16 +110,13 @@ class schedule:
             (class:`~datetime.datetime`).
             (class:`~datetime.datetime`).
         app (~@Celery): Celery app instance.
         app (~@Celery): Celery app instance.
     """
     """
+
     relative = False
     relative = False
 
 
     def __init__(self, run_every=None, relative=False, nowfun=None, app=None):
     def __init__(self, run_every=None, relative=False, nowfun=None, app=None):
         self.run_every = maybe_timedelta(run_every)
         self.run_every = maybe_timedelta(run_every)
         self.relative = relative
         self.relative = relative
-        self.nowfun = nowfun
-        self._app = app
-
-    def now(self):
-        return (self.nowfun or self.app.now)()
+        super(schedule, self).__init__(nowfun=nowfun, app=app)
 
 
     def remaining_estimate(self, last_run_at):
     def remaining_estimate(self, last_run_at):
         return remaining(
         return remaining(
@@ -88,20 +125,22 @@ class schedule:
         )
         )
 
 
     def is_due(self, last_run_at):
     def is_due(self, last_run_at):
-        """Returns tuple of two items ``(is_due, next_time_to_check)``,
-        where next time to check is in seconds.
+        """Return tuple of ``(is_due, next_time_to_check)``.
+
+        Notes:
+            - next time to check is in seconds.
 
 
-        * ``(True, 20)``, means the task should be run now, and the next
-            time to check is in 20 seconds.
+            - ``(True, 20)``, means the task should be run now, and the next
+                time to check is in 20 seconds.
 
 
-        * ``(False, 12.3)``, means the task is not due, but that the scheduler
-          should check again in 12.3 seconds.
+            - ``(False, 12.3)``, means the task is not due, but that the
+              scheduler should check again in 12.3 seconds.
 
 
         The next time to check is used to save energy/CPU cycles,
         The next time to check is used to save energy/CPU cycles,
         it does not need to be accurate but will influence the precision
         it does not need to be accurate but will influence the precision
         of your schedule.  You must also keep in mind
         of your schedule.  You must also keep in mind
         the value of :setting:`beat_max_loop_interval`,
         the value of :setting:`beat_max_loop_interval`,
-        which decides the maximum number of seconds the scheduler can
+        that decides the maximum number of seconds the scheduler can
         sleep between re-checking the periodic task intervals.  So if you
         sleep between re-checking the periodic task intervals.  So if you
         have a task that changes schedule at run-time then your next_run_at
         have a task that changes schedule at run-time then your next_run_at
         check will decide how long it will take before a change to the
         check will decide how long it will take before a change to the
@@ -111,8 +150,8 @@ class schedule:
         .. admonition:: Scheduler max interval variance
         .. admonition:: Scheduler max interval variance
 
 
             The default max loop interval may vary for different schedulers.
             The default max loop interval may vary for different schedulers.
-            For the default scheduler the value is 5 minutes, but for e.g.
-            the :pypi:`django-celery` database scheduler the value
+            For the default scheduler the value is 5 minutes, but for example
+            the :pypi:`django-celery-beat` database scheduler the value
             is 5 seconds.
             is 5 seconds.
         """
         """
         last_run_at = self.maybe_make_aware(last_run_at)
         last_run_at = self.maybe_make_aware(last_run_at)
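
A sketch of how a scheduler consumes the ``(is_due, next_time_to_check)`` pair described above (``last_run_at`` and ``run_the_task`` are illustrative placeholders):

    sched = schedule(run_every=30.0)
    is_due, next_check = sched.is_due(last_run_at)
    if is_due:
        run_the_task()
    # sleep at most ``next_check`` seconds before asking this entry again
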
@@ -122,9 +161,6 @@ class schedule:
             return schedstate(is_due=True, next=self.seconds)
             return schedstate(is_due=True, next=self.seconds)
         return schedstate(is_due=False, next=remaining_s)
         return schedstate(is_due=False, next=remaining_s)
 
 
-    def maybe_make_aware(self, dt):
-        return maybe_make_aware(dt, self.tz)
-
     def __repr__(self):
     def __repr__(self):
         return '<freq: {0.human_seconds}>'.format(self)
         return '<freq: {0.human_seconds}>'.format(self)
 
 
@@ -147,30 +183,11 @@ class schedule:
     def human_seconds(self):
     def human_seconds(self):
         return humanize_seconds(self.seconds)
         return humanize_seconds(self.seconds)
 
 
-    @property
-    def app(self):
-        return self._app or current_app._get_current_object()
-
-    @app.setter  # noqa
-    def app(self, app):
-        self._app = app
-
-    @cached_property
-    def tz(self):
-        return self.app.timezone
-
-    @cached_property
-    def utc_enabled(self):
-        return self.app.conf.enable_utc
-
-    def to_local(self, dt):
-        if not self.utc_enabled:
-            return timezone.to_local_fallback(dt)
-        return dt
-
 
 
 class crontab_parser:
 class crontab_parser:
-    """Parser for Crontab expressions. Any expression of the form 'groups'
+    """Parser for Crontab expressions.
+
+    Any expression of the form 'groups'
     (see BNF grammar below) is accepted and expanded to a set of numbers.
     (see BNF grammar below) is accepted and expanded to a set of numbers.
     These numbers represent the units of time that the Crontab needs to
     These numbers represent the units of time that the Crontab needs to
     run on:
     run on:
@@ -214,6 +231,7 @@ class crontab_parser:
 
 
         :math:`max_ + min_ - 1`
         :math:`max_ + min_ - 1`
     """
     """
+
     ParseException = ParseException
     ParseException = ParseException
 
 
     _range = r'(\w+?)-(\w+)'
     _range = r'(\w+?)-(\w+)'
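
The grammar described above expands as in this small sketch:

    from celery.schedules import crontab_parser

    crontab_parser(60).parse('*/15')     # {0, 15, 30, 45}
    crontab_parser(7).parse('*')         # {0, 1, 2, 3, 4, 5, 6}
    crontab_parser(24).parse('1-4,8')    # {1, 2, 3, 4, 8}
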
@@ -290,12 +308,14 @@ class crontab_parser:
         return i
         return i
 
 
 
 
-class crontab(schedule):
-    """A Crontab can be used as the ``run_every`` value of a
+class crontab(BaseSchedule):
+    """Crontab schedule.
+
+    A Crontab can be used as the ``run_every`` value of a
     periodic task entry to add :manpage:`crontab(5)`-like scheduling.
     periodic task entry to add :manpage:`crontab(5)`-like scheduling.
 
 
     Like a :manpage:`cron(5)`-job, you can specify units of time of when
     Like a :manpage:`cron(5)`-job, you can specify units of time of when
-    you would like the task to execute. It is a reasonably complete
+    you'd like the task to execute.  It's a reasonably complete
     implementation of :command:`cron`'s features, so it should provide a fair
     implementation of :command:`cron`'s features, so it should provide a fair
     degree of scheduling needs.
     degree of scheduling needs.
 
 
@@ -356,7 +376,7 @@ class crontab(schedule):
 
 
         The Celery app instance.
         The Celery app instance.
 
 
-    It is important to realize that any day on which execution should
+    It's important to realize that any day on which execution should
     occur must be represented by entries in all three of the day and
     occur must be represented by entries in all three of the day and
     month attributes.  For example, if ``day_of_week`` is 0 and
     month attributes.  For example, if ``day_of_week`` is 0 and
     ``day_of_month`` is every seventh day, only months that begin
     ``day_of_month`` is every seventh day, only months that begin
@@ -367,7 +387,7 @@ class crontab(schedule):
     """
     """
 
 
     def __init__(self, minute='*', hour='*', day_of_week='*',
     def __init__(self, minute='*', hour='*', day_of_week='*',
-                 day_of_month='*', month_of_year='*', nowfun=None, app=None):
+                 day_of_month='*', month_of_year='*', **kwargs):
         self._orig_minute = cronfield(minute)
         self._orig_minute = cronfield(minute)
         self._orig_hour = cronfield(hour)
         self._orig_hour = cronfield(hour)
         self._orig_day_of_week = cronfield(day_of_week)
         self._orig_day_of_week = cronfield(day_of_week)
@@ -378,12 +398,13 @@ class crontab(schedule):
         self.day_of_week = self._expand_cronspec(day_of_week, 7)
         self.day_of_week = self._expand_cronspec(day_of_week, 7)
         self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
         self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
         self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
         self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
-        self.nowfun = nowfun
-        self._app = app
+        super(crontab, self).__init__(**kwargs)
 
 
     @staticmethod
     @staticmethod
     def _expand_cronspec(cronspec, max_, min_=0):
     def _expand_cronspec(cronspec, max_, min_=0):
-        """Takes the given cronspec argument in one of the forms:
+        """Expand cron specification.
+
+        Takes the given cronspec argument in one of the forms:
 
 
         .. code-block:: text
         .. code-block:: text
 
 
@@ -394,8 +415,8 @@ class crontab(schedule):
 
 
         And convert it to an (expanded) set representing all time unit
         And convert it to an (expanded) set representing all time unit
         values on which the Crontab triggers.  Only in case of the base
         values on which the Crontab triggers.  Only in case of the base
-        type being :class:`str`, parsing occurs.  (It is fast and
-        happens only once for each Crontab instance, so there is no
+        type being :class:`str`, parsing occurs.  (It's fast and
+        happens only once for each Crontab instance, so there's no
         significant performance overhead involved.)
         significant performance overhead involved.)
 
 
         For the other base types, merely Python type conversions happen.
         For the other base types, merely Python type conversions happen.
@@ -425,8 +446,10 @@ class crontab(schedule):
         return result
         return result
 
 
     def _delta_to_next(self, last_run_at, next_hour, next_minute):
     def _delta_to_next(self, last_run_at, next_hour, next_minute):
-        """Takes a :class:`~datetime.datetime` of last run, next minute and hour,
-        and returns a :class:`~celery.utils.timeutils.ffwd` for the next
+        """Find next delta.
+
+        Takes a :class:`~datetime.datetime` of last run, next minute and hour,
+        and returns a :class:`~celery.utils.time.ffwd` for the next
         scheduled day and time.
         scheduled day and time.
 
 
         Only called when ``day_of_month`` and/or ``month_of_year``
         Only called when ``day_of_month`` and/or ``month_of_year``
@@ -493,9 +516,6 @@ class crontab(schedule):
                     second=0,
                     second=0,
                     microsecond=0)
                     microsecond=0)
 
 
-    def now(self):
-        return (self.nowfun or self.app.now)()
-
     def __repr__(self):
     def __repr__(self):
         return CRON_REPR.format(self)
         return CRON_REPR.format(self)
 
 
@@ -507,6 +527,8 @@ class crontab(schedule):
                                  self._orig_month_of_year), None)
                                  self._orig_month_of_year), None)
 
 
     def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
     def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
+        # pylint: disable=redefined-outer-name
+        # caching global ffwd
         tz = tz or self.tz
         tz = tz or self.tz
         last_run_at = self.maybe_make_aware(last_run_at)
         last_run_at = self.maybe_make_aware(last_run_at)
         now = self.maybe_make_aware(self.now())
         now = self.maybe_make_aware(self.now())
@@ -564,15 +586,23 @@ class crontab(schedule):
         return self.to_local(last_run_at), delta, self.to_local(now)
         return self.to_local(last_run_at), delta, self.to_local(now)
 
 
     def remaining_estimate(self, last_run_at, ffwd=ffwd):
     def remaining_estimate(self, last_run_at, ffwd=ffwd):
-        """Returns when the periodic task should run next as a
-        :class:`~datetime.timedelta`."""
+        """Estimate of next run time.
+
+        Returns when the periodic task should run next as a
+        :class:`~datetime.timedelta`.
+        """
+        # pylint: disable=redefined-outer-name
+        # caching global ffwd
         return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd))
         return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd))
 
 
     def is_due(self, last_run_at):
     def is_due(self, last_run_at):
-        """Returns tuple of two items ``(is_due, next_time_to_run)``,
-        where next time to run is in seconds.
+        """Return tuple of ``(is_due, next_time_to_run)``.
 
 
-        See :meth:`celery.schedules.schedule.is_due` for more information.
+        Note:
+            Next time to run is in seconds.
+
+        See Also:
+            :meth:`celery.schedules.schedule.is_due` for more information.
         """
         """
         rem_delta = self.remaining_estimate(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
         rem = max(rem_delta.total_seconds(), 0)
         rem = max(rem_delta.total_seconds(), 0)
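
A typical beat entry using ``crontab`` looks like the following sketch (the app and task name are assumptions for illustration):

    from celery.schedules import crontab

    app.conf.beat_schedule = {
        'nightly-cleanup': {
            'task': 'proj.tasks.cleanup',
            'schedule': crontab(minute=30, hour=3),   # every day at 03:30
        },
    }
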
@@ -601,6 +631,7 @@ class crontab(schedule):
 
 
 
 
 def maybe_schedule(s, relative=False, app=None):
 def maybe_schedule(s, relative=False, app=None):
+    """Return schedule from number, timedelta, or actual schedule."""
     if s is not None:
     if s is not None:
         if isinstance(s, numbers.Number):
         if isinstance(s, numbers.Number):
             s = timedelta(seconds=s)
             s = timedelta(seconds=s)
@@ -611,8 +642,10 @@ def maybe_schedule(s, relative=False, app=None):
     return s
     return s
 
 
 
 
-class solar(schedule):
-    """A solar event can be used as the ``run_every`` value of a
+class solar(BaseSchedule):
+    """Solar event.
+
+    A solar event can be used as the ``run_every`` value of a
     periodic task entry to schedule based on certain solar events.
     periodic task entry to schedule based on certain solar events.
 
 
     Notes:
     Notes:
@@ -684,13 +717,12 @@ class solar(schedule):
         'dusk_astronomical': True,
         'dusk_astronomical': True,
     }
     }
 
 
-    def __init__(self, event, lat, lon, nowfun=None, app=None):
+    def __init__(self, event, lat, lon, **kwargs):
         self.ephem = __import__('ephem')
         self.ephem = __import__('ephem')
         self.event = event
         self.event = event
         self.lat = lat
         self.lat = lat
         self.lon = lon
         self.lon = lon
-        self.nowfun = nowfun
-        self._app = app
+        super(solar, self).__init__(**kwargs)
 
 
         if event not in self._all_events:
         if event not in self._all_events:
             raise ValueError(SOLAR_INVALID_EVENT.format(
             raise ValueError(SOLAR_INVALID_EVENT.format(
@@ -721,10 +753,14 @@ class solar(schedule):
         )
         )
 
 
     def remaining_estimate(self, last_run_at):
     def remaining_estimate(self, last_run_at):
-        """Returns when the periodic task should run next as a
-        :class:`~datetime.timedelta`, or if it shouldn't run today (e.g.
-        the sun does not rise today), returns the time when the next check
-        should take place."""
+        """Return estimate of next time to run.
+
+        Returns:
+            ~datetime.timedelta: when the periodic task should
+                run next, or if it shouldn't run today (e.g., the sun does
+                not rise today), returns the time when the next check
+                should take place.
+        """
         last_run_at = self.maybe_make_aware(last_run_at)
         last_run_at = self.maybe_make_aware(last_run_at)
         last_run_at_utc = localize(last_run_at, timezone.utc)
         last_run_at_utc = localize(last_run_at, timezone.utc)
         self.cal.date = last_run_at_utc
         self.cal.date = last_run_at_utc
@@ -734,7 +770,7 @@ class solar(schedule):
                 start=last_run_at_utc, use_center=self.use_center,
                 start=last_run_at_utc, use_center=self.use_center,
             )
             )
         except self.ephem.CircumpolarError:  # pragma: no cover
         except self.ephem.CircumpolarError:  # pragma: no cover
-            # Sun will not rise/set today. Check again tomorrow
+            # Sun won't rise/set today.  Check again tomorrow
             # (specifically, after the next anti-transit).
             # (specifically, after the next anti-transit).
             next_utc = (
             next_utc = (
                 self.cal.next_antitransit(self.ephem.Sun()) +
                 self.cal.next_antitransit(self.ephem.Sun()) +
@@ -746,10 +782,13 @@ class solar(schedule):
         return delta
         return delta
 
 
     def is_due(self, last_run_at):
     def is_due(self, last_run_at):
-        """Returns tuple of two items ``(is_due, next_time_to_run)``,
-        where next time to run is in seconds.
+        """Return tuple of ``(is_due, next_time_to_run)``.
+
+        Note:
+            Next time to run is in seconds.
 
 
-        See :meth:`celery.schedules.schedule.is_due` for more information.
+        See Also:
+            :meth:`celery.schedules.schedule.is_due` for more information.
         """
         """
         rem_delta = self.remaining_estimate(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
         rem = max(rem_delta.total_seconds(), 0)
         rem = max(rem_delta.total_seconds(), 0)
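
A corresponding sketch for ``solar`` (coordinates and task name are illustrative; the :pypi:`ephem` package must be installed):

    from celery.schedules import solar

    app.conf.beat_schedule = {
        'sunset-report': {
            'task': 'proj.tasks.report',
            'schedule': solar('sunset', -37.81753, 144.96715),
        },
    }
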

+ 7 - 7
celery/security/certificate.py

@@ -29,33 +29,33 @@ class Certificate:
         return bytes_to_str(self._cert.get_serial_number())
         return bytes_to_str(self._cert.get_serial_number())
 
 
     def get_issuer(self):
     def get_issuer(self):
-        """Return issuer (CA) as a string"""
+        """Return issuer (CA) as a string."""
         return ' '.join(bytes_to_str(x[1]) for x in
         return ' '.join(bytes_to_str(x[1]) for x in
                         self._cert.get_issuer().get_components())
                         self._cert.get_issuer().get_components())
 
 
     def get_id(self):
     def get_id(self):
-        """Serial number/issuer pair uniquely identifies a certificate"""
+        """Serial number/issuer pair uniquely identifies a certificate."""
         return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())
         return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())
 
 
     def verify(self, data, signature, digest):
     def verify(self, data, signature, digest):
-        """Verifies the signature for string containing data."""
+        """Verify signature for string containing data."""
         with reraise_errors('Bad signature: {0!r}'):
         with reraise_errors('Bad signature: {0!r}'):
             crypto.verify(self._cert, signature, data, digest)
             crypto.verify(self._cert, signature, data, digest)
 
 
 
 
 class CertStore:
 class CertStore:
-    """Base class for certificate stores"""
+    """Base class for certificate stores."""
 
 
     def __init__(self):
     def __init__(self):
         self._certs = {}
         self._certs = {}
 
 
     def itercerts(self):
     def itercerts(self):
-        """an iterator over the certificates"""
+        """Return certificate iterator."""
         for c in self._certs.values():
         for c in self._certs.values():
             yield c
             yield c
 
 
     def __getitem__(self, id):
     def __getitem__(self, id):
-        """get certificate by id"""
+        """Get certificate by id."""
         try:
         try:
             return self._certs[bytes_to_str(id)]
             return self._certs[bytes_to_str(id)]
         except KeyError:
         except KeyError:
@@ -69,7 +69,7 @@ class CertStore:
 
 
 
 
 class FSCertStore(CertStore):
 class FSCertStore(CertStore):
-    """File system certificate store"""
+    """File system certificate store."""
 
 
     def __init__(self, path):
     def __init__(self, path):
         CertStore.__init__(self)
         CertStore.__init__(self)

+ 2 - 1
celery/security/key.py

@@ -8,12 +8,13 @@ __all__ = ['PrivateKey']
 
 
 
 
 class PrivateKey:
 class PrivateKey:
+    """Represents a private key."""
 
 
     def __init__(self, key):
     def __init__(self, key):
         with reraise_errors('Invalid private key: {0!r}'):
         with reraise_errors('Invalid private key: {0!r}'):
             self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
             self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
 
 
     def sign(self, data, digest):
     def sign(self, data, digest):
-        """sign string containing data."""
+        """Sign string containing data."""
         with reraise_errors('Unable to sign data: {0!r}'):
         with reraise_errors('Unable to sign data: {0!r}'):
             return crypto.sign(self._key, ensure_bytes(data), digest)
             return crypto.sign(self._key, ensure_bytes(data), digest)

+ 4 - 3
celery/security/serialization.py

@@ -13,6 +13,7 @@ __all__ = ['SecureSerializer', 'register_auth']
 
 
 
 
 class SecureSerializer:
 class SecureSerializer:
+    """Signed serializer."""
 
 
     def __init__(self, key=None, cert=None, cert_store=None,
     def __init__(self, key=None, cert=None, cert_store=None,
                  digest='sha1', serializer='json'):
                  digest='sha1', serializer='json'):
@@ -23,7 +24,7 @@ class SecureSerializer:
         self._serializer = serializer
         self._serializer = serializer
 
 
     def serialize(self, data):
     def serialize(self, data):
-        """serialize data structure into string"""
+        """Serialize data structure into string."""
         assert self._key is not None
         assert self._key is not None
         assert self._cert is not None
         assert self._cert is not None
         with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
         with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
@@ -39,7 +40,7 @@ class SecureSerializer:
                               signer=self._cert.get_id())
                               signer=self._cert.get_id())
 
 
     def deserialize(self, data):
     def deserialize(self, data):
-        """deserialize data structure from string"""
+        """Deserialize data structure from string."""
         assert self._cert_store is not None
         assert self._cert_store is not None
         with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):
         with reraise_errors('Unable to deserialize: {0!r}', (Exception,)):
             payload = self._unpack(data)
             payload = self._unpack(data)
@@ -84,7 +85,7 @@ class SecureSerializer:
 
 
 def register_auth(key=None, cert=None, store=None, digest='sha1',
 def register_auth(key=None, cert=None, store=None, digest='sha1',
                   serializer='json'):
                   serializer='json'):
-    """register security serializer"""
+    """Register security serializer."""
     s = SecureSerializer(key and PrivateKey(key),
     s = SecureSerializer(key and PrivateKey(key),
                          cert and Certificate(cert),
                          cert and Certificate(cert),
                          store and FSCertStore(store),
                          store and FSCertStore(store),
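
For context, wiring the pieces above together is usually done through app configuration rather than by calling ``register_auth`` directly; a minimal sketch with placeholder paths:

    app.conf.update(
        security_key='/etc/ssl/private/worker.key',
        security_certificate='/etc/ssl/certs/worker.pem',
        security_cert_store='/etc/ssl/certs/*.pem',
        task_serializer='auth',
        accept_content=['auth'],
    )
    app.setup_security()                 # installs the 'auth' serializer
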

+ 1 - 0
celery/security/utils.py

@@ -16,6 +16,7 @@ __all__ = ['reraise_errors']
 
 
 @contextmanager
 @contextmanager
 def reraise_errors(msg='{0!r}', errors=None):
 def reraise_errors(msg='{0!r}', errors=None):
+    """Context reraising crypto errors as :exc:`SecurityError`."""
     assert crypto is not None
     assert crypto is not None
     errors = (crypto.Error,) if errors is None else errors
     errors = (crypto.Error,) if errors is None else errors
     try:
     try:

+ 6 - 4
celery/states.py

@@ -25,7 +25,7 @@ Set of states meaning the task result is ready (has been executed).
 UNREADY_STATES
 UNREADY_STATES
 ~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~
 
 
-Set of states meaning the task result is not ready (has not been executed).
+Set of states meaning the task result is not ready (hasn't been executed).
 
 
 .. state:: EXCEPTION_STATES
 .. state:: EXCEPTION_STATES
 
 
@@ -48,8 +48,8 @@ ALL_STATES
 
 
 Set of all possible states.
 Set of all possible states.
 
 
-Misc.
------
+Misc
+----
 
 
 """
 """
 from typing import Any, Optional
 from typing import Any, Optional
@@ -92,7 +92,9 @@ def precedence(state: Optional[str]) -> int:
 
 
 
 
 class state(str):
 class state(str):
-    """State is a subclass of :class:`str`, implementing comparison
+    """Task state.
+
+    State is a subclass of :class:`str`, implementing comparison
     methods adhering to state precedence rules::
     methods adhering to state precedence rules::
 
 
         >>> from celery.states import state, PENDING, SUCCESS
         >>> from celery.states import state, PENDING, SUCCESS

+ 0 - 94
celery/tests/__init__.py

@@ -1,94 +0,0 @@
-import logging
-import os
-import sys
-import warnings
-
-from importlib import import_module
-
-PYPY3 = getattr(sys, 'pypy_version_info', None)
-
-try:
-    WindowsError = WindowsError  # noqa
-except NameError:
-
-    class WindowsError(Exception):
-        pass
-
-
-def setup():
-    using_coverage = (
-        os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv
-    )
-    os.environ.update(
-        # warn if config module not found
-        C_WNOCONF='yes',
-        KOMBU_DISABLE_LIMIT_PROTECTION='yes',
-    )
-
-    if using_coverage and not PYPY3:
-        from warnings import catch_warnings
-        with catch_warnings(record=True):
-            import_all_modules()
-        warnings.resetwarnings()
-    from celery.tests.case import Trap
-    from celery._state import set_default_app
-    set_default_app(Trap())
-
-
-def teardown():
-    # Don't want SUBDEBUG log messages at finalization.
-    try:
-        from multiprocessing.util import get_logger
-    except ImportError:
-        pass
-    else:
-        get_logger().setLevel(logging.WARNING)
-
-    # Make sure test database is removed.
-    import os
-    if os.path.exists('test.db'):
-        try:
-            os.remove('test.db')
-        except WindowsError:
-            pass
-
-    # Make sure there are no remaining threads at shutdown.
-    import threading
-    remaining_threads = [thread for thread in threading.enumerate()
-                         if thread.getName() != 'MainThread']
-    if remaining_threads:
-        sys.stderr.write(
-            '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % (
-                remaining_threads))
-
-
-def find_distribution_modules(name=__name__, file=__file__):
-    current_dist_depth = len(name.split('.')) - 1
-    current_dist = os.path.join(os.path.dirname(file),
-                                *([os.pardir] * current_dist_depth))
-    abs = os.path.abspath(current_dist)
-    dist_name = os.path.basename(abs)
-
-    for dirpath, dirnames, filenames in os.walk(abs):
-        package = (dist_name + dirpath[len(abs):]).replace('/', '.')
-        if '__init__.py' in filenames:
-            yield package
-            for filename in filenames:
-                if filename.endswith('.py') and filename != '__init__.py':
-                    yield '.'.join([package, filename])[:-3]
-
-
-def import_all_modules(name=__name__, file=__file__,
-                       skip=('celery.decorators',
-                             'celery.task')):
-    for module in find_distribution_modules(name, file):
-        if not module.startswith(skip):
-            try:
-                import_module(module)
-            except ImportError:
-                pass
-            except OSError as exc:
-                warnings.warn(UserWarning(
-                    'Ignored error importing module {0}: {1!r}'.format(
-                        module, exc,
-                    )))

Some files were not shown because too many files changed in this diff