
Merge branch 'master' into bigbeat

Conflicts:
	celery/beat.py
Ask Solem 11 years ago
parent
commit
ed629f615a
100 changed files with 3853 additions and 1897 deletions
  1. .coveragerc (+6, -0)
  2. .gitignore (+4, -0)
  3. .travis.yml (+32, -6)
  4. CONTRIBUTING.rst (+1065, -0)
  5. CONTRIBUTORS.txt (+10, -2)
  6. Changelog (+287, -0)
  7. README.rst (+6, -6)
  8. celery/__init__.py (+7, -6)
  9. celery/__main__.py (+1, -1)
  10. celery/_state.py (+26, -31)
  11. celery/app/__init__.py (+5, -6)
  12. celery/app/amqp.py (+264, -224)
  13. celery/app/base.py (+101, -58)
  14. celery/app/builtins.py (+38, -235)
  15. celery/app/control.py (+11, -11)
  16. celery/app/defaults.py (+4, -2)
  17. celery/app/log.py (+12, -7)
  18. celery/app/registry.py (+2, -2)
  19. celery/app/task.py (+50, -42)
  20. celery/app/trace.py (+210, -39)
  21. celery/app/utils.py (+15, -10)
  22. celery/apps/beat.py (+10, -6)
  23. celery/apps/worker.py (+15, -10)
  24. celery/backends/__init__.py (+0, -2)
  25. celery/backends/amqp.py (+16, -8)
  26. celery/backends/base.py (+87, -46)
  27. celery/backends/cache.py (+1, -1)
  28. celery/backends/cassandra.py (+4, -5)
  29. celery/backends/database/__init__.py (+40, -31)
  30. celery/backends/database/session.py (+46, -49)
  31. celery/backends/mongodb.py (+0, -1)
  32. celery/backends/redis.py (+76, -4)
  33. celery/beat.py (+35, -16)
  34. celery/bin/amqp.py (+10, -9)
  35. celery/bin/base.py (+15, -21)
  36. celery/bin/beat.py (+1, -1)
  37. celery/bin/celery.py (+36, -12)
  38. celery/bin/celeryd_detach.py (+3, -1)
  39. celery/bin/events.py (+2, -2)
  40. celery/bin/graph.py (+1, -1)
  41. celery/bin/multi.py (+45, -38)
  42. celery/bin/worker.py (+14, -7)
  43. celery/bootsteps.py (+3, -2)
  44. celery/canvas.py (+248, -66)
  45. celery/concurrency/asynpool.py (+53, -48)
  46. celery/concurrency/base.py (+4, -2)
  47. celery/concurrency/eventlet.py (+7, -0)
  48. celery/concurrency/prefork.py (+7, -2)
  49. celery/concurrency/solo.py (+1, -0)
  50. celery/contrib/abortable.py (+35, -32)
  51. celery/contrib/batches.py (+5, -4)
  52. celery/contrib/migrate.py (+3, -3)
  53. celery/contrib/rdb.py (+1, -1)
  54. celery/contrib/sphinx.py (+73, -0)
  55. celery/datastructures.py (+4, -4)
  56. celery/events/__init__.py (+5, -18)
  57. celery/events/dumper.py (+1, -1)
  58. celery/events/snapshot.py (+1, -1)
  59. celery/events/state.py (+26, -20)
  60. celery/exceptions.py (+5, -2)
  61. celery/five.py (+20, -188)
  62. celery/fixups/django.py (+15, -6)
  63. celery/loaders/base.py (+2, -2)
  64. celery/local.py (+33, -2)
  65. celery/platforms.py (+20, -11)
  66. celery/result.py (+101, -29)
  67. celery/schedules.py (+11, -10)
  68. celery/security/certificate.py (+5, -4)
  69. celery/security/serialization.py (+6, -9)
  70. celery/task/__init__.py (+2, -2)
  71. celery/task/base.py (+23, -7)
  72. celery/task/http.py (+9, -9)
  73. celery/task/sets.py (+1, -1)
  74. celery/task/trace.py (+0, -12)
  75. celery/tests/__init__.py (+1, -1)
  76. celery/tests/app/test_amqp.py (+4, -96)
  77. celery/tests/app/test_app.py (+26, -23)
  78. celery/tests/app/test_beat.py (+36, -1)
  79. celery/tests/app/test_builtins.py (+10, -12)
  80. celery/tests/app/test_loaders.py (+4, -7)
  81. celery/tests/app/test_log.py (+43, -19)
  82. celery/tests/app/test_schedules.py (+49, -51)
  83. celery/tests/app/test_utils.py (+17, -1)
  84. celery/tests/backends/test_amqp.py (+14, -13)
  85. celery/tests/backends/test_backends.py (+0, -9)
  86. celery/tests/backends/test_base.py (+13, -10)
  87. celery/tests/backends/test_cache.py (+2, -2)
  88. celery/tests/backends/test_couchbase.py (+4, -4)
  89. celery/tests/backends/test_database.py (+3, -3)
  90. celery/tests/backends/test_mongodb.py (+11, -10)
  91. celery/tests/backends/test_redis.py (+111, -86)
  92. celery/tests/bin/test_amqp.py (+1, -1)
  93. celery/tests/bin/test_base.py (+5, -5)
  94. celery/tests/bin/test_celery.py (+2, -1)
  95. celery/tests/bin/test_celeryd_detach.py (+5, -2)
  96. celery/tests/bin/test_multi.py (+2, -11)
  97. celery/tests/bin/test_worker.py (+26, -18)
  98. celery/tests/case.py (+118, -24)
  99. celery/tests/compat_modules/test_compat.py (+3, -26)
  100. celery/tests/compat_modules/test_compat_utils.py (+0, -4)

+ 6 - 0
.coveragerc

@@ -0,0 +1,6 @@
+[run]
+branch = 1
+cover_pylib = 0
+omit = celery.utils.debug,celery.tests.*,celery.bin.graph
+[report]
+omit = */python?.?/*,*/site-packages/*,*/pypy/*

+ 4 - 0
.gitignore

@@ -20,4 +20,8 @@ Documentation/
 .project
 .pydevproject
 .idea/
+.coverage
 celery/tests/cover/
+.ve*
+cover/
+

+ 32 - 6
.travis.yml

@@ -1,8 +1,34 @@
 language: python
-python:
-    - 2.6
-    - 2.7
-    - 3.3
+python: 2.7
+env:
+  global:
+    PYTHONUNBUFFERED=yes
+  matrix:
+    - TOXENV=2.7
+    - TOXENV=3.3
+    - TOXENV=3.4
+    - TOXENV=pypy
+before_install:
+  - |
+    if [[ $TOXENV = pypy ]]; then
+      deactivate
+      sudo apt-add-repository --yes ppa:pypy/ppa
+      sudo apt-get update
+      sudo apt-get install pypy
+      source ~/virtualenv/pypy/bin/activate
+    fi
+    python --version
+    uname -a
+    lsb_release -a
 install:
-    - pip install --use-mirrors tox
-script: TOXENV=py$(echo $TRAVIS_PYTHON_VERSION | tr -d .) tox -v
+  - pip install tox
+script:
+  - tox -v -- -v
+after_success:
+  - .tox/$TRAVIS_PYTHON_VERSION/bin/coveralls
+notifications:
+  irc:
+    channels:
+      - "chat.freenode.net#celery"
+    on_success: change
+    on_failure: always

+ 1065 - 0
CONTRIBUTING.rst

@@ -0,0 +1,1065 @@
+.. _contributing:
+
+==============
+ Contributing
+==============
+
+Welcome!
+
+This document is fairly extensive and you are not really expected
+to study this in detail for small contributions;
+
+    The most important rule is that contributing must be easy
+    and that the community is friendly and not nitpicking on details
+    such as coding style.
+
+If you're reporting a bug you should read the Reporting bugs section
+below to ensure that your bug report contains enough information
+to successfully diagnose the issue, and if you're contributing code
+you should try to mimic the conventions you see surrounding the code
+you are working on, but in the end all patches will be cleaned up by
+the person merging the changes so don't worry too much.
+
+.. contents::
+    :local:
+
+.. _community-code-of-conduct:
+
+Community Code of Conduct
+=========================
+
+The goal is to maintain a diverse community that is pleasant for everyone.
+That is why we would greatly appreciate it if everyone contributing to and
+interacting with the community also followed this Code of Conduct.
+
+The Code of Conduct covers our behavior as members of the community,
+in any forum, mailing list, wiki, website, Internet relay chat (IRC), public
+meeting or private correspondence.
+
+The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and
+the `Pylons Code of Conduct`_.
+
+.. _`Ubuntu Code of Conduct`: http://www.ubuntu.com/community/conduct
+.. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
+
+Be considerate.
+---------------
+
+Your work will be used by other people, and you in turn will depend on the
+work of others.  Any decision you take will affect users and colleagues, and
+we expect you to take those consequences into account when making decisions.
+Even if it's not obvious at the time, our contributions to Celery will impact
+the work of others.  For example, changes to code, infrastructure, policy,
+documentation and translations during a release may negatively impact
+others' work.
+
+Be respectful.
+--------------
+
+The Celery community and its members treat one another with respect.  Everyone
+can make a valuable contribution to Celery.  We may not always agree, but
+disagreement is no excuse for poor behavior and poor manners.  We might all
+experience some frustration now and then, but we cannot allow that frustration
+to turn into a personal attack.  It's important to remember that a community
+where people feel uncomfortable or threatened is not a productive one.  We
+expect members of the Celery community to be respectful when dealing with
+other contributors as well as with people outside the Celery project and with
+users of Celery.
+
+Be collaborative.
+-----------------
+
+Collaboration is central to Celery and to the larger free software community.
+We should always be open to collaboration.  Your work should be done
+transparently and patches from Celery should be given back to the community
+when they are made, not just when the distribution releases.  If you wish
+to work on new code for existing upstream projects, at least keep those
+projects informed of your ideas and progress.  It may not be possible to
+get consensus from upstream, or even from your colleagues about the correct
+implementation for an idea, so don't feel obliged to have that agreement
+before you begin, but at least keep the outside world informed of your work,
+and publish your work in a way that allows outsiders to test, discuss and
+contribute to your efforts.
+
+When you disagree, consult others.
+----------------------------------
+
+Disagreements, both political and technical, happen all the time and
+the Celery community is no exception.  It is important that we resolve
+disagreements and differing views constructively and with the help of the
+community and community process.  If you really want to go a different
+way, then we encourage you to make a derivative distribution or alternate
+set of packages that still build on the work we've done, utilizing as much
+of the common core as possible.
+
+When you are unsure, ask for help.
+----------------------------------
+
+Nobody knows everything, and nobody is expected to be perfect.  Asking
+questions avoids many problems down the road, and so questions are
+encouraged.  Those who are asked questions should be responsive and helpful.
+However, when asking a question, care must be taken to do so in an appropriate
+forum.
+
+Step down considerately.
+------------------------
+
+Developers on every project come and go and Celery is no different.  When you
+leave or disengage from the project, in whole or in part, we ask that you do
+so in a way that minimizes disruption to the project.  This means you should
+tell people you are leaving and take the proper steps to ensure that others
+can pick up where you leave off.
+
+.. _reporting-bugs:
+
+
+Reporting Bugs
+==============
+
+.. _vulnsec:
+
+Security
+--------
+
+You must never report security-related issues, vulnerabilities or bugs
+including sensitive information to the bug tracker, or elsewhere in public.
+Instead, sensitive bugs must be sent by email to ``security@celeryproject.org``.
+
+If you'd like to submit the information encrypted our PGP key is::
+
+    -----BEGIN PGP PUBLIC KEY BLOCK-----
+    Version: GnuPG v1.4.15 (Darwin)
+
+    mQENBFJpWDkBCADFIc9/Fpgse4owLNvsTC7GYfnJL19XO0hnL99sPx+DPbfr+cSE
+    9wiU+Wp2TfUX7pCLEGrODiEP6ZCZbgtiPgId+JYvMxpP6GXbjiIlHRw1EQNH8RlX
+    cVxy3rQfVv8PGGiJuyBBjxzvETHW25htVAZ5TI1+CkxmuyyEYqgZN2fNd0wEU19D
+    +c10G1gSECbCQTCbacLSzdpngAt1Gkrc96r7wGHBBSvDaGDD2pFSkVuTLMbIRrVp
+    lnKOPMsUijiip2EMr2DvfuXiUIUvaqInTPNWkDynLoh69ib5xC19CSVLONjkKBsr
+    Pe+qAY29liBatatpXsydY7GIUzyBT3MzgMJlABEBAAG0MUNlbGVyeSBTZWN1cml0
+    eSBUZWFtIDxzZWN1cml0eUBjZWxlcnlwcm9qZWN0Lm9yZz6JATgEEwECACIFAlJp
+    WDkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOArFOUDCicIw1IH/26f
+    CViDC7/P13jr+srRdjAsWvQztia9HmTlY8cUnbmkR9w6b6j3F2ayw8VhkyFWgYEJ
+    wtPBv8mHKADiVSFARS+0yGsfCkia5wDSQuIv6XqRlIrXUyqJbmF4NUFTyCZYoh+C
+    ZiQpN9xGhFPr5QDlMx2izWg1rvWlG1jY2Es1v/xED3AeCOB1eUGvRe/uJHKjGv7J
+    rj0pFcptZX+WDF22AN235WYwgJM6TrNfSu8sv8vNAQOVnsKcgsqhuwomSGsOfMQj
+    LFzIn95MKBBU1G5wOs7JtwiV9jefGqJGBO2FAvOVbvPdK/saSnB+7K36dQcIHqms
+    5hU4Xj0RIJiod5idlRC5AQ0EUmlYOQEIAJs8OwHMkrdcvy9kk2HBVbdqhgAREMKy
+    gmphDp7prRL9FqSY/dKpCbG0u82zyJypdb7QiaQ5pfPzPpQcd2dIcohkkh7G3E+e
+    hS2L9AXHpwR26/PzMBXyr2iNnNc4vTksHvGVDxzFnRpka6vbI/hrrZmYNYh9EAiv
+    uhE54b3/XhXwFgHjZXb9i8hgJ3nsO0pRwvUAM1bRGMbvf8e9F+kqgV0yWYNnh6QL
+    4Vpl1+epqp2RKPHyNQftbQyrAHXT9kQF9pPlx013MKYaFTADscuAp4T3dy7xmiwS
+    crqMbZLzfrxfFOsNxTUGE5vmJCcm+mybAtRo4aV6ACohAO9NevMx8pUAEQEAAYkB
+    HwQYAQIACQUCUmlYOQIbDAAKCRDgKxTlAwonCNFbB/9esir/f7TufE+isNqErzR/
+    aZKZo2WzZR9c75kbqo6J6DYuUHe6xI0OZ2qZ60iABDEZAiNXGulysFLCiPdatQ8x
+    8zt3DF9BMkEck54ZvAjpNSern6zfZb1jPYWZq3TKxlTs/GuCgBAuV4i5vDTZ7xK/
+    aF+OFY5zN7ciZHkqLgMiTZ+RhqRcK6FhVBP/Y7d9NlBOcDBTxxE1ZO1ute6n7guJ
+    ciw4hfoRk8qNN19szZuq3UU64zpkM2sBsIFM9tGF2FADRxiOaOWZHmIyVZriPFqW
+    RUwjSjs7jBVNq0Vy4fCu/5+e+XLOUBOoqtM5W7ELt0t1w9tXebtPEetV86in8fU2
+    =0chn
+    -----END PGP PUBLIC KEY BLOCK-----
+
+Other bugs
+----------
+
+Bugs can always be reported to the `mailing-list`_, but the best
+way to report an issue and to ensure a timely response is to use the
+issue tracker.
+
+1) **Create a GitHub account.**
+
+You need to `create a GitHub account`_ to be able to create new issues
+and participate in the discussion.
+
+.. _`create a GitHub account`: https://github.com/signup/free
+
+2) **Determine if your bug is really a bug.**
+
+You should not file a bug if you are requesting support.  For that you can use
+the `mailing-list`_, or `irc-channel`_.
+
+3) **Make sure your bug hasn't already been reported.**
+
+Search through the appropriate Issue tracker.  If a bug like yours was found,
+check if you have new information that could be reported to help
+the developers fix the bug.
+
+4) **Check if you're using the latest version.**
+
+A bug could be fixed by some other improvements and fixes - it might not have an
+existing report in the bug tracker. Make sure you're using the latest releases of
+celery, billiard and kombu.
+
+5) **Collect information about the bug.**
+
+To have the best chance of having a bug fixed, we need to be able to easily
+reproduce the conditions that caused it.  Most of the time this information
+will be from a Python traceback message, though some bugs might be in design,
+spelling or other errors on the website/docs/code.
+
+    A) If the error is from a Python traceback, include it in the bug report.
+
+    B) We also need to know what platform you're running (Windows, OS X, Linux,
+       etc.), the version of your Python interpreter, and the version of Celery,
+       and related packages that you were running when the bug occurred.
+
+    C) If you are reporting a race condition or a deadlock, tracebacks can be
+       hard to get or might not be that useful. Try to inspect the process to
+       get more diagnostic data. Some ideas:
+
+       * Enable celery's ``breakpoint_signal`` and use it
+         to inspect the process's state.  This will allow you to open a
+         ``pdb`` session (see the sketch after this list).
+       * Collect tracing data using strace_ (Linux),
+         dtruss (OS X), ktrace (BSD), ltrace_ and lsof_.
+
+    D) Include the output from the `celery report` command:
+        ::
+
+            $ celery -A proj report
+
+        This will also include your configuration settings and it will try to
+        remove values for keys known to be sensitive, but make sure you also
+        verify the information before submitting so that it doesn't contain
+        confidential information like API tokens and authentication
+        credentials.
+
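+    As a sketch of the ``pdb`` idea in point C above, a remote debugger
+    session can be opened from inside a task using the
+    ``celery.contrib.rdb`` module shipped with this repository (the task
+    shown is only an example):
+    ::
+
+        from celery import task
+        from celery.contrib import rdb
+
+        @task()
+        def add(x, y):
+            result = x + y
+            # Blocks until a client (e.g. telnet) connects to the port
+            # announced by the worker, then drops into a pdb session.
+            rdb.set_trace()
+            return result
+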
+6) **Submit the bug.**
+
+By default `GitHub`_ will email you to let you know when new comments have
+been made on your bug. In the event you've turned this feature off, you
+should check back on occasion to ensure you don't miss any questions a
+developer trying to fix the bug might ask.
+
+.. _`GitHub`: http://github.com
+.. _`strace`: http://en.wikipedia.org/wiki/Strace
+.. _`ltrace`: http://en.wikipedia.org/wiki/Ltrace
+.. _`lsof`: http://en.wikipedia.org/wiki/Lsof
+
+.. _issue-trackers:
+
+Issue Trackers
+--------------
+
+Bugs for a package in the Celery ecosystem should be reported to the relevant
+issue tracker.
+
+* Celery: http://github.com/celery/celery/issues/
+* Kombu: http://github.com/celery/kombu/issues
+* amqp: http://github.com/celery/py-amqp/issues
+* librabbitmq: http://github.com/celery/librabbitmq/issues
+* Django-Celery: http://github.com/celery/django-celery/issues
+
+If you are unsure of the origin of the bug you can ask the
+`mailing-list`_, or just use the Celery issue tracker.
+
+Contributors guide to the codebase
+==================================
+
+There's a separate section for internal details,
+including details about the codebase and a style guide.
+
+Read `internals-guide`_ for more!
+
+.. _versions:
+
+Versions
+========
+
+Version numbers consist of a major version, minor version and a release number.
+Since version 2.1.0 we use the versioning semantics described by
+semver: http://semver.org.
+
+Stable releases are published on PyPI,
+while development releases are only available in the GitHub git repository as tags.
+All version tags start with "v", so version 0.8.0 has the tag v0.8.0.
+
+.. _git-branches:
+
+Branches
+========
+
+Current active version branches:
+
+* master (http://github.com/celery/celery/tree/master)
+* 3.1 (http://github.com/celery/celery/tree/3.1)
+* 3.0 (http://github.com/celery/celery/tree/3.0)
+
+You can see the state of any branch by looking at the Changelog:
+
+    https://github.com/celery/celery/blob/master/Changelog
+
+If the branch is in active development the topmost version info should
+contain metadata like::
+
+    2.4.0
+    ======
+    :release-date: TBA
+    :status: DEVELOPMENT
+    :branch: master
+
+The ``status`` field can be one of:
+
+* ``PLANNING``
+
+    The branch is currently experimental and in the planning stage.
+
+* ``DEVELOPMENT``
+
+    The branch is in active development, but the test suite should
+    be passing and the product should be working well enough for users to test.
+
+* ``FROZEN``
+
+    The branch is frozen, and no more features will be accepted.
+    When a branch is frozen the focus is on testing the version as much
+    as possible before it is released.
+
+``master`` branch
+-----------------
+
+The master branch is where development of the next version happens.
+
+Maintenance branches
+--------------------
+
+Maintenance branches are named after the version, e.g. the maintenance branch
+for the 2.2.x series is named ``2.2``.  Previously these were named
+``releaseXX-maint``.
+
+The versions we currently maintain are:
+
+* 3.1
+
+  This is the current series.
+
+* 3.0
+
+  This is the previous series, and the last version to support Python 2.5.
+
+Archived branches
+-----------------
+
+Archived branches are kept for preserving history only,
+and theoretically someone could provide patches for these if they depend
+on a series that is no longer officially supported.
+
+An archived version is named ``X.Y-archived``.
+
+Our currently archived branches are:
+
+* 2.5-archived
+
+* 2.4-archived
+
+* 2.3-archived
+
+* 2.1-archived
+
+* 2.0-archived
+
+* 1.0-archived
+
+Feature branches
+----------------
+
+Major new features are worked on in dedicated branches.
+There is no strict naming requirement for these branches.
+
+Feature branches are removed once they have been merged into a release branch.
+
+Tags
+====
+
+Tags are used exclusively for tagging releases.  A release tag is
+named with the format ``vX.Y.Z``, e.g. ``v2.3.1``.
+Experimental releases contain an additional identifier ``vX.Y.Z-id``, e.g.
+``v3.0.0-rc1``.  Experimental tags may be removed after the official release.
+
+.. _contributing-changes:
+
+Working on Features & Patches
+=============================
+
+.. note::
+
+    Contributing to Celery should be as simple as possible,
+    so none of these steps should be considered mandatory.
+
+    You can even send in patches by email if that is your preferred
+    work method. We won't like you any less, any contribution you make
+    is always appreciated!
+
+    However, following these steps may make the maintainers' lives easier,
+    and may mean that your changes will be accepted sooner.
+
+Forking and setting up the repository
+-------------------------------------
+
+First you need to fork the Celery repository, a good introduction to this
+is in the Github Guide: `Fork a Repo`_.
+
+After you have forked the repository you should clone your fork
+to a directory on your machine:
+::
+
+    $ git clone git@github.com:username/celery.git
+
+When the repository is cloned enter the directory to set up easy access
+to upstream changes:
+::
+
+    $ cd celery
+    $ git remote add upstream git://github.com/celery/celery.git
+    $ git fetch upstream
+
+If you need to pull in new changes from upstream you should
+always use the ``--rebase`` option to ``git pull``:
+::
+
+    git pull --rebase upstream master
+
+With this option you don't clutter the history with merging
+commit notes. See `Rebasing merge commits in git`_.
+If you want to learn more about rebasing see the `Rebase`_
+section in the Github guides.
+
+If you need to work on a different branch than ``master`` you can
+fetch and checkout a remote branch like this::
+
+    git checkout --track -b 3.0-devel origin/3.0-devel
+
+.. _`Fork a Repo`: http://help.github.com/fork-a-repo/
+.. _`Rebasing merge commits in git`:
+    http://notes.envato.com/developers/rebasing-merge-commits-in-git/
+.. _`Rebase`: http://help.github.com/rebase/
+
+.. _contributing-testing:
+
+Running the unit test suite
+---------------------------
+
+To run the Celery test suite you need to install a few dependencies.
+A complete list of the dependencies needed is located in
+``requirements/test.txt``.
+
+Installing the test requirements:
+::
+
+    $ pip install -U -r requirements/test.txt
+
+When installation of dependencies is complete you can execute
+the test suite by calling ``nosetests``:
+::
+
+    $ nosetests
+
+Some useful options to ``nosetests`` are:
+
+* ``-x``
+
+    Stop running the tests at the first test that fails.
+
+* ``-s``
+
+    Don't capture output.
+
+* ``--nologcapture``
+
+    Don't capture log output.
+
+* ``-v``
+
+    Run with verbose output.
+
+If you want to run the tests for a single test file only
+you can do so like this:
+::
+
+    $ nosetests celery.tests.test_worker.test_worker_job
+
+.. _contributing-pull-requests:
+
+Creating pull requests
+----------------------
+
+When your feature/bugfix is complete you may want to submit
+a pull request so that it can be reviewed by the maintainers.
+
+Creating pull requests is easy, and it also lets you track the progress
+of your contribution.  Read the `Pull Requests`_ section in the Github
+Guide to learn how this is done.
+
+You can also attach pull requests to existing issues by following
+the steps outlined here: http://bit.ly/koJoso
+
+.. _`Pull Requests`: http://help.github.com/send-pull-requests/
+
+.. _contributing-coverage:
+
+Calculating test coverage
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To calculate test coverage you must first install the ``coverage`` module.
+
+Installing the ``coverage`` module:
+::
+
+    $ pip install -U coverage
+
+Code coverage in HTML:
+::
+
+    $ nosetests --with-coverage --cover-html
+
+The coverage output will then be located at
+``celery/tests/cover/index.html``.
+
+Code coverage in XML (Cobertura-style):
+::
+
+    $ nosetests --with-coverage --cover-xml --cover-xml-file=coverage.xml
+
+The coverage XML output will then be located at ``coverage.xml``.
+
+.. _contributing-tox:
+
+Running the tests on all supported Python versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is a ``tox`` configuration file in the top directory of the
+distribution.
+
+To run the tests for all supported Python versions simply execute:
+::
+
+    $ tox
+
+If you only want to test specific Python versions use the ``-e``
+option:
+::
+
+    $ tox -e py26
+
+Building the documentation
+--------------------------
+
+To build the documentation you need to install the dependencies
+listed in ``requirements/docs.txt``:
+::
+
+    $ pip install -U -r requirements/docs.txt
+
+After these dependencies are installed you should be able to
+build the docs by running:
+::
+
+    $ cd docs
+    $ rm -rf .build
+    $ make html
+
+Make sure there are no errors or warnings in the build output.
+After building succeeds the documentation is available at ``.build/html``.
+
+.. _contributing-verify:
+
+Verifying your contribution
+---------------------------
+
+To use these tools you need to install a few dependencies.  These dependencies
+can be found in ``requirements/pkgutils.txt``.
+
+Installing the dependencies:
+::
+
+    $ pip install -U -r requirements/pkgutils.txt
+
+pyflakes & PEP8
+~~~~~~~~~~~~~~~
+
+To ensure that your changes conform to PEP8 and to run pyflakes
+execute:
+::
+
+    $ paver flake8
+
+To not return a negative exit code when this command fails, use the
+``-E`` option; this can be convenient while developing:
+::
+
+    $ paver flake8 -E
+
+API reference
+~~~~~~~~~~~~~
+
+To make sure that all modules have a corresponding section in the API
+reference please execute:
+::
+
+    $ paver autodoc
+    $ paver verifyindex
+
+If files are missing you can add them by copying an existing reference file.
+
+If the module is internal it should be part of the internal reference
+located in ``docs/internals/reference/``.  If the module is public
+it should be located in ``docs/reference/``.
+
+For example, if a reference is missing for the module ``celery.worker.awesome``
+and this module is considered part of the public API, use the following steps:
+
+
+Use an existing file as a template:
+::
+
+    $ cd docs/reference/
+    $ cp celery.schedules.rst celery.worker.awesome.rst
+
+Edit the file using your favorite editor:
+::
+
+    $ vim celery.worker.awesome.rst
+
+        # change every occurrence of ``celery.schedules`` to
+        # ``celery.worker.awesome``
+
+
+Edit the index using your favorite editor:
+::
+
+    $ vim index.rst
+
+        # Add ``celery.worker.awesome`` to the index.
+
+
+Commit your changes:
+::
+
+    # Add the file to git
+    $ git add celery.worker.awesome.rst
+    $ git add index.rst
+    $ git commit celery.worker.awesome.rst index.rst \
+        -m "Adds reference for celery.worker.awesome"
+
+.. _coding-style:
+
+Coding Style
+============
+
+You should probably be able to pick up the coding style
+from surrounding code, but it is a good idea to be aware of the
+following conventions.
+
+* All Python code must follow the `PEP-8`_ guidelines.
+
+`pep8.py`_ is a utility you can use to verify that your code
+is following the conventions.
+
+.. _`PEP-8`: http://www.python.org/dev/peps/pep-0008/
+.. _`pep8.py`: http://pypi.python.org/pypi/pep8
+
+* Docstrings must follow the `PEP-257`_ conventions, and use the following
+  style.
+
+    Do this:
+    ::
+
+        def method(self, arg):
+            """Short description.
+
+            More details.
+
+            """
+
+    or:
+    ::
+
+        def method(self, arg):
+            """Short description."""
+
+
+    but not this:
+    ::
+
+        def method(self, arg):
+            """
+            Short description.
+            """
+
+.. _`PEP-257`: http://www.python.org/dev/peps/pep-0257/
+
+* Lines should not exceed 78 columns.
+
+  You can enforce this in ``vim`` by setting the ``textwidth`` option:
+  ::
+
+        set textwidth=78
+
+  If adhering to this limit makes the code less readable, you have one more
+  character to go on, which means 78 is a soft limit, and 79 is the hard
+  limit :)
+
+* Import order
+
+    * Python standard library (`import xxx`)
+    * Python standard library (`from xxx import`)
+    * Third party packages.
+    * Other modules from the current package.
+
+    or in case of code using Django:
+
+    * Python standard library (`import xxx`)
+    * Python standard library (`from xxx import`)
+    * Third party packages.
+    * Django packages.
+    * Other modules from the current package.
+
+    Within these sections the imports should be sorted by module name.
+
+    Example:
+    ::
+
+        import threading
+        import time
+
+        from collections import deque
+        from Queue import Queue, Empty
+
+        from .datastructures import TokenBucket
+        from .five import zip_longest, items, range
+        from .utils import timeutils
+
+* Wildcard imports must not be used (`from xxx import *`).
+
+* For distributions where Python 2.5 is the oldest supported version,
+  additional rules apply:
+
+    * Absolute imports must be enabled at the top of every module::
+
+        from __future__ import absolute_import
+
+    * If the module uses the with statement and must be compatible
+      with Python 2.5 (celery is not) then it must also enable that::
+
+        from __future__ import with_statement
+
+    * Every future import must be on its own line, as older Python 2.5
+      releases did not support importing multiple features on the
+      same future import line::
+
+        # Good
+        from __future__ import absolute_import
+        from __future__ import with_statement
+
+        # Bad
+        from __future__ import absolute_import, with_statement
+
+     (Note that this rule does not apply if the package does not include
+     support for Python 2.5)
+
+
+* Note that we use `new-style` relative imports when the distribution
+  does not support Python versions below 2.5.
+
+    This requires Python 2.5 or later:
+    ::
+
+        from . import submodule
+
+
+.. _feature-with-extras:
+
+Contributing features requiring additional libraries
+====================================================
+
+Some features like a new result backend may require additional libraries
+that the user must install.
+
+We use setuptools ``extras_require`` for this, and all new optional features
+that require third-party libraries must be added.
+
+1) Add a new requirements file in `requirements/extras`
+
+    E.g. for the Cassandra backend this is
+    ``requirements/extras/cassandra.txt``, and the file looks like this::
+
+        pycassa
+
+    These are pip requirement files, so you can have version specifiers, and
+    multiple packages are separated by newlines.  A more complex example could
+    be::
+
+        # pycassa 2.0 breaks Foo
+        pycassa>=1.0,<2.0
+        thrift
+
+2) Modify ``setup.py``
+
+    After the requirements file is added you need to add it as an option
+    to ``setup.py`` in the ``extras_require`` section::
+
+        extra['extras_require'] = {
+            # ...
+            'cassandra': extras('cassandra.txt'),
+        }
+
+3) Document the new feature in ``docs/includes/installation.txt``
+
+    You must add your feature to the list in the `bundles`_ section
+    of ``docs/includes/installation.txt``.
+
+    After you've made changes to this file you need to render
+    the distro ``README`` file:
+    ::
+
+        $ pip install -U -r requirements/pkgutils.txt
+        $ paver readme
+
+
+That's all that needs to be done, but remember that if your feature
+adds additional configuration options then these need to be documented
+in ``docs/configuration.rst``.  Also all settings need to be added to the
+``celery/app/defaults.py`` module.
+
+Result backends require a separate section in the ``docs/configuration.rst``
+file.
+
+.. _contact_information:
+
+Contacts
+========
+
+This is a list of people that can be contacted for questions
+regarding the official git repositories, PyPI packages and
+Read the Docs pages.
+
+If the issue is not an emergency then it is better
+to `report an issue`_.
+
+
+Committers
+----------
+
+Ask Solem
+~~~~~~~~~
+
+:github: https://github.com/ask
+:twitter: http://twitter.com/#!/asksol
+
+Mher Movsisyan
+~~~~~~~~~~~~~~
+
+:github: https://github.com/mher
+:twitter: http://twitter.com/#!/movsm
+
+Steeve Morin
+~~~~~~~~~~~~
+
+:github: https://github.com/steeve
+:twitter: http://twitter.com/#!/steeve
+
+Website
+-------
+
+The Celery Project website is run and maintained by
+
+Mauro Rocco
+~~~~~~~~~~~
+
+:github: https://github.com/fireantology
+:twitter: https://twitter.com/#!/fireantology
+
+with design by:
+
+Jan Henrik Helmers
+~~~~~~~~~~~~~~~~~~
+
+:web: http://www.helmersworks.com
+:twitter: http://twitter.com/#!/helmers
+
+
+.. _packages:
+
+Packages
+========
+
+celery
+------
+
+:git: https://github.com/celery/celery
+:CI: http://travis-ci.org/#!/celery/celery
+:PyPI: http://pypi.python.org/pypi/celery
+:docs: http://docs.celeryproject.org
+
+kombu
+-----
+
+Messaging library.
+
+:git: https://github.com/celery/kombu
+:CI: http://travis-ci.org/#!/celery/kombu
+:PyPI: http://pypi.python.org/pypi/kombu
+:docs: http://kombu.readthedocs.org
+
+amqp
+----
+
+Python AMQP 0.9.1 client.
+
+:git: https://github.com/celery/py-amqp
+:CI: http://travis-ci.org/#!/celery/py-amqp
+:PyPI: http://pypi.python.org/pypi/amqp
+:docs: http://amqp.readthedocs.org
+
+billiard
+--------
+
+Fork of multiprocessing containing improvements
+that will eventually be merged into the Python stdlib.
+
+:git: https://github.com/celery/billiard
+:PyPI: http://pypi.python.org/pypi/billiard
+
+librabbitmq
+-----------
+
+Very fast Python AMQP client written in C.
+
+:git: https://github.com/celery/librabbitmq
+:PyPI: http://pypi.python.org/pypi/librabbitmq
+
+celerymon
+---------
+
+Celery monitor web-service.
+
+:git: https://github.com/celery/celerymon
+:PyPI: http://pypi.python.org/pypi/celerymon
+
+django-celery
+-------------
+
+Django <-> Celery Integration.
+
+:git: https://github.com/celery/django-celery
+:PyPI: http://pypi.python.org/pypi/django-celery
+:docs: http://docs.celeryproject.org/en/latest/django
+
+cl
+--
+
+Actor library.
+
+:git: https://github.com/celery/cl
+:PyPI: http://pypi.python.org/pypi/cl
+
+cyme
+----
+
+Distributed Celery Instance manager.
+
+:git: https://github.com/celery/cyme
+:PyPI: http://pypi.python.org/pypi/cyme
+:docs: http://cyme.readthedocs.org/
+
+
+Deprecated
+----------
+
+- Flask-Celery
+
+:git: https://github.com/ask/Flask-Celery
+:PyPI: http://pypi.python.org/pypi/Flask-Celery
+
+- carrot
+
+:git: https://github.com/ask/carrot
+:PyPI: http://pypi.python.org/pypi/carrot
+
+- ghettoq
+
+:git: https://github.com/ask/ghettoq
+:PyPI: http://pypi.python.org/pypi/ghettoq
+
+- kombu-sqlalchemy
+
+:git: https://github.com/ask/kombu-sqlalchemy
+:PyPI: http://pypi.python.org/pypi/kombu-sqlalchemy
+
+- django-kombu
+
+:git: https://github.com/ask/django-kombu
+:PyPI: http://pypi.python.org/pypi/django-kombu
+
+- pylibrabbitmq
+
+Old name for ``librabbitmq``.
+
+:git: ``None``
+:PyPI: http://pypi.python.org/pypi/pylibrabbitmq
+
+.. _release-procedure:
+
+
+Release Procedure
+=================
+
+Updating the version number
+---------------------------
+
+The version number must be updated in two places:
+
+    * ``celery/__init__.py``
+    * ``docs/include/introduction.txt``
+
+After you have changed these files you must render
+the ``README`` files.  There is a script to convert sphinx syntax
+to generic reStructuredText syntax, and the paver task `readme`
+does this for you:
+::
+
+    $ paver readme
+
+Now commit the changes:
+::
+
+    $ git commit -a -m "Bumps version to X.Y.Z"
+
+and make a new version tag:
+::
+
+    $ git tag vX.Y.Z
+    $ git push --tags
+
+Releasing
+---------
+
+Commands to make a new public stable release::
+
+    $ paver releaseok  # checks pep8, autodoc index, runs tests and more
+    $ paver removepyc  # Remove .pyc files
+    $ git clean -xdn   # Check that there's no left-over files in the repo
+    $ python setup.py sdist upload  # Upload package to PyPI
+
+If this is a new release series then you also need to do the
+following:
+
+* Go to the Read The Docs management interface at:
+    http://readthedocs.org/projects/celery/?fromdocs=celery
+
+* Enter "Edit project"
+
+    Change default branch to the branch of this series, e.g. ``2.4``
+    for series 2.4.
+
+* Also add the previous version under the "versions" tab.
+
+.. _`mailing-list`: http://groups.google.com/group/celery-users
+
+.. _`irc-channel`: http://docs.celeryproject.org/en/latest/getting-started/resources.html#irc
+
+.. _`internals-guide`: http://docs.celeryproject.org/en/latest/internals/guide.html
+
+.. _`bundles`: http://docs.celeryproject.org/en/latest/getting-started/introduction.html#bundles
+
+.. _`report an issue`: http://docs.celeryproject.org/en/latest/contributing.html#reporting-bugs
+

+ 10 - 2
CONTRIBUTORS.txt

@@ -152,5 +152,13 @@ Michael Robellard, 2013/11/07
 Vsevolod Kulaga, 2013/11/16
 Ionel Cristian Mărieș, 2013/12/09
 Константин Подшумок, 2013/12/16
-Antoine Legrand, 2014/09/01
-Pepijn de Vos, 2014/15/01
+Antoine Legrand, 2014/01/09
+Pepijn de Vos, 2014/01/15
+Dan McGee, 2014/01/27
+Paul Kilgo, 2014/01/28
+Martin Davidsson, 2014/02/08
+Chris Clark, 2014/02/20
+Matthew Duggan, 2014/04/10
+Brian Bouterse, 2014/04/10
+Dmitry Malinovsky, 2014/04/28
+Luke Pomfrey, 2014/05/06

+ 287 - 0
Changelog

@@ -8,6 +8,293 @@ This document contains change notes for bugfix releases in the 3.1.x series
 (Cipater), please see :ref:`whatsnew-3.1` for an overview of what's
 new in Celery 3.1.
 
+.. _version-3.1.11:
+
+3.1.11
+======
+:release-date: 2014-04-16 11:00 P.M UTC
+:release-by: Ask Solem
+
+- **Now compatible with RabbitMQ 3.3.0**
+
+    You need to run Celery 3.1.11 or later when using RabbitMQ 3.3,
+    and if you use the ``librabbitmq`` module you also have to upgrade
+    to librabbitmq 1.5.0:
+
+    .. code-block:: bash
+
+        $ pip install -U librabbitmq
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.15 <kombu:version-3.0.15>`.
+
+    - Now depends on `billiard 3.3.0.17`_.
+
+    - Bundle ``celery[librabbitmq]`` now depends on :mod:`librabbitmq` 1.5.0.
+
+.. _`billiard 3.3.0.17`:
+    https://github.com/celery/billiard/blob/master/CHANGES.txt
+
+- **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being
+  ignored (Issue #1953).
+
+- **Worker**: New :option:`--heartbeat-interval` can be used to change the
+  time (in seconds) between sending event heartbeats.
+
+    Contributed by Matthew Duggan and Craig Northway.
+
+- **App**: Fixed memory leaks occurring when creating lots of temporary
+  app instances (Issue #1949).
+
+- **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB
+  results backend (Issue #1973).
+
+    Fix contributed by Brian Bouterse.
+
+- **Logging**: The color formatter accidentally modified ``record.msg``
+  (Issue #1939).
+
+- **Results**: Fixed problem with task trails being stored multiple times,
+  causing ``result.collect()`` to hang (Issue #1936, Issue #1943).
+
+- **Results**: ``ResultSet`` now implements a ``.backend`` attribute for
+  compatibility with ``AsyncResult``.
+
+- **Results**: ``.forget()`` now also clears the local cache.
+
+- **Results**: Fixed problem with multiple calls to ``result._set_cache``
+  (Issue #1940).
+
+- **Results**: ``join_native`` populated result cache even if disabled.
+
+- **Results**: The YAML result serializer should now be able to handle storing
+  exceptions.
+
+- **Worker**: No longer sends task error emails for expected errors (in
+  ``@task(throws=(..., ))``).
+
+- **Canvas**: Fixed problem with exception deserialization when using
+  the JSON serializer (Issue #1987).
+
+- **Eventlet**: Fixes crash when ``celery.contrib.batches`` attempted to
+  cancel a non-existing timer (Issue #1984).
+
+- Can now import ``celery.version_info_t``, and ``celery.five`` (Issue #1968).
+
+
+.. _version-3.1.10:
+
+3.1.10
+======
+:release-date: 2014-03-22 09:40 P.M UTC
+:release-by: Ask Solem
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.14 <kombu:version-3.0.14>`.
+
+- **Redis:** Important note about events (Issue #1882).
+
+    There is a new transport option for Redis that enables monitors
+    to filter out unwanted events.  Enabling this option in the workers
+    will increase performance considerably:
+
+    .. code-block:: python
+
+        BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True}
+
+    Enabling this option means that your workers will not be able to see
+    workers with the option disabled (or running an older version of
+    Celery), so if you do enable it then make sure you do so on all
+    nodes.
+
+    See :ref:`redis-caveats-fanout-patterns`.
+
+    This will be the default in Celery 3.2.
+
+- **Results**: The :class:`@AsyncResult` object now keeps a local cache
+  of the final state of the task.
+
+    This means that the global result cache can finally be disabled,
+    and you can do so by setting :setting:`CELERY_MAX_CACHED_RESULTS` to
+    :const:`-1`.  The lifetime of the cache will then be bound to the
+    lifetime of the result object, which will be the default behavior
+    in Celery 3.2.
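+
+    A minimal sketch of such a configuration (the module name is just an
+    example):
+
+    .. code-block:: python
+
+        # celeryconfig.py
+        # -1 disables the global result cache entirely; results are then
+        # only cached on the individual AsyncResult instances.
+        CELERY_MAX_CACHED_RESULTS = -1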
+
+- **Events**: The "Substantial drift" warning message is now logged once
+  per node name only (Issue #1802).
+
+- **Worker**: Ability to use one log file per child process when using the
+  prefork pool.
+
+    This can be enabled by using the new ``%i`` and ``%I`` format specifiers
+    for the log file name.  See :ref:`worker-files-process-index`.
+
+- **Redis**: New experimental chord join implementation.
+
+    This is an optimization for chords when using the Redis result backend,
+    where the join operation is now considerably faster and using less
+    resources than the previous strategy.
+
+    The new option can be set in the result backend URL:
+
+    .. code-block:: python
+
+        CELERY_RESULT_BACKEND = 'redis://localhost?new_join=1'
+
+    This must be enabled manually as it's incompatible
+    with workers and clients not using it, so be sure to enable
+    the option in all clients and workers if you decide to use it.
+
+- **Multi**: With ``-opt:index`` (e.g. :option:`-c:1`) the index now always refers
+  to the position of a node in the argument list.
+
+    This means that referring to a number will work when specifying a list
+    of node names and not just for a number range:
+
+    .. code-block:: bash
+
+        celery multi start A B C D -c:1 4 -c:2-4 8
+
+    In this example ``1`` refers to node A (as it's the first node in the
+    list).
+
+- **Signals**: The sender argument to ``Signal.connect`` can now be a proxy
+  object, which means that it can be used with the task decorator
+  (Issue #1873).
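+
+    For example (a sketch; the app and task shown are hypothetical):
+
+    .. code-block:: python
+
+        from celery import Celery
+        from celery.signals import task_success
+
+        app = Celery('proj', broker='amqp://')
+
+        @app.task
+        def add(x, y):
+            return x + y
+
+        # ``add`` is a proxy object, which can now be used as the sender:
+        @task_success.connect(sender=add)
+        def on_add_success(sender=None, result=None, **kwargs):
+            print('add() succeeded: {0!r}'.format(result))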
+
+- **Task**: A regression caused the ``queue`` argument to ``Task.retry`` to be
+  ignored (Issue #1892).
+
+- **App**: Fixed error message for :meth:`~@Celery.config_from_envvar`.
+
+    Fix contributed by Dmitry Malinovsky.
+
+- **Canvas**: Chords can now contain a group of other chords (Issue #1921).
+
+- **Canvas**: Chords can now be combined when using the amqp result backend
+  (a chord where the callback is also a chord).
+
+- **Canvas**: Calling ``result.get()`` for a chain task will now complete
+  even if one of the tasks in the chain is ``ignore_result=True``
+  (Issue #1905).
+
+- **Canvas**: Worker now also logs chord errors.
+
+- **Canvas**: A chord task raising an exception will now cause any errbacks
+  (``link_error``) attached to the chord callback to also be called.
+
+- **Results**: Reliability improvements to the SQLAlchemy database backend
+  (Issue #1786).
+
+    Previously the connection from the ``MainProcess`` was improperly
+    inherited by child processes.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **Task**: Task callbacks and errbacks are now called using the group
+  primitive.
+
+- **Task**: ``Task.apply`` now properly sets ``request.headers``
+  (Issue #1874).
+
+- **Worker**: Fixed ``UnicodeEncodeError`` occurring when worker is started
+  by `supervisord`.
+
+    Fix contributed by Codeb Fan.
+
+- **Beat**: No longer attempts to upgrade a newly created database file
+  (Issue #1923).
+
+- **Beat**: New setting :setting:`CELERYBEAT_SYNC_EVERY` can be used
+  to control file sync by specifying the number of tasks to send between
+  each sync.
+
+    Contributed by Chris Clark.
+
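+    A minimal sketch (the value is arbitrary):
+
+    .. code-block:: python
+
+        # Sync the schedule file after every 10 task messages sent,
+        # instead of relying on the default sync behavior.
+        CELERYBEAT_SYNC_EVERY = 10
+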
+- **Commands**: :program:`celery inspect memdump` no longer crashes
+  if the :mod:`psutil` module is not installed (Issue #1914).
+
+- **Worker**: Remote control commands now always accept json serialized
+  messages (Issue #1870).
+
+- **Worker**: Gossip will now drop any task related events it receives
+  by mistake (Issue #1882).
+
+
+.. _version-3.1.9:
+
+3.1.9
+=====
+:release-date: 2014-02-10 06:43 P.M UTC
+:release-by: Ask Solem
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.12 <kombu:version-3.0.12>`.
+
+- **Prefork pool**: Better handling of exiting child processes.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **Prefork pool**: Now makes sure all file descriptors are removed
+  from the hub when a process is cleaned up.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **New Sphinx extension**: for autodoc documentation of tasks:
+  :mod:`celery.contrib.sphinx` (Issue #1833).
+
+- **Django**: Now works with Django 1.7a1.
+
+- **Task**: Task.backend is now a property that forwards to ``app.backend``
+  if no custom backend has been specified for the task (Issue #1821).
+
+- **Generic init scripts**: Fixed bug in stop command.
+
+    Fix contributed by Rinat Shigapov.
+
+- **Generic init scripts**: Fixed compatibility with GNU :manpage:`stat`.
+
+    Fix contributed by Paul Kilgo.
+
+- **Generic init scripts**: Fixed compatibility with the minimal
+  :program:`dash` shell (Issue #1815).
+
+- **Commands**: The :program:`celery amqp basic.publish` command was not
+  working properly.
+
+    Fix contributed by Andrey Voronov.
+
+- **Commands**: No error message was emitted if the pidfile exists
+  and the process is still alive (Issue #1855).
+
+- **Commands**: Better error message for missing arguments to preload
+  options (Issue #1860).
+
+- **Commands**: :program:`celery -h` did not work because of a bug in the
+  argument parser (Issue #1849).
+
+- **Worker**: Improved error message for message decoding errors.
+
+- **Time**: Now properly parses the `Z` timezone specifier in ISO 8601 date
+  strings.
+
+    Fix contributed by Martin Davidsson.
+
+- **Worker**: Now uses the *negotiated* heartbeat value to calculate
+  how often to run the heartbeat checks.
+
+- **Beat**: Fixed problem with beat hanging after the first schedule
+  iteration (Issue #1822).
+
+    Fix contributed by Roger Hu.
+
+- **Signals**: The header argument to :signal:`before_task_publish` is now
+  always a dictionary instance so that signal handlers can add headers.
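+
+    A sketch of a handler relying on this (the header name is made up):
+
+    .. code-block:: python
+
+        from celery.signals import before_task_publish
+
+        @before_task_publish.connect
+        def add_audit_header(headers=None, **kwargs):
+            # ``headers`` is now always a dict, so this is safe:
+            headers['x-audited'] = 'yes'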
+
+- **Worker**: A list of message headers is now included in message related
+  errors.
+
 .. _version-3.1.8:
 
 3.1.8

+ 6 - 6
README.rst

@@ -4,7 +4,7 @@
 
 
 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
 
-:Version: 3.1.8 (Cipater)
+:Version: 3.2.0a1 (Cipater)
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/celery/celery/
@@ -81,8 +81,8 @@ getting started tutorials:
 .. _`Next steps`:
     http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
 
-Celery is
-==========
+Celery is...
+============
 
 - **Simple**
 
@@ -119,8 +119,8 @@ Celery is…
     Custom pool implementations, serializers, compression schemes, logging,
     schedulers, consumers, producers, autoscalers, broker transports and much more.
 
-It supports
-============
+It supports...
+==============
 
     - **Message Transports**
 
@@ -128,7 +128,7 @@ It supports…
         - MongoDB_ (experimental), Amazon SQS (experimental),
         - CouchDB_ (experimental), SQLAlchemy_ (experimental),
         - Django ORM (experimental), `IronMQ`_
-        - and more
+        - and more...
 
     - **Concurrency**
 

+ 7 - 6
celery/__init__.py

@@ -5,7 +5,7 @@
 # :copyright: (c) 2012-2013 GoPivotal, Inc., All rights reserved.
 # :license:   BSD (3 Clause), see LICENSE for more details.
 
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 from collections import namedtuple
 
@@ -13,8 +13,8 @@ version_info_t = namedtuple(
     'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
 )
 
-SERIES = 'Cipater'
-VERSION = version_info_t(3, 1, 8, '', '')
+SERIES = 'DEV'
+VERSION = version_info_t(3, 2, 0, 'a2', '')
 __version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
 __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
@@ -127,9 +127,9 @@ def maybe_patch_concurrency(argv=sys.argv,
         concurrency.get_implementation(pool)
 
 # Lazy loading
-from .five import recreate_module
+from celery import five
 
-old_module, new_module = recreate_module(  # pragma: no cover
+old_module, new_module = five.recreate_module(  # pragma: no cover
     __name__,
     by_module={
         'celery.app': ['Celery', 'bugreport', 'shared_task'],
@@ -144,8 +144,9 @@ old_module, new_module = recreate_module(  # pragma: no cover
     __package__='celery', __file__=__file__,
     __path__=__path__, __doc__=__doc__, __version__=__version__,
     __author__=__author__, __contact__=__contact__,
-    __homepage__=__homepage__, __docformat__=__docformat__,
+    __homepage__=__homepage__, __docformat__=__docformat__, five=five,
     VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
+    version_info_t=version_info_t,
     maybe_patch_concurrency=maybe_patch_concurrency,
     _find_option_with_arg=_find_option_with_arg,
 )

+ 1 - 1
celery/__main__.py

@@ -1,4 +1,4 @@
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 

+ 26 - 31
celery/_state.py

@@ -9,7 +9,7 @@
     This module shouldn't be used directly.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 import sys
@@ -19,43 +19,35 @@ import weakref
 from celery.local import Proxy
 from celery.utils.threads import LocalStack
 
-try:
-    from weakref import WeakSet as AppSet
-except ImportError:  # XXX Py2.6
-
-    class AppSet(object):  # noqa
-
-        def __init__(self):
-            self._refs = set()
-
-        def add(self, app):
-            self._refs.add(weakref.ref(app))
-
-        def __iter__(self):
-            dirty = []
-            try:
-                for appref in self._refs:
-                    app = appref()
-                    if app is None:
-                        dirty.append(appref)
-                    else:
-                        yield app
-            finally:
-                while dirty:
-                    self._refs.discard(dirty.pop())
-
 __all__ = ['set_default_app', 'get_current_app', 'get_current_task',
-           'get_current_worker_task', 'current_app', 'current_task']
+           'get_current_worker_task', 'current_app', 'current_task',
+           'connect_on_app_finalize']
 
 #: Global default app used when no current app.
 default_app = None
 
 #: List of all app instances (weakrefs), must not be used directly.
-_apps = AppSet()
+_apps = weakref.WeakSet()
+
+#: global set of functions to call whenever a new app is finalized
+#: E.g. Shared tasks, and builtin tasks are created
+#: by adding callbacks here.
+_on_app_finalizers = set()
 
 _task_join_will_block = False
 
 
+def connect_on_app_finalize(callback):
+    _on_app_finalizers.add(callback)
+    return callback
+
+
+def _announce_app_finalized(app):
+    callbacks = set(_on_app_finalizers)
+    for callback in callbacks:
+        callback(app)
+
+
 def _set_task_join_will_block(blocks):
     global _task_join_will_block
     _task_join_will_block = blocks
@@ -85,13 +77,16 @@ def _get_current_app():
         #: creates the global fallback app instance.
         from celery.app import Celery
         set_default_app(Celery(
-            'default',
+            'default', fixups=[], set_as_current=False,
             loader=os.environ.get('CELERY_LOADER') or 'default',
-            fixups=[],
-            set_as_current=False, accept_magic_kwargs=True,
         ))
     return _tls.current_app or default_app
 
+
+def _set_current_app(app):
+    _tls.current_app = app
+
+
 C_STRICT_APP = os.environ.get('C_STRICT_APP')
 if os.environ.get('C_STRICT_APP'):  # pragma: no cover
     def get_current_app():

+ 5 - 6
celery/app/__init__.py

@@ -6,22 +6,19 @@
     Celery Application.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 
 from celery.local import Proxy
 from celery import _state
 from celery._state import (
-    set_default_app,
     get_current_app as current_app,
     get_current_task as current_task,
-    _get_active_apps,
-    _task_stack,
+    connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
 )
 from celery.utils import gen_task_name
 
-from .builtins import shared_task as _shared_task
 from .base import Celery, AppPickler
 
 __all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
@@ -128,7 +125,9 @@ def shared_task(*args, **kwargs):
             name = options.get('name')
             # Set as shared task so that unfinalized apps,
             # and future apps will load the task.
-            _shared_task(lambda app: app._task_from_fun(fun, **options))
+            connect_on_app_finalize(
+                lambda app: app._task_from_fun(fun, **options)
+            )
 
             # Force all finalized apps to take this task as well.
             for app in _get_active_apps():

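Since `shared_task` now just registers a finalize callback, a task defined before any app exists is materialized on every app finalized afterwards, each app getting its own instance. A hedged sketch of that behaviour:

    from celery import Celery, shared_task

    @shared_task
    def add(x, y):
        return x + y

    app1 = Celery('first')
    app1.finalize()
    app2 = Celery('second')
    app2.finalize()

    # Each finalized app received its own bound copy of the task.
    assert app1.tasks[add.name] is not app2.tasks[add.name]
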
+ 264 - 224
celery/app/amqp.py

@@ -8,13 +8,16 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
+import numbers
+
+from collections import Mapping, namedtuple
 from datetime import timedelta
 from datetime import timedelta
 from weakref import WeakValueDictionary
 from weakref import WeakValueDictionary
 
 
 from kombu import Connection, Consumer, Exchange, Producer, Queue
 from kombu import Connection, Consumer, Exchange, Producer, Queue
 from kombu.common import Broadcast
 from kombu.common import Broadcast
 from kombu.pools import ProducerPool
 from kombu.pools import ProducerPool
-from kombu.utils import cached_property, uuid
+from kombu.utils import cached_property
 from kombu.utils.encoding import safe_repr
 from kombu.utils.encoding import safe_repr
 from kombu.utils.functional import maybe_list
 from kombu.utils.functional import maybe_list
 
 
@@ -23,10 +26,9 @@ from celery.five import items, string_t
 from celery.utils.text import indent as textindent
 from celery.utils.timeutils import to_utc
 
-from . import app_or_default
 from . import routes as _routes
 
-__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer']
+__all__ = ['AMQP', 'Queues', 'task_message']
 
 #: Human readable queue declaration.
 QUEUE_FORMAT = """
@@ -34,6 +36,9 @@ QUEUE_FORMAT = """
 key={0.routing_key}
 """
 
+task_message = namedtuple('task_message',
+                          ('headers', 'properties', 'body', 'sent_event'))
+
 
 class Queues(dict):
     """Queue name⇒ declaration mapping.
@@ -60,7 +65,7 @@ class Queues(dict):
         self.ha_policy = ha_policy
         self.autoexchange = Exchange if autoexchange is None else autoexchange
         if isinstance(queues, (tuple, list)):
-            queues = dict((q.name, q) for q in queues)
+            queues = {q.name: q for q in queues}
         for name, q in items(queues or {}):
             self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
 
@@ -151,8 +156,9 @@ class Queues(dict):
                         Can be iterable or string.
         """
         if include:
-            self._consume_from = dict((name, self[name])
-                                      for name in maybe_list(include))
+            self._consume_from = {
+                name: self[name] for name in maybe_list(include)
+            }
     select_subset = select  # XXX compat
 
     def deselect(self, exclude):
@@ -182,203 +188,14 @@ class Queues(dict):
         return self
 
 
-class TaskProducer(Producer):
-    app = None
-    auto_declare = False
-    retry = False
-    retry_policy = None
-    utc = True
-    event_dispatcher = None
-    send_sent_event = False
-
-    def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.retry = kwargs.pop('retry', self.retry)
-        self.retry_policy = kwargs.pop('retry_policy',
-                                       self.retry_policy or {})
-        self.send_sent_event = kwargs.pop('send_sent_event',
-                                          self.send_sent_event)
-        exchange = exchange or self.exchange
-        self.queues = self.app.amqp.queues  # shortcut
-        self.default_queue = self.app.amqp.default_queue
-        super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
-
-    def publish_task(self, task_name, task_args=None, task_kwargs=None,
-                     countdown=None, eta=None, task_id=None, group_id=None,
-                     taskset_id=None,  # compat alias to group_id
-                     expires=None, exchange=None, exchange_type=None,
-                     event_dispatcher=None, retry=None, retry_policy=None,
-                     queue=None, now=None, retries=0, chord=None,
-                     callbacks=None, errbacks=None, routing_key=None,
-                     serializer=None, delivery_mode=None, compression=None,
-                     reply_to=None, time_limit=None, soft_time_limit=None,
-                     declare=None, headers=None,
-                     send_before_publish=signals.before_task_publish.send,
-                     before_receivers=signals.before_task_publish.receivers,
-                     send_after_publish=signals.after_task_publish.send,
-                     after_receivers=signals.after_task_publish.receivers,
-                     send_task_sent=signals.task_sent.send,  # XXX deprecated
-                     sent_receivers=signals.task_sent.receivers,
-                     **kwargs):
-        """Send task message."""
-        retry = self.retry if retry is None else retry
-
-        qname = queue
-        if queue is None and exchange is None:
-            queue = self.default_queue
-        if queue is not None:
-            if isinstance(queue, string_t):
-                qname, queue = queue, self.queues[queue]
-            else:
-                qname = queue.name
-            exchange = exchange or queue.exchange.name
-            routing_key = routing_key or queue.routing_key
-        if declare is None and queue and not isinstance(queue, Broadcast):
-            declare = [queue]
-
-        # merge default and custom policy
-        retry = self.retry if retry is None else retry
-        _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
-               else self.retry_policy)
-        task_id = task_id or uuid()
-        task_args = task_args or []
-        task_kwargs = task_kwargs or {}
-        if not isinstance(task_args, (list, tuple)):
-            raise ValueError('task args must be a list or tuple')
-        if not isinstance(task_kwargs, dict):
-            raise ValueError('task kwargs must be a dictionary')
-        if countdown:  # Convert countdown to ETA.
-            now = now or self.app.now()
-            eta = now + timedelta(seconds=countdown)
-            if self.utc:
-                eta = to_utc(eta).astimezone(self.app.timezone)
-        if isinstance(expires, (int, float)):
-            now = now or self.app.now()
-            expires = now + timedelta(seconds=expires)
-            if self.utc:
-                expires = to_utc(expires).astimezone(self.app.timezone)
-        eta = eta and eta.isoformat()
-        expires = expires and expires.isoformat()
-
-        body = {
-            'task': task_name,
-            'id': task_id,
-            'args': task_args,
-            'kwargs': task_kwargs,
-            'retries': retries or 0,
-            'eta': eta,
-            'expires': expires,
-            'utc': self.utc,
-            'callbacks': callbacks,
-            'errbacks': errbacks,
-            'timelimit': (time_limit, soft_time_limit),
-            'taskset': group_id or taskset_id,
-            'chord': chord,
-        }
-
-        if before_receivers:
-            send_before_publish(
-                sender=task_name, body=body,
-                exchange=exchange,
-                routing_key=routing_key,
-                declare=declare,
-                headers=headers,
-                properties=kwargs,
-                retry_policy=retry_policy,
-            )
-
-        self.publish(
-            body,
-            exchange=exchange, routing_key=routing_key,
-            serializer=serializer or self.serializer,
-            compression=compression or self.compression,
-            headers=headers,
-            retry=retry, retry_policy=_rp,
-            reply_to=reply_to,
-            correlation_id=task_id,
-            delivery_mode=delivery_mode, declare=declare,
-            **kwargs
-        )
-
-        if after_receivers:
-            send_after_publish(sender=task_name, body=body,
-                               exchange=exchange, routing_key=routing_key)
-
-        if sent_receivers:  # XXX deprecated
-            send_task_sent(sender=task_name, task_id=task_id,
-                           task=task_name, args=task_args,
-                           kwargs=task_kwargs, eta=eta,
-                           taskset=group_id or taskset_id)
-        if self.send_sent_event:
-            evd = event_dispatcher or self.event_dispatcher
-            exname = exchange or self.exchange
-            if isinstance(exname, Exchange):
-                exname = exname.name
-            evd.publish(
-                'task-sent',
-                {
-                    'uuid': task_id,
-                    'name': task_name,
-                    'args': safe_repr(task_args),
-                    'kwargs': safe_repr(task_kwargs),
-                    'retries': retries,
-                    'eta': eta,
-                    'expires': expires,
-                    'queue': qname,
-                    'exchange': exname,
-                    'routing_key': routing_key,
-                },
-                self, retry=retry, retry_policy=retry_policy,
-            )
-        return task_id
-    delay_task = publish_task   # XXX Compat
-
-    @cached_property
-    def event_dispatcher(self):
-        # We call Dispatcher.publish with a custom producer
-        # so don't need the dispatcher to be "enabled".
-        return self.app.events.Dispatcher(enabled=False)
-
-
-class TaskPublisher(TaskProducer):
-    """Deprecated version of :class:`TaskProducer`."""
-
-    def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.app = app_or_default(kwargs.pop('app', self.app))
-        self.retry = kwargs.pop('retry', self.retry)
-        self.retry_policy = kwargs.pop('retry_policy',
-                                       self.retry_policy or {})
-        exchange = exchange or self.exchange
-        if not isinstance(exchange, Exchange):
-            exchange = Exchange(exchange,
-                                kwargs.pop('exchange_type', 'direct'))
-        self.queues = self.app.amqp.queues  # shortcut
-        super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
-
-
-class TaskConsumer(Consumer):
-    app = None
-
-    def __init__(self, channel, queues=None, app=None, accept=None, **kw):
-        self.app = app or self.app
-        if accept is None:
-            accept = self.app.conf.CELERY_ACCEPT_CONTENT
-        super(TaskConsumer, self).__init__(
-            channel,
-            queues or list(self.app.amqp.queues.consume_from.values()),
-            accept=accept,
-            **kw
-        )
-
-
 class AMQP(object):
     Connection = Connection
     Consumer = Consumer
+    Producer = Producer
 
     #: compat alias to Connection
     BrokerConnection = Connection
 
-    producer_cls = TaskProducer
-    consumer_cls = TaskConsumer
     queues_cls = Queues
 
     #: Cached and prepared routing table.
@@ -396,9 +213,18 @@ class AMQP(object):
 
     def __init__(self, app):
         self.app = app
+        self.task_protocols = {
+            1: self.as_task_v1,
+            2: self.as_task_v2,
+        }
 
-    def flush_routes(self):
-        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
+    @cached_property
+    def create_task_message(self):
+        return self.task_protocols[self.app.conf.CELERY_TASK_PROTOCOL]
+
+    @cached_property
+    def send_task_message(self):
+        return self._create_task_sender()
 
     def Queues(self, queues, create_missing=None, ha_policy=None,
                autoexchange=None):
@@ -426,35 +252,239 @@ class AMQP(object):
                               self.app.either('CELERY_CREATE_MISSING_QUEUES',
                                               create_missing), app=self.app)
 
-    @cached_property
-    def TaskConsumer(self):
-        """Return consumer configured to consume from the queues
-        we are configured for (``app.amqp.queues.consume_from``)."""
-        return self.app.subclass_with_self(self.consumer_cls,
-                                           reverse='amqp.TaskConsumer')
-    get_task_consumer = TaskConsumer  # XXX compat
+    def flush_routes(self):
+        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
 
-    @cached_property
-    def TaskProducer(self):
-        """Return publisher used to send tasks.
+    def TaskConsumer(self, channel, queues=None, accept=None, **kw):
+        if accept is None:
+            accept = self.app.conf.CELERY_ACCEPT_CONTENT
+        return self.Consumer(
+            channel, accept=accept,
+            queues=queues or list(self.queues.consume_from.values()),
+            **kw
+        )
 
-        You should use `app.send_task` instead.
+    def as_task_v2(self, task_id, name, args=None, kwargs=None,
+                   countdown=None, eta=None, group_id=None,
+                   expires=None, retries=0, chord=None,
+                   callbacks=None, errbacks=None, reply_to=None,
+                   time_limit=None, soft_time_limit=None,
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
+        args = args or ()
+        kwargs = kwargs or {}
+        utc = self.utc
+        if not isinstance(args, (list, tuple)):
+            raise ValueError('task args must be a list or tuple')
+        if not isinstance(kwargs, Mapping):
+            raise ValueError('task keyword arguments must be a mapping')
+        if countdown:  # convert countdown to ETA
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            eta = now + timedelta(seconds=countdown)
+            if utc:
+                eta = to_utc(eta).astimezone(timezone)
+        if isinstance(expires, numbers.Real):
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            expires = now + timedelta(seconds=expires)
+            if utc:
+                expires = to_utc(expires).astimezone(timezone)
+        eta = eta and eta.isoformat()
+        expires = expires and expires.isoformat()
 
-        """
-        conf = self.app.conf
-        return self.app.subclass_with_self(
-            self.producer_cls,
-            reverse='amqp.TaskProducer',
-            exchange=self.default_exchange,
-            routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
-            serializer=conf.CELERY_TASK_SERIALIZER,
-            compression=conf.CELERY_MESSAGE_COMPRESSION,
-            retry=conf.CELERY_TASK_PUBLISH_RETRY,
-            retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
-            send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
-            utc=conf.CELERY_ENABLE_UTC,
+        return task_message(
+            headers={
+                'lang': 'py',
+                'task': name,
+                'id': task_id,
+                'eta': eta,
+                'expires': expires,
+                'callbacks': callbacks,
+                'errbacks': errbacks,
+                'chain': None,  # TODO
+                'group': group_id,
+                'chord': chord,
+                'retries': retries,
+                'timelimit': [time_limit, soft_time_limit],
+                'root_id': root_id,
+                'parent_id': parent_id,
+            },
+            properties={
+                'correlation_id': task_id,
+                'reply_to': reply_to or '',
+            },
+            body=(args, kwargs),
+            sent_event={
+                'uuid': task_id,
+                'root': root_id,
+                'parent': parent_id,
+                'name': name,
+                'args': safe_repr(args),
+                'kwargs': safe_repr(kwargs),
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+            } if create_sent_event else None,
+        )
+
+    def as_task_v1(self, task_id, name, args=None, kwargs=None,
+                   countdown=None, eta=None, group_id=None,
+                   expires=None, retries=0,
+                   chord=None, callbacks=None, errbacks=None, reply_to=None,
+                   time_limit=None, soft_time_limit=None,
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
+        args = args or ()
+        kwargs = kwargs or {}
+        utc = self.utc
+        if not isinstance(args, (list, tuple)):
+            raise ValueError('task args must be a list or tuple')
+        if not isinstance(kwargs, Mapping):
+            raise ValueError('task keyword arguments must be a mapping')
+        if countdown:  # convert countdown to ETA
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            eta = now + timedelta(seconds=countdown)
+            if utc:
+                eta = to_utc(eta).astimezone(timezone)
+        if isinstance(expires, numbers.Real):
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            expires = now + timedelta(seconds=expires)
+            if utc:
+                expires = to_utc(expires).astimezone(timezone)
+        eta = eta and eta.isoformat()
+        expires = expires and expires.isoformat()
+
+        return task_message(
+            headers={},
+            properties={
+                'correlation_id': task_id,
+                'reply_to': reply_to or '',
+            },
+            body={
+                'task': name,
+                'id': task_id,
+                'args': args,
+                'kwargs': kwargs,
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+                'utc': utc,
+                'callbacks': callbacks,
+                'errbacks': errbacks,
+                'timelimit': (time_limit, soft_time_limit),
+                'taskset': group_id,
+                'chord': chord,
+            },
+            sent_event={
+                'uuid': task_id,
+                'name': name,
+                'args': safe_repr(args),
+                'kwargs': safe_repr(kwargs),
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+            } if create_sent_event else None,
         )
-    TaskPublisher = TaskProducer  # compat
+
+    def _create_task_sender(self):
+        default_retry = self.app.conf.CELERY_TASK_PUBLISH_RETRY
+        default_policy = self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
+        default_delivery_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE
+        default_queue = self.default_queue
+        queues = self.queues
+        send_before_publish = signals.before_task_publish.send
+        before_receivers = signals.before_task_publish.receivers
+        send_after_publish = signals.after_task_publish.send
+        after_receivers = signals.after_task_publish.receivers
+
+        send_task_sent = signals.task_sent.send   # XXX compat
+        sent_receivers = signals.task_sent.receivers
+
+        default_evd = self._event_dispatcher
+        default_exchange = self.default_exchange
+
+        default_rkey = self.app.conf.CELERY_DEFAULT_ROUTING_KEY
+        default_serializer = self.app.conf.CELERY_TASK_SERIALIZER
+        default_compressor = self.app.conf.CELERY_MESSAGE_COMPRESSION
+
+        def publish_task(producer, name, message,
+                         exchange=None, routing_key=None, queue=None,
+                         event_dispatcher=None, retry=None, retry_policy=None,
+                         serializer=None, delivery_mode=None,
+                         compression=None, declare=None,
+                         headers=None, **kwargs):
+            retry = default_retry if retry is None else retry
+            headers2, properties, body, sent_event = message
+            if headers:
+                headers2.update(headers)
+            if kwargs:
+                properties.update(kwargs)
+
+            qname = queue
+            if queue is None and exchange is None:
+                queue = default_queue
+            if queue is not None:
+                if isinstance(queue, string_t):
+                    qname, queue = queue, queues[queue]
+                else:
+                    qname = queue.name
+            if delivery_mode is None:
+                try:
+                    delivery_mode = queue.exchange.delivery_mode
+                except AttributeError:
+                    delivery_mode = default_delivery_mode
+            exchange = exchange or queue.exchange.name
+            routing_key = routing_key or queue.routing_key
+            if declare is None and queue and not isinstance(queue, Broadcast):
+                declare = [queue]
+
+            # merge default and custom policy
+            retry = default_retry if retry is None else retry
+            _rp = (dict(default_policy, **retry_policy) if retry_policy
+                   else default_policy)
+
+            if before_receivers:
+                send_before_publish(
+                    sender=name, body=body,
+                    exchange=exchange, routing_key=routing_key,
+                    declare=declare, headers=headers2,
+                    properties=kwargs,  retry_policy=retry_policy,
+                )
+            ret = producer.publish(
+                body,
+                exchange=exchange or default_exchange,
+                routing_key=routing_key or default_rkey,
+                serializer=serializer or default_serializer,
+                compression=compression or default_compressor,
+                retry=retry, retry_policy=_rp,
+                delivery_mode=delivery_mode, declare=declare,
+                headers=headers2,
+                **properties
+            )
+            if after_receivers:
+                send_after_publish(sender=name, body=body, headers=headers2,
+                                   exchange=exchange, routing_key=routing_key)
+            if sent_receivers:  # XXX deprecated
+                send_task_sent(sender=name, task_id=body['id'], task=name,
+                               args=body['args'], kwargs=body['kwargs'],
+                               eta=body['eta'], taskset=body['taskset'])
+            if sent_event:
+                evd = event_dispatcher or default_evd
+                exname = exchange or self.exchange
+                if isinstance(exname, Exchange):
+                    exname = exname.name
+                sent_event.update({
+                    'queue': qname,
+                    'exchange': exname,
+                    'routing_key': routing_key,
+                })
+                evd.publish('task-sent', sent_event,
+                            self, retry=retry, retry_policy=retry_policy)
+            return ret
+        return publish_task
 
     @cached_property
     def default_queue(self):
@@ -485,7 +515,7 @@ class AMQP(object):
             self._producer_pool = ProducerPool(
                 self.app.pool,
                 limit=self.app.pool.limit,
-                Producer=self.TaskProducer,
+                Producer=self.Producer,
             )
         return self._producer_pool
     publisher_pool = producer_pool  # compat alias
@@ -494,3 +524,13 @@ class AMQP(object):
     def default_exchange(self):
         return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
                         self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
+
+    @cached_property
+    def utc(self):
+        return self.app.conf.CELERY_ENABLE_UTC
+
+    @cached_property
+    def _event_dispatcher(self):
+        # We call Dispatcher.publish with a custom producer
+        # so don't need the dispatcher to be enabled.
+        return self.app.events.Dispatcher(enabled=False)

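With `TaskProducer`/`TaskPublisher` gone, message construction and publishing are now separate steps: `create_task_message` picks `as_task_v1` or `as_task_v2` by the `CELERY_TASK_PROTOCOL` setting, and `send_task_message` publishes the resulting namedtuple through a plain kombu `Producer`. A rough illustration of the protocol-2 layout (the task name and arguments are hypothetical):

    from celery import Celery
    from kombu.utils import uuid

    app = Celery('example', set_as_current=False)

    msg = app.amqp.as_task_v2(uuid(), 'proj.tasks.add', args=(2, 2))
    # Protocol 2 moves routing metadata into the message headers and keeps
    # the body as just (args, kwargs); as_task_v1 instead inlines everything
    # into one body dict under the old keys.
    print(msg.headers['task'])               # 'proj.tasks.add'
    print(msg.body)                          # ((2, 2), {})
    print(msg.properties['correlation_id'])  # the generated task id
    print(msg.sent_event)                    # None unless create_sent_event=True
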
+ 101 - 58
celery/app/base.py

@@ -13,7 +13,6 @@ import threading
 import warnings
 
 from collections import defaultdict, deque
-from contextlib import contextmanager
 from copy import deepcopy
 from operator import attrgetter
 
@@ -26,25 +25,29 @@ from kombu.utils import cached_property, uuid
 from celery import platforms
 from celery import signals
 from celery._state import (
-    _task_stack, _tls, get_current_app, set_default_app,
-    _register_app, get_current_worker_task,
+    _task_stack, get_current_app, _set_current_app, set_default_app,
+    _register_app, get_current_worker_task, connect_on_app_finalize,
+    _announce_app_finalized,
 )
 from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
 from celery.five import items, values
 from celery.loaders import get_loader_cls
 from celery.local import PromiseProxy, maybe_evaluate
+from celery.utils.dispatch import Signal
 from celery.utils.functional import first, maybe_list
 from celery.utils.imports import instantiate, symbol_by_name
-from celery.utils.objects import mro_lookup
+from celery.utils.objects import FallbackContext, mro_lookup
 
 from .annotations import prepare as prepare_annotations
-from .builtins import shared_task, load_shared_tasks
 from .defaults import DEFAULTS, find_deprecated_settings
 from .registry import TaskRegistry
 from .utils import (
     AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr,
 )
 
+# Load all builtin tasks
+from . import builtins  # noqa
+
 __all__ = ['Celery']
 
 _EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
@@ -58,6 +61,8 @@ and as such the configuration could not be loaded.
 Please set this variable and make it point to
 a configuration module."""
 
+_after_fork_registered = False
+
 
 def app_has_custom(app, attr):
     return mro_lookup(app.__class__, attr, stop=(Celery, object),
@@ -70,6 +75,29 @@ def _unpickle_appattr(reverse_name, args):
     return get_current_app()._rgetattr(reverse_name)(*args)
 
 
+def _global_after_fork():
+    # Previously every app would call:
+    #    `register_after_fork(app, app._after_fork)`
+    # but this created a leak as `register_after_fork` stores concrete object
+    # references and once registered an object cannot be removed without
+    # touching and iterating over the private afterfork registry list.
+    #
+    # See Issue #1949
+    from celery import _state
+    from multiprocessing.util import info
+    for app in _state.apps:
+        try:
+            app._after_fork()
+        except Exception as exc:
+            info('after forker raised exception: %r' % (exc, ), exc_info=1)
+
+
+def _ensure_after_fork():
+    global _after_fork_registered
+    _after_fork_registered = True
+    register_after_fork(_global_after_fork, _global_after_fork)
+
+
 class Celery(object):
     #: This is deprecated, use :meth:`reduce_keys` instead
     Pickler = AppPickler
@@ -89,11 +117,22 @@ class Celery(object):
     _pool = None
     builtin_fixups = BUILTIN_FIXUPS
 
+    #: Signal sent when app is loading configuration.
+    on_configure = None
+
+    #: Signal sent after app has prepared the configuration.
+    on_after_configure = None
+
+    #: Signal sent after app has been finalized.
+    on_after_finalize = None
+
+    #: ignored
+    accept_magic_kwargs = False
+
     def __init__(self, main=None, loader=None, backend=None,
                  amqp=None, events=None, log=None, control=None,
-                 set_as_current=True, accept_magic_kwargs=False,
-                 tasks=None, broker=None, include=None, changes=None,
-                 config_source=None, fixups=None, task_cls=None,
+                 set_as_current=True, tasks=None, broker=None, include=None,
+                 changes=None, config_source=None, fixups=None, task_cls=None,
                  autofinalize=True, **kwargs):
         self.clock = LamportClock()
         self.main = main
@@ -106,7 +145,6 @@ class Celery(object):
         self.task_cls = task_cls or self.task_cls
         self.set_as_current = set_as_current
         self.registry_cls = symbol_by_name(self.registry_cls)
-        self.accept_magic_kwargs = accept_magic_kwargs
         self.user_options = defaultdict(set)
         self.steps = defaultdict(set)
         self.autofinalize = autofinalize
@@ -143,11 +181,18 @@ class Celery(object):
         if self.set_as_current:
             self.set_current()
 
+        # Signals
+        if self.on_configure is None:
+            # used to be a method pre 3.2
+            self.on_configure = Signal()
+        self.on_after_configure = Signal()
+        self.on_after_finalize = Signal()
+
         self.on_init()
         _register_app(self)
 
     def set_current(self):
-        _tls.current_app = self
+        _set_current_app(self)
 
     def set_default(self):
         set_default_app(self)
@@ -183,8 +228,8 @@ class Celery(object):
             # a different task instance.  This makes sure it will always use
             # the task instance from the current app.
             # Really need a better solution for this :(
-            from . import shared_task as proxies_to_curapp
-            return proxies_to_curapp(*args, _force_evaluate=True, **opts)
+            from . import shared_task
+            return shared_task(*args, _force_evaluate=True, **opts)
 
         def inner_create_task_cls(shared=True, filter=None, **opts):
             _filt = filter  # stupid 2to3
@@ -193,13 +238,7 @@ class Celery(object):
                 if shared:
                     cons = lambda app: app._task_from_fun(fun, **opts)
                     cons.__name__ = fun.__name__
-                    shared_task(cons)
-                if self.accept_magic_kwargs:  # compat mode
-                    task = self._task_from_fun(fun, **opts)
-                    if filter:
-                        task = filter(task)
-                    return task
-
+                    connect_on_app_finalize(cons)
                 if self.finalized or opts.get('_force_evaluate'):
                     ret = self._task_from_fun(fun, **opts)
                 else:
@@ -231,11 +270,11 @@ class Celery(object):
 
         T = type(fun.__name__, (base, ), dict({
             'app': self,
-            'accept_magic_kwargs': False,
             'run': fun if bind else staticmethod(fun),
             '_decorated': True,
             '__doc__': fun.__doc__,
-            '__module__': fun.__module__}, **options))()
+            '__module__': fun.__module__,
+            '__wrapped__': fun}, **options))()
         task = self._tasks[T.name]  # return global instance.
         return task
 
@@ -245,7 +284,7 @@ class Celery(object):
                 if auto and not self.autofinalize:
                     raise RuntimeError('Contract breach: app not finalized')
                 self.finalized = True
-                load_shared_tasks(self)
+                _announce_app_finalized(self)
 
                 pending = self._pending
                 while pending:
@@ -254,6 +293,8 @@ class Celery(object):
                 for task in values(self._tasks):
                     task.bind(self)
 
+                self.on_after_finalize.send(sender=self)
+
     def add_defaults(self, fun):
         if not callable(fun):
             d, fun = fun, lambda: d
@@ -272,7 +313,8 @@ class Celery(object):
         if not module_name:
             if silent:
                 return False
-            raise ImproperlyConfigured(ERR_ENVVAR_NOT_SET.format(module_name))
+            raise ImproperlyConfigured(
+                ERR_ENVVAR_NOT_SET.format(variable_name))
         return self.config_from_object(module_name, silent=silent, force=force)
 
     def config_from_cmdline(self, argv, namespace='celery'):
@@ -300,26 +342,34 @@ class Celery(object):
                   eta=None, task_id=None, producer=None, connection=None,
                   router=None, result_cls=None, expires=None,
                   publisher=None, link=None, link_error=None,
-                  add_to_parent=True, reply_to=None, **options):
+                  add_to_parent=True, group_id=None, retries=0, chord=None,
+                  reply_to=None, time_limit=None, soft_time_limit=None,
+                  root_id=None, parent_id=None, **options):
+        amqp = self.amqp
         task_id = task_id or uuid()
         producer = producer or publisher  # XXX compat
-        router = router or self.amqp.router
+        router = router or amqp.router
         conf = self.conf
         if conf.CELERY_ALWAYS_EAGER:  # pragma: no cover
             warnings.warn(AlwaysEagerIgnored(
                 'CELERY_ALWAYS_EAGER has no effect on send_task',
             ), stacklevel=2)
         options = router.route(options, name, args, kwargs)
+
+        message = amqp.create_task_message(
+            task_id, name, args, kwargs, countdown, eta, group_id,
+            expires, retries, chord,
+            maybe_list(link), maybe_list(link_error),
+            reply_to or self.oid, time_limit, soft_time_limit,
+            self.conf.CELERY_SEND_TASK_SENT_EVENT,
+            root_id, parent_id,
+        )
+
         if connection:
-            producer = self.amqp.TaskProducer(connection)
+            producer = amqp.Producer(connection)
         with self.producer_or_acquire(producer) as P:
             self.backend.on_task_call(P, task_id)
-            task_id = P.publish_task(
-                name, args, kwargs, countdown=countdown, eta=eta,
-                task_id=task_id, expires=expires,
-                callbacks=maybe_list(link), errbacks=maybe_list(link_error),
-                reply_to=reply_to or self.oid, **options
-            )
+            amqp.send_task_message(P, name, message, **options)
         result = (result_cls or self.AsyncResult)(task_id)
         if add_to_parent:
             parent = get_current_worker_task()
@@ -355,27 +405,20 @@ class Celery(object):
         )
     broker_connection = connection
 
-    @contextmanager
-    def connection_or_acquire(self, connection=None, pool=True,
-                              *args, **kwargs):
-        if connection:
-            yield connection
-        else:
-            if pool:
-                with self.pool.acquire(block=True) as connection:
-                    yield connection
-            else:
-                with self.connection() as connection:
-                    yield connection
+    def _acquire_connection(self, pool=True):
+        """Helper for :meth:`connection_or_acquire`."""
+        if pool:
+            return self.pool.acquire(block=True)
+        return self.connection()
+
+    def connection_or_acquire(self, connection=None, pool=True, *_, **__):
+        return FallbackContext(connection, self._acquire_connection, pool=pool)
     default_connection = connection_or_acquire  # XXX compat
 
-    @contextmanager
     def producer_or_acquire(self, producer=None):
-        if producer:
-            yield producer
-        else:
-            with self.amqp.producer_pool.acquire(block=True) as producer:
-                yield producer
+        return FallbackContext(
+            producer, self.amqp.producer_pool.acquire, block=True,
+        )
     default_producer = producer_or_acquire  # XXX compat
 
     def prepare_config(self, c):
@@ -418,12 +461,12 @@ class Celery(object):
             self.loader)
         return backend(app=self, url=url)
 
-    def on_configure(self):
-        """Callback called when the app loads configuration"""
-        pass
-
     def _get_config(self):
-        self.on_configure()
+        if isinstance(self.on_configure, Signal):
+            self.on_configure.send(sender=self)
+        else:
+            # used to be a method pre 3.2
+            self.on_configure()
         if self._config_source:
             self.loader.config_from_object(self._config_source)
         self.configured = True
@@ -437,6 +480,7 @@ class Celery(object):
         if self._preconf:
             for key, value in items(self._preconf):
                 setattr(s, key, value)
+        self.on_after_configure.send(sender=self, source=s)
         return s
 
     def _after_fork(self, obj_):
@@ -523,7 +567,6 @@ class Celery(object):
             'events': self.events_cls,
             'log': self.log_cls,
             'control': self.control_cls,
-            'accept_magic_kwargs': self.accept_magic_kwargs,
             'fixups': self.fixups,
             'config_source': self._config_source,
             'task_cls': self.task_cls,
@@ -534,7 +577,7 @@ class Celery(object):
         return (self.main, self.conf.changes,
                 self.loader_cls, self.backend_cls, self.amqp_cls,
                 self.events_cls, self.log_cls, self.control_cls,
-                self.accept_magic_kwargs, self._config_source)
+                False, self._config_source)
 
     @cached_property
     def Worker(self):
@@ -581,7 +624,7 @@ class Celery(object):
     @property
     def pool(self):
         if self._pool is None:
-            register_after_fork(self, self._after_fork)
+            _ensure_after_fork()
             limit = self.conf.BROKER_POOL_LIMIT
             self._pool = self.connection().Pool(limit=limit)
         return self._pool

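`on_configure` changes from an overridable method into a `Signal` (kept backwards compatible for pre-3.2 subclasses that still define it as a method), with `on_after_configure` and `on_after_finalize` firing from `_get_config()` and `finalize()` as shown above. A small sketch of connecting handlers (handler names are made up):

    from celery import Celery

    app = Celery('example')

    @app.on_after_configure.connect
    def after_configure(sender=None, source=None, **kwargs):
        # `source` is the prepared Settings object sent by _get_config().
        print('configured:', sender.main)

    @app.on_after_finalize.connect
    def after_finalize(sender=None, **kwargs):
        print('finalized:', sender.main)

    app.conf.CELERY_ENABLE_UTC  # first conf access loads the configuration
    app.finalize()              # fires on_after_finalize
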
+ 38 - 235
celery/app/builtins.py

@@ -9,38 +9,15 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-from collections import deque
+from celery._state import get_current_worker_task, connect_on_app_finalize
+from celery.utils.log import get_logger
 
 
-from celery._state import get_current_worker_task
-from celery.utils import uuid
+__all__ = []
 
 
-__all__ = ['shared_task', 'load_shared_tasks']
+logger = get_logger(__name__)
 
 
-#: global list of functions defining tasks that should be
-#: added to all apps.
-_shared_tasks = set()
 
 
-
-def shared_task(constructor):
-    """Decorator that specifies a function that generates a built-in task.
-
-    The function will then be called for every new app instance created
-    (lazily, so more exactly when the task registry for that app is needed).
-
-    The function must take a single ``app`` argument.
-    """
-    _shared_tasks.add(constructor)
-    return constructor
-
-
-def load_shared_tasks(app):
-    """Create built-in tasks for an app instance."""
-    constructors = set(_shared_tasks)
-    for constructor in constructors:
-        constructor(app)
-
-
-@shared_task
+@connect_on_app_finalize
 def add_backend_cleanup_task(app):
     """The backend cleanup task can be used to clean up the default result
     backend.
@@ -57,14 +34,14 @@ def add_backend_cleanup_task(app):
     return backend_cleanup
 
 
-@shared_task
+@connect_on_app_finalize
 def add_unlock_chord_task(app):
     """This task is used by result backends without native chord support.
 
     It joins chords by creating a task chain polling the header for completion.
 
     """
-    from celery.canvas import signature
+    from celery.canvas import maybe_signature
     from celery.exceptions import ChordError
     from celery.result import allow_join_result, result_from_tuple
 
@@ -86,6 +63,7 @@ def add_unlock_chord_task(app):
             interval = unlock_chord.default_retry_delay
 
         # check if the task group is ready, and if so apply the callback.
+        callback = maybe_signature(callback, app)
         deps = GroupResult(
             group_id,
             [result_from_tuple(r, app=app) for r in result],
@@ -93,7 +71,7 @@ def add_unlock_chord_task(app):
         j = deps.join_native if deps.supports_native_join else deps.join
 
         if deps.ready():
-            callback = signature(callback, app=app)
+            callback = maybe_signature(callback, app=app)
             try:
                 with allow_join_result():
                     ret = j(timeout=3.0, propagate=propagate)
@@ -105,16 +83,17 @@ def add_unlock_chord_task(app):
                     )
                 except StopIteration:
                     reason = repr(exc)
-
-                app._tasks[callback.task].backend.fail_from_current_stack(
-                    callback.id, exc=ChordError(reason),
-                )
+                logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
+                app.backend.chord_error_from_stack(callback,
+                                                   ChordError(reason))
             else:
                 try:
                     callback.delay(ret)
                 except Exception as exc:
-                    app._tasks[callback.task].backend.fail_from_current_stack(
-                        callback.id,
+                    logger.error('Chord %r raised: %r', group_id, exc,
+                                 exc_info=1)
+                    app.backend.chord_error_from_stack(
+                        callback,
                         exc=ChordError('Callback error: {0!r}'.format(exc)),
                     )
         else:
@@ -123,7 +102,7 @@ def add_unlock_chord_task(app):
     return unlock_chord
 
 
-@shared_task
+@connect_on_app_finalize
 def add_map_task(app):
     from celery.canvas import signature
 
@@ -134,7 +113,7 @@ def add_map_task(app):
     return xmap
 
 
-@shared_task
+@connect_on_app_finalize
 def add_starmap_task(app):
     from celery.canvas import signature
 
@@ -145,7 +124,7 @@ def add_starmap_task(app):
     return xstarmap
 
 
-@shared_task
+@connect_on_app_finalize
 def add_chunk_task(app):
     from celery.canvas import chunks as _chunks
 
@@ -155,182 +134,58 @@ def add_chunk_task(app):
     return chunks
 
 
-@shared_task
+@connect_on_app_finalize
 def add_group_task(app):
+    """No longer used, but here for backwards compatibility."""
     _app = app
-    from celery.canvas import maybe_signature, signature
+    from celery.canvas import maybe_signature
     from celery.result import result_from_tuple
 
     class Group(app.Task):
         app = _app
         name = 'celery.group'
-        accept_magic_kwargs = False
         _decorated = True
 
-        def run(self, tasks, result, group_id, partial_args):
+        def run(self, tasks, result, group_id, partial_args,
+                add_to_parent=True):
             app = self.app
             result = result_from_tuple(result, app)
             # any partial args are added to all tasks in the group
-            taskit = (signature(task, app=app).clone(partial_args)
+            taskit = (maybe_signature(task, app=app).clone(partial_args)
                       for i, task in enumerate(tasks))
-            if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
-                return app.GroupResult(
-                    result.id,
-                    [stask.apply(group_id=group_id) for stask in taskit],
-                )
             with app.producer_or_acquire() as pub:
-                [stask.apply_async(group_id=group_id, publisher=pub,
+                [stask.apply_async(group_id=group_id, producer=pub,
                                    add_to_parent=False) for stask in taskit]
             parent = get_current_worker_task()
-            if parent:
+            if add_to_parent and parent:
                 parent.add_trail(result)
             return result
-
-        def prepare(self, options, tasks, args, **kwargs):
-            options['group_id'] = group_id = (
-                options.setdefault('task_id', uuid()))
-
-            def prepare_member(task):
-                task = maybe_signature(task, app=self.app)
-                task.options['group_id'] = group_id
-                return task, task.freeze()
-
-            try:
-                tasks, res = list(zip(
-                    *[prepare_member(task) for task in tasks]
-                ))
-            except ValueError:  # tasks empty
-                tasks, res = [], []
-            return (tasks, self.app.GroupResult(group_id, res), group_id, args)
-
-        def apply_async(self, partial_args=(), kwargs={}, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(partial_args, kwargs, **options)
-            tasks, result, gid, args = self.prepare(
-                options, args=partial_args, **kwargs
-            )
-            super(Group, self).apply_async((
-                list(tasks), result.as_tuple(), gid, args), **options
-            )
-            return result
-
-        def apply(self, args=(), kwargs={}, **options):
-            return super(Group, self).apply(
-                self.prepare(options, args=args, **kwargs),
-                **options).get()
     return Group
 
 
-@shared_task
+@connect_on_app_finalize
 def add_chain_task(app):
-    from celery.canvas import (
-        Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
-    )
-
+    """No longer used, but here for backwards compatibility."""
    _app = app
 
     class Chain(app.Task):
         app = _app
         name = 'celery.chain'
-        accept_magic_kwargs = False
         _decorated = True
 
-        def prepare_steps(self, args, tasks):
-            app = self.app
-            steps = deque(tasks)
-            next_step = prev_task = prev_res = None
-            tasks, results = [], []
-            i = 0
-            while steps:
-                # First task get partial args from chain.
-                task = maybe_signature(steps.popleft(), app=app)
-                task = task.clone() if i else task.clone(args)
-                res = task.freeze()
-                i += 1
-
-                if isinstance(task, group):
-                    task = maybe_unroll_group(task)
-                if isinstance(task, chain):
-                    # splice the chain
-                    steps.extendleft(reversed(task.tasks))
-                    continue
-
-                elif isinstance(task, group) and steps and \
-                        not isinstance(steps[0], group):
-                    # automatically upgrade group(..) | s to chord(group, s)
-                    try:
-                        next_step = steps.popleft()
-                        # for chords we freeze by pretending it's a normal
-                        # task instead of a group.
-                        res = Signature.freeze(next_step)
-                        task = chord(task, body=next_step, task_id=res.task_id)
-                    except IndexError:
-                        pass  # no callback, so keep as group
-                if prev_task:
-                    # link previous task to this task.
-                    prev_task.link(task)
-                    # set the results parent attribute.
-                    if not res.parent:
-                        res.parent = prev_res
-
-                if not isinstance(prev_task, chord):
-                    results.append(res)
-                    tasks.append(task)
-                prev_task, prev_res = task, res
-
-            return tasks, results
-
-        def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
-                        task_id=None, link=None, link_error=None, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            options.pop('publisher', None)
-            tasks, results = self.prepare_steps(args, kwargs['tasks'])
-            result = results[-1]
-            if group_id:
-                tasks[-1].set(group_id=group_id)
-            if chord:
-                tasks[-1].set(chord=chord)
-            if task_id:
-                tasks[-1].set(task_id=task_id)
-                result = tasks[-1].type.AsyncResult(task_id)
-            # make sure we can do a link() and link_error() on a chain object.
-            if link:
-                tasks[-1].set(link=link)
-            # and if any task in the chain fails, call the errbacks
-            if link_error:
-                for task in tasks:
-                    task.set(link_error=link_error)
-            tasks[0].apply_async(**options)
-            return result
-
-        def apply(self, args=(), kwargs={}, signature=maybe_signature,
-                  **options):
-            app = self.app
-            last, fargs = None, args  # fargs passed to first task only
-            for task in kwargs['tasks']:
-                res = signature(task, app=app).clone(fargs).apply(
-                    last and (last.get(), ),
-                )
-                res.parent, last, fargs = last, res, None
-            return last
     return Chain


-@shared_task
+@connect_on_app_finalize
 def add_chord_task(app):
-    """Every chord is executed in a dedicated task, so that the chord
-    can be used as a signature, and this generates the task
-    responsible for that."""
-    from celery import group
+    """No longer used, but here for backwards compatibility."""
+    from celery import group, chord as _chord
     from celery.canvas import maybe_signature
     _app = app
-    default_propagate = app.conf.CELERY_CHORD_PROPAGATES

     class Chord(app.Task):
         app = _app
         name = 'celery.chord'
-        accept_magic_kwargs = False
         ignore_result = False
         _decorated = True

@@ -338,65 +193,13 @@ def add_chord_task(app):
                 countdown=1, max_retries=None, propagate=None,
                 eager=False, **kwargs):
             app = self.app
-            propagate = default_propagate if propagate is None else propagate
-            group_id = uuid()
-            AsyncResult = app.AsyncResult
-            prepare_member = self._prepare_member
-
             # - convert back to group if serialized
             tasks = header.tasks if isinstance(header, group) else header
             header = group([
-                maybe_signature(s, app=app).clone() for s in tasks
-            ])
-            # - eager applies the group inline
-            if eager:
-                return header.apply(args=partial_args, task_id=group_id)
-
-            results = [AsyncResult(prepare_member(task, body, group_id))
-                       for task in header.tasks]
-
-            return self.backend.apply_chord(
-                header, partial_args, group_id,
-                body, interval=interval, countdown=countdown,
-                max_retries=max_retries, propagate=propagate, result=results,
-            )
-
-        def _prepare_member(self, task, body, group_id):
-            opts = task.options
-            # d.setdefault would work but generating uuid's are expensive
-            try:
-                task_id = opts['task_id']
-            except KeyError:
-                task_id = opts['task_id'] = uuid()
-            opts.update(chord=body, group_id=group_id)
-            return task_id
-
-        def apply_async(self, args=(), kwargs={}, task_id=None,
-                        group_id=None, chord=None, **options):
-            app = self.app
-            if app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            header = kwargs.pop('header')
-            body = kwargs.pop('body')
-            header, body = (maybe_signature(header, app=app),
-                            maybe_signature(body, app=app))
-            # forward certain options to body
-            if chord is not None:
-                body.options['chord'] = chord
-            if group_id is not None:
-                body.options['group_id'] = group_id
-            [body.link(s) for s in options.pop('link', [])]
-            [body.link_error(s) for s in options.pop('link_error', [])]
-            body_result = body.freeze(task_id)
-            parent = super(Chord, self).apply_async((header, body, args),
-                                                    kwargs, **options)
-            body_result.parent = parent
-            return body_result
-
-        def apply(self, args=(), kwargs={}, propagate=True, **options):
-            body = kwargs['body']
-            res = super(Chord, self).apply(args, dict(kwargs, eager=True),
-                                           **options)
-            return maybe_signature(body, app=self.app).apply(
-                args=(res.get(propagate=propagate).get(), ))
+                maybe_signature(s, app=app) for s in tasks
+            ], app=self.app)
+            body = maybe_signature(body, app=app)
+            ch = _chord(header, body)
+            return ch.run(header, body, partial_args, app, interval,
+                          countdown, max_retries, propagate, **kwargs)
     return Chord

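Note: after this change the `celery.chord` compat task is only a thin wrapper that rebuilds a canvas `chord` and calls its `run()` method. A minimal sketch of the equivalent modern usage (the `add` and `tsum` tasks are hypothetical, invented for illustration):

    from celery import chord

    # header: a group of task signatures; body: the callback signature.
    # chord(header)(body) freezes the body and dispatches the header,
    # which is what Chord.run() above does internally.
    result = chord([add.s(2, 2), add.s(4, 4)])(tsum.s())
    # result.get() would eventually return tsum([4, 8]) == 12
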
+ 11 - 11
celery/app/control.py

@@ -15,26 +15,27 @@ from kombu.pidbox import Mailbox
 from kombu.utils import cached_property

 from celery.exceptions import DuplicateNodenameWarning
+from celery.utils.text import pluralize

 __all__ = ['Inspect', 'Control', 'flatten_reply']

 W_DUPNODE = """\
-Received multiple replies from node name {0!r}.
+Received multiple replies from node {0}: {1}.
 Please make sure you give each node a unique nodename using the `-n` option.\
 """


 def flatten_reply(reply):
-    nodes = {}
-    seen = set()
+    nodes, dupes = {}, set()
     for item in reply:
-        dup = next((nodename in seen for nodename in item), None)
-        if dup:
-            warnings.warn(DuplicateNodenameWarning(
-                W_DUPNODE.format(dup),
-            ))
-        seen.update(item)
+        [dupes.add(name) for name in item if name in nodes]
         nodes.update(item)
+    if dupes:
+        warnings.warn(DuplicateNodenameWarning(
+            W_DUPNODE.format(
+                pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
+            ),
+        ))
     return nodes

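The rewritten `flatten_reply` collects every node name seen more than once and warns a single time, instead of warning per reply. A hedged sketch of the behaviour (the reply payloads are invented for illustration):

    # two replies that both contain 'worker1@example.com'
    reply = [{'worker1@example.com': {'ok': 'pong'}},
             {'worker1@example.com': {'ok': 'pong'},
              'worker2@example.com': {'ok': 'pong'}}]
    nodes = flatten_reply(reply)
    # -> one DuplicateNodenameWarning naming 'worker1@example.com',
    #    and nodes holds a single entry per unique node name.
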
@@ -125,8 +126,7 @@ class Control(object):

     def __init__(self, app=None):
         self.app = app
-        self.mailbox = self.Mailbox('celery', type='fanout',
-                                    accept=self.app.conf.CELERY_ACCEPT_CONTENT)
+        self.mailbox = self.Mailbox('celery', type='fanout', accept=['json'])

     @cached_property
     def inspect(self):

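With this change the pidbox mailbox no longer inherits `CELERY_ACCEPT_CONTENT`: remote-control messages are always exchanged as json. Assuming a configured `app`, the inspect API itself is unchanged:

    # replies decode with the json serializer regardless of the
    # app's CELERY_ACCEPT_CONTENT setting.
    pong = app.control.inspect(timeout=1.0).ping()
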
+ 4 - 2
celery/app/defaults.py

@@ -124,7 +124,7 @@ NAMESPACES = {
         'IMPORTS': Option((), type='tuple'),
         'INCLUDE': Option((), type='tuple'),
         'IGNORE_RESULT': Option(False, type='bool'),
-        'MAX_CACHED_RESULTS': Option(5000, type='int'),
+        'MAX_CACHED_RESULTS': Option(100, type='int'),
         'MESSAGE_COMPRESSION': Option(type='string'),
         'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
         'REDIS_HOST': Option(type='string', **_REDIS_OLD),
@@ -146,6 +146,7 @@ NAMESPACES = {
         'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
         'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
         'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
+        'TASK_PROTOCOL': Option(1, type='int'),
         'TASK_PUBLISH_RETRY': Option(True, type='bool'),
         'TASK_PUBLISH_RETRY_POLICY': Option({
             'max_retries': 3,
@@ -196,6 +197,7 @@ NAMESPACES = {
         'SCHEDULE': Option({}, type='dict'),
         'SCHEDULER': Option('celery.beat:PersistentScheduler'),
         'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
+        'SYNC_EVERY': Option(0, type='int'),
         'MAX_LOOP_INTERVAL': Option(0, type='float'),
         'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
                             alt='--loglevel argument'),
@@ -232,7 +234,7 @@ def flatten(d, ns=''):
                 stack.append((name + key + '_', value))
             else:
                 yield name + key, value
-DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
+DEFAULTS = {key: value.default for key, value in flatten(NAMESPACES)}


 def find_deprecated_settings(source):

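`flatten` walks the nested NAMESPACES mapping and yields fully prefixed setting names, which the new dict comprehension collects into DEFAULTS. A small standalone sketch of the same pattern (plain values instead of celery's Option objects):

    NAMESPACES = {'CELERYD': {'LOG_COLOR': True}, 'BROKER_URL': 'amqp://'}

    def flatten(d, ns=''):
        stack = [(ns, d)]
        while stack:
            name, space = stack.pop()
            for key, value in space.items():
                if isinstance(value, dict):
                    stack.append((name + key + '_', value))
                else:
                    yield name + key, value

    DEFAULTS = {key: value for key, value in flatten(NAMESPACES)}
    # -> {'CELERYD_LOG_COLOR': True, 'BROKER_URL': 'amqp://'}
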
+ 12 - 7
celery/app/log.py

@@ -24,7 +24,7 @@ from kombu.utils.encoding import set_default_encoding_file
 from celery import signals
 from celery._state import get_current_task
 from celery.five import class_property, string_t
-from celery.utils import isatty
+from celery.utils import isatty, node_format
 from celery.utils.log import (
     get_logger, mlevel,
     ColorFormatter, ensure_process_aware_logger,
@@ -65,9 +65,9 @@ class Logging(object):
         self.colorize = self.app.conf.CELERYD_LOG_COLOR

     def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
-              redirect_level='WARNING', colorize=None):
+              redirect_level='WARNING', colorize=None, hostname=None):
         handled = self.setup_logging_subsystem(
-            loglevel, logfile, colorize=colorize,
+            loglevel, logfile, colorize=colorize, hostname=hostname,
         )
         if not handled:
             if redirect_stdouts:
@@ -87,10 +87,12 @@ class Logging(object):
             CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
         )

-    def setup_logging_subsystem(self, loglevel=None, logfile=None,
-                                format=None, colorize=None, **kwargs):
+    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
+                                colorize=None, hostname=None, **kwargs):
         if self.already_setup:
             return
+        if logfile and hostname:
+            logfile = node_format(logfile, hostname)
         self.already_setup = True
         loglevel = mlevel(loglevel or self.loglevel)
         format = format or self.format
@@ -107,6 +109,9 @@ class Logging(object):

             if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                 root.handlers = []
+                get_logger('celery').handlers = []
+                get_logger('celery.task').handlers = []
+                get_logger('celery.redirected').handlers = []

             # Configure root logger
             self._configure_logger(
@@ -228,8 +233,8 @@ class Logging(object):
         return WatchedFileHandler(logfile)

     def _has_handler(self, logger):
-        return (logger.handlers and
-                not isinstance(logger.handlers[0], NullHandler))
+        if logger.handlers:
+            return any(not isinstance(h, NullHandler) for h in logger.handlers)

     def _is_configured(self, logger):
         return self._has_handler(logger) and not getattr(

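`node_format` expands node-name placeholders, so a shared `--logfile` template can yield one file per worker node. A hedged sketch of the expansion this enables (the exact placeholder set is defined in `celery.utils`; `%h` is documented as the full nodename):

    from celery.utils import node_format

    # as called in setup_logging_subsystem above:
    logfile = node_format('%h.log', 'worker1@example.com')
    # -> 'worker1@example.com.log'
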
+ 2 - 2
celery/app/registry.py

@@ -57,8 +57,8 @@ class TaskRegistry(dict):
         return self.filter_types('periodic')

     def filter_types(self, type):
-        return dict((name, task) for name, task in items(self)
-                    if getattr(task, 'type', 'regular') == type)
+        return {name: task for name, task in items(self)
+                if getattr(task, 'type', 'regular') == type}


 def _unpickle_task(name):

+ 50 - 42
celery/app/task.py

@@ -20,7 +20,7 @@ from celery.exceptions import MaxRetriesExceededError, Reject, Retry
 from celery.five import class_property, items, with_metaclass
 from celery.local import Proxy
 from celery.result import EagerResult
-from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, uuid, maybe_reraise
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.mail import ErrorMail
@@ -93,6 +93,8 @@ class Context(object):
     headers = None
     delivery_info = None
     reply_to = None
+    root_id = None
+    parent_id = None
     correlation_id = None
     taskset = None   # compat alias to group
     group = None
@@ -176,14 +178,14 @@ class TaskType(type):
             # Hairy stuff,  here to be compatible with 2.x.
             # People should not use non-abstract task classes anymore,
             # use the task decorator.
-            from celery.app.builtins import shared_task
+            from celery._state import connect_on_app_finalize
             unique_name = '.'.join([task_module, name])
             if unique_name not in cls._creation_count:
                 # the creation count is used as a safety
                 # so that the same task is not added recursively
                 # to the set of constructors.
                 cls._creation_count[unique_name] = 1
-                shared_task(_CompatShared(
+                connect_on_app_finalize(_CompatShared(
                     unique_name,
                     lambda app: TaskType.__new__(cls, name, bases,
                                                  dict(attrs, _app=app)),
@@ -235,10 +237,6 @@ class Task(object):
     #: If :const:`True` the task is an abstract base class.
     abstract = True

-    #: If disabled the worker will not forward magic keyword arguments.
-    #: Deprecated and scheduled for removal in v4.0.
-    accept_magic_kwargs = False
-
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: it will **never** stop retrying.
     max_retries = 3
@@ -313,7 +311,7 @@ class Task(object):
     #: :setting:`CELERY_ACKS_LATE` setting.
     acks_late = None

-    #: List/tuple of expected exceptions.
+    #: Tuple of expected exceptions.
     #:
     #: These are errors that are expected in normal operation
     #: and that should not be regarded as a real error by the worker.
@@ -343,6 +341,11 @@ class Task(object):
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )

+    #: ignored
+    accept_magic_kwargs = False
+
+    _backend = None  # set by backend property.
+
     __bound__ = False

     # - Tasks are lazily bound, so that configuration is not set
@@ -358,9 +361,6 @@ class Task(object):
         for attr_name, config_name in self.from_config:
             if getattr(self, attr_name, None) is None:
                 setattr(self, attr_name, conf[config_name])
-        if self.accept_magic_kwargs is None:
-            self.accept_magic_kwargs = app.accept_magic_kwargs
-        self.backend = app.backend

         # decorate with annotations from config.
         if not was_bound:
@@ -524,7 +524,7 @@ class Task(object):
         :keyword link_error: A single, or a list of tasks to apply
                       if an error occurs while executing the task.

-        :keyword producer: :class:~@amqp.TaskProducer` instance to use.
+        :keyword producer: :class:`kombu.Producer` instance to use.
         :keyword add_to_parent: If set to True (default) and the task
             is applied while executing another task, then the result
             will be appended to the parent tasks ``request.children``
@@ -554,13 +554,13 @@ class Task(object):
             **dict(self._get_exec_options(), **options)
         )

-    def subtask_from_request(self, request=None, args=None, kwargs=None,
-                             **extra_options):
+    def signature_from_request(self, request=None, args=None, kwargs=None,
+                               queue=None, **extra_options):
         request = self.request if request is None else request
         args = request.args if args is None else args
         kwargs = request.kwargs if kwargs is None else kwargs
         limit_hard, limit_soft = request.timelimit or (None, None)
-        options = dict({
+        options = {
             'task_id': request.id,
             'link': request.callbacks,
             'link_error': request.errbacks,
@@ -568,8 +568,14 @@ class Task(object):
             'chord': request.chord,
             'soft_time_limit': limit_soft,
             'time_limit': limit_hard,
-        }, **request.delivery_info or {})
-        return self.subtask(args, kwargs, options, type=self, **extra_options)
+        }
+        options.update(
+            {'queue': queue} if queue else (request.delivery_info or {})
+        )
+        return self.signature(
+            args, kwargs, options, type=self, **extra_options
+        )
+    subtask_from_request = signature_from_request

     def retry(self, args=None, kwargs=None, exc=None, throw=True,
               eta=None, countdown=None, max_retries=None, **options):
@@ -643,7 +649,7 @@ class Task(object):
             countdown = self.default_retry_delay

         is_eager = request.is_eager
-        S = self.subtask_from_request(
+        S = self.signature_from_request(
             request, args, kwargs,
             countdown=countdown, eta=eta, retries=retries,
             **options
@@ -686,7 +692,7 @@ class Task(object):

         """
         # trace imports Task, so need to import inline.
-        from celery.app.trace import eager_trace_task
+        from celery.app.trace import build_tracer

         app = self._get_app()
         args = args or ()
@@ -709,28 +715,18 @@ class Task(object):
                    'loglevel': options.get('loglevel', 0),
                    'callbacks': maybe_list(link),
                    'errbacks': maybe_list(link_error),
+                   'headers': options.get('headers'),
                    'delivery_info': {'is_eager': True}}
-        if self.accept_magic_kwargs:
-            default_kwargs = {'task_name': task.name,
-                              'task_id': task_id,
-                              'task_retries': retries,
-                              'task_is_eager': True,
-                              'logfile': options.get('logfile'),
-                              'loglevel': options.get('loglevel', 0),
-                              'delivery_info': {'is_eager': True}}
-            supported_keys = fun_takes_kwargs(task.run, default_kwargs)
-            extend_with = dict((key, val)
-                               for key, val in items(default_kwargs)
-                               if key in supported_keys)
-            kwargs.update(extend_with)
-
         tb = None
-        retval, info = eager_trace_task(task, task_id, args, kwargs,
-                                        app=self._get_app(),
-                                        request=request, propagate=throw)
+        tracer = build_tracer(
+            task.name, task, eager=True,
+            propagate=throw, app=self._get_app(),
+        )
+        ret = tracer(task_id, args, kwargs, request)
+        retval = ret.retval
         if isinstance(retval, ExceptionInfo):
             retval, tb = retval.exception, retval.traceback
-        state = states.SUCCESS if info is None else info.state
+        state = states.SUCCESS if ret.info is None else ret.info.state
         return EagerResult(task_id, retval, state, traceback=tb)

     def AsyncResult(self, task_id, **kwargs):
@@ -742,20 +738,21 @@ class Task(object):
         return self._get_app().AsyncResult(task_id, backend=self.backend,
                                            task_name=self.name, **kwargs)

-    def subtask(self, args=None, *starargs, **starkwargs):
+    def signature(self, args=None, *starargs, **starkwargs):
         """Return :class:`~celery.signature` object for
         this task, wrapping arguments and execution options
         for a single task invocation."""
         starkwargs.setdefault('app', self.app)
         return signature(self, args, *starargs, **starkwargs)
+    subtask = signature

     def s(self, *args, **kwargs):
-        """``.s(*a, **k) -> .subtask(a, k)``"""
-        return self.subtask(args, kwargs)
+        """``.s(*a, **k) -> .signature(a, k)``"""
+        return self.signature(args, kwargs)

     def si(self, *args, **kwargs):
-        """``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
-        return self.subtask(args, kwargs, immutable=True)
+        """``.si(*a, **k) -> .signature(a, k, immutable=True)``"""
+        return self.signature(args, kwargs, immutable=True)

     def chunks(self, it, n):
         """Creates a :class:`~celery.canvas.chunks` task for this task."""
@@ -899,6 +896,17 @@ class Task(object):
             self._exec_options = extract_exec_options(self)
         return self._exec_options

+    @property
+    def backend(self):
+        backend = self._backend
+        if backend is None:
+            return self.app.backend
+        return backend
+
+    @backend.setter
+    def backend(self, value):  # noqa
+        self._backend = value
+
     @property
     def __name__(self):
         return self.__class__.__name__

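`subtask` survives as an alias of `signature`, so both spellings build the same signature object, and the new `backend` descriptor falls back to `app.backend` unless a task assigns its own. A small sketch (the `add` task is hypothetical):

    sig = add.signature((2, 2), countdown=10)   # new name
    sig = add.subtask((2, 2), countdown=10)     # old name, same object type
    sig = add.s(2, 2)                           # positional shortcut
    sig = add.si(2, 2)                          # immutable: ignores parent result
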
+ 210 - 39
celery/app/trace.py

@@ -15,33 +15,84 @@ from __future__ import absolute_import
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # so instead we now use one closure per task class.

+import logging
 import os
 import socket
 import sys

+from collections import namedtuple
 from warnings import warn

 from billiard.einfo import ExceptionInfo
 from kombu.exceptions import EncodeError
-from kombu.utils import kwdict
+from kombu.serialization import loads as loads_message, prepare_accept_content
+from kombu.utils.encoding import safe_repr, safe_str

-from celery import current_app
+from celery import current_app, group
 from celery import states, signals
 from celery._state import _task_stack
 from celery.app import set_default_app
 from celery.app.task import Task as BaseTask, Context
-from celery.exceptions import Ignore, Reject, Retry
+from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
+from celery.five import monotonic
 from celery.utils.log import get_logger
 from celery.utils.objects import mro_lookup
 from celery.utils.serialization import (
-    get_pickleable_exception,
-    get_pickleable_etype,
+    get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
 )
+from celery.utils.text import truncate

-__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
+__all__ = ['TraceInfo', 'build_tracer', 'trace_task',
            'setup_worker_optimizations', 'reset_worker_optimizations']

-_logger = get_logger(__name__)
+logger = get_logger(__name__)
+info = logger.info
+
+#: Format string used to log task success.
+LOG_SUCCESS = """\
+Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
+"""
+
+#: Format string used to log task failure.
+LOG_FAILURE = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task internal error.
+LOG_INTERNAL_ERROR = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task ignored.
+LOG_IGNORED = """\
+Task %(name)s[%(id)s] %(description)s\
+"""
+
+#: Format string used to log task rejected.
+LOG_REJECTED = """\
+Task %(name)s[%(id)s] %(exc)s\
+"""
+
+#: Format string used to log task retry.
+LOG_RETRY = """\
+Task %(name)s[%(id)s] retry: %(exc)s\
+"""
+
+log_policy_t = namedtuple(
+    'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
+)
+
+log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
+log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
+log_policy_internal = log_policy_t(
+    LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
+)
+log_policy_expected = log_policy_t(
+    LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
+)
+log_policy_unexpected = log_policy_t(
+    LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
+)
 
 
 send_prerun = signals.task_prerun.send
 send_postrun = signals.task_postrun.send
@@ -56,9 +107,11 @@ EXCEPTION_STATES = states.EXCEPTION_STATES
 IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])

 #: set by :func:`setup_worker_optimizations`
-_tasks = None
+_localized = []
 _patched = {}

+trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
+

 def task_has_custom(task, attr):
     """Return true if the task or one of its bases
     defines ``attr`` (excluding the one in BaseTask)."""
     return mro_lookup(task.__class__, attr, stop=(BaseTask, object),
                       monkey_patched=['celery.app.task'])

+def get_log_policy(task, einfo, exc):
+    if isinstance(exc, Reject):
+        return log_policy_reject
+    elif isinstance(exc, Ignore):
+        return log_policy_ignore
+    elif einfo.internal:
+        return log_policy_internal
+    else:
+        if task.throws and isinstance(exc, task.throws):
+            return log_policy_expected
+        return log_policy_unexpected
+
+
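`get_log_policy` picks one of the five policies above from the exception type and trace state; notably, an exception listed in `Task.throws` is logged at INFO with no traceback and no error mail. A hedged sketch of the selection (the `task` and `einfo` objects are elided stand-ins):

    # hypothetical task with throws=(KeyError,) and a non-internal einfo
    policy = get_log_policy(task, einfo, KeyError('missing'))
    assert policy is log_policy_expected
    assert policy.severity == logging.INFO and not policy.traceback
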
 class TraceInfo(object):
     __slots__ = ('state', 'retval')

@@ -84,6 +150,12 @@ class TraceInfo(object):
             FAILURE: self.handle_failure,
         }[self.state](task, store_errors=store_errors)

+    def handle_reject(self, task, **kwargs):
+        self._log_error(task, ExceptionInfo())
+
+    def handle_ignore(self, task, **kwargs):
+        self._log_error(task, ExceptionInfo())
+
     def handle_retry(self, task, store_errors=True):
         """Handle retry exception."""
         # the exception raised is the Retry semi-predicate,
@@ -100,6 +172,10 @@ class TraceInfo(object):
             task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
             signals.task_retry.send(sender=task, request=req,
                                     reason=reason, einfo=einfo)
+            info(LOG_RETRY, {
+                'id': req.id, 'name': task.name,
+                'exc': safe_repr(reason.exc),
+            })
             return einfo
         finally:
             del(tb)
@@ -123,14 +199,47 @@ class TraceInfo(object):
                                       kwargs=req.kwargs,
                                       traceback=tb,
                                       einfo=einfo)
+            self._log_error(task, einfo)
             return einfo
         finally:
             del(tb)

+    def _log_error(self, task, einfo):
+        req = task.request
+        eobj = einfo.exception = get_pickled_exception(einfo.exception)
+        exception, traceback, exc_info, sargs, skwargs = (
+            safe_repr(eobj),
+            safe_str(einfo.traceback),
+            einfo.exc_info,
+            safe_repr(req.args),
+            safe_repr(req.kwargs),
+        )
+        policy = get_log_policy(task, einfo, eobj)
+
+        context = {
+            'hostname': req.hostname,
+            'id': req.id,
+            'name': task.name,
+            'exc': exception,
+            'traceback': traceback,
+            'args': sargs,
+            'kwargs': skwargs,
+            'description': policy.description,
+            'internal': einfo.internal,
+        }
+
+        logger.log(policy.severity, policy.format.strip(), context,
+                   exc_info=exc_info if policy.traceback else None,
+                   extra={'data': context})
+
+        if policy.mail:
+            task.send_error_email(context, einfo.exception)
+
 
 
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
-                 IGNORE_STATES=IGNORE_STATES):
+                 monotonic=monotonic, truncate=truncate,
+                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
     """Return a function that traces task execution; catches all
     exceptions and updates result backend with the state and result

@@ -186,6 +295,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     push_task = _task_stack.push
     pop_task = _task_stack.pop
     on_chord_part_return = backend.on_chord_part_return
+    _does_info = logger.isEnabledFor(logging.INFO)

     prerun_receivers = signals.task_prerun.receivers
     postrun_receivers = signals.task_postrun.receivers
@@ -200,13 +310,17 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         I = Info(state, exc)
         R = I.handle_error_state(task, eager=eager)
         if call_errbacks:
-            [signature(errback, app=app).apply_async((uuid, ))
-             for errback in request.errbacks or []]
+            group(
+                [signature(errback, app=app)
+                 for errback in request.errbacks or []], app=app,
+            ).apply_async((uuid, ))
         return I, R, I.state, I.retval

     def trace_task(uuid, args, kwargs, request=None):
         # R      - is the possibly prepared return value.
         # I      - is the Info object.
+        # T      - runtime
+        # Rstr   - textual representation of return value
         # retval - is the always unmodified return value.
         # state  - is the resulting task state.

@@ -214,9 +328,14 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         # for performance reasons, and because the function is so long
         # we want the main variables (I, and R) to stand out visually from the
         # the rest of the variables, so breaking PEP8 is worth it ;)
-        R = I = retval = state = None
-        kwargs = kwdict(kwargs)
+        R = I = T = Rstr = retval = state = None
+        time_start = monotonic()
         try:
+            try:
+                kwargs.items
+            except AttributeError:
+                raise InvalidTaskError(
+                    'Task keyword arguments is not a mapping')
             push_task(task)
             task_request = Context(request or {}, args=args,
                                    called_directly=False, kwargs=kwargs)
@@ -240,9 +359,11 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 except Reject as exc:
                     I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                     state, retval = I.state, I.retval
+                    I.handle_reject(task)
                 except Ignore as exc:
                     I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                     state, retval = I.state, I.retval
+                    I.handle_ignore(task)
                 except Retry as exc:
                     I, R, state, retval = on_error(
                         task_request, exc, uuid, RETRY, call_errbacks=False,
@@ -255,8 +376,27 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     try:
                         # callback tasks must be applied before the result is
                         # stored, so that result.children is populated.
-                        [signature(callback, app=app).apply_async((retval, ))
-                            for callback in task_request.callbacks or []]
+
+                        # groups are called inline and will store trail
+                        # separately, so need to call them separately
+                        # so that the trail's not added multiple times :(
+                        # (Issue #1936)
+                        callbacks = task.request.callbacks
+                        if callbacks:
+                            if len(task.request.callbacks) > 1:
+                                sigs, groups = [], []
+                                for sig in callbacks:
+                                    sig = signature(sig, app=app)
+                                    if isinstance(sig, group):
+                                        groups.append(sig)
+                                    else:
+                                        sigs.append(sig)
+                                for group_ in groups:
+                                    group_.apply_async((retval, ))
+                                if sigs:
+                                    group(sigs).apply_async((retval, ))
+                            else:
+                                signature(callbacks[0], app=app).delay(retval)
                         if publish_result:
                             store_result(
                                 uuid, retval, SUCCESS, request=task_request,
@@ -268,11 +408,18 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                             task_on_success(retval, uuid, args, kwargs)
                         if success_receivers:
                             send_success(sender=task, result=retval)
+                        if _does_info:
+                            T = monotonic() - time_start
+                            Rstr = truncate(safe_repr(R), 256)
+                            info(LOG_SUCCESS, {
+                                'id': uuid, 'name': name,
+                                'return_value': Rstr, 'runtime': T,
+                            })

                 # -* POST *-
                 if state not in IGNORE_STATES:
                     if task_request.chord:
-                        on_chord_part_return(task)
+                        on_chord_part_return(task, state, R)
                     if task_after_return:
                         task_after_return(
                             state, retval, uuid, args, kwargs, None,
@@ -293,15 +440,15 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                         except (KeyboardInterrupt, SystemExit, MemoryError):
                             raise
                         except Exception as exc:
-                            _logger.error('Process cleanup failed: %r', exc,
-                                          exc_info=True)
+                            logger.error('Process cleanup failed: %r', exc,
+                                         exc_info=True)
         except MemoryError:
             raise
         except Exception as exc:
             if eager:
                 raise
             R = report_internal_error(task, exc)
-        return R, I
+        return trace_ok_t(R, I, T, Rstr)

     return trace_task

@@ -310,33 +457,55 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
     try:
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
-        return task.__trace__(uuid, args, kwargs, request)[0]
+        return task.__trace__(uuid, args, kwargs, request)
     except Exception as exc:
         return report_internal_error(task, exc)


-def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
-    return trace_task((app or current_app).tasks[name],
-                      uuid, args, kwargs, request, app=app, **opts)
+def _trace_task_ret(name, uuid, request, body, content_type,
+                    content_encoding, loads=loads_message, app=None,
+                    **extra_request):
+    app = app or current_app._get_current_object()
+    accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+    args, kwargs = loads(body, content_type, content_encoding, accept=accept)
+    request.update(args=args, kwargs=kwargs, **extra_request)
+    R, I, T, Rstr = trace_task(app.tasks[name],
+                               uuid, args, kwargs, request, app=app)
+    return (1, R, T) if I else (0, Rstr, T)
 trace_task_ret = _trace_task_ret


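Both tracer entry points now return a 3-tuple instead of the bare return value: the first element flags failure, the second is the error value (on failure) or the truncated repr of the result, and the third is the runtime. A hedged reading of the convention used by `_trace_task_ret` above and `_fast_trace_task` below:

    failed, payload, runtime = trace_task_ret(
        name, uuid, request, body, content_type, content_encoding)
    # failed == 1 -> payload derives from the handled exception (R)
    # failed == 0 -> payload is the textual result (Rstr); runtime in seconds
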
-def _fast_trace_task(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
     # setup_worker_optimizations will point trace_task_ret to here,
     # so this is the function used in the worker.
-    return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
-
-
-def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
-    opts.setdefault('eager', True)
-    return build_tracer(task.name, task, **opts)(
-        uuid, args, kwargs, request)
+    tasks, _ = _loc
+    R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)
+    # exception instance if error, else result text
+    return (1, R, T) if I else (0, Rstr, T)
+
+
+def _fast_trace_task(task, uuid, request, body, content_type,
+                     content_encoding, loads=loads_message, _loc=_localized,
+                     hostname=None, **_):
+    tasks, accept = _loc
+    if content_type:
+        args, kwargs = loads(body, content_type, content_encoding,
+                             accept=accept)
+    else:
+        args, kwargs = body
+    request.update({
+        'args': args, 'kwargs': kwargs, 'hostname': hostname,
+    })
+    R, I, T, Rstr = tasks[task].__trace__(
+        uuid, args, kwargs, request,
+    )
+    return (1, R, T) if I else (0, Rstr, T)
 
 
 
 
 def report_internal_error(task, exc):
     _type, _value, _tb = sys.exc_info()
     try:
-        _value = task.backend.prepare_exception(exc)
+        _value = task.backend.prepare_exception(exc, 'pickle')
         exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
         warn(RuntimeWarning(
             'Exception raised outside body: {0!r}:\n{1}'.format(
@@ -347,7 +516,6 @@ def report_internal_error(task, exc):
 
 
 
 
 def setup_worker_optimizations(app):
-    global _tasks
     global trace_task_ret

     # make sure custom Task.__call__ methods that calls super
@@ -367,12 +535,15 @@ def setup_worker_optimizations(app):
     app.finalize()

     # set fast shortcut to task registry
-    _tasks = app._tasks
+    _localized[:] = [
+        app._tasks,
+        prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+    ]

     trace_task_ret = _fast_trace_task
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _fast_trace_task
-    job_module.__optimize__()
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _fast_trace_task
+    request_module.__optimize__()


 def reset_worker_optimizations():
@@ -386,8 +557,8 @@ def reset_worker_optimizations():
         BaseTask.__call__ = _patched.pop('BaseTask.__call__')
     except KeyError:
         pass
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _trace_task_ret
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _trace_task_ret


 def _install_stack_protection():

+ 15 - 10
celery/app/utils.py

@@ -15,6 +15,8 @@ import re
 from collections import Mapping
 from types import ModuleType

+from kombu.utils.url import maybe_sanitize_url
+
 from celery.datastructures import ConfigurationView
 from celery.five import items, string_t, values
 from celery.platforms import pyimplementation
@@ -117,11 +119,11 @@ class Settings(ConfigurationView):

     def table(self, with_defaults=False, censored=True):
         filt = filter_hidden_settings if censored else lambda v: v
-        return filt(dict(
-            (k, v) for k, v in items(
+        return filt({
+            k: v for k, v in items(
                 self if with_defaults else self.without_defaults())
             if k.isupper() and not k.startswith('_')
-        ))
+        })

     def humanize(self, with_defaults=False, censored=True):
         """Return a human readable string showing changes to the
@@ -152,7 +154,6 @@ class AppPickler(object):
         return dict(main=main, loader=loader, backend=backend, amqp=amqp,
                     changes=changes, events=events, log=log, control=control,
                     set_as_current=False,
-                    accept_magic_kwargs=accept_magic_kwargs,
                     config_source=config_source)

     def construct(self, cls, **kwargs):
@@ -175,14 +176,18 @@ def filter_hidden_settings(conf):
     def maybe_censor(key, value, mask='*' * 8):
         if isinstance(value, Mapping):
             return filter_hidden_settings(value)
-        if isinstance(value, string_t) and HIDDEN_SETTINGS.search(key):
-            return mask
-        if isinstance(key, string_t) and 'BROKER_URL' in key.upper():
-            from kombu import Connection
-            return Connection(value).as_uri(mask=mask)
+        if isinstance(key, string_t):
+            if HIDDEN_SETTINGS.search(key):
+                return mask
+            elif 'BROKER_URL' in key.upper():
+                from kombu import Connection
+                return Connection(value).as_uri(mask=mask)
+            elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'):
+                return maybe_sanitize_url(value, mask=mask)
+
         return value

-    return dict((k, maybe_censor(k, v)) for k, v in items(conf))
+    return {k: maybe_censor(k, v) for k, v in items(conf)}


 def bugreport(app):

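`filter_hidden_settings` now also sanitizes result-backend URLs via kombu's `maybe_sanitize_url`, alongside the existing broker-URL and key-pattern masking. Illustrative input/output (the values are invented):

    conf = {'BROKER_URL': 'amqp://user:secret@localhost//',
            'CELERY_RESULT_BACKEND': 'redis://:secret@localhost:6379/0',
            'API_KEY': 'abcd'}
    filter_hidden_settings(conf)
    # -> BROKER_URL and CELERY_RESULT_BACKEND returned with '********'
    #    in place of the password; API_KEY fully masked, since it
    #    matches the HIDDEN_SETTINGS pattern.
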
+ 10 - 6
celery/apps/beat.py

@@ -10,12 +10,14 @@
     and so on.

 """
-from __future__ import absolute_import, unicode_literals
+from __future__ import absolute_import, print_function, unicode_literals

+import numbers
 import socket
 import sys

 from celery import VERSION_BANNER, platforms, beat
+from celery.five import text_t
 from celery.utils.imports import qualname
 from celery.utils.log import LOG_LEVELS, get_logger
 from celery.utils.timeutils import humanize_seconds
@@ -66,7 +68,7 @@ class Beat(object):
         )
         self.pidfile = pidfile

-        if not isinstance(self.loglevel, int):
+        if not isinstance(self.loglevel, numbers.Integral):
             self.loglevel = LOG_LEVELS[self.loglevel.upper()]

     def _getopt(self, key, value):
@@ -97,10 +99,12 @@ class Beat(object):
                             scheduler_cls=self.scheduler_cls,
                             schedule_filename=self.schedule)

-        print(str(c.blue('__    ', c.magenta('-'),
-                  c.blue('    ... __   '), c.magenta('-'),
-                  c.blue('        _\n'),
-                  c.reset(self.startup_info(beat)))))
+        print(text_t(   # noqa (pyflakes chokes on print)
+            c.blue('__    ', c.magenta('-'),
+            c.blue('    ... __   '), c.magenta('-'),
+            c.blue('        _\n'),
+            c.reset(self.startup_info(beat))),
+        ))
         self.setup_logging()
         if self.socket_timeout:
             logger.debug('Setting default socket timeout to %r',

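The isinstance(self.loglevel, numbers.Integral) check above is broader than int: it also accepts Python 2 long and other integral types, so only genuine level names are routed through LOG_LEVELS. A minimal sketch of the same normalization (LOG_LEVELS stubbed here):

    import logging
    import numbers

    LOG_LEVELS = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO}

    def normalize_loglevel(loglevel):
        # names are looked up; any integral value passes straight through
        if not isinstance(loglevel, numbers.Integral):
            loglevel = LOG_LEVELS[loglevel.upper()]
        return loglevel

    assert normalize_loglevel('info') == logging.INFO
    assert normalize_loglevel(10) == 10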
+ 15 - 10
celery/apps/worker.py

@@ -22,6 +22,7 @@ from functools import partial
 
 from billiard import current_process
 from kombu.utils.encoding import safe_str
+from kombu.utils.url import maybe_sanitize_url
 
 from celery import VERSION_BANNER, platforms, signals
 from celery.app import trace
@@ -30,7 +31,7 @@ from celery.exceptions import (
 )
 from celery.five import string, string_t
 from celery.loaders.app import AppLoader
-from celery.platforms import check_privileges
+from celery.platforms import EX_FAILURE, EX_OK, check_privileges
 from celery.utils import cry, isatty
 from celery.utils.imports import qualname
 from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
@@ -165,10 +166,10 @@ class Worker(WorkController):
 
         # Dump configuration to screen so we have some basic information
         # for when users sends bug reports.
-        print(''.join([
+        print(safe_str(''.join([
             string(self.colored.cyan(' \n', self.startup_info())),
             string(self.colored.reset(self.extra_info() or '')),
-        ]), file=sys.__stdout__)
+        ])), file=sys.__stdout__)
         self.set_process_status('-active-')
         self.install_platform_tweaks(self)
 
@@ -181,7 +182,7 @@ class Worker(WorkController):
             colorize = not self.no_color
         return self.app.log.setup(
             self.loglevel, self.logfile,
-            redirect_stdouts=False, colorize=colorize,
+            redirect_stdouts=False, colorize=colorize, hostname=self.hostname,
         )
 
     def purge_messages(self):
@@ -227,7 +228,9 @@ class Worker(WorkController):
             hostname=safe_str(self.hostname),
             version=VERSION_BANNER,
             conninfo=self.app.connection().as_uri(),
-            results=self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+            results=maybe_sanitize_url(
+                self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+            ),
             concurrency=concurrency,
             platform=safe_str(_platform.platform()),
             events=events,
@@ -277,7 +280,7 @@ class Worker(WorkController):
 
 
 def _shutdown_handler(worker, sig='TERM', how='Warm',
-                      exc=WorkerShutdown, callback=None):
+                      exc=WorkerShutdown, callback=None, exitcode=EX_OK):
 
     def _handle_request(*args):
         with in_sighandler():
@@ -288,9 +291,9 @@ def _shutdown_handler(worker, sig='TERM', how='Warm',
                 safe_say('worker: {0} shutdown (MainProcess)'.format(how))
             if active_thread_count() > 1:
                 setattr(state, {'Warm': 'should_stop',
-                                'Cold': 'should_terminate'}[how], True)
+                                'Cold': 'should_terminate'}[how], exitcode)
             else:
-                raise exc()
+                raise exc(exitcode)
     _handle_request.__name__ = str('worker_{0}'.format(how))
     platforms.signals[sig] = _handle_request
 install_worker_term_handler = partial(
@@ -299,6 +302,7 @@ install_worker_term_handler = partial(
 if not is_jython:  # pragma: no cover
     install_worker_term_hard_handler = partial(
         _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate,
+        exitcode=EX_FAILURE,
     )
 else:  # pragma: no cover
     install_worker_term_handler = \
@@ -310,7 +314,8 @@ def on_SIGINT(worker):
     install_worker_term_hard_handler(worker, sig='SIGINT')
 if not is_jython:  # pragma: no cover
     install_worker_int_handler = partial(
-        _shutdown_handler, sig='SIGINT', callback=on_SIGINT
+        _shutdown_handler, sig='SIGINT', callback=on_SIGINT,
+        exitcode=EX_FAILURE,
    )
 else:  # pragma: no cover
     install_worker_int_handler = lambda *a, **kw: None
@@ -332,7 +337,7 @@ def install_worker_restart_handler(worker, sig='SIGHUP'):
         import atexit
         atexit.register(_reload_current_worker)
         from celery.worker import state
-        state.should_stop = True
+        state.should_stop = EX_OK
     platforms.signals[sig] = restart_worker_sig_handler
 
 
 
 

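The worker.py hunks above thread an exit code through the signal handlers: should_stop/should_terminate now carry EX_OK or EX_FAILURE instead of a bare True, and the shutdown exceptions are raised with that code. A simplified, stand-alone sketch of the pattern (state and the sysexits values here are stand-ins for celery.worker.state and celery.platforms):

    import signal

    EX_OK, EX_FAILURE = 0, 1

    class state(object):
        should_stop = None          # None = keep running; an int = exit code

    def install_shutdown_handler(sig=signal.SIGTERM, exitcode=EX_OK):
        def handler(signum, frame):
            # store the exit code rather than True, so the main loop
            # can hand it to sys.exit() when it unwinds.
            state.should_stop = exitcode
        signal.signal(sig, handler)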
+ 0 - 2
celery/backends/__init__.py

@@ -16,7 +16,6 @@ from celery.local import Proxy
 from celery._state import current_app
 from celery.five import reraise
 from celery.utils.imports import symbol_by_name
-from celery.utils.functional import memoize
 
 __all__ = ['get_backend_cls', 'get_backend_by_url']
 
@@ -41,7 +40,6 @@ BACKEND_ALIASES = {
 default_backend = Proxy(lambda: current_app.backend)
 
 
-@memoize(100)
 def get_backend_cls(backend=None, loader=None):
     """Get backend class by name/alias"""
     backend = backend or 'disabled'

+ 16 - 8
celery/backends/amqp.py

@@ -141,6 +141,7 @@ class AMQPBackend(BaseBackend):
         return [self._create_binding(task_id)]
 
     def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
+                 no_ack=True, on_interval=None,
                  READY_STATES=states.READY_STATES,
                  PROPAGATE_STATES=states.PROPAGATE_STATES,
                  **kwargs):
@@ -150,7 +151,8 @@ class AMQPBackend(BaseBackend):
             meta = cached_meta
         else:
             try:
-                meta = self.consume(task_id, timeout=timeout)
+                meta = self.consume(task_id, timeout=timeout, no_ack=no_ack,
+                                    on_interval=on_interval)
             except socket.timeout:
                 raise TimeoutError('The operation timed out.')
 
@@ -167,15 +169,18 @@ class AMQPBackend(BaseBackend):
 
             prev = latest = acc = None
             for i in range(backlog_limit):  # spool ffwd
-                prev, latest, acc = latest, acc, binding.get(
+                acc = binding.get(
                     accept=self.accept, no_ack=False,
                 )
                 if not acc:  # no more messages
                     break
+                if acc.payload['task_id'] == task_id:
+                    prev, latest = latest, acc
                 if prev:
                     # backends are not expected to keep history,
                     # so we delete everything except the most recent state.
                     prev.ack()
+                    prev = None
             else:
                 raise self.BacklogLimitExceeded(task_id)
 
@@ -193,7 +198,7 @@ class AMQPBackend(BaseBackend):
     poll = get_task_meta  # XXX compat
 
     def drain_events(self, connection, consumer,
-                     timeout=None, now=monotonic, wait=None):
+                     timeout=None, on_interval=None, now=monotonic, wait=None):
         wait = wait or connection.drain_events
         results = {}
 
@@ -209,27 +214,30 @@ class AMQPBackend(BaseBackend):
             if timeout and now() - time_start >= timeout:
                 raise socket.timeout()
             wait(timeout=timeout)
+            if on_interval:
+                on_interval()
             if results:  # got event on the wanted channel.
                 break
         self._cache.update(results)
         return results
 
-    def consume(self, task_id, timeout=None):
+    def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
         wait = self.drain_events
         with self.app.pool.acquire_channel(block=True) as (conn, channel):
             binding = self._create_binding(task_id)
             with self.Consumer(channel, binding,
-                               no_ack=True, accept=self.accept) as consumer:
+                               no_ack=no_ack, accept=self.accept) as consumer:
                 while 1:
                     try:
-                        return wait(conn, consumer, timeout)[task_id]
+                        return wait(
+                            conn, consumer, timeout, on_interval)[task_id]
                     except KeyError:
                         continue
 
     def _many_bindings(self, ids):
         return [self._create_binding(task_id) for task_id in ids]
 
-    def get_many(self, task_ids, timeout=None,
+    def get_many(self, task_ids, timeout=None, no_ack=True,
                  now=monotonic, getfields=itemgetter('status', 'task_id'),
                  READY_STATES=states.READY_STATES,
                  PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
@@ -263,7 +271,7 @@ class AMQPBackend(BaseBackend):
 
             bindings = self._many_bindings(task_ids)
             with self.Consumer(channel, bindings, on_message=on_message,
-                               accept=self.accept, no_ack=True):
+                               accept=self.accept, no_ack=no_ack):
                 wait = conn.drain_events
                 popleft = results.popleft
                 while ids:

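The new no_ack and on_interval parameters let callers keep result messages un-acked and run a callback on every drain iteration (for example to abort a blocked wait). A simplified stand-in for the drain loop above, not the backend itself:

    import socket
    from time import monotonic  # celery.five.monotonic on Python 2

    def drain(wait, results, timeout=None, on_interval=None, now=monotonic):
        time_start = now()
        while not results:          # filled by the consumer callback
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)   # block until a message or timeout
            if on_interval:
                on_interval()       # give the caller a turn each pass
        return results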
+ 87 - 46
celery/backends/base.py

@@ -33,8 +33,8 @@ from celery.five import items
 from celery.result import (
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
 )
-from celery.utils import timeutils
 from celery.utils.functional import LRUCache
+from celery.utils.log import get_logger
 from celery.utils.serialization import (
     get_pickled_exception,
     get_pickleable_exception,
@@ -46,12 +46,21 @@ __all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']
 EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
 PY3 = sys.version_info >= (3, 0)
 
+logger = get_logger(__name__)
+
 
 def unpickle_backend(cls, args, kwargs):
     """Return an unpickled backend."""
     return cls(*args, app=current_app._get_current_object(), **kwargs)
 
 
+class _nulldict(dict):
+
+    def ignore(self, *a, **kw):
+        pass
+    __setitem__ = update = setdefault = ignore
+
+
 class BaseBackend(object):
     READY_STATES = states.READY_STATES
     UNREADY_STATES = states.UNREADY_STATES
@@ -90,9 +99,8 @@ class BaseBackend(object):
         (self.content_type,
          self.content_encoding,
          self.encoder) = serializer_registry._encoders[self.serializer]
-        self._cache = LRUCache(
-            limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
-        )
+        cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS
+        self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)
         self.accept = prepare_accept_content(
             conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
         )
@@ -111,6 +119,21 @@ class BaseBackend(object):
         return self.store_result(task_id, exc, status=states.FAILURE,
                                  traceback=traceback, request=request)
 
+    def chord_error_from_stack(self, callback, exc=None):
+        from celery import group
+        app = self.app
+        backend = app._tasks[callback.task].backend
+        try:
+            group(
+                [app.signature(errback)
+                 for errback in callback.options.get('link_error') or []],
+                app=app,
+            ).apply_async((callback.id, ))
+        except Exception as eb_exc:
+            return backend.fail_from_current_stack(callback.id, exc=eb_exc)
+        else:
+            return backend.fail_from_current_stack(callback.id, exc=exc)
+
     def fail_from_current_stack(self, task_id, exc=None):
         type_, real_exc, tb = sys.exc_info()
         try:
@@ -132,18 +155,21 @@ class BaseBackend(object):
                                  status=states.REVOKED, traceback=None,
                                  request=request)
 
-    def prepare_exception(self, exc):
+    def prepare_exception(self, exc, serializer=None):
         """Prepare exception for serialization."""
-        if self.serializer in EXCEPTION_ABLE_CODECS:
+        serializer = self.serializer if serializer is None else serializer
+        if serializer in EXCEPTION_ABLE_CODECS:
             return get_pickleable_exception(exc)
         return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
 
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
+        if not isinstance(exc, BaseException):
+            exc = create_exception_cls(
+                from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
         if self.serializer in EXCEPTION_ABLE_CODECS:
-            return get_pickled_exception(exc)
-        return create_exception_cls(
-            from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+            exc = get_pickled_exception(exc)
+        return exc
 
     def prepare_value(self, result):
         """Prepare value for storage."""
@@ -162,7 +188,9 @@ class BaseBackend(object):
                      content_encoding=self.content_encoding,
                      accept=self.accept)
 
-    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
+    def wait_for(self, task_id,
+                 timeout=None, propagate=True, interval=0.5, no_ack=True,
+                 on_interval=None):
         """Wait for task and return its result.
 
         If the task raises an exception, this exception
@@ -185,6 +213,8 @@ class BaseBackend(object):
                 if propagate:
                     raise result
                 return result
+            if on_interval:
+                on_interval()
             # avoid hammering the CPU checking status.
             time.sleep(interval)
             time_elapsed += interval
@@ -195,7 +225,7 @@ class BaseBackend(object):
         if value is None:
             value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
         if isinstance(value, timedelta):
-            value = timeutils.timedelta_seconds(value)
+            value = value.total_seconds()
         if value is not None and type:
             return type(value)
         return value
@@ -311,7 +341,7 @@ class BaseBackend(object):
     def on_task_call(self, producer, task_id):
         return {}
 
-    def on_chord_part_return(self, task, propagate=False):
+    def on_chord_part_return(self, task, state, result, propagate=False):
         pass
 
     def fallback_chord_unlock(self, group_id, body, result=None,
@@ -374,17 +404,26 @@ class KeyValueStoreBackend(BaseBackend):
     def expire(self, key, value):
         pass
 
-    def get_key_for_task(self, task_id):
+    def get_key_for_task(self, task_id, key=''):
         """Get the cache key for a task by id."""
-        return self.task_keyprefix + self.key_t(task_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.task_keyprefix, key_t(task_id), key_t(key),
+        ])
 
-    def get_key_for_group(self, group_id):
+    def get_key_for_group(self, group_id, key=''):
         """Get the cache key for a group by id."""
-        return self.group_keyprefix + self.key_t(group_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.group_keyprefix, key_t(group_id), key_t(key),
+        ])
 
-    def get_key_for_chord(self, group_id):
+    def get_key_for_chord(self, group_id, key=''):
         """Get the cache key for the chord waiting on group with given id."""
-        return self.chord_keyprefix + self.key_t(group_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.chord_keyprefix, key_t(group_id), key_t(key),
+        ])
 
     def _strip_prefix(self, key):
         """Takes bytes, emits string."""
@@ -397,16 +436,18 @@ class KeyValueStoreBackend(BaseBackend):
     def _mget_to_results(self, values, keys):
         if hasattr(values, 'items'):
             # client returns dict so mapping preserved.
-            return dict((self._strip_prefix(k), self.decode(v))
-                        for k, v in items(values)
-                        if v is not None)
+            return {
+                self._strip_prefix(k): self.decode(v)
+                for k, v in items(values) if v is not None
+            }
         else:
             # client returns list so need to recreate mapping.
-            return dict((bytes_to_str(keys[i]), self.decode(value))
-                        for i, value in enumerate(values)
-                        if value is not None)
+            return {
+                bytes_to_str(keys[i]): self.decode(value)
+                for i, value in enumerate(values) if value is not None
+            }
 
-    def get_many(self, task_ids, timeout=None, interval=0.5,
+    def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
                  READY_STATES=states.READY_STATES):
         interval = 0.5 if interval is None else interval
         ids = task_ids if isinstance(task_ids, set) else set(task_ids)
@@ -429,7 +470,7 @@ class KeyValueStoreBackend(BaseBackend):
             r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                                  for k in keys]), keys)
             cache.update(r)
-            ids.difference_update(set(bytes_to_str(v) for v in r))
+            ids.difference_update({bytes_to_str(v) for v in r})
             for key, value in items(r):
                 yield bytes_to_str(key), value
             if timeout and iterations * interval >= timeout:
@@ -479,12 +520,12 @@ class KeyValueStoreBackend(BaseBackend):
         self.save_group(group_id, self.app.GroupResult(group_id, result))
         return header(*partial_args, task_id=group_id)
 
-    def on_chord_part_return(self, task, propagate=None):
+    def on_chord_part_return(self, task, state, result, propagate=None):
         if not self.implements_incr:
             return
         app = self.app
         if propagate is None:
-            propagate = self.app.conf.CELERY_CHORD_PROPAGATES
+            propagate = app.conf.CELERY_CHORD_PROPAGATES
         gid = task.request.group
         if not gid:
             return
@@ -492,26 +533,26 @@ class KeyValueStoreBackend(BaseBackend):
         try:
             deps = GroupResult.restore(gid, backend=task.backend)
         except Exception as exc:
-            callback = maybe_signature(task.request.chord, app=self.app)
-            return app._tasks[callback.task].backend.fail_from_current_stack(
-                callback.id,
-                exc=ChordError('Cannot restore group: {0!r}'.format(exc)),
+            callback = maybe_signature(task.request.chord, app=app)
+            logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+            return self.chord_error_from_stack(
+                callback,
+                ChordError('Cannot restore group: {0!r}'.format(exc)),
             )
         if deps is None:
             try:
                 raise ValueError(gid)
             except ValueError as exc:
-                callback = maybe_signature(task.request.chord, app=self.app)
-                task = app._tasks[callback.task]
-                return task.backend.fail_from_current_stack(
-                    callback.id,
-                    exc=ChordError('GroupResult {0} no longer exists'.format(
-                        gid,
-                    ))
+                callback = maybe_signature(task.request.chord, app=app)
+                logger.error('Chord callback %r raised: %r', gid, exc,
+                             exc_info=1)
+                return self.chord_error_from_stack(
+                    callback,
+                    ChordError('GroupResult {0} no longer exists'.format(gid)),
                 )
         val = self.incr(key)
         if val >= len(deps):
-            callback = maybe_signature(task.request.chord, app=self.app)
+            callback = maybe_signature(task.request.chord, app=app)
             j = deps.join_native if deps.supports_native_join else deps.join
             try:
                 with allow_join_result():
@@ -525,16 +566,16 @@ class KeyValueStoreBackend(BaseBackend):
                 except StopIteration:
                     reason = repr(exc)
 
-                app._tasks[callback.task].backend.fail_from_current_stack(
-                    callback.id, exc=ChordError(reason),
-                )
+                logger.error('Chord %r raised: %r', gid, reason, exc_info=1)
+                self.chord_error_from_stack(callback, ChordError(reason))
             else:
                 try:
                     callback.delay(ret)
                 except Exception as exc:
-                    app._tasks[callback.task].backend.fail_from_current_stack(
-                        callback.id,
-                        exc=ChordError('Callback error: {0!r}'.format(exc)),
+                    logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+                    self.chord_error_from_stack(
+                        callback,
+                        ChordError('Callback error: {0!r}'.format(exc)),
                     )
             finally:
                 deps.delete()

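Two behavioural notes on the base.py hunks: chord errors are now routed through chord_error_from_stack, which also fires any link_error callbacks, and setting CELERY_MAX_CACHED_RESULTS = -1 swaps the LRU result cache for a dict that drops writes. The latter is easy to see in isolation (class copied from the diff):

    class _nulldict(dict):

        def ignore(self, *a, **kw):
            pass
        __setitem__ = update = setdefault = ignore

    cache = _nulldict()
    cache['task-id'] = {'status': 'SUCCESS'}
    assert 'task-id' not in cache          # writes are silently discarded
    assert cache.get('task-id') is None    # reads behave like an empty dict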
+ 1 - 1
celery/backends/cache.py

@@ -73,7 +73,7 @@ class DummyClient(object):
 
     def get_multi(self, keys):
         cache = self.cache
-        return dict((k, cache[k]) for k in keys if k in cache)
+        return {k: cache[k] for k in keys if k in cache}
 
     def set(self, key, value, *args, **kwargs):
         self.cache[key] = value

+ 4 - 5
celery/backends/cassandra.py

@@ -22,7 +22,7 @@ from celery import states
 from celery.exceptions import ImproperlyConfigured
 from celery.five import monotonic
 from celery.utils.log import get_logger
-from celery.utils.timeutils import maybe_timedelta, timedelta_seconds
+from celery.utils.timeutils import maybe_timedelta
 
 from .base import BaseBackend
 
@@ -148,14 +148,13 @@ class CassandraBackend(BaseBackend):
                     'children': self.encode(
                         self.current_task_children(request),
                     )}
+            ttl = self.expires and max(self.expires.total_seconds(), 0)
             if self.detailed_mode:
                 meta['result'] = result
-                cf.insert(task_id, {date_done: self.encode(meta)},
-                          ttl=self.expires and timedelta_seconds(self.expires))
+                cf.insert(task_id, {date_done: self.encode(meta)}, ttl=ttl)
             else:
                 meta['result'] = self.encode(result)
-                cf.insert(task_id, meta,
-                          ttl=self.expires and timedelta_seconds(self.expires))
+                cf.insert(task_id, meta, ttl=ttl)
 
         return self._retry_on_error(_do_store)
 
 

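The hoisted ttl expression above keeps the old None-means-no-TTL behaviour while clamping negative expiries to zero; timedelta.total_seconds() replaces the removed timeutils helper. For example:

    from datetime import timedelta

    for expires in (None, timedelta(hours=1), timedelta(seconds=-5)):
        print(expires and max(expires.total_seconds(), 0))
    # -> None, 3600.0, 0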
+ 40 - 31
celery/backends/database/__init__.py

@@ -8,17 +8,21 @@
 """
 from __future__ import absolute_import
 
+import logging
+from contextlib import contextmanager
 from functools import wraps
 
 from celery import states
+from celery.backends.base import BaseBackend
 from celery.exceptions import ImproperlyConfigured
 from celery.five import range
 from celery.utils.timeutils import maybe_timedelta
 
-from celery.backends.base import BaseBackend
+from .models import Task
+from .models import TaskSet
+from .session import SessionManager
 
-from .models import Task, TaskSet
-from .session import ResultSession
+logger = logging.getLogger(__name__)
 
 __all__ = ['DatabaseBackend']
 
@@ -33,7 +37,19 @@ def _sqlalchemy_installed():
     return sqlalchemy
 _sqlalchemy_installed()
 
-from sqlalchemy.exc import DatabaseError, OperationalError
+from sqlalchemy.exc import DatabaseError, InvalidRequestError
+from sqlalchemy.orm.exc import StaleDataError
+
+
+@contextmanager
+def session_cleanup(session):
+    try:
+        yield
+    except Exception:
+        session.rollback()
+        raise
+    finally:
+        session.close()
 
 
 def retry(fun):
@@ -45,7 +61,12 @@ def retry(fun):
         for retries in range(max_retries):
             try:
                 return fun(*args, **kwargs)
-            except (DatabaseError, OperationalError):
+            except (DatabaseError, InvalidRequestError, StaleDataError):
+                logger.warning(
+                    "Failed operation %s. Retrying %s more times.",
+                    fun.__name__, max_retries - retries - 1,
+                    exc_info=True,
+                )
                 if retries + 1 >= max_retries:
                     raise
 
@@ -83,8 +104,8 @@ class DatabaseBackend(BaseBackend):
                 'Missing connection string! Do you have '
                 'CELERY_RESULT_DBURI set to a real value?')
 
-    def ResultSession(self):
-        return ResultSession(
+    def ResultSession(self, session_manager=SessionManager()):
+        return session_manager.session_factory(
             dburi=self.dburi,
             short_lived_sessions=self.short_lived_sessions,
             **self.engine_options
@@ -95,8 +116,9 @@ class DatabaseBackend(BaseBackend):
                       traceback=None, max_retries=3, **kwargs):
         """Store return value and status of an executed task."""
         session = self.ResultSession()
-        try:
-            task = session.query(Task).filter(Task.task_id == task_id).first()
+        with session_cleanup(session):
+            task = list(session.query(Task).filter(Task.task_id == task_id))
+            task = task and task[0]
             if not task:
                 task = Task(task_id)
                 session.add(task)
@@ -106,83 +128,70 @@ class DatabaseBackend(BaseBackend):
             task.traceback = traceback
             session.commit()
             return result
-        finally:
-            session.close()
 
     @retry
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
         session = self.ResultSession()
-        try:
-            task = session.query(Task).filter(Task.task_id == task_id).first()
-            if task is None:
+        with session_cleanup(session):
+            task = list(session.query(Task).filter(Task.task_id == task_id))
+            task = task and task[0]
+            if not task:
                 task = Task(task_id)
                 task.status = states.PENDING
                 task.result = None
             return task.to_dict()
-        finally:
-            session.close()
 
     @retry
     def _save_group(self, group_id, result):
         """Store the result of an executed group."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             group = TaskSet(group_id, result)
             session.add(group)
             session.flush()
             session.commit()
             return result
-        finally:
-            session.close()
 
     @retry
     def _restore_group(self, group_id):
         """Get metadata for group by id."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             group = session.query(TaskSet).filter(
                 TaskSet.taskset_id == group_id).first()
             if group:
                 return group.to_dict()
-        finally:
-            session.close()
 
     @retry
     def _delete_group(self, group_id):
         """Delete metadata for group by id."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             session.query(TaskSet).filter(
                 TaskSet.taskset_id == group_id).delete()
             session.flush()
             session.commit()
-        finally:
-            session.close()
 
     @retry
     def _forget(self, task_id):
         """Forget about result."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             session.query(Task).filter(Task.task_id == task_id).delete()
             session.commit()
-        finally:
-            session.close()
 
     def cleanup(self):
         """Delete expired metadata."""
         session = self.ResultSession()
         expires = self.expires
         now = self.app.now()
-        try:
+        with session_cleanup(session):
             session.query(Task).filter(
                 Task.date_done < (now - expires)).delete()
             session.query(TaskSet).filter(
                 TaskSet.date_done < (now - expires)).delete()
             session.commit()
-        finally:
-            session.close()
 
     def __reduce__(self, args=(), kwargs={}):
         kwargs.update(

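All of the try/finally blocks above collapse into the new session_cleanup context manager: rollback on error, close unconditionally. A self-contained sketch of its contract (DummySession is illustrative, not part of the diff):

    from contextlib import contextmanager

    @contextmanager
    def session_cleanup(session):   # as added in the diff
        try:
            yield
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    class DummySession(object):
        def rollback(self): print('rollback')
        def close(self): print('close')

    with session_cleanup(DummySession()):
        pass    # ORM work goes here; an exception would roll back first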
+ 46 - 49
celery/backends/database/session.py

@@ -8,58 +8,55 @@
 """
 from __future__ import absolute_import
 
-from collections import defaultdict
-from multiprocessing.util import register_after_fork
+from billiard.util import register_after_fork
 
 from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
 from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
 
 ResultModelBase = declarative_base()
 
-_SETUP = defaultdict(lambda: False)
-_ENGINES = {}
-_SESSIONS = {}
-
-__all__ = ['ResultSession', 'get_engine', 'create_session']
-
-
-class _after_fork(object):
-    registered = False
-
-    def __call__(self):
-        self.registered = False  # child must reregister
-        for engine in list(_ENGINES.values()):
-            engine.dispose()
-        _ENGINES.clear()
-        _SESSIONS.clear()
-after_fork = _after_fork()
-
-
-def get_engine(dburi, **kwargs):
-    try:
-        return _ENGINES[dburi]
-    except KeyError:
-        engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
-        after_fork.registered = True
-        register_after_fork(after_fork, after_fork)
-        return engine
-
-
-def create_session(dburi, short_lived_sessions=False, **kwargs):
-    engine = get_engine(dburi, **kwargs)
-    if short_lived_sessions or dburi not in _SESSIONS:
-        _SESSIONS[dburi] = sessionmaker(bind=engine)
-    return engine, _SESSIONS[dburi]
-
-
-def setup_results(engine):
-    if not _SETUP['results']:
-        ResultModelBase.metadata.create_all(engine)
-        _SETUP['results'] = True
-
-
-def ResultSession(dburi, **kwargs):
-    engine, session = create_session(dburi, **kwargs)
-    setup_results(engine)
-    return session()
+__all__ = ['SessionManager']
+
+
+class SessionManager(object):
+    def __init__(self):
+        self._engines = {}
+        self._sessions = {}
+        self.forked = False
+        self.prepared = False
+        register_after_fork(self, self._after_fork)
+
+    def _after_fork(self,):
+        self.forked = True
+
+    def get_engine(self, dburi, **kwargs):
+        if self.forked:
+            try:
+                return self._engines[dburi]
+            except KeyError:
+                engine = self._engines[dburi] = create_engine(dburi, **kwargs)
+                return engine
+        else:
+            kwargs['poolclass'] = NullPool
+            return create_engine(dburi, **kwargs)
+
+    def create_session(self, dburi, short_lived_sessions=False, **kwargs):
+        engine = self.get_engine(dburi, **kwargs)
+        if self.forked:
+            if short_lived_sessions or dburi not in self._sessions:
+                self._sessions[dburi] = sessionmaker(bind=engine)
+            return engine, self._sessions[dburi]
+        else:
+            return engine, sessionmaker(bind=engine)
+
+    def prepare_models(self, engine):
+        if not self.prepared:
+            ResultModelBase.metadata.create_all(engine)
+            self.prepared = True
+
+    def session_factory(self, dburi, **kwargs):
+        engine, session = self.create_session(dburi, **kwargs)
+        self.prepare_models(engine)
+        return session()

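Design note on SessionManager: until the process has forked, engines are created with NullPool so no pooled connections can leak into child processes; once the billiard after-fork hook flips self.forked, engines and sessionmakers are cached per dburi as before. The pre-fork half in isolation (requires SQLAlchemy; the sqlite URL is just an example):

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    engine = create_engine('sqlite://', poolclass=NullPool)
    print(type(engine.pool).__name__)   # -> NullPool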
+ 0 - 1
celery/backends/mongodb.py

@@ -92,7 +92,6 @@ class MongoBackend(BaseBackend):
             self.options = dict(config, **config.pop('options', None) or {})
 
             # Set option defaults
-            self.options.setdefault('ssl', self.app.conf.BROKER_USE_SSL)
             self.options.setdefault('max_pool_size', self.max_pool_size)
             self.options.setdefault('auto_start_request', False)
 
 

+ 76 - 4
celery/backends/redis.py

@@ -13,9 +13,11 @@ from functools import partial
 from kombu.utils import cached_property, retry_over_time
 from kombu.utils.url import _parse_url
 
-from celery.exceptions import ImproperlyConfigured
+from celery import states
+from celery.canvas import maybe_signature
+from celery.exceptions import ChordError, ImproperlyConfigured
 from celery.five import string_t
-from celery.utils import deprecated_property
+from celery.utils import deprecated_property, strtobool
 from celery.utils.functional import dictfilter
 from celery.utils.log import get_logger
 from celery.utils.timeutils import humanize_seconds
@@ -56,7 +58,7 @@ class RedisBackend(KeyValueStoreBackend):
 
     def __init__(self, host=None, port=None, db=None, password=None,
                  expires=None, max_connections=None, url=None,
-                 connection_pool=None, **kwargs):
+                 connection_pool=None, new_join=False, **kwargs):
         super(RedisBackend, self).__init__(**kwargs)
         conf = self.app.conf
         if self.redis is None:
@@ -90,7 +92,17 @@ class RedisBackend(KeyValueStoreBackend):
         self.url = url
         self.expires = self.prepare_expires(expires, type=int)
 
-        self.connection_errors, self.channel_errors = get_redis_error_classes()
+        try:
+            new_join = strtobool(self.connparams.pop('new_join'))
+        except KeyError:
+            pass
+        if new_join:
+            self.apply_chord = self._new_chord_apply
+            self.on_chord_part_return = self._new_chord_return
+
+        self.connection_errors, self.channel_errors = (
+            get_redis_error_classes() if get_redis_error_classes
+            else ((), ()))
 
     def _params_from_url(self, url, defaults):
         scheme, host, port, user, password, path, query = _parse_url(url)
@@ -165,6 +177,66 @@ class RedisBackend(KeyValueStoreBackend):
     def expire(self, key, value):
         return self.client.expire(key, value)
 
+    def _unpack_chord_result(self, tup, decode,
+                             PROPAGATE_STATES=states.PROPAGATE_STATES):
+        _, tid, state, retval = decode(tup)
+        if state in PROPAGATE_STATES:
+            raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
+        return retval
+
+    def _new_chord_apply(self, header, partial_args, group_id, body,
+                         result=None, **options):
+        # avoids saving the group in the redis db.
+        return header(*partial_args, task_id=group_id)
+
+    def _new_chord_return(self, task, state, result, propagate=None,
+                          PROPAGATE_STATES=states.PROPAGATE_STATES):
+        app = self.app
+        if propagate is None:
+            propagate = self.app.conf.CELERY_CHORD_PROPAGATES
+        request = task.request
+        tid, gid = request.id, request.group
+        if not gid or not tid:
+            return
+
+        client = self.client
+        jkey = self.get_key_for_group(gid, '.j')
+        result = self.encode_result(result, state)
+        _, readycount, _ = client.pipeline()                            \
+            .rpush(jkey, self.encode([1, tid, state, result]))          \
+            .llen(jkey)                                                 \
+            .expire(jkey, 86400)                                        \
+            .execute()
+
+        try:
+            callback = maybe_signature(request.chord, app=app)
+            total = callback['chord_size']
+            if readycount >= total:
+                decode, unpack = self.decode, self._unpack_chord_result
+                resl, _ = client.pipeline()     \
+                    .lrange(jkey, 0, total)     \
+                    .delete(jkey)               \
+                    .execute()
+                try:
+                    callback.delay([unpack(tup, decode) for tup in resl])
+                except Exception as exc:
+                    error('Chord callback for %r raised: %r',
+                          request.group, exc, exc_info=1)
+                    app._tasks[callback.task].backend.fail_from_current_stack(
+                        callback.id,
+                        exc=ChordError('Callback error: {0!r}'.format(exc)),
+                    )
+        except ChordError as exc:
+            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+            app._tasks[callback.task].backend.fail_from_current_stack(
+                callback.id, exc=exc,
+            )
+        except Exception as exc:
+            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+            app._tasks[callback.task].backend.fail_from_current_stack(
+                callback.id, exc=ChordError('Join error: {0!r}'.format(exc)),
+            )
+
     @property
     def ConnectionPool(self):
         if self._ConnectionPool is None:

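The optional new_join mode replaces the incr-counter chord accounting with a Redis list per group: each finished part RPUSHes its encoded result, and whoever observes LLEN >= chord_size pops the list and fires the callback. A rough stand-alone sketch with redis-py and JSON (the key name and chord size here are illustrative):

    import json
    import redis

    client = redis.StrictRedis()
    jkey, total = 'chord.example-group.j', 3  # cf. get_key_for_group(gid, '.j')

    def on_part_return(tid, state, result):
        _, readycount, _ = (client.pipeline()
                            .rpush(jkey, json.dumps([1, tid, state, result]))
                            .llen(jkey)
                            .expire(jkey, 86400)
                            .execute())
        if readycount >= total:
            resl, _ = (client.pipeline()
                       .lrange(jkey, 0, total)
                       .delete(jkey)
                       .execute())
            return [json.loads(tup)[3] for tup in resl]  # -> callback args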
+ 35 - 16
celery/beat.py

@@ -165,12 +165,16 @@ class Scheduler(object):
     #: How often to sync the schedule (3 minutes by default)
     #: How often to sync the schedule (3 minutes by default)
     sync_every = 3 * 60
     sync_every = 3 * 60
 
 
+    #: How many tasks can be called before a sync is forced.
+    sync_every_tasks = None
+
     _last_sync = None
     _last_sync = None
+    _tasks_since_sync = 0
 
 
     logger = logger  # compat
     logger = logger  # compat
 
 
     def __init__(self, app, schedule=None, max_interval=None,
     def __init__(self, app, schedule=None, max_interval=None,
-                 Publisher=None, lazy=False, **kwargs):
+                 Publisher=None, lazy=False, sync_every_tasks=None, **kwargs):
         self.app = app
         self.app = app
         self.data = maybe_evaluate({} if schedule is None else schedule)
         self.data = maybe_evaluate({} if schedule is None else schedule)
         self.max_interval = (max_interval
         self.max_interval = (max_interval
@@ -178,6 +182,9 @@ class Scheduler(object):
                              or self.max_interval)
                              or self.max_interval)
         self.Publisher = Publisher or app.amqp.TaskProducer
         self.Publisher = Publisher or app.amqp.TaskProducer
         self._heap = None
         self._heap = None
+        self.sync_every_tasks = (
+            app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
+            else sync_every_tasks)
         if not lazy:
         if not lazy:
             self.setup_schedule()
             self.setup_schedule()
 
 
@@ -236,8 +243,12 @@ class Scheduler(object):
         return min(next_time_to_run, self.max_interval)
         return min(next_time_to_run, self.max_interval)
 
 
     def should_sync(self):
     def should_sync(self):
-        return (not self._last_sync or
-                (monotonic() - self._last_sync) > self.sync_every)
+        return (
+            (not self._last_sync or
+               (monotonic() - self._last_sync) > self.sync_every) or
+            (self.sync_every_tasks and
+                self._tasks_since_sync >= self.sync_every_tasks)
+        )
 
 
     def reserve(self, entry):
     def reserve(self, entry):
         new_entry = self.schedule[entry.name] = next(entry)
         new_entry = self.schedule[entry.name] = next(entry)
@@ -264,6 +275,7 @@ class Scheduler(object):
                 "Couldn't apply scheduled task {0.name}: {exc}".format(
                 "Couldn't apply scheduled task {0.name}: {exc}".format(
                     entry, exc=exc)), sys.exc_info()[2])
                     entry, exc=exc)), sys.exc_info()[2])
         finally:
         finally:
+            self._tasks_since_sync += 1
             if self.should_sync():
             if self.should_sync():
                 self._do_sync()
                 self._do_sync()
 
 
@@ -279,6 +291,7 @@ class Scheduler(object):
             self.sync()
             self.sync()
         finally:
         finally:
             self._last_sync = monotonic()
             self._last_sync = monotonic()
+            self._tasks_since_sync = 0
 
 
     def sync(self):
     def sync(self):
         pass
         pass
@@ -298,9 +311,10 @@ class Scheduler(object):
         return self.Entry(**dict(entry, name=name, app=self.app))
         return self.Entry(**dict(entry, name=name, app=self.app))
 
 
     def update_from_dict(self, dict_):
     def update_from_dict(self, dict_):
-        self.schedule.update(dict(
-            (name, self._maybe_entry(name, entry))
-            for name, entry in items(dict_)))
+        self.schedule.update({
+            name: self._maybe_entry(name, entry)
+            for name, entry in items(dict_)
+        })
 
 
     def merge_inplace(self, b):
     def merge_inplace(self, b):
         schedule = self.schedule
         schedule = self.schedule
@@ -368,7 +382,6 @@ class PersistentScheduler(Scheduler):
         try:
         try:
             self._store = self.persistence.open(self.schedule_filename,
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
                                                 writeback=True)
-            entries = self._store.setdefault('entries', {})
         except Exception as exc:
         except Exception as exc:
             error('Removing corrupted schedule file %r: %r',
             error('Removing corrupted schedule file %r: %r',
                   self.schedule_filename, exc, exc_info=True)
                   self.schedule_filename, exc, exc_info=True)
@@ -376,15 +389,21 @@ class PersistentScheduler(Scheduler):
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
         else:
-            if '__version__' not in self._store:
-                warning('Reset: Account for new __version__ field')
-                self._store.clear()   # remove schedule at 2.2.2 upgrade.
-            if 'tz' not in self._store:
-                warning('Reset: Account for new tz field')
-                self._store.clear()   # remove schedule at 3.0.8 upgrade
-            if 'utc_enabled' not in self._store:
-                warning('Reset: Account for new utc_enabled field')
-                self._store.clear()   # remove schedule at 3.0.9 upgrade
+            try:
+                self._store['entries']
+            except KeyError:
+                # new schedule db
+                self._store['entries'] = {}
+            else:
+                if '__version__' not in self._store:
+                    warning('DB Reset: Account for new __version__ field')
+                    self._store.clear()   # remove schedule at 2.2.2 upgrade.
+                elif 'tz' not in self._store:
+                    warning('DB Reset: Account for new tz field')
+                    self._store.clear()   # remove schedule at 3.0.8 upgrade
+                elif 'utc_enabled' not in self._store:
+                    warning('DB Reset: Account for new utc_enabled field')
+                    self._store.clear()   # remove schedule at 3.0.9 upgrade

         tz = self.app.conf.CELERY_TIMEZONE
         stored_tz = self._store.get('tz')

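The scheduler now syncs on either of two triggers: a time window (CELERYBEAT_SYNC_EVERY, in seconds) or a task counter (sync_every_tasks). A minimal standalone sketch of that gate, mirroring should_sync() above (the SyncGate class and its method names are hypothetical, not part of Celery):

    from time import monotonic

    class SyncGate(object):
        """Hypothetical stand-in for the Scheduler sync bookkeeping above."""

        def __init__(self, sync_every=180, sync_every_tasks=None):
            self.sync_every = sync_every              # seconds between syncs
            self.sync_every_tasks = sync_every_tasks  # tasks between syncs (None disables)
            self._last_sync = None
            self._tasks_since_sync = 0

        def should_sync(self):
            # sync when we have never synced, when the time window has
            # elapsed, or when enough tasks were sent since the last sync.
            return (
                (not self._last_sync or
                 (monotonic() - self._last_sync) > self.sync_every) or
                (self.sync_every_tasks and
                 self._tasks_since_sync >= self.sync_every_tasks)
            )

        def on_task_applied(self):
            self._tasks_since_sync += 1
            if self.should_sync():
                self._last_sync, self._tasks_since_sync = monotonic(), 0
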
+ 10 - 9
celery/bin/amqp.py

@@ -15,7 +15,6 @@ import pprint
 from functools import partial
 from itertools import count

-from amqp import Message
 from kombu.utils.encoding import safe_str

 from celery.utils.functional import padlist
@@ -175,7 +174,7 @@ class AMQShell(cmd.Cmd):
         'basic.get': Spec(('queue', str),
                           ('no_ack', bool, 'off'),
                           returns=dump_message),
-        'basic.publish': Spec(('msg', Message),
+        'basic.publish': Spec(('msg', str),
                               ('exchange', str),
                               ('routing_key', str),
                               ('mandatory', bool, 'no'),
@@ -247,32 +246,34 @@ class AMQShell(cmd.Cmd):
         return [cmd for cmd in names
                 if cmd.partition('.')[2].startswith(text)]

-    def dispatch(self, cmd, argline):
+    def dispatch(self, cmd, arglist):
         """Dispatch and execute the command.

         Lookup order is: :attr:`builtins` -> :attr:`amqp`.

         """
-        arglist = shlex.split(safe_str(argline))
+        if isinstance(arglist, string_t):
+            arglist = shlex.split(safe_str(arglist))
         if cmd in self.builtins:
             return getattr(self, self.builtins[cmd])(*arglist)
         fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
         return formatter(fun(*args))

-    def parseline(self, line):
+    def parseline(self, parts):
         """Parse input line.

         :returns: tuple of three items:
             `(command_name, arglist, original_line)`

         """
-        parts = line.split()
         if parts:
-            return parts[0], ' '.join(parts[1:]), line
-        return '', '', line
+            return parts[0], parts[1:], ' '.join(parts)
+        return '', '', ''

     def onecmd(self, line):
         """Parse line and execute command."""
+        if isinstance(line, string_t):
+            line = shlex.split(safe_str(line))
         cmd, arg, line = self.parseline(line)
         if not line:
             return self.emptyline()
@@ -327,7 +328,7 @@ class AMQPAdmin(object):
     def run(self):
         shell = self.Shell(connect=self.connect, out=self.out)
         if self.args:
-            return shell.onecmd(' '.join(self.args))
+            return shell.onecmd(self.args)
         try:
             return shell.cmdloop()
         except KeyboardInterrupt:

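The net effect of the amqp shell changes is that onecmd() and dispatch() now accept a pre-split argument list as well as a raw command line. A standalone sketch of that normalization (the helper name is hypothetical; Celery checks string_t from celery.five rather than str):

    import shlex

    def normalize_argv(line_or_list):
        # Accept either a raw command line or a pre-split argument list,
        # mirroring the isinstance checks added above.
        if isinstance(line_or_list, str):
            return shlex.split(line_or_list)
        return list(line_or_list)

    assert normalize_argv('basic.get celery no_ack') == ['basic.get', 'celery', 'no_ack']
    assert normalize_argv(['basic.get', 'celery']) == ['basic.get', 'celery']
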
+ 15 - 21
celery/bin/base.py

@@ -68,7 +68,6 @@ from __future__ import absolute_import, print_function, unicode_literals
 import os
 import random
 import re
-import socket
 import sys
 import warnings
 import json
@@ -86,7 +85,7 @@ from celery.five import items, string, string_t
 from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
 from celery.utils import term
 from celery.utils import text
-from celery.utils import NODENAME_DEFAULT, nodesplit
+from celery.utils import node_format, host_format
 from celery.utils.imports import symbol_by_name, import_from_cwd

 try:
@@ -106,7 +105,6 @@ Try --help?

 find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)')
 find_rst_ref = re.compile(r':\w+:`(.+?)`')
-find_sformat = re.compile(r'%(\w)')

 __all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter',
            'Command', 'Option', 'daemon_options']
@@ -375,9 +373,10 @@ class Command(object):

     def prepare_args(self, options, args):
         if options:
-            options = dict((k, self.expanduser(v))
-                           for k, v in items(vars(options))
-                           if not k.startswith('_'))
+            options = {
+                k: self.expanduser(v)
+                for k, v in items(vars(options)) if not k.startswith('_')
+            }
         args = [self.expanduser(arg) for arg in args]
         self.check_args(args)
         return options, args
@@ -530,7 +529,12 @@ class Command(object):
                 opt = opts.get(arg)
                 if opt:
                     if opt.takes_value():
-                        acc[opt.dest] = args[index + 1]
+                        try:
+                            acc[opt.dest] = args[index + 1]
+                        except IndexError:
+                            raise ValueError(
+                                'Missing required argument for {0}'.format(
+                                    arg))
                         index += 1
                     elif opt.action == 'store_true':
                         acc[opt.dest] = True
@@ -561,20 +565,10 @@ class Command(object):
         pass

     def node_format(self, s, nodename, **extra):
-        name, host = nodesplit(nodename)
-        return self._simple_format(
-            s, host, n=name or NODENAME_DEFAULT, **extra)
-
-    def simple_format(self, s, **extra):
-        return self._simple_format(s, socket.gethostname(), **extra)
-
-    def _simple_format(self, s, host,
-                       match=find_sformat, expand=r'\1', **keys):
-        if s:
-            name, _, domain = host.partition('.')
-            keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **keys)
-            return match.sub(lambda m: keys[m.expand(expand)], s)
-        return s
+        return node_format(s, nodename, **extra)
+
+    def host_format(self, s, **extra):
+        return host_format(s, **extra)

     def _get_default_app(self, *args, **kwargs):
         from celery._state import get_current_app

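node_format() and host_format() from celery.utils replace the removed _simple_format() helper. A simplified sketch of the expansion they perform, reconstructed from the deleted code above (not the actual celery.utils implementation):

    import re
    import socket

    def host_format(s, host=None, **extra):
        # Expand %h (full hostname), %n (name part), %d (domain part)
        # and %% (literal percent), like the old _simple_format above.
        host = host or socket.gethostname()
        name, _, domain = host.partition('.')
        keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **extra)
        return re.sub(r'%(\w|%)', lambda m: str(keys[m.group(1)]), s) if s else s

    # on a host named 'worker1.example.com':
    #   host_format('/var/log/%n.log')  ->  '/var/log/worker1.log'
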
+ 1 - 1
celery/bin/beat.py

@@ -24,7 +24,7 @@ The :program:`celery beat` command.
     Scheduler class to use.
     Default is :class:`celery.beat.PersistentScheduler`.

-.. cmdoption:: max-interval
+.. cmdoption:: --max-interval

     Max seconds to sleep between schedule iterations.


+ 36 - 12
celery/bin/celery.py

@@ -8,13 +8,15 @@ The :program:`celery` umbrella command.
 """
 from __future__ import absolute_import, unicode_literals

-import anyjson
+import numbers
 import os
 import sys

 from functools import partial
 from importlib import import_module

+from kombu.utils import json
+
 from celery.five import string_t, values
 from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
 from celery.utils import term
@@ -61,7 +63,7 @@ if DEBUG:  # pragma: no cover


 def determine_exit_status(ret):
-    if isinstance(ret, int):
+    if isinstance(ret, numbers.Integral):
         return ret
     return EX_OK if ret else EX_FAILURE

@@ -161,12 +163,12 @@ class call(Command):
         # Positional args.
         args = kw.get('args') or ()
         if isinstance(args, string_t):
-            args = anyjson.loads(args)
+            args = json.loads(args)

         # Keyword args.
         kwargs = kw.get('kwargs') or {}
         if isinstance(kwargs, string_t):
-            kwargs = anyjson.loads(kwargs)
+            kwargs = json.loads(kwargs)

         # Expires can be int/float.
         expires = kw.get('expires') or None
@@ -571,10 +573,10 @@ class shell(Command):  # pragma: no cover
                        'signature': celery.signature}

         if not without_tasks:
-            self.locals.update(dict(
-                (task.__name__, task) for task in values(self.app.tasks)
-                if not task.name.startswith('celery.')),
-            )
+            self.locals.update({
+                task.__name__: task for task in values(self.app.tasks)
+                if not task.name.startswith('celery.')
+            })

         if force_python:
             return self.invoke_fallback_shell()
@@ -702,7 +704,7 @@ class CeleryCommand(Command):
             helps = '{self.prog_name} {command} --help'
         else:
             helps = '{self.prog_name} --help'
-        self.error(self.colored.magenta("Error: {0}".format(exc)))
+        self.error(self.colored.magenta('Error: {0}'.format(exc)))
         self.error("""Please try '{0}'""".format(helps.format(
             self=self, command=command,
         )))
@@ -715,11 +717,33 @@ class CeleryCommand(Command):
                 if value.startswith('--'):
                     rest.append(value)
                 elif value.startswith('-'):
-                    rest.extend([value] + [argv[index + 1]])
-                    index += 1
+                    # we eat the next argument even though we don't know
+                    # if this option takes an argument or not.
+                    # instead, we guess what the command name is in the
+                    # return statements below.
+                    try:
+                        nxt = argv[index + 1]
+                        if nxt.startswith('-'):
+                            # is another option
+                            rest.append(value)
+                        else:
+                            # is (maybe) a value for this option
+                            rest.extend([value, nxt])
+                            index += 1
+                    except IndexError:
+                        rest.append(value)
+                        break
                 else:
-                    return argv[index:] + rest
+                    break
                 index += 1
+            if argv[index:]:
+                # if there are more arguments left then divide and swap
+                # we assume the first argument in argv[i:] is the command
+                # name.
+                return argv[index:] + rest
+            # if there are no more arguments then the last arg in rest
+            # must be the command.
+            return [rest.pop()] + rest
         return []

     def prepare_prog_name(self, name):

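String --args/--kwargs values for celery call are now decoded with kombu's json wrapper instead of anyjson; it exposes the same loads() interface as the stdlib module used in this sketch:

    import json  # kombu.utils.json offers the same loads() interface

    # celery call proj.add --args='[2, 2]' --kwargs='{"debug": false}'
    args = json.loads('[2, 2]')
    kwargs = json.loads('{"debug": false}')
    print(args, kwargs)   # [2, 2] {'debug': False}
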
+ 3 - 1
celery/bin/celeryd_detach.py

@@ -30,6 +30,7 @@ logger = get_logger(__name__)
 C_FAKEFORK = os.environ.get('C_FAKEFORK')

 OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
+    Option('--workdir', default=None, dest='working_directory'),
     Option('--fake',
            default=False, action='store_true', dest='fake',
            help="Don't fork (for debugging purposes)"),
@@ -39,7 +40,8 @@ OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
 def detach(path, argv, logfile=None, pidfile=None, uid=None,
            gid=None, umask=0, working_directory=None, fake=False, app=None):
     fake = 1 if C_FAKEFORK else fake
-    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake):
+    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
+                  after_forkers=False):
         try:
             os.execv(path, [path] + argv)
         except Exception:

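A hedged sketch of the detach flow above: daemonize via the detached() context manager (which now receives after_forkers=False), then replace the process with the real command. The wrapper function here is illustrative only, not Celery's API:

    import os
    from celery.platforms import detached

    def detach_worker(path, argv, logfile=None, pidfile=None, fake=False):
        # daemonize first so later errors can still reach the logfile,
        # then replace this process with the real worker command.
        with detached(logfile, pidfile, fake=fake, after_forkers=False):
            os.execv(path, [path] + argv)
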
+ 2 - 2
celery/bin/events.py

@@ -57,14 +57,14 @@ class events(Command):
         celery events -d --app=proj
             dump events to screen.
         celery events -b amqp://
-        celery events -C <camera> [options]
+        celery events -c <camera> [options]
             run snapshot camera.

     Examples::

         celery events
         celery events -d
-        celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info
+        celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info
     """
     doc = __doc__
     supports_args = False

+ 1 - 1
celery/bin/graph.py

@@ -34,7 +34,7 @@ class graph(Command):

     def bootsteps(self, *args, **kwargs):
         worker = self.app.WorkController()
-        include = set(arg.lower() for arg in args or ['worker', 'consumer'])
+        include = {arg.lower() for arg in args or ['worker', 'consumer']}
         if 'worker' in include:
             graph = worker.blueprint.graph
             if 'consumer' in include:

+ 45 - 38
celery/bin/multi.py

@@ -13,19 +13,19 @@ Examples

     # Pidfiles and logfiles are stored in the current directory
     # by default.  Use the --pidfile and --logfile arguments to change
-    # this.  The abbreviation %N will be expanded to the current
+    # this.  The abbreviation %n will be expanded to the current
     # node name.
-    $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid
-                                    --logfile=/var/log/celery/%N.log
+    $ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid
+                                    --logfile=/var/log/celery/%n.log


     # You need to add the same arguments when you restart,
     # as these are not persisted anywhere.
-    $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid
-                                     --logfile=/var/run/celery/%N.log
+    $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
+                                     --logfile=/var/run/celery/%n.log

     # To stop the node, you need to specify the same pidfile.
-    $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid
+    $ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid

     # 3 workers, with 3 processes each
     $ celery multi start 3 -c 3
@@ -47,6 +47,9 @@ Examples
     # specify fully qualified nodenames
     $ celery multi start foo@worker.example.com bar@worker.example.com -c 3

+    # fully qualified nodenames but using the current hostname
+    $ celery multi start foo@%h bar@%h
+
     # Advanced example starting 10 workers in the background:
     #   * Three of the workers process the images and video queue
     #   * Two of the workers process the data queue with loglevel DEBUG
@@ -100,25 +103,26 @@ import signal
 import socket
 import sys

-from collections import defaultdict, namedtuple
+from collections import OrderedDict, defaultdict, namedtuple
+from functools import partial
 from subprocess import Popen
 from time import sleep

 from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
 from kombu.utils.encoding import from_utf8

 from celery import VERSION_BANNER
 from celery.five import items
 from celery.platforms import Pidfile, IS_WINDOWS
-from celery.utils import term, nodesplit
+from celery.utils import term
+from celery.utils import host_format, node_format, nodesplit
 from celery.utils.text import pluralize

 __all__ = ['MultiTool']

-SIGNAMES = set(sig for sig in dir(signal)
-               if sig.startswith('SIG') and '_' not in sig)
-SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
+SIGNAMES = {sig for sig in dir(signal)
+            if sig.startswith('SIG') and '_' not in sig}
+SIGMAP = {getattr(signal, name): name for name in SIGNAMES}

 USAGE = """\
 usage: {prog_name} start <node1 node2 nodeN|range> [worker options]
@@ -247,8 +251,8 @@ class MultiTool(object):
         self.retcode = int(any(retcodes))

     def with_detacher_default_options(self, p):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
-        _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log')
+        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
+        _setdefaultopt(p.options, ['--logfile', '-f'], '%n.log')
         p.options.setdefault(
             '--cmd',
             '-m {0}'.format(celery_exe('worker', '--detach')),
@@ -320,7 +324,7 @@ class MultiTool(object):
             self.note('')

     def getpids(self, p, cmd, callback=None):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
+        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')

         nodes = []
         for node in multi_args(p, cmd):
@@ -478,26 +482,41 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
                 p.namespaces[subns].update(ns_opts)
             p.namespaces.pop(ns_name)

+    # Numbers in args always refer to the index in the list of names
+    # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
+    for ns_name, ns_opts in list(items(p.namespaces)):
+        if ns_name.isdigit():
+            ns_index = int(ns_name) - 1
+            if ns_index < 0:
+                raise KeyError('Indexes start at 1 got: %r' % (ns_name, ))
+            try:
+                p.namespaces[names[ns_index]].update(ns_opts)
+            except IndexError:
+                raise KeyError('No node at index %r' % (ns_name, ))
+
     for name in names:
-        this_suffix = suffix
+        hostname = suffix
         if '@' in name:
-            this_name = options['-n'] = name
-            nodename, this_suffix = nodesplit(name)
-            name = nodename
+            nodename = options['-n'] = host_format(name)
+            shortname, hostname = nodesplit(nodename)
+            name = shortname
         else:
-            nodename = '%s%s' % (prefix, name)
-            this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
-        expand = abbreviations({'%h': this_name,
-                                '%n': name,
-                                '%N': nodename,
-                                '%d': this_suffix})
+            shortname = '%s%s' % (prefix, name)
+            nodename = options['-n'] = host_format(
+                '{0}@{1}'.format(shortname, hostname),
+            )
+
+        expand = partial(
+            node_format, nodename=nodename, N=shortname, d=hostname,
+            h=nodename,
+        )
         argv = ([expand(cmd)] +
                 [format_opt(opt, expand(value))
                  for opt, value in items(p.optmerge(name, options))] +
                 [passthrough])
         if append:
             argv.append(expand(append))
-        yield multi_args_t(this_name, argv, expand, name)
+        yield multi_args_t(nodename, argv, expand, name)


 class NamespacedOptionParser(object):
@@ -579,18 +598,6 @@ def parse_ns_range(ns, ranges=False):
     return ret


-def abbreviations(mapping):
-
-    def expand(S):
-        ret = S
-        if S is not None:
-            for short_opt, long_opt in items(mapping):
-                ret = ret.replace(short_opt, long_opt)
-        return ret
-
-    return expand
-
-
 def findsig(args, default=signal.SIGTERM):
     for arg in reversed(args):
         if len(arg) == 2 and arg[0] == '-':

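The new numeric namespaces in multi_args() let an option target a node by its position in the name list. A standalone sketch of the resolution rule (the names and options here are hypothetical):

    # e.g. `celery multi start foo bar baz -c:1 3 -l:bar DEBUG`
    names = ['foo', 'bar', 'baz']
    namespaces = {'1': {'-c': '3'}, 'bar': {'-l': 'DEBUG'}}

    resolved = {name: {} for name in names}
    for ns_name, ns_opts in namespaces.items():
        if ns_name.isdigit():
            ns_index = int(ns_name) - 1          # indexes start at 1
            if ns_index < 0:
                raise KeyError('Indexes start at 1, got: %r' % (ns_name, ))
            resolved[names[ns_index]].update(ns_opts)
        else:
            resolved[ns_name].update(ns_opts)

    print(resolved)  # {'foo': {'-c': '3'}, 'bar': {'-l': 'DEBUG'}, 'baz': {}}
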
+ 14 - 7
celery/bin/worker.py

@@ -71,8 +71,8 @@ The :program:`celery worker` command (previously known as ``celeryd``)

 .. cmdoption:: -E, --events

-    Send events that can be captured by monitors like :program:`celery events`,
-    `celerymon`, and others.
+    Send task-related events that can be captured by monitors like
+    :program:`celery events`, `celerymon`, and others.

 .. cmdoption:: --without-gossip

@@ -86,6 +86,10 @@ The :program:`celery worker` command (previously known as ``celeryd``)

     Do not send event heartbeats.

+.. cmdoption:: --heartbeat-interval
+
+    Interval in seconds at which to send the worker heartbeat.
+
 .. cmdoption:: --purge

     Purges all waiting tasks before the daemon is started.
@@ -171,7 +175,7 @@ class worker(Command):
         # parse options before detaching so errors can be handled.
         options, args = self.prepare_args(
             *self.parse_options(prog_name, argv, command))
-        self.maybe_detach([command] + sys.argv[1:])
+        self.maybe_detach([command] + argv)
         return self(*args, **options)

     def maybe_detach(self, argv, dopts=['-D', '--detach']):
@@ -192,7 +196,7 @@ class worker(Command):
         if self.app.IS_WINDOWS and kwargs.get('beat'):
             self.die('-B option does not work on Windows.  '
                      'Please run celery beat as a separate service.')
-        hostname = self.simple_format(default_nodename(hostname))
+        hostname = self.host_format(default_nodename(hostname))
         if loglevel:
             try:
                 loglevel = mlevel(loglevel)
@@ -201,12 +205,14 @@ class worker(Command):
                     loglevel, '|'.join(
                         l for l in LOG_LEVELS if isinstance(l, string_t))))

-        return self.app.Worker(
+        worker = self.app.Worker(
             hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
-            logfile=self.node_format(logfile, hostname),
+            logfile=logfile,  # node format handled by celery.app.log.setup
             pidfile=self.node_format(pidfile, hostname),
             state_db=self.node_format(state_db, hostname), **kwargs
-        ).start()
+        )
+        worker.start()
+        return worker.exitcode

     def with_pool_option(self, argv):
         # this command supports custom pools
@@ -245,6 +251,7 @@ class worker(Command):
             Option('--without-gossip', action='store_true', default=False),
             Option('--without-mingle', action='store_true', default=False),
             Option('--without-heartbeat', action='store_true', default=False),
+            Option('--heartbeat-interval', type='int'),
             Option('-O', dest='optimization'),
             Option('-D', '--detach', action='store_true'),
         ) + daemon_options() + tuple(self.app.user_options['worker'])

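run_from_argv() now returns the worker's exit code rather than the result of start(). A minimal sketch of the resulting calling pattern (the app name, broker URL, and options are hypothetical):

    import sys
    from celery import Celery

    app = Celery('proj', broker='amqp://')

    def run_worker(**options):
        worker = app.Worker(**options)
        worker.start()           # blocks until the worker terminates
        return worker.exitcode   # propagated to the shell by the caller

    if __name__ == '__main__':
        sys.exit(run_worker(loglevel='INFO') or 0)
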
+ 3 - 2
celery/bootsteps.py

@@ -232,6 +232,8 @@ class Blueprint(object):
         return next((C for C in values(self.steps) if C.last), None)

     def _firstpass(self, steps):
+        for step in values(steps):
+            step.requires = [symbol_by_name(dep) for dep in step.requires]
         stream = deque(step.requires for step in values(steps))
         while stream:
             for node in stream.popleft():
@@ -283,7 +285,6 @@ class StepType(type):
         attrs.update(
             __qualname__=qname,
             name=attrs.get('name') or qname,
-            requires=attrs.get('requires', ()),
         )
         return super(StepType, cls).__new__(cls, name, bases, attrs)

@@ -392,7 +393,7 @@ class StartStopStep(Step):


 class ConsumerStep(StartStopStep):
-    requires = ('Connection', )
+    requires = ('celery.worker.consumer:Connection', )
     consumers = None

     def get_consumers(self, channel):

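Bootstep requirements may now be plain qualified names, resolved lazily in _firstpass() with symbol_by_name(). For example (the step class below is hypothetical and assumes Celery is installed):

    from celery import bootsteps

    class MyConsumerStep(bootsteps.ConsumerStep):
        # resolved to the actual class only when the blueprint is built,
        # avoiding an import of celery.worker.consumer at definition time.
        requires = ('celery.worker.consumer:Connection', )

        def get_consumers(self, channel):
            return []
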
+ 248 - 66
celery/canvas.py

@@ -12,14 +12,15 @@
 """
 from __future__ import absolute_import

+from collections import MutableSequence, deque
 from copy import deepcopy
 from functools import partial as _partial, reduce
 from operator import itemgetter
 from itertools import chain as _chain

-from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
+from kombu.utils import cached_property, fxrange, reprcall, uuid
-from celery._state import current_app
+from celery._state import current_app, get_current_worker_task
 from celery.utils.functional import (
     maybe_list, is_list, regen,
     chunks as _chunks,
@@ -132,7 +133,7 @@ class Signature(dict):
     def from_dict(self, d, app=None):
         typ = d.get('subtask_type')
         if typ:
-            return self.TYPES[typ].from_dict(kwdict(d), app=app)
+            return self.TYPES[typ].from_dict(d, app=app)
         return Signature(d, app=app)

     def __init__(self, task=None, args=None, kwargs=None, options=None,
@@ -194,14 +195,19 @@ class Signature(dict):
         return s
     partial = clone

-    def freeze(self, _id=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             tid = opts['task_id']
         except KeyError:
             tid = opts['task_id'] = _id or uuid()
+        root_id = opts.setdefault('root_id', root_id)
         if 'reply_to' not in opts:
             opts['reply_to'] = self.app.oid
+        if group_id:
+            opts['group_id'] = group_id
+        if chord:
+            opts['chord'] = chord
         return self.AsyncResult(tid)
     _freeze = freeze

@@ -238,6 +244,8 @@ class Signature(dict):

     def append_to_list_option(self, key, value):
         items = self.options.setdefault(key, [])
+        if not isinstance(items, MutableSequence):
+            items = self.options[key] = [items]
         if value not in items:
             items.append(value)
         return value
@@ -278,7 +286,10 @@ class Signature(dict):
     def __reduce__(self):
         # for serialization, the task type is lazily loaded,
         # and not stored in the dict itself.
-        return subtask, (dict(self), )
+        return signature, (dict(self), )
+
+    def __json__(self):
+        return dict(self)

     def reprcall(self, *args, **kwargs):
         args, kwargs, _ = self._merge(args, kwargs, {})
@@ -344,20 +355,116 @@ class chain(Signature):
         if self.tasks:
             return self.apply_async(args, kwargs)

+    def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+                    task_id=None, link=None, link_error=None,
+                    publisher=None, root_id=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        tasks, results = self.prepare_steps(
+            args, self.tasks, root_id, link_error,
+        )
+        if not results:
+            return
+        result = results[-1]
+        last_task = tasks[-1]
+        if group_id:
+            last_task.set(group_id=group_id)
+        if chord:
+            last_task.set(chord=chord)
+        if task_id:
+            last_task.set(task_id=task_id)
+            result = last_task.type.AsyncResult(task_id)
+        # make sure we can do a link() and link_error() on a chain object.
+        if link:
+            tasks[-1].set(link=link)
+        tasks[0].apply_async(**options)
+        return result
+
+    def prepare_steps(self, args, tasks,
+                      root_id=None, link_error=None, app=None):
+        app = app or self.app
+        steps = deque(tasks)
+        next_step = prev_task = prev_res = None
+        tasks, results = [], []
+        i = 0
+        while steps:
+            task = steps.popleft()
+            if not i:  # first task
+                # first task gets partial args from chain
+                task = task.clone(args)
+                res = task.freeze(root_id=root_id)
+                root_id = res.id if root_id is None else root_id
+            else:
+                task = task.clone()
+                res = task.freeze(root_id=root_id)
+            i += 1
+
+            if isinstance(task, group):
+                task = maybe_unroll_group(task)
+
+            if isinstance(task, chain):
+                # splice the chain
+                steps.extendleft(reversed(task.tasks))
+                continue
+            elif isinstance(task, group) and steps and \
+                    not isinstance(steps[0], group):
+                # automatically upgrade group(...) | s to chord(group, s)
+                try:
+                    next_step = steps.popleft()
+                    # for chords we freeze by pretending it's a normal
+                    # signature instead of a group.
+                    res = Signature.freeze(next_step)
+                    task = chord(
+                        task, body=next_step,
+                        task_id=res.task_id, root_id=root_id,
+                    )
+                except IndexError:
+                    pass  # no callback, so keep as group.
+
+            if prev_task:
+                # link previous task to this task.
+                prev_task.link(task)
+                # set AsyncResult.parent
+                if not res.parent:
+                    res.parent = prev_res
+
+            if link_error:
+                task.set(link_error=link_error)
+
+            if not isinstance(prev_task, chord):
+                results.append(res)
+                tasks.append(task)
+            prev_task, prev_res = task, res
+
+        return tasks, results
+
+    def apply(self, args=(), kwargs={}, **options):
+        last, fargs = None, args
+        for task in self.tasks:
+            res = task.clone(fargs).apply(
+                last and (last.get(), ), **options
+            )
+            res.parent, last, fargs = last, res, None
+        return last
+
     @classmethod
     def from_dict(self, d, app=None):
         tasks = d['kwargs']['tasks']
         if d['args'] and tasks:
             # partial args passed on to first task in chain (Issue #1057).
             tasks[0]['args'] = tasks[0]._merge(d['args'])[0]
-        return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
+        return chain(*d['kwargs']['tasks'], app=app, **d['options'])

     @property
-    def type(self):
-        try:
-            return self._type or self.tasks[0].type.app.tasks['celery.chain']
-        except KeyError:
-            return self.app.tasks['celery.chain']
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app or current_app

     def __repr__(self):
         return ' | '.join(repr(t) for t in self.tasks)
@@ -448,11 +555,6 @@ def _maybe_group(tasks):
     return tasks


-def _maybe_clone(tasks, app):
-    return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
-            for s in tasks]
-
-
 @Signature.register_type
 class group(Signature):

@@ -471,15 +573,66 @@ class group(Signature):
             # partial args passed on to all tasks in the group (Issue #1057).
             for task in tasks:
                 task['args'] = task._merge(d['args'])[0]
-        return group(tasks, app=app, **kwdict(d['options']))
-
-    def apply_async(self, args=(), kwargs=None, **options):
-        tasks = _maybe_clone(self.tasks, app=self._app)
-        if not tasks:
+        return group(tasks, app=app, **d['options'])
+
+    def _prepared(self, tasks, partial_args, group_id, root_id, dict=dict,
+                  Signature=Signature, from_dict=Signature.from_dict):
+        for task in tasks:
+            if isinstance(task, dict):
+                if isinstance(task, Signature):
+                    # local sigs are always of type Signature, and we
+                    # clone them to make sure we do not modify the originals.
+                    task = task.clone()
+                else:
+                    # serialized sigs must be converted to Signature.
+                    task = from_dict(task)
+            if partial_args and not task.immutable:
+                task.args = tuple(partial_args) + tuple(task.args)
+            yield task, task.freeze(group_id=group_id, root_id=root_id)
+
+    def _apply_tasks(self, tasks, producer=None, app=None, **options):
+        app = app or self.app
+        with app.producer_or_acquire(producer) as producer:
+            for sig, res in tasks:
+                sig.apply_async(producer=producer, add_to_parent=False,
+                                **options)
+                yield res
+
+    def _freeze_gid(self, options):
+        # remove task_id and use that as the group_id,
+        # if we don't remove it then every task will have the same id...
+        options = dict(self.options, **options)
+        options['group_id'] = group_id = (
+            options.pop('task_id', uuid()))
+        return options, group_id, options.get('root_id')
+
+    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+                    producer=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        if not self.tasks:
             return self.freeze()
-        type = self.type
-        return type(*type.prepare(dict(self.options, **options),
-                                  tasks, args))
+
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        result = self.app.GroupResult(
+            group_id, list(self._apply_tasks(tasks, producer, app, **options)),
+        )
+        parent_task = get_current_worker_task()
+        if add_to_parent and parent_task:
+            parent_task.add_trail(result)
+        return result
+
+    def apply(self, args=(), kwargs={}, **options):
+        app = self.app
+        if not self.tasks:
+            return self.freeze()  # empty group returns GroupResult
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        return app.GroupResult(group_id, [
+            sig.apply(**options) for sig, _ in tasks
+        ])

     def set_immutable(self, immutable):
         for task in self.tasks:
@@ -494,24 +647,26 @@ class group(Signature):
         sig = sig.clone().set(immutable=True)
         return self.tasks[0].link_error(sig)

-    def apply(self, *args, **kwargs):
-        if not self.tasks:
-            return self.freeze()  # empty group returns GroupResult
-        return Signature.apply(self, *args, **kwargs)
-
     def __call__(self, *partial_args, **options):
         return self.apply_async(partial_args, **options)

-    def freeze(self, _id=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             gid = opts['task_id']
         except KeyError:
             gid = opts['task_id'] = uuid()
+        if group_id:
+            opts['group_id'] = group_id
+        if chord:
+            opts['chord'] = chord
+        root_id = opts.setdefault('root_id', root_id)
         new_tasks, results = [], []
         for task in self.tasks:
             task = maybe_signature(task, app=self._app).clone()
-            results.append(task._freeze())
+            results.append(task.freeze(
+                group_id=group_id, chord=chord, root_id=root_id,
+            ))
             new_tasks.append(task)
         self.tasks = self.kwargs['tasks'] = new_tasks
         return self.app.GroupResult(gid, results)
@@ -530,14 +685,14 @@ class group(Signature):
         return repr(self.tasks)

     @property
-    def type(self):
-        if self._type:
-            return self._type
-        # taking the app from the first task in the list, there may be a
-        # better solution for this, e.g. to consolidate tasks with the same
-        # app and apply them in batches.
-        app = self._app if self._app else self.tasks[0].type.app
-        return app.tasks[self['task']]
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app if app is not None else current_app


 @Signature.register_type
@@ -552,10 +707,13 @@ class chord(Signature):
         )
         self.subtask_type = 'chord'

+    def freeze(self, *args, **kwargs):
+        return self.body.freeze(*args, **kwargs)
+
     @classmethod
     def from_dict(self, d, app=None):
-        args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
-        return self(*args, app=app, **kwdict(d))
+        args, d['kwargs'] = self._unpack_args(**d['kwargs'])
+        return self(*args, app=app, **d)

     @staticmethod
     def _unpack_args(header=None, body=None, **kwargs):
@@ -563,33 +721,56 @@ class chord(Signature):
         # than manually popping things off.
         return (header, body), kwargs

-    @property
-    def type(self):
-        if self._type:
-            return self._type
-        # we will be able to fix this mess in 3.2 when we no longer
-        # require an actual task implementation for chord/group
-        if self._app:
-            app = self._app
-        else:
-            try:
-                app = self.tasks[0].type.app
-            except IndexError:
-                app = self.body.type.app
-        return app.tasks['celery.chord']
-
-    def apply_async(self, args=(), kwargs={}, task_id=None, **options):
+    @cached_property
+    def app(self):
+        app = self._app
+        if app is None:
+            app = self.tasks[0]._app
+            if app is None:
+                app = self.body._app
+        return app if app is not None else current_app
+
+    def apply_async(self, args=(), kwargs={}, task_id=None,
+                    producer=None, publisher=None, connection=None,
+                    router=None, result_cls=None, **options):
         body = kwargs.get('body') or self.kwargs['body']
         kwargs = dict(self.kwargs, **kwargs)
         body = body.clone(**options)
+        app = self.app
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply((), kwargs,
+                              body=body, task_id=task_id, **options)
+        return self.run(tasks, body, args, task_id=task_id, **options)
+
+    def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
+        body = self.body if body is None else body
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        return body.apply(
+            args=(tasks.apply().get(propagate=propagate), ),
+        )
-        _chord = self.type
-        if _chord.app.conf.CELERY_ALWAYS_EAGER:
-            return self.apply((), kwargs, task_id=task_id, **options)
-        res = body.freeze(task_id)
-        parent = _chord(self.tasks, body, args, **options)
-        res.parent = parent
-        return res
+    def run(self, header, body, partial_args, app=None, interval=None,
+            countdown=1, max_retries=None, propagate=None, eager=False,
+            task_id=None, **options):
+        app = app or self.app
+        propagate = (app.conf.CELERY_CHORD_PROPAGATES
+                     if propagate is None else propagate)
+        group_id = uuid()
+        root_id = body.options.get('root_id')
+        body.setdefault('chord_size', len(header.tasks))
+        results = header.freeze(
+            group_id=group_id, chord=body, root_id=root_id).results
+        bodyres = body.freeze(task_id, root_id=root_id)
+
+        parent = app.backend.apply_chord(
+            header, partial_args, group_id, body,
+            interval=interval, countdown=countdown,
+            max_retries=max_retries, propagate=propagate, result=results)
+        bodyres.parent = parent
+        return bodyres

     def __call__(self, body=None, **options):
         return self.apply_async((), {'body': body} if body else {}, **options)
@@ -626,7 +807,7 @@ class chord(Signature):


 def signature(varies, *args, **kwargs):
-    if not (args or kwargs) and isinstance(varies, dict):
+    if isinstance(varies, dict):
         if isinstance(varies, Signature):
             return varies.clone()
         return Signature.from_dict(varies)
@@ -638,9 +819,10 @@ def maybe_signature(d, app=None):
     if d is not None:
         if isinstance(d, dict):
             if not isinstance(d, Signature):
-                return signature(d, app=app)
+                d = signature(d)
         elif isinstance(d, list):
             return [maybe_signature(s, app=app) for s in d]
+
         if app is not None:
             d._app = app
         return d

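Among the canvas changes, prepare_steps() upgrades a group followed by another signature inside a chain into a chord, so the callback receives the list of group results. A hedged usage sketch (the app, broker/backend URLs, and tasks are hypothetical):

    from celery import Celery, chain, group

    app = Celery('proj', broker='memory://', backend='cache+memory://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # group | callback inside a chain becomes chord(group, callback),
    # so tsum receives the list of results from the add tasks.
    workflow = chain(group(add.s(i, i) for i in range(4)), tsum.s())
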
+ 53 - 48
celery/concurrency/asynpool.py

@@ -37,16 +37,13 @@ from amqp.utils import promise
 from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard import pool as _pool
 from billiard.compat import buf_t, setblocking, isblocking
-from billiard.einfo import ExceptionInfo
 from billiard.queues import _SimpleQueue
 from kombu.async import READ, WRITE, ERR
 from kombu.serialization import pickle as _pickle
 from kombu.utils import fxrange
-from kombu.utils.compat import get_errno
 from kombu.utils.eventio import SELECT_BAD_FD
 from celery.five import Counter, items, values
 from celery.utils.log import get_logger
-from celery.utils.text import truncate
 from celery.worker import state as worker_state

 try:
@@ -96,8 +93,6 @@ SCHED_STRATEGIES = {
     'fair': SCHED_STRATEGY_FAIR,
 }

-RESULT_MAXLEN = 128
-
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))


@@ -143,14 +138,14 @@ def _select(readers=None, writers=None, err=None, timeout=0):
             r = list(set(r) | set(e))
         return r, w, 0
     except (select.error, socket.error) as exc:
-        if get_errno(exc) == errno.EINTR:
+        if exc.errno == errno.EINTR:
             return [], [], 1
-        elif get_errno(exc) in SELECT_BAD_FD:
+        elif exc.errno in SELECT_BAD_FD:
             for fd in readers | writers | err:
                 try:
                     select.select([fd], [], [], 0)
                 except (select.error, socket.error) as exc:
-                    if get_errno(exc) not in SELECT_BAD_FD:
+                    if exc.errno not in SELECT_BAD_FD:
                         raise
                     readers.discard(fd)
                     writers.discard(fd)
@@ -170,11 +165,6 @@ class Worker(_pool.Worker):
         # is writable.
         self.outq.put((WORKER_UP, (pid, )))

-    def prepare_result(self, result, RESULT_MAXLEN=RESULT_MAXLEN):
-        if not isinstance(result, ExceptionInfo):
-            return truncate(repr(result), RESULT_MAXLEN)
-        return result
-

 class ResultHandler(_pool.ResultHandler):
     """Handles messages from the pool processes."""
@@ -205,7 +195,7 @@ class ResultHandler(_pool.ResultHandler):
                     fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr,
                 )
             except OSError as exc:
-                if get_errno(exc) not in UNAVAIL:
+                if exc.errno not in UNAVAIL:
                     raise
                     raise
                 yield
                 yield
             else:
             else:
@@ -227,7 +217,7 @@ class ResultHandler(_pool.ResultHandler):
                     fd, bufv[Br:] if readcanbuf else bufv, body_size - Br,
                 )
             except OSError as exc:
-                if get_errno(exc) not in UNAVAIL:
+                if exc.errno not in UNAVAIL:
                     raise
                 yield
             else:
@@ -250,21 +240,21 @@ class ResultHandler(_pool.ResultHandler):
         fileno_to_outq = self.fileno_to_outq
         on_state_change = self.on_state_change
         add_reader = hub.add_reader
-        hub_remove = hub.remove
+        remove_reader = hub.remove_reader
         recv_message = self._recv_message
 
         def on_result_readable(fileno):
             try:
                 fileno_to_outq[fileno]
             except KeyError:  # process gone
-                return hub_remove(fileno)
+                return remove_reader(fileno)
             it = recv_message(add_reader, fileno, on_state_change)
             try:
                 next(it)
             except StopIteration:
                 pass
             except (IOError, OSError, EOFError):
-                hub_remove(fileno)
+                remove_reader(fileno)
             else:
                 add_reader(fileno, it)
         return on_result_readable
@@ -347,8 +337,9 @@ class AsynPool(_pool.Pool):
         processes = self.cpu_count() if processes is None else processes
         self.synack = synack
         # create queue-pairs for all our processes in advance.
-        self._queues = dict((self.create_process_queues(), None)
-                            for _ in range(processes))
+        self._queues = {
+            self.create_process_queues(): None for _ in range(processes)
+        }
 
         # inqueue fileno -> process mapping
         self._fileno_to_inq = {}
@@ -485,7 +476,9 @@ class AsynPool(_pool.Pool):
     def _create_process_handlers(self, hub, READ=READ, ERR=ERR):
         """For async pool this will create the handlers called
         when a process is up/down, etc."""
-        add_reader, hub_remove = hub.add_reader, hub.remove
+        add_reader, remove_reader, remove_writer = (
+            hub.add_reader, hub.remove_reader, hub.remove_writer,
+        )
         cache = self._cache
         all_inqueues = self._all_inqueues
         fileno_to_inq = self._fileno_to_inq
@@ -536,7 +529,7 @@
 
         self.on_process_up = on_process_up
 
-        def _remove_from_index(obj, proc, index, callback=None):
+        def _remove_from_index(obj, proc, index, remove_fun, callback=None):
             # this removes the file descriptors for a process from
             # the indices.  we have to make sure we don't overwrite
             # another process's fds, as the fds may be reused.
@@ -552,7 +545,7 @@ class AsynPool(_pool.Pool):
             except KeyError:
                 pass
             else:
-                hub_remove(fd)
+                remove_fun(fd)
                 if callback is not None:
                     callback(fd)
             return fd
@@ -562,14 +555,29 @@ class AsynPool(_pool.Pool):
             if proc.dead:
                 return
             process_flush_queues(proc)
-            _remove_from_index(proc.outq._reader, proc, fileno_to_outq)
+            _remove_from_index(
+                proc.outq._reader, proc, fileno_to_outq, remove_reader,
+            )
             if proc.synq:
-                _remove_from_index(proc.synq._writer, proc, fileno_to_synq)
-            inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq,
-                                     callback=all_inqueues.discard)
+                _remove_from_index(
+                    proc.synq._writer, proc, fileno_to_synq, remove_writer,
+                )
+            inq = _remove_from_index(
+                proc.inq._writer, proc, fileno_to_inq, remove_writer,
+                callback=all_inqueues.discard,
+            )
             if inq:
                 busy_workers.discard(inq)
-            hub_remove(proc.sentinel)
+            remove_reader(proc.sentinel)
+            waiting_to_start.discard(proc)
+            self._active_writes.discard(proc.inqW_fd)
+            remove_writer(proc.inqW_fd)
+            remove_reader(proc.outqR_fd)
+            if proc.synqR_fd:
+                remove_reader(proc.synqR_fd)
+            if proc.synqW_fd:
+                self._active_writes.discard(proc.synqW_fd)
+                remove_reader(proc.synqW_fd)
         self.on_process_down = on_process_down
 
     def _create_write_handlers(self, hub,
@@ -713,7 +721,7 @@ class AsynPool(_pool.Pool):
                         except StopIteration:
                             pass
                         except OSError as exc:
-                            if get_errno(exc) != errno.EBADF:
+                            if exc.errno != errno.EBADF:
                                 raise
                         else:
                             add_writer(ready_fd, cor)
@@ -756,7 +764,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Hw += send(header, Hw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         errors += 1
@@ -772,7 +780,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Bw += send(body, Bw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         errors += 1
@@ -821,7 +829,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Hw += send(header, Hw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         yield
 
@@ -830,7 +838,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Bw += send(body, Bw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         yield
@@ -903,7 +911,7 @@ class AsynPool(_pool.Pool):
             self._busy_workers.clear()
 
     def _flush_writer(self, proc, writer):
-        fds = set([proc.inq._writer])
+        fds = {proc.inq._writer}
         try:
             while fds:
                 if not proc._is_alive():
@@ -932,9 +940,9 @@ class AsynPool(_pool.Pool):
         """Grow the pool by ``n`` proceses."""
         """Grow the pool by ``n`` proceses."""
         diff = max(self._processes - len(self._queues), 0)
         diff = max(self._processes - len(self._queues), 0)
         if diff:
         if diff:
-            self._queues.update(
-                dict((self.create_process_queues(), None) for _ in range(diff))
-            )
+            self._queues.update({
+                self.create_process_queues(): None for _ in range(diff)
+            })
 
 
     def on_shrink(self, n):
     def on_shrink(self, n):
         """Shrink the pool by ``n`` processes."""
         """Shrink the pool by ``n`` processes."""
@@ -960,14 +968,13 @@ class AsynPool(_pool.Pool):
         return inq, outq, synq
 
     def on_process_alive(self, pid):
-        """Handler called when the WORKER_UP message is received
+        """Handler called when the :const:`WORKER_UP` message is received
         from a child process, which marks the process as ready
         to receive work."""
         try:
             proc = next(w for w in self._pool if w.pid == pid)
         except StopIteration:
-            # process already exited :(  this will be handled elsewhere.
-            return
+            return logger.warning('process with pid=%s already exited', pid)
         assert proc.inqW_fd not in self._fileno_to_inq
         assert proc.inqW_fd not in self._all_inqueues
         self._waiting_to_start.discard(proc)
@@ -1033,7 +1040,7 @@ class AsynPool(_pool.Pool):
                 try:
                     proc.inq.put(None)
                 except OSError as exc:
-                    if get_errno(exc) != errno.EBADF:
+                    if exc.errno != errno.EBADF:
                         raise
 
     def create_result_handler(self):
@@ -1077,21 +1084,19 @@ class AsynPool(_pool.Pool):
         """
         """
         resq = proc.outq._reader
         resq = proc.outq._reader
         on_state_change = self._result_handler.on_state_change
         on_state_change = self._result_handler.on_state_change
-        fds = set([resq])
+        fds = {resq}
         while fds and not resq.closed and self._state != TERMINATE:
         while fds and not resq.closed and self._state != TERMINATE:
             readable, _, again = _select(fds, None, fds, timeout=0.01)
             readable, _, again = _select(fds, None, fds, timeout=0.01)
             if readable:
             if readable:
                 try:
                 try:
                     task = resq.recv()
                     task = resq.recv()
                 except (OSError, IOError, EOFError) as exc:
                 except (OSError, IOError, EOFError) as exc:
-                    if get_errno(exc) == errno.EINTR:
+                    _errno = getattr(exc, 'errno', None)
+                    if _errno == errno.EINTR:
                         continue
                         continue
-                    elif get_errno(exc) == errno.EAGAIN:
+                    elif _errno == errno.EAGAIN:
                         break
                         break
-                    else:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                    if get_errno(exc) not in UNAVAIL:
+                    elif _errno not in UNAVAIL:
                         debug('got %r while flushing process %r',
                         debug('got %r while flushing process %r',
                               exc, proc, exc_info=1)
                               exc, proc, exc_info=1)
                     break
                     break
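
A note on the errno changes throughout this file: ``kombu.utils.compat.get_errno()`` is replaced by reading the exception's own ``errno`` attribute, with the hedged ``getattr(exc, 'errno', None)`` form used in handlers that catch bare ``Exception`` and so cannot assume the attribute exists. A minimal sketch of the pattern (illustrative only, not celery code):

.. code-block:: python

    import errno
    import socket

    a, b = socket.socketpair()
    a.setblocking(False)
    try:
        a.recv(4096)   # nothing has been written yet
    except (OSError, socket.error) as exc:
        # exc.errno replaces get_errno(exc); getattr() guards
        # handlers where the attribute may be missing entirely.
        if getattr(exc, 'errno', None) not in (errno.EAGAIN, errno.EWOULDBLOCK):
            raise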

+ 4 - 2
celery/concurrency/base.py

@@ -66,11 +66,13 @@ class BasePool(object):
 
     _state = None
     _pool = None
+    _does_debug = True
 
     #: only used by multiprocessing pool
     uses_semaphore = False
 
     task_join_will_block = True
+    body_can_be_buffer = False
 
     def __init__(self, limit=None, putlocks=True,
                  forking_enable=True, callbacks_propagate=(), **options):
@@ -79,7 +81,6 @@ class BasePool(object):
         self.options = options
         self.forking_enable = forking_enable
         self.callbacks_propagate = callbacks_propagate
-        self._does_debug = logger.isEnabledFor(logging.DEBUG)
 
     def on_start(self):
         pass
@@ -111,7 +112,7 @@ class BasePool(object):
     def maintain_pool(self, *args, **kwargs):
         pass
 
-    def terminate_job(self, pid):
+    def terminate_job(self, pid, signal=None):
         raise NotImplementedError(
             '{0} does not implement kill_job'.format(type(self)))
 
@@ -128,6 +129,7 @@ class BasePool(object):
         self.on_terminate()
 
     def start(self):
+        self._does_debug = logger.isEnabledFor(logging.DEBUG)
         self.on_start()
         self._state = self.RUN
 
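
Deferring the ``isEnabledFor()`` check from ``__init__`` to ``start()`` means a pool created before logging is fully configured still picks up the effective level. A sketch of the pattern (illustrative, not the actual class):

.. code-block:: python

    import logging

    logger = logging.getLogger('pool')

    class Pool(object):
        _does_debug = True   # safe default before start()

        def start(self):
            # sample the log level once at startup instead of per call
            self._does_debug = logger.isEnabledFor(logging.DEBUG)

        def _debug(self, msg, *args):
            if self._does_debug:
                logger.debug(msg, *args)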

+ 7 - 0
celery/concurrency/eventlet.py

@@ -142,3 +142,10 @@ class TaskPool(base.BasePool):
         self._quick_put(apply_target, target, args, kwargs,
                         callback, accept_callback,
                         self.getpid)
+
+    def _get_info(self):
+        return {
+            'max-concurrency': self.limit,
+            'free-threads': self._pool.free(),
+            'running-threads': self._pool.running(),
+        }
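
The new ``_get_info()`` exposes pool statistics; the same keys surface in the worker's pool stats (e.g. via ``celery inspect stats``). A hedged sketch, assuming eventlet is installed:

.. code-block:: python

    from celery.concurrency.eventlet import TaskPool

    pool = TaskPool(limit=10)
    pool.start()
    print(pool._get_info())
    # e.g. {'max-concurrency': 10, 'free-threads': 10, 'running-threads': 0}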

+ 7 - 2
celery/concurrency/prefork.py

@@ -57,10 +57,15 @@ def process_initializer(app, hostname):
     # run once per process.
     app.loader.init_worker()
     app.loader.init_worker_process()
+    logfile = os.environ.get('CELERY_LOG_FILE') or None
+    if logfile and '%i' in logfile.lower():
+        # logfile path will differ so need to set up logging again.
+        app.log.already_setup = False
     app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
-                  os.environ.get('CELERY_LOG_FILE') or None,
+                  logfile,
                   bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
-                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
+                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')),
+                  hostname=hostname)
     if os.environ.get('FORKED_BY_MULTIPROCESSING'):
         # pool did execv after fork
         trace.setup_worker_optimizations(app)
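
The ``'%i'`` check makes per-child log files work: when the path contains the pool-process index placeholder, each forked child must configure logging again rather than inherit the parent's handlers. The decision logic in isolation:

.. code-block:: python

    def needs_per_child_setup(logfile):
        # '%i' (any case) in the path means every pool child logs to
        # its own file, so setup must run again after fork.
        return bool(logfile) and '%i' in logfile.lower()

    assert needs_per_child_setup('/var/log/celery/worker-%i.log')
    assert not needs_per_child_setup('/var/log/celery/worker.log')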

+ 1 - 0
celery/concurrency/solo.py

@@ -17,6 +17,7 @@ __all__ = ['TaskPool']
 
 class TaskPool(BasePool):
     """Solo task pool (blocking, inline, fast)."""
+    body_can_be_buffer = True
 
     def __init__(self, *args, **kwargs):
         super(TaskPool, self).__init__(*args, **kwargs)

+ 35 - 32
celery/contrib/abortable.py

@@ -28,49 +28,52 @@ In the consumer:
 
 .. code-block:: python
 
-   from celery.contrib.abortable import AbortableTask
-   from celery.utils.log import get_task_logger
-
-   logger = get_logger(__name__)
-
-   class MyLongRunningTask(AbortableTask):
-
-       def run(self, **kwargs):
-           results = []
-           for x in range(100):
-               # Check after every 5 loops..
-               if x % 5 == 0:  # alternatively, check when some timer is due
-                   if self.is_aborted(**kwargs):
-                       # Respect the aborted status and terminate
-                       # gracefully
-                       logger.warning('Task aborted.')
-                       return
-               y = do_something_expensive(x)
-               results.append(y)
-           logger.info('Task finished.')
-           return results
-
+    from __future__ import absolute_import
+
+    from celery.contrib.abortable import AbortableTask
+    from celery.utils.log import get_task_logger
+
+    from proj.celery import app
+
+    logger = get_task_logger(__name__)
+
+    @app.task(bind=True, base=AbortableTask)
+    def long_running_task(self):
+        results = []
+        for i in range(100):
+            # check after every 5 iterations...
+            # (or alternatively, check when some timer is due)
+            if not i % 5:
+                if self.is_aborted():
+                    # respect aborted state, and terminate gracefully.
+                    logger.warning('Task aborted')
+                    return
+            value = do_something_expensive(i)
+            results.append(value)
+        logger.info('Task complete')
+        return results
 
 In the producer:
 
 .. code-block:: python
 
-   from myproject.tasks import MyLongRunningTask
+    from __future__ import absolute_import
 
-   def myview(request):
+    import time
 
-       async_result = MyLongRunningTask.delay()
-       # async_result is of type AbortableAsyncResult
+    from proj.tasks import long_running_task
 
-       # After 10 seconds, abort the task
-       time.sleep(10)
-       async_result.abort()
+    def myview(request):
+        # result is of type AbortableAsyncResult
+        result = long_running_task.delay()
 
-       ...
+        # abort the task after 10 seconds
+        time.sleep(10)
+        result.abort()
 
-After the `async_result.abort()` call, the task execution is not
+After the `result.abort()` call, the task execution is not
 aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking the `async_result` status, or call `async_result.wait()` to
+checking `result.state`, or call `result.get(timeout=)` to
 have it block until the task is finished.
 
 .. note::

+ 5 - 4
celery/contrib/batches.py

@@ -47,7 +47,7 @@ messages, and every 10 seconds.
 
     from celery.contrib.batches import Batches
 
-    wot_api_target = "https://api.mywot.com/0.4/public_link_json"
+    wot_api_target = 'https://api.mywot.com/0.4/public_link_json'
 
     @app.task(base=Batches, flush_every=100, flush_interval=10)
     def wot_api(requests):
@@ -64,7 +64,7 @@ messages, and every 10 seconds.
         domains = [urlparse(url).netloc for url in urls]
         response = requests.get(
             wot_api_target,
-            params={"hosts": ('/').join(set(domains)) + '/'}
+            params={'hosts': ('/').join(set(domains)) + '/'}
         )
         return [response.json[domain] for domain in domains]
 
@@ -88,7 +88,7 @@ from itertools import count
 from celery.task import Task
 from celery.five import Empty, Queue
 from celery.utils.log import get_logger
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import noop
 
 __all__ = ['Batches']
@@ -226,7 +226,8 @@ class Batches(Task):
                 self.flush(requests)
         if not requests:
             logger.debug('Batches: Cancelling timer: Nothing in buffer.')
-            self._tref.cancel()  # cancel timer.
+            if self._tref:
+                self._tref.cancel()  # cancel timer.
             self._tref = None
 
     def apply_buffer(self, requests, args=(), kwargs={}):
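
For context, the client side of a ``Batches`` task enqueues single items as usual; the buffer and flush settings only affect the worker. A sketch (the module path is hypothetical):

.. code-block:: python

    from proj.tasks import wot_api

    for url in ('http://a.example.com', 'http://b.example.com'):
        # buffered by the worker; flushed every flush_every messages
        # or every flush_interval seconds, whichever comes first.
        wot_api.delay(url)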

+ 3 - 3
celery/contrib/migrate.py

@@ -99,7 +99,7 @@ def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                   queues=None, **kwargs):
     app = app_or_default(app)
     queues = prepare_queues(queues)
-    producer = app.amqp.TaskProducer(dest)
+    producer = app.amqp.Producer(dest)
     migrate = partial(migrate, producer, queues=queues)
 
     def on_declare_queue(queue):
@@ -186,7 +186,7 @@ def move(predicate, connection=None, exchange=None, routing_key=None,
     app = app_or_default(app)
     queues = [_maybe_queue(app, queue) for queue in source or []] or None
     with app.connection_or_acquire(connection, pool=False) as conn:
-        producer = app.amqp.TaskProducer(conn)
+        producer = app.amqp.Producer(conn)
         state = State()
 
         def on_task(body, message):
@@ -250,7 +250,7 @@ def start_filter(app, conn, filter, limit=None, timeout=1.0,
     if isinstance(tasks, string_t):
         tasks = set(tasks.split(','))
     if tasks is None:
-        tasks = set([])
+        tasks = set()
 
     def update_state(body, message):
         state.count += 1
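
The ``Producer`` rename does not change the public helpers; a minimal usage sketch (broker URLs are placeholders):

.. code-block:: python

    from celery import Celery
    from celery.contrib.migrate import migrate_tasks

    app = Celery('proj')
    with app.connection('amqp://guest@old-broker//') as source:
        with app.connection('amqp://guest@new-broker//') as dest:
            # republishes tasks from the source broker to the destination
            migrate_tasks(source, dest, app=app)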

+ 1 - 1
celery/contrib/rdb.py

@@ -34,7 +34,7 @@ Inspired by http://snippets.dzone.com/posts/show/7248
     base port.  The selected port will be logged by the worker.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import errno
 import os

+ 73 - 0
celery/contrib/sphinx.py

@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+celery.contrib.sphinx
+=====================
+
+Sphinx documentation plugin
+
+**Usage**
+
+Add the extension to your :file:`docs/conf.py` configuration module:
+
+.. code-block:: python
+
+    extensions = (...,
+                  'celery.contrib.sphinx')
+
+If you would like to change the prefix for tasks in reference documentation
+then you can change the ``celery_task_prefix`` configuration value:
+
+.. code-block:: python
+
+    celery_task_prefix = '(task)'  # < default
+
+
+With the extension installed, `autodoc` will automatically find
+task-decorated objects and generate the correct documentation
+(as well as add a ``(task)`` prefix), and you can also cross-reference
+tasks using the ``:task:`` role (e.g. ``:task:`proj.tasks.add``).
+
+Use ``.. autotask::`` to manually document a task.
+
+"""
+from __future__ import absolute_import
+
+from inspect import formatargspec, getargspec
+
+from sphinx.domains.python import PyModulelevel
+from sphinx.ext.autodoc import FunctionDocumenter
+
+from celery.app.task import BaseTask
+
+
+class TaskDocumenter(FunctionDocumenter):
+    objtype = 'task'
+    member_order = 11
+
+    @classmethod
+    def can_document_member(cls, member, membername, isattr, parent):
+        return (isinstance(member, BaseTask) and
+                getattr(member, '__wrapped__', None) is not None)
+
+    def format_args(self):
+        wrapped = getattr(self.object, '__wrapped__', None)
+        if wrapped is not None:
+            argspec = getargspec(wrapped)
+            fmt = formatargspec(*argspec)
+            fmt = fmt.replace('\\', '\\\\')
+            return fmt
+        return ''
+
+    def document_members(self, all_members=False):
+        pass
+
+
+class TaskDirective(PyModulelevel):
+
+    def get_signature_prefix(self, sig):
+        return self.env.config.celery_task_prefix
+
+
+def setup(app):
+    app.add_autodocumenter(TaskDocumenter)
+    app.domains['py'].directives['task'] = TaskDirective
+    app.add_config_value('celery_task_prefix', '(task)', True)

+ 4 - 4
celery/datastructures.py

@@ -186,9 +186,9 @@ class DependencyGraph(object):
         graph = DependencyGraph()
         components = self._tarjan72()
 
-        NC = dict((node, component)
-                  for component in components
-                  for node in component)
+        NC = {
+            node: component for component in components for node in component
+        }
         for component in components:
             graph.add_arc(component)
         for node in self:
@@ -555,7 +555,7 @@ class LimitedSet(object):
     """Kind-of Set with limitations.
     """Kind-of Set with limitations.
 
 
     Good for when you need to test for membership (`a in set`),
     Good for when you need to test for membership (`a in set`),
-    but the list might become to big.
+    but the list might become too big.
 
 
     :keyword maxlen: Maximum number of members before we start
     :keyword maxlen: Maximum number of members before we start
                      evicting expired members.
                      evicting expired members.
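
For reference, the documented membership behaviour in brief (argument values are arbitrary):

.. code-block:: python

    from celery.datastructures import LimitedSet

    seen = LimitedSet(maxlen=10000, expires=3600)
    seen.add('task-id-1')
    assert 'task-id-1' in seen   # expired/excess members are evicted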

+ 5 - 18
celery/events/__init__.py

@@ -13,7 +13,6 @@ from __future__ import absolute_import
 import os
 import time
 import threading
-import warnings
 
 from collections import deque
 from contextlib import contextmanager
@@ -36,14 +35,6 @@ event_exchange = Exchange('celeryev', type='topic')
 
 _TZGETTER = itemgetter('utcoffset', 'timestamp')
 
-W_YAJL = """
-anyjson is currently using the yajl library.
-This json implementation is broken, it severely truncates floats
-so timestamps will not work.
-
-Please uninstall yajl or force anyjson to use a different library.
-"""
-
 CLIENT_CLOCK_SKEW = -1
 
 
@@ -112,7 +103,7 @@ class EventDispatcher(object):
     You need to :meth:`close` this after use.
 
     """
-    DISABLED_TRANSPORTS = set(['sql'])
+    DISABLED_TRANSPORTS = {'sql'}
 
     app = None
 
@@ -124,7 +115,7 @@
 
     def __init__(self, connection=None, hostname=None, enabled=True,
                  channel=None, buffer_while_offline=True, app=None,
-                 serializer=None, groups=None):
+                 serializer=None, groups=None, delivery_mode=1):
         self.app = app_or_default(app or self.app)
         self.connection = connection
         self.channel = channel
@@ -139,6 +130,7 @@ class EventDispatcher(object):
         self.groups = set(groups or [])
         self.tzoffset = [-time.timezone, -time.altzone]
         self.clock = self.app.clock
+        self.delivery_mode = delivery_mode
         if not connection and channel:
             self.connection = channel.connection.client
         self.enabled = enabled
@@ -150,12 +142,6 @@ class EventDispatcher(object):
             self.enable()
         self.headers = {'hostname': self.hostname}
         self.pid = os.getpid()
-        self.warn_if_yajl()
-
-    def warn_if_yajl(self):
-        import anyjson
-        if anyjson.implementation.name == 'yajl':
-            warnings.warn(UserWarning(W_YAJL))
 
     def __enter__(self):
         return self
@@ -213,6 +199,7 @@ class EventDispatcher(object):
                 declare=[exchange],
                 serializer=self.serializer,
                 headers=self.headers,
+                delivery_mode=self.delivery_mode,
             )
 
     def send(self, type, blind=False, **fields):
@@ -300,7 +287,7 @@ class EventReceiver(ConsumerMixin):
         self.adjust_clock = self.clock.adjust
         self.forward_clock = self.clock.forward
         if accept is None:
-            accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
+            accept = {self.app.conf.CELERY_EVENT_SERIALIZER, 'json'}
         self.accept = accept
 
     def _get_queue_arguments(self):
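
The new ``delivery_mode`` argument defaults to transient (1), so event messages are not persisted by the broker; callers can opt back into persistent delivery. A sketch, assuming a configured app:

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    with app.connection() as connection:
        # delivery_mode=2 makes the event messages persistent again
        with app.events.Dispatcher(connection, delivery_mode=2) as dispatcher:
            dispatcher.send('worker-heartbeat')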

+ 1 - 1
celery/events/dumper.py

@@ -7,7 +7,7 @@
     as they happen. Think of it like a `tcpdump` for Celery events.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 

+ 1 - 1
celery/events/snapshot.py

@@ -10,7 +10,7 @@
     in :mod:`djcelery.snapshots` in the `django-celery` distribution.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 from kombu.utils.limits import TokenBucket
 

+ 26 - 20
celery/events/state.py

@@ -30,12 +30,12 @@ from time import time
 from weakref import ref
 
 from kombu.clocks import timetuple
-from kombu.utils import cached_property, kwdict
+from kombu.utils import cached_property
 
 from celery import states
 from celery.five import class_property, items, values
 from celery.utils import deprecated
-from celery.utils.functional import LRUCache
+from celery.utils.functional import LRUCache, memoize
 from celery.utils.log import get_logger
 
 PYPY = hasattr(sys, 'pypy_version_info')
@@ -54,8 +54,6 @@ Substantial drift from %s may mean clocks are out of sync.  Current drift is
 %s seconds.  [orig: %s recv: %s]
 """
 
-CAN_KWDICT = sys.version_info >= (2, 6, 5)
-
 logger = get_logger(__name__)
 warn = logger.warning
 
@@ -66,6 +64,14 @@ R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'
 __all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']
 
 
+@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
+def _warn_drift(hostname, drift, local_received, timestamp):
+    # we use memoize here so the warning is only logged once per hostname
+    warn(DRIFT_WARNING, hostname, drift,
+         datetime.fromtimestamp(local_received),
+         datetime.fromtimestamp(timestamp))
+
+
 def heartbeat_expires(timestamp, freq=60,
                       expire_window=HEARTBEAT_EXPIRE_WINDOW,
                       Decimal=Decimal, float=float, isinstance=isinstance):
@@ -78,7 +84,7 @@
 
 
 def _depickle_task(cls, fields):
-    return cls(**(fields if CAN_KWDICT else kwdict(fields)))
+    return cls(**fields)
 
 
 def with_unique_field(attr):
@@ -158,9 +164,8 @@ class Worker(object):
                     return
                 drift = abs(int(local_received) - int(timestamp))
                 if drift > HEARTBEAT_DRIFT_MAX:
-                    warn(DRIFT_WARNING, self.hostname, drift,
-                         datetime.fromtimestamp(local_received),
-                         datetime.fromtimestamp(timestamp))
+                    _warn_drift(self.hostname, drift,
+                                local_received, timestamp)
                 if local_received:
                     hearts = len(heartbeats)
                     if hearts > hbmax - 1:
@@ -215,7 +220,7 @@ class Worker(object):
     def _defaults(cls):
         """Deprecated, to be removed in 3.3"""
         source = cls()
-        return dict((k, getattr(source, k)) for k in cls._fields)
+        return {k: getattr(source, k) for k in cls._fields}
 
 
 @with_unique_field('uuid')
@@ -288,9 +293,9 @@ class Task(object):
             # this state logically happens-before the current state, so merge.
             keep = self.merge_rules.get(state)
             if keep is not None:
-                fields = dict(
-                    (k, v) for k, v in items(fields) if k in keep
-                )
+                fields = {
+                    k: v for k, v in items(fields) if k in keep
+                }
             for key, value in items(fields):
                 setattr(self, key, value)
         else:
@@ -316,9 +321,9 @@
 
     def as_dict(self):
         get = object.__getattribute__
-        return dict(
-            (k, get(self, k)) for k in self._fields
-        )
+        return {
+            k: get(self, k) for k in self._fields
+        }
 
     def __reduce__(self):
         return _depickle_task, (self.__class__, self.as_dict())
@@ -372,7 +377,7 @@ class Task(object):
     def merge(self, state, timestamp, fields):
         keep = self.merge_rules.get(state)
         if keep is not None:
-            fields = dict((k, v) for k, v in items(fields) if k in keep)
+            fields = {k: v for k, v in items(fields) if k in keep}
         for key, value in items(fields):
             setattr(self, key, value)
 
@@ -380,7 +385,7 @@ class Task(object):
     def _defaults(cls):
         """Deprecated, to be removed in 3.3."""
         source = cls()
-        return dict((k, getattr(source, k)) for k in source._fields)
+        return {k: getattr(source, k) for k in source._fields}
 
 
 class State(object):
@@ -429,9 +434,10 @@
 
     def _clear_tasks(self, ready=True):
         if ready:
-            in_progress = dict(
-                (uuid, task) for uuid, task in self.itertasks()
-                if task.state not in states.READY_STATES)
+            in_progress = {
+                uuid: task for uuid, task in self.itertasks()
+                if task.state not in states.READY_STATES
+            }
             self.tasks.clear()
             self.tasks.update(in_progress)
         else:
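
Memoizing ``_warn_drift`` on the hostname means each out-of-sync worker is reported once rather than on every heartbeat. The same warn-once-per-key idea in generic form (Python 3 stdlib, not celery code):

.. code-block:: python

    import logging
    from functools import lru_cache

    logger = logging.getLogger(__name__)

    @lru_cache(maxsize=1000)
    def warn_once(hostname):
        # subsequent calls with the same hostname hit the cache,
        # so the warning is not repeated.
        logger.warning('substantial clock drift from %s', hostname)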

+ 5 - 2
celery/exceptions.py

@@ -8,6 +8,8 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
+import numbers
+
 from .five import string_t
 from .five import string_t
 
 
 from billiard.exceptions import (  # noqa
 from billiard.exceptions import (  # noqa
@@ -98,7 +100,8 @@ class Retry(Exception):
     #: Exception (if any) that caused the retry to happen.
     exc = None
 
-    #: Time of retry (ETA), either int or :class:`~datetime.datetime`.
+    #: Time of retry (ETA), either :class:`numbers.Real` or
+    #: :class:`~datetime.datetime`.
     when = None
 
     def __init__(self, message=None, exc=None, when=None, **kwargs):
@@ -112,7 +115,7 @@ class Retry(Exception):
         Exception.__init__(self, exc, when, **kwargs)
 
     def humanize(self):
-        if isinstance(self.when, int):
+        if isinstance(self.when, numbers.Real):
             return 'in {0.when}s'.format(self)
         return 'at {0.when}'.format(self)
 
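
With ``numbers.Real`` the countdown form of ``humanize()`` now also covers floats, not just ints. A quick check:

.. code-block:: python

    from celery.exceptions import Retry

    print(Retry(when=7.5).humanize())   # 'in 7.5s' (was 'at 7.5' before)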

+ 20 - 188
celery/five.py

@@ -10,164 +10,15 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty',
-           'zip_longest', 'map', 'string', 'string_t',
-           'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values',
-           'nextfun', 'reraise', 'WhateverIO', 'with_metaclass',
-           'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d',
-           'class_property', 'reclassmethod', 'create_module',
-           'recreate_module', 'monotonic']
+__all__ = [
+    'class_property', 'reclassmethod', 'create_module', 'recreate_module',
+]
 
-import io
-
-try:
-    from collections import Counter
-except ImportError:  # pragma: no cover
-    from collections import defaultdict
-
-    def Counter():  # noqa
-        return defaultdict(int)
-
-############## py3k #########################################################
-import sys
-PY3 = sys.version_info[0] == 3
-
-try:
-    reload = reload                         # noqa
-except NameError:                           # pragma: no cover
-    from imp import reload                  # noqa
-
-try:
-    from UserList import UserList           # noqa
-except ImportError:                         # pragma: no cover
-    from collections import UserList        # noqa
-
-try:
-    from UserDict import UserDict           # noqa
-except ImportError:                         # pragma: no cover
-    from collections import UserDict        # noqa
-
-
-from kombu.five import monotonic
-
-if PY3:  # pragma: no cover
-    import builtins
-
-    from queue import Queue, Empty
-    from itertools import zip_longest
-
-    map = map
-    string = str
-    string_t = str
-    long_t = int
-    text_t = str
-    range = range
-    int_types = (int, )
-    _byte_t = bytes
-
-    open_fqdn = 'builtins.open'
-
-    def items(d):
-        return d.items()
-
-    def keys(d):
-        return d.keys()
-
-    def values(d):
-        return d.values()
-
-    def nextfun(it):
-        return it.__next__
-
-    exec_ = getattr(builtins, 'exec')
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-else:
-    import __builtin__ as builtins  # noqa
-    from Queue import Queue, Empty  # noqa
-    from itertools import imap as map, izip_longest as zip_longest  # noqa
-    string = unicode                # noqa
-    string_t = basestring           # noqa
-    text_t = unicode                # noqa
-    long_t = long                   # noqa
-    range = xrange                  # noqa
-    int_types = (int, long)         # noqa
-    _byte_t = (str, bytes)          # noqa
-
-    open_fqdn = '__builtin__.open'
-
-    def items(d):                   # noqa
-        return d.iteritems()
-
-    def keys(d):                    # noqa
-        return d.iterkeys()
-
-    def values(d):                  # noqa
-        return d.itervalues()
-
-    def nextfun(it):                # noqa
-        return it.next
-
-    def exec_(code, globs=None, locs=None):  # pragma: no cover
-        """Execute code in a namespace."""
-        if globs is None:
-            frame = sys._getframe(1)
-            globs = frame.f_globals
-            if locs is None:
-                locs = frame.f_locals
-            del frame
-        elif locs is None:
-            locs = globs
-        exec("""exec code in globs, locs""")
-
-    exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
-
-
-def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
-    """Class decorator to set metaclass.
-
-    Works with both Python 2 and Python 3 and it does not add
-    an extra class in the lookup order like ``six.with_metaclass`` does
-    (that is -- it copies the original class instead of using inheritance).
-
-    """
-
-    def _clone_with_metaclass(Class):
-        attrs = dict((key, value) for key, value in items(vars(Class))
-                     if key not in skip_attrs)
-        return Type(Class.__name__, Class.__bases__, attrs)
-
-    return _clone_with_metaclass
-
-
-############## collections.OrderedDict ######################################
-# was moved to kombu
-from kombu.utils.compat import OrderedDict  # noqa
-
-############## threading.TIMEOUT_MAX #######################################
-try:
-    from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
-except ImportError:
-    THREAD_TIMEOUT_MAX = 1e10  # noqa
-
-############## format(int, ',d') ##########################
-
-if sys.version_info >= (2, 7):  # pragma: no cover
-    def format_d(i):
-        return format(i, ',d')
-else:  # pragma: no cover
-    def format_d(i):  # noqa
-        s = '%d' % i
-        groups = []
-        while s and s[-1].isdigit():
-            groups.append(s[-3:])
-            s = s[:-3]
-        return s + ','.join(reversed(groups))
+# extends amqp.five
+from amqp.five import *  # noqa
+from amqp.five import __all__ as _all_five
 
+__all__ += _all_five
 
 ############## Module Generation ##########################
 
@@ -191,7 +42,7 @@ MODULE_DEPRECATED = """
 The module %s is deprecated and will be removed in a future version.
 """
 
-DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])
+DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
 
 # im_func is no longer available in Py3.
 # instead the unbound method itself can be used.
@@ -210,25 +61,17 @@ def getappattr(path):
     return current_app._rgetattr(path)
 
 
-def _compat_task_decorator(*args, **kwargs):
-    from celery import current_app
-    kwargs.setdefault('accept_magic_kwargs', True)
-    return current_app.task(*args, **kwargs)
-
-
 def _compat_periodic_task_decorator(*args, **kwargs):
     from celery.task import periodic_task
-    kwargs.setdefault('accept_magic_kwargs', True)
     return periodic_task(*args, **kwargs)
 
-
 COMPAT_MODULES = {
     'celery': {
         'execute': {
             'send_task': 'send_task',
         },
         'decorators': {
-            'task': _compat_task_decorator,
+            'task': 'task',
             'periodic_task': _compat_periodic_task_decorator,
         },
         'log': {
@@ -238,7 +81,6 @@ COMPAT_MODULES = {
             'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
         },
         'messaging': {
-            'TaskPublisher': 'amqp.TaskPublisher',
             'TaskConsumer': 'amqp.TaskConsumer',
             'establish_connection': 'connection',
             'get_consumer_set': 'amqp.TaskConsumer',
@@ -296,7 +138,7 @@ def reclassmethod(method):
     return classmethod(fun_of_method(method))
 
 
-class MagicModule(ModuleType):
+class LazyModule(ModuleType):
     _compat_modules = ()
     _all_by_module = {}
     _direct = {}
@@ -322,21 +164,23 @@
 
 
 def create_module(name, attrs, cls_attrs=None, pkg=None,
-                  base=MagicModule, prepare_attr=None):
+                  base=LazyModule, prepare_attr=None):
     fqdn = '.'.join([pkg.__name__, name]) if pkg else name
     cls_attrs = {} if cls_attrs is None else cls_attrs
     pkg, _, modname = name.rpartition('.')
     cls_attrs['__module__'] = pkg
 
-    attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr)
-                 for attr_name, attr in items(attrs))
+    attrs = {
+        attr_name: (prepare_attr(attr) if prepare_attr else attr)
+        for attr_name, attr in items(attrs)
+    }
     module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn)
     module.__dict__.update(attrs)
     return module
 
 
 def recreate_module(name, compat_modules=(), by_module={}, direct={},
-                    base=MagicModule, **attrs):
+                    base=LazyModule, **attrs):
     old_module = sys.modules[name]
     origins = get_origins(by_module)
     compat_modules = COMPAT_MODULES.get(name, ())
@@ -351,8 +195,9 @@ def recreate_module(name, compat_modules=(), by_module={}, direct={},
         ))),
     )
     new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
-    new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
-                               for mod in compat_modules))
+    new_module.__dict__.update({
+        mod: get_compat_module(new_module, mod) for mod in compat_modules
+    })
     return old_module, new_module
 
 
@@ -376,18 +221,5 @@ def get_compat_module(pkg, name):
 def get_origins(defs):
     origins = {}
     for module, attrs in items(defs):
-        origins.update(dict((attr, module) for attr in attrs))
+        origins.update({attr: module for attr in attrs})
     return origins
-
-
-_SIO_write = io.StringIO.write
-_SIO_init = io.StringIO.__init__
-
-
-class WhateverIO(io.StringIO):
-
-    def __init__(self, v=None, *a, **kw):
-        _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw)
-
-    def write(self, data):
-        _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data)

+ 15 - 6
celery/fixups/django.py

@@ -134,13 +134,22 @@ class DjangoWorkerFixup(object):
         )
 
     def validate_models(self):
-        from django.core.management.validation import get_validation_errors
         s = io.StringIO()
-        num_errors = get_validation_errors(s, None)
-        if num_errors:
-            raise RuntimeError(
-                'One or more Django models did not validate:\n{0}'.format(
-                    s.getvalue()))
+        try:
+            from django.core.management.validation import get_validation_errors
+        except ImportError:
+            import django
+            from django.core.management.base import BaseCommand
+            django.setup()
+            cmd = BaseCommand()
+            cmd.stdout, cmd.stderr = sys.stdout, sys.stderr
+            cmd.check()
+        else:
+            num_errors = get_validation_errors(s, None)
+            if num_errors:
+                raise RuntimeError(
+                    'One or more Django models did not validate:\n{0}'.format(
+                        s.getvalue()))
 
     def install(self):
         signals.beat_embedded_init.connect(self.close_database)

+ 2 - 2
celery/loaders/base.py

@@ -8,7 +8,6 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-import anyjson
 import imp as _imp
 import imp as _imp
 import importlib
 import importlib
 import os
 import os
@@ -17,6 +16,7 @@ import sys
 
 from datetime import datetime
 
+from kombu.utils import json
 from kombu.utils import cached_property
 from kombu.utils.encoding import safe_str
 
@@ -178,7 +178,7 @@ class BaseLoader(object):
     def cmdline_config_parser(
             self, args, namespace='celery',
             re_type=re.compile(r'\((\w+)\)'),
-            extra_types={'json': anyjson.loads},
+            extra_types={'json': json.loads},
             override_types={'tuple': 'json',
                             'list': 'json',
                             'dict': 'json'}):
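
For context, ``cmdline_config_parser()`` coerces values carrying a ``(type)`` prefix, and tuple/list/dict values always go through the json hook that now uses kombu's json instead of anyjson. A hedged sketch (the setting names and values are illustrative):

.. code-block:: python

    from celery import Celery

    app = Celery('proj')
    config = app.loader.cmdline_config_parser([
        'celeryd_concurrency=(int)8',
        'celery_queues=(dict){"default": {"exchange": "default"}}',
    ])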

+ 33 - 2
celery/local.py

@@ -212,12 +212,27 @@ class PromiseProxy(Proxy):
 
     """
 
+    __slots__ = ('__pending__', )
+
     def _get_current_object(self):
         try:
             return object.__getattribute__(self, '__thing')
         except AttributeError:
             return self.__evaluate__()
 
+    def __then__(self, fun, *args, **kwargs):
+        if self.__evaluated__():
+            return fun(*args, **kwargs)
+        from collections import deque
+        try:
+            pending = object.__getattribute__(self, '__pending__')
+        except AttributeError:
+            pending = None
+        if pending is None:
+            pending = deque()
+            object.__setattr__(self, '__pending__', pending)
+        pending.append((fun, args, kwargs))
+
     def __evaluated__(self):
     def __evaluated__(self):
         try:
         try:
             object.__getattribute__(self, '__thing')
             object.__getattribute__(self, '__thing')
@@ -234,15 +249,31 @@ class PromiseProxy(Proxy):
                              '_Proxy__kwargs')):
                              '_Proxy__kwargs')):
         try:
         try:
             thing = Proxy._get_current_object(self)
             thing = Proxy._get_current_object(self)
+        except:
+            raise
+        else:
             object.__setattr__(self, '__thing', thing)
             object.__setattr__(self, '__thing', thing)
-            return thing
-        finally:
             for attr in _clean:
             for attr in _clean:
                 try:
                 try:
                     object.__delattr__(self, attr)
                     object.__delattr__(self, attr)
                 except AttributeError:  # pragma: no cover
                 except AttributeError:  # pragma: no cover
                     # May mask errors so ignore
                     # May mask errors so ignore
                     pass
                     pass
+            try:
+                pending = object.__getattribute__(self, '__pending__')
+            except AttributeError:
+                pass
+            else:
+                try:
+                    while pending:
+                        fun, args, kwargs = pending.popleft()
+                        fun(*args, **kwargs)
+                finally:
+                    try:
+                        object.__delattr__(self, '__pending__')
+                    except AttributeError:
+                        pass
+            return thing
 
 
 
 
 def maybe_evaluate(obj):
 def maybe_evaluate(obj):
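
The new __then__ hook queues callbacks on an unevaluated promise and fires
them once, right after the proxied object is first created; callbacks
registered after evaluation run immediately.  A usage sketch against this
branch's celery.local (Python 3):

    from celery.local import PromiseProxy, maybe_evaluate

    p = PromiseProxy(lambda: 42)
    p.__then__(print, 'evaluated!')   # queued; p not evaluated yet
    maybe_evaluate(p)                 # forces evaluation -> prints
    p.__then__(print, 'immediate')    # already evaluated -> runs now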

+ 20 - 11
celery/platforms.py

@@ -12,6 +12,7 @@ from __future__ import absolute_import, print_function
 import atexit
 import errno
 import math
+import numbers
 import os
 import platform as _platform
 import signal as _signal
@@ -23,7 +24,6 @@ from collections import namedtuple
 from billiard import current_process
 # fileno used to be in this module
 from kombu.utils import maybe_fileno
-from kombu.utils.compat import get_errno
 from kombu.utils.encoding import safe_str
 from contextlib import contextmanager
 
@@ -35,6 +35,7 @@ _setproctitle = try_import('setproctitle')
 resource = try_import('resource')
 pwd = try_import('pwd')
 grp = try_import('grp')
+mputil = try_import('multiprocessing.util')
 
 __all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
            'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed',
@@ -49,6 +50,7 @@ EX_OK = getattr(os, 'EX_OK', 0)
 EX_FAILURE = 1
 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
 EX_USAGE = getattr(os, 'EX_USAGE', 64)
+EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)
 
 SYSTEM = _platform.system()
 IS_OSX = SYSTEM == 'Darwin'
@@ -258,7 +260,8 @@ def create_pidlock(pidfile):
 def _create_pidlock(pidfile):
     pidlock = Pidfile(pidfile)
     if pidlock.is_locked() and not pidlock.remove_if_stale():
-        raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid()))
+        print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
+        raise SystemExit(EX_CANTCREAT)
     pidlock.acquire()
     return pidlock
 
@@ -266,9 +269,10 @@ def _create_pidlock(pidfile):
 if hasattr(os, 'closerange'):
 
     def close_open_fds(keep=None):
-        keep = list(uniq(sorted(filter(None, (
-            maybe_fileno(f) for f in keep or []
-        )))))
+        # must make sure this is 0-inclusive (Issue #1882)
+        keep = list(uniq(sorted(
+            f for f in map(maybe_fileno, keep or []) if f is not None
+        )))
         maxfd = get_fdmax(default=2048)
         kL, kH = iter([-1] + keep), iter(keep + [maxfd])
         for low, high in zip_longest(kL, kH):
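
The rewritten filter keeps fd 0 when it is not explicitly kept (the old
filter(None, ...) dropped it, Issue #1882): pairing [-1] + keep with
keep + [maxfd] yields the half-open windows the surrounding loop can hand
to os.closerange.  A quick sketch of the pairing (Python 3 itertools;
celery.five aliases zip_longest on Python 2):

    from itertools import zip_longest

    keep, maxfd = [1, 2], 2048
    kL, kH = iter([-1] + keep), iter(keep + [maxfd])
    print([(low + 1, high) for low, high in zip_longest(kL, kH)])
    # -> [(0, 1), (2, 2), (3, 2048)]; fd 0 is now covered too
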
@@ -290,11 +294,13 @@ class DaemonContext(object):
     _is_open = False
 
     def __init__(self, pidfile=None, workdir=None, umask=None,
-                 fake=False, after_chdir=None, **kwargs):
+                 fake=False, after_chdir=None, after_forkers=True,
+                 **kwargs):
         self.workdir = workdir or DAEMON_WORKDIR
         self.umask = DAEMON_UMASK if umask is None else umask
         self.fake = fake
         self.after_chdir = after_chdir
+        self.after_forkers = after_forkers
         self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
 
     def redirect_to_null(self, fd):
@@ -313,9 +319,12 @@ class DaemonContext(object):
             if self.after_chdir:
                 self.after_chdir()
 
-            close_open_fds(self.stdfds)
-            for fd in self.stdfds:
-                self.redirect_to_null(maybe_fileno(fd))
+            if not self.fake:
+                close_open_fds(self.stdfds)
+                for fd in self.stdfds:
+                    self.redirect_to_null(maybe_fileno(fd))
+                if self.after_forkers and mputil is not None:
+                    mputil._run_after_forkers()
 
             self._is_open = True
     __enter__ = open
@@ -521,7 +530,7 @@ def maybe_drop_privileges(uid=None, gid=None):
         try:
             setuid(0)
         except OSError as exc:
-            if get_errno(exc) != errno.EPERM:
+            if exc.errno != errno.EPERM:
                 raise
             pass  # Good: cannot restore privileges.
         else:
@@ -606,7 +615,7 @@ class Signals(object):
 
     def signum(self, signal_name):
         """Get signal number from signal name."""
-        if isinstance(signal_name, int):
+        if isinstance(signal_name, numbers.Integral):
            return signal_name
         if not isinstance(signal_name, string_t) \
                 or not signal_name.isupper():
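
A plain isinstance(x, int) check rejects Python 2 long values and other
registered integral types; numbers.Integral accepts them all while still
rejecting floats:

    import numbers

    assert isinstance(10, numbers.Integral)        # plain int
    assert not isinstance(10.0, numbers.Integral)  # floats still rejected
    # on Python 2, isinstance(10L, int) is False, but
    # isinstance(10L, numbers.Integral) is True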

+ 101 - 29
celery/result.py

@@ -11,12 +11,11 @@ from __future__ import absolute_import
 import time
 import warnings
 
-from collections import deque
+from collections import OrderedDict, deque
 from contextlib import contextmanager
 from copy import copy
 
 from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
 
 from . import current_app
 from . import states
@@ -87,6 +86,7 @@ class AsyncResult(ResultBase):
         self.backend = backend or self.app.backend
         self.task_name = task_name
         self.parent = parent
+        self._cache = None
 
     def as_tuple(self):
         parent = self.parent
@@ -95,6 +95,7 @@ class AsyncResult(ResultBase):
 
     def forget(self):
         """Forget about (and possibly remove the result of) this task."""
+        self._cache = None
         self.backend.forget(self.id)
 
     def revoke(self, connection=None, terminate=False, signal=None,
@@ -118,7 +119,8 @@ class AsyncResult(ResultBase):
                                 terminate=terminate, signal=signal,
                                 reply=wait, timeout=timeout)
 
-    def get(self, timeout=None, propagate=True, interval=0.5):
+    def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True,
+            follow_parents=True):
         """Wait until task is ready, and return its result.
 
         .. warning::
@@ -133,6 +135,10 @@ class AsyncResult(ResultBase):
            retrieve the result.  Note that this does not have any effect
           when using the amqp result store backend, as it does not
           use polling.
+        :keyword no_ack: Enable amqp no ack (automatically acknowledge
+            message).  If this is :const:`False` then the message will
+            **not be acked**.
+        :keyword follow_parents: Reraise any exception raised by parent task.
 
         :raises celery.exceptions.TimeoutError: if `timeout` is not
             :const:`None` and the result does not arrive within `timeout`
@@ -143,15 +149,32 @@ class AsyncResult(ResultBase):
 
         """
         assert_will_not_block()
-        if propagate and self.parent:
-            for node in reversed(list(self._parents())):
-                node.get(propagate=True, timeout=timeout, interval=interval)
+        on_interval = None
+        if follow_parents and propagate and self.parent:
+            on_interval = self._maybe_reraise_parent_error
+            on_interval()
 
-        return self.backend.wait_for(self.id, timeout=timeout,
-                                     propagate=propagate,
-                                     interval=interval)
+        if self._cache:
+            if propagate:
+                self.maybe_reraise()
+            return self.result
+
+        try:
+            return self.backend.wait_for(
+                self.id, timeout=timeout,
+                propagate=propagate,
+                interval=interval,
+                on_interval=on_interval,
+                no_ack=no_ack,
+            )
+        finally:
+            self._get_task_meta()  # update self._cache
     wait = get  # deprecated alias to :meth:`get`.
 
+    def _maybe_reraise_parent_error(self):
+        for node in reversed(list(self._parents())):
+            node.maybe_reraise()
+
     def _parents(self):
         node = self.parent
         while node:
@@ -238,6 +261,10 @@ class AsyncResult(ResultBase):
         """Returns :const:`True` if the task failed."""
         return self.state == states.FAILURE
 
+    def maybe_reraise(self):
+        if self.state in states.PROPAGATE_STATES:
+            raise self.result
+
     def build_graph(self, intermediate=False, formatter=None):
         graph = DependencyGraph(
             formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
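
maybe_reraise gives callers a non-blocking way to propagate a failure that
has already been recorded; a usage sketch (the task name is illustrative):

    res = some_task.delay()    # hypothetical task
    ...                        # do other work; no blocking get() needed
    res.maybe_reraise()        # re-raises the stored exception only if the
                               # state is in PROPAGATE_STATES; else a no-op
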
@@ -280,6 +307,9 @@ class AsyncResult(ResultBase):
     def __reduce_args__(self):
         return self.id, self.backend, self.task_name, None, self.parent
 
+    def __del__(self):
+        self._cache = None
+
     @cached_property
     def graph(self):
         return self.build_graph()
@@ -290,22 +320,41 @@ class AsyncResult(ResultBase):
 
     @property
     def children(self):
-        children = self.backend.get_children(self.id)
+        return self._get_task_meta().get('children')
+
+    def _get_task_meta(self):
+        if self._cache is None:
+            meta = self.backend.get_task_meta(self.id)
+            if meta:
+                state = meta['status']
+                if state == states.SUCCESS or state in states.PROPAGATE_STATES:
+                    return self._set_cache(meta)
+            return meta
+        return self._cache
+
+    def _set_cache(self, d):
+        state, children = d['status'], d.get('children')
+        if state in states.EXCEPTION_STATES:
+            d['result'] = self.backend.exception_to_python(d['result'])
         if children:
-            return [result_from_tuple(child, self.app) for child in children]
+            d['children'] = [
+                result_from_tuple(child, self.app) for child in children
+            ]
+        self._cache = d
+        return d
 
     @property
     def result(self):
         """When the task has been executed, this contains the return value.
         If the task raised an exception, this will be the exception
         instance."""
-        return self.backend.get_result(self.id)
+        return self._get_task_meta()['result']
     info = result
 
     @property
     def traceback(self):
         """Get the traceback of a failed task."""
-        return self.backend.get_traceback(self.id)
+        return self._get_task_meta().get('traceback')
 
     @property
     def state(self):
@@ -337,7 +386,7 @@ class AsyncResult(ResultBase):
                 then contains the tasks return value.
 
         """
-        return self.backend.get_status(self.id)
+        return self._get_task_meta()['status']
     status = state
 
     @property
@@ -426,6 +475,10 @@ class ResultSet(ResultBase):
         """
         return any(result.failed() for result in self.results)
 
+    def maybe_reraise(self):
+        for result in self.results:
+            result.maybe_reraise()
+
     def waiting(self):
         """Are any of the tasks incomplete?
 
@@ -506,7 +559,8 @@ class ResultSet(ResultBase):
             if timeout and elapsed >= timeout:
                 raise TimeoutError('The operation timed out')
 
-    def get(self, timeout=None, propagate=True, interval=0.5, callback=None):
+    def get(self, timeout=None, propagate=True, interval=0.5,
+            callback=None, no_ack=True):
         """See :meth:`join`
 
         This is here for API compatibility with :class:`AsyncResult`,
@@ -516,9 +570,10 @@ class ResultSet(ResultBase):
         """
         return (self.join_native if self.supports_native_join else self.join)(
             timeout=timeout, propagate=propagate,
-            interval=interval, callback=callback)
+            interval=interval, callback=callback, no_ack=no_ack)
 
-    def join(self, timeout=None, propagate=True, interval=0.5, callback=None):
+    def join(self, timeout=None, propagate=True, interval=0.5,
+             callback=None, no_ack=True):
         """Gathers the results of all tasks as a list in order.
 
         .. note::
@@ -557,6 +612,10 @@ class ResultSet(ResultBase):
                            ``result = app.AsyncResult(task_id)`` (both will
                            take advantage of the backend cache anyway).
 
+        :keyword no_ack: Automatic message acknowledgement (Note that if this
+            is set to :const:`False` then the messages *will not be
+            acknowledged*).
+
        :raises celery.exceptions.TimeoutError: if ``timeout`` is not
            :const:`None` and the operation takes longer than ``timeout``
            seconds.
@@ -573,16 +632,17 @@ class ResultSet(ResultBase):
                 remaining = timeout - (monotonic() - time_start)
                 if remaining <= 0.0:
                     raise TimeoutError('join operation timed out')
-            value = result.get(timeout=remaining,
-                               propagate=propagate,
-                               interval=interval)
+            value = result.get(
+                timeout=remaining, propagate=propagate,
+                interval=interval, no_ack=no_ack,
+            )
             if callback:
                 callback(result.id, value)
             else:
                 results.append(value)
         return results
 
-    def iter_native(self, timeout=None, interval=0.5):
+    def iter_native(self, timeout=None, interval=0.5, no_ack=True):
         """Backend optimized version of :meth:`iterate`.
 
         .. versionadded:: 2.2
@@ -597,12 +657,13 @@ class ResultSet(ResultBase):
         results = self.results
         if not results:
             return iter([])
-        return results[0].backend.get_many(
-            set(r.id for r in results), timeout=timeout, interval=interval,
+        return self.backend.get_many(
+            set(r.id for r in results),
+            timeout=timeout, interval=interval, no_ack=no_ack,
         )
 
     def join_native(self, timeout=None, propagate=True,
-                    interval=0.5, callback=None):
+                    interval=0.5, callback=None, no_ack=True):
         """Backend optimized version of :meth:`join`.
 
         .. versionadded:: 2.2
@@ -615,11 +676,11 @@ class ResultSet(ResultBase):
 
         """
         assert_will_not_block()
-        order_index = None if callback else dict(
-            (result.id, i) for i, result in enumerate(self.results)
-        )
+        order_index = None if callback else {
+            result.id: i for i, result in enumerate(self.results)
+        }
         acc = None if callback else [None for _ in range(len(self))]
-        for task_id, meta in self.iter_native(timeout, interval):
+        for task_id, meta in self.iter_native(timeout, interval, no_ack):
             value = meta['result']
             if propagate and meta['status'] in states.PROPAGATE_STATES:
                 raise value
@@ -656,7 +717,14 @@ class ResultSet(ResultBase):
 
     @property
     def supports_native_join(self):
-        return self.results[0].supports_native_join
+        try:
+            return self.results[0].supports_native_join
+        except IndexError:
+            pass
+
+    @property
+    def backend(self):
+        return self.app.backend if self.app else self.results[0].backend
 
 
 class GroupResult(ResultSet):
@@ -772,6 +840,10 @@ class EagerResult(AsyncResult):
         self._state = state
         self._traceback = traceback
 
+    def _get_task_meta(self):
+        return {'task_id': self.id, 'result': self._result, 'status':
+                self._state, 'traceback': self._traceback}
+
     def __reduce__(self):
         return self.__class__, self.__reduce_args__()
 
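With the new meta cache, an AsyncResult stops hitting the result backend
once a terminal state has been seen.  A sketch of the intended access
pattern (task_id from an earlier apply_async; names illustrative):

    res = app.AsyncResult(task_id)
    res.state         # backend round-trip while the task is pending
    res.get()         # waits; terminal meta is cached on the way out
    res.result        # now served from res._cache
    res.traceback     # ditto; no further backend queries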

+ 11 - 10
celery/schedules.py

@@ -9,6 +9,7 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
+import numbers
 import re
 import re
 
 
 from collections import namedtuple
 from collections import namedtuple
@@ -20,8 +21,8 @@ from . import current_app
 from .five import range, string_t
 from .five import range, string_t
 from .utils import is_iterable
 from .utils import is_iterable
 from .utils.timeutils import (
 from .utils.timeutils import (
-    timedelta_seconds, weekday, maybe_timedelta, remaining,
-    humanize_seconds, timezone, maybe_make_aware, ffwd
+    weekday, maybe_timedelta, remaining, humanize_seconds,
+    timezone, maybe_make_aware, ffwd
 )
 )
 from .datastructures import AttributeDict
 from .datastructures import AttributeDict
 
 
@@ -115,7 +116,7 @@ class schedule(object):
         """
         """
         last_run_at = self.maybe_make_aware(last_run_at)
         last_run_at = self.maybe_make_aware(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
-        remaining_s = timedelta_seconds(rem_delta)
+        remaining_s = max(rem_delta.total_seconds(), 0)
         if remaining_s == 0:
         if remaining_s == 0:
             return schedstate(is_due=True, next=self.seconds)
             return schedstate(is_due=True, next=self.seconds)
         return schedstate(is_due=False, next=remaining_s)
         return schedstate(is_due=False, next=remaining_s)
@@ -141,7 +142,7 @@ class schedule(object):
 
 
     @property
     @property
     def seconds(self):
     def seconds(self):
-        return timedelta_seconds(self.run_every)
+        return max(self.run_every.total_seconds(), 0)
 
 
     @property
     @property
     def human_seconds(self):
     def human_seconds(self):
@@ -382,7 +383,7 @@ class crontab(schedule):
 
 
             int         (like 7)
             int         (like 7)
             str         (like '3-5,*/15', '*', or 'monday')
             str         (like '3-5,*/15', '*', or 'monday')
-            set         (like set([0,15,30,45]))
+            set         (like {0,15,30,45}
             list        (like [8-17])
             list        (like [8-17])
 
 
         And convert it to an (expanded) set representing all time unit
         And convert it to an (expanded) set representing all time unit
@@ -401,8 +402,8 @@ class crontab(schedule):
         week.
         week.
 
 
         """
         """
-        if isinstance(cronspec, int):
-            result = set([cronspec])
+        if isinstance(cronspec, numbers.Integral):
+            result = {cronspec}
         elif isinstance(cronspec, string_t):
         elif isinstance(cronspec, string_t):
             result = crontab_parser(max_, min_).parse(cronspec)
             result = crontab_parser(max_, min_).parse(cronspec)
         elif isinstance(cronspec, set):
         elif isinstance(cronspec, set):
@@ -561,11 +562,11 @@ class crontab(schedule):
 
 
         """
         """
         rem_delta = self.remaining_estimate(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
-        rem = timedelta_seconds(rem_delta)
+        rem = max(rem_delta.total_seconds(), 0)
         due = rem == 0
         due = rem == 0
         if due:
         if due:
             rem_delta = self.remaining_estimate(self.now())
             rem_delta = self.remaining_estimate(self.now())
-            rem = timedelta_seconds(rem_delta)
+            rem = max(rem_delta.total_seconds(), 0)
         return schedstate(due, rem)
         return schedstate(due, rem)
 
 
     def __eq__(self, other):
     def __eq__(self, other):
@@ -583,7 +584,7 @@ class crontab(schedule):
 
 
 def maybe_schedule(s, relative=False, app=None):
 def maybe_schedule(s, relative=False, app=None):
     if s is not None:
     if s is not None:
-        if isinstance(s, int):
+        if isinstance(s, numbers.Integral):
             s = timedelta(seconds=s)
             s = timedelta(seconds=s)
         if isinstance(s, timedelta):
         if isinstance(s, timedelta):
             return schedule(s, relative, app=app)
             return schedule(s, relative, app=app)
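
timedelta.total_seconds() (available since Python 2.7) replaces the old
timedelta_seconds helper; clamping with max(..., 0) preserves that helper's
behaviour of never reporting a negative remainder for overdue entries:

    from datetime import timedelta

    overdue = timedelta(seconds=-30)              # next run was 30s ago
    assert max(overdue.total_seconds(), 0) == 0   # treated as "due now"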

+ 5 - 4
celery/security/certificate.py

@@ -35,7 +35,7 @@ class Certificate(object):
 
     def get_serial_number(self):
         """Return the serial number in the certificate."""
-        return self._cert.get_serial_number()
+        return bytes_to_str(self._cert.get_serial_number())
 
     def get_issuer(self):
         """Return issuer (CA) as a string"""
@@ -66,14 +66,15 @@ class CertStore(object):
     def __getitem__(self, id):
         """get certificate by id"""
         try:
-            return self._certs[id]
+            return self._certs[bytes_to_str(id)]
         except KeyError:
             raise SecurityError('Unknown certificate: {0!r}'.format(id))
 
     def add_cert(self, cert):
-        if cert.get_id() in self._certs:
+        cert_id = bytes_to_str(cert.get_id())
+        if cert_id in self._certs:
             raise SecurityError('Duplicate certificate: {0!r}'.format(id))
-        self._certs[cert.get_id()] = cert
+        self._certs[cert_id] = cert
 
 
 class FSCertStore(CertStore):

+ 6 - 9
celery/security/serialization.py

@@ -44,7 +44,7 @@ class SecureSerializer(object):
         assert self._cert is not None
         with reraise_errors('Unable to serialize: {0!r}', (Exception, )):
             content_type, content_encoding, body = dumps(
-                data, serializer=self._serializer)
+                bytes_to_str(data), serializer=self._serializer)
             # What we sign is the serialized body, not the body itself.
             # this way the receiver doesn't have to decode the contents
             # to verify the signature (and thus avoiding potential flaws
@@ -89,15 +89,12 @@ class SecureSerializer(object):
 
         v = raw_payload[end_of_sig:].split(sep)
 
-        values = [bytes_to_str(signer), bytes_to_str(signature),
-                  bytes_to_str(v[0]), bytes_to_str(v[1]), bytes_to_str(v[2])]
-
         return {
-            'signer': values[0],
-            'signature': values[1],
-            'content_type': values[2],
-            'content_encoding': values[3],
-            'body': values[4],
+            'signer': signer,
+            'signature': signature,
+            'content_type': bytes_to_str(v[0]),
+            'content_encoding': bytes_to_str(v[1]),
+            'body': bytes_to_str(v[2]),
         }
 
 
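On Python 3 the fields split out of a signed payload arrive as bytes, so the
deserializer now normalizes them at the edges with kombu's bytes_to_str
helper, which is a no-op for values that are already text:

    from kombu.utils.encoding import bytes_to_str

    assert bytes_to_str(b'application/json') == 'application/json'
    assert bytes_to_str('already text') == 'already text'
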

+ 2 - 2
celery/task/__init__.py

@@ -12,7 +12,7 @@
 from __future__ import absolute_import
 
 from celery._state import current_app, current_task as current
-from celery.five import MagicModule, recreate_module
+from celery.five import LazyModule, recreate_module
 from celery.local import Proxy
 
 __all__ = [
@@ -32,7 +32,7 @@ if STATICA_HACK:  # pragma: no cover
     from .sets import TaskSet
 
 
-class module(MagicModule):
+class module(LazyModule):
 
     def __call__(self, *args, **kwargs):
         return self.task(*args, **kwargs)

+ 23 - 7
celery/task/base.py

@@ -24,6 +24,7 @@ __all__ = ['Task', 'PeriodicTask', 'task']
 #: list of methods that must be classmethods in the old API.
 _COMPAT_CLASSMETHODS = (
     'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request',
+    'signature_from_request', 'signature',
     'AsyncResult', 'subtask', '_get_request', '_get_exec_options',
 )
 
@@ -50,7 +51,6 @@ class Task(BaseTask):
     priority = None
     type = 'regular'
     disable_error_emails = False
-    accept_magic_kwargs = False
 
     from_config = BaseTask.from_config + (
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
@@ -69,6 +69,16 @@ class Task(BaseTask):
     def request(cls):
         return cls._get_request()
 
+    @class_property
+    def backend(cls):
+        if cls._backend is None:
+            return cls.app.backend
+        return cls._backend
+
+    @backend.setter
+    def backend(cls, value):  # noqa
+        cls._backend = value
+
     @classmethod
     def get_logger(self, **kwargs):
         return get_task_logger(self.name)
@@ -96,12 +106,19 @@ class Task(BaseTask):
                       exchange_type=None, **options):
         """Deprecated method to get the task publisher (now called producer).
 
-        Should be replaced with :class:`@amqp.TaskProducer`:
+        Should be replaced with :class:`@kombu.Producer`:
 
         .. code-block:: python
 
-            with celery.connection() as conn:
-                with celery.amqp.TaskProducer(conn) as prod:
+            with app.connection() as conn:
+                with app.amqp.Producer(conn) as prod:
+                    my_task.apply_async(producer=prod)
+
+            or even better is to use the :class:`@amqp.producer_pool`:
+
+            .. code-block:: python
+
+                with app.producer_or_acquire() as prod:
                     my_task.apply_async(producer=prod)
 
         """
@@ -109,7 +126,7 @@ class Task(BaseTask):
         if exchange_type is None:
             exchange_type = self.exchange_type
         connection = connection or self.establish_connection()
-        return self._get_app().amqp.TaskProducer(
+        return self._get_app().amqp.Producer(
             connection,
             exchange=exchange and Exchange(exchange, exchange_type),
             routing_key=self.routing_key, **options
@@ -160,8 +177,7 @@ class PeriodicTask(Task):
 
 def task(*args, **kwargs):
     """Deprecated decorator, please use :func:`celery.task`."""
-    return current_app.task(*args, **dict({'accept_magic_kwargs': False,
-                                           'base': Task}, **kwargs))
+    return current_app.task(*args, **dict({'base': Task}, **kwargs))
 
 
 def periodic_task(*args, **options):

+ 9 - 9
celery/task/http.py

@@ -8,7 +8,6 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-import anyjson
 import sys
 import sys
 
 
 try:
 try:
@@ -17,6 +16,8 @@ except ImportError:  # pragma: no cover
     from urllib import urlencode              # noqa
     from urllib import urlencode              # noqa
     from urlparse import urlparse, parse_qsl  # noqa
     from urlparse import urlparse, parse_qsl  # noqa
 
 
+from kombu.utils import json
+
 from celery import shared_task, __version__ as celery_version
 from celery import shared_task, __version__ as celery_version
 from celery.five import items, reraise
 from celery.five import items, reraise
 from celery.utils.log import get_task_logger
 from celery.utils.log import get_task_logger
@@ -41,13 +42,13 @@ else:
 
 
     from urllib2 import Request, urlopen  # noqa
     from urllib2 import Request, urlopen  # noqa
 
 
-    def utf8dict(tup):  # noqa
+    def utf8dict(tup, enc='utf-8'):  # noqa
         """With a dict's items() tuple return a new dict with any utf-8
         """With a dict's items() tuple return a new dict with any utf-8
         keys/values encoded."""
         keys/values encoded."""
-        return dict(
-            (k.encode('utf-8'),
-             v.encode('utf-8') if isinstance(v, unicode) else v)  # noqa
-            for k, v in tup)
+        return {
+            k.encode(enc): (v.encode(enc) if isinstance(v, unicode) else v)
+            for k, v in tup
+        }
 
 
 
 
 class InvalidResponseError(Exception):
 class InvalidResponseError(Exception):
@@ -62,7 +63,7 @@ class UnknownStatusError(InvalidResponseError):
     """The remote server gave an unknown status."""
     """The remote server gave an unknown status."""
 
 
 
 
-def extract_response(raw_response, loads=anyjson.loads):
+def extract_response(raw_response, loads=json.loads):
     """Extract the response text from a raw JSON response."""
     """Extract the response text from a raw JSON response."""
     if not raw_response:
     if not raw_response:
         raise InvalidResponseError('Empty response')
         raise InvalidResponseError('Empty response')
@@ -162,8 +163,7 @@ class HttpDispatch(object):
         return headers
         return headers
 
 
 
 
-@shared_task(name='celery.http_dispatch', bind=True,
-             url=None, method=None, accept_magic_kwargs=False)
+@shared_task(name='celery.http_dispatch', bind=True, url=None, method=None)
 def dispatch(self, url=None, method='GET', **kwargs):
 def dispatch(self, url=None, method='GET', **kwargs):
     """Task dispatching to an URL.
     """Task dispatching to an URL.
 
 

+ 1 - 1
celery/task/sets.py

@@ -46,7 +46,7 @@ class TaskSet(list):
         super(TaskSet, self).__init__(
             maybe_signature(t, app=self.app) for t in tasks or []
         )
-        self.Publisher = Publisher or self.app.amqp.TaskProducer
+        self.Publisher = Publisher or self.app.amqp.Producer
         self.total = len(self)  # XXX compat
 
     def apply_async(self, connection=None, publisher=None, taskset_id=None):

+ 0 - 12
celery/task/trace.py

@@ -1,12 +0,0 @@
-"""This module has moved to celery.app.trace."""
-from __future__ import absolute_import
-
-import sys
-
-from celery.utils import warn_deprecated
-
-warn_deprecated('celery.task.trace', removal='3.2',
-                alternative='Please use celery.app.trace instead.')
-
-from celery.app import trace
-sys.modules[__name__] = trace

+ 1 - 1
celery/tests/__init__.py

@@ -22,7 +22,7 @@ def setup():
         KOMBU_DISABLE_LIMIT_PROTECTION='yes',
     )
 
-    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv:
+    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv:
         from warnings import catch_warnings
         with catch_warnings(record=True):
             import_all_modules()

+ 4 - 96
celery/tests/app/test_amqp.py

@@ -1,86 +1,10 @@
 from __future__ import absolute_import
 
-import datetime
-
-import pytz
-
 from kombu import Exchange, Queue
 
-from celery.app.amqp import Queues, TaskPublisher
+from celery.app.amqp import Queues
 from celery.five import keys
-from celery.tests.case import AppCase, Mock
-
-
-class test_TaskProducer(AppCase):
-
-    def test__exit__(self):
-        publisher = self.app.amqp.TaskProducer(self.app.connection())
-        publisher.release = Mock()
-        with publisher:
-            pass
-        publisher.release.assert_called_with()
-
-    def test_declare(self):
-        publisher = self.app.amqp.TaskProducer(self.app.connection())
-        publisher.exchange.name = 'foo'
-        publisher.declare()
-        publisher.exchange.name = None
-        publisher.declare()
-
-    def test_retry_policy(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish_task('tasks.add', (2, 2), {},
-                          retry_policy={'frobulate': 32.4})
-
-    def test_publish_no_retry(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123)
-        self.assertFalse(prod.connection.ensure.call_count)
-
-    def test_publish_custom_queue(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        self.app.amqp.queues['some_queue'] = Queue(
-            'xxx', Exchange('yyy'), 'zzz',
-        )
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        prod.publish_task('tasks.add', (8, 8), {}, retry=False,
-                          queue='some_queue')
-        self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy')
-        self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz')
-
-    def test_publish_with_countdown(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        now = datetime.datetime(2013, 11, 26, 16, 48, 46)
-        prod.publish_task('tasks.add', (1, 1), {}, retry=False,
-                          countdown=10, now=now)
-        self.assertEqual(
-            prod.publish.call_args[0][0]['eta'],
-            '2013-11-26T16:48:56+00:00',
-        )
-
-    def test_publish_with_countdown_and_timezone(self):
-        # use timezone with fixed offset to be sure it won't be changed
-        self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120)
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        now = datetime.datetime(2013, 11, 26, 16, 48, 46)
-        prod.publish_task('tasks.add', (2, 2), {}, retry=False,
-                          countdown=20, now=now)
-        self.assertEqual(
-            prod.publish.call_args[0][0]['eta'],
-            '2013-11-26T18:49:06+02:00',
-        )
-
-    def test_event_dispatcher(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        self.assertTrue(prod.event_dispatcher)
-        self.assertFalse(prod.event_dispatcher.enabled)
+from celery.tests.case import AppCase
 
 
 class test_TaskConsumer(AppCase):
@@ -90,30 +14,14 @@ class test_TaskConsumer(AppCase):
             self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json']
             self.assertEqual(
                 self.app.amqp.TaskConsumer(conn).accept,
-                set(['application/json'])
+                {'application/json'},
             )
             self.assertEqual(
                 self.app.amqp.TaskConsumer(conn, accept=['json']).accept,
-                set(['application/json']),
+                {'application/json'},
             )
 
 
-class test_compat_TaskPublisher(AppCase):
-
-    def test_compat_exchange_is_string(self):
-        producer = TaskPublisher(exchange='foo', app=self.app)
-        self.assertIsInstance(producer.exchange, Exchange)
-        self.assertEqual(producer.exchange.name, 'foo')
-        self.assertEqual(producer.exchange.type, 'direct')
-        producer = TaskPublisher(exchange='foo', exchange_type='topic',
-                                 app=self.app)
-        self.assertEqual(producer.exchange.type, 'topic')
-
-    def test_compat_exchange_is_Exchange(self):
-        producer = TaskPublisher(exchange=Exchange('foo'), app=self.app)
-        self.assertEqual(producer.exchange.name, 'foo')
-
-
 class test_PublisherPool(AppCase):
 
     def test_setup_nolimit(self):

+ 26 - 23
celery/tests/app/test_app.py

@@ -8,7 +8,6 @@ from copy import deepcopy
 from pickle import loads, dumps
 
 from amqp import promise
-from kombu import Exchange
 
 from celery import shared_task, current_app
 from celery import app as _app
@@ -252,14 +251,14 @@ class test_App(AppCase):
             _state._task_stack.pop()
 
     def test_task_not_shared(self):
-        with patch('celery.app.base.shared_task') as sh:
+        with patch('celery.app.base.connect_on_app_finalize') as sh:
             @self.app.task(shared=False)
             def foo():
                 pass
             self.assertFalse(sh.called)
 
     def test_task_compat_with_filter(self):
-        with self.Celery(accept_magic_kwargs=True) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):
@@ -272,7 +271,7 @@ class test_App(AppCase):
             check.assert_called_with(foo)
 
     def test_task_with_filter(self):
-        with self.Celery(accept_magic_kwargs=False) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):
@@ -336,10 +335,13 @@ class test_App(AppCase):
         def aawsX():
             pass
 
-        with patch('celery.app.amqp.TaskProducer.publish_task') as dt:
-            aawsX.apply_async((4, 5))
-            args = dt.call_args[0][1]
-            self.assertEqual(args, ('hello', 4, 5))
+        with patch('celery.app.amqp.AMQP.create_task_message') as create:
+            with patch('celery.app.amqp.AMQP.send_task_message') as send:
+                create.return_value = Mock(), Mock(), Mock(), Mock()
+                aawsX.apply_async((4, 5))
+                args = create.call_args[0][2]
+                self.assertEqual(args, ('hello', 4, 5))
+                self.assertTrue(send.called)
 
     def test_apply_async_adds_children(self):
         from celery._state import _task_stack
@@ -549,14 +551,14 @@ class test_App(AppCase):
         # Test passing in a string and make sure the string
         # gets there untouched
         self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar'
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             'foo-bar',
         )
 
         # Try passing in None
         self.app.conf.BROKER_FAILOVER_STRATEGY = None
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             itertools.cycle,
         )
@@ -566,7 +568,7 @@ class test_App(AppCase):
             yield True
 
         self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             my_failover_strategy,
         )
@@ -609,22 +611,23 @@ class test_App(AppCase):
             chan.close()
         assert conn.transport_cls == 'memory'
 
-        prod = self.app.amqp.TaskProducer(
-            conn, exchange=Exchange('foo_exchange'),
-            send_sent_event=True,
+        message = self.app.amqp.create_task_message(
+            'id', 'footask', (), {}, create_sent_event=True,
         )
 
+        prod = self.app.amqp.Producer(conn)
         dispatcher = Dispatcher()
-        self.assertTrue(prod.publish_task('footask', (), {},
-                                          exchange='moo_exchange',
-                                          routing_key='moo_exchange',
-                                          event_dispatcher=dispatcher))
+        self.app.amqp.send_task_message(
+            prod, 'footask', message,
+            exchange='moo_exchange', routing_key='moo_exchange',
+            event_dispatcher=dispatcher,
+        )
         self.assertTrue(dispatcher.sent)
         self.assertEqual(dispatcher.sent[0][0], 'task-sent')
-        self.assertTrue(prod.publish_task('footask', (), {},
-                                          event_dispatcher=dispatcher,
-                                          exchange='bar_exchange',
-                                          routing_key='bar_exchange'))
+        self.app.amqp.send_task_message(
+            prod, 'footask', message, event_dispatcher=dispatcher,
+            exchange='bar_exchange', routing_key='bar_exchange',
+        )
 
     def test_error_mail_sender(self):
         x = ErrorMail.subject % {'name': 'task_name',
@@ -644,7 +647,7 @@ class test_App(AppCase):
 
 class test_defaults(AppCase):
 
-    def test_str_to_bool(self):
+    def test_strtobool(self):
         for s in ('false', 'no', '0'):
             self.assertFalse(defaults.strtobool(s))
         for s in ('true', 'yes', '1'):
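
The updated tests mirror the new split publishing API that replaces
TaskProducer.publish_task: app.amqp.create_task_message builds the message
and app.amqp.send_task_message delivers it.  A rough sketch of direct use
(normally apply_async does this for you; exchange/routing key names are
illustrative):

    message = app.amqp.create_task_message(
        'some-task-id', 'tasks.add', (2, 2), {},
    )
    with app.connection() as conn:
        prod = app.amqp.Producer(conn)
        app.amqp.send_task_message(
            prod, 'tasks.add', message,
            exchange='default', routing_key='default',
        )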

+ 36 - 1
celery/tests/app/test_beat.py

@@ -162,7 +162,7 @@ class test_Scheduler(AppCase):
         scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app))
         self.assertTrue(foo.apply_async.called)
 
-    def test_apply_async_should_not_sync(self):
+    def test_should_sync(self):
 
         @self.app.task(shared=False)
         def not_sync():
@@ -181,6 +181,41 @@ class test_Scheduler(AppCase):
         s.apply_async(s.Entry(task=not_sync.name, app=self.app))
         self.assertFalse(s._do_sync.called)
 
+    def test_should_sync_increments_sync_every_counter(self):
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 2
+
+        @self.app.task(shared=False)
+        def not_sync():
+            pass
+        not_sync.apply_async = Mock()
+
+        s = mScheduler(app=self.app)
+        self.assertEqual(s.sync_every_tasks, 2)
+        s._do_sync = Mock()
+
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        self.assertEqual(s._tasks_since_sync, 1)
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        s._do_sync.assert_called_with()
+
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 0
+
+    def test_sync_task_counter_resets_on_do_sync(self):
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 1
+
+        @self.app.task(shared=False)
+        def not_sync():
+            pass
+        not_sync.apply_async = Mock()
+
+        s = mScheduler(app=self.app)
+        self.assertEqual(s.sync_every_tasks, 1)
+
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        self.assertEqual(s._tasks_since_sync, 0)
+
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 0
+
     @patch('celery.app.base.Celery.send_task')
     def test_send_task(self, send_task):
         b = beat.Scheduler(app=self.app)
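
These tests exercise the new CELERYBEAT_SYNC_EVERY setting: in addition to
the time-based sync_every interval, beat now also persists its schedule
after every N applied tasks.  Configuration sketch (0 keeps the old
time-only behaviour):

    app.conf.CELERYBEAT_SYNC_EVERY = 10   # sync after every 10 sent tasks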

+ 10 - 12
celery/tests/app/test_builtins.py

@@ -136,18 +136,18 @@ class test_chain(BuiltinsCase):
 
     def test_group_to_chord(self):
         c = (
-            group(self.add.s(i, i) for i in range(5)) |
+            group([self.add.s(i, i) for i in range(5)], app=self.app) |
             self.add.s(10) |
             self.add.s(20) |
             self.add.s(30)
         )
-        tasks, _ = c.type.prepare_steps((), c.tasks)
+        tasks, _ = c.prepare_steps((), c.tasks)
         self.assertIsInstance(tasks[0], chord)
         self.assertTrue(tasks[0].body.options['link'])
         self.assertTrue(tasks[0].body.options['link'][0].options['link'])
 
         c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
-        tasks2, _ = c2.type.prepare_steps((), c2.tasks)
+        tasks2, _ = c2.prepare_steps((), c2.tasks)
         self.assertIsInstance(tasks2[1], group)
 
     def test_apply_options(self):
@@ -158,7 +158,7 @@ class test_chain(BuiltinsCase):
                 return self
 
         def s(*args, **kwargs):
-            return static(self.add, args, kwargs, type=self.add)
+            return static(self.add, args, kwargs, type=self.add, app=self.app)
 
         c = s(2, 2) | s(4, 4) | s(8, 8)
         r1 = c.apply_async(task_id='some_id')
@@ -196,18 +196,16 @@ class test_chord(BuiltinsCase):
     def test_forward_options(self):
         body = self.xsum.s()
         x = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x._type = Mock()
-        x._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x.run = Mock(name='chord.run(x)')
         x.apply_async(group_id='some_group_id')
-        self.assertTrue(x._type.called)
-        resbody = x._type.call_args[0][1]
+        self.assertTrue(x.run.called)
+        resbody = x.run.call_args[0][1]
         self.assertEqual(resbody.options['group_id'], 'some_group_id')
         x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x2._type = Mock()
-        x2._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x2.run = Mock(name='chord.run(x2)')
         x2.apply_async(chord='some_chord_id')
-        self.assertTrue(x2._type.called)
-        resbody = x2._type.call_args[0][1]
+        self.assertTrue(x2.run.called)
+        resbody = x2.run.call_args[0][1]
         self.assertEqual(resbody.options['chord'], 'some_chord_id')
 
     def test_apply_eager(self):
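
The chord tests above switch from stubbing the private _type attribute to mocking chord.run() directly and inspecting its call_args. The same pattern reduced to plain mock (a sketch with an invented Chordish class; on Python 2 the identical API lived in the external mock package):

from unittest.mock import Mock

class Chordish(object):

    def apply_async(self, **options):
        # delegates to run(); tests intercept run() to observe the options
        return self.run('header', 'body', **options)

    def run(self, header, body, **options):
        raise NotImplementedError()

c = Chordish()
c.run = Mock(name='chord.run')
c.apply_async(group_id='some_group_id')
assert c.run.called
resbody = c.run.call_args[0][1]   # second positional argument
options = c.run.call_args[1]      # keyword arguments
assert resbody == 'body'
assert options['group_id'] == 'some_group_id'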

+ 4 - 7
celery/tests/app/test_loaders.py

@@ -7,7 +7,6 @@ import warnings
 from celery import loaders
 from celery.exceptions import (
     NotConfigured,
-    CPendingDeprecationWarning,
 )
 from celery.loaders import base
 from celery.loaders import default
@@ -34,16 +33,12 @@ class test_loaders(AppCase):
 
     @depends_on_current_app
     def test_current_loader(self):
-        with self.assertWarnsRegex(
-                CPendingDeprecationWarning,
-                r'deprecation'):
+        with self.assertPendingDeprecation():
             self.assertIs(loaders.current_loader(), self.app.loader)
 
     @depends_on_current_app
     def test_load_settings(self):
-        with self.assertWarnsRegex(
-                CPendingDeprecationWarning,
-                r'deprecation'):
+        with self.assertPendingDeprecation():
             self.assertIs(loaders.load_settings(), self.app.conf)
 
 
@@ -211,9 +206,11 @@ class test_DefaultLoader(AppCase):
         except ValueError:
             pass
         celery = sys.modules.pop('celery', None)
+        sys.modules.pop('celery.five', None)
         try:
             self.assertTrue(l.import_from_cwd('celery'))
             sys.modules.pop('celery', None)
+            sys.modules.pop('celery.five', None)
             sys.path.insert(0, os.getcwd())
             self.assertTrue(l.import_from_cwd('celery'))
         finally:
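
The added sys.modules.pop('celery.five', None) calls matter because an import only re-executes a module if it, and any already-imported submodules, are first evicted from sys.modules. The cache-busting step in isolation (fresh_import is a made-up helper name):

import sys
import importlib

def fresh_import(name):
    # evict the module and any cached submodules, then import anew
    for mod in [m for m in list(sys.modules)
                if m == name or m.startswith(name + '.')]:
        sys.modules.pop(mod, None)
    return importlib.import_module(name)

json_mod = fresh_import('json')
assert sys.modules['json'] is json_mod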

+ 43 - 19
celery/tests/app/test_log.py

@@ -2,6 +2,9 @@ from __future__ import absolute_import
 
 import sys
 import logging
+
+from collections import defaultdict
+from io import StringIO
 from tempfile import mktemp
 
 from celery import signals
@@ -94,7 +97,7 @@ class test_ColorFormatter(AppCase):
     @patch('celery.utils.log.safe_str')
     @patch('logging.Formatter.formatException')
     def test_formatException_not_string(self, fe, safe_str):
-        x = ColorFormatter('HELLO')
+        x = ColorFormatter()
         value = KeyError()
         fe.return_value = value
         self.assertIs(x.formatException(value), value)
@@ -103,16 +106,19 @@ class test_ColorFormatter(AppCase):
 
     @patch('logging.Formatter.formatException')
     @patch('celery.utils.log.safe_str')
-    def test_formatException_string(self, safe_str, fe, value='HELLO'):
-        x = ColorFormatter(value)
-        fe.return_value = value
-        self.assertTrue(x.formatException(value))
+    def test_formatException_string(self, safe_str, fe):
+        x = ColorFormatter()
+        fe.return_value = 'HELLO'
+        try:
+            raise Exception()
+        except Exception:
+            self.assertTrue(x.formatException(sys.exc_info()))
         if sys.version_info[0] == 2:
             self.assertTrue(safe_str.called)
 
     @patch('logging.Formatter.format')
     def test_format_object(self, _format):
-        x = ColorFormatter(object())
+        x = ColorFormatter()
         x.use_color = True
         record = Mock()
         record.levelname = 'ERROR'
@@ -121,7 +127,7 @@ class test_ColorFormatter(AppCase):
 
     @patch('celery.utils.log.safe_str')
     def test_format_raises(self, safe_str):
-        x = ColorFormatter('HELLO')
+        x = ColorFormatter()
 
         def on_safe_str(s):
             try:
@@ -133,6 +139,7 @@ class test_ColorFormatter(AppCase):
         class Record(object):
             levelname = 'ERROR'
             msg = 'HELLO'
+            exc_info = 1
             exc_text = 'error text'
             stack_info = None
 
@@ -145,15 +152,15 @@ class test_ColorFormatter(AppCase):
         record = Record()
         safe_str.return_value = record
 
-        x.format(record)
-        self.assertIn('<Unrepresentable', record.msg)
+        msg = x.format(record)
+        self.assertIn('<Unrepresentable', msg)
         self.assertEqual(safe_str.call_count, 1)
 
     @patch('celery.utils.log.safe_str')
     def test_format_raises_no_color(self, safe_str):
         if sys.version_info[0] == 3:
             raise SkipTest('py3k')
-        x = ColorFormatter('HELLO', False)
+        x = ColorFormatter(use_color=False)
         record = Mock()
         record.levelname = 'ERROR'
         record.msg = 'HELLO'
@@ -248,14 +255,31 @@ class test_default_logger(AppCase):
                 l.info('The quick brown fox...')
                 self.assertIn('The quick brown fox...', stderr.getvalue())
 
-    def test_setup_logger_no_handlers_file(self):
-        with restore_logging():
-            l = self.get_logger()
-            l.handlers = []
-            tempfile = mktemp(suffix='unittest', prefix='celery')
-            l = self.setup_logger(logfile=tempfile, loglevel=0, root=False)
-            self.assertIsInstance(get_handlers(l)[0],
-                                  logging.FileHandler)
+    @patch('os.fstat')
+    def test_setup_logger_no_handlers_file(self, *args):
+        tempfile = mktemp(suffix='unittest', prefix='celery')
+        _open = ('builtins.open' if sys.version_info[0] == 3
+                 else '__builtin__.open')
+        with patch(_open) as osopen:
+            with restore_logging():
+                files = defaultdict(StringIO)
+
+                def open_file(filename, *args, **kwargs):
+                    f = files[filename]
+                    f.fileno = Mock()
+                    f.fileno.return_value = 99
+                    return f
+
+                osopen.side_effect = open_file
+                l = self.get_logger()
+                l.handlers = []
+                l = self.setup_logger(
+                    logfile=tempfile, loglevel=logging.INFO, root=False,
+                )
+                self.assertIsInstance(
+                    get_handlers(l)[0], logging.FileHandler,
+                )
+                self.assertIn(tempfile, files)
 
     def test_redirect_stdouts(self):
         with restore_logging():
@@ -336,7 +360,7 @@ class test_task_logger(test_default_logger):
         return self.app.log.setup_task_loggers(*args, **kwargs)
 
     def get_logger(self, *args, **kwargs):
-        return get_task_logger("test_task_logger")
+        return get_task_logger('test_task_logger')
 
 
 class test_patch_logger_cls(AppCase):
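
The rewritten test_setup_logger_no_handlers_file avoids touching the filesystem by patching the builtin open() so that logging.FileHandler writes into in-memory StringIO buffers. The trick in isolation (a sketch; the fake fileno value 99 is arbitrary, and on Python 2 the same API came from the external mock package):

import sys
from collections import defaultdict
from io import StringIO
from unittest.mock import Mock, patch

_open = ('builtins.open' if sys.version_info[0] == 3
         else '__builtin__.open')
files = defaultdict(StringIO)

def open_file(filename, *args, **kwargs):
    f = files[filename]
    f.fileno = Mock(return_value=99)  # handlers may stat the descriptor
    return f

with patch(_open, side_effect=open_file):
    fh = open('/tmp/fake.log', 'w')   # never reaches the disk
    fh.write('hello')

assert 'hello' in files['/tmp/fake.log'].getvalue()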

+ 49 - 51
celery/tests/app/test_schedules.py

@@ -54,65 +54,63 @@ class test_crontab_parser(AppCase):
 
     def test_parse_range_wraps(self):
         self.assertEqual(crontab_parser(12).parse('11-1'),
-                         set([11, 0, 1]))
+                         {11, 0, 1})
         self.assertEqual(crontab_parser(60, 1).parse('2-1'),
                          set(range(1, 60 + 1)))
 
     def test_parse_groups(self):
         self.assertEqual(crontab_parser().parse('1,2,3,4'),
-                         set([1, 2, 3, 4]))
+                         {1, 2, 3, 4})
         self.assertEqual(crontab_parser().parse('0,15,30,45'),
-                         set([0, 15, 30, 45]))
+                         {0, 15, 30, 45})
         self.assertEqual(crontab_parser(min_=1).parse('1,2,3,4'),
-                         set([1, 2, 3, 4]))
+                         {1, 2, 3, 4})
 
     def test_parse_steps(self):
         self.assertEqual(crontab_parser(8).parse('*/2'),
-                         set([0, 2, 4, 6]))
+                         {0, 2, 4, 6})
         self.assertEqual(crontab_parser().parse('*/2'),
-                         set(i * 2 for i in range(30)))
+                         {i * 2 for i in range(30)})
         self.assertEqual(crontab_parser().parse('*/3'),
-                         set(i * 3 for i in range(20)))
+                         {i * 3 for i in range(20)})
         self.assertEqual(crontab_parser(8, 1).parse('*/2'),
-                         set([1, 3, 5, 7]))
+                         {1, 3, 5, 7})
         self.assertEqual(crontab_parser(min_=1).parse('*/2'),
-                         set(i * 2 + 1 for i in range(30)))
+                         {i * 2 + 1 for i in range(30)})
        self.assertEqual(crontab_parser(min_=1).parse('*/3'),
-                         set(i * 3 + 1 for i in range(20)))
+                         {i * 3 + 1 for i in range(20)})
 
     def test_parse_composite(self):
-        self.assertEqual(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
-        self.assertEqual(crontab_parser().parse('2-9/5'), set([2, 7]))
-        self.assertEqual(crontab_parser().parse('2-10/5'), set([2, 7]))
+        self.assertEqual(crontab_parser(8).parse('*/2'), {0, 2, 4, 6})
+        self.assertEqual(crontab_parser().parse('2-9/5'), {2, 7})
+        self.assertEqual(crontab_parser().parse('2-10/5'), {2, 7})
         self.assertEqual(
             crontab_parser(min_=1).parse('55-5/3'),
-            set([55, 58, 1, 4]),
+            {55, 58, 1, 4},
         )
-        self.assertEqual(crontab_parser().parse('2-11/5,3'), set([2, 3, 7]))
+        self.assertEqual(crontab_parser().parse('2-11/5,3'), {2, 3, 7})
         self.assertEqual(
             crontab_parser().parse('2-4/3,*/5,0-21/4'),
-            set([0, 2, 4, 5, 8, 10, 12, 15, 16,
-                 20, 25, 30, 35, 40, 45, 50, 55]),
+            {0, 2, 4, 5, 8, 10, 12, 15, 16, 20, 25, 30, 35, 40, 45, 50, 55},
         )
         self.assertEqual(
             crontab_parser().parse('1-9/2'),
-            set([1, 3, 5, 7, 9]),
+            {1, 3, 5, 7, 9},
         )
-        self.assertEqual(crontab_parser(8, 1).parse('*/2'), set([1, 3, 5, 7]))
-        self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), set([2, 7]))
-        self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), set([2, 7]))
+        self.assertEqual(crontab_parser(8, 1).parse('*/2'), {1, 3, 5, 7})
+        self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), {2, 7})
+        self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), {2, 7})
         self.assertEqual(
             crontab_parser(min_=1).parse('2-11/5,3'),
-            set([2, 3, 7]),
+            {2, 3, 7},
         )
         self.assertEqual(
             crontab_parser(min_=1).parse('2-4/3,*/5,1-21/4'),
-            set([1, 2, 5, 6, 9, 11, 13, 16, 17,
-                 21, 26, 31, 36, 41, 46, 51, 56]),
+            {1, 2, 5, 6, 9, 11, 13, 16, 17, 21, 26, 31, 36, 41, 46, 51, 56},
        )
        self.assertEqual(
            crontab_parser(min_=1).parse('1-9/2'),
-            set([1, 3, 5, 7, 9]),
+            {1, 3, 5, 7, 9},
        )
 
     def test_parse_errors_on_empty_string(self):
@@ -148,11 +146,11 @@ class test_crontab_parser(AppCase):
     def test_expand_cronspec_eats_iterables(self):
         self.assertEqual(
             crontab._expand_cronspec(iter([1, 2, 3]), 100),
-            set([1, 2, 3]),
+            {1, 2, 3},
         )
         self.assertEqual(
             crontab._expand_cronspec(iter([1, 2, 3]), 100, 1),
-            set([1, 2, 3]),
+            {1, 2, 3},
         )
 
     def test_expand_cronspec_invalid_type(self):
@@ -408,7 +406,7 @@ class test_crontab_is_due(AppCase):
 
     def test_simple_crontab_spec(self):
         c = self.crontab(minute=30)
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         self.assertEqual(c.hour, set(range(24)))
         self.assertEqual(c.day_of_week, set(range(7)))
         self.assertEqual(c.day_of_month, set(range(1, 32)))
@@ -416,13 +414,13 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_minute_formats(self):
         c = self.crontab(minute=30)
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         c = self.crontab(minute='30')
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         c = self.crontab(minute=(30, 40, 50))
-        self.assertEqual(c.minute, set([30, 40, 50]))
-        c = self.crontab(minute=set([30, 40, 50]))
-        self.assertEqual(c.minute, set([30, 40, 50]))
+        self.assertEqual(c.minute, {30, 40, 50})
+        c = self.crontab(minute={30, 40, 50})
+        self.assertEqual(c.minute, {30, 40, 50})
 
     def test_crontab_spec_invalid_minute(self):
         with self.assertRaises(ValueError):
@@ -432,11 +430,11 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_hour_formats(self):
         c = self.crontab(hour=6)
-        self.assertEqual(c.hour, set([6]))
+        self.assertEqual(c.hour, {6})
         c = self.crontab(hour='5')
-        self.assertEqual(c.hour, set([5]))
+        self.assertEqual(c.hour, {5})
         c = self.crontab(hour=(4, 8, 12))
-        self.assertEqual(c.hour, set([4, 8, 12]))
+        self.assertEqual(c.hour, {4, 8, 12})
 
     def test_crontab_spec_invalid_hour(self):
         with self.assertRaises(ValueError):
@@ -446,17 +444,17 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_dow_formats(self):
         c = self.crontab(day_of_week=5)
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='5')
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='fri')
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='tuesday,sunday,fri')
-        self.assertEqual(c.day_of_week, set([0, 2, 5]))
+        self.assertEqual(c.day_of_week, {0, 2, 5})
         c = self.crontab(day_of_week='mon-fri')
-        self.assertEqual(c.day_of_week, set([1, 2, 3, 4, 5]))
+        self.assertEqual(c.day_of_week, {1, 2, 3, 4, 5})
         c = self.crontab(day_of_week='*/2')
-        self.assertEqual(c.day_of_week, set([0, 2, 4, 6]))
+        self.assertEqual(c.day_of_week, {0, 2, 4, 6})
 
     def test_crontab_spec_invalid_dow(self):
         with self.assertRaises(ValueError):
@@ -470,13 +468,13 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_dom_formats(self):
         c = self.crontab(day_of_month=5)
-        self.assertEqual(c.day_of_month, set([5]))
+        self.assertEqual(c.day_of_month, {5})
         c = self.crontab(day_of_month='5')
-        self.assertEqual(c.day_of_month, set([5]))
+        self.assertEqual(c.day_of_month, {5})
         c = self.crontab(day_of_month='2,4,6')
-        self.assertEqual(c.day_of_month, set([2, 4, 6]))
+        self.assertEqual(c.day_of_month, {2, 4, 6})
         c = self.crontab(day_of_month='*/5')
-        self.assertEqual(c.day_of_month, set([1, 6, 11, 16, 21, 26, 31]))
+        self.assertEqual(c.day_of_month, {1, 6, 11, 16, 21, 26, 31})
 
     def test_crontab_spec_invalid_dom(self):
         with self.assertRaises(ValueError):
@@ -490,15 +488,15 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_moy_formats(self):
         c = self.crontab(month_of_year=1)
-        self.assertEqual(c.month_of_year, set([1]))
+        self.assertEqual(c.month_of_year, {1})
         c = self.crontab(month_of_year='1')
-        self.assertEqual(c.month_of_year, set([1]))
+        self.assertEqual(c.month_of_year, {1})
         c = self.crontab(month_of_year='2,4,6')
-        self.assertEqual(c.month_of_year, set([2, 4, 6]))
+        self.assertEqual(c.month_of_year, {2, 4, 6})
         c = self.crontab(month_of_year='*/2')
-        self.assertEqual(c.month_of_year, set([1, 3, 5, 7, 9, 11]))
+        self.assertEqual(c.month_of_year, {1, 3, 5, 7, 9, 11})
         c = self.crontab(month_of_year='2-12/2')
-        self.assertEqual(c.month_of_year, set([2, 4, 6, 8, 10, 12]))
+        self.assertEqual(c.month_of_year, {2, 4, 6, 8, 10, 12})
 
     def test_crontab_spec_invalid_moy(self):
         with self.assertRaises(ValueError):
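
All of the changes above replace set([...]) constructors with set literals; behaviour is identical. For reference, crontab_parser expands one cron field into the set of matching values (usage sketch, assuming celery is importable; the expected sets follow the same rules the tests assert):

from celery.schedules import crontab_parser

assert crontab_parser(60).parse('*/15') == {0, 15, 30, 45}
assert crontab_parser(24).parse('0-5') == {0, 1, 2, 3, 4, 5}
assert crontab_parser(12).parse('11-1') == {11, 0, 1}   # ranges may wrap
assert crontab_parser(min_=1).parse('*/30') == {1, 31}  # min_ shifts steps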

+ 17 - 1
celery/tests/app/test_utils.py

@@ -2,7 +2,7 @@ from __future__ import absolute_import
 
 from collections import Mapping, MutableMapping
 
-from celery.app.utils import Settings, bugreport
+from celery.app.utils import Settings, filter_hidden_settings, bugreport
 
 from celery.tests.case import AppCase, Mock
 
@@ -20,6 +20,22 @@ class TestSettings(AppCase):
         self.assertTrue(issubclass(Settings, MutableMapping))
 
 
+class test_filter_hidden_settings(AppCase):
+
+    def test_handles_non_string_keys(self):
+        """filter_hidden_settings shouldn't raise an exception when handling
+        mappings with non-string keys"""
+        conf = {
+            'STRING_KEY': 'VALUE1',
+            ('NON', 'STRING', 'KEY'): 'VALUE2',
+            'STRING_KEY2': {
+                'STRING_KEY3': 1,
+                ('NON', 'STRING', 'KEY', '2'): 2
+            },
+        }
+        filter_hidden_settings(conf)
+
+
 class test_bugreport(AppCase):
 
     def test_no_conn_driver_info(self):
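
The new test only asserts that filter_hidden_settings tolerates non-string keys. As a loose illustration of why the key's type matters (hypothetical code, not celery's implementation; HIDDEN_MARKERS is an invented name):

HIDDEN_MARKERS = ('PASSWORD', 'SECRET')  # invented marker list

def filter_hidden(conf):
    def hide(key, value):
        if isinstance(value, dict):
            return filter_hidden(value)
        # substring matching only makes sense for string keys, so
        # anything else is passed through untouched instead of crashing
        if isinstance(key, str) and any(m in key for m in HIDDEN_MARKERS):
            return '********'
        return value
    return dict((k, hide(k, v)) for k, v in conf.items())

conf = {'BROKER_PASSWORD': 'hunter2', ('NON', 'STRING', 'KEY'): 'kept'}
filtered = filter_hidden(conf)
assert filtered['BROKER_PASSWORD'] == '********'
assert filtered[('NON', 'STRING', 'KEY')] == 'kept'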

+ 14 - 13
celery/tests/backends/test_amqp.py

@@ -33,8 +33,8 @@ class test_AMQPBackend(AppCase):
         return AMQPBackend(self.app, **opts)
 
     def test_mark_as_done(self):
-        tb1 = self.create_backend()
-        tb2 = self.create_backend()
+        tb1 = self.create_backend(max_cached_results=1)
+        tb2 = self.create_backend(max_cached_results=1)
 
         tid = uuid()
 
@@ -108,8 +108,8 @@ class test_AMQPBackend(AppCase):
             raise KeyError('foo')
 
         backend = AMQPBackend(self.app)
-        from celery.app.amqp import TaskProducer
-        prod, TaskProducer.publish = TaskProducer.publish, publish
+        from celery.app.amqp import Producer
+        prod, Producer.publish = Producer.publish, publish
         try:
             with self.assertRaises(KeyError):
                 backend.retry_policy['max_retries'] = None
@@ -119,7 +119,7 @@ class test_AMQPBackend(AppCase):
                 backend.retry_policy['max_retries'] = 10
                 backend.store_result('foo', 'bar', 'STARTED')
         finally:
-            TaskProducer.publish = prod
+            Producer.publish = prod
 
     def assertState(self, retval, state):
         self.assertEqual(retval['status'], state)
@@ -175,7 +175,7 @@ class test_AMQPBackend(AppCase):
         class MockBackend(AMQPBackend):
             Queue = MockBinding
 
-        backend = MockBackend(self.app)
+        backend = MockBackend(self.app, max_cached_results=100)
         backend._republish = Mock()
 
         yield results, backend, Message
@@ -183,29 +183,30 @@ class test_AMQPBackend(AppCase):
     def test_backlog_limit_exceeded(self):
         with self._result_context() as (results, backend, Message):
             for i in range(1001):
-                results.put(Message(status=states.RECEIVED))
+                results.put(Message(task_id='id', status=states.RECEIVED))
             with self.assertRaises(backend.BacklogLimitExceeded):
                 backend.get_task_meta('id')
 
     def test_poll_result(self):
         with self._result_context() as (results, backend, Message):
+            tid = uuid()
             # FFWD's to the latest state.
             state_messages = [
-                Message(status=states.RECEIVED, seq=1),
-                Message(status=states.STARTED, seq=2),
-                Message(status=states.FAILURE, seq=3),
+                Message(task_id=tid, status=states.RECEIVED, seq=1),
+                Message(task_id=tid, status=states.STARTED, seq=2),
+                Message(task_id=tid, status=states.FAILURE, seq=3),
             ]
             for state_message in state_messages:
                 results.put(state_message)
-            r1 = backend.get_task_meta(uuid())
+            r1 = backend.get_task_meta(tid)
             self.assertDictContainsSubset(
                 {'status': states.FAILURE, 'seq': 3}, r1,
                 'FFWDs to the last state',
             )
 
             # Caches last known state.
-            results.put(Message())
             tid = uuid()
+            results.put(Message(task_id=tid))
             backend.get_task_meta(tid)
             self.assertIn(tid, backend._cache, 'Caches last known state')
 
@@ -261,7 +262,7 @@ class test_AMQPBackend(AppCase):
                 b.drain_events(Connection(), consumer, timeout=0.1)
 
     def test_get_many(self):
-        b = self.create_backend()
+        b = self.create_backend(max_cached_results=10)
 
         tids = []
         for i in range(10):
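
Several tests above now pass max_cached_results explicitly because the backend keeps recently seen task states in a bounded cache. A rough sketch of such a bound (celery/kombu ship their own LRUCache; this OrderedDict-based one is a simplified stand-in):

from collections import OrderedDict

class LRUCache(OrderedDict):

    def __init__(self, limit=None):
        OrderedDict.__init__(self)
        self.limit = limit

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        self.move_to_end(key)
        if self.limit and len(self) > self.limit:
            self.popitem(last=False)  # evict the oldest entry

cache = LRUCache(limit=1)
cache['task-a'] = {'status': 'SUCCESS'}
cache['task-b'] = {'status': 'STARTED'}
assert 'task-a' not in cache and 'task-b' in cache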

+ 0 - 9
celery/tests/backends/test_backends.py

@@ -19,15 +19,6 @@ class test_backends(AppCase):
                 expect_cls,
             )
 
-    def test_get_backend_cache(self):
-        backends.get_backend_cls.clear()
-        hits = backends.get_backend_cls.hits
-        misses = backends.get_backend_cls.misses
-        self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
-        self.assertEqual(backends.get_backend_cls.misses, misses + 1)
-        self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
-        self.assertEqual(backends.get_backend_cls.hits, hits + 1)
-
     def test_unknown_backend(self):
         with self.assertRaises(ImportError):
             backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader)

+ 13 - 10
celery/tests/backends/test_base.py

@@ -62,7 +62,7 @@ class test_BaseBackend_interface(AppCase):
             self.b.forget('SOMExx-N0nex1stant-IDxx-')
 
     def test_on_chord_part_return(self):
-        self.b.on_chord_part_return(None)
+        self.b.on_chord_part_return(None, None, None)
 
     def test_apply_chord(self, unlock='celery.chord_unlock'):
         self.app.tasks[unlock] = Mock()
@@ -234,9 +234,10 @@ class test_BaseBackend_dict(AppCase):
         self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult)
 
     def test_is_cached(self):
-        self.b._cache['foo'] = 1
-        self.assertTrue(self.b.is_cached('foo'))
-        self.assertFalse(self.b.is_cached('false'))
+        b = BaseBackend(app=self.app, max_cached_results=1)
+        b._cache['foo'] = 1
+        self.assertTrue(b.is_cached('foo'))
+        self.assertFalse(b.is_cached('false'))
 
 
 class test_KeyValueStoreBackend(AppCase):
@@ -246,7 +247,7 @@ class test_KeyValueStoreBackend(AppCase):
 
     def test_on_chord_part_return(self):
         assert not self.b.implements_incr
-        self.b.on_chord_part_return(None)
+        self.b.on_chord_part_return(None, None, None)
 
     def test_get_store_delete_result(self):
         tid = uuid()
@@ -282,12 +283,14 @@ class test_KeyValueStoreBackend(AppCase):
     def test_chord_part_return_no_gid(self):
         self.b.implements_incr = True
         task = Mock()
+        state = 'SUCCESS'
+        result = 10
         task.request.group = None
         self.b.get_key_for_chord = Mock()
         self.b.get_key_for_chord.side_effect = AssertionError(
             'should not get here',
         )
-        self.assertIsNone(self.b.on_chord_part_return(task))
+        self.assertIsNone(self.b.on_chord_part_return(task, state, result))
 
     @contextmanager
     def _chord_part_context(self, b):
@@ -315,14 +318,14 @@ class test_KeyValueStoreBackend(AppCase):
 
     def test_chord_part_return_propagate_set(self):
         with self._chord_part_context(self.b) as (task, deps, _):
-            self.b.on_chord_part_return(task, propagate=True)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True)
             self.assertFalse(self.b.expire.called)
             deps.delete.assert_called_with()
             deps.join_native.assert_called_with(propagate=True, timeout=3.0)
 
     def test_chord_part_return_propagate_default(self):
         with self._chord_part_context(self.b) as (task, deps, _):
-            self.b.on_chord_part_return(task, propagate=None)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None)
             self.assertFalse(self.b.expire.called)
             deps.delete.assert_called_with()
             deps.join_native.assert_called_with(
@@ -334,7 +337,7 @@ class test_KeyValueStoreBackend(AppCase):
         with self._chord_part_context(self.b) as (task, deps, callback):
             deps._failed_join_report = lambda: iter([])
             deps.join_native.side_effect = KeyError('foo')
-            self.b.on_chord_part_return(task)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10)
             self.assertTrue(self.b.fail_from_current_stack.called)
             args = self.b.fail_from_current_stack.call_args
             exc = args[1]['exc']
@@ -348,7 +351,7 @@ class test_KeyValueStoreBackend(AppCase):
                 self.app.AsyncResult('culprit'),
             ])
             deps.join_native.side_effect = KeyError('foo')
-            b.on_chord_part_return(task)
+            b.on_chord_part_return(task, 'SUCCESS', 10)
             self.assertTrue(b.fail_from_current_stack.called)
             args = b.fail_from_current_stack.call_args
             exc = args[1]['exc']
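
The recurring change in this file is the widened hook signature: on_chord_part_return now receives the finished task's state and result as positional arguments. A stub showing the shape the tests call it with (sketch only, not a real backend):

class StubBackend(object):

    def on_chord_part_return(self, task, state, result, propagate=False):
        # a real key/value backend would record (state, result) for the
        # task's group here and join the chord once all parts returned
        return None

StubBackend().on_chord_part_return(None, 'SUCCESS', 10)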

+ 2 - 2
celery/tests/backends/test_cache.py

@@ -86,10 +86,10 @@ class test_CacheBackend(AppCase):
         tb.apply_chord(group(app=self.app), (), gid, {}, result=res)
 
         self.assertFalse(deps.join_native.called)
-        tb.on_chord_part_return(task)
+        tb.on_chord_part_return(task, 'SUCCESS', 10)
         self.assertFalse(deps.join_native.called)
 
-        tb.on_chord_part_return(task)
+        tb.on_chord_part_return(task, 'SUCCESS', 10)
         deps.join_native.assert_called_with(propagate=True, timeout=3.0)
         deps.delete.assert_called_with()
 

+ 4 - 4
celery/tests/backends/test_couchbase.py

@@ -129,8 +129,8 @@ class test_CouchBaseBackend(AppCase):
         url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
         with self.Celery(backend=url) as app:
             x = app.backend
-            self.assertEqual(x.bucket, "mycoolbucket")
-            self.assertEqual(x.host, "myhost")
-            self.assertEqual(x.username, "johndoe")
-            self.assertEqual(x.password, "mysecret")
+            self.assertEqual(x.bucket, 'mycoolbucket')
+            self.assertEqual(x.host, 'myhost')
+            self.assertEqual(x.username, 'johndoe')
+            self.assertEqual(x.password, 'mysecret')
             self.assertEqual(x.port, 123)

+ 3 - 3
celery/tests/backends/test_database.py

@@ -42,16 +42,16 @@ class test_DatabaseBackend(AppCase):
         self.uri = 'sqlite:///test.db'
 
     def test_retry_helper(self):
-        from celery.backends.database import OperationalError
+        from celery.backends.database import DatabaseError
 
         calls = [0]
 
         @retry
         def raises():
             calls[0] += 1
-            raise OperationalError(1, 2, 3)
+            raise DatabaseError(1, 2, 3)
 
-        with self.assertRaises(OperationalError):
+        with self.assertRaises(DatabaseError):
             raises(max_retries=5)
         self.assertEqual(calls[0], 5)
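
The test pins down the retry helper's semantics: the wrapped function runs max_retries times and the final error propagates. An illustrative re-creation (hypothetical code, not celery.backends.database.retry itself):

from functools import wraps

class DatabaseError(Exception):
    pass

def retry(fun):
    @wraps(fun)
    def _inner(*args, **kwargs):
        max_retries = kwargs.pop('max_retries', 3)
        for attempt in range(max_retries):
            try:
                return fun(*args, **kwargs)
            except DatabaseError:
                # re-raise once the retry budget is exhausted
                if attempt + 1 >= max_retries:
                    raise
    return _inner

calls = [0]

@retry
def raises():
    calls[0] += 1
    raise DatabaseError(1, 2, 3)

try:
    raises(max_retries=5)
except DatabaseError:
    pass
assert calls[0] == 5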
 

+ 11 - 10
celery/tests/backends/test_mongodb.py

@@ -10,7 +10,7 @@ from celery.backends import mongodb as module
 from celery.backends.mongodb import MongoBackend, Bunch, pymongo
 from celery.exceptions import ImproperlyConfigured
 from celery.tests.case import (
-    AppCase, MagicMock, Mock, SkipTest,
+    AppCase, MagicMock, Mock, SkipTest, ANY,
     depends_on_current_app, patch, sentinel,
 )
 
@@ -98,7 +98,7 @@ class test_MongoBackend(AppCase):
 
             connection = self.backend._get_connection()
             mock_Connection.assert_called_once_with(
-                host='mongodb://localhost:27017', ssl=False, max_pool_size=10,
+                host='mongodb://localhost:27017', max_pool_size=10,
                 auto_start_request=False)
             self.assertEqual(sentinel.connection, connection)
 
@@ -113,7 +113,7 @@ class test_MongoBackend(AppCase):
 
             connection = self.backend._get_connection()
             mock_Connection.assert_called_once_with(
-                host=mongodb_uri, ssl=False, max_pool_size=10,
+                host=mongodb_uri, max_pool_size=10,
                 auto_start_request=False)
             self.assertEqual(sentinel.connection, connection)
 
@@ -176,7 +176,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -196,9 +196,10 @@ class test_MongoBackend(AppCase):
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
         self.assertEqual(
-            ['status', 'task_id', 'date_done', 'traceback', 'result',
-             'children'],
-            list(ret_val.keys()))
+            list(sorted(['status', 'task_id', 'date_done', 'traceback',
+                         'result', 'children'])),
+            list(sorted(ret_val.keys())),
+        )
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
     def test_get_task_meta_for_no_result(self, mock_get_database):
@@ -232,7 +233,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -298,7 +299,7 @@ class test_MongoBackend(AppCase):
         self.backend.taskmeta_collection = MONGODB_COLLECTION
 
         mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
-        mock_collection = Mock()
+        self.backend.collections = mock_collection = Mock()
 
         mock_get_database.return_value = mock_database
         mock_database.__getitem__.return_value = mock_collection
@@ -309,7 +310,7 @@ class test_MongoBackend(AppCase):
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(
             MONGODB_COLLECTION)
-        mock_collection.assert_called_once()
+        self.assertTrue(mock_collection.remove.called)
 
     def test_get_database_authfailure(self):
         x = MongoBackend(app=self.app)
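
The switch from assert_called_once() to assert_called_once_with(ANY) is deliberate: on the mock versions of that era, an unrecognized assert_* attribute such as assert_called_once simply returned a child mock instead of asserting anything, whereas assert_called_once_with(ANY) genuinely verifies a single call with one argument of any value. In isolation:

from unittest.mock import ANY, Mock

collection = Mock()
collection.save({'task_id': 'abc', 'status': 'SUCCESS'})

# verifies exactly one call, with a single argument of any value
collection.save.assert_called_once_with(ANY)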

+ 111 - 86
celery/tests/backends/test_redis.py

@@ -4,52 +4,54 @@ from datetime import timedelta
 
 from pickle import loads, dumps
 
-from kombu.utils import cached_property, uuid
-
 from celery import signature
 from celery import states
 from celery import group
+from celery import uuid
 from celery.datastructures import AttributeDict
-from celery.exceptions import CPendingDeprecationWarning, ImproperlyConfigured
-from celery.utils.timeutils import timedelta_seconds
+from celery.exceptions import ImproperlyConfigured
 
 from celery.tests.case import (
-    AppCase, Mock, SkipTest, depends_on_current_app, patch,
+    AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch,
 )
 
 
-class Redis(object):
+class Connection(object):
+    connected = True
+
+    def disconnect(self):
+        self.connected = False
+
 
-    class Connection(object):
-        connected = True
+class Pipeline(object):
 
-        def disconnect(self):
-            self.connected = False
+    def __init__(self, client):
+        self.client = client
+        self.steps = []
 
-    class Pipeline(object):
+    def __getattr__(self, attr):
 
-        def __init__(self, client):
-            self.client = client
-            self.steps = []
+        def add_step(*args, **kwargs):
+            self.steps.append((getattr(self.client, attr), args, kwargs))
+            return self
+        return add_step
 
-        def __getattr__(self, attr):
+    def execute(self):
+        return [step(*a, **kw) for step, a, kw in self.steps]
 
-            def add_step(*args, **kwargs):
-                self.steps.append((getattr(self.client, attr), args, kwargs))
-                return self
-            return add_step
 
-        def execute(self):
-            return [step(*a, **kw) for step, a, kw in self.steps]
+class Redis(MockCallbacks):
+    Connection = Connection
+    Pipeline = Pipeline
 
     def __init__(self, host=None, port=None, db=None, password=None, **kw):
         self.host = host
         self.port = port
         self.db = db
         self.password = password
-        self.connection = self.Connection()
         self.keyspace = {}
         self.expiry = {}
+        self.connection = self.Connection()
 
     def get(self, key):
         return self.keyspace.get(key)
@@ -63,16 +65,30 @@ class Redis(object):
 
     def expire(self, key, expires):
         self.expiry[key] = expires
+        return expires
 
     def delete(self, key):
-        self.keyspace.pop(key)
-
-    def publish(self, key, value):
-        pass
+        return bool(self.keyspace.pop(key, None))
 
     def pipeline(self):
         return self.Pipeline(self)
 
+    def _get_list(self, key):
+        try:
+            return self.keyspace[key]
+        except KeyError:
+            l = self.keyspace[key] = []
+            return l
+
+    def rpush(self, key, value):
+        self._get_list(key).append(value)
+
+    def lrange(self, key, start, stop):
+        return self._get_list(key)[start:stop]
+
+    def llen(self, key):
+        return len(self.keyspace.get(key) or [])
+
 
 class redis(object):
     Redis = Redis
@@ -91,41 +107,34 @@ class redis(object):
 class test_RedisBackend(AppCase):
 
     def get_backend(self):
-        from celery.backends import redis
+        from celery.backends.redis import RedisBackend
 
-        class RedisBackend(redis.RedisBackend):
+        class _RedisBackend(RedisBackend):
             redis = redis
 
-        return RedisBackend
+        return _RedisBackend
 
     def setup(self):
         self.Backend = self.get_backend()
 
-        class MockBackend(self.Backend):
-
-            @cached_property
-            def client(self):
-                return Mock()
-
-        self.MockBackend = MockBackend
-
     @depends_on_current_app
     def test_reduce(self):
         try:
             from celery.backends.redis import RedisBackend
-            x = RedisBackend(app=self.app)
+            x = RedisBackend(app=self.app, new_join=True)
             self.assertTrue(loads(dumps(x)))
         except ImportError:
             raise SkipTest('redis not installed')
 
     def test_no_redis(self):
-        self.MockBackend.redis = None
+        self.Backend.redis = None
         with self.assertRaises(ImproperlyConfigured):
-            self.MockBackend(app=self.app)
+            self.Backend(app=self.app, new_join=True)
 
     def test_url(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'redis://:bosco@vandelay.com:123//1', app=self.app,
+            new_join=True,
         )
         self.assertTrue(x.connparams)
         self.assertEqual(x.connparams['host'], 'vandelay.com')
@@ -134,8 +143,9 @@ class test_RedisBackend(AppCase):
         self.assertEqual(x.connparams['password'], 'bosco')
 
     def test_socket_url(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'socket:///tmp/redis.sock?virtual_host=/3', app=self.app,
+            new_join=True,
         )
         self.assertTrue(x.connparams)
         self.assertEqual(x.connparams['path'], '/tmp/redis.sock')
@@ -148,20 +158,17 @@ class test_RedisBackend(AppCase):
         self.assertEqual(x.connparams['db'], 3)
 
     def test_compat_propertie(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'redis://:bosco@vandelay.com:123//1', app=self.app,
+            new_join=True,
         )
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
             self.assertEqual(x.host, 'vandelay.com')
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
            self.assertEqual(x.db, 1)
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
            self.assertEqual(x.port, 123)
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
            self.assertEqual(x.password, 'bosco')
 
     def test_conf_raises_KeyError(self):
@@ -171,71 +178,87 @@ class test_RedisBackend(AppCase):
             'CELERY_ACCEPT_CONTENT': ['json'],
             'CELERY_TASK_RESULT_EXPIRES': None,
         })
-        self.MockBackend(app=self.app)
+        self.Backend(app=self.app, new_join=True)
 
     def test_expires_defaults_to_config(self):
         self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10
-        b = self.Backend(expires=None, app=self.app)
+        b = self.Backend(expires=None, app=self.app, new_join=True)
         self.assertEqual(b.expires, 10)
 
     def test_expires_is_int(self):
-        b = self.Backend(expires=48, app=self.app)
+        b = self.Backend(expires=48, app=self.app, new_join=True)
         self.assertEqual(b.expires, 48)
 
+    def test_set_new_join_from_url_query(self):
+        b = self.Backend('redis://?new_join=True;foobar=1', app=self.app)
+        self.assertEqual(b.on_chord_part_return, b._new_chord_return)
+        self.assertEqual(b.apply_chord, b._new_chord_apply)
+
+    def test_default_is_old_join(self):
+        b = self.Backend(app=self.app)
+        self.assertNotEqual(b.on_chord_part_return, b._new_chord_return)
+        self.assertNotEqual(b.apply_chord, b._new_chord_apply)
+
     def test_expires_is_None(self):
-        b = self.Backend(expires=None, app=self.app)
-        self.assertEqual(b.expires, timedelta_seconds(
-            self.app.conf.CELERY_TASK_RESULT_EXPIRES))
+        b = self.Backend(expires=None, app=self.app, new_join=True)
+        self.assertEqual(
+            b.expires,
+            self.app.conf.CELERY_TASK_RESULT_EXPIRES.total_seconds(),
+        )
 
     def test_expires_is_timedelta(self):
-        b = self.Backend(expires=timedelta(minutes=1), app=self.app)
+        b = self.Backend(
+            expires=timedelta(minutes=1), app=self.app, new_join=1,
+        )
         self.assertEqual(b.expires, 60)
 
     def test_apply_chord(self):
-        self.Backend(app=self.app).apply_chord(
+        self.Backend(app=self.app, new_join=True).apply_chord(
            group(app=self.app), (), 'group_id', {},
            result=[self.app.AsyncResult(x) for x in [1, 2, 3]],
        )
 
     def test_mget(self):
-        b = self.MockBackend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
         self.assertTrue(b.mget(['a', 'b', 'c']))
         b.client.mget.assert_called_with(['a', 'b', 'c'])
 
     def test_set_no_expire(self):
-        b = self.MockBackend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
        b.expires = None
        b.set('foo', 'bar')
 
     @patch('celery.result.GroupResult.restore')
     def test_on_chord_part_return(self, restore):
-        b = self.MockBackend(app=self.app)
-        deps = Mock()
-        deps.__len__ = Mock()
-        deps.__len__.return_value = 10
-        restore.return_value = deps
-        b.client.incr.return_value = 1
-        task = Mock()
-        task.name = 'foobarbaz'
-        self.app.tasks['foobarbaz'] = task
-        task.request.chord = signature(task)
-        task.request.group = 'group_id'
-
-        b.on_chord_part_return(task)
-        self.assertTrue(b.client.incr.call_count)
-
-        b.client.incr.return_value = len(deps)
-        b.on_chord_part_return(task)
-        deps.join_native.assert_called_with(propagate=True, timeout=3.0)
-        deps.delete.assert_called_with()
-
-        self.assertTrue(b.client.expire.call_count)
+        b = self.Backend(app=self.app, new_join=True)
+
+        def create_task():
+            tid = uuid()
+            task = Mock(name='task-{0}'.format(tid))
+            task.name = 'foobarbaz'
+            self.app.tasks['foobarbaz'] = task
+            task.request.chord = signature(task)
+            task.request.id = tid
+            task.request.chord['chord_size'] = 10
+            task.request.group = 'group_id'
+            return task
+
+        tasks = [create_task() for i in range(10)]
+
+        for i in range(10):
+            b.on_chord_part_return(tasks[i], states.SUCCESS, i)
+            self.assertTrue(b.client.rpush.call_count)
+            b.client.rpush.reset_mock()
+        self.assertTrue(b.client.lrange.call_count)
+        gkey = b.get_key_for_group('group_id', '.j')
+        b.client.delete.assert_called_with(gkey)
+        b.client.expire.assert_called_with(gkey, 86400)
 
     def test_process_cleanup(self):
-        self.Backend(app=self.app).process_cleanup()
+        self.Backend(app=self.app, new_join=True).process_cleanup()
 
     def test_get_set_forget(self):
-        b = self.Backend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
         tid = uuid()
         b.store_result(tid, 42, states.SUCCESS)
         self.assertEqual(b.get_status(tid), states.SUCCESS)
@@ -244,8 +267,10 @@ class test_RedisBackend(AppCase):
         self.assertEqual(b.get_status(tid), states.PENDING)
 
     def test_set_expires(self):
-        b = self.Backend(expires=512, app=self.app)
+        b = self.Backend(expires=512, app=self.app, new_join=True)
         tid = uuid()
         key = b.get_key_for_task(tid)
         b.store_result(tid, 42, states.SUCCESS)
-        self.assertEqual(b.client.expiry[key], 512)
+        b.client.expire.assert_called_with(
+            key, 512,
+        )
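The two new tests at the top of this hunk pin down the Redis backend's new_join switch: enabling it swaps apply_chord/on_chord_part_return to the Redis-native counting implementation, while the old polling join stays the default. A minimal sketch of opting in from the result backend URL, assuming a local Redis (host and port are illustrative):

    from celery import Celery

    app = Celery(
        'proj',
        broker='redis://localhost:6379/0',
        # '?new_join=True' selects the new chord join implementation;
        # leaving it out keeps the old default, as asserted above.
        backend='redis://localhost:6379/0?new_join=True',
    )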

+ 1 - 1
celery/tests/bin/test_amqp.py

@@ -124,7 +124,7 @@ class test_AMQShell(AppCase):
         self.assertNotIn('FOO', self.fh.getvalue())
 
     def test_run(self):
-        a = self.create_adm('queue.declare foo')
+        a = self.create_adm('queue.declare', 'foo')
         a.run()
         self.assertIn('ok', self.fh.getvalue())
 

+ 5 - 5
celery/tests/bin/test_base.py

@@ -241,21 +241,21 @@ class test_Command(AppCase):
         with self.assertRaises(AttributeError):
             cmd.find_app(__name__)
 
-    def test_simple_format(self):
+    def test_host_format(self):
         cmd = MockCommand(app=self.app)
         with patch('socket.gethostname') as hn:
             hn.return_value = 'blacktron.example.com'
-            self.assertEqual(cmd.simple_format(''), '')
+            self.assertEqual(cmd.host_format(''), '')
             self.assertEqual(
-                cmd.simple_format('celery@%h'),
+                cmd.host_format('celery@%h'),
                 'celery@blacktron.example.com',
             )
             self.assertEqual(
-                cmd.simple_format('celery@%d'),
+                cmd.host_format('celery@%d'),
                 'celery@example.com',
             )
             self.assertEqual(
-                cmd.simple_format('celery@%n'),
+                cmd.host_format('celery@%n'),
                 'celery@blacktron',
             )
 
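The renamed host_format helper expands node-name placeholders against the local hostname. A simplified sketch of the expansions the test asserts, assuming a host named 'blacktron.example.com' (the real Command.host_format supports additional placeholders):

    import socket

    def host_format(s):
        host = socket.gethostname()          # 'blacktron.example.com'
        name, _, domain = host.partition('.')
        return (s.replace('%h', host)        # full host name
                 .replace('%n', name)        # host part only
                 .replace('%d', domain))     # domain part only

    host_format('celery@%h')  # -> 'celery@blacktron.example.com'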

+ 2 - 1
celery/tests/bin/test_celery.py

@@ -2,9 +2,10 @@ from __future__ import absolute_import
 
 import sys
 
-from anyjson import dumps
 from datetime import datetime
 
+from kombu.utils.json import dumps
+
 from celery import __main__
 from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK
 from celery.bin.base import Error
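With anyjson dropped, kombu's bundled JSON helpers take over; they are a drop-in replacement here:

    from kombu.utils.json import dumps, loads

    payload = dumps({'id': 1})          # '{"id": 1}'
    assert loads(payload) == {'id': 1}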

+ 5 - 2
celery/tests/bin/test_celeryd_detach.py

@@ -24,8 +24,10 @@ if not IS_WINDOWS:
 
             detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log',
                    pidfile='/var/pid')
-            detached.assert_called_with('/var/log', '/var/pid', None, None, 0,
-                                        None, False)
+            detached.assert_called_with(
+                '/var/log', '/var/pid', None, None, 0, None, False,
+                after_forkers=False,
+            )
             execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c'])
 
             execv.side_effect = Exception('foo')
@@ -85,6 +87,7 @@ class test_Command(AppCase):
         detach.assert_called_with(
             path=x.execv_path, uid=None, gid=None,
             umask=0, fake=False, logfile='/var/log', pidfile='celeryd.pid',
+            working_directory=None,
             argv=x.execv_argv + [
                 '-c', '1', '-lDEBUG',
                 '--logfile=/var/log', '--pidfile=celeryd.pid',
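Both assertions now include the extra arguments threaded through to the detach machinery. For orientation, a rough sketch of the underlying celery.platforms.detached context manager (keyword names here are assumptions inferred from the calls above, not a verified signature):

    import os
    from celery.platforms import detached

    # daemonize, then exec the real program; paths are illustrative
    with detached(logfile='/var/log/w.log', pidfile='/var/run/w.pid',
                  fake=False):
        os.execv('/bin/boo', ['/bin/boo', 'a', 'b', 'c'])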

+ 2 - 11
celery/tests/bin/test_multi.py

@@ -8,7 +8,6 @@ from celery.bin.multi import (
     main,
     MultiTool,
     findsig,
-    abbreviations,
     parse_ns_range,
     format_opt,
     quote,
@@ -30,14 +29,6 @@ class test_functions(AppCase):
         self.assertEqual(findsig(['-s']), signal.SIGTERM)
         self.assertEqual(findsig(['-log']), signal.SIGTERM)
 
-    def test_abbreviations(self):
-        expander = abbreviations({'%s': 'START',
-                                  '%x': 'STOP'})
-        self.assertEqual(expander('foo%s'), 'fooSTART')
-        self.assertEqual(expander('foo%x'), 'fooSTOP')
-        self.assertEqual(expander('foo%y'), 'foo%y')
-        self.assertIsNone(expander(None))
-
     def test_parse_ns_range(self):
         self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3'])
         self.assertEqual(parse_ns_range('1-3', False), ['1-3'])
@@ -78,6 +69,7 @@ class test_multi_args(AppCase):
 
     @patch('socket.gethostname')
     def test_parse(self, gethostname):
+        gethostname.return_value = 'example.com'
         p = NamespacedOptionParser([
             '-c:jerry,elaine', '5',
             '--loglevel:kramer=DEBUG',
@@ -120,12 +112,11 @@ class test_multi_args(AppCase):
         )
         expand = names[0][2]
         self.assertEqual(expand('%h'), '*P*jerry@*S*')
-        self.assertEqual(expand('%n'), 'jerry')
+        self.assertEqual(expand('%n'), '*P*jerry')
         names2 = list(multi_args(p, cmd='COMMAND', append='',
                       prefix='*P*', suffix='*S*'))
         self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1')
 
-        gethostname.return_value = 'example.com'
         p2 = NamespacedOptionParser(['10', '-c:1', '5'])
         names3 = list(multi_args(p2, cmd='COMMAND'))
         self.assertEqual(len(names3), 10)
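For reference, a sketch of the behaviour test_parse_ns_range pins down (the real implementation lives in celery.bin.multi):

    def parse_ns_range(ns, ranges=False):
        # '1-3' expands to ['1', '2', '3'] only when ranges is enabled
        ret = []
        for item in ns.split(','):
            if ranges and '-' in item:
                start, stop = item.split('-')
                ret.extend(str(n) for n in range(int(start), int(stop) + 1))
            else:
                ret.append(item)
        return ret

    assert parse_ns_range('1-3', True) == ['1', '2', '3']
    assert parse_ns_range('1-3', False) == ['1-3']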

+ 26 - 18
celery/tests/bin/test_worker.py

@@ -17,6 +17,7 @@ from celery.bin.worker import worker, main as worker_main
 from celery.exceptions import (
     ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
 )
+from celery.platforms import EX_FAILURE, EX_OK
 from celery.utils.log import ensure_process_aware_logger
 from celery.worker import state
 
@@ -443,8 +444,10 @@ class test_funs(WorkerAppCase):
     def test_parse_options(self):
         cmd = worker()
         cmd.app = self.app
-        opts, args = cmd.parse_options('worker', ['--concurrency=512'])
+        opts, args = cmd.parse_options('worker', ['--concurrency=512',
+                                       '--heartbeat-interval=10'])
         self.assertEqual(opts.concurrency, 512)
+        self.assertEqual(opts.heartbeat_interval, 10)
 
     @disable_stdouts
     def test_main(self):
@@ -488,8 +491,8 @@ class test_signal_handlers(WorkerAppCase):
         worker = self._Worker()
         handlers = self.psig(cd.install_worker_int_handler, worker)
         next_handlers = {}
-        state.should_stop = False
-        state.should_terminate = False
+        state.should_stop = None
+        state.should_terminate = None
 
         class Signals(platforms.Signals):
 
@@ -502,15 +505,17 @@ class test_signal_handlers(WorkerAppCase):
             try:
                 handlers['SIGINT']('SIGINT', object())
                 self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_FAILURE)
             finally:
                 platforms.signals = p
-                state.should_stop = False
+                state.should_stop = None
 
             try:
                 next_handlers['SIGINT']('SIGINT', object())
                 self.assertTrue(state.should_terminate)
+                self.assertEqual(state.should_terminate, EX_FAILURE)
             finally:
-                state.should_terminate = False
+                state.should_terminate = None
 
         with patch('celery.apps.worker.active_thread_count') as c:
             c.return_value = 1
@@ -541,7 +546,7 @@ class test_signal_handlers(WorkerAppCase):
                 self.assertTrue(state.should_stop)
             finally:
                 process.name = name
-                state.should_stop = False
+                state.should_stop = None
 
         with patch('celery.apps.worker.active_thread_count') as c:
             c.return_value = 1
@@ -552,7 +557,7 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGINT']('SIGINT', object())
             finally:
                 process.name = name
-                state.should_stop = False
+                state.should_stop = None
 
     @disable_stdouts
     def test_install_HUP_not_supported_handler(self):
@@ -578,14 +583,17 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGQUIT']('SIGQUIT', object())
                     self.assertTrue(state.should_terminate)
                 finally:
-                    state.should_terminate = False
+                    state.should_terminate = None
             with patch('celery.apps.worker.active_thread_count') as c:
                 c.return_value = 1
                 worker = self._Worker()
                 handlers = self.psig(
                     cd.install_worker_term_hard_handler, worker)
-                with self.assertRaises(WorkerTerminate):
-                    handlers['SIGQUIT']('SIGQUIT', object())
+                try:
+                    with self.assertRaises(WorkerTerminate):
+                        handlers['SIGQUIT']('SIGQUIT', object())
+                finally:
+                    state.should_terminate = None
         finally:
             process.name = name
 
@@ -597,9 +605,9 @@ class test_signal_handlers(WorkerAppCase):
             handlers = self.psig(cd.install_worker_term_handler, worker)
             try:
                 handlers['SIGTERM']('SIGTERM', object())
-                self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_OK)
             finally:
-                state.should_stop = False
+                state.should_stop = None
 
     @disable_stdouts
     def test_worker_term_handler_when_single_thread(self):
@@ -611,7 +619,7 @@ class test_signal_handlers(WorkerAppCase):
                 with self.assertRaises(WorkerShutdown):
                     handlers['SIGTERM']('SIGTERM', object())
             finally:
-                state.should_stop = False
+                state.should_stop = None
 
     @patch('sys.__stderr__')
     @skip_if_pypy
@@ -635,7 +643,7 @@ class test_signal_handlers(WorkerAppCase):
                 worker = self._Worker()
                 handlers = self.psig(cd.install_worker_term_handler, worker)
                 handlers['SIGTERM']('SIGTERM', object())
-                self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_OK)
             with patch('celery.apps.worker.active_thread_count') as c:
                 c.return_value = 1
                 worker = self._Worker()
@@ -644,7 +652,7 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGTERM']('SIGTERM', object())
         finally:
             process.name = name
-            state.should_stop = False
+            state.should_stop = None
 
     @disable_stdouts
     @patch('celery.platforms.close_open_fds')
@@ -663,14 +671,14 @@ class test_signal_handlers(WorkerAppCase):
             worker = self._Worker()
             handlers = self.psig(cd.install_worker_restart_handler, worker)
             handlers['SIGHUP']('SIGHUP', object())
-            self.assertTrue(state.should_stop)
+            self.assertEqual(state.should_stop, EX_OK)
             self.assertTrue(register.called)
             callback = register.call_args[0][0]
             callback()
             self.assertTrue(argv)
         finally:
             os.execv = execv
-            state.should_stop = False
+            state.should_stop = None
 
     @disable_stdouts
     def test_worker_term_hard_handler_when_threaded(self):
@@ -682,7 +690,7 @@ class test_signal_handlers(WorkerAppCase):
                 handlers['SIGQUIT']('SIGQUIT', object())
                 self.assertTrue(state.should_terminate)
             finally:
-                state.should_terminate = False
+                state.should_terminate = None
 
     @disable_stdouts
    def test_worker_term_hard_handler_when_single_threaded(self):
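The recurring False -> None edits reflect that should_stop/should_terminate are no longer booleans: None means keep running, and any other value doubles as the process exit status (EX_OK for a clean SIGTERM shutdown, EX_FAILURE for a SIGINT abort). A condensed sketch of the contract these tests check:

    from celery.platforms import EX_FAILURE, EX_OK

    should_stop = None                # None: keep running

    def on_SIGTERM(signum, frame):
        global should_stop
        should_stop = EX_OK           # warm shutdown, exit status 0

    def on_SIGINT(signum, frame):
        global should_stop
        should_stop = EX_FAILURE      # aborted, non-zero exit status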

+ 118 - 24
celery/tests/case.py

@@ -11,12 +11,14 @@ except AttributeError:
 import importlib
 import inspect
 import logging
+import numbers
 import os
 import platform
 import re
 import sys
 import threading
 import time
+import types
 import warnings
 
 from contextlib import contextmanager
@@ -37,6 +39,7 @@ from kombu.utils import nested, symbol_by_name
 from celery import Celery
 from celery.app import current_app
 from celery.backends.cache import CacheBackend, DummyClient
+from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
 from celery.five import (
     WhateverIO, builtins, items, reraise,
     string_t, values, open_fqdn,
@@ -45,7 +48,7 @@ from celery.utils.functional import noop
 from celery.utils.imports import qualname
 
 __all__ = [
-    'Case', 'AppCase', 'Mock', 'MagicMock',
+    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'TaskMessage',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -53,12 +56,15 @@ __all__ = [
     'replace_module_value', 'sys_platform', 'reset_modules',
     'patch_modules', 'mock_context', 'mock_open', 'patch_many',
     'assert_signal_called', 'skip_if_pypy',
-    'skip_if_jython', 'body_from_sig', 'restore_logging',
+    'skip_if_jython', 'task_message_from_sig', 'restore_logging',
 ]
 patch = mock.patch
 call = mock.call
 sentinel = mock.sentinel
 MagicMock = mock.MagicMock
+ANY = mock.ANY
+
+PY3 = sys.version_info[0] == 3
 
 CASE_REDEFINES_SETUP = """\
 {name} (subclass of AppCase) redefines private "setUp", should be: "setup"\
@@ -162,6 +168,35 @@ def ContextMock(*args, **kwargs):
     return obj
 
 
+def _bind(f, o):
+    @wraps(f)
+    def bound_meth(*fargs, **fkwargs):
+        return f(o, *fargs, **fkwargs)
+    return bound_meth
+
+
+if PY3:  # pragma: no cover
+    def _get_class_fun(meth):
+        return meth
+else:
+    def _get_class_fun(meth):
+        return meth.__func__
+
+
+class MockCallbacks(object):
+
+    def __new__(cls, *args, **kwargs):
+        r = Mock(name=cls.__name__)
+        _get_class_fun(cls.__init__)(r, *args, **kwargs)
+        for key, value in items(vars(cls)):
+            if key not in ('__dict__', '__weakref__', '__new__', '__init__'):
+                if inspect.ismethod(value) or inspect.isfunction(value):
+                    r.__getattr__(key).side_effect = _bind(value, r)
+                else:
+                    r.__setattr__(key, value)
+        return r
+
+
 def skip_unless_module(module):
 
     def _inner(fun):
@@ -193,6 +228,18 @@ class _AssertRaisesBaseContext(object):
         self.expected_regex = expected_regex
 
 
+def _is_magic_module(m):
+    # some libraries create custom module types that are lazily
+    # loaded, e.g. Django installs some modules in sys.modules that
+    # will load _tkinter and other shit when touched.
+
+    # pyflakes refuses to accept 'noqa' for this isinstance.
+    cls, modtype = m.__class__, types.ModuleType
+    return (not cls is modtype and (
+        '__getattr__' in vars(m.__class__) or
+        '__getattribute__' in vars(m.__class__)))
+
+
 class _AssertWarnsContext(_AssertRaisesBaseContext):
     """A context manager used to implement TestCase.assertWarns* methods."""
 
@@ -201,8 +248,17 @@ class _AssertWarnsContext(_AssertRaisesBaseContext):
         # to work properly.
         warnings.resetwarnings()
         for v in list(values(sys.modules)):
-            if getattr(v, '__warningregistry__', None):
-                v.__warningregistry__ = {}
+            # do not evaluate Django moved modules and other lazily
+            # initialized modules.
+            if v and not _is_magic_module(v):
+                # use raw __getattribute__ to protect even better from
+                # lazily loaded modules
+                try:
+                    object.__getattribute__(v, '__warningregistry__')
+                except AttributeError:
+                    pass
+                else:
+                    object.__setattr__(v, '__warningregistry__', {})
         self.warnings_manager = warnings.catch_warnings(record=True)
         self.warnings = self.warnings_manager.__enter__()
         warnings.simplefilter('always', self.expected)
@@ -253,6 +309,18 @@ class Case(unittest.TestCase):
         return _AssertWarnsContext(expected_warning, self,
                                    None, expected_regex)
 
+    @contextmanager
+    def assertDeprecated(self):
+        with self.assertWarnsRegex(CDeprecationWarning,
+                                   r'scheduled for removal'):
+            yield
+
+    @contextmanager
+    def assertPendingDeprecation(self):
+        with self.assertWarnsRegex(CPendingDeprecationWarning,
+                                   r'scheduled for deprecation'):
+            yield
+
     def assertDictContainsSubset(self, expected, actual, msg=None):
         missing, mismatched = [], []
 
@@ -344,8 +412,12 @@ class AppCase(Case):
         self._current_app = current_app()
         self._default_app = _state.default_app
         trap = Trap()
+        self._prev_tls = _state._tls
         _state.set_default_app(trap)
-        _state._tls.current_app = trap
+
+        class NonTLS(object):
+            current_app = trap
+        _state._tls = NonTLS()
 
         self.app = self.Celery(set_as_current=False)
         if not self.contained:
@@ -379,13 +451,12 @@ class AppCase(Case):
                 if isinstance(backend.client, DummyClient):
                     backend.client.cache.clear()
                 backend._cache.clear()
-        from celery._state import (
-            _tls, set_default_app, _set_task_join_will_block,
-        )
-        _set_task_join_will_block(False)
+        from celery import _state
+        _state._set_task_join_will_block(False)
 
-        set_default_app(self._default_app)
-        _tls.current_app = self._current_app
+        _state.set_default_app(self._default_app)
+        _state._tls = self._prev_tls
+        _state._tls.current_app = self._current_app
         if self.app is not self._current_app:
             self.app.close()
         self.app = None
@@ -393,6 +464,15 @@ class AppCase(Case):
             self._threads_at_setup, list(threading.enumerate()),
         )
 
+        # Make sure no test left the shutdown flags enabled.
+        from celery.worker import state as worker_state
+        # check for EX_OK
+        self.assertIsNot(worker_state.should_stop, False)
+        self.assertIsNot(worker_state.should_terminate, False)
+        # check for other true values
+        self.assertFalse(worker_state.should_stop)
+        self.assertFalse(worker_state.should_terminate)
+
     def _get_test_name(self):
         return '.'.join([self.__class__.__name__, self._testMethodName])
 
@@ -748,7 +828,7 @@ def skip_if_jython(fun):
     return _inner
 
 
-def body_from_sig(app, sig, utc=True):
+def task_message_from_sig(app, sig, utc=True):
     sig.freeze()
     callbacks = sig.options.pop('link', None)
     errbacks = sig.options.pop('link_error', None)
@@ -760,21 +840,18 @@ def body_from_sig(app, sig, utc=True):
     if eta and isinstance(eta, datetime):
         eta = eta.isoformat()
     expires = sig.options.pop('expires', None)
-    if expires and isinstance(expires, int):
+    if expires and isinstance(expires, numbers.Real):
         expires = app.now() + timedelta(seconds=expires)
     if expires and isinstance(expires, datetime):
         expires = expires.isoformat()
-    return {
-        'task': sig.task,
-        'id': sig.id,
-        'args': sig.args,
-        'kwargs': sig.kwargs,
-        'callbacks': [dict(s) for s in callbacks] if callbacks else None,
-        'errbacks': [dict(s) for s in errbacks] if errbacks else None,
-        'eta': eta,
-        'utc': utc,
-        'expires': expires,
-    }
+    return TaskMessage(
+        sig.task, id=sig.id, args=sig.args,
+        kwargs=sig.kwargs,
+        callbacks=[dict(s) for s in callbacks] if callbacks else None,
+        errbacks=[dict(s) for s in errbacks] if errbacks else None,
+        eta=eta,
+        expires=expires,
+    )
 
 
 @contextmanager
@@ -790,3 +867,20 @@ def restore_logging():
         sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
         root.level = level
         root.handlers[:] = handlers
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {
+        'id': id,
+        'task': name,
+    }
+    message.headers.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        (args, kwargs), serializer='json',
+    )
+    message.payload = (args, kwargs)
+    return message
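task_message_from_sig (formerly body_from_sig) now returns a TaskMessage mock rather than a bare dict, matching the message layout where task metadata travels in headers and (args, kwargs) in the body. A quick usage sketch of the helper defined above ('proj.add' is a made-up task name):

    msg = TaskMessage('proj.add', args=(2, 2))
    msg.headers['task']     # 'proj.add'
    msg.payload             # ((2, 2), {})
    msg.content_type        # 'application/json'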

+ 3 - 26
celery/tests/compat_modules/test_compat.py

@@ -10,34 +10,10 @@ from celery.task import (
     periodic_task,
     PeriodicTask
 )
-from celery.utils.timeutils import timedelta_seconds
 
 from celery.tests.case import AppCase, depends_on_current_app
 
 
-class test_Task(AppCase):
-
-    def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.task import Task as OldTask
-
-        class timkX(OldTask):
-            abstract = True
-
-        with self.Celery(set_as_current=False,
-                         accept_magic_kwargs=True) as app:
-            timkX.bind(app)
-            # see #918
-            self.assertFalse(timkX.accept_magic_kwargs)
-
-            from celery import Task as NewTask
-
-            class timkY(NewTask):
-                abstract = True
-
-            timkY.bind(app)
-            self.assertFalse(timkY.accept_magic_kwargs)
-
-
 @depends_on_current_app
 class test_periodic_tasks(AppCase):
 
@@ -74,8 +50,9 @@ class test_periodic_tasks(AppCase):
             self.now() - p.run_every.run_every,
         )
         self.assertTrue(due)
-        self.assertEqual(remaining,
-                         timedelta_seconds(p.run_every.run_every))
+        self.assertEqual(
+            remaining, p.run_every.run_every.total_seconds(),
+        )
 
     def test_schedule_repr(self):
         p = self.my_periodic
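The deprecated timedelta_seconds helper gives way to the standard library method:

    from datetime import timedelta

    timedelta(minutes=1).total_seconds()   # 60.0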

+ 0 - 4
celery/tests/compat_modules/test_compat_utils.py

@@ -40,11 +40,7 @@ class test_MagicModule(AppCase):
         def _test_decorators_task():
             pass
 
-        self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
     def test_decorators_periodic_task(self):
         @celery.decorators.periodic_task(run_every=3600)
         def _test_decorators_ptask():
             pass
-
-        self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)

Some files were not shown because too many files changed in this diff