
Merge branch 'master' into bigbeat

Conflicts:
	celery/beat.py
Ask Solem committed 11 years ago
Parent commit: ed629f615a
100 changed files with 3,853 additions and 1,897 deletions
1. .coveragerc (+6, -0)
2. .gitignore (+4, -0)
3. .travis.yml (+32, -6)
4. CONTRIBUTING.rst (+1065, -0)
5. CONTRIBUTORS.txt (+10, -2)
6. Changelog (+287, -0)
7. README.rst (+6, -6)
8. celery/__init__.py (+7, -6)
9. celery/__main__.py (+1, -1)
10. celery/_state.py (+26, -31)
11. celery/app/__init__.py (+5, -6)
12. celery/app/amqp.py (+264, -224)
13. celery/app/base.py (+101, -58)
14. celery/app/builtins.py (+38, -235)
15. celery/app/control.py (+11, -11)
16. celery/app/defaults.py (+4, -2)
17. celery/app/log.py (+12, -7)
18. celery/app/registry.py (+2, -2)
19. celery/app/task.py (+50, -42)
20. celery/app/trace.py (+210, -39)
21. celery/app/utils.py (+15, -10)
22. celery/apps/beat.py (+10, -6)
23. celery/apps/worker.py (+15, -10)
24. celery/backends/__init__.py (+0, -2)
25. celery/backends/amqp.py (+16, -8)
26. celery/backends/base.py (+87, -46)
27. celery/backends/cache.py (+1, -1)
28. celery/backends/cassandra.py (+4, -5)
29. celery/backends/database/__init__.py (+40, -31)
30. celery/backends/database/session.py (+46, -49)
31. celery/backends/mongodb.py (+0, -1)
32. celery/backends/redis.py (+76, -4)
33. celery/beat.py (+35, -16)
34. celery/bin/amqp.py (+10, -9)
35. celery/bin/base.py (+15, -21)
36. celery/bin/beat.py (+1, -1)
37. celery/bin/celery.py (+36, -12)
38. celery/bin/celeryd_detach.py (+3, -1)
39. celery/bin/events.py (+2, -2)
40. celery/bin/graph.py (+1, -1)
41. celery/bin/multi.py (+45, -38)
42. celery/bin/worker.py (+14, -7)
43. celery/bootsteps.py (+3, -2)
44. celery/canvas.py (+248, -66)
45. celery/concurrency/asynpool.py (+53, -48)
46. celery/concurrency/base.py (+4, -2)
47. celery/concurrency/eventlet.py (+7, -0)
48. celery/concurrency/prefork.py (+7, -2)
49. celery/concurrency/solo.py (+1, -0)
50. celery/contrib/abortable.py (+35, -32)
51. celery/contrib/batches.py (+5, -4)
52. celery/contrib/migrate.py (+3, -3)
53. celery/contrib/rdb.py (+1, -1)
54. celery/contrib/sphinx.py (+73, -0)
55. celery/datastructures.py (+4, -4)
56. celery/events/__init__.py (+5, -18)
57. celery/events/dumper.py (+1, -1)
58. celery/events/snapshot.py (+1, -1)
59. celery/events/state.py (+26, -20)
60. celery/exceptions.py (+5, -2)
61. celery/five.py (+20, -188)
62. celery/fixups/django.py (+15, -6)
63. celery/loaders/base.py (+2, -2)
64. celery/local.py (+33, -2)
65. celery/platforms.py (+20, -11)
66. celery/result.py (+101, -29)
67. celery/schedules.py (+11, -10)
68. celery/security/certificate.py (+5, -4)
69. celery/security/serialization.py (+6, -9)
70. celery/task/__init__.py (+2, -2)
71. celery/task/base.py (+23, -7)
72. celery/task/http.py (+9, -9)
73. celery/task/sets.py (+1, -1)
74. celery/task/trace.py (+0, -12)
75. celery/tests/__init__.py (+1, -1)
76. celery/tests/app/test_amqp.py (+4, -96)
77. celery/tests/app/test_app.py (+26, -23)
78. celery/tests/app/test_beat.py (+36, -1)
79. celery/tests/app/test_builtins.py (+10, -12)
80. celery/tests/app/test_loaders.py (+4, -7)
81. celery/tests/app/test_log.py (+43, -19)
82. celery/tests/app/test_schedules.py (+49, -51)
83. celery/tests/app/test_utils.py (+17, -1)
84. celery/tests/backends/test_amqp.py (+14, -13)
85. celery/tests/backends/test_backends.py (+0, -9)
86. celery/tests/backends/test_base.py (+13, -10)
87. celery/tests/backends/test_cache.py (+2, -2)
88. celery/tests/backends/test_couchbase.py (+4, -4)
89. celery/tests/backends/test_database.py (+3, -3)
90. celery/tests/backends/test_mongodb.py (+11, -10)
91. celery/tests/backends/test_redis.py (+111, -86)
92. celery/tests/bin/test_amqp.py (+1, -1)
93. celery/tests/bin/test_base.py (+5, -5)
94. celery/tests/bin/test_celery.py (+2, -1)
95. celery/tests/bin/test_celeryd_detach.py (+5, -2)
96. celery/tests/bin/test_multi.py (+2, -11)
97. celery/tests/bin/test_worker.py (+26, -18)
98. celery/tests/case.py (+118, -24)
99. celery/tests/compat_modules/test_compat.py (+3, -26)
100. celery/tests/compat_modules/test_compat_utils.py (+0, -4)

+ 6 - 0
.coveragerc

@@ -0,0 +1,6 @@
+[run]
+branch = 1
+cover_pylib = 0
+omit = celery.utils.debug,celery.tests.*,celery.bin.graph
+[report]
+omit = */python?.?/*,*/site-packages/*,*/pypy/*

+ 4 - 0
.gitignore

@@ -20,4 +20,8 @@ Documentation/
 .project
 .pydevproject
 .idea/
+.coverage
 celery/tests/cover/
+.ve*
+cover/
+

+ 32 - 6
.travis.yml

@@ -1,8 +1,34 @@
 language: python
-python:
-    - 2.6
-    - 2.7
-    - 3.3
+python: 2.7
+env:
+  global:
+    PYTHONUNBUFFERED=yes
+  matrix:
+    - TOXENV=2.7
+    - TOXENV=3.3
+    - TOXENV=3.4
+    - TOXENV=pypy
+before_install:
+  - |
+    if [[ $TOXENV = pypy ]]; then
+      deactivate
+      sudo apt-add-repository --yes ppa:pypy/ppa
+      sudo apt-get update
+      sudo apt-get install pypy
+      source ~/virtualenv/pypy/bin/activate
+    fi
+    python --version
+    uname -a
+    lsb_release -a
 install:
-    - pip install --use-mirrors tox
-script: TOXENV=py$(echo $TRAVIS_PYTHON_VERSION | tr -d .) tox -v
+  - pip install tox
+script:
+  - tox -v -- -v
+after_success:
+  - .tox/$TRAVIS_PYTHON_VERSION/bin/coveralls
+notifications:
+  irc:
+    channels:
+      - "chat.freenode.net#celery"
+    on_success: change
+    on_failure: always

+ 1065 - 0
CONTRIBUTING.rst

@@ -0,0 +1,1065 @@
+.. _contributing:
+
+==============
+ Contributing
+==============
+
+Welcome!
+
+This document is fairly extensive and you are not really expected
+to study this in detail for small contributions;
+
+    The most important rule is that contributing must be easy
+    and that the community is friendly and not nitpicking on details
+    such as coding style.
+
+If you're reporting a bug you should read the Reporting bugs section
+below to ensure that your bug report contains enough information
+to successfully diagnose the issue, and if you're contributing code
+you should try to mimic the conventions you see surrounding the code
+you are working on, but in the end all patches will be cleaned up by
+the person merging the changes so don't worry too much.
+
+.. contents::
+    :local:
+
+.. _community-code-of-conduct:
+
+Community Code of Conduct
+=========================
+
+The goal is to maintain a diverse community that is pleasant for everyone.
+That is why we would greatly appreciate it if everyone contributing to and
+interacting with the community also followed this Code of Conduct.
+
+The Code of Conduct covers our behavior as members of the community,
+in any forum, mailing list, wiki, website, Internet relay chat (IRC), public
+meeting or private correspondence.
+
+The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and
+the `Pylons Code of Conduct`_.
+
+.. _`Ubuntu Code of Conduct`: http://www.ubuntu.com/community/conduct
+.. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
+
+Be considerate.
+---------------
+
+Your work will be used by other people, and you in turn will depend on the
+work of others.  Any decision you take will affect users and colleagues, and
+we expect you to take those consequences into account when making decisions.
+Even if it's not obvious at the time, our contributions to Celery will impact
+the work of others.  For example, changes to code, infrastructure, policy,
+documentation and translations during a release may negatively impact
+others' work.
+
+Be respectful.
+--------------
+
+The Celery community and its members treat one another with respect.  Everyone
+can make a valuable contribution to Celery.  We may not always agree, but
+disagreement is no excuse for poor behavior and poor manners.  We might all
+experience some frustration now and then, but we cannot allow that frustration
+to turn into a personal attack.  It's important to remember that a community
+where people feel uncomfortable or threatened is not a productive one.  We
+expect members of the Celery community to be respectful when dealing with
+other contributors as well as with people outside the Celery project and with
+users of Celery.
+
+Be collaborative.
+-----------------
+
+Collaboration is central to Celery and to the larger free software community.
+We should always be open to collaboration.  Your work should be done
+transparently and patches from Celery should be given back to the community
+when they are made, not just when the distribution releases.  If you wish
+to work on new code for existing upstream projects, at least keep those
+projects informed of your ideas and progress.  It may not be possible to
+get consensus from upstream, or even from your colleagues about the correct
+implementation for an idea, so don't feel obliged to have that agreement
+before you begin, but at least keep the outside world informed of your work,
+and publish your work in a way that allows outsiders to test, discuss and
+contribute to your efforts.
+
+When you disagree, consult others.
+----------------------------------
+
+Disagreements, both political and technical, happen all the time and
+the Celery community is no exception.  It is important that we resolve
+disagreements and differing views constructively and with the help of the
+community and community process.  If you really want to go a different
+way, then we encourage you to make a derivative distribution or alternate
+set of packages that still build on the work we've done to utilize as common
+of a core as possible.
+
+When you are unsure, ask for help.
+----------------------------------
+
+Nobody knows everything, and nobody is expected to be perfect.  Asking
+questions avoids many problems down the road, and so questions are
+encouraged.  Those who are asked questions should be responsive and helpful.
+However, when asking a question, care must be taken to do so in an appropriate
+forum.
+
+Step down considerately.
+------------------------
+
+Developers on every project come and go and Celery is no different.  When you
+leave or disengage from the project, in whole or in part, we ask that you do
+so in a way that minimizes disruption to the project.  This means you should
+tell people you are leaving and take the proper steps to ensure that others
+can pick up where you leave off.
+
+.. _reporting-bugs:
+
+
+Reporting Bugs
+==============
+
+.. _vulnsec:
+
+Security
+--------
+
+You must never report security related issues, vulnerabilities or bugs
+including sensitive information to the bug tracker, or elsewhere in public.
+Instead sensitive bugs must be sent by email to ``security@celeryproject.org``.
+
+If you'd like to submit the information encrypted our PGP key is::
+
+    -----BEGIN PGP PUBLIC KEY BLOCK-----
+    Version: GnuPG v1.4.15 (Darwin)
+
+    mQENBFJpWDkBCADFIc9/Fpgse4owLNvsTC7GYfnJL19XO0hnL99sPx+DPbfr+cSE
+    9wiU+Wp2TfUX7pCLEGrODiEP6ZCZbgtiPgId+JYvMxpP6GXbjiIlHRw1EQNH8RlX
+    cVxy3rQfVv8PGGiJuyBBjxzvETHW25htVAZ5TI1+CkxmuyyEYqgZN2fNd0wEU19D
+    +c10G1gSECbCQTCbacLSzdpngAt1Gkrc96r7wGHBBSvDaGDD2pFSkVuTLMbIRrVp
+    lnKOPMsUijiip2EMr2DvfuXiUIUvaqInTPNWkDynLoh69ib5xC19CSVLONjkKBsr
+    Pe+qAY29liBatatpXsydY7GIUzyBT3MzgMJlABEBAAG0MUNlbGVyeSBTZWN1cml0
+    eSBUZWFtIDxzZWN1cml0eUBjZWxlcnlwcm9qZWN0Lm9yZz6JATgEEwECACIFAlJp
+    WDkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOArFOUDCicIw1IH/26f
+    CViDC7/P13jr+srRdjAsWvQztia9HmTlY8cUnbmkR9w6b6j3F2ayw8VhkyFWgYEJ
+    wtPBv8mHKADiVSFARS+0yGsfCkia5wDSQuIv6XqRlIrXUyqJbmF4NUFTyCZYoh+C
+    ZiQpN9xGhFPr5QDlMx2izWg1rvWlG1jY2Es1v/xED3AeCOB1eUGvRe/uJHKjGv7J
+    rj0pFcptZX+WDF22AN235WYwgJM6TrNfSu8sv8vNAQOVnsKcgsqhuwomSGsOfMQj
+    LFzIn95MKBBU1G5wOs7JtwiV9jefGqJGBO2FAvOVbvPdK/saSnB+7K36dQcIHqms
+    5hU4Xj0RIJiod5idlRC5AQ0EUmlYOQEIAJs8OwHMkrdcvy9kk2HBVbdqhgAREMKy
+    gmphDp7prRL9FqSY/dKpCbG0u82zyJypdb7QiaQ5pfPzPpQcd2dIcohkkh7G3E+e
+    hS2L9AXHpwR26/PzMBXyr2iNnNc4vTksHvGVDxzFnRpka6vbI/hrrZmYNYh9EAiv
+    uhE54b3/XhXwFgHjZXb9i8hgJ3nsO0pRwvUAM1bRGMbvf8e9F+kqgV0yWYNnh6QL
+    4Vpl1+epqp2RKPHyNQftbQyrAHXT9kQF9pPlx013MKYaFTADscuAp4T3dy7xmiwS
+    crqMbZLzfrxfFOsNxTUGE5vmJCcm+mybAtRo4aV6ACohAO9NevMx8pUAEQEAAYkB
+    HwQYAQIACQUCUmlYOQIbDAAKCRDgKxTlAwonCNFbB/9esir/f7TufE+isNqErzR/
+    aZKZo2WzZR9c75kbqo6J6DYuUHe6xI0OZ2qZ60iABDEZAiNXGulysFLCiPdatQ8x
+    8zt3DF9BMkEck54ZvAjpNSern6zfZb1jPYWZq3TKxlTs/GuCgBAuV4i5vDTZ7xK/
+    aF+OFY5zN7ciZHkqLgMiTZ+RhqRcK6FhVBP/Y7d9NlBOcDBTxxE1ZO1ute6n7guJ
+    ciw4hfoRk8qNN19szZuq3UU64zpkM2sBsIFM9tGF2FADRxiOaOWZHmIyVZriPFqW
+    RUwjSjs7jBVNq0Vy4fCu/5+e+XLOUBOoqtM5W7ELt0t1w9tXebtPEetV86in8fU2
+    =0chn
+    -----END PGP PUBLIC KEY BLOCK-----
+
+Other bugs
+----------
+
+Bugs can always be described to the `mailing-list`_, but the best
+way to report an issue and to ensure a timely response is to use the
+issue tracker.
+
+1) **Create a GitHub account.**
+
+You need to `create a GitHub account`_ to be able to create new issues
+and participate in the discussion.
+
+.. _`create a GitHub account`: https://github.com/signup/free
+
+2) **Determine if your bug is really a bug.**
+
+You should not file a bug if you are requesting support.  For that you can use
+the `mailing-list`_, or `irc-channel`_.
+
+3) **Make sure your bug hasn't already been reported.**
+
+Search through the appropriate Issue tracker.  If a bug like yours was found,
+check if you have new information that could be reported to help
+the developers fix the bug.
+
+4) **Check if you're using the latest version.**
+
+A bug could be fixed by some other improvements and fixes - it might not have an
+existing report in the bug tracker. Make sure you're using the latest releases of
+celery, billiard and kombu.
+
+5) **Collect information about the bug.**
+
+To have the best chance of having a bug fixed, we need to be able to easily
+reproduce the conditions that caused it.  Most of the time this information
+will be from a Python traceback message, though some bugs might be in design,
+spelling or other errors on the website/docs/code.
+
+    A) If the error is from a Python traceback, include it in the bug report.
+
+    B) We also need to know what platform you're running (Windows, OS X, Linux,
+       etc.), the version of your Python interpreter, and the version of Celery,
+       and related packages that you were running when the bug occurred.
+
+    C) If you are reporting a race condition or a deadlock, tracebacks can be
+       hard to get or might not be that useful. Try to inspect the process to
+       get more diagnostic data. Some ideas:
+
+       * Enable celery's ``breakpoint_signal`` and use it
+         to inspect the process's state.  This will allow you to open a
+         ``pdb`` session.
+       * Collect tracing data using strace_ (Linux), dtruss (OS X) and ktrace (BSD),
+         ltrace_ and lsof_.
+
+    D) Include the output from the `celery report` command:
+        ::
+
+            $ celery -A proj report
+
+        This will also include your configuration settings and it will try to
+        remove values for keys known to be sensitive, but make sure you also
+        verify the information before submitting so that it doesn't contain
+        confidential information like API tokens and authentication
+        credentials.
+
+6) **Submit the bug.**
+
+By default `GitHub`_ will email you to let you know when new comments have
+been made on your bug. In the event you've turned this feature off, you
+should check back on occasion to ensure you don't miss any questions a
+developer trying to fix the bug might ask.
+
+.. _`GitHub`: http://github.com
+.. _`strace`: http://en.wikipedia.org/wiki/Strace
+.. _`ltrace`: http://en.wikipedia.org/wiki/Ltrace
+.. _`lsof`: http://en.wikipedia.org/wiki/Lsof
+
+.. _issue-trackers:
+
+Issue Trackers
+--------------
+
+Bugs for a package in the Celery ecosystem should be reported to the relevant
+issue tracker.
+
+* Celery: http://github.com/celery/celery/issues/
+* Kombu: http://github.com/celery/kombu/issues
+* pyamqp: http://github.com/celery/pyamqp/issues
+* librabbitmq: http://github.com/celery/librabbitmq/issues
+* Django-Celery: http://github.com/celery/django-celery/issues
+
+If you are unsure of the origin of the bug you can ask the
+`mailing-list`_, or just use the Celery issue tracker.
+
+Contributors guide to the codebase
+==================================
+
+There's a separate section for internal details,
+including details about the codebase and a style guide.
+
+Read `internals-guide`_ for more!
+
+.. _versions:
+
+Versions
+========
+
+Version numbers consist of a major version, minor version and a release number.
+Since version 2.1.0 we use the versioning semantics described by
+semver: http://semver.org.
+
+Stable releases are published at PyPI
+while development releases are only available in the GitHub git repository as tags.
+All version tags start with "v", so version 0.8.0 is the tag v0.8.0.
+
+.. _git-branches:
+
+Branches
+========
+
+Current active version branches:
+
+* master (http://github.com/celery/celery/tree/master)
+* 3.1 (http://github.com/celery/celery/tree/3.1)
+* 3.0 (http://github.com/celery/celery/tree/3.0)
+
+You can see the state of any branch by looking at the Changelog:
+
+    https://github.com/celery/celery/blob/master/Changelog
+
+If the branch is in active development the topmost version info should
+contain metadata like::
+
+    2.4.0
+    ======
+    :release-date: TBA
+    :status: DEVELOPMENT
+    :branch: master
+
+The ``status`` field can be one of:
+
+* ``PLANNING``
+
+    The branch is currently experimental and in the planning stage.
+
+* ``DEVELOPMENT``
+
+    The branch is in active development, but the test suite should
+    be passing and the product should be working and available for users to test.
+
+* ``FROZEN``
+
+    The branch is frozen, and no more features will be accepted.
+    When a branch is frozen the focus is on testing the version as much
+    as possible before it is released.
+
+``master`` branch
+-----------------
+
+The master branch is where development of the next version happens.
+
+Maintenance branches
+--------------------
+
+Maintenance branches are named after the version, e.g. the maintenance branch
+for the 2.2.x series is named ``2.2``.  Previously these were named
+``releaseXX-maint``.
+
+The versions we currently maintain are:
+
+* 3.1
+
+  This is the current series.
+
+* 3.0
+
+  This is the previous series, and the last version to support Python 2.5.
+
+Archived branches
+-----------------
+
+Archived branches are kept for preserving history only,
+and theoretically someone could provide patches for these if they depend
+on a series that is no longer officially supported.
+
+An archived version is named ``X.Y-archived``.
+
+Our currently archived branches are:
+
+* 2.5-archived
+
+* 2.4-archived
+
+* 2.3-archived
+
+* 2.1-archived
+
+* 2.0-archived
+
+* 1.0-archived
+
+Feature branches
+----------------
+
+Major new features are worked on in dedicated branches.
+There is no strict naming requirement for these branches.
+
+Feature branches are removed once they have been merged into a release branch.
+
+Tags
+====
+
+Tags are used exclusively for tagging releases.  A release tag is
+named with the format ``vX.Y.Z``, e.g. ``v2.3.1``.
+Experimental releases contain an additional identifier ``vX.Y.Z-id``, e.g.
+``v3.0.0-rc1``.  Experimental tags may be removed after the official release.
+
+.. _contributing-changes:
+
+Working on Features & Patches
+=============================
+
+.. note::
+
+    Contributing to Celery should be as simple as possible,
+    so none of these steps should be considered mandatory.
+
+    You can even send in patches by email if that is your preferred
+    work method. We won't like you any less, any contribution you make
+    is always appreciated!
+
+    However, following these steps may make the maintainers' lives easier,
+    and may mean that your changes will be accepted sooner.
+
+Forking and setting up the repository
+-------------------------------------
+
+First you need to fork the Celery repository, a good introduction to this
+is in the Github Guide: `Fork a Repo`_.
+
+After you have forked the repository you should clone your copy
+to a directory on your machine:
+::
+
+    $ git clone git@github.com:username/celery.git
+
+When the repository is cloned enter the directory to set up easy access
+to upstream changes:
+::
+
+    $ cd celery
+    $ git remote add upstream git://github.com/celery/celery.git
+    $ git fetch upstream
+
+If you need to pull in new changes from upstream you should
+always use the ``--rebase`` option to ``git pull``:
+::
+
+    git pull --rebase upstream master
+
+With this option you don't clutter the history with merging
+commit notes. See `Rebasing merge commits in git`_.
+If you want to learn more about rebasing see the `Rebase`_
+section in the Github guides.
+
+If you need to work on a different branch than ``master`` you can
+fetch and checkout a remote branch like this::
+
+    git checkout --track -b 3.0-devel origin/3.0-devel
+
+.. _`Fork a Repo`: http://help.github.com/fork-a-repo/
+.. _`Rebasing merge commits in git`:
+    http://notes.envato.com/developers/rebasing-merge-commits-in-git/
+.. _`Rebase`: http://help.github.com/rebase/
+
+.. _contributing-testing:
+
+Running the unit test suite
+---------------------------
+
+To run the Celery test suite you need to install a few dependencies.
+A complete list of the dependencies needed are located in
+``requirements/test.txt``.
+
+Installing the test requirements:
+::
+
+    $ pip install -U -r requirements/test.txt
+
+When installation of dependencies is complete you can execute
+the test suite by calling ``nosetests``:
+::
+
+    $ nosetests
+
+Some useful options to ``nosetests`` are:
+
+* ``-x``
+
+    Stop running the tests at the first test that fails.
+
+* ``-s``
+
+    Don't capture output
+
+* ``--nologcapture``
+
+    Don't capture log output.
+
+* ``-v``
+
+    Run with verbose output.
+
+If you want to run the tests for a single test file only
+you can do so like this:
+::
+
+    $ nosetests celery.tests.test_worker.test_worker_job
+
+.. _contributing-pull-requests:
+
+Creating pull requests
+----------------------
+
+When your feature/bugfix is complete you may want to submit
+a pull request so that it can be reviewed by the maintainers.
+
+Creating pull requests is easy, and it also lets you track the progress
+of your contribution.  Read the `Pull Requests`_ section in the Github
+Guide to learn how this is done.
+
+You can also attach pull requests to existing issues by following
+the steps outlined here: http://bit.ly/koJoso
+
+.. _`Pull Requests`: http://help.github.com/send-pull-requests/
+
+.. _contributing-coverage:
+
+Calculating test coverage
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To calculate test coverage you must first install the ``coverage`` module.
+
+Installing the ``coverage`` module:
+::
+
+    $ pip install -U coverage
+
+Code coverage in HTML:
+::
+
+    $ nosetests --with-coverage --cover-html
+
+The coverage output will then be located at
+``celery/tests/cover/index.html``.
+
+Code coverage in XML (Cobertura-style):
+::
+
+    $ nosetests --with-coverage --cover-xml --cover-xml-file=coverage.xml
+
+The coverage XML output will then be located at ``coverage.xml``.
+
+.. _contributing-tox:
+
+Running the tests on all supported Python versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is a ``tox`` configuration file in the top directory of the
+distribution.
+
+To run the tests for all supported Python versions simply execute:
+::
+
+    $ tox
+
+If you only want to test specific Python versions use the ``-e``
+option:
+::
+
+    $ tox -e py26
+
+Building the documentation
+--------------------------
+
+To build the documentation you need to install the dependencies
+listed in ``requirements/docs.txt``:
+::
+
+    $ pip install -U -r requirements/docs.txt
+
+After these dependencies are installed you should be able to
+build the docs by running:
+::
+
+    $ cd docs
+    $ rm -rf .build
+    $ make html
+
+Make sure there are no errors or warnings in the build output.
+After building succeeds the documentation is available at ``.build/html``.
+
+.. _contributing-verify:
+
+Verifying your contribution
+---------------------------
+
+To use these tools you need to install a few dependencies.  These dependencies
+can be found in ``requirements/pkgutils.txt``.
+
+Installing the dependencies:
+::
+
+    $ pip install -U -r requirements/pkgutils.txt
+
+pyflakes & PEP8
+~~~~~~~~~~~~~~~
+
+To ensure that your changes conform to PEP8 and to run pyflakes
+execute:
+::
+
+    $ paver flake8
+
+To not return a negative exit code when this command fails, use the
+``-E`` option; this can be convenient while developing:
+::
+
+    $ paver flake8 -E
+
+API reference
+~~~~~~~~~~~~~
+
+To make sure that all modules have a corresponding section in the API
+reference please execute:
+::
+
+    $ paver autodoc
+    $ paver verifyindex
+
+If files are missing you can add them by copying an existing reference file.
+
+If the module is internal it should be part of the internal reference
+located in ``docs/internals/reference/``.  If the module is public
+it should be located in ``docs/reference/``.
+
+For example if reference is missing for the module ``celery.worker.awesome``
+and this module is considered part of the public API, use the following steps:
+
+
+Use an existing file as a template:
+::
+
+    $ cd docs/reference/
+    $ cp celery.schedules.rst celery.worker.awesome.rst
+
+Edit the file using your favorite editor:
+::
+
+    $ vim celery.worker.awesome.rst
+
+        # change every occurrence of ``celery.schedules`` to
+        # ``celery.worker.awesome``
+
+
+Edit the index using your favorite editor:
+::
+
+    $ vim index.rst
+
+        # Add ``celery.worker.awesome`` to the index.
+
+
+Commit your changes:
+::
+
+    # Add the file to git
+    $ git add celery.worker.awesome.rst
+    $ git add index.rst
+    $ git commit celery.worker.awesome.rst index.rst \
+        -m "Adds reference for celery.worker.awesome"
+
+.. _coding-style:
+
+Coding Style
+============
+
+You should probably be able to pick up the coding style
+from surrounding code, but it is a good idea to be aware of the
+following conventions.
+
+* All Python code must follow the `PEP-8`_ guidelines.
+
+`pep8.py`_ is a utility you can use to verify that your code
+is following the conventions.
+
+.. _`PEP-8`: http://www.python.org/dev/peps/pep-0008/
+.. _`pep8.py`: http://pypi.python.org/pypi/pep8
+
+* Docstrings must follow the `PEP-257`_ conventions, and use the following
+  style.
+
+    Do this:
+    ::
+
+        def method(self, arg):
+            """Short description.
+
+            More details.
+
+            """
+
+    or:
+    ::
+
+        def method(self, arg):
+            """Short description."""
+
+
+    but not this:
+    ::
+
+        def method(self, arg):
+            """
+            Short description.
+            """
+
+.. _`PEP-257`: http://www.python.org/dev/peps/pep-0257/
+
+* Lines should not exceed 78 columns.
+
+  You can enforce this in ``vim`` by setting the ``textwidth`` option:
+  ::
+
+        set textwidth=78
+
+  If adhering to this limit makes the code less readable, you have one more
+  character to go on, which means 78 is a soft limit, and 79 is the hard
+  limit :)
+
+* Import order
+
+    * Python standard library (`import xxx`)
+    * Python standard library (`from xxx import`)
+    * Third party packages.
+    * Other modules from the current package.
+
+    or in case of code using Django:
+
+    * Python standard library (`import xxx`)
+    * Python standard library (`from xxx import`)
+    * Third party packages.
+    * Django packages.
+    * Other modules from the current package.
+
+    Within these sections the imports should be sorted by module name.
+
+    Example:
+    ::
+
+        import threading
+        import time
+
+        from collections import deque
+        from Queue import Queue, Empty
+
+        from .datastructures import TokenBucket
+        from .five import zip_longest, items, range
+        from .utils import timeutils
+
+* Wildcard imports must not be used (`from xxx import *`).
+
+* For distributions where Python 2.5 is the oldest supported version
+  additional rules apply:
+
+    * Absolute imports must be enabled at the top of every module::
+
+        from __future__ import absolute_import
+
+    * If the module uses the with statement and must be compatible
+      with Python 2.5 (celery is not) then it must also enable that::
+
+        from __future__ import with_statement
+
+    * Every future import must be on its own line, as older Python 2.5
+      releases did not support importing multiple features on the
+      same future import line::
+
+        # Good
+        from __future__ import absolute_import
+        from __future__ import with_statement
+
+        # Bad
+        from __future__ import absolute_import, with_statement
+
+     (Note that this rule does not apply if the package does not include
+     support for Python 2.5)
+
+
+* Note that we use "new-style" relative imports when the distribution
+  does not support Python versions below 2.5.
+
+    This requires Python 2.5 or later:
+    ::
+
+        from . import submodule
+
+
+.. _feature-with-extras:
+
+Contributing features requiring additional libraries
+====================================================
+
+Some features like a new result backend may require additional libraries
+that the user must install.
+
+We use setuptools `extras_require` for this, and all new optional features
+that require 3rd party libraries must be added.
+
+1) Add a new requirements file in `requirements/extras`
+
+    E.g. for the Cassandra backend this is
+    ``requirements/extras/cassandra.txt``, and the file looks like this::
+
+        pycassa
+
+    These are pip requirement files so you can have version specifiers and
+    multiple packages are separated by newline.  A more complex example could
+    be::
+
+        # pycassa 2.0 breaks Foo
+        pycassa>=1.0,<2.0
+        thrift
+
+2) Modify ``setup.py``
+
+    After the requirements file is added you need to add it as an option
+    to ``setup.py`` in the ``extras_require`` section::
+
+        extra['extras_require'] = {
+            # ...
+            'cassandra': extras('cassandra.txt'),
+        }
+
+3) Document the new feature in ``docs/includes/installation.txt``
+
+    You must add your feature to the list in the `bundles`_ section
+    of ``docs/includes/installation.txt``.
+
+    After you've made changes to this file you need to render
+    the distro ``README`` file:
+    ::
+
+        $ pip install -U -r requirements/pkgutils.txt
+        $ paver readme
+
+
+That's all that needs to be done, but remember that if your feature
+adds additional configuration options then these need to be documented
+in ``docs/configuration.rst``.  Also all settings need to be added to the
+``celery/app/defaults.py`` module.
+
+Result backends require a separate section in the ``docs/configuration.rst``
+file.
+
+.. _contact_information:
+
+Contacts
+========
+
+This is a list of people that can be contacted for questions
+regarding the official git repositories, PyPI packages and
+Read the Docs pages.
+
+If the issue is not an emergency then it is better
+to `report an issue`_.
+
+
+Committers
+----------
+
+Ask Solem
+~~~~~~~~~
+
+:github: https://github.com/ask
+:twitter: http://twitter.com/#!/asksol
+
+Mher Movsisyan
+~~~~~~~~~~~~~~
+
+:github: https://github.com/mher
+:twitter: http://twitter.com/#!/movsm
+
+Steeve Morin
+~~~~~~~~~~~~
+
+:github: https://github.com/steeve
+:twitter: http://twitter.com/#!/steeve
+
+Website
+-------
+
+The Celery Project website is run and maintained by
+
+Mauro Rocco
+~~~~~~~~~~~
+
+:github: https://github.com/fireantology
+:twitter: https://twitter.com/#!/fireantology
+
+with design by:
+
+Jan Henrik Helmers
+~~~~~~~~~~~~~~~~~~
+
+:web: http://www.helmersworks.com
+:twitter: http://twitter.com/#!/helmers
+
+
+.. _packages:
+
+Packages
+========
+
+celery
+------
+
+:git: https://github.com/celery/celery
+:CI: http://travis-ci.org/#!/celery/celery
+:PyPI: http://pypi.python.org/pypi/celery
+:docs: http://docs.celeryproject.org
+
+kombu
+-----
+
+Messaging library.
+
+:git: https://github.com/celery/kombu
+:CI: http://travis-ci.org/#!/celery/kombu
+:PyPI: http://pypi.python.org/pypi/kombu
+:docs: http://kombu.readthedocs.org
+
+amqp
+----
+
+Python AMQP 0.9.1 client.
+
+:git: https://github.com/celery/py-amqp
+:CI: http://travis-ci.org/#!/celery/py-amqp
+:PyPI: http://pypi.python.org/pypi/amqp
+:docs: http://amqp.readthedocs.org
+
+billiard
+--------
+
+Fork of multiprocessing containing improvements
+that will eventually be merged into the Python stdlib.
+
+:git: https://github.com/celery/billiard
+:PyPI: http://pypi.python.org/pypi/billiard
+
+librabbitmq
+-----------
+
+Very fast Python AMQP client written in C.
+
+:git: https://github.com/celery/librabbitmq
+:PyPI: http://pypi.python.org/pypi/librabbitmq
+
+celerymon
+---------
+
+Celery monitor web-service.
+
+:git: https://github.com/celery/celerymon
+:PyPI: http://pypi.python.org/pypi/celerymon
+
+django-celery
+-------------
+
+Django <-> Celery Integration.
+
+:git: https://github.com/celery/django-celery
+:PyPI: http://pypi.python.org/pypi/django-celery
+:docs: http://docs.celeryproject.org/en/latest/django
+
+cl
+--
+
+Actor library.
+
+:git: https://github.com/celery/cl
+:PyPI: http://pypi.python.org/pypi/cl
+
+cyme
+----
+
+Distributed Celery Instance manager.
+
+:git: https://github.com/celery/cyme
+:PyPI: http://pypi.python.org/pypi/cyme
+:docs: http://cyme.readthedocs.org/
+
+
+Deprecated
+----------
+
+- Flask-Celery
+
+:git: https://github.com/ask/Flask-Celery
+:PyPI: http://pypi.python.org/pypi/Flask-Celery
+
+- carrot
+
+:git: https://github.com/ask/carrot
+:PyPI: http://pypi.python.org/pypi/carrot
+
+- ghettoq
+
+:git: https://github.com/ask/ghettoq
+:PyPI: http://pypi.python.org/pypi/ghettoq
+
+- kombu-sqlalchemy
+
+:git: https://github.com/ask/kombu-sqlalchemy
+:PyPI: http://pypi.python.org/pypi/kombu-sqlalchemy
+
+- django-kombu
+
+:git: https://github.com/ask/django-kombu
+:PyPI: http://pypi.python.org/pypi/django-kombu
+
+- pylibrabbitmq
+
+Old name for ``librabbitmq``.
+
+:git: ``None``
+:PyPI: http://pypi.python.org/pypi/pylibrabbitmq
+
+.. _release-procedure:
+
+
+Release Procedure
+=================
+
+Updating the version number
+---------------------------
+
+The version number must be updated in two places:
+
+    * ``celery/__init__.py``
+    * ``docs/include/introduction.txt``
+
+After you have changed these files you must render
+the ``README`` files.  There is a script to convert sphinx syntax
+to generic reStructuredText syntax, and the paver task `readme`
+does this for you:
+::
+
+    $ paver readme
+
+Now commit the changes:
+::
+
+    $ git commit -a -m "Bumps version to X.Y.Z"
+
+and make a new version tag:
+::
+
+    $ git tag vX.Y.Z
+    $ git push --tags
+
+Releasing
+---------
+
+Commands to make a new public stable release::
+
+    $ paver releaseok  # checks pep8, autodoc index, runs tests and more
+    $ paver removepyc  # Remove .pyc files
+    $ git clean -xdn   # Check that there's no left-over files in the repo
+    $ python setup.py sdist upload  # Upload package to PyPI
+
+If this is a new release series then you also need to do the
+following:
+
+* Go to the Read The Docs management interface at:
+    http://readthedocs.org/projects/celery/?fromdocs=celery
+
+* Enter "Edit project"
+
+    Change default branch to the branch of this series, e.g. ``2.4``
+    for series 2.4.
+
+* Also add the previous version under the "versions" tab.
+
+.. _`mailing-list`: http://groups.google.com/group/celery-users
+
+.. _`irc-channel`: http://docs.celeryproject.org/en/latest/getting-started/resources.html#irc
+
+.. _`internals-guide`: http://docs.celeryproject.org/en/latest/internals/guide.html
+
+.. _`bundles`: http://docs.celeryproject.org/en/latest/getting-started/introduction.html#bundles
+
+.. _`report an issue`: http://docs.celeryproject.org/en/latest/contributing.html#reporting-bugs
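
The extras section above shows only the ``extras_require`` fragment. As a rough
sketch of how the pieces could fit together in a generic ``setup.py`` (not taken
from this commit; the ``extras()`` helper and the ``myproject`` name are
assumptions), consider::

    # setup.py -- sketch only; package name and helper are hypothetical.
    from setuptools import setup, find_packages


    def extras(filename):
        """Read requirements/extras/<filename> into a list of requirements."""
        with open('requirements/extras/' + filename) as fh:
            return [line.strip() for line in fh
                    if line.strip() and not line.startswith('#')]


    setup(
        name='myproject',
        version='0.1.0',
        packages=find_packages(),
        extras_require={
            # installed with: pip install 'myproject[cassandra]'
            'cassandra': extras('cassandra.txt'),
        },
    )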
+

+ 10 - 2
CONTRIBUTORS.txt

@@ -152,5 +152,13 @@ Michael Robellard, 2013/11/07
 Vsevolod Kulaga, 2013/11/16
 Ionel Cristian Mărieș, 2013/12/09
 Константин Подшумок, 2013/12/16
-Antoine Legrand, 2014/09/01
-Pepijn de Vos, 2014/15/01
+Antoine Legrand, 2014/01/09
+Pepijn de Vos, 2014/01/15
+Dan McGee, 2014/01/27
+Paul Kilgo, 2014/01/28
+Martin Davidsson, 2014/02/08
+Chris Clark, 2014/02/20
+Matthew Duggan, 2014/04/10
+Brian Bouterse, 2014/04/10
+Dmitry Malinovsky, 2014/04/28
+Luke Pomfrey, 2014/05/06

+ 287 - 0
Changelog

@@ -8,6 +8,293 @@ This document contains change notes for bugfix releases in the 3.1.x series
 (Cipater), please see :ref:`whatsnew-3.1` for an overview of what's
 new in Celery 3.1.
 
+.. _version-3.1.11:
+
+3.1.11
+======
+:release-date: 2014-04-16 11:00 P.M UTC
+:release-by: Ask Solem
+
+- **Now compatible with RabbitMQ 3.3.0**
+
+    You need to run Celery 3.1.11 or later when using RabbitMQ 3.3,
+    and if you use the ``librabbitmq`` module you also have to upgrade
+    to librabbitmq 1.5.0:
+
+    .. code-block:: bash
+
+        $ pip install -U librabbitmq
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.15 <kombu:version-3.0.15>`.
+
+    - Now depends on `billiard 3.3.0.17`_.
+
+    - Bundle ``celery[librabbitmq]`` now depends on :mod:`librabbitmq` 1.5.0.
+
+.. _`billiard 3.3.0.17`:
+    https://github.com/celery/billiard/blob/master/CHANGES.txt
+
+- **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being
+  ignored (Issue #1953).
+
+- **Worker**: New :option:`--heartbeat-interval` can be used to change the
+  time (in seconds) between sending event heartbeats.
+
+    Contributed by Matthew Duggan and Craig Northway.
+
+- **App**: Fixed memory leaks occurring when creating lots of temporary
+  app instances (Issue #1949).
+
+- **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB
+  results backend (Issue #1973).
+
+    Fix contributed by Brian Bouterse.
+
+- **Logging**: The color formatter accidentally modified ``record.msg``
+  (Issue #1939).
+
+- **Results**: Fixed problem with task trails being stored multiple times,
+  causing ``result.collect()`` to hang (Issue #1936, Issue #1943).
+
+- **Results**: ``ResultSet`` now implements a ``.backend`` attribute for
+  compatibility with ``AsyncResult``.
+
+- **Results**: ``.forget()`` now also clears the local cache.
+
+- **Results**: Fixed problem with multiple calls to ``result._set_cache``
+  (Issue #1940).
+
+- **Results**: ``join_native`` populated result cache even if disabled.
+
+- **Results**: The YAML result serializer should now be able to handle storing
+  exceptions.
+
+- **Worker**: No longer sends task error emails for expected errors (in
+  ``@task(throws=(..., ))``).
+
+- **Canvas**: Fixed problem with exception deserialization when using
+  the JSON serializer (Issue #1987).
+
+- **Eventlet**: Fixes crash when ``celery.contrib.batches`` attempted to
+  cancel a non-existing timer (Issue #1984).
+
+- Can now import ``celery.version_info_t``, and ``celery.five`` (Issue #1968).
+
+
+.. _version-3.1.10:
+
+3.1.10
+======
+:release-date: 2014-03-22 09:40 P.M UTC
+:release-by: Ask Solem
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.14 <kombu:version-3.0.14>`.
+
+- **Redis:** Important note about events (Issue #1882).
+
+    There is a new transport option for Redis that enables monitors
+    to filter out unwanted events.  Enabling this option in the workers
+    will increase performance considerably:
+
+    .. code-block:: python
+
+        BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True}
+
+    Enabling this option means that your workers will not be able to see
+    workers with the option disabled (or running an older version of
+    Celery), so if you do enable it then make sure you do so on all
+    nodes.
+
+    See :ref:`redis-caveats-fanout-patterns`.
+
+    This will be the default in Celery 3.2.
+
+- **Results**: The :class:`@AsyncResult` object now keeps a local cache
+  of the final state of the task.
+
+    This means that the global result cache can finally be disabled,
+    and you can do so by setting :setting:`CELERY_MAX_CACHED_RESULTS` to
+    :const:`-1`.  The lifetime of the cache will then be bound to the
+    lifetime of the result object, which will be the default behavior
+    in Celery 3.2.
+
+- **Events**: The "Substantial drift" warning message is now logged once
+  per node name only (Issue #1802).
+
+- **Worker**: Ability to use one log file per child process when using the
+  prefork pool.
+
+    This can be enabled by using the new ``%i`` and ``%I`` format specifiers
+    for the log file name.  See :ref:`worker-files-process-index`.
+
+- **Redis**: New experimental chord join implementation.
+
+    This is an optimization for chords when using the Redis result backend,
+    where the join operation is now considerably faster and using less
+    resources than the previous strategy.
+
+    The new option can be set in the result backend URL::
+
+        CELERY_RESULT_BACKEND = 'redis://localhost?new_join=1'
+
+    This must be enabled manually as it's incompatible
+    with workers and clients not using it, so be sure to enable
+    the option in all clients and workers if you decide to use it.
+
+- **Multi**: With ``-opt:index`` (e.g. :option:`-c:1`) the index now always refers
+  to the position of a node in the argument list.
+
+    This means that referring to a number will work when specifying a list
+    of node names and not just for a number range:
+
+    .. code-block:: bash
+
+        celery multi start A B C D -c:1 4 -c:2-4 8
+
+    In this example ``1`` refers to node A (as it's the first node in the
+    list).
+
+- **Signals**: The sender argument to ``Signal.connect`` can now be a proxy
+  object, which means that it can be used with the task decorator
+  (Issue #1873).
+
+- **Task**: A regression caused the ``queue`` argument to ``Task.retry`` to be
+  ignored (Issue #1892).
+
+- **App**: Fixed error message for :meth:`~@Celery.config_from_envvar`.
+
+    Fix contributed by Dmitry Malinovsky.
+
+- **Canvas**: Chords can now contain a group of other chords (Issue #1921).
+
+- **Canvas**: Chords can now be combined when using the amqp result backend
+  (a chord where the callback is also a chord).
+
+- **Canvas**: Calling ``result.get()`` for a chain task will now complete
+  even if one of the tasks in the chain is ``ignore_result=True``
+  (Issue #1905).
+
+- **Canvas**: Worker now also logs chord errors.
+
+- **Canvas**: A chord task raising an exception will now result in
+  any errbacks (``link_error``) to the chord callback to also be called.
+
+- **Results**: Reliability improvements to the SQLAlchemy database backend
+  (Issue #1786).
+
+    Previously the connection from the ``MainProcess`` was improperly
+    inherited by child processes.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **Task**: Task callbacks and errbacks are now called using the group
+  primitive.
+
+- **Task**: ``Task.apply`` now properly sets ``request.headers``
+  (Issue #1874).
+
+- **Worker**: Fixed ``UnicodeEncodeError`` occurring when worker is started
+  by `supervisord`.
+
+    Fix contributed by Codeb Fan.
+
+- **Beat**: No longer attempts to upgrade a newly created database file
+  (Issue #1923).
+
+- **Beat**: New setting :setting:`CELERYBEAT_SYNC_EVERY` can be used
+  to control file sync by specifying the number of tasks to send between
+  each sync.
+
+    Contributed by Chris Clark.
+
+- **Commands**: :program:`celery inspect memdump` no longer crashes
+  if the :mod:`psutil` module is not installed (Issue #1914).
+
+- **Worker**: Remote control commands now always accept json serialized
+  messages (Issue #1870).
+
+- **Worker**: Gossip will now drop any task related events it receives
+  by mistake (Issue #1882).
+
+
+.. _version-3.1.9:
+
+3.1.9
+=====
+:release-date: 2014-02-10 06:43 P.M UTC
+:release-by: Ask Solem
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.12 <kombu:version-3.0.12>`.
+
+- **Prefork pool**: Better handling of exiting child processes.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **Prefork pool**: Now makes sure all file descriptors are removed
+  from the hub when a process is cleaned up.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **New Sphinx extension**: for autodoc documentation of tasks:
+  :mod:`celery.contrib.sphinx` (Issue #1833).
+
+- **Django**: Now works with Django 1.7a1.
+
+- **Task**: Task.backend is now a property that forwards to ``app.backend``
+  if no custom backend has been specified for the task (Issue #1821).
+
+- **Generic init scripts**: Fixed bug in stop command.
+
+    Fix contributed by Rinat Shigapov.
+
+- **Generic init scripts**: Fixed compatibility with GNU :manpage:`stat`.
+
+    Fix contributed by Paul Kilgo.
+
+- **Generic init scripts**: Fixed compatibility with the minimal
+  :program:`dash` shell (Issue #1815).
+
+- **Commands**: The :program:`celery amqp basic.publish` command was not
+  working properly.
+
+    Fix contributed by Andrey Voronov.
+
+- **Commands**: Did no longer emit an error message if the pidfile exists
+  and the process is still alive (Issue #1855).
+
+- **Commands**: Better error message for missing arguments to preload
+  options (Issue #1860).
+
+- **Commands**: :program:`celery -h` did not work because of a bug in the
+  argument parser (Issue #1849).
+
+- **Worker**: Improved error message for message decoding errors.
+
+- **Time**: Now properly parses the `Z` timezone specifier in ISO 8601 date
+  strings.
+
+    Fix contributed by Martin Davidsson.
+
+- **Worker**: Now uses the *negotiated* heartbeat value to calculate
+  how often to run the heartbeat checks.
+
+- **Beat**: Fixed problem with beat hanging after the first schedule
+  iteration (Issue #1822).
+
+    Fix contributed by Roger Hu.
+
+- **Signals**: The header argument to :signal:`before_task_publish` is now
+  always a dictionary instance so that signal handlers can add headers.
+
+- **Worker**: A list of message headers is now included in message related
+  errors.
+
 .. _version-3.1.8:
 
 3.1.8
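
Taken together, the opt-in settings described in the 3.1.10 notes above could be
collected in a configuration module along these lines (a sketch only; the Redis
host is a placeholder, and the entries above stress that ``fanout_patterns`` and
``new_join`` must be enabled on all nodes at once)::

    # celeryconfig.py -- sketch only, values are placeholders
    BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True}     # Redis event filtering (3.1.10)
    CELERY_RESULT_BACKEND = 'redis://localhost?new_join=1'   # experimental chord join (3.1.10)
    CELERY_MAX_CACHED_RESULTS = -1                           # rely on the per-result local cache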

+ 6 - 6
README.rst

@@ -4,7 +4,7 @@
 
 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
 
-:Version: 3.1.8 (Cipater)
+:Version: 3.2.0a1 (Cipater)
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/celery/celery/
@@ -81,8 +81,8 @@ getting started tutorials:
 .. _`Next steps`:
     http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
 
-Celery is
-==========
+Celery is...
+============
 
 - **Simple**
 
@@ -119,8 +119,8 @@ Celery is…
     Custom pool implementations, serializers, compression schemes, logging,
     schedulers, consumers, producers, autoscalers, broker transports and much more.
 
-It supports
-============
+It supports...
+==============
 
     - **Message Transports**
 
@@ -128,7 +128,7 @@ It supports…
         - MongoDB_ (experimental), Amazon SQS (experimental),
         - CouchDB_ (experimental), SQLAlchemy_ (experimental),
         - Django ORM (experimental), `IronMQ`_
-        - and more
+        - and more...
 
     - **Concurrency**
 

+ 7 - 6
celery/__init__.py

@@ -5,7 +5,7 @@
 # :copyright: (c) 2012-2013 GoPivotal, Inc., All rights reserved.
 # :license:   BSD (3 Clause), see LICENSE for more details.
 
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 from collections import namedtuple
 
@@ -13,8 +13,8 @@ version_info_t = namedtuple(
     'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
 )
 
-SERIES = 'Cipater'
-VERSION = version_info_t(3, 1, 8, '', '')
+SERIES = 'DEV'
+VERSION = version_info_t(3, 2, 0, 'a2', '')
 __version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
 __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
@@ -127,9 +127,9 @@ def maybe_patch_concurrency(argv=sys.argv,
         concurrency.get_implementation(pool)
 
 # Lazy loading
-from .five import recreate_module
+from celery import five
 
-old_module, new_module = recreate_module(  # pragma: no cover
+old_module, new_module = five.recreate_module(  # pragma: no cover
     __name__,
     by_module={
         'celery.app': ['Celery', 'bugreport', 'shared_task'],
@@ -144,8 +144,9 @@ old_module, new_module = recreate_module(  # pragma: no cover
     __package__='celery', __file__=__file__,
     __path__=__path__, __doc__=__doc__, __version__=__version__,
     __author__=__author__, __contact__=__contact__,
-    __homepage__=__homepage__, __docformat__=__docformat__,
+    __homepage__=__homepage__, __docformat__=__docformat__, five=five,
     VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
+    version_info_t=version_info_t,
     maybe_patch_concurrency=maybe_patch_concurrency,
     _find_option_with_arg=_find_option_with_arg,
 )
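
For reference, the version bump in the hunk above builds ``__version__`` from the
``version_info_t`` namedtuple using an attribute-access format string; a
standalone sketch of the same formatting::

    from collections import namedtuple

    version_info_t = namedtuple(
        'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
    )

    VERSION = version_info_t(3, 2, 0, 'a2', '')
    __version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
    print(__version__)  # -> 3.2.0a2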

+ 1 - 1
celery/__main__.py

@@ -1,4 +1,4 @@
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 

+ 26 - 31
celery/_state.py

@@ -9,7 +9,7 @@
     This module shouldn't be used directly.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 import sys
@@ -19,43 +19,35 @@ import weakref
 from celery.local import Proxy
 from celery.utils.threads import LocalStack
 
-try:
-    from weakref import WeakSet as AppSet
-except ImportError:  # XXX Py2.6
-
-    class AppSet(object):  # noqa
-
-        def __init__(self):
-            self._refs = set()
-
-        def add(self, app):
-            self._refs.add(weakref.ref(app))
-
-        def __iter__(self):
-            dirty = []
-            try:
-                for appref in self._refs:
-                    app = appref()
-                    if app is None:
-                        dirty.append(appref)
-                    else:
-                        yield app
-            finally:
-                while dirty:
-                    self._refs.discard(dirty.pop())
-
 __all__ = ['set_default_app', 'get_current_app', 'get_current_task',
-           'get_current_worker_task', 'current_app', 'current_task']
+           'get_current_worker_task', 'current_app', 'current_task',
+           'connect_on_app_finalize']
 
 #: Global default app used when no current app.
 default_app = None
 
 #: List of all app instances (weakrefs), must not be used directly.
-_apps = AppSet()
+_apps = weakref.WeakSet()
+
+#: global set of functions to call whenever a new app is finalized
+#: E.g. Shared tasks, and builtin tasks are created
+#: by adding callbacks here.
+_on_app_finalizers = set()
 
 _task_join_will_block = False
 
 
+def connect_on_app_finalize(callback):
+    _on_app_finalizers.add(callback)
+    return callback
+
+
+def _announce_app_finalized(app):
+    callbacks = set(_on_app_finalizers)
+    for callback in callbacks:
+        callback(app)
+
+
 def _set_task_join_will_block(blocks):
     global _task_join_will_block
     _task_join_will_block = blocks
@@ -85,13 +77,16 @@ def _get_current_app():
         #: creates the global fallback app instance.
         from celery.app import Celery
         set_default_app(Celery(
-            'default',
+            'default', fixups=[], set_as_current=False,
             loader=os.environ.get('CELERY_LOADER') or 'default',
-            fixups=[],
-            set_as_current=False, accept_magic_kwargs=True,
         ))
     return _tls.current_app or default_app
 
+
+def _set_current_app(app):
+    _tls.current_app = app
+
+
 C_STRICT_APP = os.environ.get('C_STRICT_APP')
 if os.environ.get('C_STRICT_APP'):  # pragma: no cover
     def get_current_app():
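
The new ``connect_on_app_finalize`` registry above is what replaces the old
shared-task machinery: each registered callback runs once for every app that is
finalized. A minimal usage sketch (the ``demo.ping`` task is hypothetical, not
part of this commit)::

    from celery import Celery
    from celery._state import connect_on_app_finalize

    @connect_on_app_finalize
    def add_ping_task(app):
        # Runs once for each Celery app that is finalized from now on.
        @app.task(name='demo.ping')
        def ping():
            return 'pong'
        return ping

    app = Celery('demo')
    app.finalize()  # triggers the callback registered above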

+ 5 - 6
celery/app/__init__.py

@@ -6,22 +6,19 @@
     Celery Application.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 
 from celery.local import Proxy
 from celery import _state
 from celery._state import (
-    set_default_app,
     get_current_app as current_app,
     get_current_task as current_task,
-    _get_active_apps,
-    _task_stack,
+    connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
 )
 from celery.utils import gen_task_name
 
-from .builtins import shared_task as _shared_task
 from .base import Celery, AppPickler
 
 __all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
@@ -128,7 +125,9 @@ def shared_task(*args, **kwargs):
             name = options.get('name')
             # Set as shared task so that unfinalized apps,
             # and future apps will load the task.
-            _shared_task(lambda app: app._task_from_fun(fun, **options))
+            connect_on_app_finalize(
+                lambda app: app._task_from_fun(fun, **options)
+            )
 
             # Force all finalized apps to take this task as well.
             for app in _get_active_apps():
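
After this change ``shared_task`` simply registers its factory via
``connect_on_app_finalize`` instead of the removed ``builtins.shared_task``
helper; the public decorator behaves as before. A minimal sketch (the ``add``
task is illustrative only)::

    from celery import shared_task

    @shared_task
    def add(x, y):
        # Picked up by every app that is finalized in this process,
        # so library code does not need to import a concrete app instance.
        return x + y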

+ 264 - 224
celery/app/amqp.py

@@ -8,13 +8,16 @@
 """
 from __future__ import absolute_import
 
+import numbers
+
+from collections import Mapping, namedtuple
 from datetime import timedelta
 from weakref import WeakValueDictionary
 
 from kombu import Connection, Consumer, Exchange, Producer, Queue
 from kombu.common import Broadcast
 from kombu.pools import ProducerPool
-from kombu.utils import cached_property, uuid
+from kombu.utils import cached_property
 from kombu.utils.encoding import safe_repr
 from kombu.utils.functional import maybe_list
 
@@ -23,10 +26,9 @@ from celery.five import items, string_t
 from celery.utils.text import indent as textindent
 from celery.utils.timeutils import to_utc
 
-from . import app_or_default
 from . import routes as _routes
 
-__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer']
+__all__ = ['AMQP', 'Queues', 'task_message']
 
 #: Human readable queue declaration.
 QUEUE_FORMAT = """
@@ -34,6 +36,9 @@ QUEUE_FORMAT = """
 key={0.routing_key}
 """
 
+task_message = namedtuple('task_message',
+                          ('headers', 'properties', 'body', 'sent_event'))
+
 
 class Queues(dict):
     """Queue name⇒ declaration mapping.
@@ -60,7 +65,7 @@ class Queues(dict):
         self.ha_policy = ha_policy
         self.autoexchange = Exchange if autoexchange is None else autoexchange
         if isinstance(queues, (tuple, list)):
-            queues = dict((q.name, q) for q in queues)
+            queues = {q.name: q for q in queues}
         for name, q in items(queues or {}):
             self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
 
@@ -151,8 +156,9 @@ class Queues(dict):
                         Can be iterable or string.
         """
         if include:
-            self._consume_from = dict((name, self[name])
-                                      for name in maybe_list(include))
+            self._consume_from = {
+                name: self[name] for name in maybe_list(include)
+            }
     select_subset = select  # XXX compat
 
     def deselect(self, exclude):
@@ -182,203 +188,14 @@ class Queues(dict):
         return self
 
 
-class TaskProducer(Producer):
-    app = None
-    auto_declare = False
-    retry = False
-    retry_policy = None
-    utc = True
-    event_dispatcher = None
-    send_sent_event = False
-
-    def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.retry = kwargs.pop('retry', self.retry)
-        self.retry_policy = kwargs.pop('retry_policy',
-                                       self.retry_policy or {})
-        self.send_sent_event = kwargs.pop('send_sent_event',
-                                          self.send_sent_event)
-        exchange = exchange or self.exchange
-        self.queues = self.app.amqp.queues  # shortcut
-        self.default_queue = self.app.amqp.default_queue
-        super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
-
-    def publish_task(self, task_name, task_args=None, task_kwargs=None,
-                     countdown=None, eta=None, task_id=None, group_id=None,
-                     taskset_id=None,  # compat alias to group_id
-                     expires=None, exchange=None, exchange_type=None,
-                     event_dispatcher=None, retry=None, retry_policy=None,
-                     queue=None, now=None, retries=0, chord=None,
-                     callbacks=None, errbacks=None, routing_key=None,
-                     serializer=None, delivery_mode=None, compression=None,
-                     reply_to=None, time_limit=None, soft_time_limit=None,
-                     declare=None, headers=None,
-                     send_before_publish=signals.before_task_publish.send,
-                     before_receivers=signals.before_task_publish.receivers,
-                     send_after_publish=signals.after_task_publish.send,
-                     after_receivers=signals.after_task_publish.receivers,
-                     send_task_sent=signals.task_sent.send,  # XXX deprecated
-                     sent_receivers=signals.task_sent.receivers,
-                     **kwargs):
-        """Send task message."""
-        retry = self.retry if retry is None else retry
-
-        qname = queue
-        if queue is None and exchange is None:
-            queue = self.default_queue
-        if queue is not None:
-            if isinstance(queue, string_t):
-                qname, queue = queue, self.queues[queue]
-            else:
-                qname = queue.name
-            exchange = exchange or queue.exchange.name
-            routing_key = routing_key or queue.routing_key
-        if declare is None and queue and not isinstance(queue, Broadcast):
-            declare = [queue]
-
-        # merge default and custom policy
-        retry = self.retry if retry is None else retry
-        _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
-               else self.retry_policy)
-        task_id = task_id or uuid()
-        task_args = task_args or []
-        task_kwargs = task_kwargs or {}
-        if not isinstance(task_args, (list, tuple)):
-            raise ValueError('task args must be a list or tuple')
-        if not isinstance(task_kwargs, dict):
-            raise ValueError('task kwargs must be a dictionary')
-        if countdown:  # Convert countdown to ETA.
-            now = now or self.app.now()
-            eta = now + timedelta(seconds=countdown)
-            if self.utc:
-                eta = to_utc(eta).astimezone(self.app.timezone)
-        if isinstance(expires, (int, float)):
-            now = now or self.app.now()
-            expires = now + timedelta(seconds=expires)
-            if self.utc:
-                expires = to_utc(expires).astimezone(self.app.timezone)
-        eta = eta and eta.isoformat()
-        expires = expires and expires.isoformat()
-
-        body = {
-            'task': task_name,
-            'id': task_id,
-            'args': task_args,
-            'kwargs': task_kwargs,
-            'retries': retries or 0,
-            'eta': eta,
-            'expires': expires,
-            'utc': self.utc,
-            'callbacks': callbacks,
-            'errbacks': errbacks,
-            'timelimit': (time_limit, soft_time_limit),
-            'taskset': group_id or taskset_id,
-            'chord': chord,
-        }
-
-        if before_receivers:
-            send_before_publish(
-                sender=task_name, body=body,
-                exchange=exchange,
-                routing_key=routing_key,
-                declare=declare,
-                headers=headers,
-                properties=kwargs,
-                retry_policy=retry_policy,
-            )
-
-        self.publish(
-            body,
-            exchange=exchange, routing_key=routing_key,
-            serializer=serializer or self.serializer,
-            compression=compression or self.compression,
-            headers=headers,
-            retry=retry, retry_policy=_rp,
-            reply_to=reply_to,
-            correlation_id=task_id,
-            delivery_mode=delivery_mode, declare=declare,
-            **kwargs
-        )
-
-        if after_receivers:
-            send_after_publish(sender=task_name, body=body,
-                               exchange=exchange, routing_key=routing_key)
-
-        if sent_receivers:  # XXX deprecated
-            send_task_sent(sender=task_name, task_id=task_id,
-                           task=task_name, args=task_args,
-                           kwargs=task_kwargs, eta=eta,
-                           taskset=group_id or taskset_id)
-        if self.send_sent_event:
-            evd = event_dispatcher or self.event_dispatcher
-            exname = exchange or self.exchange
-            if isinstance(exname, Exchange):
-                exname = exname.name
-            evd.publish(
-                'task-sent',
-                {
-                    'uuid': task_id,
-                    'name': task_name,
-                    'args': safe_repr(task_args),
-                    'kwargs': safe_repr(task_kwargs),
-                    'retries': retries,
-                    'eta': eta,
-                    'expires': expires,
-                    'queue': qname,
-                    'exchange': exname,
-                    'routing_key': routing_key,
-                },
-                self, retry=retry, retry_policy=retry_policy,
-            )
-        return task_id
-    delay_task = publish_task   # XXX Compat
-
-    @cached_property
-    def event_dispatcher(self):
-        # We call Dispatcher.publish with a custom producer
-        # so don't need the dispatcher to be "enabled".
-        return self.app.events.Dispatcher(enabled=False)
-
-
-class TaskPublisher(TaskProducer):
-    """Deprecated version of :class:`TaskProducer`."""
-
-    def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.app = app_or_default(kwargs.pop('app', self.app))
-        self.retry = kwargs.pop('retry', self.retry)
-        self.retry_policy = kwargs.pop('retry_policy',
-                                       self.retry_policy or {})
-        exchange = exchange or self.exchange
-        if not isinstance(exchange, Exchange):
-            exchange = Exchange(exchange,
-                                kwargs.pop('exchange_type', 'direct'))
-        self.queues = self.app.amqp.queues  # shortcut
-        super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
-
-
-class TaskConsumer(Consumer):
-    app = None
-
-    def __init__(self, channel, queues=None, app=None, accept=None, **kw):
-        self.app = app or self.app
-        if accept is None:
-            accept = self.app.conf.CELERY_ACCEPT_CONTENT
-        super(TaskConsumer, self).__init__(
-            channel,
-            queues or list(self.app.amqp.queues.consume_from.values()),
-            accept=accept,
-            **kw
-        )
-
-
 class AMQP(object):
     Connection = Connection
     Consumer = Consumer
+    Producer = Producer
 
     #: compat alias to Connection
     BrokerConnection = Connection
 
-    producer_cls = TaskProducer
-    consumer_cls = TaskConsumer
     queues_cls = Queues
 
     #: Cached and prepared routing table.
@@ -396,9 +213,18 @@ class AMQP(object):
 
     def __init__(self, app):
         self.app = app
+        self.task_protocols = {
+            1: self.as_task_v1,
+            2: self.as_task_v2,
+        }
 
-    def flush_routes(self):
-        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
+    @cached_property
+    def create_task_message(self):
+        return self.task_protocols[self.app.conf.CELERY_TASK_PROTOCOL]
+
+    @cached_property
+    def send_task_message(self):
+        return self._create_task_sender()
 
     def Queues(self, queues, create_missing=None, ha_policy=None,
                autoexchange=None):
@@ -426,35 +252,239 @@ class AMQP(object):
                               self.app.either('CELERY_CREATE_MISSING_QUEUES',
                                               create_missing), app=self.app)
 
-    @cached_property
-    def TaskConsumer(self):
-        """Return consumer configured to consume from the queues
-        we are configured for (``app.amqp.queues.consume_from``)."""
-        return self.app.subclass_with_self(self.consumer_cls,
-                                           reverse='amqp.TaskConsumer')
-    get_task_consumer = TaskConsumer  # XXX compat
+    def flush_routes(self):
+        self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
 
-    @cached_property
-    def TaskProducer(self):
-        """Return publisher used to send tasks.
+    def TaskConsumer(self, channel, queues=None, accept=None, **kw):
+        if accept is None:
+            accept = self.app.conf.CELERY_ACCEPT_CONTENT
+        return self.Consumer(
+            channel, accept=accept,
+            queues=queues or list(self.queues.consume_from.values()),
+            **kw
+        )
 
-        You should use `app.send_task` instead.
+    def as_task_v2(self, task_id, name, args=None, kwargs=None,
+                   countdown=None, eta=None, group_id=None,
+                   expires=None, retries=0, chord=None,
+                   callbacks=None, errbacks=None, reply_to=None,
+                   time_limit=None, soft_time_limit=None,
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
+        args = args or ()
+        kwargs = kwargs or {}
+        utc = self.utc
+        if not isinstance(args, (list, tuple)):
+            raise ValueError('task args must be a list or tuple')
+        if not isinstance(kwargs, Mapping):
+            raise ValueError('task keyword arguments must be a mapping')
+        if countdown:  # convert countdown to ETA
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            eta = now + timedelta(seconds=countdown)
+            if utc:
+                eta = to_utc(eta).astimezone(timezone)
+        if isinstance(expires, numbers.Real):
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            expires = now + timedelta(seconds=expires)
+            if utc:
+                expires = to_utc(expires).astimezone(timezone)
+        eta = eta and eta.isoformat()
+        expires = expires and expires.isoformat()
 
-        """
-        conf = self.app.conf
-        return self.app.subclass_with_self(
-            self.producer_cls,
-            reverse='amqp.TaskProducer',
-            exchange=self.default_exchange,
-            routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
-            serializer=conf.CELERY_TASK_SERIALIZER,
-            compression=conf.CELERY_MESSAGE_COMPRESSION,
-            retry=conf.CELERY_TASK_PUBLISH_RETRY,
-            retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
-            send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
-            utc=conf.CELERY_ENABLE_UTC,
+        return task_message(
+            headers={
+                'lang': 'py',
+                'task': name,
+                'id': task_id,
+                'eta': eta,
+                'expires': expires,
+                'callbacks': callbacks,
+                'errbacks': errbacks,
+                'chain': None,  # TODO
+                'group': group_id,
+                'chord': chord,
+                'retries': retries,
+                'timelimit': [time_limit, soft_time_limit],
+                'root_id': root_id,
+                'parent_id': parent_id,
+            },
+            properties={
+                'correlation_id': task_id,
+                'reply_to': reply_to or '',
+            },
+            body=(args, kwargs),
+            sent_event={
+                'uuid': task_id,
+                'root': root_id,
+                'parent': parent_id,
+                'name': name,
+                'args': safe_repr(args),
+                'kwargs': safe_repr(kwargs),
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+            } if create_sent_event else None,
+        )
+
+    def as_task_v1(self, task_id, name, args=None, kwargs=None,
+                   countdown=None, eta=None, group_id=None,
+                   expires=None, retries=0,
+                   chord=None, callbacks=None, errbacks=None, reply_to=None,
+                   time_limit=None, soft_time_limit=None,
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
+        args = args or ()
+        kwargs = kwargs or {}
+        utc = self.utc
+        if not isinstance(args, (list, tuple)):
+            raise ValueError('task args must be a list or tuple')
+        if not isinstance(kwargs, Mapping):
+            raise ValueError('task keyword arguments must be a mapping')
+        if countdown:  # convert countdown to ETA
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            eta = now + timedelta(seconds=countdown)
+            if utc:
+                eta = to_utc(eta).astimezone(timezone)
+        if isinstance(expires, numbers.Real):
+            now = now or self.app.now()
+            timezone = timezone or self.app.timezone
+            expires = now + timedelta(seconds=expires)
+            if utc:
+                expires = to_utc(expires).astimezone(timezone)
+        eta = eta and eta.isoformat()
+        expires = expires and expires.isoformat()
+
+        return task_message(
+            headers={},
+            properties={
+                'correlation_id': task_id,
+                'reply_to': reply_to or '',
+            },
+            body={
+                'task': name,
+                'id': task_id,
+                'args': args,
+                'kwargs': kwargs,
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+                'utc': utc,
+                'callbacks': callbacks,
+                'errbacks': errbacks,
+                'timelimit': (time_limit, soft_time_limit),
+                'taskset': group_id,
+                'chord': chord,
+            },
+            sent_event={
+                'uuid': task_id,
+                'name': name,
+                'args': safe_repr(args),
+                'kwargs': safe_repr(kwargs),
+                'retries': retries,
+                'eta': eta,
+                'expires': expires,
+            } if create_sent_event else None,
         )
-    TaskPublisher = TaskProducer  # compat
+
+    def _create_task_sender(self):
+        default_retry = self.app.conf.CELERY_TASK_PUBLISH_RETRY
+        default_policy = self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
+        default_delivery_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE
+        default_queue = self.default_queue
+        queues = self.queues
+        send_before_publish = signals.before_task_publish.send
+        before_receivers = signals.before_task_publish.receivers
+        send_after_publish = signals.after_task_publish.send
+        after_receivers = signals.after_task_publish.receivers
+
+        send_task_sent = signals.task_sent.send   # XXX compat
+        sent_receivers = signals.task_sent.receivers
+
+        default_evd = self._event_dispatcher
+        default_exchange = self.default_exchange
+
+        default_rkey = self.app.conf.CELERY_DEFAULT_ROUTING_KEY
+        default_serializer = self.app.conf.CELERY_TASK_SERIALIZER
+        default_compressor = self.app.conf.CELERY_MESSAGE_COMPRESSION
+
+        def publish_task(producer, name, message,
+                         exchange=None, routing_key=None, queue=None,
+                         event_dispatcher=None, retry=None, retry_policy=None,
+                         serializer=None, delivery_mode=None,
+                         compression=None, declare=None,
+                         headers=None, **kwargs):
+            retry = default_retry if retry is None else retry
+            headers2, properties, body, sent_event = message
+            if headers:
+                headers2.update(headers)
+            if kwargs:
+                properties.update(kwargs)
+
+            qname = queue
+            if queue is None and exchange is None:
+                queue = default_queue
+            if queue is not None:
+                if isinstance(queue, string_t):
+                    qname, queue = queue, queues[queue]
+                else:
+                    qname = queue.name
+            if delivery_mode is None:
+                try:
+                    delivery_mode = queue.exchange.delivery_mode
+                except AttributeError:
+                    delivery_mode = default_delivery_mode
+            exchange = exchange or queue.exchange.name
+            routing_key = routing_key or queue.routing_key
+            if declare is None and queue and not isinstance(queue, Broadcast):
+                declare = [queue]
+
+            # merge default and custom policy
+            retry = default_retry if retry is None else retry
+            _rp = (dict(default_policy, **retry_policy) if retry_policy
+                   else default_policy)
+
+            if before_receivers:
+                send_before_publish(
+                    sender=name, body=body,
+                    exchange=exchange, routing_key=routing_key,
+                    declare=declare, headers=headers2,
+                    properties=kwargs,  retry_policy=retry_policy,
+                )
+            ret = producer.publish(
+                body,
+                exchange=exchange or default_exchange,
+                routing_key=routing_key or default_rkey,
+                serializer=serializer or default_serializer,
+                compression=compression or default_compressor,
+                retry=retry, retry_policy=_rp,
+                delivery_mode=delivery_mode, declare=declare,
+                headers=headers2,
+                **properties
+            )
+            if after_receivers:
+                send_after_publish(sender=name, body=body, headers=headers2,
+                                   exchange=exchange, routing_key=routing_key)
+            if sent_receivers:  # XXX deprecated
+                send_task_sent(sender=name, task_id=body['id'], task=name,
+                               args=body['args'], kwargs=body['kwargs'],
+                               eta=body['eta'], taskset=body['taskset'])
+            if sent_event:
+                evd = event_dispatcher or default_evd
+                exname = exchange or default_exchange
+                if isinstance(exname, Exchange):
+                    exname = exname.name
+                sent_event.update({
+                    'queue': qname,
+                    'exchange': exname,
+                    'routing_key': routing_key,
+                })
+                evd.publish('task-sent', sent_event,
+                            producer, retry=retry, retry_policy=retry_policy)
+            return ret
+        return publish_task
 
     @cached_property
     def default_queue(self):
@@ -485,7 +515,7 @@ class AMQP(object):
             self._producer_pool = ProducerPool(
                 self.app.pool,
                 limit=self.app.pool.limit,
-                Producer=self.TaskProducer,
+                Producer=self.Producer,
             )
         return self._producer_pool
     publisher_pool = producer_pool  # compat alias
@@ -494,3 +524,13 @@ class AMQP(object):
     def default_exchange(self):
         return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
                         self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
+
+    @cached_property
+    def utc(self):
+        return self.app.conf.CELERY_ENABLE_UTC
+
+    @cached_property
+    def _event_dispatcher(self):
+        # We call Dispatcher.publish with a custom producer
+        # so don't need the dispatcher to be enabled.
+        return self.app.events.Dispatcher(enabled=False)
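
Note: the hunks above split the old `TaskProducer.publish_task` into a message factory (`create_task_message`, selected by the new `CELERY_TASK_PROTOCOL` setting) and a sender closure (`send_task_message`). A minimal sketch of how a caller is expected to combine the two, assuming a reachable broker and a hypothetical task name `proj.tasks.add`:

    from celery import Celery
    from celery.utils import uuid

    # Illustrative app; 'memory://' is only a stand-in broker for the sketch.
    app = Celery('example', broker='memory://')
    amqp = app.amqp

    # Build the wire-level message (protocol 1 or 2, depending on
    # CELERY_TASK_PROTOCOL).  The result is the task_message namedtuple
    # introduced above: (headers, properties, body, sent_event).
    message = amqp.create_task_message(
        uuid(), 'proj.tasks.add', (2, 2), {}, countdown=10,
    )

    # Publish through a pooled producer; with no exchange/routing_key the
    # sender falls back to the default queue, as in the closure above.
    with app.producer_or_acquire() as producer:
        amqp.send_task_message(producer, 'proj.tasks.add', message)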

+ 101 - 58
celery/app/base.py

@@ -13,7 +13,6 @@ import threading
 import warnings
 
 from collections import defaultdict, deque
-from contextlib import contextmanager
 from copy import deepcopy
 from operator import attrgetter
 
@@ -26,25 +25,29 @@ from kombu.utils import cached_property, uuid
 from celery import platforms
 from celery import signals
 from celery._state import (
-    _task_stack, _tls, get_current_app, set_default_app,
-    _register_app, get_current_worker_task,
+    _task_stack, get_current_app, _set_current_app, set_default_app,
+    _register_app, get_current_worker_task, connect_on_app_finalize,
+    _announce_app_finalized,
 )
 from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
 from celery.five import items, values
 from celery.loaders import get_loader_cls
 from celery.local import PromiseProxy, maybe_evaluate
+from celery.utils.dispatch import Signal
 from celery.utils.functional import first, maybe_list
 from celery.utils.imports import instantiate, symbol_by_name
-from celery.utils.objects import mro_lookup
+from celery.utils.objects import FallbackContext, mro_lookup
 
 from .annotations import prepare as prepare_annotations
-from .builtins import shared_task, load_shared_tasks
 from .defaults import DEFAULTS, find_deprecated_settings
 from .registry import TaskRegistry
 from .utils import (
     AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr,
 )
 
+# Load all builtin tasks
+from . import builtins  # noqa
+
 __all__ = ['Celery']
 
 _EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
@@ -58,6 +61,8 @@ and as such the configuration could not be loaded.
 Please set this variable and make it point to
 a configuration module."""
 
+_after_fork_registered = False
+
 
 def app_has_custom(app, attr):
     return mro_lookup(app.__class__, attr, stop=(Celery, object),
@@ -70,6 +75,29 @@ def _unpickle_appattr(reverse_name, args):
     return get_current_app()._rgetattr(reverse_name)(*args)
 
 
+def _global_after_fork():
+    # Previously every app would call:
+    #    `register_after_fork(app, app._after_fork)`
+    # but this created a leak as `register_after_fork` stores concrete object
+    # references and once registered an object cannot be removed without
+    # touching and iterating over the private afterfork registry list.
+    #
+    # See Issue #1949
+    from celery import _state
+    from multiprocessing.util import info
+    for app in _state.apps:
+        try:
+            app._after_fork()
+        except Exception as exc:
+            info('after forker raised exception: %r' % (exc, ), exc_info=1)
+
+
+def _ensure_after_fork():
+    global _after_fork_registered
+    _after_fork_registered = True
+    register_after_fork(_global_after_fork, _global_after_fork)
+
+
 class Celery(object):
     #: This is deprecated, use :meth:`reduce_keys` instead
     Pickler = AppPickler
@@ -89,11 +117,22 @@ class Celery(object):
     _pool = None
     builtin_fixups = BUILTIN_FIXUPS
 
+    #: Signal sent when app is loading configuration.
+    on_configure = None
+
+    #: Signal sent after app has prepared the configuration.
+    on_after_configure = None
+
+    #: Signal sent after app has been finalized.
+    on_after_finalize = None
+
+    #: ignored
+    accept_magic_kwargs = False
+
     def __init__(self, main=None, loader=None, backend=None,
                  amqp=None, events=None, log=None, control=None,
-                 set_as_current=True, accept_magic_kwargs=False,
-                 tasks=None, broker=None, include=None, changes=None,
-                 config_source=None, fixups=None, task_cls=None,
+                 set_as_current=True, tasks=None, broker=None, include=None,
+                 changes=None, config_source=None, fixups=None, task_cls=None,
                  autofinalize=True, **kwargs):
         self.clock = LamportClock()
         self.main = main
@@ -106,7 +145,6 @@ class Celery(object):
         self.task_cls = task_cls or self.task_cls
         self.set_as_current = set_as_current
         self.registry_cls = symbol_by_name(self.registry_cls)
-        self.accept_magic_kwargs = accept_magic_kwargs
         self.user_options = defaultdict(set)
         self.steps = defaultdict(set)
         self.autofinalize = autofinalize
@@ -143,11 +181,18 @@ class Celery(object):
         if self.set_as_current:
             self.set_current()
 
+        # Signals
+        if self.on_configure is None:
+            # used to be a method pre 3.2
+            self.on_configure = Signal()
+        self.on_after_configure = Signal()
+        self.on_after_finalize = Signal()
+
         self.on_init()
         _register_app(self)
 
     def set_current(self):
-        _tls.current_app = self
+        _set_current_app(self)
 
     def set_default(self):
         set_default_app(self)
@@ -183,8 +228,8 @@ class Celery(object):
             # a different task instance.  This makes sure it will always use
             # the task instance from the current app.
             # Really need a better solution for this :(
-            from . import shared_task as proxies_to_curapp
-            return proxies_to_curapp(*args, _force_evaluate=True, **opts)
+            from . import shared_task
+            return shared_task(*args, _force_evaluate=True, **opts)
 
         def inner_create_task_cls(shared=True, filter=None, **opts):
             _filt = filter  # stupid 2to3
@@ -193,13 +238,7 @@ class Celery(object):
                 if shared:
                     cons = lambda app: app._task_from_fun(fun, **opts)
                     cons.__name__ = fun.__name__
-                    shared_task(cons)
-                if self.accept_magic_kwargs:  # compat mode
-                    task = self._task_from_fun(fun, **opts)
-                    if filter:
-                        task = filter(task)
-                    return task
-
+                    connect_on_app_finalize(cons)
                 if self.finalized or opts.get('_force_evaluate'):
                     ret = self._task_from_fun(fun, **opts)
                 else:
@@ -231,11 +270,11 @@ class Celery(object):
 
         T = type(fun.__name__, (base, ), dict({
             'app': self,
-            'accept_magic_kwargs': False,
             'run': fun if bind else staticmethod(fun),
             '_decorated': True,
             '__doc__': fun.__doc__,
-            '__module__': fun.__module__}, **options))()
+            '__module__': fun.__module__,
+            '__wrapped__': fun}, **options))()
         task = self._tasks[T.name]  # return global instance.
         return task
 
@@ -245,7 +284,7 @@ class Celery(object):
                 if auto and not self.autofinalize:
                     raise RuntimeError('Contract breach: app not finalized')
                 self.finalized = True
-                load_shared_tasks(self)
+                _announce_app_finalized(self)
 
                 pending = self._pending
                 while pending:
@@ -254,6 +293,8 @@ class Celery(object):
                 for task in values(self._tasks):
                     task.bind(self)
 
+                self.on_after_finalize.send(sender=self)
+
     def add_defaults(self, fun):
         if not callable(fun):
             d, fun = fun, lambda: d
@@ -272,7 +313,8 @@ class Celery(object):
         if not module_name:
             if silent:
                 return False
-            raise ImproperlyConfigured(ERR_ENVVAR_NOT_SET.format(module_name))
+            raise ImproperlyConfigured(
+                ERR_ENVVAR_NOT_SET.format(variable_name))
         return self.config_from_object(module_name, silent=silent, force=force)
 
     def config_from_cmdline(self, argv, namespace='celery'):
@@ -300,26 +342,34 @@ class Celery(object):
                   eta=None, task_id=None, producer=None, connection=None,
                   router=None, result_cls=None, expires=None,
                   publisher=None, link=None, link_error=None,
-                  add_to_parent=True, reply_to=None, **options):
+                  add_to_parent=True, group_id=None, retries=0, chord=None,
+                  reply_to=None, time_limit=None, soft_time_limit=None,
+                  root_id=None, parent_id=None, **options):
+        amqp = self.amqp
         task_id = task_id or uuid()
         producer = producer or publisher  # XXX compat
-        router = router or self.amqp.router
+        router = router or amqp.router
         conf = self.conf
         if conf.CELERY_ALWAYS_EAGER:  # pragma: no cover
             warnings.warn(AlwaysEagerIgnored(
                 'CELERY_ALWAYS_EAGER has no effect on send_task',
             ), stacklevel=2)
         options = router.route(options, name, args, kwargs)
+
+        message = amqp.create_task_message(
+            task_id, name, args, kwargs, countdown, eta, group_id,
+            expires, retries, chord,
+            maybe_list(link), maybe_list(link_error),
+            reply_to or self.oid, time_limit, soft_time_limit,
+            self.conf.CELERY_SEND_TASK_SENT_EVENT,
+            root_id, parent_id,
+        )
+
         if connection:
-            producer = self.amqp.TaskProducer(connection)
+            producer = amqp.Producer(connection)
         with self.producer_or_acquire(producer) as P:
             self.backend.on_task_call(P, task_id)
-            task_id = P.publish_task(
-                name, args, kwargs, countdown=countdown, eta=eta,
-                task_id=task_id, expires=expires,
-                callbacks=maybe_list(link), errbacks=maybe_list(link_error),
-                reply_to=reply_to or self.oid, **options
-            )
+            amqp.send_task_message(P, name, message, **options)
         result = (result_cls or self.AsyncResult)(task_id)
         if add_to_parent:
             parent = get_current_worker_task()
@@ -355,27 +405,20 @@ class Celery(object):
         )
     broker_connection = connection
 
-    @contextmanager
-    def connection_or_acquire(self, connection=None, pool=True,
-                              *args, **kwargs):
-        if connection:
-            yield connection
-        else:
-            if pool:
-                with self.pool.acquire(block=True) as connection:
-                    yield connection
-            else:
-                with self.connection() as connection:
-                    yield connection
+    def _acquire_connection(self, pool=True):
+        """Helper for :meth:`connection_or_acquire`."""
+        if pool:
+            return self.pool.acquire(block=True)
+        return self.connection()
+
+    def connection_or_acquire(self, connection=None, pool=True, *_, **__):
+        return FallbackContext(connection, self._acquire_connection, pool=pool)
     default_connection = connection_or_acquire  # XXX compat
 
-    @contextmanager
     def producer_or_acquire(self, producer=None):
-        if producer:
-            yield producer
-        else:
-            with self.amqp.producer_pool.acquire(block=True) as producer:
-                yield producer
+        return FallbackContext(
+            producer, self.amqp.producer_pool.acquire, block=True,
+        )
     default_producer = producer_or_acquire  # XXX compat
 
     def prepare_config(self, c):
@@ -418,12 +461,12 @@ class Celery(object):
             self.loader)
         return backend(app=self, url=url)
 
-    def on_configure(self):
-        """Callback calld when the app loads configuration"""
-        pass
-
     def _get_config(self):
-        self.on_configure()
+        if isinstance(self.on_configure, Signal):
+            self.on_configure.send(sender=self)
+        else:
+            # used to be a method pre 3.2
+            self.on_configure()
         if self._config_source:
             self.loader.config_from_object(self._config_source)
         self.configured = True
@@ -437,6 +480,7 @@ class Celery(object):
         if self._preconf:
             for key, value in items(self._preconf):
                 setattr(s, key, value)
+        self.on_after_configure.send(sender=self, source=s)
         return s
 
     def _after_fork(self, obj_):
@@ -523,7 +567,6 @@ class Celery(object):
             'events': self.events_cls,
             'log': self.log_cls,
             'control': self.control_cls,
-            'accept_magic_kwargs': self.accept_magic_kwargs,
             'fixups': self.fixups,
             'config_source': self._config_source,
             'task_cls': self.task_cls,
@@ -534,7 +577,7 @@ class Celery(object):
         return (self.main, self.conf.changes,
                 self.loader_cls, self.backend_cls, self.amqp_cls,
                 self.events_cls, self.log_cls, self.control_cls,
-                self.accept_magic_kwargs, self._config_source)
+                False, self._config_source)
 
     @cached_property
     def Worker(self):
@@ -581,7 +624,7 @@ class Celery(object):
     @property
     def pool(self):
         if self._pool is None:
-            register_after_fork(self, self._after_fork)
+            _ensure_after_fork()
             limit = self.conf.BROKER_POOL_LIMIT
             self._pool = self.connection().Pool(limit=limit)
         return self._pool
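
Note: `on_configure` becomes a signal here and `on_after_configure`/`on_after_finalize` are added. A minimal sketch of connecting to them, assuming the usual decorator form of `Signal.connect` from `celery.utils.dispatch`:

    from celery import Celery

    app = Celery('example')

    @app.on_after_configure.connect
    def log_broker(sender, source, **kwargs):
        # `source` is the prepared Settings object sent by _get_config().
        print('configured with broker %r' % (source['BROKER_URL'], ))

    @app.on_after_finalize.connect
    def report_tasks(sender, **kwargs):
        # Fired once the pending task registry has been evaluated.
        print('finalized with %d tasks' % (len(sender.tasks), ))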

+ 38 - 235
celery/app/builtins.py

@@ -9,38 +9,15 @@
 """
 from __future__ import absolute_import
 
-from collections import deque
+from celery._state import get_current_worker_task, connect_on_app_finalize
+from celery.utils.log import get_logger
 
-from celery._state import get_current_worker_task
-from celery.utils import uuid
+__all__ = []
 
-__all__ = ['shared_task', 'load_shared_tasks']
+logger = get_logger(__name__)
 
-#: global list of functions defining tasks that should be
-#: added to all apps.
-_shared_tasks = set()
 
-
-def shared_task(constructor):
-    """Decorator that specifies a function that generates a built-in task.
-
-    The function will then be called for every new app instance created
-    (lazily, so more exactly when the task registry for that app is needed).
-
-    The function must take a single ``app`` argument.
-    """
-    _shared_tasks.add(constructor)
-    return constructor
-
-
-def load_shared_tasks(app):
-    """Create built-in tasks for an app instance."""
-    constructors = set(_shared_tasks)
-    for constructor in constructors:
-        constructor(app)
-
-
-@shared_task
+@connect_on_app_finalize
 def add_backend_cleanup_task(app):
     """The backend cleanup task can be used to clean up the default result
     backend.
@@ -57,14 +34,14 @@ def add_backend_cleanup_task(app):
     return backend_cleanup
 
 
-@shared_task
+@connect_on_app_finalize
 def add_unlock_chord_task(app):
     """This task is used by result backends without native chord support.
 
     It joins chords by creating a task chain polling the header for completion.
 
     """
-    from celery.canvas import signature
+    from celery.canvas import maybe_signature
     from celery.exceptions import ChordError
     from celery.result import allow_join_result, result_from_tuple
 
@@ -86,6 +63,7 @@ def add_unlock_chord_task(app):
             interval = unlock_chord.default_retry_delay
 
         # check if the task group is ready, and if so apply the callback.
+        callback = maybe_signature(callback, app)
         deps = GroupResult(
             group_id,
             [result_from_tuple(r, app=app) for r in result],
@@ -93,7 +71,7 @@ def add_unlock_chord_task(app):
         j = deps.join_native if deps.supports_native_join else deps.join
 
         if deps.ready():
-            callback = signature(callback, app=app)
+            callback = maybe_signature(callback, app=app)
             try:
                 with allow_join_result():
                     ret = j(timeout=3.0, propagate=propagate)
@@ -105,16 +83,17 @@ def add_unlock_chord_task(app):
                     )
                 except StopIteration:
                     reason = repr(exc)
-
-                app._tasks[callback.task].backend.fail_from_current_stack(
-                    callback.id, exc=ChordError(reason),
-                )
+                logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
+                app.backend.chord_error_from_stack(callback,
+                                                   ChordError(reason))
             else:
                 try:
                     callback.delay(ret)
                 except Exception as exc:
-                    app._tasks[callback.task].backend.fail_from_current_stack(
-                        callback.id,
+                    logger.error('Chord %r raised: %r', group_id, exc,
+                                 exc_info=1)
+                    app.backend.chord_error_from_stack(
+                        callback,
                         exc=ChordError('Callback error: {0!r}'.format(exc)),
                     )
         else:
@@ -123,7 +102,7 @@ def add_unlock_chord_task(app):
     return unlock_chord
 
 
-@shared_task
+@connect_on_app_finalize
 def add_map_task(app):
     from celery.canvas import signature
 
@@ -134,7 +113,7 @@ def add_map_task(app):
     return xmap
 
 
-@shared_task
+@connect_on_app_finalize
 def add_starmap_task(app):
     from celery.canvas import signature
 
@@ -145,7 +124,7 @@ def add_starmap_task(app):
     return xstarmap
 
 
-@shared_task
+@connect_on_app_finalize
 def add_chunk_task(app):
     from celery.canvas import chunks as _chunks
 
@@ -155,182 +134,58 @@ def add_chunk_task(app):
     return chunks
 
 
-@shared_task
+@connect_on_app_finalize
 def add_group_task(app):
+    """No longer used, but here for backwards compatibility."""
     _app = app
-    from celery.canvas import maybe_signature, signature
+    from celery.canvas import maybe_signature
     from celery.result import result_from_tuple
 
     class Group(app.Task):
         app = _app
         name = 'celery.group'
-        accept_magic_kwargs = False
         _decorated = True
 
-        def run(self, tasks, result, group_id, partial_args):
+        def run(self, tasks, result, group_id, partial_args,
+                add_to_parent=True):
             app = self.app
             result = result_from_tuple(result, app)
             # any partial args are added to all tasks in the group
-            taskit = (signature(task, app=app).clone(partial_args)
+            taskit = (maybe_signature(task, app=app).clone(partial_args)
                       for i, task in enumerate(tasks))
-            if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
-                return app.GroupResult(
-                    result.id,
-                    [stask.apply(group_id=group_id) for stask in taskit],
-                )
             with app.producer_or_acquire() as pub:
-                [stask.apply_async(group_id=group_id, publisher=pub,
+                [stask.apply_async(group_id=group_id, producer=pub,
                                    add_to_parent=False) for stask in taskit]
             parent = get_current_worker_task()
-            if parent:
+            if add_to_parent and parent:
                 parent.add_trail(result)
             return result
-
-        def prepare(self, options, tasks, args, **kwargs):
-            options['group_id'] = group_id = (
-                options.setdefault('task_id', uuid()))
-
-            def prepare_member(task):
-                task = maybe_signature(task, app=self.app)
-                task.options['group_id'] = group_id
-                return task, task.freeze()
-
-            try:
-                tasks, res = list(zip(
-                    *[prepare_member(task) for task in tasks]
-                ))
-            except ValueError:  # tasks empty
-                tasks, res = [], []
-            return (tasks, self.app.GroupResult(group_id, res), group_id, args)
-
-        def apply_async(self, partial_args=(), kwargs={}, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(partial_args, kwargs, **options)
-            tasks, result, gid, args = self.prepare(
-                options, args=partial_args, **kwargs
-            )
-            super(Group, self).apply_async((
-                list(tasks), result.as_tuple(), gid, args), **options
-            )
-            return result
-
-        def apply(self, args=(), kwargs={}, **options):
-            return super(Group, self).apply(
-                self.prepare(options, args=args, **kwargs),
-                **options).get()
     return Group
 
 
-@shared_task
+@connect_on_app_finalize
 def add_chain_task(app):
-    from celery.canvas import (
-        Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
-    )
-
+    """No longer used, but here for backwards compatibility."""
     _app = app
 
     class Chain(app.Task):
         app = _app
         name = 'celery.chain'
-        accept_magic_kwargs = False
         _decorated = True
 
-        def prepare_steps(self, args, tasks):
-            app = self.app
-            steps = deque(tasks)
-            next_step = prev_task = prev_res = None
-            tasks, results = [], []
-            i = 0
-            while steps:
-                # First task get partial args from chain.
-                task = maybe_signature(steps.popleft(), app=app)
-                task = task.clone() if i else task.clone(args)
-                res = task.freeze()
-                i += 1
-
-                if isinstance(task, group):
-                    task = maybe_unroll_group(task)
-                if isinstance(task, chain):
-                    # splice the chain
-                    steps.extendleft(reversed(task.tasks))
-                    continue
-
-                elif isinstance(task, group) and steps and \
-                        not isinstance(steps[0], group):
-                    # automatically upgrade group(..) | s to chord(group, s)
-                    try:
-                        next_step = steps.popleft()
-                        # for chords we freeze by pretending it's a normal
-                        # task instead of a group.
-                        res = Signature.freeze(next_step)
-                        task = chord(task, body=next_step, task_id=res.task_id)
-                    except IndexError:
-                        pass  # no callback, so keep as group
-                if prev_task:
-                    # link previous task to this task.
-                    prev_task.link(task)
-                    # set the results parent attribute.
-                    if not res.parent:
-                        res.parent = prev_res
-
-                if not isinstance(prev_task, chord):
-                    results.append(res)
-                    tasks.append(task)
-                prev_task, prev_res = task, res
-
-            return tasks, results
-
-        def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
-                        task_id=None, link=None, link_error=None, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            options.pop('publisher', None)
-            tasks, results = self.prepare_steps(args, kwargs['tasks'])
-            result = results[-1]
-            if group_id:
-                tasks[-1].set(group_id=group_id)
-            if chord:
-                tasks[-1].set(chord=chord)
-            if task_id:
-                tasks[-1].set(task_id=task_id)
-                result = tasks[-1].type.AsyncResult(task_id)
-            # make sure we can do a link() and link_error() on a chain object.
-            if link:
-                tasks[-1].set(link=link)
-            # and if any task in the chain fails, call the errbacks
-            if link_error:
-                for task in tasks:
-                    task.set(link_error=link_error)
-            tasks[0].apply_async(**options)
-            return result
-
-        def apply(self, args=(), kwargs={}, signature=maybe_signature,
-                  **options):
-            app = self.app
-            last, fargs = None, args  # fargs passed to first task only
-            for task in kwargs['tasks']:
-                res = signature(task, app=app).clone(fargs).apply(
-                    last and (last.get(), ),
-                )
-                res.parent, last, fargs = last, res, None
-            return last
     return Chain
 
 
-@shared_task
+@connect_on_app_finalize
 def add_chord_task(app):
-    """Every chord is executed in a dedicated task, so that the chord
-    can be used as a signature, and this generates the task
-    responsible for that."""
-    from celery import group
+    """No longer used, but here for backwards compatibility."""
+    from celery import group, chord as _chord
     from celery.canvas import maybe_signature
     _app = app
-    default_propagate = app.conf.CELERY_CHORD_PROPAGATES
 
     class Chord(app.Task):
         app = _app
         name = 'celery.chord'
-        accept_magic_kwargs = False
         ignore_result = False
         _decorated = True
 
@@ -338,65 +193,13 @@ def add_chord_task(app):
                 countdown=1, max_retries=None, propagate=None,
                 eager=False, **kwargs):
             app = self.app
-            propagate = default_propagate if propagate is None else propagate
-            group_id = uuid()
-            AsyncResult = app.AsyncResult
-            prepare_member = self._prepare_member
-
             # - convert back to group if serialized
             tasks = header.tasks if isinstance(header, group) else header
             header = group([
-                maybe_signature(s, app=app).clone() for s in tasks
-            ])
-            # - eager applies the group inline
-            if eager:
-                return header.apply(args=partial_args, task_id=group_id)
-
-            results = [AsyncResult(prepare_member(task, body, group_id))
-                       for task in header.tasks]
-
-            return self.backend.apply_chord(
-                header, partial_args, group_id,
-                body, interval=interval, countdown=countdown,
-                max_retries=max_retries, propagate=propagate, result=results,
-            )
-
-        def _prepare_member(self, task, body, group_id):
-            opts = task.options
-            # d.setdefault would work but generating uuid's are expensive
-            try:
-                task_id = opts['task_id']
-            except KeyError:
-                task_id = opts['task_id'] = uuid()
-            opts.update(chord=body, group_id=group_id)
-            return task_id
-
-        def apply_async(self, args=(), kwargs={}, task_id=None,
-                        group_id=None, chord=None, **options):
-            app = self.app
-            if app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            header = kwargs.pop('header')
-            body = kwargs.pop('body')
-            header, body = (maybe_signature(header, app=app),
-                            maybe_signature(body, app=app))
-            # forward certain options to body
-            if chord is not None:
-                body.options['chord'] = chord
-            if group_id is not None:
-                body.options['group_id'] = group_id
-            [body.link(s) for s in options.pop('link', [])]
-            [body.link_error(s) for s in options.pop('link_error', [])]
-            body_result = body.freeze(task_id)
-            parent = super(Chord, self).apply_async((header, body, args),
-                                                    kwargs, **options)
-            body_result.parent = parent
-            return body_result
-
-        def apply(self, args=(), kwargs={}, propagate=True, **options):
-            body = kwargs['body']
-            res = super(Chord, self).apply(args, dict(kwargs, eager=True),
-                                           **options)
-            return maybe_signature(body, app=self.app).apply(
-                args=(res.get(propagate=propagate).get(), ))
+                maybe_signature(s, app=app) for s in tasks
+            ], app=self.app)
+            body = maybe_signature(body, app=app)
+            ch = _chord(header, body)
+            return ch.run(header, body, partial_args, app, interval,
+                          countdown, max_retries, propagate, **kwargs)
     return Chord
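
Note: with `shared_task`/`load_shared_tasks` removed, per-app built-in tasks now register through `connect_on_app_finalize`, as every constructor above does. A hedged sketch of registering a custom constructor the same way (the task name `custom.ping` is made up for illustration):

    from celery._state import connect_on_app_finalize

    @connect_on_app_finalize
    def add_ping_task(app):
        # Called once for every Celery app when its task registry is
        # finalized, mirroring how the built-in tasks above are added.
        @app.task(name='custom.ping', shared=False, _force_evaluate=True)
        def ping():
            return 'pong'
        return ping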

+ 11 - 11
celery/app/control.py

@@ -15,26 +15,27 @@ from kombu.pidbox import Mailbox
 from kombu.utils import cached_property
 
 from celery.exceptions import DuplicateNodenameWarning
+from celery.utils.text import pluralize
 
 __all__ = ['Inspect', 'Control', 'flatten_reply']
 
 W_DUPNODE = """\
-Received multiple replies from node name {0!r}.
+Received multiple replies from node {0}: {1}.
 Please make sure you give each node a unique nodename using the `-n` option.\
 """
 
 
 def flatten_reply(reply):
-    nodes = {}
-    seen = set()
+    nodes, dupes = {}, set()
     for item in reply:
-        dup = next((nodename in seen for nodename in item), None)
-        if dup:
-            warnings.warn(DuplicateNodenameWarning(
-                W_DUPNODE.format(dup),
-            ))
-        seen.update(item)
+        [dupes.add(name) for name in item if name in nodes]
         nodes.update(item)
+    if dupes:
+        warnings.warn(DuplicateNodenameWarning(
+            W_DUPNODE.format(
+                pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
+            ),
+        ))
     return nodes
 
 
@@ -125,8 +126,7 @@ class Control(object):
 
     def __init__(self, app=None):
         self.app = app
-        self.mailbox = self.Mailbox('celery', type='fanout',
-                                    accept=self.app.conf.CELERY_ACCEPT_CONTENT)
+        self.mailbox = self.Mailbox('celery', type='fanout', accept=['json'])
 
     @cached_property
     def inspect(self):
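
Note: the reworked `flatten_reply` now collects every duplicated node name and warns once. A small illustration with made-up reply payloads:

    from celery.app.control import flatten_reply

    # Two workers accidentally sharing the nodename 'worker1@host'.
    reply = [
        {'worker1@host': {'ok': 'pong'}},
        {'worker1@host': {'ok': 'pong'}},
        {'worker2@host': {'ok': 'pong'}},
    ]
    # Emits a single DuplicateNodenameWarning naming 'worker1@host'
    # and returns the merged mapping of node name -> reply.
    nodes = flatten_reply(reply)
    assert set(nodes) == {'worker1@host', 'worker2@host'}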

+ 4 - 2
celery/app/defaults.py

@@ -124,7 +124,7 @@ NAMESPACES = {
         'IMPORTS': Option((), type='tuple'),
         'INCLUDE': Option((), type='tuple'),
         'IGNORE_RESULT': Option(False, type='bool'),
-        'MAX_CACHED_RESULTS': Option(5000, type='int'),
+        'MAX_CACHED_RESULTS': Option(100, type='int'),
         'MESSAGE_COMPRESSION': Option(type='string'),
         'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
         'REDIS_HOST': Option(type='string', **_REDIS_OLD),
@@ -146,6 +146,7 @@ NAMESPACES = {
         'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
         'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
         'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
+        'TASK_PROTOCOL': Option(1, type='int'),
         'TASK_PUBLISH_RETRY': Option(True, type='bool'),
         'TASK_PUBLISH_RETRY_POLICY': Option({
             'max_retries': 3,
@@ -196,6 +197,7 @@ NAMESPACES = {
         'SCHEDULE': Option({}, type='dict'),
         'SCHEDULER': Option('celery.beat:PersistentScheduler'),
         'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
+        'SYNC_EVERY': Option(0, type='int'),
         'MAX_LOOP_INTERVAL': Option(0, type='float'),
         'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
                             alt='--loglevel argument'),
@@ -232,7 +234,7 @@ def flatten(d, ns=''):
                 stack.append((name + key + '_', value))
             else:
                 yield name + key, value
-DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
+DEFAULTS = {key: value.default for key, value in flatten(NAMESPACES)}
 
 
 def find_deprecated_settings(source):
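
Note: this hunk adds the `CELERY_TASK_PROTOCOL` and `CELERYBEAT_SYNC_EVERY` settings and lowers the `CELERY_MAX_CACHED_RESULTS` default from 5000 to 100. A configuration sketch exercising the new keys (values are arbitrary examples):

    from celery import Celery

    app = Celery('example')
    app.conf.update(
        CELERY_TASK_PROTOCOL=2,         # opt in to the new message protocol
        CELERYBEAT_SYNC_EVERY=10,       # sync the beat schedule every 10 tasks
        CELERY_MAX_CACHED_RESULTS=500,  # raise the new, smaller default of 100
    )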

+ 12 - 7
celery/app/log.py

@@ -24,7 +24,7 @@ from kombu.utils.encoding import set_default_encoding_file
 from celery import signals
 from celery._state import get_current_task
 from celery.five import class_property, string_t
-from celery.utils import isatty
+from celery.utils import isatty, node_format
 from celery.utils.log import (
     get_logger, mlevel,
     ColorFormatter, ensure_process_aware_logger,
@@ -65,9 +65,9 @@ class Logging(object):
         self.colorize = self.app.conf.CELERYD_LOG_COLOR
 
     def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
-              redirect_level='WARNING', colorize=None):
+              redirect_level='WARNING', colorize=None, hostname=None):
         handled = self.setup_logging_subsystem(
-            loglevel, logfile, colorize=colorize,
+            loglevel, logfile, colorize=colorize, hostname=hostname,
         )
         if not handled:
             if redirect_stdouts:
@@ -87,10 +87,12 @@ class Logging(object):
             CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
         )
 
-    def setup_logging_subsystem(self, loglevel=None, logfile=None,
-                                format=None, colorize=None, **kwargs):
+    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
+                                colorize=None, hostname=None, **kwargs):
         if self.already_setup:
             return
+        if logfile and hostname:
+            logfile = node_format(logfile, hostname)
         self.already_setup = True
         loglevel = mlevel(loglevel or self.loglevel)
         format = format or self.format
@@ -107,6 +109,9 @@ class Logging(object):
 
             if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                 root.handlers = []
+                get_logger('celery').handlers = []
+                get_logger('celery.task').handlers = []
+                get_logger('celery.redirected').handlers = []
 
             # Configure root logger
             self._configure_logger(
@@ -228,8 +233,8 @@ class Logging(object):
         return WatchedFileHandler(logfile)
 
     def _has_handler(self, logger):
-        return (logger.handlers and
-                not isinstance(logger.handlers[0], NullHandler))
+        if logger.handlers:
+            return any(not isinstance(h, NullHandler) for h in logger.handlers)
 
     def _is_configured(self, logger):
         return self._has_handler(logger) and not getattr(
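
Note: `setup_logging_subsystem` now expands node-name format specifiers in the log file name when a `hostname` is passed. A sketch assuming `node_format` substitutes `%n` with the name part of the nodename (as elsewhere in Celery) and that the target directory exists:

    from celery import Celery

    app = Celery('example')

    # With the hunk above, %n in the log file name is expanded from the
    # hostname, so this would log to 'logs/worker1.log'.
    app.log.setup_logging_subsystem(
        loglevel='INFO',
        logfile='logs/%n.log',
        hostname='worker1@example.com',
    )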

+ 2 - 2
celery/app/registry.py

@@ -57,8 +57,8 @@ class TaskRegistry(dict):
         return self.filter_types('periodic')
 
     def filter_types(self, type):
-        return dict((name, task) for name, task in items(self)
-                    if getattr(task, 'type', 'regular') == type)
+        return {name: task for name, task in items(self)
+                if getattr(task, 'type', 'regular') == type}
 
 
 def _unpickle_task(name):

+ 50 - 42
celery/app/task.py

@@ -20,7 +20,7 @@ from celery.exceptions import MaxRetriesExceededError, Reject, Retry
 from celery.five import class_property, items, with_metaclass
 from celery.local import Proxy
 from celery.result import EagerResult
-from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, uuid, maybe_reraise
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.mail import ErrorMail
@@ -93,6 +93,8 @@ class Context(object):
     headers = None
     delivery_info = None
     reply_to = None
+    root_id = None
+    parent_id = None
     correlation_id = None
     taskset = None   # compat alias to group
     group = None
@@ -176,14 +178,14 @@ class TaskType(type):
             # Hairy stuff,  here to be compatible with 2.x.
             # People should not use non-abstract task classes anymore,
             # use the task decorator.
-            from celery.app.builtins import shared_task
+            from celery._state import connect_on_app_finalize
             unique_name = '.'.join([task_module, name])
             if unique_name not in cls._creation_count:
                 # the creation count is used as a safety
                 # so that the same task is not added recursively
                 # to the set of constructors.
                 cls._creation_count[unique_name] = 1
-                shared_task(_CompatShared(
+                connect_on_app_finalize(_CompatShared(
                     unique_name,
                     lambda app: TaskType.__new__(cls, name, bases,
                                                  dict(attrs, _app=app)),
@@ -235,10 +237,6 @@ class Task(object):
     #: If :const:`True` the task is an abstract base class.
     abstract = True
 
-    #: If disabled the worker will not forward magic keyword arguments.
-    #: Deprecated and scheduled for removal in v4.0.
-    accept_magic_kwargs = False
-
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: it will **never** stop retrying.
     max_retries = 3
@@ -313,7 +311,7 @@ class Task(object):
     #: :setting:`CELERY_ACKS_LATE` setting.
     acks_late = None
 
-    #: List/tuple of expected exceptions.
+    #: Tuple of expected exceptions.
     #:
     #: These are errors that are expected in normal operation
     #: and that should not be regarded as a real error by the worker.
@@ -343,6 +341,11 @@ class Task(object):
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )
 
+    #: ignored
+    accept_magic_kwargs = False
+
+    _backend = None  # set by backend property.
+
     __bound__ = False
 
     # - Tasks are lazily bound, so that configuration is not set
@@ -358,9 +361,6 @@ class Task(object):
         for attr_name, config_name in self.from_config:
             if getattr(self, attr_name, None) is None:
                 setattr(self, attr_name, conf[config_name])
-        if self.accept_magic_kwargs is None:
-            self.accept_magic_kwargs = app.accept_magic_kwargs
-        self.backend = app.backend
 
         # decorate with annotations from config.
         if not was_bound:
@@ -524,7 +524,7 @@ class Task(object):
         :keyword link_error: A single, or a list of tasks to apply
                       if an error occurs while executing the task.
 
-        :keyword producer: :class:~@amqp.TaskProducer` instance to use.
+        :keyword producer: :class:`~@kombu.Producer` instance to use.
         :keyword add_to_parent: If set to True (default) and the task
             is applied while executing another task, then the result
             will be appended to the parent tasks ``request.children``
@@ -554,13 +554,13 @@ class Task(object):
             **dict(self._get_exec_options(), **options)
         )
 
-    def subtask_from_request(self, request=None, args=None, kwargs=None,
-                             **extra_options):
+    def signature_from_request(self, request=None, args=None, kwargs=None,
+                               queue=None, **extra_options):
         request = self.request if request is None else request
         args = request.args if args is None else args
         kwargs = request.kwargs if kwargs is None else kwargs
         limit_hard, limit_soft = request.timelimit or (None, None)
-        options = dict({
+        options = {
             'task_id': request.id,
             'link': request.callbacks,
             'link_error': request.errbacks,
@@ -568,8 +568,14 @@ class Task(object):
             'chord': request.chord,
             'soft_time_limit': limit_soft,
             'time_limit': limit_hard,
-        }, **request.delivery_info or {})
-        return self.subtask(args, kwargs, options, type=self, **extra_options)
+        }
+        options.update(
+            {'queue': queue} if queue else (request.delivery_info or {})
+        )
+        return self.signature(
+            args, kwargs, options, type=self, **extra_options
+        )
+    subtask_from_request = signature_from_request
 
     def retry(self, args=None, kwargs=None, exc=None, throw=True,
               eta=None, countdown=None, max_retries=None, **options):
@@ -643,7 +649,7 @@ class Task(object):
             countdown = self.default_retry_delay
 
         is_eager = request.is_eager
-        S = self.subtask_from_request(
+        S = self.signature_from_request(
             request, args, kwargs,
             countdown=countdown, eta=eta, retries=retries,
             **options
@@ -686,7 +692,7 @@ class Task(object):
 
         """
         # trace imports Task, so need to import inline.
-        from celery.app.trace import eager_trace_task
+        from celery.app.trace import build_tracer
 
         app = self._get_app()
         args = args or ()
@@ -709,28 +715,18 @@ class Task(object):
                    'loglevel': options.get('loglevel', 0),
                    'callbacks': maybe_list(link),
                    'errbacks': maybe_list(link_error),
+                   'headers': options.get('headers'),
                    'delivery_info': {'is_eager': True}}
-        if self.accept_magic_kwargs:
-            default_kwargs = {'task_name': task.name,
-                              'task_id': task_id,
-                              'task_retries': retries,
-                              'task_is_eager': True,
-                              'logfile': options.get('logfile'),
-                              'loglevel': options.get('loglevel', 0),
-                              'delivery_info': {'is_eager': True}}
-            supported_keys = fun_takes_kwargs(task.run, default_kwargs)
-            extend_with = dict((key, val)
-                               for key, val in items(default_kwargs)
-                               if key in supported_keys)
-            kwargs.update(extend_with)
-
         tb = None
-        retval, info = eager_trace_task(task, task_id, args, kwargs,
-                                        app=self._get_app(),
-                                        request=request, propagate=throw)
+        tracer = build_tracer(
+            task.name, task, eager=True,
+            propagate=throw, app=self._get_app(),
+        )
+        ret = tracer(task_id, args, kwargs, request)
+        retval = ret.retval
         if isinstance(retval, ExceptionInfo):
             retval, tb = retval.exception, retval.traceback
-        state = states.SUCCESS if info is None else info.state
+        state = states.SUCCESS if ret.info is None else ret.info.state
         return EagerResult(task_id, retval, state, traceback=tb)
 
     def AsyncResult(self, task_id, **kwargs):
@@ -742,20 +738,21 @@ class Task(object):
         return self._get_app().AsyncResult(task_id, backend=self.backend,
                                            task_name=self.name, **kwargs)
 
-    def subtask(self, args=None, *starargs, **starkwargs):
+    def signature(self, args=None, *starargs, **starkwargs):
         """Return :class:`~celery.signature` object for
         this task, wrapping arguments and execution options
         for a single task invocation."""
         starkwargs.setdefault('app', self.app)
         return signature(self, args, *starargs, **starkwargs)
+    subtask = signature
 
     def s(self, *args, **kwargs):
-        """``.s(*a, **k) -> .subtask(a, k)``"""
-        return self.subtask(args, kwargs)
+        """``.s(*a, **k) -> .signature(a, k)``"""
+        return self.signature(args, kwargs)
 
     def si(self, *args, **kwargs):
-        """``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
-        return self.subtask(args, kwargs, immutable=True)
+        """``.si(*a, **k) -> .signature(a, k, immutable=True)``"""
+        return self.signature(args, kwargs, immutable=True)
 
     def chunks(self, it, n):
         """Creates a :class:`~celery.canvas.chunks` task for this task."""
@@ -899,6 +896,17 @@ class Task(object):
             self._exec_options = extract_exec_options(self)
         return self._exec_options
 
+    @property
+    def backend(self):
+        backend = self._backend
+        if backend is None:
+            return self.app.backend
+        return backend
+
+    @backend.setter
+    def backend(self, value):  # noqa
+        self._backend = value
+
     @property
     def __name__(self):
         return self.__class__.__name__

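A usage sketch of the renamed API above, assuming the post-merge celery package is importable; the in-memory broker/backend URLs only keep the example self-contained and no message is actually sent.

    from celery import Celery

    app = Celery('demo', broker='memory://', backend='cache+memory://')

    @app.task
    def add(x, y):
        return x + y

    sig = add.signature((2, 2), countdown=10)   # new canonical spelling
    old = add.subtask((2, 2), countdown=10)     # kept as an alias
    print(sig == old)                           # True: both build the same signature

    print(add.s(2, 2))     # shorthand for add.signature((2, 2))
    print(add.si(2, 2))    # immutable variant: parent results are not passed in

    # Task.backend is now a property that falls back to app.backend until a
    # task-specific backend is assigned.
    print(add.backend is app.backend)           # True by default
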
+ 210 - 39
celery/app/trace.py

@@ -15,33 +15,84 @@ from __future__ import absolute_import
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # so instead we now use one closure per task class.
 
+import logging
 import os
 import socket
 import sys
 
+from collections import namedtuple
 from warnings import warn
 
 from billiard.einfo import ExceptionInfo
 from kombu.exceptions import EncodeError
-from kombu.utils import kwdict
+from kombu.serialization import loads as loads_message, prepare_accept_content
+from kombu.utils.encoding import safe_repr, safe_str
 
-from celery import current_app
+from celery import current_app, group
 from celery import states, signals
 from celery._state import _task_stack
 from celery.app import set_default_app
 from celery.app.task import Task as BaseTask, Context
-from celery.exceptions import Ignore, Reject, Retry
+from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
+from celery.five import monotonic
 from celery.utils.log import get_logger
 from celery.utils.objects import mro_lookup
 from celery.utils.serialization import (
-    get_pickleable_exception,
-    get_pickleable_etype,
+    get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
 )
+from celery.utils.text import truncate
 
-__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
+__all__ = ['TraceInfo', 'build_tracer', 'trace_task',
            'setup_worker_optimizations', 'reset_worker_optimizations']
 
-_logger = get_logger(__name__)
+logger = get_logger(__name__)
+info = logger.info
+
+#: Format string used to log task success.
+LOG_SUCCESS = """\
+Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
+"""
+
+#: Format string used to log task failure.
+LOG_FAILURE = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task internal error.
+LOG_INTERNAL_ERROR = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task ignored.
+LOG_IGNORED = """\
+Task %(name)s[%(id)s] %(description)s\
+"""
+
+#: Format string used to log task rejected.
+LOG_REJECTED = """\
+Task %(name)s[%(id)s] %(exc)s\
+"""
+
+#: Format string used to log task retry.
+LOG_RETRY = """\
+Task %(name)s[%(id)s] retry: %(exc)s\
+"""
+
+log_policy_t = namedtuple(
+    'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
+)
+
+log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
+log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
+log_policy_internal = log_policy_t(
+    LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
+)
+log_policy_expected = log_policy_t(
+    LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
+)
+log_policy_unexpected = log_policy_t(
+    LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
+)
 
 send_prerun = signals.task_prerun.send
 send_postrun = signals.task_postrun.send
@@ -56,9 +107,11 @@ EXCEPTION_STATES = states.EXCEPTION_STATES
 IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
 
 #: set by :func:`setup_worker_optimizations`
-_tasks = None
+_localized = []
 _patched = {}
 
+trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
+
 
 def task_has_custom(task, attr):
     """Return true if the task or one of its bases
@@ -67,6 +120,19 @@ def task_has_custom(task, attr):
                       monkey_patched=['celery.app.task'])
 
 
+def get_log_policy(task, einfo, exc):
+    if isinstance(exc, Reject):
+        return log_policy_reject
+    elif isinstance(exc, Ignore):
+        return log_policy_ignore
+    elif einfo.internal:
+        return log_policy_internal
+    else:
+        if task.throws and isinstance(exc, task.throws):
+            return log_policy_expected
+        return log_policy_unexpected
+
+
 class TraceInfo(object):
     __slots__ = ('state', 'retval')
 
@@ -84,6 +150,12 @@ class TraceInfo(object):
             FAILURE: self.handle_failure,
         }[self.state](task, store_errors=store_errors)
 
+    def handle_reject(self, task, **kwargs):
+        self._log_error(task, ExceptionInfo())
+
+    def handle_ignore(self, task, **kwargs):
+        self._log_error(task, ExceptionInfo())
+
     def handle_retry(self, task, store_errors=True):
         """Handle retry exception."""
         # the exception raised is the Retry semi-predicate,
@@ -100,6 +172,10 @@ class TraceInfo(object):
             task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
             signals.task_retry.send(sender=task, request=req,
                                     reason=reason, einfo=einfo)
+            info(LOG_RETRY, {
+                'id': req.id, 'name': task.name,
+                'exc': safe_repr(reason.exc),
+            })
             return einfo
         finally:
             del(tb)
@@ -123,14 +199,47 @@ class TraceInfo(object):
                                       kwargs=req.kwargs,
                                       traceback=tb,
                                       einfo=einfo)
+            self._log_error(task, einfo)
             return einfo
         finally:
             del(tb)
 
+    def _log_error(self, task, einfo):
+        req = task.request
+        eobj = einfo.exception = get_pickled_exception(einfo.exception)
+        exception, traceback, exc_info, sargs, skwargs = (
+            safe_repr(eobj),
+            safe_str(einfo.traceback),
+            einfo.exc_info,
+            safe_repr(req.args),
+            safe_repr(req.kwargs),
+        )
+        policy = get_log_policy(task, einfo, eobj)
+
+        context = {
+            'hostname': req.hostname,
+            'id': req.id,
+            'name': task.name,
+            'exc': exception,
+            'traceback': traceback,
+            'args': sargs,
+            'kwargs': skwargs,
+            'description': policy.description,
+            'internal': einfo.internal,
+        }
+
+        logger.log(policy.severity, policy.format.strip(), context,
+                   exc_info=exc_info if policy.traceback else None,
+                   extra={'data': context})
+
+        if policy.mail:
+            task.send_error_email(context, einfo.exception)
+
 
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
-                 IGNORE_STATES=IGNORE_STATES):
+                 monotonic=monotonic, truncate=truncate,
+                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
     """Return a function that traces task execution; catches all
     exceptions and updates result backend with the state and result
 
@@ -186,6 +295,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     push_task = _task_stack.push
     pop_task = _task_stack.pop
     on_chord_part_return = backend.on_chord_part_return
+    _does_info = logger.isEnabledFor(logging.INFO)
 
     prerun_receivers = signals.task_prerun.receivers
     postrun_receivers = signals.task_postrun.receivers
@@ -200,13 +310,17 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         I = Info(state, exc)
         R = I.handle_error_state(task, eager=eager)
         if call_errbacks:
-            [signature(errback, app=app).apply_async((uuid, ))
-             for errback in request.errbacks or []]
+            group(
+                [signature(errback, app=app)
+                 for errback in request.errbacks or []], app=app,
+            ).apply_async((uuid, ))
         return I, R, I.state, I.retval
 
     def trace_task(uuid, args, kwargs, request=None):
         # R      - is the possibly prepared return value.
         # I      - is the Info object.
+        # T      - runtime
+        # Rstr   - textual representation of return value
         # retval - is the always unmodified return value.
         # state  - is the resulting task state.
 
@@ -214,9 +328,14 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         # for performance reasons, and because the function is so long
         # we want the main variables (I, and R) to stand out visually from the
         # the rest of the variables, so breaking PEP8 is worth it ;)
-        R = I = retval = state = None
-        kwargs = kwdict(kwargs)
+        R = I = T = Rstr = retval = state = None
+        time_start = monotonic()
         try:
+            try:
+                kwargs.items
+            except AttributeError:
+                raise InvalidTaskError(
+                    'Task keyword arguments is not a mapping')
             push_task(task)
             task_request = Context(request or {}, args=args,
                                    called_directly=False, kwargs=kwargs)
@@ -240,9 +359,11 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 except Reject as exc:
                     I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                     state, retval = I.state, I.retval
+                    I.handle_reject(task)
                 except Ignore as exc:
                     I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                     state, retval = I.state, I.retval
+                    I.handle_ignore(task)
                 except Retry as exc:
                     I, R, state, retval = on_error(
                         task_request, exc, uuid, RETRY, call_errbacks=False,
@@ -255,8 +376,27 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     try:
                         # callback tasks must be applied before the result is
                         # stored, so that result.children is populated.
-                        [signature(callback, app=app).apply_async((retval, ))
-                            for callback in task_request.callbacks or []]
+
+                        # groups are called inline and will store trail
+                        # separately, so need to call them separately
+                        # so that the trail's not added multiple times :(
+                        # (Issue #1936)
+                        callbacks = task.request.callbacks
+                        if callbacks:
+                            if len(task.request.callbacks) > 1:
+                                sigs, groups = [], []
+                                for sig in callbacks:
+                                    sig = signature(sig, app=app)
+                                    if isinstance(sig, group):
+                                        groups.append(sig)
+                                    else:
+                                        sigs.append(sig)
+                                for group_ in groups:
+                                    group_.apply_async((retval, ))
+                                if sigs:
+                                    group(sigs).apply_async((retval, ))
+                            else:
+                                signature(callbacks[0], app=app).delay(retval)
                         if publish_result:
                             store_result(
                                 uuid, retval, SUCCESS, request=task_request,
@@ -268,11 +408,18 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                             task_on_success(retval, uuid, args, kwargs)
                         if success_receivers:
                             send_success(sender=task, result=retval)
+                        if _does_info:
+                            T = monotonic() - time_start
+                            Rstr = truncate(safe_repr(R), 256)
+                            info(LOG_SUCCESS, {
+                                'id': uuid, 'name': name,
+                                'return_value': Rstr, 'runtime': T,
+                            })
 
                 # -* POST *-
                 if state not in IGNORE_STATES:
                     if task_request.chord:
-                        on_chord_part_return(task)
+                        on_chord_part_return(task, state, R)
                     if task_after_return:
                         task_after_return(
                             state, retval, uuid, args, kwargs, None,
@@ -293,15 +440,15 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                         except (KeyboardInterrupt, SystemExit, MemoryError):
                             raise
                         except Exception as exc:
-                            _logger.error('Process cleanup failed: %r', exc,
-                                          exc_info=True)
+                            logger.error('Process cleanup failed: %r', exc,
+                                         exc_info=True)
         except MemoryError:
             raise
         except Exception as exc:
             if eager:
                 raise
             R = report_internal_error(task, exc)
-        return R, I
+        return trace_ok_t(R, I, T, Rstr)
 
     return trace_task
 
@@ -310,33 +457,55 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
     try:
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
-        return task.__trace__(uuid, args, kwargs, request)[0]
+        return task.__trace__(uuid, args, kwargs, request)
     except Exception as exc:
         return report_internal_error(task, exc)
 
 
-def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
-    return trace_task((app or current_app).tasks[name],
-                      uuid, args, kwargs, request, app=app, **opts)
+def _trace_task_ret(name, uuid, request, body, content_type,
+                    content_encoding, loads=loads_message, app=None,
+                    **extra_request):
+    app = app or current_app._get_current_object()
+    accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+    args, kwargs = loads(body, content_type, content_encoding, accept=accept)
+    request.update(args=args, kwargs=kwargs, **extra_request)
+    R, I, T, Rstr = trace_task(app.tasks[name],
+                               uuid, args, kwargs, request, app=app)
+    return (1, R, T) if I else (0, Rstr, T)
 trace_task_ret = _trace_task_ret
 
 
-def _fast_trace_task(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
     # setup_worker_optimizations will point trace_task_ret to here,
     # so this is the function used in the worker.
-    return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
-
-
-def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
-    opts.setdefault('eager', True)
-    return build_tracer(task.name, task, **opts)(
-        uuid, args, kwargs, request)
+    tasks, _ = _loc
+    R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)
+    # exception instance if error, else result text
+    return (1, R, T) if I else (0, Rstr, T)
+
+
+def _fast_trace_task(task, uuid, request, body, content_type,
+                     content_encoding, loads=loads_message, _loc=_localized,
+                     hostname=None, **_):
+    tasks, accept = _loc
+    if content_type:
+        args, kwargs = loads(body, content_type, content_encoding,
+                             accept=accept)
+    else:
+        args, kwargs = body
+    request.update({
+        'args': args, 'kwargs': kwargs, 'hostname': hostname,
+    })
+    R, I, T, Rstr = tasks[task].__trace__(
+        uuid, args, kwargs, request,
+    )
+    return (1, R, T) if I else (0, Rstr, T)
 
 
 def report_internal_error(task, exc):
     _type, _value, _tb = sys.exc_info()
     try:
-        _value = task.backend.prepare_exception(exc)
+        _value = task.backend.prepare_exception(exc, 'pickle')
         exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
         warn(RuntimeWarning(
             'Exception raised outside body: {0!r}:\n{1}'.format(
@@ -347,7 +516,6 @@ def report_internal_error(task, exc):
 
 
 def setup_worker_optimizations(app):
-    global _tasks
     global trace_task_ret
 
     # make sure custom Task.__call__ methods that calls super
@@ -367,12 +535,15 @@ def setup_worker_optimizations(app):
     app.finalize()
 
     # set fast shortcut to task registry
-    _tasks = app._tasks
+    _localized[:] = [
+        app._tasks,
+        prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+    ]
 
     trace_task_ret = _fast_trace_task
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _fast_trace_task
-    job_module.__optimize__()
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _fast_trace_task
+    request_module.__optimize__()
 
 
 def reset_worker_optimizations():
@@ -386,8 +557,8 @@ def reset_worker_optimizations():
         BaseTask.__call__ = _patched.pop('BaseTask.__call__')
     except KeyError:
         pass
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _trace_task_ret
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _trace_task_ret
 
 
 def _install_stack_protection():

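A reduced, self-contained sketch of the log-policy selection added above; the namedtuple and the two policies mirror the constants in the hunk, while DemoTask and policy_for are local stand-ins so the snippet runs without a worker.

    import logging
    from collections import namedtuple

    log_policy_t = namedtuple(
        'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'))
    log_policy_expected = log_policy_t('...', 'raised expected', logging.INFO, 0, 0)
    log_policy_unexpected = log_policy_t('...', 'raised unexpected', logging.ERROR, 1, 1)

    class DemoTask(object):
        throws = (KeyError,)   # exceptions considered part of normal operation

    def policy_for(task, exc):
        # only the expected/unexpected branch of get_log_policy() is shown
        if task.throws and isinstance(exc, task.throws):
            return log_policy_expected
        return log_policy_unexpected

    print(policy_for(DemoTask(), KeyError('missing')).description)   # raised expected
    print(policy_for(DemoTask(), RuntimeError('boom')).severity)     # 40 (ERROR)
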
+ 15 - 10
celery/app/utils.py

@@ -15,6 +15,8 @@ import re
 from collections import Mapping
 from types import ModuleType
 
+from kombu.utils.url import maybe_sanitize_url
+
 from celery.datastructures import ConfigurationView
 from celery.five import items, string_t, values
 from celery.platforms import pyimplementation
@@ -117,11 +119,11 @@ class Settings(ConfigurationView):
 
     def table(self, with_defaults=False, censored=True):
         filt = filter_hidden_settings if censored else lambda v: v
-        return filt(dict(
-            (k, v) for k, v in items(
+        return filt({
+            k: v for k, v in items(
                 self if with_defaults else self.without_defaults())
             if k.isupper() and not k.startswith('_')
-        ))
+        })
 
     def humanize(self, with_defaults=False, censored=True):
         """Return a human readable string showing changes to the
@@ -152,7 +154,6 @@ class AppPickler(object):
         return dict(main=main, loader=loader, backend=backend, amqp=amqp,
                     changes=changes, events=events, log=log, control=control,
                     set_as_current=False,
-                    accept_magic_kwargs=accept_magic_kwargs,
                     config_source=config_source)
 
     def construct(self, cls, **kwargs):
@@ -175,14 +176,18 @@ def filter_hidden_settings(conf):
     def maybe_censor(key, value, mask='*' * 8):
         if isinstance(value, Mapping):
             return filter_hidden_settings(value)
-        if isinstance(value, string_t) and HIDDEN_SETTINGS.search(key):
-            return mask
-        if isinstance(key, string_t) and 'BROKER_URL' in key.upper():
-            from kombu import Connection
-            return Connection(value).as_uri(mask=mask)
+        if isinstance(key, string_t):
+            if HIDDEN_SETTINGS.search(key):
+                return mask
+            elif 'BROKER_URL' in key.upper():
+                from kombu import Connection
+                return Connection(value).as_uri(mask=mask)
+            elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'):
+                return maybe_sanitize_url(value, mask=mask)
+
         return value
 
-    return dict((k, maybe_censor(k, v)) for k, v in items(conf))
+    return {k: maybe_censor(k, v) for k, v in items(conf)}
 
 
 def bugreport(app):

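A usage sketch for the extended censoring above, assuming the post-merge celery (and therefore kombu's maybe_sanitize_url) is importable; the settings dict is made up for the demo.

    from celery.app.utils import filter_hidden_settings

    conf = {
        'BROKER_URL': 'amqp://guest:guest@localhost:5672//',
        'CELERY_RESULT_BACKEND': 'redis://:secret@localhost:6379/0',
        'API_KEY': 'super-secret',
        'CELERYD_CONCURRENCY': 4,
    }
    for key, value in sorted(filter_hidden_settings(conf).items()):
        print('{0} = {1}'.format(key, value))
    # API_KEY is masked entirely, the passwords in BROKER_URL and
    # CELERY_RESULT_BACKEND are replaced with the mask, and plain values
    # such as CELERYD_CONCURRENCY pass through unchanged.
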
+ 10 - 6
celery/apps/beat.py

@@ -10,12 +10,14 @@
     and so on.
 
 """
-from __future__ import absolute_import, unicode_literals
+from __future__ import absolute_import, print_function, unicode_literals
 
+import numbers
 import socket
 import sys
 
 from celery import VERSION_BANNER, platforms, beat
+from celery.five import text_t
 from celery.utils.imports import qualname
 from celery.utils.log import LOG_LEVELS, get_logger
 from celery.utils.timeutils import humanize_seconds
@@ -66,7 +68,7 @@ class Beat(object):
         )
         self.pidfile = pidfile
 
-        if not isinstance(self.loglevel, int):
+        if not isinstance(self.loglevel, numbers.Integral):
             self.loglevel = LOG_LEVELS[self.loglevel.upper()]
 
     def _getopt(self, key, value):
@@ -97,10 +99,12 @@ class Beat(object):
                             scheduler_cls=self.scheduler_cls,
                             schedule_filename=self.schedule)
 
-        print(str(c.blue('__    ', c.magenta('-'),
-                  c.blue('    ... __   '), c.magenta('-'),
-                  c.blue('        _\n'),
-                  c.reset(self.startup_info(beat)))))
+        print(text_t(   # noqa (pyflakes chokes on print)
+            c.blue('__    ', c.magenta('-'),
+                   c.blue('    ... __   '), c.magenta('-'),
+                   c.blue('        _\n'),
+                   c.reset(self.startup_info(beat))),
+        ))
         self.setup_logging()
         if self.socket_timeout:
             logger.debug('Setting default socket timeout to %r',

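The numbers.Integral check above accepts any integer type as-is and otherwise treats the value as a level name; a local sketch (LOG_LEVELS below is a stand-in for celery.utils.log.LOG_LEVELS):

    import logging
    import numbers

    LOG_LEVELS = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
                  'WARNING': logging.WARNING, 'ERROR': logging.ERROR,
                  'CRITICAL': logging.CRITICAL}

    def normalize_loglevel(loglevel):
        if not isinstance(loglevel, numbers.Integral):
            loglevel = LOG_LEVELS[loglevel.upper()]
        return loglevel

    print(normalize_loglevel('info'))   # 20
    print(normalize_loglevel(10))       # 10: already an integer, left alone
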
+ 15 - 10
celery/apps/worker.py

@@ -22,6 +22,7 @@ from functools import partial
 
 from billiard import current_process
 from kombu.utils.encoding import safe_str
+from kombu.utils.url import maybe_sanitize_url
 
 from celery import VERSION_BANNER, platforms, signals
 from celery.app import trace
@@ -30,7 +31,7 @@ from celery.exceptions import (
 )
 from celery.five import string, string_t
 from celery.loaders.app import AppLoader
-from celery.platforms import check_privileges
+from celery.platforms import EX_FAILURE, EX_OK, check_privileges
 from celery.utils import cry, isatty
 from celery.utils.imports import qualname
 from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
@@ -165,10 +166,10 @@ class Worker(WorkController):
 
         # Dump configuration to screen so we have some basic information
         # for when users sends bug reports.
-        print(''.join([
+        print(safe_str(''.join([
             string(self.colored.cyan(' \n', self.startup_info())),
             string(self.colored.reset(self.extra_info() or '')),
-        ]), file=sys.__stdout__)
+        ])), file=sys.__stdout__)
         self.set_process_status('-active-')
         self.install_platform_tweaks(self)
 
@@ -181,7 +182,7 @@ class Worker(WorkController):
             colorize = not self.no_color
         return self.app.log.setup(
             self.loglevel, self.logfile,
-            redirect_stdouts=False, colorize=colorize,
+            redirect_stdouts=False, colorize=colorize, hostname=self.hostname,
         )
 
     def purge_messages(self):
@@ -227,7 +228,9 @@ class Worker(WorkController):
             hostname=safe_str(self.hostname),
             version=VERSION_BANNER,
             conninfo=self.app.connection().as_uri(),
-            results=self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+            results=maybe_sanitize_url(
+                self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+            ),
             concurrency=concurrency,
             platform=safe_str(_platform.platform()),
             events=events,
@@ -277,7 +280,7 @@ class Worker(WorkController):
 
 
 def _shutdown_handler(worker, sig='TERM', how='Warm',
-                      exc=WorkerShutdown, callback=None):
+                      exc=WorkerShutdown, callback=None, exitcode=EX_OK):
 
     def _handle_request(*args):
         with in_sighandler():
@@ -288,9 +291,9 @@ def _shutdown_handler(worker, sig='TERM', how='Warm',
                 safe_say('worker: {0} shutdown (MainProcess)'.format(how))
             if active_thread_count() > 1:
                 setattr(state, {'Warm': 'should_stop',
-                                'Cold': 'should_terminate'}[how], True)
+                                'Cold': 'should_terminate'}[how], exitcode)
             else:
-                raise exc()
+                raise exc(exitcode)
     _handle_request.__name__ = str('worker_{0}'.format(how))
     platforms.signals[sig] = _handle_request
 install_worker_term_handler = partial(
@@ -299,6 +302,7 @@ install_worker_term_handler = partial(
 if not is_jython:  # pragma: no cover
     install_worker_term_hard_handler = partial(
         _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate,
+        exitcode=EX_FAILURE,
     )
 else:  # pragma: no cover
     install_worker_term_handler = \
@@ -310,7 +314,8 @@ def on_SIGINT(worker):
     install_worker_term_hard_handler(worker, sig='SIGINT')
 if not is_jython:  # pragma: no cover
     install_worker_int_handler = partial(
-        _shutdown_handler, sig='SIGINT', callback=on_SIGINT
+        _shutdown_handler, sig='SIGINT', callback=on_SIGINT,
+        exitcode=EX_FAILURE,
     )
 else:  # pragma: no cover
     install_worker_int_handler = lambda *a, **kw: None
@@ -332,7 +337,7 @@ def install_worker_restart_handler(worker, sig='SIGHUP'):
         import atexit
         atexit.register(_reload_current_worker)
         from celery.worker import state
-        state.should_stop = True
+        state.should_stop = EX_OK
     platforms.signals[sig] = restart_worker_sig_handler
 
 

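A reduced sketch of the exit-code handling above: when more than one thread is alive the handler records the desired code on worker state, and only the main thread raises the shutdown exception carrying it. `state` and `WorkerShutdown` below are local stand-ins for the celery objects.

    EX_OK, EX_FAILURE = 0, 1

    class state(object):                 # stand-in for celery.worker.state
        should_stop = None
        should_terminate = None

    class WorkerShutdown(SystemExit):    # stand-in for the celery exception
        pass

    def handle(how, exitcode, active_thread_count):
        if active_thread_count > 1:
            setattr(state, {'Warm': 'should_stop',
                            'Cold': 'should_terminate'}[how], exitcode)
        else:
            raise WorkerShutdown(exitcode)

    handle('Warm', EX_OK, active_thread_count=3)
    print(state.should_stop)             # 0 -> clean exit requested
    try:
        handle('Cold', EX_FAILURE, active_thread_count=1)
    except WorkerShutdown as exc:
        print(exc.code)                  # 1 -> process exits non-zero
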
+ 0 - 2
celery/backends/__init__.py

@@ -16,7 +16,6 @@ from celery.local import Proxy
 from celery._state import current_app
 from celery.five import reraise
 from celery.utils.imports import symbol_by_name
-from celery.utils.functional import memoize
 
 __all__ = ['get_backend_cls', 'get_backend_by_url']
 
@@ -41,7 +40,6 @@ BACKEND_ALIASES = {
 default_backend = Proxy(lambda: current_app.backend)
 
 
-@memoize(100)
 def get_backend_cls(backend=None, loader=None):
     """Get backend class by name/alias"""
     backend = backend or 'disabled'

+ 16 - 8
celery/backends/amqp.py

@@ -141,6 +141,7 @@ class AMQPBackend(BaseBackend):
         return [self._create_binding(task_id)]
 
     def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
+                 no_ack=True, on_interval=None,
                  READY_STATES=states.READY_STATES,
                  PROPAGATE_STATES=states.PROPAGATE_STATES,
                  **kwargs):
@@ -150,7 +151,8 @@ class AMQPBackend(BaseBackend):
             meta = cached_meta
         else:
             try:
-                meta = self.consume(task_id, timeout=timeout)
+                meta = self.consume(task_id, timeout=timeout, no_ack=no_ack,
+                                    on_interval=on_interval)
             except socket.timeout:
                 raise TimeoutError('The operation timed out.')
 
@@ -167,15 +169,18 @@ class AMQPBackend(BaseBackend):
 
             prev = latest = acc = None
             for i in range(backlog_limit):  # spool ffwd
-                prev, latest, acc = latest, acc, binding.get(
+                acc = binding.get(
                     accept=self.accept, no_ack=False,
                 )
                 if not acc:  # no more messages
                     break
+                if acc.payload['task_id'] == task_id:
+                    prev, latest = latest, acc
                 if prev:
                     # backends are not expected to keep history,
                     # so we delete everything except the most recent state.
                     prev.ack()
+                    prev = None
             else:
                 raise self.BacklogLimitExceeded(task_id)
 
@@ -193,7 +198,7 @@ class AMQPBackend(BaseBackend):
     poll = get_task_meta  # XXX compat
 
     def drain_events(self, connection, consumer,
-                     timeout=None, now=monotonic, wait=None):
+                     timeout=None, on_interval=None, now=monotonic, wait=None):
         wait = wait or connection.drain_events
         results = {}
 
@@ -209,27 +214,30 @@ class AMQPBackend(BaseBackend):
             if timeout and now() - time_start >= timeout:
                 raise socket.timeout()
             wait(timeout=timeout)
+            if on_interval:
+                on_interval()
             if results:  # got event on the wanted channel.
                 break
         self._cache.update(results)
         return results
 
-    def consume(self, task_id, timeout=None):
+    def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
         wait = self.drain_events
         with self.app.pool.acquire_channel(block=True) as (conn, channel):
             binding = self._create_binding(task_id)
             with self.Consumer(channel, binding,
-                               no_ack=True, accept=self.accept) as consumer:
+                               no_ack=no_ack, accept=self.accept) as consumer:
                 while 1:
                     try:
-                        return wait(conn, consumer, timeout)[task_id]
+                        return wait(
+                            conn, consumer, timeout, on_interval)[task_id]
                     except KeyError:
                         continue
 
     def _many_bindings(self, ids):
         return [self._create_binding(task_id) for task_id in ids]
 
-    def get_many(self, task_ids, timeout=None,
+    def get_many(self, task_ids, timeout=None, no_ack=True,
                  now=monotonic, getfields=itemgetter('status', 'task_id'),
                  READY_STATES=states.READY_STATES,
                  PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
@@ -263,7 +271,7 @@ class AMQPBackend(BaseBackend):
 
             bindings = self._many_bindings(task_ids)
             with self.Consumer(channel, bindings, on_message=on_message,
-                               accept=self.accept, no_ack=True):
+                               accept=self.accept, no_ack=no_ack):
                 wait = conn.drain_events
                 popleft = results.popleft
                 while ids:

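A toy illustration of the new `on_interval` hook: the waiting loop invokes the callback once per drain iteration, which is what lets a caller keep a connection alive or report progress while blocking on a result. Everything below is local; no broker is involved and drain_until is not a celery function.

    import socket
    import time

    def drain_until(predicate, timeout=2.0, interval=0.1, on_interval=None):
        started = time.time()
        while not predicate():
            if time.time() - started >= timeout:
                raise socket.timeout()
            time.sleep(interval)
            if on_interval:
                on_interval()

    ticks = []
    drain_until(lambda: len(ticks) >= 3,
                on_interval=lambda: ticks.append(time.time()))
    print(len(ticks))   # 3: the callback ran on every wait iteration
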
+ 87 - 46
celery/backends/base.py

@@ -33,8 +33,8 @@ from celery.five import items
 from celery.result import (
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
 )
-from celery.utils import timeutils
 from celery.utils.functional import LRUCache
+from celery.utils.log import get_logger
 from celery.utils.serialization import (
     get_pickled_exception,
     get_pickleable_exception,
@@ -46,12 +46,21 @@ __all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']
 EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
 PY3 = sys.version_info >= (3, 0)
 
+logger = get_logger(__name__)
+
 
 def unpickle_backend(cls, args, kwargs):
     """Return an unpickled backend."""
     return cls(*args, app=current_app._get_current_object(), **kwargs)
 
 
+class _nulldict(dict):
+
+    def ignore(self, *a, **kw):
+        pass
+    __setitem__ = update = setdefault = ignore
+
+
 class BaseBackend(object):
     READY_STATES = states.READY_STATES
     UNREADY_STATES = states.UNREADY_STATES
@@ -90,9 +99,8 @@ class BaseBackend(object):
         (self.content_type,
          self.content_encoding,
          self.encoder) = serializer_registry._encoders[self.serializer]
-        self._cache = LRUCache(
-            limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
-        )
+        cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS
+        self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)
         self.accept = prepare_accept_content(
             conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
         )
@@ -111,6 +119,21 @@ class BaseBackend(object):
         return self.store_result(task_id, exc, status=states.FAILURE,
                                  traceback=traceback, request=request)
 
+    def chord_error_from_stack(self, callback, exc=None):
+        from celery import group
+        app = self.app
+        backend = app._tasks[callback.task].backend
+        try:
+            group(
+                [app.signature(errback)
+                 for errback in callback.options.get('link_error') or []],
+                app=app,
+            ).apply_async((callback.id, ))
+        except Exception as eb_exc:
+            return backend.fail_from_current_stack(callback.id, exc=eb_exc)
+        else:
+            return backend.fail_from_current_stack(callback.id, exc=exc)
+
     def fail_from_current_stack(self, task_id, exc=None):
         type_, real_exc, tb = sys.exc_info()
         try:
@@ -132,18 +155,21 @@ class BaseBackend(object):
                                  status=states.REVOKED, traceback=None,
                                  request=request)
 
-    def prepare_exception(self, exc):
+    def prepare_exception(self, exc, serializer=None):
         """Prepare exception for serialization."""
-        if self.serializer in EXCEPTION_ABLE_CODECS:
+        serializer = self.serializer if serializer is None else serializer
+        if serializer in EXCEPTION_ABLE_CODECS:
             return get_pickleable_exception(exc)
         return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
 
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
+        if not isinstance(exc, BaseException):
+            exc = create_exception_cls(
+                from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
         if self.serializer in EXCEPTION_ABLE_CODECS:
-            return get_pickled_exception(exc)
-        return create_exception_cls(
-            from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+            exc = get_pickled_exception(exc)
+        return exc
 
     def prepare_value(self, result):
         """Prepare value for storage."""
@@ -162,7 +188,9 @@ class BaseBackend(object):
                      content_encoding=self.content_encoding,
                      accept=self.accept)
 
-    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
+    def wait_for(self, task_id,
+                 timeout=None, propagate=True, interval=0.5, no_ack=True,
+                 on_interval=None):
         """Wait for task and return its result.
 
         If the task raises an exception, this exception
@@ -185,6 +213,8 @@ class BaseBackend(object):
                 if propagate:
                     raise result
                 return result
+            if on_interval:
+                on_interval()
             # avoid hammering the CPU checking status.
             time.sleep(interval)
             time_elapsed += interval
@@ -195,7 +225,7 @@ class BaseBackend(object):
         if value is None:
             value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
         if isinstance(value, timedelta):
-            value = timeutils.timedelta_seconds(value)
+            value = value.total_seconds()
         if value is not None and type:
             return type(value)
         return value
@@ -311,7 +341,7 @@ class BaseBackend(object):
     def on_task_call(self, producer, task_id):
         return {}
 
-    def on_chord_part_return(self, task, propagate=False):
+    def on_chord_part_return(self, task, state, result, propagate=False):
         pass
 
     def fallback_chord_unlock(self, group_id, body, result=None,
@@ -374,17 +404,26 @@ class KeyValueStoreBackend(BaseBackend):
     def expire(self, key, value):
         pass
 
-    def get_key_for_task(self, task_id):
+    def get_key_for_task(self, task_id, key=''):
         """Get the cache key for a task by id."""
-        return self.task_keyprefix + self.key_t(task_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.task_keyprefix, key_t(task_id), key_t(key),
+        ])
 
-    def get_key_for_group(self, group_id):
+    def get_key_for_group(self, group_id, key=''):
         """Get the cache key for a group by id."""
-        return self.group_keyprefix + self.key_t(group_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.group_keyprefix, key_t(group_id), key_t(key),
+        ])
 
-    def get_key_for_chord(self, group_id):
+    def get_key_for_chord(self, group_id, key=''):
         """Get the cache key for the chord waiting on group with given id."""
-        return self.chord_keyprefix + self.key_t(group_id)
+        key_t = self.key_t
+        return key_t('').join([
+            self.chord_keyprefix, key_t(group_id), key_t(key),
+        ])
 
     def _strip_prefix(self, key):
         """Takes bytes, emits string."""
@@ -397,16 +436,18 @@ class KeyValueStoreBackend(BaseBackend):
     def _mget_to_results(self, values, keys):
         if hasattr(values, 'items'):
             # client returns dict so mapping preserved.
-            return dict((self._strip_prefix(k), self.decode(v))
-                        for k, v in items(values)
-                        if v is not None)
+            return {
+                self._strip_prefix(k): self.decode(v)
+                for k, v in items(values) if v is not None
+            }
         else:
             # client returns list so need to recreate mapping.
-            return dict((bytes_to_str(keys[i]), self.decode(value))
-                        for i, value in enumerate(values)
-                        if value is not None)
+            return {
+                bytes_to_str(keys[i]): self.decode(value)
+                for i, value in enumerate(values) if value is not None
+            }
 
-    def get_many(self, task_ids, timeout=None, interval=0.5,
+    def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
                  READY_STATES=states.READY_STATES):
         interval = 0.5 if interval is None else interval
         ids = task_ids if isinstance(task_ids, set) else set(task_ids)
@@ -429,7 +470,7 @@ class KeyValueStoreBackend(BaseBackend):
             r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                                  for k in keys]), keys)
             cache.update(r)
-            ids.difference_update(set(bytes_to_str(v) for v in r))
+            ids.difference_update({bytes_to_str(v) for v in r})
             for key, value in items(r):
                 yield bytes_to_str(key), value
             if timeout and iterations * interval >= timeout:
@@ -479,12 +520,12 @@ class KeyValueStoreBackend(BaseBackend):
         self.save_group(group_id, self.app.GroupResult(group_id, result))
         return header(*partial_args, task_id=group_id)
 
-    def on_chord_part_return(self, task, propagate=None):
+    def on_chord_part_return(self, task, state, result, propagate=None):
         if not self.implements_incr:
             return
         app = self.app
         if propagate is None:
-            propagate = self.app.conf.CELERY_CHORD_PROPAGATES
+            propagate = app.conf.CELERY_CHORD_PROPAGATES
         gid = task.request.group
         if not gid:
             return
@@ -492,26 +533,26 @@ class KeyValueStoreBackend(BaseBackend):
         try:
             deps = GroupResult.restore(gid, backend=task.backend)
         except Exception as exc:
-            callback = maybe_signature(task.request.chord, app=self.app)
-            return app._tasks[callback.task].backend.fail_from_current_stack(
-                callback.id,
-                exc=ChordError('Cannot restore group: {0!r}'.format(exc)),
+            callback = maybe_signature(task.request.chord, app=app)
+            logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+            return self.chord_error_from_stack(
+                callback,
+                ChordError('Cannot restore group: {0!r}'.format(exc)),
             )
         if deps is None:
             try:
                 raise ValueError(gid)
             except ValueError as exc:
-                callback = maybe_signature(task.request.chord, app=self.app)
-                task = app._tasks[callback.task]
-                return task.backend.fail_from_current_stack(
-                    callback.id,
-                    exc=ChordError('GroupResult {0} no longer exists'.format(
-                        gid,
-                    ))
+                callback = maybe_signature(task.request.chord, app=app)
+                logger.error('Chord callback %r raised: %r', gid, exc,
+                             exc_info=1)
+                return self.chord_error_from_stack(
+                    callback,
+                    ChordError('GroupResult {0} no longer exists'.format(gid)),
                 )
         val = self.incr(key)
         if val >= len(deps):
-            callback = maybe_signature(task.request.chord, app=self.app)
+            callback = maybe_signature(task.request.chord, app=app)
             j = deps.join_native if deps.supports_native_join else deps.join
             try:
                 with allow_join_result():
@@ -525,16 +566,16 @@ class KeyValueStoreBackend(BaseBackend):
                 except StopIteration:
                     reason = repr(exc)
 
-                app._tasks[callback.task].backend.fail_from_current_stack(
-                    callback.id, exc=ChordError(reason),
-                )
+                logger.error('Chord %r raised: %r', gid, reason, exc_info=1)
+                self.chord_error_from_stack(callback, ChordError(reason))
             else:
                 try:
                     callback.delay(ret)
                 except Exception as exc:
-                    app._tasks[callback.task].backend.fail_from_current_stack(
-                        callback.id,
-                        exc=ChordError('Callback error: {0!r}'.format(exc)),
+                    logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
+                    self.chord_error_from_stack(
+                        callback,
+                        ChordError('Callback error: {0!r}'.format(exc)),
                     )
             finally:
                 deps.delete()

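Sketch of the new way to disable the local result cache: when CELERY_MAX_CACHED_RESULTS is -1 the backend swaps its LRUCache for the write-dropping dict shown in the hunk above (the class body below is copied from it).

    class _nulldict(dict):

        def ignore(self, *a, **kw):
            pass
        __setitem__ = update = setdefault = ignore

    cache = _nulldict()
    cache['task-id'] = {'status': 'SUCCESS'}
    cache.update(other='value')
    cache.setdefault('x', 1)
    print(len(cache), cache.get('task-id'))   # 0 None -> nothing is ever stored
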
+ 1 - 1
celery/backends/cache.py

@@ -73,7 +73,7 @@ class DummyClient(object):
 
     def get_multi(self, keys):
         cache = self.cache
-        return dict((k, cache[k]) for k in keys if k in cache)
+        return {k: cache[k] for k in keys if k in cache}
 
     def set(self, key, value, *args, **kwargs):
         self.cache[key] = value

+ 4 - 5
celery/backends/cassandra.py

@@ -22,7 +22,7 @@ from celery import states
 from celery.exceptions import ImproperlyConfigured
 from celery.five import monotonic
 from celery.utils.log import get_logger
-from celery.utils.timeutils import maybe_timedelta, timedelta_seconds
+from celery.utils.timeutils import maybe_timedelta
 
 from .base import BaseBackend
 
@@ -148,14 +148,13 @@ class CassandraBackend(BaseBackend):
                     'children': self.encode(
                         self.current_task_children(request),
                     )}
+            ttl = self.expires and max(self.expires.total_seconds(), 0)
             if self.detailed_mode:
                 meta['result'] = result
-                cf.insert(task_id, {date_done: self.encode(meta)},
-                          ttl=self.expires and timedelta_seconds(self.expires))
+                cf.insert(task_id, {date_done: self.encode(meta)}, ttl=ttl)
             else:
                 meta['result'] = self.encode(result)
-                cf.insert(task_id, meta,
-                          ttl=self.expires and timedelta_seconds(self.expires))
+                cf.insert(task_id, meta, ttl=ttl)
 
         return self._retry_on_error(_do_store)
 

+ 40 - 31
celery/backends/database/__init__.py

@@ -8,17 +8,21 @@
 """
 from __future__ import absolute_import
 
+import logging
+from contextlib import contextmanager
 from functools import wraps
 
 from celery import states
+from celery.backends.base import BaseBackend
 from celery.exceptions import ImproperlyConfigured
 from celery.five import range
 from celery.utils.timeutils import maybe_timedelta
 
-from celery.backends.base import BaseBackend
+from .models import Task
+from .models import TaskSet
+from .session import SessionManager
 
-from .models import Task, TaskSet
-from .session import ResultSession
+logger = logging.getLogger(__name__)
 
 __all__ = ['DatabaseBackend']
 
@@ -33,7 +37,19 @@ def _sqlalchemy_installed():
     return sqlalchemy
 _sqlalchemy_installed()
 
-from sqlalchemy.exc import DatabaseError, OperationalError
+from sqlalchemy.exc import DatabaseError, InvalidRequestError
+from sqlalchemy.orm.exc import StaleDataError
+
+
+@contextmanager
+def session_cleanup(session):
+    try:
+        yield
+    except Exception:
+        session.rollback()
+        raise
+    finally:
+        session.close()
 
 
 def retry(fun):
@@ -45,7 +61,12 @@ def retry(fun):
         for retries in range(max_retries):
             try:
                 return fun(*args, **kwargs)
-            except (DatabaseError, OperationalError):
+            except (DatabaseError, InvalidRequestError, StaleDataError):
+                logger.warning(
+                    "Failed operation %s. Retrying %s more times.",
+                    fun.__name__, max_retries - retries - 1,
+                    exc_info=True,
+                )
                 if retries + 1 >= max_retries:
                     raise
 
@@ -83,8 +104,8 @@ class DatabaseBackend(BaseBackend):
                 'Missing connection string! Do you have '
                 'CELERY_RESULT_DBURI set to a real value?')
 
-    def ResultSession(self):
-        return ResultSession(
+    def ResultSession(self, session_manager=SessionManager()):
+        return session_manager.session_factory(
             dburi=self.dburi,
             short_lived_sessions=self.short_lived_sessions,
             **self.engine_options
@@ -95,8 +116,9 @@ class DatabaseBackend(BaseBackend):
                       traceback=None, max_retries=3, **kwargs):
         """Store return value and status of an executed task."""
         session = self.ResultSession()
-        try:
-            task = session.query(Task).filter(Task.task_id == task_id).first()
+        with session_cleanup(session):
+            task = list(session.query(Task).filter(Task.task_id == task_id))
+            task = task and task[0]
             if not task:
                 task = Task(task_id)
                 session.add(task)
@@ -106,83 +128,70 @@ class DatabaseBackend(BaseBackend):
             task.traceback = traceback
             session.commit()
             return result
-        finally:
-            session.close()
 
     @retry
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
         session = self.ResultSession()
-        try:
-            task = session.query(Task).filter(Task.task_id == task_id).first()
-            if task is None:
+        with session_cleanup(session):
+            task = list(session.query(Task).filter(Task.task_id == task_id))
+            task = task and task[0]
+            if not task:
                 task = Task(task_id)
                 task.status = states.PENDING
                 task.result = None
             return task.to_dict()
-        finally:
-            session.close()
 
     @retry
     def _save_group(self, group_id, result):
         """Store the result of an executed group."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             group = TaskSet(group_id, result)
             session.add(group)
             session.flush()
             session.commit()
             return result
-        finally:
-            session.close()
 
     @retry
     def _restore_group(self, group_id):
         """Get metadata for group by id."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             group = session.query(TaskSet).filter(
                 TaskSet.taskset_id == group_id).first()
             if group:
                 return group.to_dict()
-        finally:
-            session.close()
 
     @retry
     def _delete_group(self, group_id):
         """Delete metadata for group by id."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             session.query(TaskSet).filter(
                 TaskSet.taskset_id == group_id).delete()
             session.flush()
             session.commit()
-        finally:
-            session.close()
 
     @retry
     def _forget(self, task_id):
         """Forget about result."""
         session = self.ResultSession()
-        try:
+        with session_cleanup(session):
             session.query(Task).filter(Task.task_id == task_id).delete()
             session.commit()
-        finally:
-            session.close()
 
     def cleanup(self):
         """Delete expired metadata."""
         session = self.ResultSession()
         expires = self.expires
         now = self.app.now()
-        try:
+        with session_cleanup(session):
             session.query(Task).filter(
                 Task.date_done < (now - expires)).delete()
             session.query(TaskSet).filter(
                 TaskSet.date_done < (now - expires)).delete()
             session.commit()
-        finally:
-            session.close()
 
     def __reduce__(self, args=(), kwargs={}):
         kwargs.update(

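A minimal sketch of the session_cleanup() pattern introduced above: roll back on error, always close. FakeSession stands in for a SQLAlchemy session so the snippet runs without a database.

    from contextlib import contextmanager

    @contextmanager
    def session_cleanup(session):
        try:
            yield
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    class FakeSession(object):
        def __init__(self):
            self.events = []
        def rollback(self):
            self.events.append('rollback')
        def close(self):
            self.events.append('close')

    session = FakeSession()
    try:
        with session_cleanup(session):
            raise RuntimeError('query failed')
    except RuntimeError:
        pass
    print(session.events)   # ['rollback', 'close']
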
+ 46 - 49
celery/backends/database/session.py

@@ -8,58 +8,55 @@
 """
 from __future__ import absolute_import
 
-from collections import defaultdict
-from multiprocessing.util import register_after_fork
+from billiard.util import register_after_fork
 
 from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
 from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
 
 ResultModelBase = declarative_base()
 
-_SETUP = defaultdict(lambda: False)
-_ENGINES = {}
-_SESSIONS = {}
-
-__all__ = ['ResultSession', 'get_engine', 'create_session']
-
-
-class _after_fork(object):
-    registered = False
-
-    def __call__(self):
-        self.registered = False  # child must reregister
-        for engine in list(_ENGINES.values()):
-            engine.dispose()
-        _ENGINES.clear()
-        _SESSIONS.clear()
-after_fork = _after_fork()
-
-
-def get_engine(dburi, **kwargs):
-    try:
-        return _ENGINES[dburi]
-    except KeyError:
-        engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
-        after_fork.registered = True
-        register_after_fork(after_fork, after_fork)
-        return engine
-
-
-def create_session(dburi, short_lived_sessions=False, **kwargs):
-    engine = get_engine(dburi, **kwargs)
-    if short_lived_sessions or dburi not in _SESSIONS:
-        _SESSIONS[dburi] = sessionmaker(bind=engine)
-    return engine, _SESSIONS[dburi]
-
-
-def setup_results(engine):
-    if not _SETUP['results']:
-        ResultModelBase.metadata.create_all(engine)
-        _SETUP['results'] = True
-
-
-def ResultSession(dburi, **kwargs):
-    engine, session = create_session(dburi, **kwargs)
-    setup_results(engine)
-    return session()
+__all__ = ['SessionManager']
+
+
+class SessionManager(object):
+    def __init__(self):
+        self._engines = {}
+        self._sessions = {}
+        self.forked = False
+        self.prepared = False
+        register_after_fork(self, self._after_fork)
+
+    def _after_fork(self):
+        self.forked = True
+
+    def get_engine(self, dburi, **kwargs):
+        if self.forked:
+            try:
+                return self._engines[dburi]
+            except KeyError:
+                engine = self._engines[dburi] = create_engine(dburi, **kwargs)
+                return engine
+        else:
+            kwargs['poolclass'] = NullPool
+            return create_engine(dburi, **kwargs)
+
+    def create_session(self, dburi, short_lived_sessions=False, **kwargs):
+        engine = self.get_engine(dburi, **kwargs)
+        if self.forked:
+            if short_lived_sessions or dburi not in self._sessions:
+                self._sessions[dburi] = sessionmaker(bind=engine)
+            return engine, self._sessions[dburi]
+        else:
+            return engine, sessionmaker(bind=engine)
+
+    def prepare_models(self, engine):
+        if not self.prepared:
+            ResultModelBase.metadata.create_all(engine)
+            self.prepared = True
+
+    def session_factory(self, dburi, **kwargs):
+        engine, session = self.create_session(dburi, **kwargs)
+        self.prepare_models(engine)
+        return session()
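
A minimal usage sketch of the SessionManager introduced above; the sqlite
URI and the try/finally cleanup are placeholders, not celery's actual call
sites::

    from celery.backends.database.session import SessionManager

    manager = SessionManager()

    # until the process forks, get_engine() builds NullPool engines, so no
    # pooled connection can be inherited by child worker processes.
    session = manager.session_factory('sqlite:///results.db')
    try:
        pass  # query the Task/TaskSet result models here
    finally:
        session.close()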

+ 0 - 1
celery/backends/mongodb.py

@@ -92,7 +92,6 @@ class MongoBackend(BaseBackend):
             self.options = dict(config, **config.pop('options', None) or {})
 
             # Set option defaults
-            self.options.setdefault('ssl', self.app.conf.BROKER_USE_SSL)
             self.options.setdefault('max_pool_size', self.max_pool_size)
             self.options.setdefault('auto_start_request', False)
 

+ 76 - 4
celery/backends/redis.py

@@ -13,9 +13,11 @@ from functools import partial
 from kombu.utils import cached_property, retry_over_time
 from kombu.utils.url import _parse_url
 
-from celery.exceptions import ImproperlyConfigured
+from celery import states
+from celery.canvas import maybe_signature
+from celery.exceptions import ChordError, ImproperlyConfigured
 from celery.five import string_t
-from celery.utils import deprecated_property
+from celery.utils import deprecated_property, strtobool
 from celery.utils.functional import dictfilter
 from celery.utils.log import get_logger
 from celery.utils.timeutils import humanize_seconds
@@ -56,7 +58,7 @@ class RedisBackend(KeyValueStoreBackend):
 
     def __init__(self, host=None, port=None, db=None, password=None,
                  expires=None, max_connections=None, url=None,
-                 connection_pool=None, **kwargs):
+                 connection_pool=None, new_join=False, **kwargs):
         super(RedisBackend, self).__init__(**kwargs)
         conf = self.app.conf
         if self.redis is None:
@@ -90,7 +92,17 @@ class RedisBackend(KeyValueStoreBackend):
         self.url = url
         self.expires = self.prepare_expires(expires, type=int)
 
-        self.connection_errors, self.channel_errors = get_redis_error_classes()
+        try:
+            new_join = strtobool(self.connparams.pop('new_join'))
+        except KeyError:
+            pass
+        if new_join:
+            self.apply_chord = self._new_chord_apply
+            self.on_chord_part_return = self._new_chord_return
+
+        self.connection_errors, self.channel_errors = (
+            get_redis_error_classes() if get_redis_error_classes
+            else ((), ()))
 
     def _params_from_url(self, url, defaults):
         scheme, host, port, user, password, path, query = _parse_url(url)
@@ -165,6 +177,66 @@ class RedisBackend(KeyValueStoreBackend):
     def expire(self, key, value):
         return self.client.expire(key, value)
 
+    def _unpack_chord_result(self, tup, decode,
+                             PROPAGATE_STATES=states.PROPAGATE_STATES):
+        _, tid, state, retval = decode(tup)
+        if state in PROPAGATE_STATES:
+            raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
+        return retval
+
+    def _new_chord_apply(self, header, partial_args, group_id, body,
+                         result=None, **options):
+        # avoids saving the group in the redis db.
+        return header(*partial_args, task_id=group_id)
+
+    def _new_chord_return(self, task, state, result, propagate=None,
+                          PROPAGATE_STATES=states.PROPAGATE_STATES):
+        app = self.app
+        if propagate is None:
+            propagate = self.app.conf.CELERY_CHORD_PROPAGATES
+        request = task.request
+        tid, gid = request.id, request.group
+        if not gid or not tid:
+            return
+
+        client = self.client
+        jkey = self.get_key_for_group(gid, '.j')
+        result = self.encode_result(result, state)
+        _, readycount, _ = client.pipeline()                            \
+            .rpush(jkey, self.encode([1, tid, state, result]))          \
+            .llen(jkey)                                                 \
+            .expire(jkey, 86400)                                        \
+            .execute()
+
+        try:
+            callback = maybe_signature(request.chord, app=app)
+            total = callback['chord_size']
+            if readycount >= total:
+                decode, unpack = self.decode, self._unpack_chord_result
+                resl, _ = client.pipeline()     \
+                    .lrange(jkey, 0, total)     \
+                    .delete(jkey)               \
+                    .execute()
+                try:
+                    callback.delay([unpack(tup, decode) for tup in resl])
+                except Exception as exc:
+                    error('Chord callback for %r raised: %r',
+                          request.group, exc, exc_info=1)
+                    app._tasks[callback.task].backend.fail_from_current_stack(
+                        callback.id,
+                        exc=ChordError('Callback error: {0!r}'.format(exc)),
+                    )
+        except ChordError as exc:
+            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+            app._tasks[callback.task].backend.fail_from_current_stack(
+                callback.id, exc=exc,
+            )
+        except Exception as exc:
+            error('Chord %r raised: %r', request.group, exc, exc_info=1)
+            app._tasks[callback.task].backend.fail_from_current_stack(
+                callback.id, exc=ChordError('Join error: {0!r}'.format(exc)),
+            )
+
     @property
     def ConnectionPool(self):
         if self._ConnectionPool is None:
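
A hedged example of opting in to the new chord join implementation added
above.  The strtobool(self.connparams.pop('new_join')) handling suggests a
backend URL query parameter; treat the exact URL spelling and the task
names as assumptions::

    from celery import Celery, chord

    app = Celery(
        'proj',
        broker='redis://localhost:6379/0',
        backend='redis://localhost:6379/0?new_join=1',
    )

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # with new_join enabled, each finished header task is rpush'ed onto the
    # group's '.j' key and the body is applied once the list length reaches
    # chord_size, instead of polling the group result.
    result = chord(add.s(i, i) for i in range(10))(tsum.s())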

+ 35 - 16
celery/beat.py

@@ -165,12 +165,16 @@ class Scheduler(object):
     #: How often to sync the schedule (3 minutes by default)
     sync_every = 3 * 60
 
+    #: How many tasks can be called before a sync is forced.
+    sync_every_tasks = None
+
     _last_sync = None
+    _tasks_since_sync = 0
 
     logger = logger  # compat
 
     def __init__(self, app, schedule=None, max_interval=None,
-                 Publisher=None, lazy=False, **kwargs):
+                 Publisher=None, lazy=False, sync_every_tasks=None, **kwargs):
         self.app = app
         self.data = maybe_evaluate({} if schedule is None else schedule)
         self.max_interval = (max_interval
@@ -178,6 +182,9 @@ class Scheduler(object):
                              or self.max_interval)
         self.Publisher = Publisher or app.amqp.TaskProducer
         self._heap = None
+        self.sync_every_tasks = (
+            app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
+            else sync_every_tasks)
         if not lazy:
             self.setup_schedule()
 
@@ -236,8 +243,12 @@ class Scheduler(object):
         return min(next_time_to_run, self.max_interval)
 
     def should_sync(self):
-        return (not self._last_sync or
-                (monotonic() - self._last_sync) > self.sync_every)
+        return (
+            (not self._last_sync or
+               (monotonic() - self._last_sync) > self.sync_every) or
+            (self.sync_every_tasks and
+                self._tasks_since_sync >= self.sync_every_tasks)
+        )
 
     def reserve(self, entry):
         new_entry = self.schedule[entry.name] = next(entry)
@@ -264,6 +275,7 @@ class Scheduler(object):
                 "Couldn't apply scheduled task {0.name}: {exc}".format(
                     entry, exc=exc)), sys.exc_info()[2])
         finally:
+            self._tasks_since_sync += 1
             if self.should_sync():
                 self._do_sync()
 
@@ -279,6 +291,7 @@ class Scheduler(object):
             self.sync()
         finally:
             self._last_sync = monotonic()
+            self._tasks_since_sync = 0
 
     def sync(self):
         pass
@@ -298,9 +311,10 @@ class Scheduler(object):
         return self.Entry(**dict(entry, name=name, app=self.app))
 
     def update_from_dict(self, dict_):
-        self.schedule.update(dict(
-            (name, self._maybe_entry(name, entry))
-            for name, entry in items(dict_)))
+        self.schedule.update({
+            name: self._maybe_entry(name, entry)
+            for name, entry in items(dict_)
+        })
 
     def merge_inplace(self, b):
         schedule = self.schedule
@@ -368,7 +382,6 @@ class PersistentScheduler(Scheduler):
         try:
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
-            entries = self._store.setdefault('entries', {})
         except Exception as exc:
             error('Removing corrupted schedule file %r: %r',
                   self.schedule_filename, exc, exc_info=True)
@@ -376,15 +389,21 @@ class PersistentScheduler(Scheduler):
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
         else:
-            if '__version__' not in self._store:
-                warning('Reset: Account for new __version__ field')
-                self._store.clear()   # remove schedule at 2.2.2 upgrade.
-            if 'tz' not in self._store:
-                warning('Reset: Account for new tz field')
-                self._store.clear()   # remove schedule at 3.0.8 upgrade
-            if 'utc_enabled' not in self._store:
-                warning('Reset: Account for new utc_enabled field')
-                self._store.clear()   # remove schedule at 3.0.9 upgrade
+            try:
+                self._store['entries']
+            except KeyError:
+                # new schedule db
+                self._store['entries'] = {}
+            else:
+                if '__version__' not in self._store:
+                    warning('DB Reset: Account for new __version__ field')
+                    self._store.clear()   # remove schedule at 2.2.2 upgrade.
+                elif 'tz' not in self._store:
+                    warning('DB Reset: Account for new tz field')
+                    self._store.clear()   # remove schedule at 3.0.8 upgrade
+                elif 'utc_enabled' not in self._store:
+                    warning('DB Reset: Account for new utc_enabled field')
+                    self._store.clear()   # remove schedule at 3.0.9 upgrade
 
         tz = self.app.conf.CELERY_TIMEZONE
         stored_tz = self._store.get('tz')
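
A short sketch of the new task-count based sync above: should_sync() now
fires either after sync_every seconds (3 minutes) or after
CELERYBEAT_SYNC_EVERY tasks have been sent since the last sync, whichever
comes first.  The app name and value below are placeholders::

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    # force a schedule sync after every 10 sent tasks; 0/None keeps the
    # purely time-based behaviour
    app.conf.CELERYBEAT_SYNC_EVERY = 10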

+ 10 - 9
celery/bin/amqp.py

@@ -15,7 +15,6 @@ import pprint
 from functools import partial
 from itertools import count
 
-from amqp import Message
 from kombu.utils.encoding import safe_str
 
 from celery.utils.functional import padlist
@@ -175,7 +174,7 @@ class AMQShell(cmd.Cmd):
         'basic.get': Spec(('queue', str),
                           ('no_ack', bool, 'off'),
                           returns=dump_message),
-        'basic.publish': Spec(('msg', Message),
+        'basic.publish': Spec(('msg', str),
                               ('exchange', str),
                               ('routing_key', str),
                               ('mandatory', bool, 'no'),
@@ -247,32 +246,34 @@ class AMQShell(cmd.Cmd):
         return [cmd for cmd in names
                 if cmd.partition('.')[2].startswith(text)]
 
-    def dispatch(self, cmd, argline):
+    def dispatch(self, cmd, arglist):
         """Dispatch and execute the command.
 
         Lookup order is: :attr:`builtins` -> :attr:`amqp`.
 
         """
-        arglist = shlex.split(safe_str(argline))
+        if isinstance(arglist, string_t):
+            arglist = shlex.split(safe_str(arglist))
         if cmd in self.builtins:
             return getattr(self, self.builtins[cmd])(*arglist)
         fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
         return formatter(fun(*args))
 
-    def parseline(self, line):
+    def parseline(self, parts):
         """Parse input line.
 
         :returns: tuple of three items:
             `(command_name, arglist, original_line)`
 
         """
-        parts = line.split()
         if parts:
-            return parts[0], ' '.join(parts[1:]), line
-        return '', '', line
+            return parts[0], parts[1:], ' '.join(parts)
+        return '', '', ''
 
     def onecmd(self, line):
         """Parse line and execute command."""
+        if isinstance(line, string_t):
+            line = shlex.split(safe_str(line))
         cmd, arg, line = self.parseline(line)
         if not line:
             return self.emptyline()
@@ -327,7 +328,7 @@ class AMQPAdmin(object):
     def run(self):
         shell = self.Shell(connect=self.connect, out=self.out)
         if self.args:
-            return shell.onecmd(' '.join(self.args))
+            return shell.onecmd(self.args)
         try:
             return shell.cmdloop()
         except KeyboardInterrupt:

+ 15 - 21
celery/bin/base.py

@@ -68,7 +68,6 @@ from __future__ import absolute_import, print_function, unicode_literals
 import os
 import random
 import re
-import socket
 import sys
 import warnings
 import json
@@ -86,7 +85,7 @@ from celery.five import items, string, string_t
 from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
 from celery.utils import term
 from celery.utils import text
-from celery.utils import NODENAME_DEFAULT, nodesplit
+from celery.utils import node_format, host_format
 from celery.utils.imports import symbol_by_name, import_from_cwd
 
 try:
@@ -106,7 +105,6 @@ Try --help?
 
 find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)')
 find_rst_ref = re.compile(r':\w+:`(.+?)`')
-find_sformat = re.compile(r'%(\w)')
 
 __all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter',
            'Command', 'Option', 'daemon_options']
@@ -375,9 +373,10 @@ class Command(object):
 
     def prepare_args(self, options, args):
         if options:
-            options = dict((k, self.expanduser(v))
-                           for k, v in items(vars(options))
-                           if not k.startswith('_'))
+            options = {
+                k: self.expanduser(v)
+                for k, v in items(vars(options)) if not k.startswith('_')
+            }
         args = [self.expanduser(arg) for arg in args]
         self.check_args(args)
         return options, args
@@ -530,7 +529,12 @@ class Command(object):
                 opt = opts.get(arg)
                 if opt:
                     if opt.takes_value():
-                        acc[opt.dest] = args[index + 1]
+                        try:
+                            acc[opt.dest] = args[index + 1]
+                        except IndexError:
+                            raise ValueError(
+                                'Missing required argument for {0}'.format(
+                                    arg))
                         index += 1
                     elif opt.action == 'store_true':
                         acc[opt.dest] = True
@@ -561,20 +565,10 @@ class Command(object):
         pass
 
     def node_format(self, s, nodename, **extra):
-        name, host = nodesplit(nodename)
-        return self._simple_format(
-            s, host, n=name or NODENAME_DEFAULT, **extra)
-
-    def simple_format(self, s, **extra):
-        return self._simple_format(s, socket.gethostname(), **extra)
-
-    def _simple_format(self, s, host,
-                       match=find_sformat, expand=r'\1', **keys):
-        if s:
-            name, _, domain = host.partition('.')
-            keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **keys)
-            return match.sub(lambda m: keys[m.expand(expand)], s)
-        return s
+        return node_format(s, nodename, **extra)
+
+    def host_format(self, s, **extra):
+        return host_format(s, **extra)
 
     def _get_default_app(self, *args, **kwargs):
         from celery._state import get_current_app
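
A rough sketch of the celery.utils helpers the Command methods now delegate
to.  The expansions mirror the removed _simple_format(): %h is the
hostname, %n the node/short name and %d the domain; the node name and
paths below are made up, and actual output depends on the local hostname::

    from celery.utils import host_format, node_format

    node_format('/var/log/celery/%n.log', 'worker1@example.com')
    # -> '/var/log/celery/worker1.log'

    host_format('celery@%h')
    # -> 'celery@<this machine's hostname>'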

+ 1 - 1
celery/bin/beat.py

@@ -24,7 +24,7 @@ The :program:`celery beat` command.
     Scheduler class to use.
     Default is :class:`celery.beat.PersistentScheduler`.
 
-.. cmdoption:: max-interval
+.. cmdoption:: --max-interval
 
     Max seconds to sleep between schedule iterations.
 

+ 36 - 12
celery/bin/celery.py

@@ -8,13 +8,15 @@ The :program:`celery` umbrella command.
 """
 from __future__ import absolute_import, unicode_literals
 
-import anyjson
+import numbers
 import os
 import sys
 
 from functools import partial
 from importlib import import_module
 
+from kombu.utils import json
+
 from celery.five import string_t, values
 from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
 from celery.utils import term
@@ -61,7 +63,7 @@ if DEBUG:  # pragma: no cover
 
 
 def determine_exit_status(ret):
-    if isinstance(ret, int):
+    if isinstance(ret, numbers.Integral):
         return ret
     return EX_OK if ret else EX_FAILURE
 
@@ -161,12 +163,12 @@ class call(Command):
         # Positional args.
         args = kw.get('args') or ()
         if isinstance(args, string_t):
-            args = anyjson.loads(args)
+            args = json.loads(args)
 
         # Keyword args.
         kwargs = kw.get('kwargs') or {}
         if isinstance(kwargs, string_t):
-            kwargs = anyjson.loads(kwargs)
+            kwargs = json.loads(kwargs)
 
         # Expires can be int/float.
         expires = kw.get('expires') or None
@@ -571,10 +573,10 @@ class shell(Command):  # pragma: no cover
                        'signature': celery.signature}
 
         if not without_tasks:
-            self.locals.update(dict(
-                (task.__name__, task) for task in values(self.app.tasks)
-                if not task.name.startswith('celery.')),
-            )
+            self.locals.update({
+                task.__name__: task for task in values(self.app.tasks)
+                if not task.name.startswith('celery.')
+            })
 
         if force_python:
             return self.invoke_fallback_shell()
@@ -702,7 +704,7 @@ class CeleryCommand(Command):
             helps = '{self.prog_name} {command} --help'
         else:
             helps = '{self.prog_name} --help'
-        self.error(self.colored.magenta("Error: {0}".format(exc)))
+        self.error(self.colored.magenta('Error: {0}'.format(exc)))
         self.error("""Please try '{0}'""".format(helps.format(
             self=self, command=command,
         )))
@@ -715,11 +717,33 @@ class CeleryCommand(Command):
                 if value.startswith('--'):
                     rest.append(value)
                 elif value.startswith('-'):
-                    rest.extend([value] + [argv[index + 1]])
-                    index += 1
+                    # we eat the next argument even though we don't know
+                    # whether this option takes an argument or not.
+                    # instead we guess which argument is the command name
+                    # in the return statements below.
+                    try:
+                        nxt = argv[index + 1]
+                        if nxt.startswith('-'):
+                            # is another option
+                            rest.append(value)
+                        else:
+                            # is (maybe) a value for this option
+                            rest.extend([value, nxt])
+                            index += 1
+                    except IndexError:
+                        rest.append(value)
+                        break
                 else:
-                    return argv[index:] + rest
+                    break
                 index += 1
+            if argv[index:]:
+                # if there are more arguments left then divide and swap
+                # we assume the first argument in argv[i:] is the command
+                # name.
+                return argv[index:] + rest
+            # if there are no more arguments then the last arg in rest
+            # must be the command name.
+            return [rest.pop()] + rest
         return []
 
     def prepare_prog_name(self, name):

+ 3 - 1
celery/bin/celeryd_detach.py

@@ -30,6 +30,7 @@ logger = get_logger(__name__)
 C_FAKEFORK = os.environ.get('C_FAKEFORK')
 
 OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
+    Option('--workdir', default=None, dest='working_directory'),
     Option('--fake',
            default=False, action='store_true', dest='fake',
            help="Don't fork (for debugging purposes)"),
@@ -39,7 +40,8 @@ OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
 def detach(path, argv, logfile=None, pidfile=None, uid=None,
            gid=None, umask=0, working_directory=None, fake=False, app=None):
     fake = 1 if C_FAKEFORK else fake
-    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake):
+    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
+                  after_forkers=False):
         try:
             os.execv(path, [path] + argv)
         except Exception:

+ 2 - 2
celery/bin/events.py

@@ -57,14 +57,14 @@ class events(Command):
         celery events -d --app=proj
             dump events to screen.
         celery events -b amqp://
-        celery events -C <camera> [options]
+        celery events -c <camera> [options]
             run snapshot camera.
 
     Examples::
 
         celery events
         celery events -d
-        celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info
+        celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info
     """
     doc = __doc__
     supports_args = False

+ 1 - 1
celery/bin/graph.py

@@ -34,7 +34,7 @@ class graph(Command):
 
     def bootsteps(self, *args, **kwargs):
         worker = self.app.WorkController()
-        include = set(arg.lower() for arg in args or ['worker', 'consumer'])
+        include = {arg.lower() for arg in args or ['worker', 'consumer']}
         if 'worker' in include:
             graph = worker.blueprint.graph
             if 'consumer' in include:

+ 45 - 38
celery/bin/multi.py

@@ -13,19 +13,19 @@ Examples
 
     # Pidfiles and logfiles are stored in the current directory
     # by default.  Use --pidfile and --logfile argument to change
-    # this.  The abbreviation %N will be expanded to the current
+    # this.  The abbreviation %n will be expanded to the current
     # node name.
-    $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid
-                                    --logfile=/var/log/celery/%N.log
+    $ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid
+                                    --logfile=/var/log/celery/%n.log
 
 
     # You need to add the same arguments when you restart,
     # as these are not persisted anywhere.
-    $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid
-                                     --logfile=/var/run/celery/%N.log
+    $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
+                                     --logfile=/var/run/celery/%n.log
 
     # To stop the node, you need to specify the same pidfile.
-    $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid
+    $ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid
 
     # 3 workers, with 3 processes each
     $ celery multi start 3 -c 3
@@ -47,6 +47,9 @@ Examples
     # specify fully qualified nodenames
     $ celery multi start foo@worker.example.com bar@worker.example.com -c 3
 
+    # fully qualified nodenames but using the current hostname
+    $ celery multi start foo@%h bar@%h
+
     # Advanced example starting 10 workers in the background:
     #   * Three of the workers processes the images and video queue
     #   * Two of the workers processes the data queue with loglevel DEBUG
@@ -100,25 +103,26 @@ import signal
 import socket
 import sys
 
-from collections import defaultdict, namedtuple
+from collections import OrderedDict, defaultdict, namedtuple
+from functools import partial
 from subprocess import Popen
 from time import sleep
 
 from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
 from kombu.utils.encoding import from_utf8
 
 from celery import VERSION_BANNER
 from celery.five import items
 from celery.platforms import Pidfile, IS_WINDOWS
-from celery.utils import term, nodesplit
+from celery.utils import term
+from celery.utils import host_format, node_format, nodesplit
 from celery.utils.text import pluralize
 
 __all__ = ['MultiTool']
 
-SIGNAMES = set(sig for sig in dir(signal)
-               if sig.startswith('SIG') and '_' not in sig)
-SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
+SIGNAMES = {sig for sig in dir(signal)
+            if sig.startswith('SIG') and '_' not in sig}
+SIGMAP = {getattr(signal, name): name for name in SIGNAMES}
 
 USAGE = """\
 usage: {prog_name} start <node1 node2 nodeN|range> [worker options]
@@ -247,8 +251,8 @@ class MultiTool(object):
         self.retcode = int(any(retcodes))
 
     def with_detacher_default_options(self, p):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
-        _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log')
+        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
+        _setdefaultopt(p.options, ['--logfile', '-f'], '%n.log')
         p.options.setdefault(
             '--cmd',
             '-m {0}'.format(celery_exe('worker', '--detach')),
@@ -320,7 +324,7 @@ class MultiTool(object):
             self.note('')
 
     def getpids(self, p, cmd, callback=None):
-        _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
+        _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
 
         nodes = []
         for node in multi_args(p, cmd):
@@ -478,26 +482,41 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
                 p.namespaces[subns].update(ns_opts)
             p.namespaces.pop(ns_name)
 
+    # Numbers in args always refer to the index in the list of names.
+    # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
+    for ns_name, ns_opts in list(items(p.namespaces)):
+        if ns_name.isdigit():
+            ns_index = int(ns_name) - 1
+            if ns_index < 0:
+                raise KeyError('Indexes start at 1, got: %r' % (ns_name, ))
+            try:
+                p.namespaces[names[ns_index]].update(ns_opts)
+            except IndexError:
+                raise KeyError('No node at index %r' % (ns_name, ))
+
     for name in names:
-        this_suffix = suffix
+        hostname = suffix
         if '@' in name:
-            this_name = options['-n'] = name
-            nodename, this_suffix = nodesplit(name)
-            name = nodename
+            nodename = options['-n'] = host_format(name)
+            shortname, hostname = nodesplit(nodename)
+            name = shortname
         else:
-            nodename = '%s%s' % (prefix, name)
-            this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
-        expand = abbreviations({'%h': this_name,
-                                '%n': name,
-                                '%N': nodename,
-                                '%d': this_suffix})
+            shortname = '%s%s' % (prefix, name)
+            nodename = options['-n'] = host_format(
+                '{0}@{1}'.format(shortname, hostname),
+            )
+
+        expand = partial(
+            node_format, nodename=nodename, N=shortname, d=hostname,
+            h=nodename,
+        )
         argv = ([expand(cmd)] +
                 [format_opt(opt, expand(value))
                  for opt, value in items(p.optmerge(name, options))] +
                 [passthrough])
         if append:
             argv.append(expand(append))
-        yield multi_args_t(this_name, argv, expand, name)
+        yield multi_args_t(nodename, argv, expand, name)
 
 
 class NamespacedOptionParser(object):
@@ -579,18 +598,6 @@ def parse_ns_range(ns, ranges=False):
     return ret
 
 
-def abbreviations(mapping):
-
-    def expand(S):
-        ret = S
-        if S is not None:
-            for short_opt, long_opt in items(mapping):
-                ret = ret.replace(short_opt, long_opt)
-        return ret
-
-    return expand
-
-
 def findsig(args, default=signal.SIGTERM):
     for arg in reversed(args):
         if len(arg) == 2 and arg[0] == '-':

+ 14 - 7
celery/bin/worker.py

@@ -71,8 +71,8 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 .. cmdoption:: -E, --events
 
-    Send events that can be captured by monitors like :program:`celery events`,
-    `celerymon`, and others.
+    Send task-related events that can be captured by monitors like
+    :program:`celery events`, `celerymon`, and others.
 
 .. cmdoption:: --without-gossip
 
@@ -86,6 +86,10 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
     Do not send event heartbeats.
 
+.. cmdoption:: --heartbeat-interval
+
+    Interval in seconds at which to send the worker heartbeat.
+
 .. cmdoption:: --purge
 
     Purges all waiting tasks before the daemon is started.
@@ -171,7 +175,7 @@ class worker(Command):
         # parse options before detaching so errors can be handled.
         options, args = self.prepare_args(
             *self.parse_options(prog_name, argv, command))
-        self.maybe_detach([command] + sys.argv[1:])
+        self.maybe_detach([command] + argv)
         return self(*args, **options)
 
     def maybe_detach(self, argv, dopts=['-D', '--detach']):
@@ -192,7 +196,7 @@ class worker(Command):
         if self.app.IS_WINDOWS and kwargs.get('beat'):
             self.die('-B option does not work on Windows.  '
                      'Please run celery beat as a separate service.')
-        hostname = self.simple_format(default_nodename(hostname))
+        hostname = self.host_format(default_nodename(hostname))
         if loglevel:
             try:
                 loglevel = mlevel(loglevel)
@@ -201,12 +205,14 @@ class worker(Command):
                     loglevel, '|'.join(
                         l for l in LOG_LEVELS if isinstance(l, string_t))))
 
-        return self.app.Worker(
+        worker = self.app.Worker(
             hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
-            logfile=self.node_format(logfile, hostname),
+            logfile=logfile,  # node format handled by celery.app.log.setup
             pidfile=self.node_format(pidfile, hostname),
             state_db=self.node_format(state_db, hostname), **kwargs
-        ).start()
+        )
+        worker.start()
+        return worker.exitcode
 
     def with_pool_option(self, argv):
         # this command support custom pools
@@ -245,6 +251,7 @@ class worker(Command):
             Option('--without-gossip', action='store_true', default=False),
             Option('--without-mingle', action='store_true', default=False),
             Option('--without-heartbeat', action='store_true', default=False),
+            Option('--heartbeat-interval', type='int'),
             Option('-O', dest='optimization'),
             Option('-D', '--detach', action='store_true'),
         ) + daemon_options() + tuple(self.app.user_options['worker'])

+ 3 - 2
celery/bootsteps.py

@@ -232,6 +232,8 @@ class Blueprint(object):
         return next((C for C in values(self.steps) if C.last), None)
 
     def _firstpass(self, steps):
+        for step in values(steps):
+            step.requires = [symbol_by_name(dep) for dep in step.requires]
         stream = deque(step.requires for step in values(steps))
         while stream:
             for node in stream.popleft():
@@ -283,7 +285,6 @@ class StepType(type):
         attrs.update(
             __qualname__=qname,
             name=attrs.get('name') or qname,
-            requires=attrs.get('requires', ()),
         )
         return super(StepType, cls).__new__(cls, name, bases, attrs)
 
@@ -392,7 +393,7 @@ class StartStopStep(Step):
 
 
 class ConsumerStep(StartStopStep):
-    requires = ('Connection', )
+    requires = ('celery.worker.consumer:Connection', )
     consumers = None
 
     def get_consumers(self, channel):
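
A hedged sketch of a custom bootstep after the change above: requires
entries can now be given as fully qualified names, which _firstpass()
resolves lazily through symbol_by_name().  The step and app names are
invented for illustration::

    from celery import Celery, bootsteps

    class MyConsumerStep(bootsteps.ConsumerStep):
        # resolved to celery.worker.consumer.Connection at blueprint time
        requires = ('celery.worker.consumer:Connection', )

        def get_consumers(self, channel):
            return []   # would normally return kombu Consumer instances

    app = Celery('proj', broker='amqp://')
    app.steps['consumer'].add(MyConsumerStep)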

+ 248 - 66
celery/canvas.py

@@ -12,14 +12,15 @@
 """
 from __future__ import absolute_import
 
+from collections import MutableSequence, deque
 from copy import deepcopy
 from functools import partial as _partial, reduce
 from operator import itemgetter
 from itertools import chain as _chain
 
-from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
+from kombu.utils import cached_property, fxrange, reprcall, uuid
 
-from celery._state import current_app
+from celery._state import current_app, get_current_worker_task
 from celery.utils.functional import (
     maybe_list, is_list, regen,
     chunks as _chunks,
@@ -132,7 +133,7 @@ class Signature(dict):
     def from_dict(self, d, app=None):
         typ = d.get('subtask_type')
         if typ:
-            return self.TYPES[typ].from_dict(kwdict(d), app=app)
+            return self.TYPES[typ].from_dict(d, app=app)
         return Signature(d, app=app)
 
     def __init__(self, task=None, args=None, kwargs=None, options=None,
@@ -194,14 +195,19 @@ class Signature(dict):
         return s
     partial = clone
 
-    def freeze(self, _id=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             tid = opts['task_id']
         except KeyError:
             tid = opts['task_id'] = _id or uuid()
+        root_id = opts.setdefault('root_id', root_id)
         if 'reply_to' not in opts:
             opts['reply_to'] = self.app.oid
+        if group_id:
+            opts['group_id'] = group_id
+        if chord:
+            opts['chord'] = chord
         return self.AsyncResult(tid)
     _freeze = freeze
 
@@ -238,6 +244,8 @@ class Signature(dict):
 
     def append_to_list_option(self, key, value):
         items = self.options.setdefault(key, [])
+        if not isinstance(items, MutableSequence):
+            items = self.options[key] = [items]
         if value not in items:
             items.append(value)
         return value
@@ -278,7 +286,10 @@ class Signature(dict):
     def __reduce__(self):
         # for serialization, the task type is lazily loaded,
         # and not stored in the dict itself.
-        return subtask, (dict(self), )
+        return signature, (dict(self), )
+
+    def __json__(self):
+        return dict(self)
 
     def reprcall(self, *args, **kwargs):
         args, kwargs, _ = self._merge(args, kwargs, {})
@@ -344,20 +355,116 @@ class chain(Signature):
         if self.tasks:
             return self.apply_async(args, kwargs)
 
+    def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+                    task_id=None, link=None, link_error=None,
+                    publisher=None, root_id=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        tasks, results = self.prepare_steps(
+            args, self.tasks, root_id, link_error,
+        )
+        if not results:
+            return
+        result = results[-1]
+        last_task = tasks[-1]
+        if group_id:
+            last_task.set(group_id=group_id)
+        if chord:
+            last_task.set(chord=chord)
+        if task_id:
+            last_task.set(task_id=task_id)
+            result = last_task.type.AsyncResult(task_id)
+        # make sure we can do a link() and link_error() on a chain object.
+        if link:
+            tasks[-1].set(link=link)
+        tasks[0].apply_async(**options)
+        return result
+
+    def prepare_steps(self, args, tasks,
+                      root_id=None, link_error=None, app=None):
+        app = app or self.app
+        steps = deque(tasks)
+        next_step = prev_task = prev_res = None
+        tasks, results = [], []
+        i = 0
+        while steps:
+            task = steps.popleft()
+            if not i:  # first task
+                # first task gets partial args from chain
+                task = task.clone(args)
+                res = task.freeze(root_id=root_id)
+                root_id = res.id if root_id is None else root_id
+            else:
+                task = task.clone()
+                res = task.freeze(root_id=root_id)
+            i += 1
+
+            if isinstance(task, group):
+                task = maybe_unroll_group(task)
+
+            if isinstance(task, chain):
+                # splice the chain
+                steps.extendleft(reversed(task.tasks))
+                continue
+            elif isinstance(task, group) and steps and \
+                    not isinstance(steps[0], group):
+                # automatically upgrade group(...) | s to chord(group, s)
+                try:
+                    next_step = steps.popleft()
+                    # for chords we freeze by pretending it's a normal
+                    # signature instead of a group.
+                    res = Signature.freeze(next_step)
+                    task = chord(
+                        task, body=next_step,
+                        task_id=res.task_id, root_id=root_id,
+                    )
+                except IndexError:
+                    pass  # no callback, so keep as group.
+
+            if prev_task:
+                # link previous task to this task.
+                prev_task.link(task)
+                # set AsyncResult.parent
+                if not res.parent:
+                    res.parent = prev_res
+
+            if link_error:
+                task.set(link_error=link_error)
+
+            if not isinstance(prev_task, chord):
+                results.append(res)
+                tasks.append(task)
+            prev_task, prev_res = task, res
+
+        return tasks, results
+
+    def apply(self, args=(), kwargs={}, **options):
+        last, fargs = None, args
+        for task in self.tasks:
+            res = task.clone(fargs).apply(
+                last and (last.get(), ), **options
+            )
+            res.parent, last, fargs = last, res, None
+        return last
+
     @classmethod
     def from_dict(self, d, app=None):
         tasks = d['kwargs']['tasks']
         if d['args'] and tasks:
             # partial args passed on to first task in chain (Issue #1057).
             tasks[0]['args'] = tasks[0]._merge(d['args'])[0]
-        return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
+        return chain(*d['kwargs']['tasks'], app=app, **d['options'])
 
     @property
-    def type(self):
-        try:
-            return self._type or self.tasks[0].type.app.tasks['celery.chain']
-        except KeyError:
-            return self.app.tasks['celery.chain']
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app or current_app
 
     def __repr__(self):
         return ' | '.join(repr(t) for t in self.tasks)
@@ -448,11 +555,6 @@ def _maybe_group(tasks):
     return tasks
 
 
-def _maybe_clone(tasks, app):
-    return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
-            for s in tasks]
-
-
 @Signature.register_type
 class group(Signature):
 
@@ -471,15 +573,66 @@ class group(Signature):
             # partial args passed on to all tasks in the group (Issue #1057).
             for task in tasks:
                 task['args'] = task._merge(d['args'])[0]
-        return group(tasks, app=app, **kwdict(d['options']))
-
-    def apply_async(self, args=(), kwargs=None, **options):
-        tasks = _maybe_clone(self.tasks, app=self._app)
-        if not tasks:
+        return group(tasks, app=app, **d['options'])
+
+    def _prepared(self, tasks, partial_args, group_id, root_id, dict=dict,
+                  Signature=Signature, from_dict=Signature.from_dict):
+        for task in tasks:
+            if isinstance(task, dict):
+                if isinstance(task, Signature):
+                    # local sigs are always of type Signature, and we
+                    # clone them to make sure we do not modify the originals.
+                    task = task.clone()
+                else:
+                    # serialized sigs must be converted to Signature.
+                    task = from_dict(task)
+            if partial_args and not task.immutable:
+                task.args = tuple(partial_args) + tuple(task.args)
+            yield task, task.freeze(group_id=group_id, root_id=root_id)
+
+    def _apply_tasks(self, tasks, producer=None, app=None, **options):
+        app = app or self.app
+        with app.producer_or_acquire(producer) as producer:
+            for sig, res in tasks:
+                sig.apply_async(producer=producer, add_to_parent=False,
+                                **options)
+                yield res
+
+    def _freeze_gid(self, options):
+        # remove task_id and use that as the group_id;
+        # if we don't remove it, every task will have the same id...
+        options = dict(self.options, **options)
+        options['group_id'] = group_id = (
+            options.pop('task_id', uuid()))
+        return options, group_id, options.get('root_id')
+
+    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+                    producer=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        if not self.tasks:
             return self.freeze()
-        type = self.type
-        return type(*type.prepare(dict(self.options, **options),
-                                  tasks, args))
+
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        result = self.app.GroupResult(
+            group_id, list(self._apply_tasks(tasks, producer, app, **options)),
+        )
+        parent_task = get_current_worker_task()
+        if add_to_parent and parent_task:
+            parent_task.add_trail(result)
+        return result
+
+    def apply(self, args=(), kwargs={}, **options):
+        app = self.app
+        if not self.tasks:
+            return self.freeze()  # empty group returns GroupResult
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        return app.GroupResult(group_id, [
+            sig.apply(**options) for sig, _ in tasks
+        ])
 
     def set_immutable(self, immutable):
         for task in self.tasks:
@@ -494,24 +647,26 @@ class group(Signature):
         sig = sig.clone().set(immutable=True)
         return self.tasks[0].link_error(sig)
 
-    def apply(self, *args, **kwargs):
-        if not self.tasks:
-            return self.freeze()  # empty group returns GroupResult
-        return Signature.apply(self, *args, **kwargs)
-
     def __call__(self, *partial_args, **options):
         return self.apply_async(partial_args, **options)
 
-    def freeze(self, _id=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             gid = opts['task_id']
         except KeyError:
             gid = opts['task_id'] = uuid()
+        if group_id:
+            opts['group_id'] = group_id
+        if chord:
+            opts['chord'] = chord
+        root_id = opts.setdefault('root_id', root_id)
         new_tasks, results = [], []
         for task in self.tasks:
             task = maybe_signature(task, app=self._app).clone()
-            results.append(task._freeze())
+            results.append(task.freeze(
+                group_id=group_id, chord=chord, root_id=root_id,
+            ))
             new_tasks.append(task)
         self.tasks = self.kwargs['tasks'] = new_tasks
         return self.app.GroupResult(gid, results)
@@ -530,14 +685,14 @@ class group(Signature):
         return repr(self.tasks)
 
     @property
-    def type(self):
-        if self._type:
-            return self._type
-        # taking the app from the first task in the list, there may be a
-        # better solution for this, e.g. to consolidate tasks with the same
-        # app and apply them in batches.
-        app = self._app if self._app else self.tasks[0].type.app
-        return app.tasks[self['task']]
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app if app is not None else current_app
 
 
 @Signature.register_type
@@ -552,10 +707,13 @@ class chord(Signature):
         )
         self.subtask_type = 'chord'
 
+    def freeze(self, *args, **kwargs):
+        return self.body.freeze(*args, **kwargs)
+
     @classmethod
     def from_dict(self, d, app=None):
-        args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
-        return self(*args, app=app, **kwdict(d))
+        args, d['kwargs'] = self._unpack_args(**d['kwargs'])
+        return self(*args, app=app, **d)
 
     @staticmethod
     def _unpack_args(header=None, body=None, **kwargs):
@@ -563,33 +721,56 @@ class chord(Signature):
         # than manually popping things off.
         return (header, body), kwargs
 
-    @property
-    def type(self):
-        if self._type:
-            return self._type
-        # we will be able to fix this mess in 3.2 when we no longer
-        # require an actual task implementation for chord/group
-        if self._app:
-            app = self._app
-        else:
-            try:
-                app = self.tasks[0].type.app
-            except IndexError:
-                app = self.body.type.app
-        return app.tasks['celery.chord']
-
-    def apply_async(self, args=(), kwargs={}, task_id=None, **options):
+    @cached_property
+    def app(self):
+        app = self._app
+        if app is None:
+            app = self.tasks[0]._app
+            if app is None:
+                app = self.body._app
+        return app if app is not None else current_app
+
+    def apply_async(self, args=(), kwargs={}, task_id=None,
+                    producer=None, publisher=None, connection=None,
+                    router=None, result_cls=None, **options):
         body = kwargs.get('body') or self.kwargs['body']
         kwargs = dict(self.kwargs, **kwargs)
         body = body.clone(**options)
+        app = self.app
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply((), kwargs,
+                              body=body, task_id=task_id, **options)
+        return self.run(tasks, body, args, task_id=task_id, **options)
+
+    def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
+        body = self.body if body is None else body
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        return body.apply(
+            args=(tasks.apply().get(propagate=propagate), ),
+        )
 
-        _chord = self.type
-        if _chord.app.conf.CELERY_ALWAYS_EAGER:
-            return self.apply((), kwargs, task_id=task_id, **options)
-        res = body.freeze(task_id)
-        parent = _chord(self.tasks, body, args, **options)
-        res.parent = parent
-        return res
+    def run(self, header, body, partial_args, app=None, interval=None,
+            countdown=1, max_retries=None, propagate=None, eager=False,
+            task_id=None, **options):
+        app = app or self.app
+        propagate = (app.conf.CELERY_CHORD_PROPAGATES
+                     if propagate is None else propagate)
+        group_id = uuid()
+        root_id = body.options.get('root_id')
+        body.setdefault('chord_size', len(header.tasks))
+        results = header.freeze(
+            group_id=group_id, chord=body, root_id=root_id).results
+        bodyres = body.freeze(task_id, root_id=root_id)
+
+        parent = app.backend.apply_chord(
+            header, partial_args, group_id, body,
+            interval=interval, countdown=countdown,
+            max_retries=max_retries, propagate=propagate, result=results)
+        bodyres.parent = parent
+        return bodyres
 
     def __call__(self, body=None, **options):
         return self.apply_async((), {'body': body} if body else {}, **options)
@@ -626,7 +807,7 @@ class chord(Signature):
 
 
 def signature(varies, *args, **kwargs):
-    if not (args or kwargs) and isinstance(varies, dict):
+    if isinstance(varies, dict):
         if isinstance(varies, Signature):
             return varies.clone()
         return Signature.from_dict(varies)
@@ -638,9 +819,10 @@ def maybe_signature(d, app=None):
     if d is not None:
         if isinstance(d, dict):
             if not isinstance(d, Signature):
-                return signature(d, app=app)
+                d = signature(d)
         elif isinstance(d, list):
             return [maybe_signature(s, app=app) for s in d]
+
         if app is not None:
             d._app = app
         return d
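
An illustrative workflow for the rewritten canvas code above: when the
chain is applied, prepare_steps() upgrades a group followed by another
signature into chord(group, signature).  The app, broker/backend URLs and
task names are placeholders::

    from celery import Celery, chain, group

    app = Celery('proj', broker='amqp://', backend='redis://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # the group step and the following signature are combined into a chord,
    # so tsum() receives the list of results from the ten add() calls.
    workflow = chain(group(add.s(i, i) for i in range(10)), tsum.s())
    result = workflow.apply_async()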

+ 53 - 48
celery/concurrency/asynpool.py

@@ -37,16 +37,13 @@ from amqp.utils import promise
 from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard import pool as _pool
 from billiard.compat import buf_t, setblocking, isblocking
-from billiard.einfo import ExceptionInfo
 from billiard.queues import _SimpleQueue
 from kombu.async import READ, WRITE, ERR
 from kombu.serialization import pickle as _pickle
 from kombu.utils import fxrange
-from kombu.utils.compat import get_errno
 from kombu.utils.eventio import SELECT_BAD_FD
 from celery.five import Counter, items, values
 from celery.utils.log import get_logger
-from celery.utils.text import truncate
 from celery.worker import state as worker_state
 
 try:
@@ -96,8 +93,6 @@ SCHED_STRATEGIES = {
     'fair': SCHED_STRATEGY_FAIR,
 }
 
-RESULT_MAXLEN = 128
-
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 
 
@@ -143,14 +138,14 @@ def _select(readers=None, writers=None, err=None, timeout=0):
             r = list(set(r) | set(e))
         return r, w, 0
     except (select.error, socket.error) as exc:
-        if get_errno(exc) == errno.EINTR:
+        if exc.errno == errno.EINTR:
             return [], [], 1
-        elif get_errno(exc) in SELECT_BAD_FD:
+        elif exc.errno in SELECT_BAD_FD:
             for fd in readers | writers | err:
                 try:
                     select.select([fd], [], [], 0)
                 except (select.error, socket.error) as exc:
-                    if get_errno(exc) not in SELECT_BAD_FD:
+                    if exc.errno not in SELECT_BAD_FD:
                         raise
                     readers.discard(fd)
                     writers.discard(fd)
@@ -170,11 +165,6 @@ class Worker(_pool.Worker):
         # is writable.
         self.outq.put((WORKER_UP, (pid, )))
 
-    def prepare_result(self, result, RESULT_MAXLEN=RESULT_MAXLEN):
-        if not isinstance(result, ExceptionInfo):
-            return truncate(repr(result), RESULT_MAXLEN)
-        return result
-
 
 class ResultHandler(_pool.ResultHandler):
     """Handles messages from the pool processes."""
@@ -205,7 +195,7 @@ class ResultHandler(_pool.ResultHandler):
                     fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr,
                 )
             except OSError as exc:
-                if get_errno(exc) not in UNAVAIL:
+                if exc.errno not in UNAVAIL:
                     raise
                 yield
             else:
@@ -227,7 +217,7 @@ class ResultHandler(_pool.ResultHandler):
                     fd, bufv[Br:] if readcanbuf else bufv, body_size - Br,
                 )
             except OSError as exc:
-                if get_errno(exc) not in UNAVAIL:
+                if exc.errno not in UNAVAIL:
                     raise
                 yield
             else:
@@ -250,21 +240,21 @@ class ResultHandler(_pool.ResultHandler):
         fileno_to_outq = self.fileno_to_outq
         on_state_change = self.on_state_change
         add_reader = hub.add_reader
-        hub_remove = hub.remove
+        remove_reader = hub.remove_reader
         recv_message = self._recv_message
 
         def on_result_readable(fileno):
             try:
                 fileno_to_outq[fileno]
             except KeyError:  # process gone
-                return hub_remove(fileno)
+                return remove_reader(fileno)
             it = recv_message(add_reader, fileno, on_state_change)
             try:
                 next(it)
             except StopIteration:
                 pass
             except (IOError, OSError, EOFError):
-                hub_remove(fileno)
+                remove_reader(fileno)
             else:
                 add_reader(fileno, it)
         return on_result_readable
@@ -347,8 +337,9 @@ class AsynPool(_pool.Pool):
         processes = self.cpu_count() if processes is None else processes
         self.synack = synack
         # create queue-pairs for all our processes in advance.
-        self._queues = dict((self.create_process_queues(), None)
-                            for _ in range(processes))
+        self._queues = {
+            self.create_process_queues(): None for _ in range(processes)
+        }
 
         # inqueue fileno -> process mapping
         self._fileno_to_inq = {}
@@ -485,7 +476,9 @@ class AsynPool(_pool.Pool):
     def _create_process_handlers(self, hub, READ=READ, ERR=ERR):
         """For async pool this will create the handlers called
         when a process is up/down and etc."""
-        add_reader, hub_remove = hub.add_reader, hub.remove
+        add_reader, remove_reader, remove_writer = (
+            hub.add_reader, hub.remove_reader, hub.remove_writer,
+        )
         cache = self._cache
         all_inqueues = self._all_inqueues
         fileno_to_inq = self._fileno_to_inq
@@ -536,7 +529,7 @@ class AsynPool(_pool.Pool):
 
         self.on_process_up = on_process_up
 
-        def _remove_from_index(obj, proc, index, callback=None):
+        def _remove_from_index(obj, proc, index, remove_fun, callback=None):
             # this remove the file descriptors for a process from
             # the indices.  we have to make sure we don't overwrite
             # another processes fds, as the fds may be reused.
@@ -552,7 +545,7 @@ class AsynPool(_pool.Pool):
             except KeyError:
                 pass
             else:
-                hub_remove(fd)
+                remove_fun(fd)
                 if callback is not None:
                     callback(fd)
             return fd
@@ -562,14 +555,29 @@ class AsynPool(_pool.Pool):
             if proc.dead:
                 return
             process_flush_queues(proc)
-            _remove_from_index(proc.outq._reader, proc, fileno_to_outq)
+            _remove_from_index(
+                proc.outq._reader, proc, fileno_to_outq, remove_reader,
+            )
             if proc.synq:
-                _remove_from_index(proc.synq._writer, proc, fileno_to_synq)
-            inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq,
-                                     callback=all_inqueues.discard)
+                _remove_from_index(
+                    proc.synq._writer, proc, fileno_to_synq, remove_writer,
+                )
+            inq = _remove_from_index(
+                proc.inq._writer, proc, fileno_to_inq, remove_writer,
+                callback=all_inqueues.discard,
+            )
             if inq:
                 busy_workers.discard(inq)
-            hub_remove(proc.sentinel)
+            remove_reader(proc.sentinel)
+            waiting_to_start.discard(proc)
+            self._active_writes.discard(proc.inqW_fd)
+            remove_writer(proc.inqW_fd)
+            remove_reader(proc.outqR_fd)
+            if proc.synqR_fd:
+                remove_reader(proc.synqR_fd)
+            if proc.synqW_fd:
+                self._active_writes.discard(proc.synqW_fd)
+                remove_reader(proc.synqW_fd)
         self.on_process_down = on_process_down
 
     def _create_write_handlers(self, hub,
@@ -713,7 +721,7 @@ class AsynPool(_pool.Pool):
                         except StopIteration:
                             pass
                         except OSError as exc:
-                            if get_errno(exc) != errno.EBADF:
+                            if exc.errno != errno.EBADF:
                                 raise
                         else:
                             add_writer(ready_fd, cor)
@@ -756,7 +764,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Hw += send(header, Hw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         errors += 1
@@ -772,7 +780,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Bw += send(body, Bw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         errors += 1
@@ -821,7 +829,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Hw += send(header, Hw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         yield
 
@@ -830,7 +838,7 @@ class AsynPool(_pool.Pool):
                     try:
                         Bw += send(body, Bw)
                     except Exception as exc:
-                        if get_errno(exc) not in UNAVAIL:
+                        if getattr(exc, 'errno', None) not in UNAVAIL:
                             raise
                         # suspend until more data
                         yield
@@ -903,7 +911,7 @@ class AsynPool(_pool.Pool):
             self._busy_workers.clear()
 
     def _flush_writer(self, proc, writer):
-        fds = set([proc.inq._writer])
+        fds = {proc.inq._writer}
         try:
             while fds:
                 if not proc._is_alive():
@@ -932,9 +940,9 @@ class AsynPool(_pool.Pool):
         """Grow the pool by ``n`` proceses."""
         diff = max(self._processes - len(self._queues), 0)
         if diff:
-            self._queues.update(
-                dict((self.create_process_queues(), None) for _ in range(diff))
-            )
+            self._queues.update({
+                self.create_process_queues(): None for _ in range(diff)
+            })
 
     def on_shrink(self, n):
         """Shrink the pool by ``n`` processes."""
@@ -960,14 +968,13 @@ class AsynPool(_pool.Pool):
         return inq, outq, synq
 
     def on_process_alive(self, pid):
-        """Handler called when the WORKER_UP message is received
+        """Handler called when the :const:`WORKER_UP` message is received
         from a child process, which marks the process as ready
         to receive work."""
         try:
             proc = next(w for w in self._pool if w.pid == pid)
         except StopIteration:
-            # process already exited :(  this will be handled elsewhere.
-            return
+            return logger.warning('process with pid=%s already exited', pid)
         assert proc.inqW_fd not in self._fileno_to_inq
         assert proc.inqW_fd not in self._all_inqueues
         self._waiting_to_start.discard(proc)
@@ -1033,7 +1040,7 @@ class AsynPool(_pool.Pool):
                 try:
                     proc.inq.put(None)
                 except OSError as exc:
-                    if get_errno(exc) != errno.EBADF:
+                    if exc.errno != errno.EBADF:
                         raise
 
     def create_result_handler(self):
@@ -1077,21 +1084,19 @@ class AsynPool(_pool.Pool):
         """
         resq = proc.outq._reader
         on_state_change = self._result_handler.on_state_change
-        fds = set([resq])
+        fds = {resq}
         while fds and not resq.closed and self._state != TERMINATE:
             readable, _, again = _select(fds, None, fds, timeout=0.01)
             if readable:
                 try:
                     task = resq.recv()
                 except (OSError, IOError, EOFError) as exc:
-                    if get_errno(exc) == errno.EINTR:
+                    _errno = getattr(exc, 'errno', None)
+                    if _errno == errno.EINTR:
                         continue
-                    elif get_errno(exc) == errno.EAGAIN:
+                    elif _errno == errno.EAGAIN:
                         break
-                    else:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                    if get_errno(exc) not in UNAVAIL:
+                    elif _errno not in UNAVAIL:
                         debug('got %r while flushing process %r',
                               exc, proc, exc_info=1)
                     break

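The asynpool hunks above drop kombu's ``get_errno()`` helper and read the error number straight off the exception instead: ``exc.errno`` where an :exc:`OSError` is guaranteed, and ``getattr(exc, 'errno', None)`` where the exception may not carry one. A minimal sketch of the idiom (``UNAVAIL`` is an assumption mirroring the module constant used above):

    import errno

    UNAVAIL = {errno.EAGAIN, errno.EINTR}  # assumed to mirror asynpool's UNAVAIL

    def read_some(sock):
        # tolerate "temporarily unavailable" errors on a non-blocking socket
        try:
            return sock.recv(4096)
        except OSError as exc:
            if getattr(exc, 'errno', None) not in UNAVAIL:
                raise
            return b''  # nothing readable right now; the caller retries later
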
+ 4 - 2
celery/concurrency/base.py

@@ -66,11 +66,13 @@ class BasePool(object):
 
     _state = None
     _pool = None
+    _does_debug = True
 
     #: only used by multiprocessing pool
     uses_semaphore = False
 
     task_join_will_block = True
+    body_can_be_buffer = False
 
     def __init__(self, limit=None, putlocks=True,
                  forking_enable=True, callbacks_propagate=(), **options):
@@ -79,7 +81,6 @@ class BasePool(object):
         self.options = options
         self.forking_enable = forking_enable
         self.callbacks_propagate = callbacks_propagate
-        self._does_debug = logger.isEnabledFor(logging.DEBUG)
 
     def on_start(self):
         pass
@@ -111,7 +112,7 @@ class BasePool(object):
     def maintain_pool(self, *args, **kwargs):
         pass
 
-    def terminate_job(self, pid):
+    def terminate_job(self, pid, signal=None):
         raise NotImplementedError(
             '{0} does not implement kill_job'.format(type(self)))
 
@@ -128,6 +129,7 @@ class BasePool(object):
         self.on_terminate()
 
     def start(self):
+        self._does_debug = logger.isEnabledFor(logging.DEBUG)
         self.on_start()
         self._state = self.RUN
 

+ 7 - 0
celery/concurrency/eventlet.py

@@ -142,3 +142,10 @@ class TaskPool(base.BasePool):
         self._quick_put(apply_target, target, args, kwargs,
                         callback, accept_callback,
                         self.getpid)
+
+    def _get_info(self):
+        return {
+            'max-concurrency': self.limit,
+            'free-threads': self._pool.free(),
+            'running-threads': self._pool.running(),
+        }

+ 7 - 2
celery/concurrency/prefork.py

@@ -57,10 +57,15 @@ def process_initializer(app, hostname):
     # run once per process.
     app.loader.init_worker()
     app.loader.init_worker_process()
+    logfile = os.environ.get('CELERY_LOG_FILE') or None
+    if logfile and '%i' in logfile.lower():
+        # logfile path will differ so need to set up logging again.
+        app.log.already_setup = False
     app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
-                  os.environ.get('CELERY_LOG_FILE') or None,
+                  logfile,
                   bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
-                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
+                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')),
+                  hostname=hostname)
     if os.environ.get('FORKED_BY_MULTIPROCESSING'):
         # pool did execv after fork
         trace.setup_worker_optimizations(app)

+ 1 - 0
celery/concurrency/solo.py

@@ -17,6 +17,7 @@ __all__ = ['TaskPool']
 
 class TaskPool(BasePool):
     """Solo task pool (blocking, inline, fast)."""
+    body_can_be_buffer = True
 
     def __init__(self, *args, **kwargs):
         super(TaskPool, self).__init__(*args, **kwargs)

+ 35 - 32
celery/contrib/abortable.py

@@ -28,49 +28,52 @@ In the consumer:
 
 .. code-block:: python
 
-   from celery.contrib.abortable import AbortableTask
-   from celery.utils.log import get_task_logger
-
-   logger = get_logger(__name__)
-
-   class MyLongRunningTask(AbortableTask):
-
-       def run(self, **kwargs):
-           results = []
-           for x in range(100):
-               # Check after every 5 loops..
-               if x % 5 == 0:  # alternatively, check when some timer is due
-                   if self.is_aborted(**kwargs):
-                       # Respect the aborted status and terminate
-                       # gracefully
-                       logger.warning('Task aborted.')
-                       return
-               y = do_something_expensive(x)
-               results.append(y)
-           logger.info('Task finished.')
-           return results
-
+    from __future__ import absolute_import
+
+    from celery.contrib.abortable import AbortableTask
+    from celery.utils.log import get_task_logger
+
+    from proj.celery import app
+
+    logger = get_task_logger(__name__)
+
+    @app.task(bind=True, base=AbortableTask)
+    def long_running_task(self):
+        results = []
+        for i in range(100):
+            # check after every 5 iterations...
+            # (or alternatively, check when some timer is due)
+            if not i % 5:
+                if self.is_aborted():
+                    # respect aborted state, and terminate gracefully.
+                    logger.warning('Task aborted')
+                    return
+            value = do_something_expensive(i)
+            results.append(value)
+        logger.info('Task complete')
+        return results
 
 In the producer:
 
 .. code-block:: python
 
-   from myproject.tasks import MyLongRunningTask
+    from __future__ import absolute_import
 
-   def myview(request):
+    import time
 
-       async_result = MyLongRunningTask.delay()
-       # async_result is of type AbortableAsyncResult
+    from proj.tasks import long_running_task
 
-       # After 10 seconds, abort the task
-       time.sleep(10)
-       async_result.abort()
+    def myview(request):
+        # result is of type AbortableAsyncResult
+        result = long_running_task.delay()
 
-       ...
+        # abort the task after 10 seconds
+        time.sleep(10)
+        result.abort()
 
-After the `async_result.abort()` call, the task execution is not
+After the `result.abort()` call, the task execution is not
 aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking the `async_result` status, or call `async_result.wait()` to
+checking `result.state`, or call `result.get(timeout=None)` to
 have it block until the task is finished.
 
 .. note::

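As the paragraph above notes, ``result.abort()`` is only a request: the task has to notice the aborted flag itself. A rough producer-side polling sketch, reusing the hypothetical ``proj.tasks.long_running_task`` from the example:

    import time

    from proj.tasks import long_running_task  # hypothetical module from the example

    result = long_running_task.delay()
    time.sleep(10)
    result.abort()             # only marks the task as aborted; it must check is_aborted()
    while not result.ready():  # keep polling until the task actually returns
        time.sleep(1)
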
+ 5 - 4
celery/contrib/batches.py

@@ -47,7 +47,7 @@ messages, and every 10 seconds.
 
     from celery.contrib.batches import Batches
 
-    wot_api_target = "https://api.mywot.com/0.4/public_link_json"
+    wot_api_target = 'https://api.mywot.com/0.4/public_link_json'
 
     @app.task(base=Batches, flush_every=100, flush_interval=10)
     def wot_api(requests):
@@ -64,7 +64,7 @@ messages, and every 10 seconds.
         domains = [urlparse(url).netloc for url in urls]
         response = requests.get(
             wot_api_target,
-            params={"hosts": ('/').join(set(domains)) + '/'}
+            params={'hosts': ('/').join(set(domains)) + '/'}
         )
         return [response.json[domain] for domain in domains]
 
@@ -88,7 +88,7 @@ from itertools import count
 from celery.task import Task
 from celery.five import Empty, Queue
 from celery.utils.log import get_logger
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import noop
 
 __all__ = ['Batches']
@@ -226,7 +226,8 @@ class Batches(Task):
                 self.flush(requests)
         if not requests:
             logger.debug('Batches: Cancelling timer: Nothing in buffer.')
-            self._tref.cancel()  # cancel timer.
+            if self._tref:
+                self._tref.cancel()  # cancel timer.
             self._tref = None
 
     def apply_buffer(self, requests, args=(), kwargs={}):

+ 3 - 3
celery/contrib/migrate.py

@@ -99,7 +99,7 @@ def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                   queues=None, **kwargs):
     app = app_or_default(app)
     queues = prepare_queues(queues)
-    producer = app.amqp.TaskProducer(dest)
+    producer = app.amqp.Producer(dest)
     migrate = partial(migrate, producer, queues=queues)
 
     def on_declare_queue(queue):
@@ -186,7 +186,7 @@ def move(predicate, connection=None, exchange=None, routing_key=None,
     app = app_or_default(app)
     queues = [_maybe_queue(app, queue) for queue in source or []] or None
     with app.connection_or_acquire(connection, pool=False) as conn:
-        producer = app.amqp.TaskProducer(conn)
+        producer = app.amqp.Producer(conn)
         state = State()
 
         def on_task(body, message):
@@ -250,7 +250,7 @@ def start_filter(app, conn, filter, limit=None, timeout=1.0,
     if isinstance(tasks, string_t):
         tasks = set(tasks.split(','))
     if tasks is None:
-        tasks = set([])
+        tasks = set()
 
     def update_state(body, message):
         state.count += 1

+ 1 - 1
celery/contrib/rdb.py

@@ -34,7 +34,7 @@ Inspired by http://snippets.dzone.com/posts/show/7248
     base port.  The selected port will be logged by the worker.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import errno
 import os

+ 73 - 0
celery/contrib/sphinx.py

@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+celery.contrib.sphinx
+=====================
+
+Sphinx documentation plugin
+
+**Usage**
+
+Add the extension to your :file:`docs/conf.py` configuration module:
+
+.. code-block:: python
+
+    extensions = (...,
+                  'celery.contrib.sphinx')
+
+If you would like to change the prefix for tasks in reference documentation
+then you can change the ``celery_task_prefix`` configuration value:
+
+.. code-block:: python
+
+    celery_task_prefix = '(task)'  # < default
+
+
+With the extension installed `autodoc` will automatically find
+task decorated objects and generate the correct documentation for
+them (as well as adding a ``(task)`` prefix), and you can also refer
+to the tasks using the :task:`proj.tasks.add` syntax.
+
+Use ``.. autotask::`` to manually document a task.
+
+"""
+from __future__ import absolute_import
+
+from inspect import formatargspec, getargspec
+
+from sphinx.domains.python import PyModulelevel
+from sphinx.ext.autodoc import FunctionDocumenter
+
+from celery.app.task import BaseTask
+
+
+class TaskDocumenter(FunctionDocumenter):
+    objtype = 'task'
+    member_order = 11
+
+    @classmethod
+    def can_document_member(cls, member, membername, isattr, parent):
+        return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
+
+    def format_args(self):
+        wrapped = getattr(self.object, '__wrapped__')
+        if wrapped is not None:
+            argspec = getargspec(wrapped)
+            fmt = formatargspec(*argspec)
+            fmt = fmt.replace('\\', '\\\\')
+            return fmt
+        return ''
+
+    def document_members(self, all_members=False):
+        pass
+
+
+class TaskDirective(PyModulelevel):
+
+    def get_signature_prefix(self, sig):
+        return self.env.config.celery_task_prefix
+
+
+def setup(app):
+    app.add_autodocumenter(TaskDocumenter)
+    app.domains['py'].directives['task'] = TaskDirective
+    app.add_config_value('celery_task_prefix', '(task)', True)

+ 4 - 4
celery/datastructures.py

@@ -186,9 +186,9 @@ class DependencyGraph(object):
         graph = DependencyGraph()
         components = self._tarjan72()
 
-        NC = dict((node, component)
-                  for component in components
-                  for node in component)
+        NC = {
+            node: component for component in components for node in component
+        }
         for component in components:
             graph.add_arc(component)
         for node in self:
@@ -555,7 +555,7 @@ class LimitedSet(object):
     """Kind-of Set with limitations.
 
     Good for when you need to test for membership (`a in set`),
-    but the list might become to big.
+    but the list might become too big.
 
     :keyword maxlen: Maximum number of members before we start
                      evicting expired members.

+ 5 - 18
celery/events/__init__.py

@@ -13,7 +13,6 @@ from __future__ import absolute_import
 import os
 import time
 import threading
-import warnings
 
 from collections import deque
 from contextlib import contextmanager
@@ -36,14 +35,6 @@ event_exchange = Exchange('celeryev', type='topic')
 
 _TZGETTER = itemgetter('utcoffset', 'timestamp')
 
-W_YAJL = """
-anyjson is currently using the yajl library.
-This json implementation is broken, it severely truncates floats
-so timestamps will not work.
-
-Please uninstall yajl or force anyjson to use a different library.
-"""
-
 CLIENT_CLOCK_SKEW = -1
 
 
@@ -112,7 +103,7 @@ class EventDispatcher(object):
     You need to :meth:`close` this after use.
 
     """
-    DISABLED_TRANSPORTS = set(['sql'])
+    DISABLED_TRANSPORTS = {'sql'}
 
     app = None
 
@@ -124,7 +115,7 @@ class EventDispatcher(object):
 
     def __init__(self, connection=None, hostname=None, enabled=True,
                  channel=None, buffer_while_offline=True, app=None,
-                 serializer=None, groups=None):
+                 serializer=None, groups=None, delivery_mode=1):
         self.app = app_or_default(app or self.app)
         self.connection = connection
         self.channel = channel
@@ -139,6 +130,7 @@ class EventDispatcher(object):
         self.groups = set(groups or [])
         self.tzoffset = [-time.timezone, -time.altzone]
         self.clock = self.app.clock
+        self.delivery_mode = delivery_mode
         if not connection and channel:
             self.connection = channel.connection.client
         self.enabled = enabled
@@ -150,12 +142,6 @@ class EventDispatcher(object):
             self.enable()
         self.headers = {'hostname': self.hostname}
         self.pid = os.getpid()
-        self.warn_if_yajl()
-
-    def warn_if_yajl(self):
-        import anyjson
-        if anyjson.implementation.name == 'yajl':
-            warnings.warn(UserWarning(W_YAJL))
 
     def __enter__(self):
         return self
@@ -213,6 +199,7 @@ class EventDispatcher(object):
                 declare=[exchange],
                 serializer=self.serializer,
                 headers=self.headers,
+                delivery_mode=self.delivery_mode,
             )
 
     def send(self, type, blind=False, **fields):
@@ -300,7 +287,7 @@ class EventReceiver(ConsumerMixin):
         self.adjust_clock = self.clock.adjust
         self.forward_clock = self.clock.forward
         if accept is None:
-            accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
+            accept = {self.app.conf.CELERY_EVENT_SERIALIZER, 'json'}
         self.accept = accept
 
     def _get_queue_arguments(self):

+ 1 - 1
celery/events/dumper.py

@@ -7,7 +7,7 @@
     as they happen. Think of it like a `tcpdump` for Celery events.
 
 """
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
 
 import sys
 

+ 1 - 1
celery/events/snapshot.py

@@ -10,7 +10,7 @@
     in :mod:`djcelery.snapshots` in the `django-celery` distribution.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 from kombu.utils.limits import TokenBucket
 

+ 26 - 20
celery/events/state.py

@@ -30,12 +30,12 @@ from time import time
 from weakref import ref
 
 from kombu.clocks import timetuple
-from kombu.utils import cached_property, kwdict
+from kombu.utils import cached_property
 
 from celery import states
 from celery.five import class_property, items, values
 from celery.utils import deprecated
-from celery.utils.functional import LRUCache
+from celery.utils.functional import LRUCache, memoize
 from celery.utils.log import get_logger
 
 PYPY = hasattr(sys, 'pypy_version_info')
@@ -54,8 +54,6 @@ Substantial drift from %s may mean clocks are out of sync.  Current drift is
 %s seconds.  [orig: %s recv: %s]
 """
 
-CAN_KWDICT = sys.version_info >= (2, 6, 5)
-
 logger = get_logger(__name__)
 warn = logger.warning
 
@@ -66,6 +64,14 @@ R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'
 __all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']
 
 
+@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
+def _warn_drift(hostname, drift, local_received, timestamp):
+    # we use memoize here so the warning is only logged once per hostname
+    warn(DRIFT_WARNING, hostname, drift,
+         datetime.fromtimestamp(local_received),
+         datetime.fromtimestamp(timestamp))
+
+
 def heartbeat_expires(timestamp, freq=60,
                       expire_window=HEARTBEAT_EXPIRE_WINDOW,
                       Decimal=Decimal, float=float, isinstance=isinstance):
@@ -78,7 +84,7 @@ def heartbeat_expires(timestamp, freq=60,
 
 
 def _depickle_task(cls, fields):
-    return cls(**(fields if CAN_KWDICT else kwdict(fields)))
+    return cls(**fields)
 
 
 def with_unique_field(attr):
@@ -158,9 +164,8 @@ class Worker(object):
                     return
                 drift = abs(int(local_received) - int(timestamp))
                 if drift > HEARTBEAT_DRIFT_MAX:
-                    warn(DRIFT_WARNING, self.hostname, drift,
-                         datetime.fromtimestamp(local_received),
-                         datetime.fromtimestamp(timestamp))
+                    _warn_drift(self.hostname, drift,
+                                local_received, timestamp)
                 if local_received:
                     hearts = len(heartbeats)
                     if hearts > hbmax - 1:
@@ -215,7 +220,7 @@ class Worker(object):
     def _defaults(cls):
         """Deprecated, to be removed in 3.3"""
         source = cls()
-        return dict((k, getattr(source, k)) for k in cls._fields)
+        return {k: getattr(source, k) for k in cls._fields}
 
 
 @with_unique_field('uuid')
@@ -288,9 +293,9 @@ class Task(object):
             # this state logically happens-before the current state, so merge.
             keep = self.merge_rules.get(state)
             if keep is not None:
-                fields = dict(
-                    (k, v) for k, v in items(fields) if k in keep
-                )
+                fields = {
+                    k: v for k, v in items(fields) if k in keep
+                }
             for key, value in items(fields):
                 setattr(self, key, value)
         else:
@@ -316,9 +321,9 @@ class Task(object):
 
     def as_dict(self):
         get = object.__getattribute__
-        return dict(
-            (k, get(self, k)) for k in self._fields
-        )
+        return {
+            k: get(self, k) for k in self._fields
+        }
 
     def __reduce__(self):
         return _depickle_task, (self.__class__, self.as_dict())
@@ -372,7 +377,7 @@ class Task(object):
     def merge(self, state, timestamp, fields):
         keep = self.merge_rules.get(state)
         if keep is not None:
-            fields = dict((k, v) for k, v in items(fields) if k in keep)
+            fields = {k: v for k, v in items(fields) if k in keep}
         for key, value in items(fields):
             setattr(self, key, value)
 
@@ -380,7 +385,7 @@ class Task(object):
     def _defaults(cls):
         """Deprecated, to be removed in 3.3."""
         source = cls()
-        return dict((k, getattr(source, k)) for k in source._fields)
+        return {k: getattr(source, k) for k in source._fields}
 
 
 class State(object):
@@ -429,9 +434,10 @@ class State(object):
 
     def _clear_tasks(self, ready=True):
         if ready:
-            in_progress = dict(
-                (uuid, task) for uuid, task in self.itertasks()
-                if task.state not in states.READY_STATES)
+            in_progress = {
+                uuid: task for uuid, task in self.itertasks()
+                if task.state not in states.READY_STATES
+            }
             self.tasks.clear()
             self.tasks.update(in_progress)
         else:

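The memoized ``_warn_drift()`` added above is keyed on the hostname alone, so the clock-drift warning is logged at most once per worker host instead of on every heartbeat. The same warn-once pattern in plain Python (not the celery API) is roughly:

    import logging

    logger = logging.getLogger(__name__)
    _drift_warned = set()   # hostnames already warned about

    def warn_drift_once(hostname, drift):
        # emit the drift warning only the first time each host is seen
        if hostname not in _drift_warned:
            _drift_warned.add(hostname)
            logger.warning('substantial drift from %s: %r seconds', hostname, drift)
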
+ 5 - 2
celery/exceptions.py

@@ -8,6 +8,8 @@
 """
 from __future__ import absolute_import
 
+import numbers
+
 from .five import string_t
 
 from billiard.exceptions import (  # noqa
@@ -98,7 +100,8 @@ class Retry(Exception):
     #: Exception (if any) that caused the retry to happen.
     exc = None
 
-    #: Time of retry (ETA), either int or :class:`~datetime.datetime`.
+    #: Time of retry (ETA), either :class:`numbers.Real` or
+    #: :class:`~datetime.datetime`.
     when = None
 
     def __init__(self, message=None, exc=None, when=None, **kwargs):
@@ -112,7 +115,7 @@ class Retry(Exception):
         Exception.__init__(self, exc, when, **kwargs)
 
     def humanize(self):
-        if isinstance(self.when, int):
+        if isinstance(self.when, numbers.Real):
             return 'in {0.when}s'.format(self)
         return 'at {0.when}'.format(self)
 

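With the ``numbers.Real`` check above, ``Retry.humanize()`` treats any real-valued countdown (``int`` or ``float``) as a relative ETA and anything else, such as a :class:`~datetime.datetime`, as an absolute one. A small illustration based only on what the hunk shows:

    from datetime import datetime

    from celery.exceptions import Retry

    print(Retry(when=1.5).humanize())   # 'in 1.5s' -- floats now count as relative
    print(Retry(when=10).humanize())    # 'in 10s'
    print(Retry(when=datetime(2014, 6, 1)).humanize())  # 'at 2014-06-01 00:00:00'
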
+ 20 - 188
celery/five.py

@@ -10,164 +10,15 @@
 """
 from __future__ import absolute_import
 
-__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty',
-           'zip_longest', 'map', 'string', 'string_t',
-           'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values',
-           'nextfun', 'reraise', 'WhateverIO', 'with_metaclass',
-           'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d',
-           'class_property', 'reclassmethod', 'create_module',
-           'recreate_module', 'monotonic']
+__all__ = [
+    'class_property', 'reclassmethod', 'create_module', 'recreate_module',
+]
 
-import io
-
-try:
-    from collections import Counter
-except ImportError:  # pragma: no cover
-    from collections import defaultdict
-
-    def Counter():  # noqa
-        return defaultdict(int)
-
-############## py3k #########################################################
-import sys
-PY3 = sys.version_info[0] == 3
-
-try:
-    reload = reload                         # noqa
-except NameError:                           # pragma: no cover
-    from imp import reload                  # noqa
-
-try:
-    from UserList import UserList           # noqa
-except ImportError:                         # pragma: no cover
-    from collections import UserList        # noqa
-
-try:
-    from UserDict import UserDict           # noqa
-except ImportError:                         # pragma: no cover
-    from collections import UserDict        # noqa
-
-
-from kombu.five import monotonic
-
-if PY3:  # pragma: no cover
-    import builtins
-
-    from queue import Queue, Empty
-    from itertools import zip_longest
-
-    map = map
-    string = str
-    string_t = str
-    long_t = int
-    text_t = str
-    range = range
-    int_types = (int, )
-    _byte_t = bytes
-
-    open_fqdn = 'builtins.open'
-
-    def items(d):
-        return d.items()
-
-    def keys(d):
-        return d.keys()
-
-    def values(d):
-        return d.values()
-
-    def nextfun(it):
-        return it.__next__
-
-    exec_ = getattr(builtins, 'exec')
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-else:
-    import __builtin__ as builtins  # noqa
-    from Queue import Queue, Empty  # noqa
-    from itertools import imap as map, izip_longest as zip_longest  # noqa
-    string = unicode                # noqa
-    string_t = basestring           # noqa
-    text_t = unicode                # noqa
-    long_t = long                   # noqa
-    range = xrange                  # noqa
-    int_types = (int, long)         # noqa
-    _byte_t = (str, bytes)          # noqa
-
-    open_fqdn = '__builtin__.open'
-
-    def items(d):                   # noqa
-        return d.iteritems()
-
-    def keys(d):                    # noqa
-        return d.iterkeys()
-
-    def values(d):                  # noqa
-        return d.itervalues()
-
-    def nextfun(it):                # noqa
-        return it.next
-
-    def exec_(code, globs=None, locs=None):  # pragma: no cover
-        """Execute code in a namespace."""
-        if globs is None:
-            frame = sys._getframe(1)
-            globs = frame.f_globals
-            if locs is None:
-                locs = frame.f_locals
-            del frame
-        elif locs is None:
-            locs = globs
-        exec("""exec code in globs, locs""")
-
-    exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
-
-
-def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
-    """Class decorator to set metaclass.
-
-    Works with both Python 2 and Python 3 and it does not add
-    an extra class in the lookup order like ``six.with_metaclass`` does
-    (that is -- it copies the original class instead of using inheritance).
-
-    """
-
-    def _clone_with_metaclass(Class):
-        attrs = dict((key, value) for key, value in items(vars(Class))
-                     if key not in skip_attrs)
-        return Type(Class.__name__, Class.__bases__, attrs)
-
-    return _clone_with_metaclass
-
-
-############## collections.OrderedDict ######################################
-# was moved to kombu
-from kombu.utils.compat import OrderedDict  # noqa
-
-############## threading.TIMEOUT_MAX #######################################
-try:
-    from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
-except ImportError:
-    THREAD_TIMEOUT_MAX = 1e10  # noqa
-
-############## format(int, ',d') ##########################
-
-if sys.version_info >= (2, 7):  # pragma: no cover
-    def format_d(i):
-        return format(i, ',d')
-else:  # pragma: no cover
-    def format_d(i):  # noqa
-        s = '%d' % i
-        groups = []
-        while s and s[-1].isdigit():
-            groups.append(s[-3:])
-            s = s[:-3]
-        return s + ','.join(reversed(groups))
+# extends amqp.five
+from amqp.five import *  # noqa
+from amqp.five import __all__ as _all_five
 
+__all__ += _all_five
 
 ############## Module Generation ##########################
 
@@ -191,7 +42,7 @@ MODULE_DEPRECATED = """
 The module %s is deprecated and will be removed in a future version.
 """
 
-DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])
+DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
 
 # im_func is no longer available in Py3.
 # instead the unbound method itself can be used.
@@ -210,25 +61,17 @@ def getappattr(path):
     return current_app._rgetattr(path)
 
 
-def _compat_task_decorator(*args, **kwargs):
-    from celery import current_app
-    kwargs.setdefault('accept_magic_kwargs', True)
-    return current_app.task(*args, **kwargs)
-
-
 def _compat_periodic_task_decorator(*args, **kwargs):
     from celery.task import periodic_task
-    kwargs.setdefault('accept_magic_kwargs', True)
     return periodic_task(*args, **kwargs)
 
-
 COMPAT_MODULES = {
     'celery': {
         'execute': {
             'send_task': 'send_task',
         },
         'decorators': {
-            'task': _compat_task_decorator,
+            'task': 'task',
             'periodic_task': _compat_periodic_task_decorator,
         },
         'log': {
@@ -238,7 +81,6 @@ COMPAT_MODULES = {
             'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
         },
         'messaging': {
-            'TaskPublisher': 'amqp.TaskPublisher',
             'TaskConsumer': 'amqp.TaskConsumer',
             'establish_connection': 'connection',
             'get_consumer_set': 'amqp.TaskConsumer',
@@ -296,7 +138,7 @@ def reclassmethod(method):
     return classmethod(fun_of_method(method))
 
 
-class MagicModule(ModuleType):
+class LazyModule(ModuleType):
     _compat_modules = ()
     _all_by_module = {}
     _direct = {}
@@ -322,21 +164,23 @@ class MagicModule(ModuleType):
 
 
 def create_module(name, attrs, cls_attrs=None, pkg=None,
-                  base=MagicModule, prepare_attr=None):
+                  base=LazyModule, prepare_attr=None):
     fqdn = '.'.join([pkg.__name__, name]) if pkg else name
     cls_attrs = {} if cls_attrs is None else cls_attrs
     pkg, _, modname = name.rpartition('.')
     cls_attrs['__module__'] = pkg
 
-    attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr)
-                 for attr_name, attr in items(attrs))
+    attrs = {
+        attr_name: (prepare_attr(attr) if prepare_attr else attr)
+        for attr_name, attr in items(attrs)
+    }
     module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn)
     module.__dict__.update(attrs)
     return module
 
 
 def recreate_module(name, compat_modules=(), by_module={}, direct={},
-                    base=MagicModule, **attrs):
+                    base=LazyModule, **attrs):
     old_module = sys.modules[name]
     origins = get_origins(by_module)
     compat_modules = COMPAT_MODULES.get(name, ())
@@ -351,8 +195,9 @@ def recreate_module(name, compat_modules=(), by_module={}, direct={},
         ))),
     )
     new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
-    new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
-                               for mod in compat_modules))
+    new_module.__dict__.update({
+        mod: get_compat_module(new_module, mod) for mod in compat_modules
+    })
     return old_module, new_module
 
 
@@ -376,18 +221,5 @@ def get_compat_module(pkg, name):
 def get_origins(defs):
     origins = {}
     for module, attrs in items(defs):
-        origins.update(dict((attr, module) for attr in attrs))
+        origins.update({attr: module for attr in attrs})
     return origins
-
-
-_SIO_write = io.StringIO.write
-_SIO_init = io.StringIO.__init__
-
-
-class WhateverIO(io.StringIO):
-
-    def __init__(self, v=None, *a, **kw):
-        _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw)
-
-    def write(self, data):
-        _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data)

+ 15 - 6
celery/fixups/django.py

@@ -134,13 +134,22 @@ class DjangoWorkerFixup(object):
         )
 
     def validate_models(self):
-        from django.core.management.validation import get_validation_errors
         s = io.StringIO()
-        num_errors = get_validation_errors(s, None)
-        if num_errors:
-            raise RuntimeError(
-                'One or more Django models did not validate:\n{0}'.format(
-                    s.getvalue()))
+        try:
+            from django.core.management.validation import get_validation_errors
+        except ImportError:
+            import django
+            from django.core.management.base import BaseCommand
+            django.setup()
+            cmd = BaseCommand()
+            cmd.stdout, cmd.stderr = sys.stdout, sys.stderr
+            cmd.check()
+        else:
+            num_errors = get_validation_errors(s, None)
+            if num_errors:
+                raise RuntimeError(
+                    'One or more Django models did not validate:\n{0}'.format(
+                        s.getvalue()))
 
     def install(self):
         signals.beat_embedded_init.connect(self.close_database)

+ 2 - 2
celery/loaders/base.py

@@ -8,7 +8,6 @@
 """
 from __future__ import absolute_import
 
-import anyjson
 import imp as _imp
 import importlib
 import os
@@ -17,6 +16,7 @@ import sys
 
 from datetime import datetime
 
+from kombu.utils import json
 from kombu.utils import cached_property
 from kombu.utils.encoding import safe_str
 
@@ -178,7 +178,7 @@ class BaseLoader(object):
     def cmdline_config_parser(
             self, args, namespace='celery',
             re_type=re.compile(r'\((\w+)\)'),
-            extra_types={'json': anyjson.loads},
+            extra_types={'json': json.loads},
             override_types={'tuple': 'json',
                             'list': 'json',
                             'dict': 'json'}):

+ 33 - 2
celery/local.py

@@ -212,12 +212,27 @@ class PromiseProxy(Proxy):
 
     """
 
+    __slots__ = ('__pending__', )
+
     def _get_current_object(self):
         try:
             return object.__getattribute__(self, '__thing')
         except AttributeError:
             return self.__evaluate__()
 
+    def __then__(self, fun, *args, **kwargs):
+        if self.__evaluated__():
+            return fun(*args, **kwargs)
+        from collections import deque
+        try:
+            pending = object.__getattribute__(self, '__pending__')
+        except AttributeError:
+            pending = None
+        if pending is None:
+            pending = deque()
+            object.__setattr__(self, '__pending__', pending)
+        pending.append((fun, args, kwargs))
+
     def __evaluated__(self):
         try:
             object.__getattribute__(self, '__thing')
@@ -234,15 +249,31 @@ class PromiseProxy(Proxy):
                              '_Proxy__kwargs')):
         try:
             thing = Proxy._get_current_object(self)
+        except:
+            raise
+        else:
             object.__setattr__(self, '__thing', thing)
-            return thing
-        finally:
             for attr in _clean:
                 try:
                     object.__delattr__(self, attr)
                 except AttributeError:  # pragma: no cover
                     # May mask errors so ignore
                     pass
+            try:
+                pending = object.__getattribute__(self, '__pending__')
+            except AttributeError:
+                pass
+            else:
+                try:
+                    while pending:
+                        fun, args, kwargs = pending.popleft()
+                        fun(*args, **kwargs)
+                finally:
+                    try:
+                        object.__delattr__(self, '__pending__')
+                    except AttributeError:
+                        pass
+            return thing
 
 
 def maybe_evaluate(obj):

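The new ``__then__`` method above lets callers attach callbacks to a ``PromiseProxy``: they are queued while the proxy is still lazy and flushed once it is first evaluated (or run immediately if it already was). A toy stand-in for that behaviour, not the celery API:

    from collections import deque

    class LazyValue(object):
        # toy promise: evaluate a factory lazily, then flush queued callbacks

        def __init__(self, factory):
            self._factory = factory
            self._pending = deque()
            self._evaluated = False
            self._value = None

        def then(self, fun, *args, **kwargs):
            # run now if already evaluated, otherwise queue for later
            if self._evaluated:
                return fun(*args, **kwargs)
            self._pending.append((fun, args, kwargs))

        def get(self):
            if not self._evaluated:
                self._value, self._evaluated = self._factory(), True
                while self._pending:  # callbacks registered before evaluation
                    fun, args, kwargs = self._pending.popleft()
                    fun(*args, **kwargs)
            return self._value

    seen = []
    lazy = LazyValue(dict)
    lazy.then(seen.append, 'evaluated')  # nothing happens yet
    lazy.get()                           # now seen == ['evaluated']
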
+ 20 - 11
celery/platforms.py

@@ -12,6 +12,7 @@ from __future__ import absolute_import, print_function
 import atexit
 import errno
 import math
+import numbers
 import os
 import platform as _platform
 import signal as _signal
@@ -23,7 +24,6 @@ from collections import namedtuple
 from billiard import current_process
 # fileno used to be in this module
 from kombu.utils import maybe_fileno
-from kombu.utils.compat import get_errno
 from kombu.utils.encoding import safe_str
 from contextlib import contextmanager
 
@@ -35,6 +35,7 @@ _setproctitle = try_import('setproctitle')
 resource = try_import('resource')
 pwd = try_import('pwd')
 grp = try_import('grp')
+mputil = try_import('multiprocessing.util')
 
 __all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
            'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed',
@@ -49,6 +50,7 @@ EX_OK = getattr(os, 'EX_OK', 0)
 EX_FAILURE = 1
 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
 EX_USAGE = getattr(os, 'EX_USAGE', 64)
+EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)
 
 SYSTEM = _platform.system()
 IS_OSX = SYSTEM == 'Darwin'
@@ -258,7 +260,8 @@ def create_pidlock(pidfile):
 def _create_pidlock(pidfile):
     pidlock = Pidfile(pidfile)
     if pidlock.is_locked() and not pidlock.remove_if_stale():
-        raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid()))
+        print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
+        raise SystemExit(EX_CANTCREAT)
     pidlock.acquire()
     return pidlock
 
@@ -266,9 +269,10 @@ def _create_pidlock(pidfile):
 if hasattr(os, 'closerange'):
 
     def close_open_fds(keep=None):
-        keep = list(uniq(sorted(filter(None, (
-            maybe_fileno(f) for f in keep or []
-        )))))
+        # must make sure this is 0-inclusive (Issue #1882)
+        keep = list(uniq(sorted(
+            f for f in map(maybe_fileno, keep or []) if f is not None
+        )))
         maxfd = get_fdmax(default=2048)
         kL, kH = iter([-1] + keep), iter(keep + [maxfd])
         for low, high in zip_longest(kL, kH):
@@ -290,11 +294,13 @@ class DaemonContext(object):
     _is_open = False
 
     def __init__(self, pidfile=None, workdir=None, umask=None,
-                 fake=False, after_chdir=None, **kwargs):
+                 fake=False, after_chdir=None, after_forkers=True,
+                 **kwargs):
         self.workdir = workdir or DAEMON_WORKDIR
         self.umask = DAEMON_UMASK if umask is None else umask
         self.fake = fake
         self.after_chdir = after_chdir
+        self.after_forkers = after_forkers
         self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
 
     def redirect_to_null(self, fd):
@@ -313,9 +319,12 @@ class DaemonContext(object):
             if self.after_chdir:
                 self.after_chdir()
 
-            close_open_fds(self.stdfds)
-            for fd in self.stdfds:
-                self.redirect_to_null(maybe_fileno(fd))
+            if not self.fake:
+                close_open_fds(self.stdfds)
+                for fd in self.stdfds:
+                    self.redirect_to_null(maybe_fileno(fd))
+                if self.after_forkers and mputil is not None:
+                    mputil._run_after_forkers()
 
             self._is_open = True
     __enter__ = open
@@ -521,7 +530,7 @@ def maybe_drop_privileges(uid=None, gid=None):
         try:
             setuid(0)
         except OSError as exc:
-            if get_errno(exc) != errno.EPERM:
+            if exc.errno != errno.EPERM:
                 raise
             pass  # Good: cannot restore privileges.
         else:
@@ -606,7 +615,7 @@ class Signals(object):
 
     def signum(self, signal_name):
         """Get signal number from signal name."""
-        if isinstance(signal_name, int):
+        if isinstance(signal_name, numbers.Integral):
             return signal_name
         if not isinstance(signal_name, string_t) \
                 or not signal_name.isupper():

+ 101 - 29
celery/result.py

@@ -11,12 +11,11 @@ from __future__ import absolute_import
 import time
 import warnings
 
-from collections import deque
+from collections import OrderedDict, deque
 from contextlib import contextmanager
 from copy import copy
 
 from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
 
 from . import current_app
 from . import states
@@ -87,6 +86,7 @@ class AsyncResult(ResultBase):
         self.backend = backend or self.app.backend
         self.task_name = task_name
         self.parent = parent
+        self._cache = None
 
     def as_tuple(self):
         parent = self.parent
@@ -95,6 +95,7 @@ class AsyncResult(ResultBase):
 
     def forget(self):
         """Forget about (and possibly remove the result of) this task."""
+        self._cache = None
         self.backend.forget(self.id)
 
     def revoke(self, connection=None, terminate=False, signal=None,
@@ -118,7 +119,8 @@ class AsyncResult(ResultBase):
                                 terminate=terminate, signal=signal,
                                 reply=wait, timeout=timeout)
 
-    def get(self, timeout=None, propagate=True, interval=0.5):
+    def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True,
+            follow_parents=True):
         """Wait until task is ready, and return its result.
 
         .. warning::
@@ -133,6 +135,10 @@ class AsyncResult(ResultBase):
            retrieve the result.  Note that this does not have any effect
            when using the amqp result store backend, as it does not
            use polling.
+        :keyword no_ack: Enable amqp no ack (automatically acknowledge
+            message).  If this is :const:`False` then the message will
+            **not be acked**.
+        :keyword follow_parents: Reraise any exception raised by parent task.
 
         :raises celery.exceptions.TimeoutError: if `timeout` is not
             :const:`None` and the result does not arrive within `timeout`
@@ -143,15 +149,32 @@ class AsyncResult(ResultBase):
 
         """
         assert_will_not_block()
-        if propagate and self.parent:
-            for node in reversed(list(self._parents())):
-                node.get(propagate=True, timeout=timeout, interval=interval)
+        on_interval = None
+        if follow_parents and propagate and self.parent:
+            on_interval = self._maybe_reraise_parent_error
+            on_interval()
 
-        return self.backend.wait_for(self.id, timeout=timeout,
-                                     propagate=propagate,
-                                     interval=interval)
+        if self._cache:
+            if propagate:
+                self.maybe_reraise()
+            return self.result
+
+        try:
+            return self.backend.wait_for(
+                self.id, timeout=timeout,
+                propagate=propagate,
+                interval=interval,
+                on_interval=on_interval,
+                no_ack=no_ack,
+            )
+        finally:
+            self._get_task_meta()  # update self._cache
     wait = get  # deprecated alias to :meth:`get`.
 
+    def _maybe_reraise_parent_error(self):
+        for node in reversed(list(self._parents())):
+            node.maybe_reraise()
+
     def _parents(self):
         node = self.parent
         while node:
@@ -238,6 +261,10 @@ class AsyncResult(ResultBase):
         """Returns :const:`True` if the task failed."""
         return self.state == states.FAILURE
 
+    def maybe_reraise(self):
+        if self.state in states.PROPAGATE_STATES:
+            raise self.result
+
     def build_graph(self, intermediate=False, formatter=None):
         graph = DependencyGraph(
             formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
@@ -280,6 +307,9 @@ class AsyncResult(ResultBase):
     def __reduce_args__(self):
         return self.id, self.backend, self.task_name, None, self.parent
 
+    def __del__(self):
+        self._cache = None
+
     @cached_property
     def graph(self):
         return self.build_graph()
@@ -290,22 +320,41 @@ class AsyncResult(ResultBase):
 
     @property
     def children(self):
-        children = self.backend.get_children(self.id)
+        return self._get_task_meta().get('children')
+
+    def _get_task_meta(self):
+        if self._cache is None:
+            meta = self.backend.get_task_meta(self.id)
+            if meta:
+                state = meta['status']
+                if state == states.SUCCESS or state in states.PROPAGATE_STATES:
+                    return self._set_cache(meta)
+            return meta
+        return self._cache
+
+    def _set_cache(self, d):
+        state, children = d['status'], d.get('children')
+        if state in states.EXCEPTION_STATES:
+            d['result'] = self.backend.exception_to_python(d['result'])
         if children:
-            return [result_from_tuple(child, self.app) for child in children]
+            d['children'] = [
+                result_from_tuple(child, self.app) for child in children
+            ]
+        self._cache = d
+        return d
 
     @property
     def result(self):
         """When the task has been executed, this contains the return value.
         If the task raised an exception, this will be the exception
         instance."""
-        return self.backend.get_result(self.id)
+        return self._get_task_meta()['result']
     info = result
 
     @property
     def traceback(self):
         """Get the traceback of a failed task."""
-        return self.backend.get_traceback(self.id)
+        return self._get_task_meta().get('traceback')
 
     @property
     def state(self):
@@ -337,7 +386,7 @@ class AsyncResult(ResultBase):
                 then contains the tasks return value.
 
         """
-        return self.backend.get_status(self.id)
+        return self._get_task_meta()['status']
     status = state
 
     @property
@@ -426,6 +475,10 @@ class ResultSet(ResultBase):
         """
         return any(result.failed() for result in self.results)
 
+    def maybe_reraise(self):
+        for result in self.results:
+            result.maybe_reraise()
+
     def waiting(self):
         """Are any of the tasks incomplete?
 
@@ -506,7 +559,8 @@ class ResultSet(ResultBase):
             if timeout and elapsed >= timeout:
                 raise TimeoutError('The operation timed out')
 
-    def get(self, timeout=None, propagate=True, interval=0.5, callback=None):
+    def get(self, timeout=None, propagate=True, interval=0.5,
+            callback=None, no_ack=True):
         """See :meth:`join`
 
         This is here for API compatibility with :class:`AsyncResult`,
@@ -516,9 +570,10 @@ class ResultSet(ResultBase):
         """
         return (self.join_native if self.supports_native_join else self.join)(
             timeout=timeout, propagate=propagate,
-            interval=interval, callback=callback)
+            interval=interval, callback=callback, no_ack=no_ack)
 
-    def join(self, timeout=None, propagate=True, interval=0.5, callback=None):
+    def join(self, timeout=None, propagate=True, interval=0.5,
+             callback=None, no_ack=True):
         """Gathers the results of all tasks as a list in order.
 
         .. note::
@@ -557,6 +612,10 @@ class ResultSet(ResultBase):
                            ``result = app.AsyncResult(task_id)`` (both will
                            take advantage of the backend cache anyway).
 
+        :keyword no_ack: Automatic message acknowledgement (Note that if this
+            is set to :const:`False` then the messages *will not be
+            acknowledged*).
+
         :raises celery.exceptions.TimeoutError: if ``timeout`` is not
             :const:`None` and the operation takes longer than ``timeout``
             seconds.
@@ -573,16 +632,17 @@ class ResultSet(ResultBase):
                 remaining = timeout - (monotonic() - time_start)
                 if remaining <= 0.0:
                     raise TimeoutError('join operation timed out')
-            value = result.get(timeout=remaining,
-                               propagate=propagate,
-                               interval=interval)
+            value = result.get(
+                timeout=remaining, propagate=propagate,
+                interval=interval, no_ack=no_ack,
+            )
             if callback:
                 callback(result.id, value)
             else:
                 results.append(value)
         return results
 
-    def iter_native(self, timeout=None, interval=0.5):
+    def iter_native(self, timeout=None, interval=0.5, no_ack=True):
         """Backend optimized version of :meth:`iterate`.
 
         .. versionadded:: 2.2
@@ -597,12 +657,13 @@ class ResultSet(ResultBase):
         results = self.results
         if not results:
             return iter([])
-        return results[0].backend.get_many(
-            set(r.id for r in results), timeout=timeout, interval=interval,
+        return self.backend.get_many(
+            set(r.id for r in results),
+            timeout=timeout, interval=interval, no_ack=no_ack,
         )
 
     def join_native(self, timeout=None, propagate=True,
-                    interval=0.5, callback=None):
+                    interval=0.5, callback=None, no_ack=True):
         """Backend optimized version of :meth:`join`.
 
         .. versionadded:: 2.2
@@ -615,11 +676,11 @@ class ResultSet(ResultBase):
 
         """
         assert_will_not_block()
-        order_index = None if callback else dict(
-            (result.id, i) for i, result in enumerate(self.results)
-        )
+        order_index = None if callback else {
+            result.id: i for i, result in enumerate(self.results)
+        }
         acc = None if callback else [None for _ in range(len(self))]
-        for task_id, meta in self.iter_native(timeout, interval):
+        for task_id, meta in self.iter_native(timeout, interval, no_ack):
             value = meta['result']
             if propagate and meta['status'] in states.PROPAGATE_STATES:
                 raise value
@@ -656,7 +717,14 @@ class ResultSet(ResultBase):
 
     @property
     def supports_native_join(self):
-        return self.results[0].supports_native_join
+        try:
+            return self.results[0].supports_native_join
+        except IndexError:
+            pass
+
+    @property
+    def backend(self):
+        return self.app.backend if self.app else self.results[0].backend
 
 
 class GroupResult(ResultSet):
@@ -772,6 +840,10 @@ class EagerResult(AsyncResult):
         self._state = state
         self._traceback = traceback
 
+    def _get_task_meta(self):
+        return {'task_id': self.id, 'result': self._result, 'status':
+                self._state, 'traceback': self._traceback}
+
     def __reduce__(self):
         return self.__class__, self.__reduce_args__()
 

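A usage sketch for the keyword arguments ``AsyncResult.get()`` gains above; ``proj.tasks.add`` is a placeholder task, not part of the change:

    from proj.tasks import add   # hypothetical task module

    result = add.delay(2, 2)

    # no_ack=False: the result message will *not* be acknowledged automatically
    value = result.get(no_ack=False)

    # follow_parents=False: don't re-raise errors propagated from parent tasks
    value = result.get(follow_parents=False)
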
+ 11 - 10
celery/schedules.py

@@ -9,6 +9,7 @@
 """
 from __future__ import absolute_import
 
+import numbers
 import re
 
 from collections import namedtuple
@@ -20,8 +21,8 @@ from . import current_app
 from .five import range, string_t
 from .utils import is_iterable
 from .utils.timeutils import (
-    timedelta_seconds, weekday, maybe_timedelta, remaining,
-    humanize_seconds, timezone, maybe_make_aware, ffwd
+    weekday, maybe_timedelta, remaining, humanize_seconds,
+    timezone, maybe_make_aware, ffwd
 )
 from .datastructures import AttributeDict
 
@@ -115,7 +116,7 @@ class schedule(object):
         """
         last_run_at = self.maybe_make_aware(last_run_at)
         rem_delta = self.remaining_estimate(last_run_at)
-        remaining_s = timedelta_seconds(rem_delta)
+        remaining_s = max(rem_delta.total_seconds(), 0)
         if remaining_s == 0:
             return schedstate(is_due=True, next=self.seconds)
         return schedstate(is_due=False, next=remaining_s)
@@ -141,7 +142,7 @@ class schedule(object):
 
     @property
     def seconds(self):
-        return timedelta_seconds(self.run_every)
+        return max(self.run_every.total_seconds(), 0)
 
     @property
     def human_seconds(self):
@@ -382,7 +383,7 @@ class crontab(schedule):
 
             int         (like 7)
             str         (like '3-5,*/15', '*', or 'monday')
-            set         (like set([0,15,30,45]))
+            set         (like {0,15,30,45})
             list        (like [8-17])
 
         And convert it to an (expanded) set representing all time unit
@@ -401,8 +402,8 @@ class crontab(schedule):
         week.
 
         """
-        if isinstance(cronspec, int):
-            result = set([cronspec])
+        if isinstance(cronspec, numbers.Integral):
+            result = {cronspec}
         elif isinstance(cronspec, string_t):
             result = crontab_parser(max_, min_).parse(cronspec)
         elif isinstance(cronspec, set):
@@ -561,11 +562,11 @@ class crontab(schedule):
 
         """
         rem_delta = self.remaining_estimate(last_run_at)
-        rem = timedelta_seconds(rem_delta)
+        rem = max(rem_delta.total_seconds(), 0)
         due = rem == 0
         if due:
             rem_delta = self.remaining_estimate(self.now())
-            rem = timedelta_seconds(rem_delta)
+            rem = max(rem_delta.total_seconds(), 0)
         return schedstate(due, rem)
 
     def __eq__(self, other):
@@ -583,7 +584,7 @@ class crontab(schedule):
 
 def maybe_schedule(s, relative=False, app=None):
     if s is not None:
-        if isinstance(s, int):
+        if isinstance(s, numbers.Integral):
             s = timedelta(seconds=s)
         if isinstance(s, timedelta):
             return schedule(s, relative, app=app)

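The schedules.py changes above switch integer handling to numbers.Integral and compute remaining time with timedelta.total_seconds() clamped at zero. For context, a brief sketch of the cronspec forms the docstring lists and of a plain interval schedule (values are illustrative, not taken from any config):

    from datetime import timedelta

    from celery.schedules import crontab, schedule

    # The cronspec forms listed in the docstring above:
    crontab(minute=30)                 # a single int
    crontab(minute='*/15')             # a str pattern
    crontab(minute={0, 15, 30, 45})    # an already-expanded set

    # A plain interval: is_due() returns (is_due, next), where `next` is the
    # remaining time in seconds, now computed from total_seconds() and
    # clamped so it never goes negative.
    every_ten = schedule(run_every=timedelta(seconds=10))
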
+ 5 - 4
celery/security/certificate.py

@@ -35,7 +35,7 @@ class Certificate(object):
 
     def get_serial_number(self):
         """Return the serial number in the certificate."""
-        return self._cert.get_serial_number()
+        return bytes_to_str(self._cert.get_serial_number())
 
     def get_issuer(self):
         """Return issuer (CA) as a string"""
@@ -66,14 +66,15 @@ class CertStore(object):
     def __getitem__(self, id):
         """get certificate by id"""
         try:
-            return self._certs[id]
+            return self._certs[bytes_to_str(id)]
         except KeyError:
             raise SecurityError('Unknown certificate: {0!r}'.format(id))
 
     def add_cert(self, cert):
-        if cert.get_id() in self._certs:
+        cert_id = bytes_to_str(cert.get_id())
+        if cert_id in self._certs:
             raise SecurityError('Duplicate certificate: {0!r}'.format(id))
-        self._certs[cert.get_id()] = cert
+        self._certs[cert_id] = cert
 
 
 class FSCertStore(CertStore):

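The certificate.py hunks normalize certificate ids to text before using them as dict keys. A minimal sketch of why that matters on Python 3 (the decode helper here is a stand-in, not the one imported by the module):

    def to_text(s, encoding='utf-8'):
        # Stand-in for the bytes_to_str helper used above.
        return s.decode(encoding) if isinstance(s, bytes) else s

    certs = {}
    certs[to_text(b'serial-42')] = 'certificate object'

    assert 'serial-42' in certs            # text lookup hits
    assert b'serial-42' not in certs       # raw bytes would miss the str key
    assert to_text(b'serial-42') in certs  # normalizing keeps both paths consistent
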
+ 6 - 9
celery/security/serialization.py

@@ -44,7 +44,7 @@ class SecureSerializer(object):
         assert self._cert is not None
         with reraise_errors('Unable to serialize: {0!r}', (Exception, )):
             content_type, content_encoding, body = dumps(
-                data, serializer=self._serializer)
+                bytes_to_str(data), serializer=self._serializer)
             # What we sign is the serialized body, not the body itself.
             # this way the receiver doesn't have to decode the contents
             # to verify the signature (and thus avoiding potential flaws
@@ -89,15 +89,12 @@ class SecureSerializer(object):
 
         v = raw_payload[end_of_sig:].split(sep)
 
-        values = [bytes_to_str(signer), bytes_to_str(signature),
-                  bytes_to_str(v[0]), bytes_to_str(v[1]), bytes_to_str(v[2])]
-
         return {
-            'signer': values[0],
-            'signature': values[1],
-            'content_type': values[2],
-            'content_encoding': values[3],
-            'body': values[4],
+            'signer': signer,
+            'signature': signature,
+            'content_type': bytes_to_str(v[0]),
+            'content_encoding': bytes_to_str(v[1]),
+            'body': bytes_to_str(v[2]),
         }
 
 

+ 2 - 2
celery/task/__init__.py

@@ -12,7 +12,7 @@
 from __future__ import absolute_import
 
 from celery._state import current_app, current_task as current
-from celery.five import MagicModule, recreate_module
+from celery.five import LazyModule, recreate_module
 from celery.local import Proxy
 
 __all__ = [
@@ -32,7 +32,7 @@ if STATICA_HACK:  # pragma: no cover
     from .sets import TaskSet
 
 
-class module(MagicModule):
+class module(LazyModule):
 
     def __call__(self, *args, **kwargs):
         return self.task(*args, **kwargs)

+ 23 - 7
celery/task/base.py

@@ -24,6 +24,7 @@ __all__ = ['Task', 'PeriodicTask', 'task']
 #: list of methods that must be classmethods in the old API.
 _COMPAT_CLASSMETHODS = (
     'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request',
+    'signature_from_request', 'signature',
     'AsyncResult', 'subtask', '_get_request', '_get_exec_options',
 )
 
@@ -50,7 +51,6 @@ class Task(BaseTask):
     priority = None
     type = 'regular'
     disable_error_emails = False
-    accept_magic_kwargs = False
 
     from_config = BaseTask.from_config + (
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
@@ -69,6 +69,16 @@ class Task(BaseTask):
     def request(cls):
         return cls._get_request()
 
+    @class_property
+    def backend(cls):
+        if cls._backend is None:
+            return cls.app.backend
+        return cls._backend
+
+    @backend.setter
+    def backend(cls, value):  # noqa
+        cls._backend = value
+
     @classmethod
     def get_logger(self, **kwargs):
         return get_task_logger(self.name)
@@ -96,12 +106,19 @@ class Task(BaseTask):
                       exchange_type=None, **options):
         """Deprecated method to get the task publisher (now called producer).
 
-        Should be replaced with :class:`@amqp.TaskProducer`:
+        Should be replaced with :class:`@kombu.Producer`:
 
         .. code-block:: python
 
-            with celery.connection() as conn:
-                with celery.amqp.TaskProducer(conn) as prod:
+            with app.connection() as conn:
+                with app.amqp.Producer(conn) as prod:
+                    my_task.apply_async(producer=prod)
+
+            or even better, use the :class:`@amqp.producer_pool`:
+
+            .. code-block:: python
+
+                with app.producer_or_acquire() as prod:
                     my_task.apply_async(producer=prod)
 
         """
@@ -109,7 +126,7 @@ class Task(BaseTask):
         if exchange_type is None:
             exchange_type = self.exchange_type
         connection = connection or self.establish_connection()
-        return self._get_app().amqp.TaskProducer(
+        return self._get_app().amqp.Producer(
             connection,
             exchange=exchange and Exchange(exchange, exchange_type),
             routing_key=self.routing_key, **options
@@ -160,8 +177,7 @@ class PeriodicTask(Task):
 
 def task(*args, **kwargs):
     """Deprecated decorator, please use :func:`celery.task`."""
-    return current_app.task(*args, **dict({'accept_magic_kwargs': False,
-                                           'base': Task}, **kwargs))
+    return current_app.task(*args, **dict({'base': Task}, **kwargs))
 
 
 def periodic_task(*args, **options):

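The new `backend` property added above prefers an explicitly assigned backend and otherwise falls back to the app's configured one. A simplified instance-level sketch of that fallback (illustrative names, not the class_property descriptor Celery uses):

    class TaskLike(object):
        _backend = None

        def __init__(self, app):
            self.app = app

        @property
        def backend(self):
            # An explicitly assigned backend wins; otherwise defer to the app.
            return self._backend if self._backend is not None else self.app.backend

        @backend.setter
        def backend(self, value):
            self._backend = value


    class App(object):
        backend = 'app-configured-backend'

    t = TaskLike(App())
    assert t.backend == 'app-configured-backend'
    t.backend = 'explicitly-assigned-backend'
    assert t.backend == 'explicitly-assigned-backend'
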
+ 9 - 9
celery/task/http.py

@@ -8,7 +8,6 @@
 """
 from __future__ import absolute_import
 
-import anyjson
 import sys
 
 try:
@@ -17,6 +16,8 @@ except ImportError:  # pragma: no cover
     from urllib import urlencode              # noqa
     from urlparse import urlparse, parse_qsl  # noqa
 
+from kombu.utils import json
+
 from celery import shared_task, __version__ as celery_version
 from celery.five import items, reraise
 from celery.utils.log import get_task_logger
@@ -41,13 +42,13 @@ else:
 
     from urllib2 import Request, urlopen  # noqa
 
-    def utf8dict(tup):  # noqa
+    def utf8dict(tup, enc='utf-8'):  # noqa
         """With a dict's items() tuple return a new dict with any utf-8
         keys/values encoded."""
-        return dict(
-            (k.encode('utf-8'),
-             v.encode('utf-8') if isinstance(v, unicode) else v)  # noqa
-            for k, v in tup)
+        return {
+            k.encode(enc): (v.encode(enc) if isinstance(v, unicode) else v)
+            for k, v in tup
+        }
 
 
 class InvalidResponseError(Exception):
@@ -62,7 +63,7 @@ class UnknownStatusError(InvalidResponseError):
     """The remote server gave an unknown status."""
 
 
-def extract_response(raw_response, loads=anyjson.loads):
+def extract_response(raw_response, loads=json.loads):
     """Extract the response text from a raw JSON response."""
     if not raw_response:
         raise InvalidResponseError('Empty response')
@@ -162,8 +163,7 @@ class HttpDispatch(object):
         return headers
 
 
-@shared_task(name='celery.http_dispatch', bind=True,
-             url=None, method=None, accept_magic_kwargs=False)
+@shared_task(name='celery.http_dispatch', bind=True, url=None, method=None)
 def dispatch(self, url=None, method='GET', **kwargs):
     """Task dispatching to an URL.
 

+ 1 - 1
celery/task/sets.py

@@ -46,7 +46,7 @@ class TaskSet(list):
         super(TaskSet, self).__init__(
             maybe_signature(t, app=self.app) for t in tasks or []
         )
-        self.Publisher = Publisher or self.app.amqp.TaskProducer
+        self.Publisher = Publisher or self.app.amqp.Producer
         self.total = len(self)  # XXX compat
 
     def apply_async(self, connection=None, publisher=None, taskset_id=None):

+ 0 - 12
celery/task/trace.py

@@ -1,12 +0,0 @@
-"""This module has moved to celery.app.trace."""
-from __future__ import absolute_import
-
-import sys
-
-from celery.utils import warn_deprecated
-
-warn_deprecated('celery.task.trace', removal='3.2',
-                alternative='Please use celery.app.trace instead.')
-
-from celery.app import trace
-sys.modules[__name__] = trace

+ 1 - 1
celery/tests/__init__.py

@@ -22,7 +22,7 @@ def setup():
         KOMBU_DISABLE_LIMIT_PROTECTION='yes',
     )
 
-    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv:
+    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv:
         from warnings import catch_warnings
         with catch_warnings(record=True):
             import_all_modules()

+ 4 - 96
celery/tests/app/test_amqp.py

@@ -1,86 +1,10 @@
 from __future__ import absolute_import
 
-import datetime
-
-import pytz
-
 from kombu import Exchange, Queue
 
-from celery.app.amqp import Queues, TaskPublisher
+from celery.app.amqp import Queues
 from celery.five import keys
-from celery.tests.case import AppCase, Mock
-
-
-class test_TaskProducer(AppCase):
-
-    def test__exit__(self):
-        publisher = self.app.amqp.TaskProducer(self.app.connection())
-        publisher.release = Mock()
-        with publisher:
-            pass
-        publisher.release.assert_called_with()
-
-    def test_declare(self):
-        publisher = self.app.amqp.TaskProducer(self.app.connection())
-        publisher.exchange.name = 'foo'
-        publisher.declare()
-        publisher.exchange.name = None
-        publisher.declare()
-
-    def test_retry_policy(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish_task('tasks.add', (2, 2), {},
-                          retry_policy={'frobulate': 32.4})
-
-    def test_publish_no_retry(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123)
-        self.assertFalse(prod.connection.ensure.call_count)
-
-    def test_publish_custom_queue(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        self.app.amqp.queues['some_queue'] = Queue(
-            'xxx', Exchange('yyy'), 'zzz',
-        )
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        prod.publish_task('tasks.add', (8, 8), {}, retry=False,
-                          queue='some_queue')
-        self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy')
-        self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz')
-
-    def test_publish_with_countdown(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        now = datetime.datetime(2013, 11, 26, 16, 48, 46)
-        prod.publish_task('tasks.add', (1, 1), {}, retry=False,
-                          countdown=10, now=now)
-        self.assertEqual(
-            prod.publish.call_args[0][0]['eta'],
-            '2013-11-26T16:48:56+00:00',
-        )
-
-    def test_publish_with_countdown_and_timezone(self):
-        # use timezone with fixed offset to be sure it won't be changed
-        self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120)
-        prod = self.app.amqp.TaskProducer(Mock())
-        prod.channel.connection.client.declared_entities = set()
-        prod.publish = Mock()
-        now = datetime.datetime(2013, 11, 26, 16, 48, 46)
-        prod.publish_task('tasks.add', (2, 2), {}, retry=False,
-                          countdown=20, now=now)
-        self.assertEqual(
-            prod.publish.call_args[0][0]['eta'],
-            '2013-11-26T18:49:06+02:00',
-        )
-
-    def test_event_dispatcher(self):
-        prod = self.app.amqp.TaskProducer(Mock())
-        self.assertTrue(prod.event_dispatcher)
-        self.assertFalse(prod.event_dispatcher.enabled)
+from celery.tests.case import AppCase
 
 
 class test_TaskConsumer(AppCase):
@@ -90,30 +14,14 @@ class test_TaskConsumer(AppCase):
             self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json']
             self.assertEqual(
                 self.app.amqp.TaskConsumer(conn).accept,
-                set(['application/json'])
+                {'application/json'},
             )
             self.assertEqual(
                 self.app.amqp.TaskConsumer(conn, accept=['json']).accept,
-                set(['application/json']),
+                {'application/json'},
             )
 
 
-class test_compat_TaskPublisher(AppCase):
-
-    def test_compat_exchange_is_string(self):
-        producer = TaskPublisher(exchange='foo', app=self.app)
-        self.assertIsInstance(producer.exchange, Exchange)
-        self.assertEqual(producer.exchange.name, 'foo')
-        self.assertEqual(producer.exchange.type, 'direct')
-        producer = TaskPublisher(exchange='foo', exchange_type='topic',
-                                 app=self.app)
-        self.assertEqual(producer.exchange.type, 'topic')
-
-    def test_compat_exchange_is_Exchange(self):
-        producer = TaskPublisher(exchange=Exchange('foo'), app=self.app)
-        self.assertEqual(producer.exchange.name, 'foo')
-
-
 class test_PublisherPool(AppCase):
 
     def test_setup_nolimit(self):

+ 26 - 23
celery/tests/app/test_app.py

@@ -8,7 +8,6 @@ from copy import deepcopy
 from pickle import loads, dumps
 
 from amqp import promise
-from kombu import Exchange
 
 from celery import shared_task, current_app
 from celery import app as _app
@@ -252,14 +251,14 @@ class test_App(AppCase):
             _state._task_stack.pop()
 
     def test_task_not_shared(self):
-        with patch('celery.app.base.shared_task') as sh:
+        with patch('celery.app.base.connect_on_app_finalize') as sh:
             @self.app.task(shared=False)
             def foo():
                 pass
             self.assertFalse(sh.called)
 
     def test_task_compat_with_filter(self):
-        with self.Celery(accept_magic_kwargs=True) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):
@@ -272,7 +271,7 @@ class test_App(AppCase):
             check.assert_called_with(foo)
 
     def test_task_with_filter(self):
-        with self.Celery(accept_magic_kwargs=False) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):
@@ -336,10 +335,13 @@ class test_App(AppCase):
         def aawsX():
             pass
 
-        with patch('celery.app.amqp.TaskProducer.publish_task') as dt:
-            aawsX.apply_async((4, 5))
-            args = dt.call_args[0][1]
-            self.assertEqual(args, ('hello', 4, 5))
+        with patch('celery.app.amqp.AMQP.create_task_message') as create:
+            with patch('celery.app.amqp.AMQP.send_task_message') as send:
+                create.return_value = Mock(), Mock(), Mock(), Mock()
+                aawsX.apply_async((4, 5))
+                args = create.call_args[0][2]
+                self.assertEqual(args, ('hello', 4, 5))
+                self.assertTrue(send.called)
 
     def test_apply_async_adds_children(self):
         from celery._state import _task_stack
@@ -549,14 +551,14 @@ class test_App(AppCase):
         # Test passing in a string and make sure the string
         # gets there untouched
         self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar'
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             'foo-bar',
         )
 
         # Try passing in None
         self.app.conf.BROKER_FAILOVER_STRATEGY = None
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             itertools.cycle,
         )
@@ -566,7 +568,7 @@ class test_App(AppCase):
             yield True
 
         self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy
-        self.assertEquals(
+        self.assertEqual(
             self.app.connection('amqp:////value').failover_strategy,
             my_failover_strategy,
         )
@@ -609,22 +611,23 @@ class test_App(AppCase):
             chan.close()
         assert conn.transport_cls == 'memory'
 
-        prod = self.app.amqp.TaskProducer(
-            conn, exchange=Exchange('foo_exchange'),
-            send_sent_event=True,
+        message = self.app.amqp.create_task_message(
+            'id', 'footask', (), {}, create_sent_event=True,
         )
 
+        prod = self.app.amqp.Producer(conn)
         dispatcher = Dispatcher()
-        self.assertTrue(prod.publish_task('footask', (), {},
-                                          exchange='moo_exchange',
-                                          routing_key='moo_exchange',
-                                          event_dispatcher=dispatcher))
+        self.app.amqp.send_task_message(
+            prod, 'footask', message,
+            exchange='moo_exchange', routing_key='moo_exchange',
+            event_dispatcher=dispatcher,
+        )
         self.assertTrue(dispatcher.sent)
         self.assertEqual(dispatcher.sent[0][0], 'task-sent')
-        self.assertTrue(prod.publish_task('footask', (), {},
-                                          event_dispatcher=dispatcher,
-                                          exchange='bar_exchange',
-                                          routing_key='bar_exchange'))
+        self.app.amqp.send_task_message(
+            prod, 'footask', message, event_dispatcher=dispatcher,
+            exchange='bar_exchange', routing_key='bar_exchange',
+        )
 
     def test_error_mail_sender(self):
         x = ErrorMail.subject % {'name': 'task_name',
@@ -644,7 +647,7 @@ class test_App(AppCase):
 
 class test_defaults(AppCase):
 
-    def test_str_to_bool(self):
+    def test_strtobool(self):
         for s in ('false', 'no', '0'):
             self.assertFalse(defaults.strtobool(s))
         for s in ('true', 'yes', '1'):

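The reworked publish test above shows the two-step flow that replaces TaskProducer.publish_task: build the message with app.amqp.create_task_message() and hand it to app.amqp.send_task_message() together with a kombu Producer. A hedged outline mirroring that test (argument names copied from it; the signatures belong to this development line and may differ in other versions):

    from celery import Celery

    app = Celery('example', broker='memory://')

    with app.connection() as conn:
        message = app.amqp.create_task_message(
            'some-task-id', 'footask', (), {}, create_sent_event=True,
        )
        producer = app.amqp.Producer(conn)
        app.amqp.send_task_message(
            producer, 'footask', message,
            exchange='moo_exchange', routing_key='moo_exchange',
        )
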
+ 36 - 1
celery/tests/app/test_beat.py

@@ -162,7 +162,7 @@ class test_Scheduler(AppCase):
         scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app))
         self.assertTrue(foo.apply_async.called)
 
-    def test_apply_async_should_not_sync(self):
+    def test_should_sync(self):
 
         @self.app.task(shared=False)
         def not_sync():
@@ -181,6 +181,41 @@ class test_Scheduler(AppCase):
         s.apply_async(s.Entry(task=not_sync.name, app=self.app))
         self.assertFalse(s._do_sync.called)
 
+    def test_should_sync_increments_sync_every_counter(self):
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 2
+
+        @self.app.task(shared=False)
+        def not_sync():
+            pass
+        not_sync.apply_async = Mock()
+
+        s = mScheduler(app=self.app)
+        self.assertEqual(s.sync_every_tasks, 2)
+        s._do_sync = Mock()
+
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        self.assertEqual(s._tasks_since_sync, 1)
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        s._do_sync.assert_called_with()
+
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 0
+
+    def test_sync_task_counter_resets_on_do_sync(self):
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 1
+
+        @self.app.task(shared=False)
+        def not_sync():
+            pass
+        not_sync.apply_async = Mock()
+
+        s = mScheduler(app=self.app)
+        self.assertEqual(s.sync_every_tasks, 1)
+
+        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
+        self.assertEqual(s._tasks_since_sync, 0)
+
+        self.app.conf.CELERYBEAT_SYNC_EVERY = 0
+
     @patch('celery.app.base.Celery.send_task')
     def test_send_task(self, send_task):
         b = beat.Scheduler(app=self.app)

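The two new beat tests above pin down the CELERYBEAT_SYNC_EVERY behaviour: the scheduler counts applied tasks, forces a sync once the counter reaches sync_every_tasks, and resets the counter whenever a sync runs. A condensed sketch of that counter logic (assumed names, not celery.beat itself):

    class SchedulerLike(object):
        def __init__(self, sync_every_tasks=2):
            self.sync_every_tasks = sync_every_tasks
            self._tasks_since_sync = 0

        def apply_async(self, entry):
            # ... the task would be sent here ...
            self._tasks_since_sync += 1
            if self.sync_every_tasks and self._tasks_since_sync >= self.sync_every_tasks:
                self._do_sync()

        def _do_sync(self):
            # Persist the schedule, then reset the counter.
            self._tasks_since_sync = 0


    s = SchedulerLike(sync_every_tasks=2)
    s.apply_async('entry')
    assert s._tasks_since_sync == 1
    s.apply_async('entry')
    assert s._tasks_since_sync == 0   # the second apply triggered a sync and reset
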
+ 10 - 12
celery/tests/app/test_builtins.py

@@ -136,18 +136,18 @@ class test_chain(BuiltinsCase):
 
     def test_group_to_chord(self):
         c = (
-            group(self.add.s(i, i) for i in range(5)) |
+            group([self.add.s(i, i) for i in range(5)], app=self.app) |
             self.add.s(10) |
             self.add.s(20) |
             self.add.s(30)
         )
-        tasks, _ = c.type.prepare_steps((), c.tasks)
+        tasks, _ = c.prepare_steps((), c.tasks)
         self.assertIsInstance(tasks[0], chord)
         self.assertTrue(tasks[0].body.options['link'])
         self.assertTrue(tasks[0].body.options['link'][0].options['link'])
 
         c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
-        tasks2, _ = c2.type.prepare_steps((), c2.tasks)
+        tasks2, _ = c2.prepare_steps((), c2.tasks)
         self.assertIsInstance(tasks2[1], group)
 
     def test_apply_options(self):
@@ -158,7 +158,7 @@ class test_chain(BuiltinsCase):
                 return self
 
         def s(*args, **kwargs):
-            return static(self.add, args, kwargs, type=self.add)
+            return static(self.add, args, kwargs, type=self.add, app=self.app)
 
         c = s(2, 2) | s(4, 4) | s(8, 8)
         r1 = c.apply_async(task_id='some_id')
@@ -196,18 +196,16 @@ class test_chord(BuiltinsCase):
     def test_forward_options(self):
         body = self.xsum.s()
         x = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x._type = Mock()
-        x._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x.run = Mock(name='chord.run(x)')
         x.apply_async(group_id='some_group_id')
-        self.assertTrue(x._type.called)
-        resbody = x._type.call_args[0][1]
+        self.assertTrue(x.run.called)
+        resbody = x.run.call_args[0][1]
         self.assertEqual(resbody.options['group_id'], 'some_group_id')
         x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x2._type = Mock()
-        x2._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x2.run = Mock(name='chord.run(x2)')
         x2.apply_async(chord='some_chord_id')
-        self.assertTrue(x2._type.called)
-        resbody = x2._type.call_args[0][1]
+        self.assertTrue(x2.run.called)
+        resbody = x2.run.call_args[0][1]
         self.assertEqual(resbody.options['chord'], 'some_chord_id')
 
     def test_apply_eager(self):

+ 4 - 7
celery/tests/app/test_loaders.py

@@ -7,7 +7,6 @@ import warnings
 from celery import loaders
 from celery.exceptions import (
     NotConfigured,
-    CPendingDeprecationWarning,
 )
 from celery.loaders import base
 from celery.loaders import default
@@ -34,16 +33,12 @@ class test_loaders(AppCase):
 
     @depends_on_current_app
     def test_current_loader(self):
-        with self.assertWarnsRegex(
-                CPendingDeprecationWarning,
-                r'deprecation'):
+        with self.assertPendingDeprecation():
             self.assertIs(loaders.current_loader(), self.app.loader)
 
     @depends_on_current_app
     def test_load_settings(self):
-        with self.assertWarnsRegex(
-                CPendingDeprecationWarning,
-                r'deprecation'):
+        with self.assertPendingDeprecation():
             self.assertIs(loaders.load_settings(), self.app.conf)
 
 
@@ -211,9 +206,11 @@ class test_DefaultLoader(AppCase):
         except ValueError:
             pass
         celery = sys.modules.pop('celery', None)
+        sys.modules.pop('celery.five', None)
         try:
             self.assertTrue(l.import_from_cwd('celery'))
             sys.modules.pop('celery', None)
+            sys.modules.pop('celery.five', None)
             sys.path.insert(0, os.getcwd())
             self.assertTrue(l.import_from_cwd('celery'))
         finally:

+ 43 - 19
celery/tests/app/test_log.py

@@ -2,6 +2,9 @@ from __future__ import absolute_import
 
 import sys
 import logging
+
+from collections import defaultdict
+from io import StringIO
 from tempfile import mktemp
 
 from celery import signals
@@ -94,7 +97,7 @@ class test_ColorFormatter(AppCase):
     @patch('celery.utils.log.safe_str')
     @patch('logging.Formatter.formatException')
     def test_formatException_not_string(self, fe, safe_str):
-        x = ColorFormatter('HELLO')
+        x = ColorFormatter()
         value = KeyError()
         fe.return_value = value
         self.assertIs(x.formatException(value), value)
@@ -103,16 +106,19 @@ class test_ColorFormatter(AppCase):
 
     @patch('logging.Formatter.formatException')
     @patch('celery.utils.log.safe_str')
-    def test_formatException_string(self, safe_str, fe, value='HELLO'):
-        x = ColorFormatter(value)
-        fe.return_value = value
-        self.assertTrue(x.formatException(value))
+    def test_formatException_string(self, safe_str, fe):
+        x = ColorFormatter()
+        fe.return_value = 'HELLO'
+        try:
+            raise Exception()
+        except Exception:
+            self.assertTrue(x.formatException(sys.exc_info()))
         if sys.version_info[0] == 2:
             self.assertTrue(safe_str.called)
 
     @patch('logging.Formatter.format')
     def test_format_object(self, _format):
-        x = ColorFormatter(object())
+        x = ColorFormatter()
         x.use_color = True
         record = Mock()
         record.levelname = 'ERROR'
@@ -121,7 +127,7 @@ class test_ColorFormatter(AppCase):
 
     @patch('celery.utils.log.safe_str')
     def test_format_raises(self, safe_str):
-        x = ColorFormatter('HELLO')
+        x = ColorFormatter()
 
         def on_safe_str(s):
             try:
@@ -133,6 +139,7 @@ class test_ColorFormatter(AppCase):
         class Record(object):
             levelname = 'ERROR'
             msg = 'HELLO'
+            exc_info = 1
             exc_text = 'error text'
             stack_info = None
 
@@ -145,15 +152,15 @@ class test_ColorFormatter(AppCase):
         record = Record()
         safe_str.return_value = record
 
-        x.format(record)
-        self.assertIn('<Unrepresentable', record.msg)
+        msg = x.format(record)
+        self.assertIn('<Unrepresentable', msg)
         self.assertEqual(safe_str.call_count, 1)
 
     @patch('celery.utils.log.safe_str')
     def test_format_raises_no_color(self, safe_str):
         if sys.version_info[0] == 3:
             raise SkipTest('py3k')
-        x = ColorFormatter('HELLO', False)
+        x = ColorFormatter(use_color=False)
         record = Mock()
         record.levelname = 'ERROR'
         record.msg = 'HELLO'
@@ -248,14 +255,31 @@ class test_default_logger(AppCase):
                 l.info('The quick brown fox...')
                 self.assertIn('The quick brown fox...', stderr.getvalue())
 
-    def test_setup_logger_no_handlers_file(self):
-        with restore_logging():
-            l = self.get_logger()
-            l.handlers = []
-            tempfile = mktemp(suffix='unittest', prefix='celery')
-            l = self.setup_logger(logfile=tempfile, loglevel=0, root=False)
-            self.assertIsInstance(get_handlers(l)[0],
-                                  logging.FileHandler)
+    @patch('os.fstat')
+    def test_setup_logger_no_handlers_file(self, *args):
+        tempfile = mktemp(suffix='unittest', prefix='celery')
+        _open = ('builtins.open' if sys.version_info[0] == 3
+                 else '__builtin__.open')
+        with patch(_open) as osopen:
+            with restore_logging():
+                files = defaultdict(StringIO)
+
+                def open_file(filename, *args, **kwargs):
+                    f = files[filename]
+                    f.fileno = Mock()
+                    f.fileno.return_value = 99
+                    return f
+
+                osopen.side_effect = open_file
+                l = self.get_logger()
+                l.handlers = []
+                l = self.setup_logger(
+                    logfile=tempfile, loglevel=logging.INFO, root=False,
+                )
+                self.assertIsInstance(
+                    get_handlers(l)[0], logging.FileHandler,
+                )
+                self.assertIn(tempfile, files)
 
     def test_redirect_stdouts(self):
         with restore_logging():
@@ -336,7 +360,7 @@ class test_task_logger(test_default_logger):
         return self.app.log.setup_task_loggers(*args, **kwargs)
 
     def get_logger(self, *args, **kwargs):
-        return get_task_logger("test_task_logger")
+        return get_task_logger('test_task_logger')
 
 
 class test_patch_logger_cls(AppCase):

+ 49 - 51
celery/tests/app/test_schedules.py

@@ -54,65 +54,63 @@ class test_crontab_parser(AppCase):
 
     def test_parse_range_wraps(self):
         self.assertEqual(crontab_parser(12).parse('11-1'),
-                         set([11, 0, 1]))
+                         {11, 0, 1})
         self.assertEqual(crontab_parser(60, 1).parse('2-1'),
                          set(range(1, 60 + 1)))
 
     def test_parse_groups(self):
         self.assertEqual(crontab_parser().parse('1,2,3,4'),
-                         set([1, 2, 3, 4]))
+                         {1, 2, 3, 4})
         self.assertEqual(crontab_parser().parse('0,15,30,45'),
-                         set([0, 15, 30, 45]))
+                         {0, 15, 30, 45})
         self.assertEqual(crontab_parser(min_=1).parse('1,2,3,4'),
-                         set([1, 2, 3, 4]))
+                         {1, 2, 3, 4})
 
     def test_parse_steps(self):
         self.assertEqual(crontab_parser(8).parse('*/2'),
-                         set([0, 2, 4, 6]))
+                         {0, 2, 4, 6})
         self.assertEqual(crontab_parser().parse('*/2'),
-                         set(i * 2 for i in range(30)))
+                         {i * 2 for i in range(30)})
         self.assertEqual(crontab_parser().parse('*/3'),
-                         set(i * 3 for i in range(20)))
+                         {i * 3 for i in range(20)})
         self.assertEqual(crontab_parser(8, 1).parse('*/2'),
-                         set([1, 3, 5, 7]))
+                         {1, 3, 5, 7})
         self.assertEqual(crontab_parser(min_=1).parse('*/2'),
-                         set(i * 2 + 1 for i in range(30)))
+                         {i * 2 + 1 for i in range(30)})
         self.assertEqual(crontab_parser(min_=1).parse('*/3'),
-                         set(i * 3 + 1 for i in range(20)))
+                         {i * 3 + 1 for i in range(20)})
 
     def test_parse_composite(self):
-        self.assertEqual(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
-        self.assertEqual(crontab_parser().parse('2-9/5'), set([2, 7]))
-        self.assertEqual(crontab_parser().parse('2-10/5'), set([2, 7]))
+        self.assertEqual(crontab_parser(8).parse('*/2'), {0, 2, 4, 6})
+        self.assertEqual(crontab_parser().parse('2-9/5'), {2, 7})
+        self.assertEqual(crontab_parser().parse('2-10/5'), {2, 7})
         self.assertEqual(
             crontab_parser(min_=1).parse('55-5/3'),
-            set([55, 58, 1, 4]),
+            {55, 58, 1, 4},
         )
-        self.assertEqual(crontab_parser().parse('2-11/5,3'), set([2, 3, 7]))
+        self.assertEqual(crontab_parser().parse('2-11/5,3'), {2, 3, 7})
         self.assertEqual(
             crontab_parser().parse('2-4/3,*/5,0-21/4'),
-            set([0, 2, 4, 5, 8, 10, 12, 15, 16,
-                 20, 25, 30, 35, 40, 45, 50, 55]),
+            {0, 2, 4, 5, 8, 10, 12, 15, 16, 20, 25, 30, 35, 40, 45, 50, 55},
         )
         self.assertEqual(
             crontab_parser().parse('1-9/2'),
-            set([1, 3, 5, 7, 9]),
+            {1, 3, 5, 7, 9},
         )
-        self.assertEqual(crontab_parser(8, 1).parse('*/2'), set([1, 3, 5, 7]))
-        self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), set([2, 7]))
-        self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), set([2, 7]))
+        self.assertEqual(crontab_parser(8, 1).parse('*/2'), {1, 3, 5, 7})
+        self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), {2, 7})
+        self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), {2, 7})
         self.assertEqual(
             crontab_parser(min_=1).parse('2-11/5,3'),
-            set([2, 3, 7]),
+            {2, 3, 7},
         )
         self.assertEqual(
             crontab_parser(min_=1).parse('2-4/3,*/5,1-21/4'),
-            set([1, 2, 5, 6, 9, 11, 13, 16, 17,
-                 21, 26, 31, 36, 41, 46, 51, 56]),
+            {1, 2, 5, 6, 9, 11, 13, 16, 17, 21, 26, 31, 36, 41, 46, 51, 56},
         )
         self.assertEqual(
             crontab_parser(min_=1).parse('1-9/2'),
-            set([1, 3, 5, 7, 9]),
+            {1, 3, 5, 7, 9},
         )
 
     def test_parse_errors_on_empty_string(self):
@@ -148,11 +146,11 @@ class test_crontab_parser(AppCase):
     def test_expand_cronspec_eats_iterables(self):
         self.assertEqual(
             crontab._expand_cronspec(iter([1, 2, 3]), 100),
-            set([1, 2, 3]),
+            {1, 2, 3},
         )
         self.assertEqual(
             crontab._expand_cronspec(iter([1, 2, 3]), 100, 1),
-            set([1, 2, 3]),
+            {1, 2, 3},
         )
 
     def test_expand_cronspec_invalid_type(self):
@@ -408,7 +406,7 @@ class test_crontab_is_due(AppCase):
 
     def test_simple_crontab_spec(self):
         c = self.crontab(minute=30)
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         self.assertEqual(c.hour, set(range(24)))
         self.assertEqual(c.day_of_week, set(range(7)))
         self.assertEqual(c.day_of_month, set(range(1, 32)))
@@ -416,13 +414,13 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_minute_formats(self):
         c = self.crontab(minute=30)
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         c = self.crontab(minute='30')
-        self.assertEqual(c.minute, set([30]))
+        self.assertEqual(c.minute, {30})
         c = self.crontab(minute=(30, 40, 50))
-        self.assertEqual(c.minute, set([30, 40, 50]))
-        c = self.crontab(minute=set([30, 40, 50]))
-        self.assertEqual(c.minute, set([30, 40, 50]))
+        self.assertEqual(c.minute, {30, 40, 50})
+        c = self.crontab(minute={30, 40, 50})
+        self.assertEqual(c.minute, {30, 40, 50})
 
     def test_crontab_spec_invalid_minute(self):
         with self.assertRaises(ValueError):
@@ -432,11 +430,11 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_hour_formats(self):
         c = self.crontab(hour=6)
-        self.assertEqual(c.hour, set([6]))
+        self.assertEqual(c.hour, {6})
         c = self.crontab(hour='5')
-        self.assertEqual(c.hour, set([5]))
+        self.assertEqual(c.hour, {5})
         c = self.crontab(hour=(4, 8, 12))
-        self.assertEqual(c.hour, set([4, 8, 12]))
+        self.assertEqual(c.hour, {4, 8, 12})
 
     def test_crontab_spec_invalid_hour(self):
         with self.assertRaises(ValueError):
@@ -446,17 +444,17 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_dow_formats(self):
         c = self.crontab(day_of_week=5)
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='5')
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='fri')
-        self.assertEqual(c.day_of_week, set([5]))
+        self.assertEqual(c.day_of_week, {5})
         c = self.crontab(day_of_week='tuesday,sunday,fri')
-        self.assertEqual(c.day_of_week, set([0, 2, 5]))
+        self.assertEqual(c.day_of_week, {0, 2, 5})
         c = self.crontab(day_of_week='mon-fri')
-        self.assertEqual(c.day_of_week, set([1, 2, 3, 4, 5]))
+        self.assertEqual(c.day_of_week, {1, 2, 3, 4, 5})
         c = self.crontab(day_of_week='*/2')
-        self.assertEqual(c.day_of_week, set([0, 2, 4, 6]))
+        self.assertEqual(c.day_of_week, {0, 2, 4, 6})
 
     def test_crontab_spec_invalid_dow(self):
         with self.assertRaises(ValueError):
@@ -470,13 +468,13 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_dom_formats(self):
         c = self.crontab(day_of_month=5)
-        self.assertEqual(c.day_of_month, set([5]))
+        self.assertEqual(c.day_of_month, {5})
         c = self.crontab(day_of_month='5')
-        self.assertEqual(c.day_of_month, set([5]))
+        self.assertEqual(c.day_of_month, {5})
         c = self.crontab(day_of_month='2,4,6')
-        self.assertEqual(c.day_of_month, set([2, 4, 6]))
+        self.assertEqual(c.day_of_month, {2, 4, 6})
         c = self.crontab(day_of_month='*/5')
-        self.assertEqual(c.day_of_month, set([1, 6, 11, 16, 21, 26, 31]))
+        self.assertEqual(c.day_of_month, {1, 6, 11, 16, 21, 26, 31})
 
     def test_crontab_spec_invalid_dom(self):
         with self.assertRaises(ValueError):
@@ -490,15 +488,15 @@ class test_crontab_is_due(AppCase):
 
     def test_crontab_spec_moy_formats(self):
         c = self.crontab(month_of_year=1)
-        self.assertEqual(c.month_of_year, set([1]))
+        self.assertEqual(c.month_of_year, {1})
         c = self.crontab(month_of_year='1')
-        self.assertEqual(c.month_of_year, set([1]))
+        self.assertEqual(c.month_of_year, {1})
         c = self.crontab(month_of_year='2,4,6')
-        self.assertEqual(c.month_of_year, set([2, 4, 6]))
+        self.assertEqual(c.month_of_year, {2, 4, 6})
         c = self.crontab(month_of_year='*/2')
-        self.assertEqual(c.month_of_year, set([1, 3, 5, 7, 9, 11]))
+        self.assertEqual(c.month_of_year, {1, 3, 5, 7, 9, 11})
         c = self.crontab(month_of_year='2-12/2')
-        self.assertEqual(c.month_of_year, set([2, 4, 6, 8, 10, 12]))
+        self.assertEqual(c.month_of_year, {2, 4, 6, 8, 10, 12})
 
     def test_crontab_spec_invalid_moy(self):
         with self.assertRaises(ValueError):

+ 17 - 1
celery/tests/app/test_utils.py

@@ -2,7 +2,7 @@ from __future__ import absolute_import
 
 from collections import Mapping, MutableMapping
 
-from celery.app.utils import Settings, bugreport
+from celery.app.utils import Settings, filter_hidden_settings, bugreport
 
 from celery.tests.case import AppCase, Mock
 
@@ -20,6 +20,22 @@ class TestSettings(AppCase):
         self.assertTrue(issubclass(Settings, MutableMapping))
 
 
+class test_filter_hidden_settings(AppCase):
+
+    def test_handles_non_string_keys(self):
+        """filter_hidden_settings shouldn't raise an exception when handling
+        mappings with non-string keys"""
+        conf = {
+            'STRING_KEY': 'VALUE1',
+            ('NON', 'STRING', 'KEY'): 'VALUE2',
+            'STRING_KEY2': {
+                'STRING_KEY3': 1,
+                ('NON', 'STRING', 'KEY', '2'): 2
+            },
+        }
+        filter_hidden_settings(conf)
+
+
 class test_bugreport(AppCase):
 
     def test_no_conn_driver_info(self):

+ 14 - 13
celery/tests/backends/test_amqp.py

@@ -33,8 +33,8 @@ class test_AMQPBackend(AppCase):
         return AMQPBackend(self.app, **opts)
 
     def test_mark_as_done(self):
-        tb1 = self.create_backend()
-        tb2 = self.create_backend()
+        tb1 = self.create_backend(max_cached_results=1)
+        tb2 = self.create_backend(max_cached_results=1)
 
         tid = uuid()
 
@@ -108,8 +108,8 @@ class test_AMQPBackend(AppCase):
             raise KeyError('foo')
 
         backend = AMQPBackend(self.app)
-        from celery.app.amqp import TaskProducer
-        prod, TaskProducer.publish = TaskProducer.publish, publish
+        from celery.app.amqp import Producer
+        prod, Producer.publish = Producer.publish, publish
         try:
             with self.assertRaises(KeyError):
                 backend.retry_policy['max_retries'] = None
@@ -119,7 +119,7 @@ class test_AMQPBackend(AppCase):
                 backend.retry_policy['max_retries'] = 10
                 backend.store_result('foo', 'bar', 'STARTED')
         finally:
-            TaskProducer.publish = prod
+            Producer.publish = prod
 
     def assertState(self, retval, state):
         self.assertEqual(retval['status'], state)
@@ -175,7 +175,7 @@ class test_AMQPBackend(AppCase):
         class MockBackend(AMQPBackend):
             Queue = MockBinding
 
-        backend = MockBackend(self.app)
+        backend = MockBackend(self.app, max_cached_results=100)
         backend._republish = Mock()
 
         yield results, backend, Message
@@ -183,29 +183,30 @@ class test_AMQPBackend(AppCase):
     def test_backlog_limit_exceeded(self):
         with self._result_context() as (results, backend, Message):
             for i in range(1001):
-                results.put(Message(status=states.RECEIVED))
+                results.put(Message(task_id='id', status=states.RECEIVED))
             with self.assertRaises(backend.BacklogLimitExceeded):
                 backend.get_task_meta('id')
 
     def test_poll_result(self):
         with self._result_context() as (results, backend, Message):
+            tid = uuid()
             # FFWD's to the latest state.
             state_messages = [
-                Message(status=states.RECEIVED, seq=1),
-                Message(status=states.STARTED, seq=2),
-                Message(status=states.FAILURE, seq=3),
+                Message(task_id=tid, status=states.RECEIVED, seq=1),
+                Message(task_id=tid, status=states.STARTED, seq=2),
+                Message(task_id=tid, status=states.FAILURE, seq=3),
             ]
             for state_message in state_messages:
                 results.put(state_message)
-            r1 = backend.get_task_meta(uuid())
+            r1 = backend.get_task_meta(tid)
             self.assertDictContainsSubset(
                 {'status': states.FAILURE, 'seq': 3}, r1,
                 'FFWDs to the last state',
             )
 
             # Caches last known state.
-            results.put(Message())
             tid = uuid()
+            results.put(Message(task_id=tid))
             backend.get_task_meta(tid)
             self.assertIn(tid, backend._cache, 'Caches last known state')
 
@@ -261,7 +262,7 @@ class test_AMQPBackend(AppCase):
                 b.drain_events(Connection(), consumer, timeout=0.1)
 
     def test_get_many(self):
-        b = self.create_backend()
+        b = self.create_backend(max_cached_results=10)
 
         tids = []
         for i in range(10):

+ 0 - 9
celery/tests/backends/test_backends.py

@@ -19,15 +19,6 @@ class test_backends(AppCase):
                 expect_cls,
             )
 
-    def test_get_backend_cache(self):
-        backends.get_backend_cls.clear()
-        hits = backends.get_backend_cls.hits
-        misses = backends.get_backend_cls.misses
-        self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
-        self.assertEqual(backends.get_backend_cls.misses, misses + 1)
-        self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
-        self.assertEqual(backends.get_backend_cls.hits, hits + 1)
-
     def test_unknown_backend(self):
         with self.assertRaises(ImportError):
             backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader)

+ 13 - 10
celery/tests/backends/test_base.py

@@ -62,7 +62,7 @@ class test_BaseBackend_interface(AppCase):
             self.b.forget('SOMExx-N0nex1stant-IDxx-')
 
     def test_on_chord_part_return(self):
-        self.b.on_chord_part_return(None)
+        self.b.on_chord_part_return(None, None, None)
 
     def test_apply_chord(self, unlock='celery.chord_unlock'):
         self.app.tasks[unlock] = Mock()
@@ -234,9 +234,10 @@ class test_BaseBackend_dict(AppCase):
         self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult)
 
     def test_is_cached(self):
-        self.b._cache['foo'] = 1
-        self.assertTrue(self.b.is_cached('foo'))
-        self.assertFalse(self.b.is_cached('false'))
+        b = BaseBackend(app=self.app, max_cached_results=1)
+        b._cache['foo'] = 1
+        self.assertTrue(b.is_cached('foo'))
+        self.assertFalse(b.is_cached('false'))
 
 
 class test_KeyValueStoreBackend(AppCase):
@@ -246,7 +247,7 @@ class test_KeyValueStoreBackend(AppCase):
 
     def test_on_chord_part_return(self):
         assert not self.b.implements_incr
-        self.b.on_chord_part_return(None)
+        self.b.on_chord_part_return(None, None, None)
 
     def test_get_store_delete_result(self):
         tid = uuid()
@@ -282,12 +283,14 @@ class test_KeyValueStoreBackend(AppCase):
     def test_chord_part_return_no_gid(self):
         self.b.implements_incr = True
         task = Mock()
+        state = 'SUCCESS'
+        result = 10
         task.request.group = None
         self.b.get_key_for_chord = Mock()
         self.b.get_key_for_chord.side_effect = AssertionError(
             'should not get here',
         )
-        self.assertIsNone(self.b.on_chord_part_return(task))
+        self.assertIsNone(self.b.on_chord_part_return(task, state, result))
 
     @contextmanager
     def _chord_part_context(self, b):
@@ -315,14 +318,14 @@ class test_KeyValueStoreBackend(AppCase):
 
     def test_chord_part_return_propagate_set(self):
         with self._chord_part_context(self.b) as (task, deps, _):
-            self.b.on_chord_part_return(task, propagate=True)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True)
             self.assertFalse(self.b.expire.called)
             deps.delete.assert_called_with()
             deps.join_native.assert_called_with(propagate=True, timeout=3.0)
 
     def test_chord_part_return_propagate_default(self):
         with self._chord_part_context(self.b) as (task, deps, _):
-            self.b.on_chord_part_return(task, propagate=None)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None)
             self.assertFalse(self.b.expire.called)
             deps.delete.assert_called_with()
             deps.join_native.assert_called_with(
@@ -334,7 +337,7 @@ class test_KeyValueStoreBackend(AppCase):
         with self._chord_part_context(self.b) as (task, deps, callback):
             deps._failed_join_report = lambda: iter([])
             deps.join_native.side_effect = KeyError('foo')
-            self.b.on_chord_part_return(task)
+            self.b.on_chord_part_return(task, 'SUCCESS', 10)
             self.assertTrue(self.b.fail_from_current_stack.called)
             args = self.b.fail_from_current_stack.call_args
             exc = args[1]['exc']
@@ -348,7 +351,7 @@ class test_KeyValueStoreBackend(AppCase):
                 self.app.AsyncResult('culprit'),
             ])
             deps.join_native.side_effect = KeyError('foo')
-            b.on_chord_part_return(task)
+            b.on_chord_part_return(task, 'SUCCESS', 10)
             self.assertTrue(b.fail_from_current_stack.called)
             args = b.fail_from_current_stack.call_args
             exc = args[1]['exc']

+ 2 - 2
celery/tests/backends/test_cache.py

@@ -86,10 +86,10 @@ class test_CacheBackend(AppCase):
         tb.apply_chord(group(app=self.app), (), gid, {}, result=res)
 
         self.assertFalse(deps.join_native.called)
-        tb.on_chord_part_return(task)
+        tb.on_chord_part_return(task, 'SUCCESS', 10)
         self.assertFalse(deps.join_native.called)
 
-        tb.on_chord_part_return(task)
+        tb.on_chord_part_return(task, 'SUCCESS', 10)
         deps.join_native.assert_called_with(propagate=True, timeout=3.0)
         deps.delete.assert_called_with()
 

+ 4 - 4
celery/tests/backends/test_couchbase.py

@@ -129,8 +129,8 @@ class test_CouchBaseBackend(AppCase):
         url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
         with self.Celery(backend=url) as app:
             x = app.backend
-            self.assertEqual(x.bucket, "mycoolbucket")
-            self.assertEqual(x.host, "myhost")
-            self.assertEqual(x.username, "johndoe")
-            self.assertEqual(x.password, "mysecret")
+            self.assertEqual(x.bucket, 'mycoolbucket')
+            self.assertEqual(x.host, 'myhost')
+            self.assertEqual(x.username, 'johndoe')
+            self.assertEqual(x.password, 'mysecret')
             self.assertEqual(x.port, 123)

+ 3 - 3
celery/tests/backends/test_database.py

@@ -42,16 +42,16 @@ class test_DatabaseBackend(AppCase):
         self.uri = 'sqlite:///test.db'
 
     def test_retry_helper(self):
-        from celery.backends.database import OperationalError
+        from celery.backends.database import DatabaseError
 
         calls = [0]
 
         @retry
         def raises():
             calls[0] += 1
-            raise OperationalError(1, 2, 3)
+            raise DatabaseError(1, 2, 3)
 
-        with self.assertRaises(OperationalError):
+        with self.assertRaises(DatabaseError):
             raises(max_retries=5)
         self.assertEqual(calls[0], 5)
 

+ 11 - 10
celery/tests/backends/test_mongodb.py

@@ -10,7 +10,7 @@ from celery.backends import mongodb as module
 from celery.backends.mongodb import MongoBackend, Bunch, pymongo
 from celery.exceptions import ImproperlyConfigured
 from celery.tests.case import (
-    AppCase, MagicMock, Mock, SkipTest,
+    AppCase, MagicMock, Mock, SkipTest, ANY,
     depends_on_current_app, patch, sentinel,
 )
 
@@ -98,7 +98,7 @@ class test_MongoBackend(AppCase):
 
             connection = self.backend._get_connection()
             mock_Connection.assert_called_once_with(
-                host='mongodb://localhost:27017', ssl=False, max_pool_size=10,
+                host='mongodb://localhost:27017', max_pool_size=10,
                 auto_start_request=False)
             self.assertEqual(sentinel.connection, connection)
 
@@ -113,7 +113,7 @@ class test_MongoBackend(AppCase):
 
             connection = self.backend._get_connection()
             mock_Connection.assert_called_once_with(
-                host=mongodb_uri, ssl=False, max_pool_size=10,
+                host=mongodb_uri, max_pool_size=10,
                 auto_start_request=False)
             self.assertEqual(sentinel.connection, connection)
 
@@ -176,7 +176,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -196,9 +196,10 @@ class test_MongoBackend(AppCase):
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
         self.assertEqual(
-            ['status', 'task_id', 'date_done', 'traceback', 'result',
-             'children'],
-            list(ret_val.keys()))
+            list(sorted(['status', 'task_id', 'date_done', 'traceback',
+                         'result', 'children'])),
+            list(sorted(ret_val.keys())),
+        )
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
     def test_get_task_meta_for_no_result(self, mock_get_database):
@@ -232,7 +233,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -298,7 +299,7 @@ class test_MongoBackend(AppCase):
         self.backend.taskmeta_collection = MONGODB_COLLECTION
 
         mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
-        mock_collection = Mock()
+        self.backend.collections = mock_collection = Mock()
 
         mock_get_database.return_value = mock_database
         mock_database.__getitem__.return_value = mock_collection
@@ -309,7 +310,7 @@ class test_MongoBackend(AppCase):
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(
             MONGODB_COLLECTION)
-        mock_collection.assert_called_once()
+        self.assertTrue(mock_collection.remove.called)
 
     def test_get_database_authfailure(self):
         x = MongoBackend(app=self.app)

+ 111 - 86
celery/tests/backends/test_redis.py

@@ -4,52 +4,54 @@ from datetime import timedelta
 
 from pickle import loads, dumps
 
-from kombu.utils import cached_property, uuid
-
 from celery import signature
 from celery import states
 from celery import group
+from celery import uuid
 from celery.datastructures import AttributeDict
-from celery.exceptions import CPendingDeprecationWarning, ImproperlyConfigured
-from celery.utils.timeutils import timedelta_seconds
+from celery.exceptions import ImproperlyConfigured
 
 from celery.tests.case import (
-    AppCase, Mock, SkipTest, depends_on_current_app, patch,
+    AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch,
 )
 
 
-class Redis(object):
+class Connection(object):
+    connected = True
+
+    def disconnect(self):
+        self.connected = False
+
 
-    class Connection(object):
-        connected = True
+class Pipeline(object):
 
-        def disconnect(self):
-            self.connected = False
+    def __init__(self, client):
+        self.client = client
+        self.steps = []
 
-    class Pipeline(object):
+    def __getattr__(self, attr):
 
-        def __init__(self, client):
-            self.client = client
-            self.steps = []
+        def add_step(*args, **kwargs):
+            self.steps.append((getattr(self.client, attr), args, kwargs))
+            return self
+        return add_step
 
-        def __getattr__(self, attr):
+    def execute(self):
+        return [step(*a, **kw) for step, a, kw in self.steps]
 
-            def add_step(*args, **kwargs):
-                self.steps.append((getattr(self.client, attr), args, kwargs))
-                return self
-            return add_step
 
-        def execute(self):
-            return [step(*a, **kw) for step, a, kw in self.steps]
+class Redis(MockCallbacks):
+    Connection = Connection
+    Pipeline = Pipeline
 
     def __init__(self, host=None, port=None, db=None, password=None, **kw):
         self.host = host
         self.port = port
         self.db = db
         self.password = password
-        self.connection = self.Connection()
         self.keyspace = {}
         self.expiry = {}
+        self.connection = self.Connection()
 
     def get(self, key):
         return self.keyspace.get(key)
@@ -63,16 +65,30 @@ class Redis(object):
 
     def expire(self, key, expires):
         self.expiry[key] = expires
+        return expires
 
     def delete(self, key):
-        self.keyspace.pop(key)
-
-    def publish(self, key, value):
-        pass
+        return bool(self.keyspace.pop(key, None))
 
     def pipeline(self):
         return self.Pipeline(self)
 
+    def _get_list(self, key):
+        try:
+            return self.keyspace[key]
+        except KeyError:
+            l = self.keyspace[key] = []
+            return l
+
+    def rpush(self, key, value):
+        self._get_list(key).append(value)
+
+    def lrange(self, key, start, stop):
+        return self._get_list(key)[start:stop]
+
+    def llen(self, key):
+        return len(self.keyspace.get(key) or [])
+
 
 class redis(object):
     Redis = Redis
@@ -91,41 +107,34 @@ class redis(object):
 class test_RedisBackend(AppCase):
 
     def get_backend(self):
-        from celery.backends import redis
+        from celery.backends.redis import RedisBackend
 
-        class RedisBackend(redis.RedisBackend):
+        class _RedisBackend(RedisBackend):
             redis = redis
 
-        return RedisBackend
+        return _RedisBackend
 
     def setup(self):
         self.Backend = self.get_backend()
 
-        class MockBackend(self.Backend):
-
-            @cached_property
-            def client(self):
-                return Mock()
-
-        self.MockBackend = MockBackend
-
     @depends_on_current_app
     def test_reduce(self):
         try:
             from celery.backends.redis import RedisBackend
-            x = RedisBackend(app=self.app)
+            x = RedisBackend(app=self.app, new_join=True)
             self.assertTrue(loads(dumps(x)))
         except ImportError:
             raise SkipTest('redis not installed')
 
     def test_no_redis(self):
-        self.MockBackend.redis = None
+        self.Backend.redis = None
         with self.assertRaises(ImproperlyConfigured):
-            self.MockBackend(app=self.app)
+            self.Backend(app=self.app, new_join=True)
 
     def test_url(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'redis://:bosco@vandelay.com:123//1', app=self.app,
+            new_join=True,
         )
         self.assertTrue(x.connparams)
         self.assertEqual(x.connparams['host'], 'vandelay.com')
@@ -134,8 +143,9 @@ class test_RedisBackend(AppCase):
         self.assertEqual(x.connparams['password'], 'bosco')
 
     def test_socket_url(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'socket:///tmp/redis.sock?virtual_host=/3', app=self.app,
+            new_join=True,
         )
         self.assertTrue(x.connparams)
         self.assertEqual(x.connparams['path'], '/tmp/redis.sock')
@@ -148,20 +158,17 @@ class test_RedisBackend(AppCase):
         self.assertEqual(x.connparams['db'], 3)
 
     def test_compat_propertie(self):
-        x = self.MockBackend(
+        x = self.Backend(
             'redis://:bosco@vandelay.com:123//1', app=self.app,
+            new_join=True,
         )
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
             self.assertEqual(x.host, 'vandelay.com')
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
             self.assertEqual(x.db, 1)
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
             self.assertEqual(x.port, 123)
-        with self.assertWarnsRegex(CPendingDeprecationWarning,
-                                   r'scheduled for deprecation'):
+        with self.assertPendingDeprecation():
             self.assertEqual(x.password, 'bosco')
 
     def test_conf_raises_KeyError(self):
@@ -171,71 +178,87 @@ class test_RedisBackend(AppCase):
             'CELERY_ACCEPT_CONTENT': ['json'],
             'CELERY_TASK_RESULT_EXPIRES': None,
         })
-        self.MockBackend(app=self.app)
+        self.Backend(app=self.app, new_join=True)
 
     def test_expires_defaults_to_config(self):
         self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10
-        b = self.Backend(expires=None, app=self.app)
+        b = self.Backend(expires=None, app=self.app, new_join=True)
         self.assertEqual(b.expires, 10)
 
     def test_expires_is_int(self):
-        b = self.Backend(expires=48, app=self.app)
+        b = self.Backend(expires=48, app=self.app, new_join=True)
         self.assertEqual(b.expires, 48)
 
+    def test_set_new_join_from_url_query(self):
+        b = self.Backend('redis://?new_join=True;foobar=1', app=self.app)
+        self.assertEqual(b.on_chord_part_return, b._new_chord_return)
+        self.assertEqual(b.apply_chord, b._new_chord_apply)
+
+    def test_default_is_old_join(self):
+        b = self.Backend(app=self.app)
+        self.assertNotEqual(b.on_chord_part_return, b._new_chord_return)
+        self.assertNotEqual(b.apply_chord, b._new_chord_apply)
+
     def test_expires_is_None(self):
-        b = self.Backend(expires=None, app=self.app)
-        self.assertEqual(b.expires, timedelta_seconds(
-            self.app.conf.CELERY_TASK_RESULT_EXPIRES))
+        b = self.Backend(expires=None, app=self.app, new_join=True)
+        self.assertEqual(
+            b.expires,
+            self.app.conf.CELERY_TASK_RESULT_EXPIRES.total_seconds(),
+        )
 
     def test_expires_is_timedelta(self):
-        b = self.Backend(expires=timedelta(minutes=1), app=self.app)
+        b = self.Backend(
+            expires=timedelta(minutes=1), app=self.app, new_join=1,
+        )
         self.assertEqual(b.expires, 60)
 
     def test_apply_chord(self):
-        self.Backend(app=self.app).apply_chord(
+        self.Backend(app=self.app, new_join=True).apply_chord(
             group(app=self.app), (), 'group_id', {},
             result=[self.app.AsyncResult(x) for x in [1, 2, 3]],
         )
 
     def test_mget(self):
-        b = self.MockBackend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
         self.assertTrue(b.mget(['a', 'b', 'c']))
         b.client.mget.assert_called_with(['a', 'b', 'c'])
 
     def test_set_no_expire(self):
-        b = self.MockBackend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
         b.expires = None
         b.set('foo', 'bar')
 
     @patch('celery.result.GroupResult.restore')
     def test_on_chord_part_return(self, restore):
-        b = self.MockBackend(app=self.app)
-        deps = Mock()
-        deps.__len__ = Mock()
-        deps.__len__.return_value = 10
-        restore.return_value = deps
-        b.client.incr.return_value = 1
-        task = Mock()
-        task.name = 'foobarbaz'
-        self.app.tasks['foobarbaz'] = task
-        task.request.chord = signature(task)
-        task.request.group = 'group_id'
-
-        b.on_chord_part_return(task)
-        self.assertTrue(b.client.incr.call_count)
-
-        b.client.incr.return_value = len(deps)
-        b.on_chord_part_return(task)
-        deps.join_native.assert_called_with(propagate=True, timeout=3.0)
-        deps.delete.assert_called_with()
-
-        self.assertTrue(b.client.expire.call_count)
+        b = self.Backend(app=self.app, new_join=True)
+
+        def create_task():
+            tid = uuid()
+            task = Mock(name='task-{0}'.format(tid))
+            task.name = 'foobarbaz'
+            self.app.tasks['foobarbaz'] = task
+            task.request.chord = signature(task)
+            task.request.id = tid
+            task.request.chord['chord_size'] = 10
+            task.request.group = 'group_id'
+            return task
+
+        tasks = [create_task() for i in range(10)]
+
+        for i in range(10):
+            b.on_chord_part_return(tasks[i], states.SUCCESS, i)
+            self.assertTrue(b.client.rpush.call_count)
+            b.client.rpush.reset_mock()
+        self.assertTrue(b.client.lrange.call_count)
+        gkey = b.get_key_for_group('group_id', '.j')
+        b.client.delete.assert_called_with(gkey)
+        b.client.expire.assert_called_with(gkey, 86400)
 
     def test_process_cleanup(self):
-        self.Backend(app=self.app).process_cleanup()
+        self.Backend(app=self.app, new_join=True).process_cleanup()
 
     def test_get_set_forget(self):
-        b = self.Backend(app=self.app)
+        b = self.Backend(app=self.app, new_join=True)
         tid = uuid()
         b.store_result(tid, 42, states.SUCCESS)
         self.assertEqual(b.get_status(tid), states.SUCCESS)
@@ -244,8 +267,10 @@ class test_RedisBackend(AppCase):
         self.assertEqual(b.get_status(tid), states.PENDING)
 
     def test_set_expires(self):
-        b = self.Backend(expires=512, app=self.app)
+        b = self.Backend(expires=512, app=self.app, new_join=True)
         tid = uuid()
         key = b.get_key_for_task(tid)
         b.store_result(tid, 42, states.SUCCESS)
-        self.assertEqual(b.client.expiry[key], 512)
+        b.client.expire.assert_called_with(
+            key, 512,
+        )
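
The Pipeline stub near the top of this file's diff records every chained call and only runs it when execute() is called, mirroring how redis-py pipelines defer commands. A minimal self-contained sketch of that pattern (simplified names, not the test module itself):

    class FakeClient(object):
        def __init__(self):
            self.keyspace = {}

        def set(self, key, value):
            self.keyspace[key] = value
            return True


    class FakePipeline(object):
        def __init__(self, client):
            self.client = client
            self.steps = []

        def __getattr__(self, attr):
            def add_step(*args, **kwargs):
                # record the client method and its arguments; return self
                # so calls can be chained like redis-py pipelines.
                self.steps.append((getattr(self.client, attr), args, kwargs))
                return self
            return add_step

        def execute(self):
            # replay the recorded calls in order and collect their results.
            return [step(*a, **kw) for step, a, kw in self.steps]


    client = FakeClient()
    assert FakePipeline(client).set('a', 1).set('b', 2).execute() == [True, True]
    assert client.keyspace == {'a': 1, 'b': 2}
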

+ 1 - 1
celery/tests/bin/test_amqp.py

@@ -124,7 +124,7 @@ class test_AMQShell(AppCase):
         self.assertNotIn('FOO', self.fh.getvalue())
 
     def test_run(self):
-        a = self.create_adm('queue.declare foo')
+        a = self.create_adm('queue.declare', 'foo')
         a.run()
         self.assertIn('ok', self.fh.getvalue())
 

+ 5 - 5
celery/tests/bin/test_base.py

@@ -241,21 +241,21 @@ class test_Command(AppCase):
         with self.assertRaises(AttributeError):
             cmd.find_app(__name__)
 
-    def test_simple_format(self):
+    def test_host_format(self):
         cmd = MockCommand(app=self.app)
         with patch('socket.gethostname') as hn:
             hn.return_value = 'blacktron.example.com'
-            self.assertEqual(cmd.simple_format(''), '')
+            self.assertEqual(cmd.host_format(''), '')
             self.assertEqual(
-                cmd.simple_format('celery@%h'),
+                cmd.host_format('celery@%h'),
                 'celery@blacktron.example.com',
             )
             self.assertEqual(
-                cmd.simple_format('celery@%d'),
+                cmd.host_format('celery@%d'),
                 'celery@example.com',
             )
             self.assertEqual(
-                cmd.simple_format('celery@%n'),
+                cmd.host_format('celery@%n'),
                 'celery@blacktron',
             )
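
The renamed host_format() assertions above exercise three expansions: %h for the full hostname, %n for the short node name and %d for the domain. A rough standalone sketch of that substitution, assuming a plain string replace rather than celery's actual implementation:

    def host_format(s, hostname='blacktron.example.com'):
        # %h -> full hostname, %n -> short node name, %d -> domain part.
        name, _, domain = hostname.partition('.')
        return (s.replace('%h', hostname)
                 .replace('%n', name)
                 .replace('%d', domain))

    assert host_format('') == ''
    assert host_format('celery@%h') == 'celery@blacktron.example.com'
    assert host_format('celery@%n') == 'celery@blacktron'
    assert host_format('celery@%d') == 'celery@example.com'
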
 

+ 2 - 1
celery/tests/bin/test_celery.py

@@ -2,9 +2,10 @@ from __future__ import absolute_import
 
 import sys
 
-from anyjson import dumps
 from datetime import datetime
 
+from kombu.utils.json import dumps
+
 from celery import __main__
 from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK
 from celery.bin.base import Error

+ 5 - 2
celery/tests/bin/test_celeryd_detach.py

@@ -24,8 +24,10 @@ if not IS_WINDOWS:
 
             detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log',
                    pidfile='/var/pid')
-            detached.assert_called_with('/var/log', '/var/pid', None, None, 0,
-                                        None, False)
+            detached.assert_called_with(
+                '/var/log', '/var/pid', None, None, 0, None, False,
+                after_forkers=False,
+            )
             execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c'])
 
             execv.side_effect = Exception('foo')
@@ -85,6 +87,7 @@ class test_Command(AppCase):
         detach.assert_called_with(
             path=x.execv_path, uid=None, gid=None,
             umask=0, fake=False, logfile='/var/log', pidfile='celeryd.pid',
+            working_directory=None,
             argv=x.execv_argv + [
                 '-c', '1', '-lDEBUG',
                 '--logfile=/var/log', '--pidfile=celeryd.pid',

+ 2 - 11
celery/tests/bin/test_multi.py

@@ -8,7 +8,6 @@ from celery.bin.multi import (
     main,
     MultiTool,
     findsig,
-    abbreviations,
     parse_ns_range,
     format_opt,
     quote,
@@ -30,14 +29,6 @@ class test_functions(AppCase):
         self.assertEqual(findsig(['-s']), signal.SIGTERM)
         self.assertEqual(findsig(['-log']), signal.SIGTERM)
 
-    def test_abbreviations(self):
-        expander = abbreviations({'%s': 'START',
-                                  '%x': 'STOP'})
-        self.assertEqual(expander('foo%s'), 'fooSTART')
-        self.assertEqual(expander('foo%x'), 'fooSTOP')
-        self.assertEqual(expander('foo%y'), 'foo%y')
-        self.assertIsNone(expander(None))
-
     def test_parse_ns_range(self):
         self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3'])
         self.assertEqual(parse_ns_range('1-3', False), ['1-3'])
@@ -78,6 +69,7 @@ class test_multi_args(AppCase):
 
     @patch('socket.gethostname')
     def test_parse(self, gethostname):
+        gethostname.return_value = 'example.com'
         p = NamespacedOptionParser([
             '-c:jerry,elaine', '5',
             '--loglevel:kramer=DEBUG',
@@ -120,12 +112,11 @@ class test_multi_args(AppCase):
         )
         expand = names[0][2]
         self.assertEqual(expand('%h'), '*P*jerry@*S*')
-        self.assertEqual(expand('%n'), 'jerry')
+        self.assertEqual(expand('%n'), '*P*jerry')
         names2 = list(multi_args(p, cmd='COMMAND', append='',
                       prefix='*P*', suffix='*S*'))
         self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1')
 
-        gethostname.return_value = 'example.com'
         p2 = NamespacedOptionParser(['10', '-c:1', '5'])
         names3 = list(multi_args(p2, cmd='COMMAND'))
         self.assertEqual(len(names3), 10)

+ 26 - 18
celery/tests/bin/test_worker.py

@@ -17,6 +17,7 @@ from celery.bin.worker import worker, main as worker_main
 from celery.exceptions import (
     ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
 )
+from celery.platforms import EX_FAILURE, EX_OK
 from celery.utils.log import ensure_process_aware_logger
 from celery.worker import state
 
@@ -443,8 +444,10 @@ class test_funs(WorkerAppCase):
     def test_parse_options(self):
         cmd = worker()
         cmd.app = self.app
-        opts, args = cmd.parse_options('worker', ['--concurrency=512'])
+        opts, args = cmd.parse_options('worker', ['--concurrency=512',
+                                       '--heartbeat-interval=10'])
         self.assertEqual(opts.concurrency, 512)
+        self.assertEqual(opts.heartbeat_interval, 10)
 
     @disable_stdouts
     def test_main(self):
@@ -488,8 +491,8 @@ class test_signal_handlers(WorkerAppCase):
         worker = self._Worker()
         handlers = self.psig(cd.install_worker_int_handler, worker)
         next_handlers = {}
-        state.should_stop = False
-        state.should_terminate = False
+        state.should_stop = None
+        state.should_terminate = None
 
         class Signals(platforms.Signals):
 
@@ -502,15 +505,17 @@ class test_signal_handlers(WorkerAppCase):
             try:
                 handlers['SIGINT']('SIGINT', object())
                 self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_FAILURE)
             finally:
                 platforms.signals = p
-                state.should_stop = False
+                state.should_stop = None
 
             try:
                 next_handlers['SIGINT']('SIGINT', object())
                 self.assertTrue(state.should_terminate)
+                self.assertEqual(state.should_terminate, EX_FAILURE)
             finally:
-                state.should_terminate = False
+                state.should_terminate = None
 
         with patch('celery.apps.worker.active_thread_count') as c:
             c.return_value = 1
@@ -541,7 +546,7 @@ class test_signal_handlers(WorkerAppCase):
                 self.assertTrue(state.should_stop)
             finally:
                 process.name = name
-                state.should_stop = False
+                state.should_stop = None
 
         with patch('celery.apps.worker.active_thread_count') as c:
             c.return_value = 1
@@ -552,7 +557,7 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGINT']('SIGINT', object())
             finally:
                 process.name = name
-                state.should_stop = False
+                state.should_stop = None
 
     @disable_stdouts
     def test_install_HUP_not_supported_handler(self):
@@ -578,14 +583,17 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGQUIT']('SIGQUIT', object())
                     self.assertTrue(state.should_terminate)
                 finally:
-                    state.should_terminate = False
+                    state.should_terminate = None
             with patch('celery.apps.worker.active_thread_count') as c:
                 c.return_value = 1
                 worker = self._Worker()
                 handlers = self.psig(
                     cd.install_worker_term_hard_handler, worker)
-                with self.assertRaises(WorkerTerminate):
-                    handlers['SIGQUIT']('SIGQUIT', object())
+                try:
+                    with self.assertRaises(WorkerTerminate):
+                        handlers['SIGQUIT']('SIGQUIT', object())
+                finally:
+                    state.should_terminate = None
         finally:
             process.name = name
 
@@ -597,9 +605,9 @@ class test_signal_handlers(WorkerAppCase):
             handlers = self.psig(cd.install_worker_term_handler, worker)
             try:
                 handlers['SIGTERM']('SIGTERM', object())
-                self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_OK)
             finally:
-                state.should_stop = False
+                state.should_stop = None
 
     @disable_stdouts
     def test_worker_term_handler_when_single_thread(self):
@@ -611,7 +619,7 @@ class test_signal_handlers(WorkerAppCase):
                 with self.assertRaises(WorkerShutdown):
                     handlers['SIGTERM']('SIGTERM', object())
             finally:
-                state.should_stop = False
+                state.should_stop = None
 
     @patch('sys.__stderr__')
     @skip_if_pypy
@@ -635,7 +643,7 @@ class test_signal_handlers(WorkerAppCase):
                 worker = self._Worker()
                 handlers = self.psig(cd.install_worker_term_handler, worker)
                 handlers['SIGTERM']('SIGTERM', object())
-                self.assertTrue(state.should_stop)
+                self.assertEqual(state.should_stop, EX_OK)
             with patch('celery.apps.worker.active_thread_count') as c:
                 c.return_value = 1
                 worker = self._Worker()
@@ -644,7 +652,7 @@ class test_signal_handlers(WorkerAppCase):
                     handlers['SIGTERM']('SIGTERM', object())
         finally:
             process.name = name
-            state.should_stop = False
+            state.should_stop = None
 
     @disable_stdouts
     @patch('celery.platforms.close_open_fds')
@@ -663,14 +671,14 @@ class test_signal_handlers(WorkerAppCase):
             worker = self._Worker()
             handlers = self.psig(cd.install_worker_restart_handler, worker)
             handlers['SIGHUP']('SIGHUP', object())
-            self.assertTrue(state.should_stop)
+            self.assertEqual(state.should_stop, EX_OK)
             self.assertTrue(register.called)
             callback = register.call_args[0][0]
             callback()
             self.assertTrue(argv)
         finally:
             os.execv = execv
-            state.should_stop = False
+            state.should_stop = None
 
     @disable_stdouts
     def test_worker_term_hard_handler_when_threaded(self):
@@ -682,7 +690,7 @@ class test_signal_handlers(WorkerAppCase):
                 handlers['SIGQUIT']('SIGQUIT', object())
                 self.assertTrue(state.should_terminate)
             finally:
-                state.should_terminate = False
+                state.should_terminate = None
 
     @disable_stdouts
     def test_worker_term_hard_handler_when_single_threaded(self):
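
The hunks above switch the worker's shutdown flags from a False default to None and compare them against exit codes imported from celery.platforms. The reason, sketched below with assumed constant values (EX_OK is 0 on POSIX, EX_FAILURE is 1), is that a falsy exit status like EX_OK cannot be told apart from "not requested" by a plain truth test:

    EX_OK, EX_FAILURE = 0, 1        # assumed values of the celery.platforms constants

    should_stop = None              # resting value: no shutdown requested
    assert should_stop is None

    should_stop = EX_OK             # SIGTERM handler: clean shutdown, exit status 0
    assert not should_stop          # falsy, so a bare truth test would miss the request
    assert should_stop is not None  # an identity check against None still detects it
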

+ 118 - 24
celery/tests/case.py

@@ -11,12 +11,14 @@ except AttributeError:
 import importlib
 import inspect
 import logging
+import numbers
 import os
 import platform
 import re
 import sys
 import threading
 import time
+import types
 import warnings
 
 from contextlib import contextmanager
@@ -37,6 +39,7 @@ from kombu.utils import nested, symbol_by_name
 from celery import Celery
 from celery.app import current_app
 from celery.backends.cache import CacheBackend, DummyClient
+from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
 from celery.five import (
     WhateverIO, builtins, items, reraise,
     string_t, values, open_fqdn,
@@ -45,7 +48,7 @@ from celery.utils.functional import noop
 from celery.utils.imports import qualname
 
 __all__ = [
-    'Case', 'AppCase', 'Mock', 'MagicMock',
+    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'TaskMessage',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -53,12 +56,15 @@ __all__ = [
     'replace_module_value', 'sys_platform', 'reset_modules',
     'patch_modules', 'mock_context', 'mock_open', 'patch_many',
     'assert_signal_called', 'skip_if_pypy',
-    'skip_if_jython', 'body_from_sig', 'restore_logging',
+    'skip_if_jython', 'task_message_from_sig', 'restore_logging',
 ]
 patch = mock.patch
 call = mock.call
 sentinel = mock.sentinel
 MagicMock = mock.MagicMock
+ANY = mock.ANY
+
+PY3 = sys.version_info[0] == 3
 
 CASE_REDEFINES_SETUP = """\
 {name} (subclass of AppCase) redefines private "setUp", should be: "setup"\
@@ -162,6 +168,35 @@ def ContextMock(*args, **kwargs):
     return obj
 
 
+def _bind(f, o):
+    @wraps(f)
+    def bound_meth(*fargs, **fkwargs):
+        return f(o, *fargs, **fkwargs)
+    return bound_meth
+
+
+if PY3:  # pragma: no cover
+    def _get_class_fun(meth):
+        return meth
+else:
+    def _get_class_fun(meth):
+        return meth.__func__
+
+
+class MockCallbacks(object):
+
+    def __new__(cls, *args, **kwargs):
+        r = Mock(name=cls.__name__)
+        _get_class_fun(cls.__init__)(r, *args, **kwargs)
+        for key, value in items(vars(cls)):
+            if key not in ('__dict__', '__weakref__', '__new__', '__init__'):
+                if inspect.ismethod(value) or inspect.isfunction(value):
+                    r.__getattr__(key).side_effect = _bind(value, r)
+                else:
+                    r.__setattr__(key, value)
+        return r
+
+
 def skip_unless_module(module):
 
     def _inner(fun):
@@ -193,6 +228,18 @@ class _AssertRaisesBaseContext(object):
         self.expected_regex = expected_regex
 
 
+def _is_magic_module(m):
+    # some libraries create custom module types that are lazily
+    # lodaded, e.g. Django installs some modules in sys.modules that
+    # will load _tkinter and other shit when touched.
+
+    # pyflakes refuses to accept 'noqa' for this isinstance.
+    cls, modtype = m.__class__, types.ModuleType
+    return (cls is not modtype and (
+        '__getattr__' in vars(m.__class__) or
+        '__getattribute__' in vars(m.__class__)))
+
+
 class _AssertWarnsContext(_AssertRaisesBaseContext):
     """A context manager used to implement TestCase.assertWarns* methods."""
 
@@ -201,8 +248,17 @@ class _AssertWarnsContext(_AssertRaisesBaseContext):
         # to work properly.
         warnings.resetwarnings()
         for v in list(values(sys.modules)):
-            if getattr(v, '__warningregistry__', None):
-                v.__warningregistry__ = {}
+            # do not evaluate Django moved modules and other lazily
+            # initialized modules.
+            if v and not _is_magic_module(v):
+                # use raw __getattribute__ to protect even better from
+                # lazily loaded modules
+                try:
+                    object.__getattribute__(v, '__warningregistry__')
+                except AttributeError:
+                    pass
+                else:
+                    object.__setattr__(v, '__warningregistry__', {})
         self.warnings_manager = warnings.catch_warnings(record=True)
         self.warnings = self.warnings_manager.__enter__()
         warnings.simplefilter('always', self.expected)
@@ -253,6 +309,18 @@ class Case(unittest.TestCase):
         return _AssertWarnsContext(expected_warning, self,
                                    None, expected_regex)
 
+    @contextmanager
+    def assertDeprecated(self):
+        with self.assertWarnsRegex(CDeprecationWarning,
+                                   r'scheduled for removal'):
+            yield
+
+    @contextmanager
+    def assertPendingDeprecation(self):
+        with self.assertWarnsRegex(CPendingDeprecationWarning,
+                                   r'scheduled for deprecation'):
+            yield
+
     def assertDictContainsSubset(self, expected, actual, msg=None):
         missing, mismatched = [], []
 
@@ -344,8 +412,12 @@ class AppCase(Case):
         self._current_app = current_app()
         self._default_app = _state.default_app
         trap = Trap()
+        self._prev_tls = _state._tls
         _state.set_default_app(trap)
-        _state._tls.current_app = trap
+
+        class NonTLS(object):
+            current_app = trap
+        _state._tls = NonTLS()
 
         self.app = self.Celery(set_as_current=False)
         if not self.contained:
@@ -379,13 +451,12 @@ class AppCase(Case):
                 if isinstance(backend.client, DummyClient):
                     backend.client.cache.clear()
                 backend._cache.clear()
-        from celery._state import (
-            _tls, set_default_app, _set_task_join_will_block,
-        )
-        _set_task_join_will_block(False)
+        from celery import _state
+        _state._set_task_join_will_block(False)
 
-        set_default_app(self._default_app)
-        _tls.current_app = self._current_app
+        _state.set_default_app(self._default_app)
+        _state._tls = self._prev_tls
+        _state._tls.current_app = self._current_app
         if self.app is not self._current_app:
             self.app.close()
         self.app = None
@@ -393,6 +464,15 @@ class AppCase(Case):
             self._threads_at_setup, list(threading.enumerate()),
         )
 
+        # Make sure no test left the shutdown flags enabled.
+        from celery.worker import state as worker_state
+        # check for EX_OK
+        self.assertIsNot(worker_state.should_stop, False)
+        self.assertIsNot(worker_state.should_terminate, False)
+        # check for other true values
+        self.assertFalse(worker_state.should_stop)
+        self.assertFalse(worker_state.should_terminate)
+
     def _get_test_name(self):
         return '.'.join([self.__class__.__name__, self._testMethodName])
 
@@ -748,7 +828,7 @@ def skip_if_jython(fun):
     return _inner
 
 
-def body_from_sig(app, sig, utc=True):
+def task_message_from_sig(app, sig, utc=True):
     sig.freeze()
     callbacks = sig.options.pop('link', None)
     errbacks = sig.options.pop('link_error', None)
@@ -760,21 +840,18 @@ def body_from_sig(app, sig, utc=True):
     if eta and isinstance(eta, datetime):
         eta = eta.isoformat()
     expires = sig.options.pop('expires', None)
-    if expires and isinstance(expires, int):
+    if expires and isinstance(expires, numbers.Real):
         expires = app.now() + timedelta(seconds=expires)
     if expires and isinstance(expires, datetime):
         expires = expires.isoformat()
-    return {
-        'task': sig.task,
-        'id': sig.id,
-        'args': sig.args,
-        'kwargs': sig.kwargs,
-        'callbacks': [dict(s) for s in callbacks] if callbacks else None,
-        'errbacks': [dict(s) for s in errbacks] if errbacks else None,
-        'eta': eta,
-        'utc': utc,
-        'expires': expires,
-    }
+    return TaskMessage(
+        sig.task, id=sig.id, args=sig.args,
+        kwargs=sig.kwargs,
+        callbacks=[dict(s) for s in callbacks] if callbacks else None,
+        errbacks=[dict(s) for s in errbacks] if errbacks else None,
+        eta=eta,
+        expires=expires,
+    )
 
 
 @contextmanager
@@ -790,3 +867,20 @@ def restore_logging():
         sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
         root.level = level
         root.handlers[:] = handlers
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {
+        'id': id,
+        'task': name,
+    }
+    message.headers.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        (args, kwargs), serializer='json',
+    )
+    message.payload = (args, kwargs)
+    return message
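
The MockCallbacks helper added above is what lets the Redis stub in the redis backend tests run its real method bodies while every call is still recorded on a Mock. A hedged, Python 3-only re-sketch of the idea using only the standard library (RecordingCallbacks and Counter are illustrative names, not part of celery.tests.case; celery's version also handles Python 2 unbound methods and the mock backport):

    import inspect
    from functools import wraps
    from unittest.mock import Mock


    def _bind(f, o):
        @wraps(f)
        def bound(*args, **kwargs):
            return f(o, *args, **kwargs)
        return bound


    class RecordingCallbacks(object):
        """Instances are Mocks whose methods run the real implementations."""

        def __new__(cls, *args, **kwargs):
            r = Mock(name=cls.__name__)
            cls.__init__(r, *args, **kwargs)
            for key, value in vars(cls).items():
                if key in ('__dict__', '__weakref__', '__new__', '__init__'):
                    continue
                if inspect.isfunction(value):
                    # route Mock calls through the real method, bound to the Mock
                    getattr(r, key).side_effect = _bind(value, r)
                else:
                    setattr(r, key, value)
            return r


    class Counter(RecordingCallbacks):

        def __init__(self, start=0):
            self.value = start

        def incr(self, n=1):
            self.value += n
            return self.value


    c = Counter(10)
    assert c.incr(5) == 15        # the real method body ran
    c.incr.assert_called_with(5)  # and the call was recorded like any Mock
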

+ 3 - 26
celery/tests/compat_modules/test_compat.py

@@ -10,34 +10,10 @@ from celery.task import (
     periodic_task,
     PeriodicTask
 )
-from celery.utils.timeutils import timedelta_seconds
 
 from celery.tests.case import AppCase, depends_on_current_app
 
 
-class test_Task(AppCase):
-
-    def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.task import Task as OldTask
-
-        class timkX(OldTask):
-            abstract = True
-
-        with self.Celery(set_as_current=False,
-                         accept_magic_kwargs=True) as app:
-            timkX.bind(app)
-            # see #918
-            self.assertFalse(timkX.accept_magic_kwargs)
-
-            from celery import Task as NewTask
-
-            class timkY(NewTask):
-                abstract = True
-
-            timkY.bind(app)
-            self.assertFalse(timkY.accept_magic_kwargs)
-
-
 @depends_on_current_app
 class test_periodic_tasks(AppCase):
 
@@ -74,8 +50,9 @@ class test_periodic_tasks(AppCase):
             self.now() - p.run_every.run_every,
         )
         self.assertTrue(due)
-        self.assertEqual(remaining,
-                         timedelta_seconds(p.run_every.run_every))
+        self.assertEqual(
+            remaining, p.run_every.run_every.total_seconds(),
+        )
 
     def test_schedule_repr(self):
         p = self.my_periodic
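
Here, as in the redis backend tests earlier in this changeset, celery.utils.timeutils.timedelta_seconds(td) is replaced by the standard library's timedelta.total_seconds(). A quick sanity check of the equivalence for non-negative deltas (timedelta_seconds additionally clamped negative deltas to 0):

    from datetime import timedelta

    td = timedelta(minutes=1, seconds=30)
    assert td.total_seconds() == 90.0   # the value timedelta_seconds(td) returned
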

+ 0 - 4
celery/tests/compat_modules/test_compat_utils.py

@@ -40,11 +40,7 @@ class test_MagicModule(AppCase):
         def _test_decorators_task():
             pass
 
-        self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
     def test_decorators_periodic_task(self):
         @celery.decorators.periodic_task(run_every=3600)
         def _test_decorators_ptask():
             pass
-
-        self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)

Too many files were changed in this diff, so some files are not shown.