Browse Source

Merge branch 'master' into 3.1

Ask Solem 11 years ago
parent
commit
63dcd6fbea
65 changed files with 1759 additions and 196 deletions
  1. 6 0
      .coveragerc
  2. 4 0
      .gitignore
  3. 20 6
      .travis.yml
  4. 1038 0
      CONTRIBUTING.rst
  5. 5 2
      CONTRIBUTORS.txt
  6. 74 0
      Changelog
  7. 1 0
      celery/app/amqp.py
  8. 4 1
      celery/app/base.py
  9. 13 1
      celery/app/task.py
  10. 1 2
      celery/bin/amqp.py
  11. 6 1
      celery/bin/base.py
  12. 1 1
      celery/bin/beat.py
  13. 26 4
      celery/bin/celery.py
  14. 1 1
      celery/bootsteps.py
  15. 11 3
      celery/concurrency/asynpool.py
  16. 2 2
      celery/contrib/batches.py
  17. 73 0
      celery/contrib/sphinx.py
  18. 1 1
      celery/datastructures.py
  19. 15 6
      celery/fixups/django.py
  20. 7 4
      celery/platforms.py
  21. 10 0
      celery/task/base.py
  22. 1 1
      celery/tests/__init__.py
  23. 1 1
      celery/tests/app/test_log.py
  24. 4 4
      celery/tests/backends/test_couchbase.py
  25. 4 4
      celery/tests/backends/test_mongodb.py
  26. 12 3
      celery/tests/case.py
  27. 3 0
      celery/tests/utils/test_timeutils.py
  28. 7 7
      celery/tests/worker/test_hub.py
  29. 3 0
      celery/tests/worker/test_loops.py
  30. 15 1
      celery/utils/__init__.py
  31. 1 1
      celery/utils/dispatch/signal.py
  32. 3 1
      celery/utils/iso8601.py
  33. 14 4
      celery/worker/consumer.py
  34. 3 0
      celery/worker/job.py
  35. 1 0
      celery/worker/loops.py
  36. 110 0
      docs/_ext/githubsphinx.py
  37. 3 5
      docs/conf.py
  38. 6 1
      docs/configuration.rst
  39. 74 25
      docs/contributing.rst
  40. 1 1
      docs/getting-started/brokers/couchdb.rst
  41. 1 1
      docs/getting-started/brokers/django.rst
  42. 1 1
      docs/getting-started/brokers/mongodb.rst
  43. 1 1
      docs/getting-started/brokers/sqlalchemy.rst
  44. 1 1
      docs/getting-started/brokers/sqs.rst
  45. 1 1
      docs/history/changelog-3.0.rst
  46. 2 2
      docs/internals/guide.rst
  47. 4 0
      docs/reference/celery.contrib.sphinx.rst
  48. 1 0
      docs/reference/index.rst
  49. 1 1
      docs/tutorials/daemonizing.rst
  50. 1 0
      docs/tutorials/debugging.rst
  51. 8 1
      docs/userguide/canvas.rst
  52. 88 17
      docs/userguide/extending.rst
  53. 22 11
      docs/userguide/workers.rst
  54. 1 1
      docs/whatsnew-3.1.rst
  55. 6 0
      extra/generic-init.d/celeryd
  56. 11 1
      pavement.py
  57. 1 1
      requirements/default.txt
  58. 0 1
      requirements/docs.txt
  59. 1 0
      requirements/jython.txt
  60. 1 0
      requirements/test-ci.txt
  61. 0 1
      requirements/test.txt
  62. 0 1
      requirements/test3.txt
  63. 1 5
      setup.cfg
  64. 8 28
      setup.py
  65. 12 26
      tox.ini

+ 6 - 0
.coveragerc

@@ -0,0 +1,6 @@
+[run]
+branch = 1
+cover_pylib = 0
+omit = celery.utils.debug,celery.tests.*,celery.bin.graph
+[report]
+omit = */python?.?/*,*/site-packages/*,*/pypy/*

+ 4 - 0
.gitignore

@@ -20,4 +20,8 @@ Documentation/
 .project
 .pydevproject
 .idea/
+.coverage
 celery/tests/cover/
+.ve*
+cover/
+

+ 20 - 6
.travis.yml

@@ -1,8 +1,22 @@
 language: python
 python:
-    - 2.6
-    - 2.7
-    - 3.3
-install:
-    - pip install --use-mirrors tox
-script: TOXENV=py$(echo $TRAVIS_PYTHON_VERSION | tr -d .) tox -v
+  - 2.6
+  - 2.7
+  - 3.3
+  - pypy
+before_install:
+  - |
+    deactivate
+    if python --version |& grep PyPy; then
+      sudo apt-add-repository --yes ppa:pypy/ppa
+      sudo apt-get update
+      sudo apt-get install pypy
+      source ~/virtualenv/pypy/bin/activate
+    fi
+    python --version
+    uname -a
+    lsb_release -a
+    sudo pip install tox
+script: tox -v -e $TRAVIS_PYTHON_VERSION -- -v
+after_success:
+  - .tox/$TRAVIS_PYTHON_VERSION/bin/coveralls

+ 1038 - 0
CONTRIBUTING.rst

@@ -0,0 +1,1038 @@
+.. _contributing:
+
+==============
+ Contributing
+==============
+
+Welcome!
+
+This document is fairly extensive and you are not really expected
+to study this in detail for small contributions;
+
+    The most important rule is that contributing must be easy
+    and that the community is friendly and not nitpicking on details
+    such as coding style.
+
+If you're reporting a bug you should read the Reporting bugs section
+below to ensure that your bug report contains enough information
+to successfully diagnose the issue, and if you're contributing code
+you should try to mimic the conventions you see surrounding the code
+you are working on, but in the end all patches will be cleaned up by
+the person merging the changes so don't worry too much.
+
+.. contents::
+    :local:
+
+.. _community-code-of-conduct:
+
+Community Code of Conduct
+=========================
+
+The goal is to maintain a diverse community that is pleasant for everyone.
+That is why we would greatly appreciate it if everyone contributing to and
+interacting with the community also followed this Code of Conduct.
+
+The Code of Conduct covers our behavior as members of the community,
+in any forum, mailing list, wiki, website, Internet relay chat (IRC), public
+meeting or private correspondence.
+
+The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and
+the `Pylons Code of Conduct`_.
+
+.. _`Ubuntu Code of Conduct`: http://www.ubuntu.com/community/conduct
+.. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
+
+Be considerate.
+---------------
+
+Your work will be used by other people, and you in turn will depend on the
+work of others.  Any decision you take will affect users and colleagues, and
+we expect you to take those consequences into account when making decisions.
+Even if it's not obvious at the time, our contributions to Celery will impact
+the work of others.  For example, changes to code, infrastructure, policy,
+documentation and translations during a release may negatively impact
+others' work.
+
+Be respectful.
+--------------
+
+The Celery community and its members treat one another with respect.  Everyone
+can make a valuable contribution to Celery.  We may not always agree, but
+disagreement is no excuse for poor behavior and poor manners.  We might all
+experience some frustration now and then, but we cannot allow that frustration
+to turn into a personal attack.  It's important to remember that a community
+where people feel uncomfortable or threatened is not a productive one.  We
+expect members of the Celery community to be respectful when dealing with
+other contributors as well as with people outside the Celery project and with
+users of Celery.
+
+Be collaborative.
+-----------------
+
+Collaboration is central to Celery and to the larger free software community.
+We should always be open to collaboration.  Your work should be done
+transparently and patches from Celery should be given back to the community
+when they are made, not just when the distribution releases.  If you wish
+to work on new code for existing upstream projects, at least keep those
+projects informed of your ideas and progress.  It may not be possible
+get consensus from upstream, or even from your colleagues about the correct
+implementation for an idea, so don't feel obliged to have that agreement
+before you begin, but at least keep the outside world informed of your work,
+and publish your work in a way that allows outsiders to test, discuss and
+contribute to your efforts.
+
+When you disagree, consult others.
+----------------------------------
+
+Disagreements, both political and technical, happen all the time and
+the Celery community is no exception.  It is important that we resolve
+disagreements and differing views constructively and with the help of the
+community and community process.  If you really want to go a different
+way, then we encourage you to make a derivative distribution or alternate
+set of packages that still build on the work we've done to utilize as common
+of a core as possible.
+
+When you are unsure, ask for help.
+----------------------------------
+
+Nobody knows everything, and nobody is expected to be perfect.  Asking
+questions avoids many problems down the road, and so questions are
+encouraged.  Those who are asked questions should be responsive and helpful.
+However, when asking a question, care must be taken to do so in an appropriate
+forum.
+
+Step down considerately.
+------------------------
+
+Developers on every project come and go and Celery is no different.  When you
+leave or disengage from the project, in whole or in part, we ask that you do
+so in a way that minimizes disruption to the project.  This means you should
+tell people you are leaving and take the proper steps to ensure that others
+can pick up where you leave off.
+
+.. _reporting-bugs:
+
+
+Reporting Bugs
+==============
+
+.. _vulnsec:
+
+Security
+--------
+
+You must never report security related issues, vulnerabilities or bugs
+including sensitive information to the bug tracker, or elsewhere in public.
+Instead sensitive bugs must be sent by email to ``security@celeryproject.org``.
+
+If you'd like to submit the information encrypted our PGP key is::
+
+    -----BEGIN PGP PUBLIC KEY BLOCK-----
+    Version: GnuPG v1.4.15 (Darwin)
+
+    mQENBFJpWDkBCADFIc9/Fpgse4owLNvsTC7GYfnJL19XO0hnL99sPx+DPbfr+cSE
+    9wiU+Wp2TfUX7pCLEGrODiEP6ZCZbgtiPgId+JYvMxpP6GXbjiIlHRw1EQNH8RlX
+    cVxy3rQfVv8PGGiJuyBBjxzvETHW25htVAZ5TI1+CkxmuyyEYqgZN2fNd0wEU19D
+    +c10G1gSECbCQTCbacLSzdpngAt1Gkrc96r7wGHBBSvDaGDD2pFSkVuTLMbIRrVp
+    lnKOPMsUijiip2EMr2DvfuXiUIUvaqInTPNWkDynLoh69ib5xC19CSVLONjkKBsr
+    Pe+qAY29liBatatpXsydY7GIUzyBT3MzgMJlABEBAAG0MUNlbGVyeSBTZWN1cml0
+    eSBUZWFtIDxzZWN1cml0eUBjZWxlcnlwcm9qZWN0Lm9yZz6JATgEEwECACIFAlJp
+    WDkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOArFOUDCicIw1IH/26f
+    CViDC7/P13jr+srRdjAsWvQztia9HmTlY8cUnbmkR9w6b6j3F2ayw8VhkyFWgYEJ
+    wtPBv8mHKADiVSFARS+0yGsfCkia5wDSQuIv6XqRlIrXUyqJbmF4NUFTyCZYoh+C
+    ZiQpN9xGhFPr5QDlMx2izWg1rvWlG1jY2Es1v/xED3AeCOB1eUGvRe/uJHKjGv7J
+    rj0pFcptZX+WDF22AN235WYwgJM6TrNfSu8sv8vNAQOVnsKcgsqhuwomSGsOfMQj
+    LFzIn95MKBBU1G5wOs7JtwiV9jefGqJGBO2FAvOVbvPdK/saSnB+7K36dQcIHqms
+    5hU4Xj0RIJiod5idlRC5AQ0EUmlYOQEIAJs8OwHMkrdcvy9kk2HBVbdqhgAREMKy
+    gmphDp7prRL9FqSY/dKpCbG0u82zyJypdb7QiaQ5pfPzPpQcd2dIcohkkh7G3E+e
+    hS2L9AXHpwR26/PzMBXyr2iNnNc4vTksHvGVDxzFnRpka6vbI/hrrZmYNYh9EAiv
+    uhE54b3/XhXwFgHjZXb9i8hgJ3nsO0pRwvUAM1bRGMbvf8e9F+kqgV0yWYNnh6QL
+    4Vpl1+epqp2RKPHyNQftbQyrAHXT9kQF9pPlx013MKYaFTADscuAp4T3dy7xmiwS
+    crqMbZLzfrxfFOsNxTUGE5vmJCcm+mybAtRo4aV6ACohAO9NevMx8pUAEQEAAYkB
+    HwQYAQIACQUCUmlYOQIbDAAKCRDgKxTlAwonCNFbB/9esir/f7TufE+isNqErzR/
+    aZKZo2WzZR9c75kbqo6J6DYuUHe6xI0OZ2qZ60iABDEZAiNXGulysFLCiPdatQ8x
+    8zt3DF9BMkEck54ZvAjpNSern6zfZb1jPYWZq3TKxlTs/GuCgBAuV4i5vDTZ7xK/
+    aF+OFY5zN7ciZHkqLgMiTZ+RhqRcK6FhVBP/Y7d9NlBOcDBTxxE1ZO1ute6n7guJ
+    ciw4hfoRk8qNN19szZuq3UU64zpkM2sBsIFM9tGF2FADRxiOaOWZHmIyVZriPFqW
+    RUwjSjs7jBVNq0Vy4fCu/5+e+XLOUBOoqtM5W7ELt0t1w9tXebtPEetV86in8fU2
+    =0chn
+    -----END PGP PUBLIC KEY BLOCK-----
+
+Other bugs
+----------
+
+Bugs can always be described to the ``mailing-list``, but the best
+way to report an issue and to ensure a timely response is to use the
+issue tracker.
+
+1) **Create a GitHub account.**
+
+You need to `create a GitHub account`_ to be able to create new issues
+and participate in the discussion.
+
+.. _`create a GitHub account`: https://github.com/signup/free
+
+2) **Determine if your bug is really a bug.**
+
+You should not file a bug if you are requesting support.  For that you can use
+the ``mailing-list``, or ``irc-channel``.
+
+3) **Make sure your bug hasn't already been reported.**
+
+Search through the appropriate Issue tracker.  If a bug like yours was found,
+check if you have new information that could be reported to help
+the developers fix the bug.
+
+4) **Check if you're using the latest version.**
+
+A bug could be fixed by some other improvements and fixes - it might not have an
+existing report in the bug tracker. Make sure you're using the latest releases of
+celery, billiard and kombu.
+
+5) **Collect information about the bug.**
+
+To have the best chance of having a bug fixed, we need to be able to easily
+reproduce the conditions that caused it.  Most of the time this information
+will be from a Python traceback message, though some bugs might be in design,
+spelling or other errors on the website/docs/code.
+
+    A) If the error is from a Python traceback, include it in the bug report.
+
+    B) We also need to know what platform you're running (Windows, OS X, Linux,
+       etc.), the version of your Python interpreter, and the version of Celery,
+       and related packages that you were running when the bug occurred.
+
+    C) If you are reporting a race condition or a deadlock, tracebacks can be
+       hard to get or might not be that useful. Try to inspect the process to
+       get more diagnostic data. Some ideas:
+
+       * Enable celery's ``breakpoint signal <breakpoint_signal>`` and use it
+         to inspect the process's state. This will allow you to open a ``pdb``
+         session.
+       * Collect tracing data using strace_(Linux), dtruss (OSX) and ktrace(BSD),
+         ltrace_ and lsof_.
+
+    D) Include the output from the `celery report` command:
+
+        .. code-block:: bash
+
+            $ celery -A proj report
+
+        This will also include your configuration settings and it will try to
+        remove values for keys known to be sensitive, but make sure you also
+        verify the information before submitting so that it doesn't contain
+        confidential information like API tokens and authentication
+        credentials.
+
+6) **Submit the bug.**
+
+By default `GitHub`_ will email you to let you know when new comments have
+been made on your bug. In the event you've turned this feature off, you
+should check back on occasion to ensure you don't miss any questions a
+developer trying to fix the bug might ask.
+
+.. _`GitHub`: http://github.com
+.. _`strace`: http://en.wikipedia.org/wiki/Strace
+.. _`ltrace`: http://en.wikipedia.org/wiki/Ltrace
+.. _`lsof`: http://en.wikipedia.org/wiki/Lsof
+
+.. _issue-trackers:
+
+Issue Trackers
+--------------
+
+Bugs for a package in the Celery ecosystem should be reported to the relevant
+issue tracker.
+
+* Celery: http://github.com/celery/celery/issues/
+* Kombu: http://github.com/celery/kombu/issues
+* pyamqp: http://github.com/celery/pyamqp/issues
+* librabbitmq: http://github.com/celery/librabbitmq/issues
+* Django-Celery: http://github.com/celery/django-celery/issues
+
+If you are unsure of the origin of the bug you can ask the
+``mailing-list``, or just use the Celery issue tracker.
+
+Contributors guide to the codebase
+==================================
+
+There's a separate section for internal details,
+including details about the codebase and a style guide.
+
+Read ``internals-guide`` for more!
+
+.. _versions:
+
+Versions
+========
+
+Version numbers consist of a major version, minor version and a release number.
+Since version 2.1.0 we use the versioning semantics described by
+semver: http://semver.org.
+
+Stable releases are published at PyPI
+while development releases are only available in the GitHub git repository as tags.
+All version tags start with “v”, so version 0.8.0 is the tag v0.8.0.
+
+.. _git-branches:
+
+Branches
+========
+
+Current active version branches:
+
+* master (http://github.com/celery/celery/tree/master)
+* 3.1 (http://github.com/celery/celery/tree/3.1)
+* 3.0 (http://github.com/celery/celery/tree/3.0)
+
+You can see the state of any branch by looking at the Changelog:
+
+    https://github.com/celery/celery/blob/master/Changelog
+
+If the branch is in active development the topmost version info should
+contain metadata like::
+
+    2.4.0
+    ======
+    :release-date: TBA
+    :status: DEVELOPMENT
+    :branch: master
+
+The ``status`` field can be one of:
+
+* ``PLANNING``
+
+    The branch is currently experimental and in the planning stage.
+
+* ``DEVELOPMENT``
+
+    The branch is in active development, but the test suite should
+    be passing and the product should be working and possible for users to test.
+
+* ``FROZEN``
+
+    The branch is frozen, and no more features will be accepted.
+    When a branch is frozen the focus is on testing the version as much
+    as possible before it is released.
+
+``master`` branch
+-----------------
+
+The master branch is where development of the next version happens.
+
+Maintenance branches
+--------------------
+
+Maintenance branches are named after the version, e.g. the maintenance branch
+for the 2.2.x series is named ``2.2``.  Previously these were named
+``releaseXX-maint``.
+
+The versions we currently maintain are:
+
+* 3.1
+
+  This is the current series.
+
+* 3.0
+
+  This is the previous series, and the last version to support Python 2.5.
+
+Archived branches
+-----------------
+
+Archived branches are kept for preserving history only,
+and theoretically someone could provide patches for these if they depend
+on a series that is no longer officially supported.
+
+An archived version is named ``X.Y-archived``.
+
+Our currently archived branches are:
+
+* 2.5-archived
+
+* 2.4-archived
+
+* 2.3-archived
+
+* 2.1-archived
+
+* 2.0-archived
+
+* 1.0-archived
+
+Feature branches
+----------------
+
+Major new features are worked on in dedicated branches.
+There is no strict naming requirement for these branches.
+
+Feature branches are removed once they have been merged into a release branch.
+
+Tags
+====
+
+Tags are used exclusively for tagging releases.  A release tag is
+named with the format ``vX.Y.Z``, e.g. ``v2.3.1``.
+Experimental releases contain an additional identifier ``vX.Y.Z-id``, e.g.
+``v3.0.0-rc1``.  Experimental tags may be removed after the official release.
+
+.. _contributing-changes:
+
+Working on Features & Patches
+=============================
+
+.. note::
+
+    Contributing to Celery should be as simple as possible,
+    so none of these steps should be considered mandatory.
+
+    You can even send in patches by email if that is your preferred
+    work method. We won't like you any less, any contribution you make
+    is always appreciated!
+
+    However, following these steps may make the maintainers' lives easier,
+    and may mean that your changes will be accepted sooner.
+
+Forking and setting up the repository
+-------------------------------------
+
+First you need to fork the Celery repository, a good introduction to this
+is in the Github Guide: `Fork a Repo`_.
+
+After you have cloned the repository you should checkout your copy
+to a directory on your machine:
+::
+
+    $ git clone git@github.com:username/celery.git
+
+When the repository is cloned enter the directory to set up easy access
+to upstream changes:
+::
+
+    $ cd celery
+::
+
+    $ git remote add upstream git://github.com/celery/celery.git
+::
+
+    $ git fetch upstream
+
+If you need to pull in new changes from upstream you should
+always use the ``--rebase`` option to ``git pull``:
+::
+
+    git pull --rebase upstream master
+
+With this option you don't clutter the history with merging
+commit notes. See `Rebasing merge commits in git`_.
+If you want to learn more about rebasing see the `Rebase`_
+section in the Github guides.
+
+If you need to work on a different branch than ``master`` you can
+fetch and checkout a remote branch like this::
+
+    git checkout --track -b 3.0-devel origin/3.0-devel
+
+For a list of branches see ``git-branches``.
+
+.. _`Fork a Repo`: http://help.github.com/fork-a-repo/
+.. _`Rebasing merge commits in git`:
+    http://notes.envato.com/developers/rebasing-merge-commits-in-git/
+.. _`Rebase`: http://help.github.com/rebase/
+
+.. _contributing-testing:
+
+Running the unit test suite
+---------------------------
+
+To run the Celery test suite you need to install a few dependencies.
+A complete list of the dependencies needed are located in
+``requirements/test.txt``.
+
+Installing the test requirements:
+::
+
+    $ pip install -U -r requirements/test.txt
+
+When installation of dependencies is complete you can execute
+the test suite by calling ``nosetests``:
+::
+
+    $ nosetests
+
+Some useful options to ``nosetests`` are:
+
+* ``-x``
+
+    Stop running the tests at the first test that fails.
+
+* ``-s``
+
+    Don't capture output
+
+* ``--nologcapture``
+
+    Don't capture log output.
+
+* ``-v``
+
+    Run with verbose output.
+
+If you want to run the tests for a single test file only
+you can do so like this:
+::
+
+    $ nosetests celery.tests.test_worker.test_worker_job
+
+.. _contributing-pull-requests:
+
+Creating pull requests
+----------------------
+
+When your feature/bugfix is complete you may want to submit
+a pull request so that it can be reviewed by the maintainers.
+
+Creating pull requests is easy, and also lets you track the progress
+of your contribution.  Read the `Pull Requests`_ section in the Github
+Guide to learn how this is done.
+
+You can also attach pull requests to existing issues by following
+the steps outlined here: http://bit.ly/koJoso
+
+.. _`Pull Requests`: http://help.github.com/send-pull-requests/
+
+.. _contributing-coverage:
+
+Calculating test coverage
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Code coverage in HTML:
+::
+
+    $ nosetests --with-coverage --cover-html
+
+The coverage output will then be located at
+``celery/tests/cover/index.html``.
+
+Code coverage in XML (Cobertura-style):
+::
+
+    $ nosetests --with-coverage --cover-xml --cover-xml-file=coverage.xml
+
+The coverage XML output will then be located at ``coverage.xml``
+
+.. _contributing-tox:
+
+Running the tests on all supported Python versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is a ``tox`` configuration file in the top directory of the
+distribution.
+
+To run the tests for all supported Python versions simply execute:
+::
+
+    $ tox
+
+If you only want to test specific Python versions use the ``-e``
+option:
+::
+
+    $ tox -e py26
+
+Building the documentation
+--------------------------
+
+To build the documentation you need to install the dependencies
+listed in ``requirements/docs.txt``:
+::
+
+    $ pip install -U -r requirements/docs.txt
+
+After these dependencies are installed you should be able to
+build the docs by running:
+::
+
+    $ cd docs
+    $ rm -rf .build
+    $ make html
+
+Make sure there are no errors or warnings in the build output.
+After building succeeds the documentation is available at ``.build/html``.
+
+.. _contributing-verify:
+
+Verifying your contribution
+---------------------------
+
+To use these tools you need to install a few dependencies.  These dependencies
+can be found in ``requirements/pkgutils.txt``.
+
+Installing the dependencies:
+::
+
+    $ pip install -U -r requirements/pkgutils.txt
+
+pyflakes & PEP8
+~~~~~~~~~~~~~~~
+
+To ensure that your changes conform to PEP8 and to run pyflakes
+execute:
+::
+
+    $ paver flake8
+
+To not return a negative exit code when this command fails use the
+``-E`` option, this can be convenient while developing:
+::
+
+    $ paver flake8 -E
+
+API reference
+~~~~~~~~~~~~~
+
+To make sure that all modules have a corresponding section in the API
+reference please execute:
+::
+
+    $ paver autodoc
+    $ paver verifyindex
+
+If files are missing you can add them by copying an existing reference file.
+
+If the module is internal it should be part of the internal reference
+located in ``docs/internals/reference/``.  If the module is public
+it should be located in ``docs/reference/``.
+
+For example if reference is missing for the module ``celery.worker.awesome``
+and this module is considered part of the public API, use the following steps:
+::
+
+    $ cd docs/reference/
+    $ cp celery.schedules.rst celery.worker.awesome.rst
+::
+
+    $ vim celery.worker.awesome.rst
+
+        # change every occurrence of ``celery.schedules`` to
+        # ``celery.worker.awesome``
+::
+
+    $ vim index.rst
+
+        # Add ``celery.worker.awesome`` to the index.
+::
+
+    # Add the file to git
+    $ git add celery.worker.awesome.rst
+    $ git add index.rst
+    $ git commit celery.worker.awesome.rst index.rst \
+        -m "Adds reference for celery.worker.awesome"
+
+.. _coding-style:
+
+Coding Style
+============
+
+You should probably be able to pick up the coding style
+from surrounding code, but it is a good idea to be aware of the
+following conventions.
+
+* All Python code must follow the `PEP-8`_ guidelines.
+
+`pep8.py`_ is a utility you can use to verify that your code
+is following the conventions.
+
+.. _`PEP-8`: http://www.python.org/dev/peps/pep-0008/
+.. _`pep8.py`: http://pypi.python.org/pypi/pep8
+
+* Docstrings must follow the `PEP-257`_ conventions, and use the following
+  style.
+
+    Do this:
+
+    .. code-block:: python
+
+        def method(self, arg):
+            """Short description.
+
+            More details.
+
+            """
+
+    or:
+
+    .. code-block:: python
+
+        def method(self, arg):
+            """Short description."""
+
+
+    but not this:
+
+    .. code-block:: python
+
+        def method(self, arg):
+            """
+            Short description.
+            """
+
+.. _`PEP-257`: http://www.python.org/dev/peps/pep-0257/
+
+* Lines should not exceed 78 columns.
+
+  You can enforce this in ``vim`` by setting the ``textwidth`` option:
+
+  .. code-block:: vim
+
+        set textwidth=78
+
+  If adhering to this limit makes the code less readable, you have one more
+  character to go on, which means 78 is a soft limit, and 79 is the hard
+  limit :)
+
+* Import order
+
+    * Python standard library (`import xxx`)
+    * Python standard library ('from xxx import`)
+    * Third party packages.
+    * Other modules from the current package.
+
+    or in case of code using Django:
+
+    * Python standard library (`import xxx`)
+    * Python standard library ('from xxx import`)
+    * Third party packages.
+    * Django packages.
+    * Other modules from the current package.
+
+    Within these sections the imports should be sorted by module name.
+
+    Example:
+
+    .. code-block:: python
+
+        import threading
+        import time
+
+        from collections import deque
+        from Queue import Queue, Empty
+
+        from .datastructures import TokenBucket
+        from .five import zip_longest, items, range
+        from .utils import timeutils
+
+* Wildcard imports must not be used (`from xxx import *`).
+
+* For distributions where Python 2.5 is the oldest support version
+  additional rules apply:
+
+    * Absolute imports must be enabled at the top of every module::
+
+        from __future__ import absolute_import
+
+    * If the module uses the with statement and must be compatible
+      with Python 2.5 (celery is not) then it must also enable that::
+
+        from __future__ import with_statement
+
+    * Every future import must be on its own line, as older Python 2.5
+      releases did not support importing multiple features on the
+      same future import line::
+
+        # Good
+        from __future__ import absolute_import
+        from __future__ import with_statement
+
+        # Bad
+        from __future__ import absolute_import, with_statement
+
+     (Note that this rule does not apply if the package does not include
+     support for Python 2.5)
+
+
+* Note that we use "new-style" relative imports when the distribution
+  does not support Python versions below 2.5
+::
+
+        from . import submodule
+
+
+.. _feature-with-extras:
+
+Contributing features requiring additional libraries
+====================================================
+
+Some features like a new result backend may require additional libraries
+that the user must install.
+
+We use setuptools `extra_requires` for this, and all new optional features
+that require 3rd party libraries must be added.
+
+1) Add a new requirements file in `requirements/extras`
+
+    E.g. for the Cassandra backend this is
+    ``requirements/extras/cassandra.txt``, and the file looks like this::
+
+        pycassa
+
+    These are pip requirement files so you can have version specifiers and
+    multiple packages are separated by newline.  A more complex example could
+    be:
+
+        # pycassa 2.0 breaks Foo
+        pycassa>=1.0,<2.0
+        thrift
+
+2) Modify ``setup.py``
+
+    After the requirements file is added you need to add it as an option
+    to ``setup.py`` in the ``extras_require`` section::
+
+        extra['extras_require'] = {
+            # ...
+            'cassandra': extras('cassandra.txt'),
+        }
+
+3) Document the new feature in ``docs/includes/installation.txt``
+
+    You must add your feature to the list in the ``bundles`` section
+    of ``docs/includes/installation.txt``.
+
+    After you've made changes to this file you need to render
+    the distro ``README`` file:
+
+    .. code-block:: bash
+
+        $ pip install -U requirements/pkgutils.txt
+        $ paver readme
+
+
+That's all that needs to be done, but remember that if your feature
+adds additional configuration options then these need to be documented
+in ``docs/configuration.rst``.  Also all settings need to be added to the
+``celery/app/defaults.py`` module.
+
+Result backends require a separate section in the ``docs/configuration.rst``
+file.
+
+.. _contact_information:
+
+Contacts
+========
+
+This is a list of people that can be contacted for questions
+regarding the official git repositories, PyPI packages
+Read the Docs pages.
+
+If the issue is not an emergency then it is better
+to ``report an issue <reporting-bugs>``.
+
+
+Committers
+----------
+
+Ask Solem
+~~~~~~~~~
+
+:github: https://github.com/ask
+:twitter: http://twitter.com/#!/asksol
+
+Mher Movsisyan
+~~~~~~~~~~~~~~
+
+:github: https://github.com/mher
+:twitter: http://twitter.com/#!/movsm
+
+Steeve Morin
+~~~~~~~~~~~~
+
+:github: https://github.com/steeve
+:twitter: http://twitter.com/#!/steeve
+
+Website
+-------
+
+The Celery Project website is run and maintained by
+
+Mauro Rocco
+~~~~~~~~~~~
+
+:github: https://github.com/fireantology
+:twitter: https://twitter.com/#!/fireantology
+
+with design by:
+
+Jan Henrik Helmers
+~~~~~~~~~~~~~~~~~~
+
+:web: http://www.helmersworks.com
+:twitter: http://twitter.com/#!/helmers
+
+
+.. _packages:
+
+Packages
+========
+
+celery
+------
+
+:git: https://github.com/celery/celery
+:CI: http://travis-ci.org/#!/celery/celery
+:PyPI: http://pypi.python.org/pypi/celery
+:docs: http://docs.celeryproject.org
+
+kombu
+-----
+
+Messaging library.
+
+:git: https://github.com/celery/kombu
+:CI: http://travis-ci.org/#!/celery/kombu
+:PyPI: http://pypi.python.org/pypi/kombu
+:docs: http://kombu.readthedocs.org
+
+billiard
+--------
+
+Fork of multiprocessing containing improvements
+that will eventually be merged into the Python stdlib.
+
+:git: https://github.com/celery/billiard
+:PyPI: http://pypi.python.org/pypi/billiard
+
+librabbitmq
+-----------
+
+Very fast Python AMQP client written in C.
+
+:git: https://github.com/celery/librabbitmq
+:PyPI: http://pypi.python.org/pypi/librabbitmq
+
+celerymon
+---------
+
+Celery monitor web-service.
+
+:git: https://github.com/celery/celerymon
+:PyPI: http://pypi.python.org/pypi/celerymon
+
+django-celery
+-------------
+
+Django <-> Celery Integration.
+
+:git: https://github.com/celery/django-celery
+:PyPI: http://pypi.python.org/pypi/django-celery
+:docs: http://docs.celeryproject.org/en/latest/django
+
+cl
+--
+
+Actor library.
+
+:git: https://github.com/celery/cl
+:PyPI: http://pypi.python.org/pypi/cl
+
+cyme
+----
+
+Distributed Celery Instance manager.
+
+:git: https://github.com/celery/cyme
+:PyPI: http://pypi.python.org/pypi/cyme
+:docs: http://cyme.readthedocs.org/
+
+
+Deprecated
+----------
+
+- Flask-Celery
+
+:git: https://github.com/ask/Flask-Celery
+:PyPI: http://pypi.python.org/pypi/Flask-Celery
+
+- carrot
+
+:git: https://github.com/ask/carrot
+:PyPI: http://pypi.python.org/pypi/carrot
+
+- ghettoq
+
+:git: https://github.com/ask/ghettoq
+:PyPI: http://pypi.python.org/pypi/ghettoq
+
+- kombu-sqlalchemy
+
+:git: https://github.com/ask/kombu-sqlalchemy
+:PyPI: http://pypi.python.org/pypi/kombu-sqlalchemy
+
+- django-kombu
+
+:git: https://github.com/ask/django-kombu
+:PyPI: http://pypi.python.org/pypi/django-kombu
+
+- pylibrabbitmq
+
+Old name for ``librabbitmq``.
+
+``None``
+:PyPI: http://pypi.python.org/pypi/pylibrabbitmq
+
+.. _release-procedure:
+
+
+Release Procedure
+=================
+
+Updating the version number
+---------------------------
+
+The version number must be updated two places:
+
+    * ``celery/__init__.py``
+    * ``docs/include/introduction.txt``
+
+After you have changed these files you must render
+the ``README`` files.  There is a script to convert sphinx syntax
+to generic reStructured Text syntax, and the paver task `readme`
+does this for you:
+::
+
+    $ paver readme
+
+Now commit the changes:
+::
+
+    $ git commit -a -m "Bumps version to X.Y.Z"
+
+and make a new version tag:
+::
+
+    $ git tag vX.Y.Z
+    $ git push --tags
+
+Releasing
+---------
+
+Commands to make a new public stable release::
+
+    $ paver releaseok  # checks pep8, autodoc index, runs tests and more
+    $ paver removepyc  # Remove .pyc files
+    $ git clean -xdn   # Check that there's no left-over files in the repo
+    $ python setup.py sdist upload  # Upload package to PyPI
+
+If this is a new release series then you also need to do the
+following:
+
+* Go to the Read The Docs management interface at:
+    http://readthedocs.org/projects/celery/?fromdocs=celery
+
+* Enter "Edit project"
+
+    Change default branch to the branch of this series, e.g. ``2.4``
+    for series 2.4.
+
+* Also add the previous version under the "versions" tab.
+

+ 5 - 2
CONTRIBUTORS.txt

@@ -152,5 +152,8 @@ Michael Robellard, 2013/11/07
 Vsevolod Kulaga, 2013/11/16
 Ionel Cristian Mărieș, 2013/12/09
 Константин Подшумок, 2013/12/16
-Antoine Legrand, 2014/09/01
-Pepijn de Vos, 2014/15/01
+Antoine Legrand, 2014/01/09
+Pepijn de Vos, 2014/01/15
+Dan McGee, 2014/01/27
+Paul Kilgo, 2014/01/28
+Martin Davidsson, 2014/02/08

+ 74 - 0
Changelog

@@ -8,6 +8,80 @@ This document contains change notes for bugfix releases in the 3.1.x series
 (Cipater), please see :ref:`whatsnew-3.1` for an overview of what's
 new in Celery 3.1.
 
+.. _version-3.1.9:
+
+3.1.9
+=====
+:release-date: 2014-02-10 06:00 P.M UTC
+:release-by: Ask Solem
+
+- **Requirements**:
+
+    - Now depends on :ref:`Kombu 3.0.12 <kombu:version-3.0.12>`.
+
+- **Prefork pool**: Better handling of exiting child processes.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **Prefork pool**: Now makes sure all file descriptors are removed
+  from the hub when a process is cleaned up.
+
+    Fix contributed by Ionel Cristian Mărieș.
+
+- **New Sphinx extension**: for autodoc documentation of tasks:
+  :mod:`celery.contrib.sphinx` (Issue #1833).
+
+- **Django**: Now works with Django 1.7a1.
+
+- **Task**: Task.backend is now a property that forwards to ``app.backend``
+  if no custom backend has been specified for the task (Issue #1821).
+
+- **Generic init scripts**: Fixed bug in stop command.
+
+    Fix contributed by Rinat Shigapov.
+
+- **Generic init scripts**: Fixed compatibility with GNU :manpage:`stat`.
+
+    Fix contributed by Paul Kilgo.
+
+- **Generic init scripts**: Fixed compatibility with the minimal
+  :program:`dash` shell (Issue #1815).
+
+- **Commands**: The :program:`celery amqp basic.publish` command was not
+  working properly.
+
+    Fix contributed by Andrey Voronov.
+
+- **Commands**: Fixed bug where no error message was emitted when the pidfile
+  exists and the process is still alive (Issue #1855).
+
+- **Commands**: Better error message for missing arguments to preload
+  options (Issue #1860).
+
+- **Commands**: :program:`celery -h` did not work because of a bug in the
+  argument parser (Issue #1849).
+
+- **Worker**: Improved error message for message decoding errors.
+
+- **Time**: Now properly parses the `Z` timezone specifier in ISO 8601 date
+  strings.
+
+    Fix contributed by Martin Davidsson.
+
+- **Worker**: Now uses the *negotiated* heartbeat value to calculate
+  how often to run the heartbeat checks.
+
+- **Beat**: Fixed problem with beat hanging after the first schedule
+  iteration (Issue #1822).
+
+    Fix contributed by Roger Hu.
+
+- **Signals**: The header argument to :signal:`before_task_publish` is now
+  always a dictionary instance so that signal handlers can add headers.
+
+- **Worker**: A list of message headers is now included in message related
+  errors.
+
 .. _version-3.1.8:
 
 3.1.8

+ 1 - 0
celery/app/amqp.py

@@ -221,6 +221,7 @@ class TaskProducer(Producer):
                      **kwargs):
         """Send task message."""
         retry = self.retry if retry is None else retry
+        headers = {} if headers is None else headers
 
         qname = queue
         if queue is None and exchange is None:

+ 4 - 1
celery/app/base.py

@@ -33,6 +33,7 @@ from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
 from celery.five import items, values
 from celery.loaders import get_loader_cls
 from celery.local import PromiseProxy, maybe_evaluate
+from celery.utils import shadowsig
 from celery.utils.functional import first, maybe_list
 from celery.utils.imports import instantiate, symbol_by_name
 from celery.utils.objects import mro_lookup
@@ -235,7 +236,9 @@ class Celery(object):
             'run': fun if bind else staticmethod(fun),
             '_decorated': True,
             '__doc__': fun.__doc__,
-            '__module__': fun.__module__}, **options))()
+            '__module__': fun.__module__,
+            '__wrapped__': fun}, **options))()
+        shadowsig(T, fun)  # for inspect.getargspec
         task = self._tasks[T.name]  # return global instance.
         return task
 

+ 13 - 1
celery/app/task.py

@@ -343,6 +343,8 @@ class Task(object):
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )
 
+    _backend = None  # set by backend property.
+
     __bound__ = False
 
     # - Tasks are lazily bound, so that configuration is not set
@@ -360,7 +362,6 @@ class Task(object):
                 setattr(self, attr_name, conf[config_name])
         if self.accept_magic_kwargs is None:
             self.accept_magic_kwargs = app.accept_magic_kwargs
-        self.backend = app.backend
 
         # decorate with annotations from config.
         if not was_bound:
@@ -899,6 +900,17 @@ class Task(object):
             self._exec_options = extract_exec_options(self)
         return self._exec_options
 
+    @property
+    def backend(self):
+        backend = self._backend
+        if backend is None:
+            return self.app.backend
+        return backend
+
+    @backend.setter
+    def backend(self, value):  # noqa
+        self._backend = value
+
     @property
     def __name__(self):
         return self.__class__.__name__

+ 1 - 2
celery/bin/amqp.py

@@ -15,7 +15,6 @@ import pprint
 from functools import partial
 from itertools import count
 
-from amqp import Message
 from kombu.utils.encoding import safe_str
 
 from celery.utils.functional import padlist
@@ -175,7 +174,7 @@ class AMQShell(cmd.Cmd):
         'basic.get': Spec(('queue', str),
                           ('no_ack', bool, 'off'),
                           returns=dump_message),
-        'basic.publish': Spec(('msg', Message),
+        'basic.publish': Spec(('msg', str),
                               ('exchange', str),
                               ('routing_key', str),
                               ('mandatory', bool, 'no'),

+ 6 - 1
celery/bin/base.py

@@ -530,7 +530,12 @@ class Command(object):
                 opt = opts.get(arg)
                 if opt:
                     if opt.takes_value():
-                        acc[opt.dest] = args[index + 1]
+                        try:
+                            acc[opt.dest] = args[index + 1]
+                        except IndexError:
+                            raise ValueError(
+                                'Missing required argument for {0}'.format(
+                                    arg))
                         index += 1
                     elif opt.action == 'store_true':
                         acc[opt.dest] = True

+ 1 - 1
celery/bin/beat.py

@@ -24,7 +24,7 @@ The :program:`celery beat` command.
     Scheduler class to use.
     Default is :class:`celery.beat.PersistentScheduler`.
 
-.. cmdoption:: max-interval
+.. cmdoption:: --max-interval
 
     Max seconds to sleep between schedule iterations.
 

+ 26 - 4
celery/bin/celery.py

@@ -702,7 +702,7 @@ class CeleryCommand(Command):
             helps = '{self.prog_name} {command} --help'
         else:
             helps = '{self.prog_name} --help'
-        self.error(self.colored.magenta("Error: {0}".format(exc)))
+        self.error(self.colored.magenta('Error: {0}'.format(exc)))
         self.error("""Please try '{0}'""".format(helps.format(
             self=self, command=command,
         )))
@@ -715,11 +715,33 @@ class CeleryCommand(Command):
                 if value.startswith('--'):
                     rest.append(value)
                 elif value.startswith('-'):
-                    rest.extend([value] + [argv[index + 1]])
-                    index += 1
+                    # we eat the next argument even though we don't know
+                    # if this option takes an argument or not.
+                    # instead we will assume what is the command name in the
+                    # return statements below.
+                    try:
+                        nxt = argv[index + 1]
+                        if nxt.startswith('-'):
+                            # is another option
+                            rest.append(value)
+                        else:
+                            # is (maybe) a value for this option
+                            rest.extend([value, nxt])
+                            index += 1
+                    except IndexError:
+                        rest.append(value)
+                        break
                 else:
-                    return argv[index:] + rest
+                    break
                 index += 1
+            if argv[index:]:
+                # if there are more arguments left then divide and swap
+                # we assume the first argument in argv[i:] is the command
+                # name.
+                return argv[index:] + rest
+            # if there are no more arguments then the last arg in rest'
+            # must be the command.
+            [rest.pop()] + rest
         return []
 
     def prepare_prog_name(self, name):

+ 1 - 1
celery/bootsteps.py

@@ -392,7 +392,7 @@ class StartStopStep(Step):
 
 
 class ConsumerStep(StartStopStep):
-    requires = ('Connection', )
+    requires = ('celery.worker.consumer:Connection', )
     consumers = None
 
     def get_consumers(self, channel):

+ 11 - 3
celery/concurrency/asynpool.py

@@ -570,6 +570,15 @@ class AsynPool(_pool.Pool):
             if inq:
                 busy_workers.discard(inq)
             hub_remove(proc.sentinel)
+            waiting_to_start.discard(proc)
+            self._active_writes.discard(proc.inqW_fd)
+            hub_remove(proc.inqW_fd)
+            hub_remove(proc.outqR_fd)
+            if proc.synqR_fd:
+                hub_remove(proc.synqR_fd)
+            if proc.synqW_fd:
+                self._active_writes.discard(proc.synqW_fd)
+                hub_remove(proc.synqW_fd)
         self.on_process_down = on_process_down
 
     def _create_write_handlers(self, hub,
@@ -960,14 +969,13 @@ class AsynPool(_pool.Pool):
         return inq, outq, synq
 
     def on_process_alive(self, pid):
-        """Handler called when the WORKER_UP message is received
+        """Handler called when the :const:`WORKER_UP` message is received
         from a child process, which marks the process as ready
         to receive work."""
         try:
             proc = next(w for w in self._pool if w.pid == pid)
         except StopIteration:
-            # process already exited :(  this will be handled elsewhere.
-            return
+            return logger.warning('process with pid=%s already exited', pid)
         assert proc.inqW_fd not in self._fileno_to_inq
         assert proc.inqW_fd not in self._all_inqueues
         self._waiting_to_start.discard(proc)

+ 2 - 2
celery/contrib/batches.py

@@ -47,7 +47,7 @@ messages, and every 10 seconds.
 
     from celery.contrib.batches import Batches
 
-    wot_api_target = "https://api.mywot.com/0.4/public_link_json"
+    wot_api_target = 'https://api.mywot.com/0.4/public_link_json'
 
     @app.task(base=Batches, flush_every=100, flush_interval=10)
     def wot_api(requests):
@@ -64,7 +64,7 @@ messages, and every 10 seconds.
         domains = [urlparse(url).netloc for url in urls]
         response = requests.get(
             wot_api_target,
-            params={"hosts": ('/').join(set(domains)) + '/'}
+            params={'hosts': ('/').join(set(domains)) + '/'}
         )
         return [response.json[domain] for domain in domains]
 

+ 73 - 0
celery/contrib/sphinx.py

@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+celery.contrib.sphinx
+=====================
+
+Sphinx documentation plugin
+
+**Usage**
+
+Add the extension to your :file:`docs/conf.py` configuration module:
+
+.. code-block:: python
+
+    extensions = (...,
+                  'celery.contrib.sphinx')
+
+If you would like to change the prefix for tasks in reference documentation
+then you can change the ``celery_task_prefix`` configuration value:
+
+.. code-block:: python
+
+    celery_task_prefix = '(task)'  # < default
+
+
+With the extension installed `autodoc` will automatically find
+task decorated objects and generate the correct documentation for
+them (as well as add a ``(task)`` prefix), and you can also refer
+to the tasks using the `:task:proj.tasks.add` syntax.
+
+Use ``.. autotask::`` to manually document a task.
+
+"""
+from __future__ import absolute_import
+
+from inspect import formatargspec, getargspec
+
+from sphinx.domains.python import PyModulelevel
+from sphinx.ext.autodoc import FunctionDocumenter
+
+from celery.app.task import BaseTask
+
+
+class TaskDocumenter(FunctionDocumenter):
+    objtype = 'task'
+    member_order = 11
+
+    @classmethod
+    def can_document_member(cls, member, membername, isattr, parent):
+        return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
+
+    def format_args(self):
+        wrapped = getattr(self.object, '__wrapped__')
+        if wrapped is not None:
+            argspec = getargspec(wrapped)
+            fmt = formatargspec(*argspec)
+            fmt = fmt.replace('\\', '\\\\')
+            return fmt
+        return ''
+
+    def document_members(self, all_members=False):
+        pass
+
+
+class TaskDirective(PyModulelevel):
+
+    def get_signature_prefix(self, sig):
+        return self.env.config.celery_task_prefix
+
+
+def setup(app):
+    app.add_autodocumenter(TaskDocumenter)
+    app.domains['py'].directives['task'] = TaskDirective
+    app.add_config_value('celery_task_prefix', '(task)', True)

+ 1 - 1
celery/datastructures.py

@@ -555,7 +555,7 @@ class LimitedSet(object):
     """Kind-of Set with limitations.
 
     Good for when you need to test for membership (`a in set`),
-    but the list might become to big.
+    but the list might become too big.
 
     :keyword maxlen: Maximum number of members before we start
                      evicting expired members.

+ 15 - 6
celery/fixups/django.py

@@ -134,13 +134,22 @@ class DjangoWorkerFixup(object):
         )
 
     def validate_models(self):
-        from django.core.management.validation import get_validation_errors
         s = io.StringIO()
-        num_errors = get_validation_errors(s, None)
-        if num_errors:
-            raise RuntimeError(
-                'One or more Django models did not validate:\n{0}'.format(
-                    s.getvalue()))
+        try:
+            from django.core.management.validation import get_validation_errors
+        except ImportError:
+            import django
+            from django.core.management.base import BaseCommand
+            django.setup()
+            cmd = BaseCommand()
+            cmd.stdout, cmd.stderr = sys.stdout, sys.stderr
+            cmd.check()
+        else:
+            num_errors = get_validation_errors(s, None)
+            if num_errors:
+                raise RuntimeError(
+                    'One or more Django models did not validate:\n{0}'.format(
+                        s.getvalue()))
 
     def install(self):
         signals.beat_embedded_init.connect(self.close_database)

+ 7 - 4
celery/platforms.py

@@ -49,6 +49,7 @@ EX_OK = getattr(os, 'EX_OK', 0)
 EX_FAILURE = 1
 EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
 EX_USAGE = getattr(os, 'EX_USAGE', 64)
+EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)
 
 SYSTEM = _platform.system()
 IS_OSX = SYSTEM == 'Darwin'
@@ -258,7 +259,8 @@ def create_pidlock(pidfile):
 def _create_pidlock(pidfile):
     pidlock = Pidfile(pidfile)
     if pidlock.is_locked() and not pidlock.remove_if_stale():
-        raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid()))
+        print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
+        raise SystemExit(EX_CANTCREAT)
     pidlock.acquire()
     return pidlock
 
@@ -266,9 +268,10 @@ def _create_pidlock(pidfile):
 if hasattr(os, 'closerange'):
 
     def close_open_fds(keep=None):
-        keep = list(uniq(sorted(filter(None, (
-            maybe_fileno(f) for f in keep or []
-        )))))
+        # must make sure this is 0-inclusive (Issue #1882)
+        keep = list(uniq(sorted(
+            f for f in map(maybe_fileno, keep or []) if f is not None
+        )))
         maxfd = get_fdmax(default=2048)
         kL, kH = iter([-1] + keep), iter(keep + [maxfd])
         for low, high in zip_longest(kL, kH):

+ 10 - 0
celery/task/base.py

@@ -69,6 +69,16 @@ class Task(BaseTask):
     def request(cls):
         return cls._get_request()
 
+    @class_property
+    def backend(cls):
+        if cls._backend is None:
+            return cls.app.backend
+        return cls._backend
+
+    @backend.setter
+    def backend(cls, value):  # noqa
+        cls._backend = value
+
     @classmethod
     def get_logger(self, **kwargs):
         return get_task_logger(self.name)

+ 1 - 1
celery/tests/__init__.py

@@ -22,7 +22,7 @@ def setup():
         KOMBU_DISABLE_LIMIT_PROTECTION='yes',
     )
 
-    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv:
+    if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv:
         from warnings import catch_warnings
         with catch_warnings(record=True):
             import_all_modules()

+ 1 - 1
celery/tests/app/test_log.py

@@ -336,7 +336,7 @@ class test_task_logger(test_default_logger):
         return self.app.log.setup_task_loggers(*args, **kwargs)
 
     def get_logger(self, *args, **kwargs):
-        return get_task_logger("test_task_logger")
+        return get_task_logger('test_task_logger')
 
 
 class test_patch_logger_cls(AppCase):

+ 4 - 4
celery/tests/backends/test_couchbase.py

@@ -129,8 +129,8 @@ class test_CouchBaseBackend(AppCase):
         url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
         with self.Celery(backend=url) as app:
             x = app.backend
-            self.assertEqual(x.bucket, "mycoolbucket")
-            self.assertEqual(x.host, "myhost")
-            self.assertEqual(x.username, "johndoe")
-            self.assertEqual(x.password, "mysecret")
+            self.assertEqual(x.bucket, 'mycoolbucket')
+            self.assertEqual(x.host, 'myhost')
+            self.assertEqual(x.username, 'johndoe')
+            self.assertEqual(x.password, 'mysecret')
             self.assertEqual(x.port, 123)

+ 4 - 4
celery/tests/backends/test_mongodb.py

@@ -10,7 +10,7 @@ from celery.backends import mongodb as module
 from celery.backends.mongodb import MongoBackend, Bunch, pymongo
 from celery.exceptions import ImproperlyConfigured
 from celery.tests.case import (
-    AppCase, MagicMock, Mock, SkipTest,
+    AppCase, MagicMock, Mock, SkipTest, ANY,
     depends_on_current_app, patch, sentinel,
 )
 
@@ -176,7 +176,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -232,7 +232,7 @@ class test_MongoBackend(AppCase):
 
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
-        mock_collection.save.assert_called_once()
+        mock_collection.save.assert_called_once_with(ANY)
         self.assertEqual(sentinel.result, ret_val)
 
     @patch('celery.backends.mongodb.MongoBackend._get_database')
@@ -309,7 +309,7 @@ class test_MongoBackend(AppCase):
         mock_get_database.assert_called_once_with()
         mock_database.__getitem__.assert_called_once_with(
             MONGODB_COLLECTION)
-        mock_collection.assert_called_once()
+        mock_collection.assert_called_once_with()
 
     def test_get_database_authfailure(self):
         x = MongoBackend(app=self.app)

+ 12 - 3
celery/tests/case.py

@@ -44,8 +44,14 @@ from celery.five import (
 from celery.utils.functional import noop
 from celery.utils.imports import qualname
 
+try:  # pragma: no cover
+    from django.utils.six import MovedModule
+except ImportError:  # pragma: no cover
+    class MovedModule(object):  # noqa
+        pass
+
 __all__ = [
-    'Case', 'AppCase', 'Mock', 'MagicMock',
+    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -59,6 +65,7 @@ patch = mock.patch
 call = mock.call
 sentinel = mock.sentinel
 MagicMock = mock.MagicMock
+ANY = mock.ANY
 
 CASE_REDEFINES_SETUP = """\
 {name} (subclass of AppCase) redefines private "setUp", should be: "setup"\
@@ -201,8 +208,10 @@ class _AssertWarnsContext(_AssertRaisesBaseContext):
         # to work properly.
         warnings.resetwarnings()
         for v in list(values(sys.modules)):
-            if getattr(v, '__warningregistry__', None):
-                v.__warningregistry__ = {}
+            # do not evaluate Django moved modules:
+            if not isinstance(v, MovedModule):
+                if getattr(v, '__warningregistry__', None):
+                    v.__warningregistry__ = {}
         self.warnings_manager = warnings.catch_warnings(record=True)
         self.warnings = self.warnings_manager.__enter__()
         warnings.simplefilter('always', self.expected)

+ 3 - 0
celery/tests/utils/test_timeutils.py

@@ -66,6 +66,9 @@ class test_iso8601(Case):
         iso2 = iso.replace('+00:00', '+01:00')
         d2 = parse_iso8601(iso2)
         self.assertEqual(d2.tzinfo._minutes, +60)
+        iso3 = iso.replace('+00:00', 'Z')
+        d3 = parse_iso8601(iso3)
+        self.assertEqual(d3.tzinfo, pytz.UTC)
 
 
 class test_timeutils(Case):

+ 7 - 7
celery/tests/worker/test_hub.py

@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 
 from kombu.async import Hub, READ, WRITE, ERR
-from kombu.async.hub import repr_flag, _rcb
+from kombu.async.debug import callback_for, repr_flag, _rcb
 from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore
 
 from celery.five import range
@@ -234,11 +234,11 @@ class test_Hub(Case):
         hub.readers = {6: reader}
         hub.writers = {7: writer}
 
-        self.assertEqual(hub._callback_for(6, READ), reader)
-        self.assertEqual(hub._callback_for(7, WRITE), writer)
+        self.assertEqual(callback_for(hub, 6, READ), reader)
+        self.assertEqual(callback_for(hub, 7, WRITE), writer)
         with self.assertRaises(KeyError):
-            hub._callback_for(6, WRITE)
-        self.assertEqual(hub._callback_for(6, WRITE, 'foo'), 'foo')
+            callback_for(hub, 6, WRITE)
+        self.assertEqual(callback_for(hub, 6, WRITE, 'foo'), 'foo')
 
     def test_add_remove_readers(self):
         hub = Hub()
@@ -251,7 +251,7 @@ class test_Hub(Case):
 
         P.register.assert_has_calls([
             call(10, hub.READ | hub.ERR),
-            call(File(11), hub.READ | hub.ERR),
+            call(11, hub.READ | hub.ERR),
         ], any_order=True)
 
         self.assertEqual(hub.readers[10], (read_A, (10, )))
@@ -289,7 +289,7 @@ class test_Hub(Case):
 
         P.register.assert_has_calls([
             call(20, hub.WRITE),
-            call(File(21), hub.WRITE),
+            call(21, hub.WRITE),
         ], any_order=True)
 
         self.assertEqual(hub.writers[20], (write_A, ()))

+ 3 - 0
celery/tests/worker/test_loops.py

@@ -36,6 +36,9 @@ class X(object):
                          heartbeat,
                          Mock(name='clock')]
         self.connection.supports_heartbeats = True
+        self.connection.get_heartbeat_interval.side_effect = (
+            lambda: self.heartbeat
+        )
         self.consumer.callbacks = []
         self.obj.strategies = {}
         self.connection.connection_errors = (socket.error, )

+ 15 - 1
celery/utils/__init__.py

@@ -16,7 +16,7 @@ import warnings
 import datetime
 
 from functools import partial, wraps
-from inspect import getargspec
+from inspect import getargspec, ismethod
 from pprint import pprint
 
 from kombu.entity import Exchange, Queue
@@ -29,6 +29,8 @@ __all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge',
            'jsonify', 'gen_task_name', 'nodename', 'nodesplit',
            'cached_property']
 
+PY3 = sys.version_info[0] == 3
+
 
 PENDING_DEPRECATION_FMT = """
     {description} is scheduled for deprecation in \
@@ -341,6 +343,18 @@ def default_nodename(hostname):
     name, host = nodesplit(hostname or '')
     return nodename(name or NODENAME_DEFAULT, host or socket.gethostname())
 
+
+def shadowsig(wrapper, wrapped):
+    if ismethod(wrapped):
+        wrapped = wrapped.__func__
+    wrapper.__code__ = wrapped.__code__
+    wrapper.__defaults__ = wrapper.func_defaults = wrapped.__defaults__
+
+    if not PY3:
+        wrapper.func_code = wrapper.__code__
+        wrapper.func_defaults = wrapper.__defaults__
+
+
 # ------------------------------------------------------------------------ #
 # > XXX Compat
 from .log import LOG_LEVELS     # noqa

+ 1 - 1
celery/utils/dispatch/signal.py

@@ -23,7 +23,7 @@ class Signal(object):  # pragma: no cover
 
     .. attribute:: receivers
         Internal attribute, holds a dictionary of
-        `{receriverkey (id): weakref(receiver)}` mappings.
+        `{receiverkey (id): weakref(receiver)}` mappings.
 
     """
 

+ 3 - 1
celery/utils/iso8601.py

@@ -59,7 +59,9 @@ def parse_iso8601(datestring):
         raise ValueError('unable to parse date string %r' % datestring)
     groups = m.groupdict()
     tz = groups['timezone']
-    if tz and tz != 'Z':
+    if tz == 'Z':
+        tz = FixedOffset(0)
+    elif tz:
         m = TIMEZONE_REGEX.match(tz)
         prefix, hours, minutes = m.groups()
         hours, minutes = int(hours), int(minutes)

+ 14 - 4
celery/worker/consumer.py

@@ -111,8 +111,16 @@ The full contents of the message body was:
 %s
 """
 
+MESSAGE_DECODE_ERROR = """\
+Can't decode message body: %r [type:%r encoding:%r headers:%s]
+
+body: %s
+"""
+
 MESSAGE_REPORT = """\
-body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3}}}\
+body: {0}
+{{content_type:{1} content_encoding:{2}
+  delivery_info:{3} headers={4}}}
 """
 
 MINGLE_GET_FIELDS = itemgetter('clock', 'revoked')
@@ -320,9 +328,10 @@ class Consumer(object):
         :param exc: The original exception instance.
 
         """
-        crit("Can't decode message body: %r (type:%r encoding:%r raw:%r')",
+        crit(MESSAGE_DECODE_ERROR,
              exc, message.content_type, message.content_encoding,
-             dump_body(message, message.body), exc_info=1)
+             safe_repr(message.headers), dump_body(message, message.body),
+             exc_info=1)
         message.ack()
 
     def on_close(self):
@@ -407,7 +416,8 @@ class Consumer(object):
         return MESSAGE_REPORT.format(dump_body(message, body),
                                      safe_repr(message.content_type),
                                      safe_repr(message.content_encoding),
-                                     safe_repr(message.delivery_info))
+                                     safe_repr(message.delivery_info),
+                                     safe_repr(message.headers))
 
     def on_unknown_message(self, body, message):
         warn(UNKNOWN_FORMAT, self._message_report(body, message))

+ 3 - 0
celery/worker/job.py

@@ -371,6 +371,9 @@ class Request(object):
         if self.store_errors:
             self.task.backend.mark_as_failure(self.id, exc, request=self)
 
+        if self.task.acks_late:
+            self.acknowledge()
+
     def on_success(self, ret_value, now=None, nowfun=monotonic):
         """Handler called if the task was successfully processed."""
         if isinstance(ret_value, ExceptionInfo):

+ 1 - 0
celery/worker/loops.py

@@ -29,6 +29,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
     readers, writers = hub.readers, hub.writers
     hbtick = connection.heartbeat_check
     errors = connection.connection_errors
+    heartbeat = connection.get_heartbeat_interval()  # negotiated
     hub_add, hub_remove = hub.add, hub.remove
 
     on_task_received = obj.create_task_handler()

+ 110 - 0
docs/_ext/githubsphinx.py

@@ -0,0 +1,110 @@
+"""Stolen from sphinxcontrib-issuetracker.
+
+Had to modify this as the original will make one Github API request
+per issue, which is not at all needed if we just want to link to issues.
+
+"""
+from __future__ import absolute_import
+
+import re
+import sys
+
+from collections import namedtuple
+
+from docutils import nodes
+from docutils.transforms import Transform
+from sphinx.roles import XRefRole
+from sphinx.addnodes import pending_xref
+
+URL = 'https://github.com/{project}/issues/{issue_id}'
+
+Issue = namedtuple('Issue', ('id', 'title', 'url'))
+
+if sys.version_info[0] == 3:
+    str_t = text_t = str
+else:
+    str_t = basestring
+    text_t = unicode
+
+
+class IssueRole(XRefRole):
+    innernodeclass = nodes.inline
+
+
+class Issues(Transform):
+    default_priority = 999
+
+    def apply(self):
+        config = self.document.settings.env.config
+        github_project = config.github_project
+        issue_pattern = config.github_issue_pattern
+        if isinstance(issue_pattern, str_t):
+            issue_pattern = re.compile(issue_pattern)
+        for node in self.document.traverse(nodes.Text):
+            parent = node.parent
+            if isinstance(parent, (nodes.literal, nodes.FixedTextElement)):
+                continue
+            text = text_t(node)
+            new_nodes = []
+            last_issue_ref_end = 0
+            for match in issue_pattern.finditer(text):
+                head = text[last_issue_ref_end:match.start()]
+                if head:
+                    new_nodes.append(nodes.Text(head))
+                last_issue_ref_end = match.end()
+                issuetext = match.group(0)
+                issue_id = match.group(1)
+                refnode = pending_xref()
+                refnode['reftarget'] = issue_id
+                refnode['reftype'] = 'issue'
+                refnode['github_project'] = github_project
+                reftitle = issuetext
+                refnode.append(nodes.inline(
+                    issuetext, reftitle, classes=['xref', 'issue']))
+                new_nodes.append(refnode)
+            if not new_nodes:
+                continue
+            tail = text[last_issue_ref_end:]
+            if tail:
+                new_nodes.append(nodes.Text(tail))
+            parent.replace(node, new_nodes)
+
+
+def make_issue_reference(issue, content_node):
+    reference = nodes.reference()
+    reference['refuri'] = issue.url
+    if issue.title:
+        reference['reftitle'] = issue.title
+    reference.append(content_node)
+    return reference
+
+
+def resolve_issue_reference(app, env, node, contnode):
+    if node['reftype'] != 'issue':
+        return
+    issue_id = node['reftarget']
+    project = node['github_project']
+
+    issue = Issue(issue_id, None, URL.format(project=project,
+                                             issue_id=issue_id))
+    conttext = text_t(contnode[0])
+    formatted_conttext = nodes.Text(conttext.format(issue=issue))
+    formatted_contnode = nodes.inline(conttext, formatted_conttext,
+                                      classes=contnode['classes'])
+    return make_issue_reference(issue, formatted_contnode)
+
+
+def init_transformer(app):
+    app.add_transform(Issues)
+
+
+def setup(app):
+    app.require_sphinx('1.0')
+    app.add_role('issue', IssueRole())
+
+    app.add_config_value('github_project', None, 'env')
+    app.add_config_value('github_issue_pattern',
+                         re.compile(r'[Ii]ssue #(\d+)'), 'env')
+
+    app.connect(str('builder-inited'), init_transformer)
+    app.connect(str('missing-reference'), resolve_issue_reference)

+ 3 - 5
docs/conf.py

@@ -21,7 +21,8 @@ extensions = ['sphinx.ext.autodoc',
               'sphinx.ext.viewcode',
               'sphinx.ext.coverage',
               'sphinx.ext.intersphinx',
-              'sphinxcontrib.issuetracker',
+              'celery.contrib.sphinx',
+              'githubsphinx',
               'celerydocs']
 
 
@@ -116,10 +117,7 @@ html_sidebars = {
 
 ### Issuetracker
 
-if False:
-    issuetracker = 'github'
-    issuetracker_project = 'celery/celery'
-    issuetracker_issue_pattern = r'[Ii]ssue #(\d+)'
+github_project = 'celery/celery'
 
 # -- Options for Epub output -------------------------------------------
 

+ 6 - 1
docs/configuration.rst

@@ -1142,7 +1142,12 @@ CELERY_MAX_CACHED_RESULTS
 Result backends caches ready results used by the client.
 
 This is the total number of results to cache before older results are evicted.
-The default is 5000.
+The default is 5000.  0 or None means no limit.
+
+.. note::
+    
+    These results are kept in memory.  Reduce this setting if your Celery is utilizing
+    a large amount of memory.
 
 .. setting:: CELERY_CHORD_PROPAGATES
 

+ 74 - 25
docs/contributing.rst

@@ -4,6 +4,22 @@
  Contributing
 ==============
 
+Welcome!
+
+This document is fairly extensive and you are not really expected
+to study this in detail for small contributions;
+
+    The most important rule is that contributing must be easy
+    and that the community is friendly and not nitpicking on details
+    such as coding style.
+
+If you're reporting a bug you should read the Reporting bugs section
+below to ensure that your bug report contains enough information
+to successfully diagnose the issue, and if you're contributing code
+you should try to mimic the conventions you see surrounding the code
+you are working on, but in the end all patches will be cleaned up by
+the person merging the changes so don't worry too much.
+
 .. contents::
     :local:
 
@@ -32,7 +48,7 @@ Be considerate.
 Your work will be used by other people, and you in turn will depend on the
 work of others.  Any decision you take will affect users and colleagues, and
 we expect you to take those consequences into account when making decisions.
-Even if it's not obvious at the time, our contributions to Ubuntu will impact
+Even if it's not obvious at the time, our contributions to Celery will impact
 the work of others.  For example, changes to code, infrastructure, policy,
 documentation and translations during a release may negatively impact
 others work.
@@ -106,8 +122,8 @@ Security
 --------
 
 You must never report security related issues, vulnerabilities or bugs
-including senstive information to the bug tracker, or elsewhere in public.
-Instead sensitive bugs must be sent by email to security@celeryproject.org.
+including sensitive information to the bug tracker, or elsewhere in public.
+Instead sensitive bugs must be sent by email to ``security@celeryproject.org``.
 
 If you'd like to submit the information encrypted our PGP key is::
 
@@ -149,38 +165,66 @@ Bugs can always be described to the :ref:`mailing-list`, but the best
 way to report an issue and to ensure a timely response is to use the
 issue tracker.
 
-1) Create a GitHub account.
+1) **Create a GitHub account.**
 
 You need to `create a GitHub account`_ to be able to create new issues
 and participate in the discussion.
 
 .. _`create a GitHub account`: https://github.com/signup/free
 
-2) Determine if your bug is really a bug.
+2) **Determine if your bug is really a bug.**
 
 You should not file a bug if you are requesting support.  For that you can use
 the :ref:`mailing-list`, or :ref:`irc-channel`.
 
-3) Make sure your bug hasn't already been reported.
+3) **Make sure your bug hasn't already been reported.**
 
 Search through the appropriate Issue tracker.  If a bug like yours was found,
 check if you have new information that could be reported to help
 the developers fix the bug.
 
-4) Collect information about the bug.
+4) **Check if you're using the latest version.**
+
+A bug could be fixed by some other improvements and fixes - it might not have an
+existing report in the bug tracker. Make sure you're using the latest releases of
+celery, billiard and kombu.
+
+5) **Collect information about the bug.**
 
 To have the best chance of having a bug fixed, we need to be able to easily
 reproduce the conditions that caused it.  Most of the time this information
 will be from a Python traceback message, though some bugs might be in design,
 spelling or other errors on the website/docs/code.
 
-If the error is from a Python traceback, include it in the bug report.
+    A) If the error is from a Python traceback, include it in the bug report.
+
+    B) We also need to know what platform you're running (Windows, OS X, Linux,
+       etc.), the version of your Python interpreter, and the version of Celery,
+       and related packages that you were running when the bug occurred.
+
+    C) If you are reporting a race condition or a deadlock, tracebacks can be
+       hard to get or might not be that useful. Try to inspect the process to
+       get more diagnostic data. Some ideas:
+
+       * Enable celery's :ref:`breakpoint signal <breakpoint_signal>` and use it
+         to inspect the process's state. This will allow you to open a :mod:`pdb`
+         session.
+       * Collect tracing data using strace_ (Linux), dtruss (OS X) and ktrace (BSD),
+         ltrace_ and lsof_.
+
+    D) Include the output from the `celery report` command:
 
-We also need to know what platform you're running (Windows, OSX, Linux, etc),
-the version of your Python interpreter, and the version of Celery, and related
-packages that you were running when the bug occurred.
+        .. code-block:: bash
 
-5) Submit the bug.
+            $ celery -A proj report
+
+        This will also include your configuration settings and it will try to
+        remove values for keys known to be sensitive, but make sure you also
+        verify the information before submitting so that it doesn't contain
+        confidential information like API tokens and authentication
+        credentials.
+
+6) **Submit the bug.**
 
 By default `GitHub`_ will email you to let you know when new comments have
 been made on your bug. In the event you've turned this feature off, you
@@ -188,6 +232,9 @@ should check back on occasion to ensure you don't miss any questions a
 developer trying to fix the bug might ask.
 
 .. _`GitHub`: http://github.com
+.. _`strace`: http://en.wikipedia.org/wiki/Strace
+.. _`ltrace`: http://en.wikipedia.org/wiki/Ltrace
+.. _`lsof`: http://en.wikipedia.org/wiki/Lsof
 
 .. _issue-trackers:
 
@@ -198,9 +245,10 @@ Bugs for a package in the Celery ecosystem should be reported to the relevant
 issue tracker.
 
 * Celery: http://github.com/celery/celery/issues/
-* Django-Celery: http://github.com/celery/django-celery/issues
-* Celery-Pylons: http://bitbucket.org/ianschenck/celery-pylons/issues
 * Kombu: http://github.com/celery/kombu/issues
+* py-amqp: http://github.com/celery/py-amqp/issues
+* librabbitmq: http://github.com/celery/librabbitmq/issues
+* Django-Celery: http://github.com/celery/django-celery/issues
 
 If you are unsure of the origin of the bug you can ask the
 :ref:`mailing-list`, or just use the Celery issue tracker.
@@ -208,7 +256,7 @@ If you are unsure of the origin of the bug you can ask the
 Contributors guide to the codebase
 ==================================
 
-There's a seperate section for internal details,
+There's a separate section for internal details,
 including details about the codebase and a style guide.
 
 Read :ref:`internals-guide` for more!
@@ -281,18 +329,13 @@ for the 2.2.x series is named ``2.2``.  Previously these were named
 
 The versions we currently maintain is:
 
-* 2.3
+* 3.1
 
   This is the current series.
 
-* 2.2
-
-  This is the previous series, and the last version to support Python 2.4.
+* 3.0
 
-* 2.1
-
-  This is the last version to use the ``carrot`` AMQP library.
-  Recent versions use ``kombu``.
+  This is the previous series, and the last version to support Python 2.5.
 
 Archived branches
 -----------------
@@ -305,6 +348,12 @@ An archived version is named ``X.Y-archived``.
 
 Our currently archived branches are:
 
+* 2.5-archived
+
+* 2.4-archived
+
+* 2.3-archived
+
 * 2.1-archived
 
 * 2.0-archived
@@ -469,7 +518,7 @@ Code coverage in HTML:
 
 .. code-block:: bash
 
-    $ nosetests --with-coverage3 --cover3-html
+    $ nosetests --with-coverage --cover-html
 
 The coverage output will then be located at
 :file:`celery/tests/cover/index.html`.
@@ -478,7 +527,7 @@ Code coverage in XML (Cobertura-style):
 
 .. code-block:: bash
 
-    $ nosetests --with-coverage3 --cover3-xml --cover3-xml-file=coverage.xml
+    $ nosetests --with-coverage --cover-xml --cover-xml-file=coverage.xml
 
 The coverage XML output will then be located at :file:`coverage.xml`
 

+ 1 - 1
docs/getting-started/brokers/couchdb.rst

@@ -7,7 +7,7 @@
 .. admonition:: Experimental Status
 
     The CouchDB transport is in need of improvements in many areas and there
-    are several open bugs.  Sadly we don't have the resources or funds
+    are several open bugs.  Unfortunately we don't have the resources or funds
     required to improve the situation, so we're looking for contributors
     and partners willing to help.
 

+ 1 - 1
docs/getting-started/brokers/django.rst

@@ -7,7 +7,7 @@
 .. admonition:: Experimental Status
 
     The Django database transport is in need of improvements in many areas
-    and there are several open bugs.  Sadly we don't have the resources or funds
+    and there are several open bugs.  Unfortunately we don't have the resources or funds
     required to improve the situation, so we're looking for contributors
     and partners willing to help.
 

+ 1 - 1
docs/getting-started/brokers/mongodb.rst

@@ -7,7 +7,7 @@
 .. admonition:: Experimental Status
 
     The MongoDB transport is in need of improvements in many areas and there
-    are several open bugs.  Sadly we don't have the resources or funds
+    are several open bugs.  Unfortunately we don't have the resources or funds
     required to improve the situation, so we're looking for contributors
     and partners willing to help.
 

+ 1 - 1
docs/getting-started/brokers/sqlalchemy.rst

@@ -7,7 +7,7 @@
 .. admonition:: Experimental Status
 
     The SQLAlchemy transport is unstable in many areas and there are
-    several issues open.  Sadly we don't have the resources or funds
+    several issues open.  Unfortunately we don't have the resources or funds
     required to improve the situation, so we're looking for contributors
     and partners willing to help.
 

+ 1 - 1
docs/getting-started/brokers/sqs.rst

@@ -7,7 +7,7 @@
 .. admonition:: Experimental Status
 
     The SQS transport is in need of improvements in many areas and there
-    are several open bugs.  Sadly we don't have the resources or funds
+    are several open bugs.  Unfortunately we don't have the resources or funds
     required to improve the situation, so we're looking for contributors
     and partners willing to help.
 

+ 1 - 1
docs/history/changelog-3.0.rst

@@ -558,7 +558,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
     execv was only enabled when transports other than amqp/redis was used,
     and it's there to prevent deadlocks caused by mutexes not being released
-    before the process forks.  Sadly it also changes the environment
+    before the process forks.  Unfortunately it also changes the environment
     introducing many corner case bugs that is hard to fix without adding
     horrible hacks.  Deadlock issues are reported far less often than the
     bugs that execv are causing, so we now disable it by default.

+ 2 - 2
docs/internals/guide.rst

@@ -179,8 +179,8 @@ can't co-exist in the same process space, this later posed a problem
 for using Celery with frameworks that doesn't have this limitation.
 
 Therefore the app concept was introduced.  When using apps you use 'celery'
-objects instead of importing things from celery submodules, this sadly
-also means that Celery essentially has two API's.
+objects instead of importing things from celery submodules, this
+(unfortunately) also means that Celery essentially has two API's.
 
 Here's an example using Celery in single-mode:
 

+ 4 - 0
docs/reference/celery.contrib.sphinx.rst

@@ -0,0 +1,4 @@
+.. currentmodule:: celery.contrib.sphinx
+
+.. automodule:: celery.contrib.sphinx
+    :members:

+ 1 - 0
docs/reference/index.rst

@@ -37,6 +37,7 @@
     celery.contrib.abortable
     celery.contrib.batches
     celery.contrib.migrate
+    celery.contrib.sphinx
     celery.contrib.rdb
     celery.contrib.methods
     celery.events

+ 1 - 1
docs/tutorials/daemonizing.rst

@@ -56,7 +56,7 @@ must also export them (e.g. ``export DISPLAY=":0"``)
 
         $ celery multi start worker1 \
             --pidfile="$HOME/run/celery/%n.pid" \
-            --logfile=""$HOME/log/celery/%n.log"
+            --logfile="$HOME/log/celery/%n.log"
 
         $ celery multi restart worker1 --pidfile="$HOME/run/celery/%n.pid"
 

+ 1 - 0
docs/tutorials/debugging.rst

@@ -86,6 +86,7 @@ The result of our vandalism can be seen in the worker logs::
 Tips
 ====
 
+.. _breakpoint_signal:
 
 Enabling the breakpoint signal
 ------------------------------

+ 8 - 1
docs/userguide/canvas.rst

@@ -250,7 +250,7 @@ The Primitives
         Chunking splits a long list of arguments into parts, e.g the operation::
 
             >>> items = zip(xrange(1000), xrange(1000))  # 1000 items
-            >>> add.chunks(items, 10))
+            >>> add.chunks(items, 10)
 
         will split the list of items into chunks of 10, resulting in 100
         tasks (each processing 10 items in sequence).
@@ -651,6 +651,13 @@ Chords
 
 .. versionadded:: 2.3
 
+.. note::
+
+    Tasks used within a chord must *not* ignore their results. If the result
+    backend is disabled for *any* task (header or body) in your chord you
+    should read ":ref:`chord-important-notes`".
+    
+
 A chord is a task that only executes after all of the tasks in a group have
 finished executing.
 

+ 88 - 17
docs/userguide/extending.rst

@@ -141,45 +141,76 @@ Attributes
     This is only supported by async I/O enabled transports (amqp, redis),
     in which case the `worker.use_eventloop` attribute should be set.
 
-    Your bootstep must require the Hub bootstep to use this.
+    Your worker bootstep must require the Hub bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.components:Hub', )
 
 .. attribute:: pool
 
     The current process/eventlet/gevent/thread pool.
     See :class:`celery.concurrency.base.BasePool`.
 
-    Your bootstep must require the Pool bootstep to use this.
+    Your worker bootstep must require the Pool bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.components:Pool', )
 
 .. attribute:: timer
 
     :class:`~kombu.async.timer.Timer` used to schedule functions.
 
-    Your bootstep must require the Timer bootstep to use this.
+    Your worker bootstep must require the Timer bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.components:Timer', )
 
 .. attribute:: statedb
 
     :class:`Database <celery.worker.state.Persistent>`` to persist state between
     worker restarts.
 
-    This only exists if the ``statedb`` argument is enabled.
-    Your bootstep must require the Statedb bootstep to use this.
+    This is only defined if the ``statedb`` argument is enabled.
+
+    Your worker bootstep must require the Statedb bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.components:Statedb', )
 
 .. attribute:: autoscaler
 
     :class:`~celery.worker.autoscaler.Autoscaler` used to automatically grow
     and shrink the number of processes in the pool.
 
-    This only exists if the ``autoscale`` argument is enabled.
-    Your bootstep must require the Autoscaler bootstep to use this.
+    This is only defined if the ``autoscale`` argument is enabled.
+
+    Your worker bootstep must require the `Autoscaler` bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.autoscaler:Autoscaler', )
 
 .. attribute:: autoreloader
 
     :class:`~celery.worker.autoreloder.Autoreloader` used to automatically
     reload use code when the filesystem changes.
 
-    This only exists if the ``autoreload`` argument is enabled.
-    Your bootstep must require the Autoreloader bootstep to use this.
+    This is only defined if the ``autoreload`` argument is enabled.
+    Your worker bootstep must require the `Autoreloader` bootstep to use this:
 
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.autoreloader:Autoreloader', )
 
 An example Worker bootstep could be:
 
@@ -285,25 +316,49 @@ Attributes
     This is only supported by async I/O enabled transports (amqp, redis),
     in which case the `worker.use_eventloop` attribute should be set.
 
-    Your bootstep must require the Hub bootstep to use this.
+    Your worker bootstep must require the Hub bootstep to use this:
+
+    .. code-block:: python
+
+        class WorkerStep(bootsteps.StartStopStep):
+            requires = ('celery.worker.components:Hub', )
+
 
 .. attribute:: connection
 
     The current broker connection (:class:`kombu.Connection`).
 
-    Your bootstep must require the 'Connection' bootstep to use this.
+    A consumer bootstep must require the 'Connection' bootstep
+    to use this:
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Connection', )
 
 .. attribute:: event_dispatcher
 
     A :class:`@events.Dispatcher` object that can be used to send events.
 
-    Your bootstep must require the `Events` bootstep to use this.
+    A consumer bootstep must require the `Events` bootstep to use this.
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Events', )
 
 .. attribute:: gossip
 
     Worker to worker broadcast communication
     (class:`~celery.worker.consumer.Gossip`).
 
+    A consumer bootstep must require the `Gossip` bootstep to use this.
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Gossip', )
+
 .. attribute:: pool
 
     The current process/eventlet/gevent/thread pool.
@@ -318,13 +373,23 @@ Attributes
     Responsible for sending worker event heartbeats
     (:class:`~celery.worker.heartbeat.Heart`).
 
-    Your bootstep must require the `Heart` bootstep to use this.
+    Your consumer bootstep must require the `Heart` bootstep to use this:
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Heart', )
 
 .. attribute:: task_consumer
 
     The :class:`kombu.Consumer` object used to consume task messages.
 
-    Your bootstep must require the `Tasks` bootstep to use this.
+    Your consumer bootstep must require the `Tasks` bootstep to use this:
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Tasks', )
 
 .. attribute:: strategies
 
@@ -339,7 +404,13 @@ Attributes
                 name, task, loader, hostname
             )
 
-    Your bootstep must require the `Tasks` bootstep to use this.
+    Your consumer bootstep must require the `Tasks` bootstep to use this:
+
+    .. code-block:: python
+
+        class Step(bootsteps.StartStopStep):
+            requires = ('celery.worker.consumer:Tasks', )
+
 
 .. attribute:: task_buckets
 
@@ -627,8 +698,8 @@ and then after installation, read from the system using the :mod:`pkg_resources`
 
 Celery recognizes ``celery.commands`` entry-points to install additional
 subcommands, where the value of the entry-point must point to a valid subclass
-of :class:`celery.bin.base.Command`.  Sadly there is limited documentation,
-but you can find inspiration from the various commands in the
+of :class:`celery.bin.base.Command`.  There is limited documentation,
+unfortunately, but you can find inspiration from the various commands in the
 :mod:`celery.bin` package.
 
 This is how the Flower_ monitoring extension adds the :program:`celery flower` command,

+ 22 - 11
docs/userguide/workers.rst

@@ -86,16 +86,27 @@ command usually does the trick:
 Restarting the worker
 =====================
 
+To restart the worker you should send the `TERM` signal and start a new
+instance.  The easiest way to manage workers for development
+is by using `celery multi`:
+
+    .. code-block:: bash
+
+        $ celery multi start 1 -A proj -l info -c4 --pidfile=/var/run/celery/%n.pid
+        $ celery multi restart 1 --pidfile=/var/run/celery/%n.pid
+
+For production deployments you should be using init scripts or other process
+supervision systems (see :ref:`daemonizing`).
+
 Other than stopping then starting the worker to restart, you can also
-restart the worker using the :sig:`HUP` signal:
+restart the worker using the :sig:`HUP` signal, but note that the worker
+will be responsible for restarting itself so this is prone to problems and
+is not recommended in production:
 
 .. code-block:: bash
 
     $ kill -HUP $pid
 
-The worker will then replace itself with a new instance using the same
-arguments as it was started with.
-
 .. note::
 
     Restarting by :sig:`HUP` only works if the worker is running
@@ -524,11 +535,11 @@ If you want to specify a specific worker you can use the
 
 The same can be accomplished dynamically using the :meth:`@control.add_consumer` method::
 
-    >>> myapp.control.add_consumer('foo', reply=True)
+    >>> app.control.add_consumer('foo', reply=True)
     [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]
 
-    >>> myapp.control.add_consumer('foo', reply=True,
-    ...                            destination=['worker1@example.com'])
+    >>> app.control.add_consumer('foo', reply=True,
+    ...                          destination=['worker1@example.com'])
     [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]
 
 
@@ -536,7 +547,7 @@ By now I have only shown examples using automatic queues,
 If you need more control you can also specify the exchange, routing_key and
 even other options::
 
-    >>> myapp.control.add_consumer(
+    >>> app.control.add_consumer(
     ...     queue='baz',
     ...     exchange='ex',
     ...     exchange_type='topic',
@@ -577,7 +588,7 @@ You can also cancel consumers programmatically using the
 
 .. code-block:: bash
 
-    >>> myapp.control.cancel_consumer('foo', reply=True)
+    >>> app.control.cancel_consumer('foo', reply=True)
     [{u'worker1.local': {u'ok': u"no longer consuming from u'foo'"}}]
 
 .. control:: active_queues
@@ -606,10 +617,10 @@ reply to the request:
 This can also be done programmatically by using the
 :meth:`@control.inspect.active_queues` method::
 
-    >>> myapp.inspect().active_queues()
+    >>> app.control.inspect().active_queues()
     [...]
 
-    >>> myapp.inspect(['worker1.local']).active_queues()
+    >>> app.control.inspect(['worker1.local']).active_queues()
     [...]
 
 .. _worker-autoreloading:

+ 1 - 1
docs/whatsnew-3.1.rst

@@ -270,7 +270,7 @@ Caveats
 Django supported out of the box
 -------------------------------
 
-Celery 3.0 introduced a shiny new API, but sadly did not
+Celery 3.0 introduced a shiny new API, but unfortunately did not
 have a solution for Django users.
 
 The situation changes with this version as Django is now supported

+ 6 - 0
extra/generic-init.d/celeryd

@@ -292,6 +292,12 @@ check_status () {
 
     local one_failed=
     for pid_file in "$CELERYD_PID_DIR"/*.pid; do
+        if [ ! -r $pid_file ]; then
+            echo "${SCRIPT_NAME} is stopped: no pids were found"
+            one_failed=true
+            break
+        fi
+
         local node=`basename "$pid_file" .pid`
         local pid=`cat "$pid_file"`
         local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'`

+ 11 - 1
pavement.py

@@ -90,12 +90,22 @@ def clean_readme(options):
     path('README.rst').unlink_p()
 
 
+@task
+def clean_contributing(options):
+    path('CONTRIBUTING.rst').unlink_p()
+
+
 @task
 @needs('clean_readme')
 def readme(options):
     sh('{0} extra/release/sphinx-to-rst.py docs/templates/readme.txt \
             > README.rst'.format(sys.executable))
 
+@task
+@needs('clean_contributing')
+def contributing(options):
+    sh('{0} extra/release/sphinx-to-rst.py docs/contributing.rst \
+            > CONTRIBUTING.rst'.format(sys.executable))
 
 @task
 def bump(options):
@@ -112,7 +122,7 @@ def bump(options):
 def test(options):
     cmd = 'CELERY_LOADER=default nosetests'
     if getattr(options, 'coverage', False):
-        cmd += ' --with-coverage3'
+        cmd += ' --with-coverage'
     if getattr(options, 'verbose', False):
         cmd += ' --verbosity=2'
     sh(cmd)

+ 1 - 1
requirements/default.txt

@@ -1,3 +1,3 @@
 pytz>dev
 billiard>=3.3.0.14,<3.4
-kombu>=3.0.10,<4.0
+kombu>=3.0.12,<4.0

+ 0 - 1
requirements/docs.txt

@@ -1,3 +1,2 @@
 Sphinx
-sphinxcontrib-issuetracker>=0.9
 SQLAlchemy

+ 1 - 0
requirements/jython.txt

@@ -1 +1,2 @@
 threadpool
+multiprocessing

+ 1 - 0
requirements/test-ci.txt

@@ -1,4 +1,5 @@
 coverage>=3.0
+coveralls
 redis
 #pymongo
 #SQLAlchemy

+ 0 - 1
requirements/test.txt

@@ -1,4 +1,3 @@
 unittest2>=0.5.1
 nose
-nose-cover3
 mock>=1.0.1

+ 0 - 1
requirements/test3.txt

@@ -1,4 +1,3 @@
 nose
-nose-cover3
 # FIXME required by kombu.tests.case
 mock >=1.0.1

+ 1 - 5
setup.cfg

@@ -1,9 +1,5 @@
 [nosetests]
 where = celery/tests
-cover3-branch = 1
-cover3-html = 1
-cover3-package = celery
-cover3-exclude = celery.utils.debug,celery.tests.*,celery.bin.graph
 
 [build_sphinx]
 source-dir = docs/
@@ -16,4 +12,4 @@ upload-dir = docs/.build/html
 [bdist_rpm]
 requires = pytz >= 2011b
            billiard >= 3.3.0.14
-           kombu >= 3.0.10
+           kombu >= 3.0.12

+ 8 - 28
setup.py

@@ -111,8 +111,7 @@ pats = {re_meta: add_default,
         re_vers: add_version,
         re_doc: add_doc}
 here = os.path.abspath(os.path.dirname(__file__))
-meta_fh = open(os.path.join(here, 'celery/__init__.py'))
-try:
+with open(os.path.join(here, 'celery/__init__.py')) as meta_fh:
     meta = {}
     for line in meta_fh:
         if line.strip() == '# -eof meta-':
@@ -121,8 +120,6 @@ try:
             m = pattern.match(line.strip())
             if m:
                 meta.update(handler(m))
-finally:
-    meta_fh.close()
 
 # -*- Installation Requires -*-
 
@@ -170,30 +167,13 @@ if CELERY_COMPAT_PROGRAMS:
 
 if is_setuptools:
     extras = lambda *p: reqs('extras', *p)
-    extra['extras_require'] = {
-        # Celery specific
-        'auth': extras('auth.txt'),
-        'cassandra': extras('cassandra.txt'),
-        'memcache': extras('memcache.txt'),
-        'couchbase': extras('couchbase.txt'),
-        'threads': extras('threads.txt'),
-        'eventlet': extras('eventlet.txt'),
-        'gevent': extras('gevent.txt'),
-
-        'msgpack': extras('msgpack.txt'),
-        'yaml': extras('yaml.txt'),
-        'redis': extras('redis.txt'),
-        'mongodb': extras('mongodb.txt'),
-        'sqs': extras('sqs.txt'),
-        'couchdb': extras('couchdb.txt'),
-        'beanstalk': extras('beanstalk.txt'),
-        'zookeeper': extras('zookeeper.txt'),
-        'zeromq': extras('zeromq.txt'),
-        'sqlalchemy': extras('sqlalchemy.txt'),
-        'librabbitmq': extras('librabbitmq.txt'),
-        'pyro': extras('pyro.txt'),
-        'slmq': extras('slmq.txt'),
-    }
+    # Celery specific
+    specific_list = ['auth', 'cassandra', 'memcache', 'couchbase', 'threads',
+                     'eventlet', 'gevent', 'msgpack', 'yaml', 'redis',
+                     'mongodb', 'sqs', 'couchdb', 'beanstalk', 'zookeeper',
+                     'zeromq', 'sqlalchemy', 'librabbitmq', 'pyro', 'slmq']
+    extras_require = dict((x, extras(x + '.txt')) for x in specific_list)
+    extra['extras_require'] = extras_require
 
 # -*- %%% -*-
 

+ 12 - 26
tox.ini

@@ -1,46 +1,37 @@
 [tox]
-envlist = py26,py27,py33,pypy
+envlist =
+    2.6,
+    2.7,
+    3.3,
+    pypy
 
 [testenv]
 sitepackages = False
 commands = nosetests
 
-[testenv:py33]
+[testenv:3.3]
 basepython = python3.3
 deps = -r{toxinidir}/requirements/default.txt
        -r{toxinidir}/requirements/test3.txt
        -r{toxinidir}/requirements/test-ci.txt
 commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
-           pip install anyjson
-           nosetests -vd celery.tests                          \
-               --with-xunit                                    \
-                 --xunit-file="{toxinidir}/nosetests.xml"
+           nosetests celery.tests --with-coverage --cover-inclusive --cover-erase []
 
-[testenv:py27]
+[testenv:2.7]
 basepython = python2.7
 deps = -r{toxinidir}/requirements/default.txt
        -r{toxinidir}/requirements/test.txt
        -r{toxinidir}/requirements/test-ci.txt
 commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
-           pip install anyjson
-           nosetests -v --with-xunit                            \
-                     --xunit-file={toxinidir}/nosetests.xml     \
-                     --with-coverage3 --cover3-xml              \
-                     --cover3-html-dir={toxinidir}/cover        \
-                     --cover3-xml-file={toxinidir}/coverage.xml
+           nosetests --with-coverage --cover-inclusive --cover-erase []
 
-[testenv:py26]
+[testenv:2.6]
 basepython = python2.6
 deps = -r{toxinidir}/requirements/default.txt
        -r{toxinidir}/requirements/test.txt
        -r{toxinidir}/requirements/test-ci.txt
 commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
-           pip install anyjson
-           nosetests -v --with-xunit                            \
-                     --xunit-file={toxinidir}/nosetests.xml     \
-                     --with-coverage3 --cover3-xml              \
-                     --cover3-html-dir={toxinidir}/cover        \
-                     --cover3-xml-file={toxinidir}/coverage.xml
+           nosetests --with-coverage --cover-inclusive --cover-erase []
 
 [testenv:pypy]
 basepython = pypy
@@ -48,9 +39,4 @@ deps = -r{toxinidir}/requirements/default.txt
        -r{toxinidir}/requirements/test.txt
        -r{toxinidir}/requirements/test-ci.txt
 commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
-           pip install anyjson
-           nosetests -v --with-xunit                            \
-                     --xunit-file={toxinidir}/nosetests.xml     \
-                     --with-coverage3 --cover3-xml              \
-                     --cover3-html-dir={toxinidir}/cover        \
-                     --cover3-xml-file={toxinidir}/coverage.xml
+           nosetests --with-coverage --cover-inclusive --cover-erase []