
[docs] Spelling stuff

Ask Solem, 9 years ago
commit 0bdc27d8b3
76 changed files with 1316 additions and 869 deletions
  1. CONTRIBUTING.rst (+2, -1)
  2. README.rst (+4, -1)
  3. celery/_state.py (+1, -1)
  4. celery/app/base.py (+3, -3)
  5. celery/app/defaults.py (+1, -1)
  6. celery/app/task.py (+5, -5)
  7. celery/app/utils.py (+4, -4)
  8. celery/backends/__init__.py (+1, -1)
  9. celery/backends/base.py (+2, -2)
  10. celery/backends/cassandra.py (+1, -1)
  11. celery/backends/couchbase.py (+6, -6)
  12. celery/backends/database/__init__.py (+4, -4)
  13. celery/backends/filesystem.py (+5, -5)
  14. celery/backends/mongodb.py (+4, -4)
  15. celery/beat.py (+1, -1)
  16. celery/bin/base.py (+1, -1)
  17. celery/bin/celery.py (+1, -1)
  18. celery/bin/worker.py (+1, -1)
  19. celery/bootsteps.py (+1, -1)
  20. celery/canvas.py (+1, -1)
  21. celery/events/state.py (+1, -1)
  22. celery/loaders/base.py (+3, -3)
  23. celery/result.py (+1, -1)
  24. celery/schedules.py (+5, -4)
  25. celery/tests/backends/test_cassandra.py (+1, -1)
  26. celery/tests/backends/test_couchbase.py (+13, -13)
  27. celery/tests/utils/test_debug.py (+2, -2)
  28. celery/utils/debug.py (+2, -2)
  29. celery/worker/components.py (+1, -1)
  30. celery/worker/state.py (+1, -1)
  31. docs/Makefile (+5, -0)
  32. docs/configuration.rst (+210, -199)
  33. docs/contributing.rst (+40, -41)
  34. docs/django/first-steps-with-django.rst (+5, -5)
  35. docs/faq.rst (+10, -9)
  36. docs/getting-started/introduction.rst (+2, -2)
  37. docs/getting-started/next-steps.rst (+6, -6)
  38. docs/glossary.rst (+3, -0)
  39. docs/history/changelog-1.0.rst (+17, -17)
  40. docs/history/changelog-2.0.rst (+3, -3)
  41. docs/history/changelog-2.1.rst (+8, -8)
  42. docs/history/changelog-2.2.rst (+2, -2)
  43. docs/history/changelog-2.3.rst (+2, -2)
  44. docs/history/changelog-2.4.rst (+1, -1)
  45. docs/history/changelog-2.5.rst (+2, -2)
  46. docs/history/changelog-3.0.rst (+11, -11)
  47. docs/history/changelog-3.1.rst (+17, -15)
  48. docs/history/index.rst (+2, -0)
  49. docs/history/whatsnew-2.5.rst (+34, -33)
  50. docs/history/whatsnew-3.0.rst (+37, -36)
  51. docs/includes/installation.txt (+1, -1)
  52. docs/index.rst (+0, -2)
  53. docs/internals/guide.rst (+3, -3)
  54. docs/internals/protocol.rst (+2, -2)
  55. docs/reference/celery.rst (+1, -1)
  56. docs/spelling_wordlist.txt (+351, -0)
  57. docs/tutorials/daemonizing.rst (+61, -37)
  58. docs/tutorials/debugging.rst (+5, -5)
  59. docs/tutorials/task-cookbook.rst (+2, -2)
  60. docs/userguide/application.rst (+3, -3)
  61. docs/userguide/calling.rst (+3, -3)
  62. docs/userguide/canvas.rst (+4, -4)
  63. docs/userguide/concurrency/eventlet.rst (+1, -1)
  64. docs/userguide/extending.rst (+14, -14)
  65. docs/userguide/monitoring.rst (+13, -14)
  66. docs/userguide/optimizing.rst (+5, -5)
  67. docs/userguide/periodic-tasks.rst (+6, -5)
  68. docs/userguide/routing.rst (+8, -6)
  69. docs/userguide/security.rst (+2, -2)
  70. docs/userguide/signals.rst (+170, -132)
  71. docs/userguide/tasks.rst (+32, -31)
  72. docs/userguide/workers.rst (+22, -20)
  73. docs/whatsnew-3.1.rst (+44, -41)
  74. docs/whatsnew-4.0.rst (+64, -67)
  75. examples/celery_http_gateway/README.rst (+1, -1)
  76. examples/django/proj/settings.py (+2, -1)

+ 2 - 1
CONTRIBUTING.rst

@@ -290,7 +290,8 @@ You can see the state of any branch by looking at the Changelog:
     https://github.com/celery/celery/blob/master/Changelog
 
 If the branch is in active development the topmost version info should
-contain metadata like::
+contain meta-data like:
+::
 
     2.4.0
     ======

+ 4 - 1
README.rst

@@ -286,6 +286,9 @@ Transports and Backends
 :celery[sqs]:
     for using Amazon SQS as a message transport (*experimental*).
 
+:celery[tblib]
+    for using the ``task_remote_tracebacks`` feature.
+
 :celery[memcache]:
     for using memcached as a result backend (using pylibmc)
 
@@ -299,7 +302,7 @@ Transports and Backends
     for using CouchDB as a message transport (*experimental*).
 
 :celery[couchbase]:
-    for using CouchBase as a result backend.
+    for using Couchbase as a result backend.
 
 :celery[elasticsearch]
     for using Elasticsearch as a result backend.
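As context for the new ``tblib`` bundle above: ``task_remote_tracebacks`` makes clients re-raise task errors with the worker's traceback attached. A minimal sketch of enabling it (broker URL hypothetical):

    # Requires the celery[tblib] bundle to be installed.
    from celery import Celery

    app = Celery('proj', broker='amqp://')
    app.conf.task_remote_tracebacks = True  # rebuild remote tracebacks locally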

+ 1 - 1
celery/_state.py

@@ -32,7 +32,7 @@ default_app = None
 _apps = weakref.WeakSet()
 
 #: global set of functions to call whenever a new app is finalized
-#: E.g. Shared tasks, and builtin tasks are created
+#: E.g. Shared tasks, and built-in tasks are created
 #: by adding callbacks here.
 _on_app_finalizers = set()
 

+ 3 - 3
celery/app/base.py

@@ -555,7 +555,7 @@ class Celery(object):
 
     def autodiscover_tasks(self, packages=None,
                            related_name='tasks', force=False):
-        """Try to autodiscover and import modules with a specific name (by
+        """Try to auto-discover and import modules with a specific name (by
         default 'tasks').
 
         If the name is empty, this will be delegated to fixups (e.g. Django).
@@ -585,8 +585,8 @@ class Celery(object):
             to "tasks", which means it look for "module.tasks" for every
             module in ``packages``.
         :keyword force: By default this call is lazy so that the actual
-            autodiscovery will not happen until an application imports the
-            default modules.  Forcing will cause the autodiscovery to happen
+            auto-discovery will not happen until an application imports the
+            default modules.  Forcing will cause the auto-discovery to happen
             immediately.
 
         """

+ 1 - 1
celery/app/defaults.py

@@ -354,7 +354,7 @@ def find_deprecated_settings(source):  # pragma: no cover
 
 @memoize(maxsize=None)
 def find(name, namespace='celery'):
-    # - Try specified namespace first.
+    # - Try specified name-space first.
     namespace = namespace.lower()
     try:
         return searchresult(

+ 5 - 5
celery/app/task.py

@@ -44,7 +44,7 @@ R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>'
 R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>'
 R_INSTANCE = '<@task: {0.name} of {app}{flags}>'
 
-#: Here for backwards compatibility as tasks no longer use a custom metaclass.
+#: Here for backwards compatibility as tasks no longer use a custom meta-class.
 TaskType = type
 
 
@@ -200,7 +200,7 @@ class Task(object):
     autoregister = True
 
     #: If enabled the task will report its status as 'started' when the task
-    #: is executed by a worker.  Disabled by default as the normal behaviour
+    #: is executed by a worker.  Disabled by default as the normal behavior
     #: is to not report that level of granularity.  Tasks are either pending,
     #: finished, or waiting to be retried.
     #:
@@ -225,10 +225,10 @@ class Task(object):
     acks_late = None
 
     #: Even if :attr:`acks_late` is enabled, the worker will
-    #: acknowledge tasks when the worker process executing them abrubtly
+    #: acknowledge tasks when the worker process executing them abruptly
     #: exits or is signaled (e.g. :sig:`KILL`/:sig:`INT`, etc).
     #:
-    #: Setting this to true allows the message to be requeued instead,
+    #: Setting this to true allows the message to be re-queued instead,
     #: so that the task will execute again by the same worker, or another
     #: worker.
     #:
@@ -811,7 +811,7 @@ class Task(object):
         :keyword task_id: Id of the task to update, defaults to the
                           id of the current task
         :keyword state: New state (:class:`str`).
-        :keyword meta: State metadata (:class:`dict`).
+        :keyword meta: State meta-data (:class:`dict`).
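The ``meta`` keyword documented above carries arbitrary state meta-data; a sketch of reporting progress from inside a task (the state name and meta fields are just a convention):

    from celery import Celery

    app = Celery('proj')

    @app.task(bind=True)
    def process(self, items):
        for i, item in enumerate(items):
            # ... do the actual work here ...
            self.update_state(state='PROGRESS',
                              meta={'current': i + 1, 'total': len(items)})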
 
 
 

+ 4 - 4
celery/app/utils.py

@@ -3,7 +3,7 @@
     celery.app.utils
     ~~~~~~~~~~~~~~~~
 
-    App utilities: Compat settings, bugreport tool, pickling apps.
+    App utilities: Compat settings, bug-report tool, pickling apps.
 
 """
 from __future__ import absolute_import, unicode_literals
@@ -33,7 +33,7 @@ from .defaults import (
 __all__ = ['Settings', 'appstr', 'bugreport',
            'filter_hidden_settings', 'find_app']
 
-#: Format used to generate bugreport information.
+#: Format used to generate bug-report information.
 BUGREPORT_INFO = """
 software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
             billiard:{billiard_v} {driver_v}
@@ -133,7 +133,7 @@ class Settings(ConfigurationView):
              <Option: type->bool default->False>))
 
         :param name: Name of option, cannot be partial.
-        :keyword namespace: Preferred namespace (``None`` by default).
+        :keyword namespace: Preferred name-space (``None`` by default).
 
         """
         return find(name, namespace)
@@ -295,7 +295,7 @@ def filter_hidden_settings(conf):
 
 
 def bugreport(app):
-    """Return a string containing information useful in bug reports."""
+    """Return a string containing information useful in bug-reports."""
     import billiard
     import celery
     import kombu

+ 1 - 1
celery/backends/__init__.py

@@ -33,7 +33,7 @@ BACKEND_ALIASES = {
     'database': 'celery.backends.database:DatabaseBackend',
     'elasticsearch': 'celery.backends.elasticsearch:ElasticsearchBackend',
     'cassandra': 'celery.backends.cassandra:CassandraBackend',
-    'couchbase': 'celery.backends.couchbase:CouchBaseBackend',
+    'couchbase': 'celery.backends.couchbase:CouchbaseBackend',
     'couchdb': 'celery.backends.couchdb:CouchDBBackend',
     'riak': 'celery.backends.riak:RiakBackend',
     'file': 'celery.backends.filesystem:FilesystemBackend',

+ 2 - 2
celery/backends/base.py

@@ -604,14 +604,14 @@ class BaseKeyValueStoreBackend(Backend):
         self.delete(self.get_key_for_group(group_id))
 
     def _get_task_meta_for(self, task_id):
-        """Get task metadata for a task by id."""
+        """Get task meta-data for a task by id."""
         meta = self.get(self.get_key_for_task(task_id))
         if not meta:
             return {'status': states.PENDING, 'result': None}
         return self.decode_result(meta)
 
     def _restore_group(self, group_id):
-        """Get task metadata for a task by id."""
+        """Get task meta-data for a task by id."""
         meta = self.get(self.get_key_for_group(group_id))
         # previously this was always pickled, but later this
         # was extended to support other serializers, so the

+ 1 - 1
celery/backends/cassandra.py

@@ -222,7 +222,7 @@ class CassandraBackend(BaseBackend):
         return 'cassandra://'
 
     def _get_task_meta_for(self, task_id):
-        """Get task metadata for a task by id."""
+        """Get task meta-data for a task by id."""
         self._get_connection()
 
         res = self._session.execute(self._read_stmt, (task_id, ))

+ 6 - 6
celery/backends/couchbase.py

@@ -3,7 +3,7 @@
     celery.backends.couchbase
     ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    CouchBase result store backend.
+    Couchbase result store backend.
 
 """
 from __future__ import absolute_import, unicode_literals
@@ -24,11 +24,11 @@ from celery.exceptions import ImproperlyConfigured
 
 from .base import KeyValueStoreBackend
 
-__all__ = ['CouchBaseBackend']
+__all__ = ['CouchbaseBackend']
 
 
-class CouchBaseBackend(KeyValueStoreBackend):
-    """CouchBase backend.
+class CouchbaseBackend(KeyValueStoreBackend):
+    """Couchbase backend.
 
     :raises celery.exceptions.ImproperlyConfigured: if
         module :pypi:`couchbase` is not available.
@@ -49,13 +49,13 @@ class CouchBaseBackend(KeyValueStoreBackend):
     key_t = str_t
 
     def __init__(self, url=None, *args, **kwargs):
-        super(CouchBaseBackend, self).__init__(*args, **kwargs)
+        super(CouchbaseBackend, self).__init__(*args, **kwargs)
         self.url = url
 
         if Couchbase is None:
             raise ImproperlyConfigured(
                 'You need to install the couchbase library to use the '
-                'CouchBase backend.',
+                'Couchbase backend.',
             )
 
         uhost = uport = uname = upass = ubucket = None
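Since only the class name changed (the ``couchbase`` alias in ``celery/backends/__init__.py`` above now points at the new name), backend URLs keep working; a sketch (host and bucket hypothetical):

    from celery import Celery

    # Resolves to the renamed CouchbaseBackend via the 'couchbase' alias.
    app = Celery('proj', backend='couchbase://myhost/mycoolbucket')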

+ 4 - 4
celery/backends/database/__init__.py

@@ -129,7 +129,7 @@ class DatabaseBackend(BaseBackend):
 
     @retry
     def _get_task_meta_for(self, task_id):
-        """Get task metadata for a task by id."""
+        """Get task meta-data for a task by id."""
         session = self.ResultSession()
         with session_cleanup(session):
             task = list(session.query(Task).filter(Task.task_id == task_id))
@@ -153,7 +153,7 @@ class DatabaseBackend(BaseBackend):
 
     @retry
     def _restore_group(self, group_id):
-        """Get metadata for group by id."""
+        """Get meta-data for group by id."""
         session = self.ResultSession()
         with session_cleanup(session):
             group = session.query(TaskSet).filter(
@@ -163,7 +163,7 @@ class DatabaseBackend(BaseBackend):
 
     @retry
     def _delete_group(self, group_id):
-        """Delete metadata for group by id."""
+        """Delete meta-data for group by id."""
         session = self.ResultSession()
         with session_cleanup(session):
             session.query(TaskSet).filter(
@@ -180,7 +180,7 @@ class DatabaseBackend(BaseBackend):
             session.commit()
 
     def cleanup(self):
-        """Delete expired metadata."""
+        """Delete expired meta-data."""
         session = self.ResultSession()
         expires = self.expires
         now = self.app.now()
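For context, ``cleanup()`` is normally driven by the built-in ``celery.backend_cleanup`` periodic task rather than called directly; a sketch of invoking it by hand (backend URL hypothetical):

    from celery import Celery

    app = Celery('proj', backend='db+sqlite:///results.db')
    app.backend.cleanup()  # drop task/group meta-data older than the expiry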

+ 5 - 5
celery/backends/filesystem.py

@@ -3,7 +3,7 @@
     celery.backends.filesystem
     ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Filesystem result store backend.
+    File-system result store backend.
 """
 from __future__ import absolute_import, unicode_literals
 
@@ -25,14 +25,14 @@ except NameError:
     IsADirectoryError = IOError
 
 E_PATH_INVALID = """\
-The configured path for the Filesystem backend does not
+The configured path for the file-system backend does not
 work correctly, please make sure that it exists and has
 the correct permissions.\
 """
 
 
 class FilesystemBackend(KeyValueStoreBackend):
-    """Filesystem result backend.
+    """File-system result backend.
 
     Keyword arguments (in addition to those of KeyValueStoreBackend):
 
@@ -40,7 +40,7 @@ class FilesystemBackend(KeyValueStoreBackend):
     :param open: open function to use when opening files
     :param unlink: unlink function to use when deleting files
     :param sep: directory seperator (to join the directory with the key)
-    :param encoding: encoding used on the filesystem
+    :param encoding: encoding used on the file-system
 
     """
 
@@ -63,7 +63,7 @@ class FilesystemBackend(KeyValueStoreBackend):
     def _find_path(self, url):
         if not url:
             raise ImproperlyConfigured(
-                'You need to configure a path for the Filesystem backend')
+                'You need to configure a path for the File-system backend')
         if url is not None and url.startswith('file:///'):
             return url[7:]
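As the ``_find_path`` hunk above implies, this backend is selected with a ``file:///`` URL whose path must exist with the right permissions; a sketch (path hypothetical):

    from celery import Celery

    # url[7:] strips 'file://', leaving the absolute path '/var/celery/results'.
    app = Celery('proj', backend='file:///var/celery/results')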
 

+ 4 - 4
celery/backends/mongodb.py

@@ -187,7 +187,7 @@ class MongoBackend(BaseBackend):
         return result
 
     def _get_task_meta_for(self, task_id):
-        """Get task metadata for a task by id."""
+        """Get task meta-data for a task by id."""
         obj = self.collection.find_one({'_id': task_id})
         if obj:
             return self.meta_from_decoded({
@@ -242,7 +242,7 @@ class MongoBackend(BaseBackend):
         self.collection.remove({'_id': task_id})
 
     def cleanup(self):
-        """Delete expired metadata."""
+        """Delete expired meta-data."""
         self.collection.remove(
             {'date_done': {'$lt': self.app.now() - self.expires_delta}},
         )
@@ -273,7 +273,7 @@ class MongoBackend(BaseBackend):
 
     @cached_property
     def collection(self):
-        """Get the metadata task collection."""
+        """Get the meta-data task collection."""
         collection = self.database[self.taskmeta_collection]
 
         # Ensure an index on date_done is there, if not process the index
@@ -283,7 +283,7 @@ class MongoBackend(BaseBackend):
 
     @cached_property
     def group_collection(self):
-        """Get the metadata task collection."""
+        """Get the meta-data task collection."""
         collection = self.database[self.groupmeta_collection]
 
         # Ensure an index on date_done is there, if not process the index

+ 1 - 1
celery/beat.py

@@ -289,7 +289,7 @@ class Scheduler(object):
         return new_entry
 
     def apply_async(self, entry, producer=None, advance=True, **kwargs):
-        # Update timestamps and run counts before we actually execute,
+        # Update time-stamps and run counts before we actually execute,
         # so we have that done if an exception is raised (doesn't schedule
         # forever.)
         entry = self.reserve(entry) if advance else entry

+ 1 - 1
celery/bin/base.py

@@ -154,7 +154,7 @@ class Command(object):
     #: Enable if the application should support config from the cmdline.
     enable_config_from_cmdline = False
 
-    #: Default configuration namespace.
+    #: Default configuration name-space.
     namespace = None
 
     #: Text to print at end of --help

+ 1 - 1
celery/bin/celery.py

@@ -991,7 +991,7 @@ class help(Command):
 
 
 class report(Command):
-    """Shows information useful to include in bugreports."""
+    """Shows information useful to include in bug-reports."""
 
     def run(self, *args, **kwargs):
         self.out(self.app.bugreport())

+ 1 - 1
celery/bin/worker.py

@@ -81,7 +81,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 .. cmdoption:: --without-mingle
 
-    Do not synchronize with other workers at startup.
+    Do not synchronize with other workers at start-up.
 
 .. cmdoption:: --without-heartbeat
 

+ 1 - 1
celery/bootsteps.py

@@ -279,7 +279,7 @@ class Blueprint(object):
 
 
 class StepType(type):
-    """Metaclass for steps."""
+    """Meta-class for steps."""
 
     def __new__(cls, name, bases, attrs):
         module = attrs.get('__module__')

+ 1 - 1
celery/canvas.py

@@ -3,7 +3,7 @@
     celery.canvas
     ~~~~~~~~~~~~~
 
-    Composing task workflows.
+    Composing task work-flows.
 
     Documentation for some of these types are in :mod:`celery`.
     You should import these from :mod:`celery` and not this module.

+ 1 - 1
celery/events/state.py

@@ -3,7 +3,7 @@
     celery.events.state
     ~~~~~~~~~~~~~~~~~~~
 
-    This module implements a datastructure used to keep
+    This module implements a data-structure used to keep
     track of the state of a cluster of workers and the tasks
     it is working on (by consuming events).
 

+ 3 - 3
celery/loaders/base.py

@@ -197,12 +197,12 @@ class BaseLoader(object):
             key, value = arg.split('=', 1)
             key = key.lower().replace('.', '_')
 
-            # ## find namespace.
-            # .key=value|_key=value expands to default namespace.
+            # ## find name-space.
+            # .key=value|_key=value expands to default name-space.
             if key[0] == '_':
                 ns, key = namespace, key[1:]
             else:
-                # find namespace part of key
+                # find name-space part of key
                 ns, key = key.split('_', 1)
 
             ns_key = (ns and ns + '_' or '') + key

+ 1 - 1
celery/result.py

@@ -158,7 +158,7 @@ class AsyncResult(ResultBase):
         :keyword no_ack: Enable amqp no ack (automatically acknowledge
             message).  If this is :const:`False` then the message will
             **not be acked**.
-        :keyword follow_parents: Reraise any exception raised by parent task.
+        :keyword follow_parents: Re-raise any exception raised by parent task.
 
         :raises celery.exceptions.TimeoutError: if `timeout` is not
             :const:`None` and the result does not arrive within `timeout`

+ 5 - 4
celery/schedules.py

@@ -110,13 +110,13 @@ class schedule(object):
         * `(False, 12.3)`, means the task is not due, but that the scheduler
           should check again in 12.3 seconds.
 
-        The next time to check is used to save energy/cpu cycles,
+        The next time to check is used to save energy/CPU cycles,
         it does not need to be accurate but will influence the precision
         of your schedule.  You must also keep in mind
         the value of :setting:`beat_max_loop_interval`,
         which decides the maximum number of seconds the scheduler can
         sleep between re-checking the periodic task intervals.  So if you
-        have a task that changes schedule at runtime then your next_run_at
+        have a task that changes schedule at run-time then your next_run_at
         check will decide how long it will take before a change to the
         schedule takes effect.  The max loop interval takes precendence
         over the next check at value returned.
@@ -125,7 +125,8 @@ class schedule(object):
 
             The default max loop interval may vary for different schedulers.
             For the default scheduler the value is 5 minutes, but for e.g.
-            the django-celery database scheduler the value is 5 seconds.
+            the :pypi:`django-celery` database scheduler the value
+            is 5 seconds.
 
         """
         last_run_at = self.maybe_make_aware(last_run_at)
@@ -377,7 +378,7 @@ class crontab(schedule):
     is every seventh day, only months that begin on Sunday and are also
     in the `month_of_year` attribute will have execution events.  Or,
     `day_of_week` is 1 and `day_of_month` is '1-7,15-21' means every
-    first and third monday of every month present in `month_of_year`.
+    first and third Monday of every month present in `month_of_year`.
 
     """
 

+ 1 - 1
celery/tests/backends/test_cassandra.py

@@ -97,7 +97,7 @@ class test_CassandraBackend(AppCase):
         self.assertIsNone(x._session)
 
     def test_timeouting_cluster(self):
-        # Tests behaviour when Cluster.connect raises
+        # Tests behavior when Cluster.connect raises
         # cassandra.OperationTimedOut.
         from celery.backends import cassandra as mod
 

+ 13 - 13
celery/tests/backends/test_couchbase.py

@@ -1,11 +1,11 @@
-"""Tests for the CouchBaseBackend."""
+"""Tests for the CouchbaseBackend."""
 
 from __future__ import absolute_import, unicode_literals
 
 from kombu.utils.encoding import str_t
 
 from celery.backends import couchbase as module
-from celery.backends.couchbase import CouchBaseBackend
+from celery.backends.couchbase import CouchbaseBackend
 from celery.exceptions import ImproperlyConfigured
 from celery import backends
 from celery.tests.case import AppCase, MagicMock, Mock, patch, sentinel, skip
@@ -19,27 +19,27 @@ COUCHBASE_BUCKET = 'celery_bucket'
 
 
 @skip.unless_module('couchbase')
-class test_CouchBaseBackend(AppCase):
+class test_CouchbaseBackend(AppCase):
 
     def setup(self):
-        self.backend = CouchBaseBackend(app=self.app)
+        self.backend = CouchbaseBackend(app=self.app)
 
     def test_init_no_couchbase(self):
         prev, module.Couchbase = module.Couchbase, None
         try:
             with self.assertRaises(ImproperlyConfigured):
-                CouchBaseBackend(app=self.app)
+                CouchbaseBackend(app=self.app)
         finally:
             module.Couchbase = prev
 
     def test_init_no_settings(self):
         self.app.conf.couchbase_backend_settings = []
         with self.assertRaises(ImproperlyConfigured):
-            CouchBaseBackend(app=self.app)
+            CouchbaseBackend(app=self.app)
 
     def test_init_settings_is_None(self):
         self.app.conf.couchbase_backend_settings = None
-        CouchBaseBackend(app=self.app)
+        CouchbaseBackend(app=self.app)
 
     def test_get_connection_connection_exists(self):
         with patch('couchbase.connection.Connection') as mock_Connection:
@@ -52,7 +52,7 @@ class test_CouchBaseBackend(AppCase):
 
     def test_get(self):
         self.app.conf.couchbase_backend_settings = {}
-        x = CouchBaseBackend(app=self.app)
+        x = CouchbaseBackend(app=self.app)
         x._connection = Mock()
         mocked_get = x._connection.get = Mock()
         mocked_get.return_value.value = sentinel.retval
@@ -62,7 +62,7 @@ class test_CouchBaseBackend(AppCase):
 
     def test_set(self):
         self.app.conf.couchbase_backend_settings = None
-        x = CouchBaseBackend(app=self.app)
+        x = CouchbaseBackend(app=self.app)
         x._connection = MagicMock()
         x._connection.set = MagicMock()
         # should return None
@@ -70,7 +70,7 @@ class test_CouchBaseBackend(AppCase):
 
     def test_delete(self):
         self.app.conf.couchbase_backend_settings = {}
-        x = CouchBaseBackend(app=self.app)
+        x = CouchbaseBackend(app=self.app)
         x._connection = Mock()
         mocked_delete = x._connection.delete = Mock()
         mocked_delete.return_value = None
@@ -86,7 +86,7 @@ class test_CouchBaseBackend(AppCase):
             'password': 'mysecret',
             'port': '1234',
         }
-        x = CouchBaseBackend(app=self.app)
+        x = CouchbaseBackend(app=self.app)
         self.assertEqual(x.bucket, 'mycoolbucket')
         self.assertEqual(x.host, ['here.host.com', 'there.host.com'],)
         self.assertEqual(x.username, 'johndoe',)
@@ -94,9 +94,9 @@ class test_CouchBaseBackend(AppCase):
         self.assertEqual(x.port, 1234)
 
     def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'):
-        from celery.backends.couchbase import CouchBaseBackend
+        from celery.backends.couchbase import CouchbaseBackend
         backend, url_ = backends.get_backend_by_url(url, self.app.loader)
-        self.assertIs(backend, CouchBaseBackend)
+        self.assertIs(backend, CouchbaseBackend)
         self.assertEqual(url_, url)
 
     def test_backend_params_by_url(self):

+ 2 - 2
celery/tests/utils/test_debug.py

@@ -65,8 +65,8 @@ class test_humanbytes(Case):
     def test_humanbytes(self):
         self.assertEqual(debug.humanbytes(2 ** 20), '1MB')
         self.assertEqual(debug.humanbytes(4 * 2 ** 20), '4MB')
-        self.assertEqual(debug.humanbytes(2 ** 16), '64kB')
-        self.assertEqual(debug.humanbytes(2 ** 16), '64kB')
+        self.assertEqual(debug.humanbytes(2 ** 16), '64KB')
+        self.assertEqual(debug.humanbytes(2 ** 16), '64KB')
         self.assertEqual(debug.humanbytes(2 ** 8), '256b')
 
 

+ 2 - 2
celery/utils/debug.py

@@ -30,7 +30,7 @@ UNITS = (
     (2 ** 40.0, 'TB'),
     (2 ** 30.0, 'GB'),
     (2 ** 20.0, 'MB'),
-    (2 ** 10.0, 'kB'),
+    (2 ** 10.0, 'KB'),
     (0.0, 'b'),
 )
 
@@ -137,7 +137,7 @@ def hfloat(f, p=5):
 
 
 def humanbytes(s):
-    """Convert bytes to human-readable form (e.g. kB, MB)."""
+    """Convert bytes to human-readable form (e.g. KB, MB)."""
     return next(
         '{0}{1}'.format(hfloat(s / div if div else s), unit)
         for div, unit in UNITS if s >= div
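A quick sketch of the behavior the test change above locks in:

    from celery.utils.debug import humanbytes

    print(humanbytes(2 ** 16))      # -> '64KB' (previously '64kB')
    print(humanbytes(4 * 2 ** 20))  # -> '4MB'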

+ 1 - 1
celery/worker/components.py

@@ -104,7 +104,7 @@ class Pool(bootsteps.StartStopStep):
     """Bootstep managing the worker pool.
 
     Describes how to initialize the worker pool, and starts and stops
-    the pool during worker startup/shutdown.
+    the pool during worker start-up/shutdown.
 
     Adds attributes:
 

+ 1 - 1
celery/worker/state.py

@@ -53,7 +53,7 @@ total_count = Counter()
 #: count of all tasks accepted by the worker
 all_total_count = [0]
 
-#: the list of currently revoked tasks.  Persistent if statedb set.
+#: the list of currently revoked tasks.  Persistent if ``statedb`` set.
 revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)
 
 #: Update global state when a task has been reserved.
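For context, entries land in ``revoked`` when a revoke control message arrives; a sketch of the client side (broker URL and task id hypothetical):

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    # Broadcasts a revoke; workers record the id in the revoked LimitedSet.
    # Start the worker with --statedb=/var/run/celery/worker.state to persist it.
    app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed')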

+ 5 - 0
docs/Makefile

@@ -49,6 +49,7 @@ help:
 	@echo "  coverage   to run coverage check of the documentation (if enabled)"
 	@echo "  apicheck   to verify that all modules are present in autodoc"
 	@echo "  configcheck to verify that all modules are present in autodoc"
+	@echo "  spelling   to perform a spell check"
 
 .PHONY: clean
 clean:
@@ -220,6 +221,10 @@ apicheck:
 configcheck:
 	$(SPHINXBUILD) -b configcheck $(ALLSPHINXOPTS) $(BUILDDIR)/configcheck
 
+.PHONY: spelling
+spelling:
+	SPELLCHECK=1 $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling
+
 .PHONY: xml
 xml:
 	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
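With the new target in place the spell check would be run as ``make spelling`` from the :file:`docs/` directory; the ``SPELLCHECK=1`` variable presumably lets the Sphinx :file:`conf.py` enable the spelling builder (:pypi:`sphinxcontrib-spelling`), which validates the prose against the new :file:`docs/spelling_wordlist.txt`.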

+ 210 - 199
docs/configuration.rst
(file diff suppressed because it is too large)


+ 40 - 41
docs/contributing.rst

@@ -244,12 +244,12 @@ Issue Trackers
 Bugs for a package in the Celery ecosystem should be reported to the relevant
 issue tracker.
 
-* Celery: https://github.com/celery/celery/issues/
-* Kombu: https://github.com/celery/kombu/issues
-* pyamqp: https://github.com/celery/py-amqp/issues
-* vine: https://github.com/celery/vine/issues
-* librabbitmq: https://github.com/celery/librabbitmq/issues
-* Django-Celery: https://github.com/celery/django-celery/issues
+* :pypi:`celery`: https://github.com/celery/celery/issues/
+* :pypi:`kombu`: https://github.com/celery/kombu/issues
+* :pypi:`amqp`: https://github.com/celery/py-amqp/issues
+* :pypi:`vine`: https://github.com/celery/vine/issues
+* :pypi:`librabbitmq`: https://github.com/celery/librabbitmq/issues
+* :pypi:`django-celery`: https://github.com/celery/django-celery/issues
 
 If you are unsure of the origin of the bug you can ask the
 :ref:`mailing-list`, or just use the Celery issue tracker.
@@ -291,7 +291,9 @@ You can see the state of any branch by looking at the Changelog:
     https://github.com/celery/celery/blob/master/Changelog
 
 If the branch is in active development the topmost version info should
-contain metadata like::
+contain meta-data like:
+
+.. code-block:: restructuredtext
 
     2.4.0
     ======
@@ -937,8 +939,8 @@ Jan Henrik Helmers
 Packages
 ========
 
-celery
-------
+``celery``
+----------
 
 :git: https://github.com/celery/celery
 :CI: http://travis-ci.org/#!/celery/celery
@@ -946,8 +948,8 @@ celery
 :PyPI: http://pypi.python.org/pypi/celery
 :docs: http://docs.celeryproject.org
 
-kombu
------
+``kombu``
+---------
 
 Messaging library.
 
@@ -957,8 +959,8 @@ Messaging library.
 :PyPI: http://pypi.python.org/pypi/kombu
 :docs: http://kombu.readthedocs.org
 
-amqp
-----
+``amqp``
+--------
 
 Python AMQP 0.9.1 client.
 
@@ -968,8 +970,8 @@ Python AMQP 0.9.1 client.
 :PyPI: http://pypi.python.org/pypi/amqp
 :docs: http://amqp.readthedocs.org
 
-vine
-----
+``vine``
+--------
 
 Promise/deferred implementation.
 
@@ -979,8 +981,8 @@ Promise/deferred implementation.
 :PyPI: http://pypi.python.org/pypi/vine
 :docs: http://vine.readthedocs.org
 
-billiard
---------
+``billiard``
+------------
 
 Fork of multiprocessing containing improvements
 that will eventually be merged into the Python stdlib.
@@ -990,24 +992,16 @@ that will eventually be merged into the Python stdlib.
 :Windows-CI: https://ci.appveyor.com/project/ask/billiard
 :PyPI: http://pypi.python.org/pypi/billiard
 
-librabbitmq
------------
+``librabbitmq``
+---------------
 
 Very fast Python AMQP client written in C.
 
 :git: https://github.com/celery/librabbitmq
 :PyPI: http://pypi.python.org/pypi/librabbitmq
 
-celerymon
----------
-
-Celery monitor web-service.
-
-:git: https://github.com/celery/celerymon
-:PyPI: http://pypi.python.org/pypi/celerymon
-
-django-celery
--------------
+``django-celery``
+-----------------
 
 Django <-> Celery Integration.
 
@@ -1015,16 +1009,16 @@ Django <-> Celery Integration.
 :PyPI: http://pypi.python.org/pypi/django-celery
 :docs: http://docs.celeryproject.org/en/latest/django
 
-cl
---
+``cell``
+--------
 
 Actor library.
 
-:git: https://github.com/celery/cl
-:PyPI: http://pypi.python.org/pypi/cl
+:git: https://github.com/celery/cell
+:PyPI: http://pypi.python.org/pypi/cell
 
-cyme
-----
+``cyme``
+--------
 
 Distributed Celery Instance manager.
 
@@ -1036,32 +1030,37 @@ Distributed Celery Instance manager.
 Deprecated
 ----------
 
-- Flask-Celery
+- ``Flask-Celery``
 
 :git: https://github.com/ask/Flask-Celery
 :PyPI: http://pypi.python.org/pypi/Flask-Celery
 
-- carrot
+- ``celerymon``
+
+:git: https://github.com/celery/celerymon
+:PyPI: http://pypi.python.org/pypi/celerymon
+
+- ``carrot``
 
 :git: https://github.com/ask/carrot
 :PyPI: http://pypi.python.org/pypi/carrot
 
-- ghettoq
+- ``ghettoq``
 
 :git: https://github.com/ask/ghettoq
 :PyPI: http://pypi.python.org/pypi/ghettoq
 
-- kombu-sqlalchemy
+- ``kombu-sqlalchemy``
 
 :git: https://github.com/ask/kombu-sqlalchemy
 :PyPI: http://pypi.python.org/pypi/kombu-sqlalchemy
 
-- django-kombu
+- ``django-kombu``
 
 :git: https://github.com/ask/django-kombu
 :PyPI: http://pypi.python.org/pypi/django-kombu
 
-- pylibrabbitmq
+- ``pylibrabbitmq``
 
 Old name for :pypi:`librabbitmq`.
 

+ 5 - 5
docs/django/first-steps-with-django.rst

@@ -78,7 +78,7 @@ for Celery.  This means that you don't have to use multiple
 configuration files, and instead configure Celery directly
 from the Django settings; but you can also separate them if wanted.
 
-The uppercase namespace means that all Celery configuration options
+The uppercase name-space means that all Celery configuration options
 must be specified in uppercase instead of lowercase, and start with
 ``CELERY_``, so e.g. the :setting:`task_always_eager`` setting
 becomes ``CELERY_TASK_ALWAYS_EAGER``, and the :setting:`broker_url`
@@ -94,7 +94,7 @@ or execv:
 
 Next, a common practice for reusable apps is to define all tasks
 in a separate ``tasks.py`` module, and Celery does have a way to
-autodiscover these modules:
+auto-discover these modules:
 
 .. code-block:: python
 
@@ -113,7 +113,7 @@ of your installed apps, following the ``tasks.py`` convention::
 
 This way you do not have to manually add the individual modules
 to the :setting:`CELERY_IMPORTS <imports>` setting.  The ``lambda`` so that the
-autodiscovery can happen only when needed, and so that importing your
+auto-discovery can happen only when needed, and so that importing your
 module will not evaluate the Django settings object.
 
 Finally, the ``debug_task`` example is a task that dumps
@@ -148,7 +148,7 @@ result backends that uses the Django ORM and Django Cache frameworks.
 
 To use this with your project you need to follow these four steps:
 
-1. Install the ``django-celery`` library:
+1. Install the :pypi:`django-celery` library:
 
     .. code-block:: console
 
@@ -176,7 +176,7 @@ To use this with your project you need to follow these four steps:
 
         $ python manage.py syncdb
 
-4.  Configure celery to use the django-celery backend.
+4.  Configure celery to use the :pypi:`django-celery` backend.
 
     For the database backend you must use:
 

+ 10 - 9
docs/faq.rst

@@ -109,13 +109,13 @@ The pytz module provides timezone definitions and related tools.
 
 .. _`pytz`: http://pypi.python.org/pypi/pytz
 
-django-celery
-~~~~~~~~~~~~~
+``django-celery``
+~~~~~~~~~~~~~~~~~
 
-If you use django-celery then you don't have to install celery separately,
-as it will make sure that the required version is installed.
+If you use :pypi:`django-celery` then you don't have to install Celery
+separately, as it will make sure that the required version is installed.
 
-django-celery does not have any other dependencies.
+:pypi:`django-celery` does not have any other dependencies.
 
 kombu
 ~~~~~
@@ -875,8 +875,8 @@ Django
 
 .. _faq-django-database-tables:
 
-What purpose does the database tables created by django-celery have?
---------------------------------------------------------------------
+What purpose does the database tables created by ``django-celery`` have?
+------------------------------------------------------------------------
 
 Several database tables are created by default, these relate to
 
@@ -894,8 +894,9 @@ Several database tables are created by default, these relate to
 
 * Task results
 
-    The database result backend is enabled by default when using django-celery
-    (this is for historical reasons, and thus for backward compatibility).
+    The database result backend is enabled by default when using
+    :pypi:`django-celery` (this is for historical reasons, and thus for
+    backward compatibility).
 
     The results are stored in the ``TaskMeta`` and ``TaskSetMeta`` models.
     *these tables are not created if another result backend is configured*.

+ 2 - 2
docs/getting-started/introduction.rst

@@ -159,9 +159,9 @@ Features
 
             :ref:`Read more… <guide-monitoring>`.
 
-        - **Workflows**
+        - **Work-flows**
 
-            Simple and complex workflows can be composed using
+            Simple and complex work-flows can be composed using
             a set of powerful primitives we call the "canvas",
             including grouping, chaining, chunking and more.
 

+ 6 - 6
docs/getting-started/next-steps.rst

@@ -138,7 +138,7 @@ These options are described in more detailed in the :ref:`Workers Guide <guide-w
 Stopping the worker
 ~~~~~~~~~~~~~~~~~~~
 
-To stop the worker simply hit Ctrl+C.  A list of signals supported
+To stop the worker simply hit :kbd:`Control-c`.  A list of signals supported
 by the worker is detailed in the :ref:`Workers Guide <guide-workers>`.
 
 In the background
@@ -398,8 +398,8 @@ Calling tasks is described in detail in the
 
 .. _designing-workflows:
 
-*Canvas*: Designing Workflows
-=============================
+*Canvas*: Designing Work-flows
+==============================
 
 You just learned how to call a task using the tasks ``delay`` method,
 and this is often all you need, but sometimes you may want to pass the
@@ -490,7 +490,7 @@ To get to that I must introduce the canvas primitives…
 The Primitives
 --------------
 
-.. topic:: \ 
+.. topic:: \
 
     .. hlist::
         :columns: 2
@@ -503,7 +503,7 @@ The Primitives
         - :ref:`chunks <canvas-chunks>`
 
 These primitives are signature objects themselves, so they can be combined
-in any number of ways to compose complex workflows.
+in any number of ways to compose complex work-flows.
 
 .. note::
 
@@ -597,7 +597,7 @@ can be combined almost however you want, e.g::
 
     >>> upload_document.s(file) | group(apply_filter.s() for filter in filters)
 
-Be sure to read more about workflows in the :ref:`Canvas <guide-canvas>` user
+Be sure to read more about work-flows in the :ref:`Canvas <guide-canvas>` user
 guide.
 
 Routing
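A sketch of composing the primitives listed in the hunk above (the ``add``/``mul`` tasks are hypothetical):

    from celery import Celery, chain, group

    app = Celery('proj', broker='amqp://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def mul(x, y):
        return x * y

    # (4 + 4) fans out into two multiplications running in parallel.
    workflow = chain(add.s(4, 4), group(mul.s(2), mul.s(4)))
    result = workflow.apply_async()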

+ 3 - 0
docs/glossary.rst

@@ -105,3 +105,6 @@ Glossary
         Maximum number of unacknowledged messages a consumer can hold and if
         exceeded the transport should not deliver any more messages to that
         consumer.  See :ref:`optimizing-prefetch-limit`.
+
+    pidbox
+        A process mailbox, used to implement remote control commands.
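A sketch of a remote control command that travels over the pidbox described by the new glossary entry (broker URL hypothetical):

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    # Broadcast over each worker's pidbox; returns one reply per worker.
    replies = app.control.ping(timeout=1.0)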

+ 17 - 17
docs/history/changelog-1.0.rst

@@ -42,8 +42,8 @@
 Critical
 --------
 
-* SIGINT/Ctrl+C killed the pool, abruptly terminating the currently executing
-  tasks.
+* :sig:`INT`/:kbd:`Control-c` killed the pool, abruptly terminating the
+  currently executing tasks.
 
     Fixed by making the pool worker processes ignore :const:`SIGINT`.
 
@@ -54,8 +54,8 @@ Critical
 
 * Now depends on :pypi:`billiard` >= 0.3.1
 
-* worker: Previously exceptions raised by worker components could stall startup,
-  now it correctly logs the exceptions and shuts down.
+* worker: Previously exceptions raised by worker components could stall
+  start-up, now it correctly logs the exceptions and shuts down.
 
 * worker: Prefetch counts was set too late. QoS is now set as early as possible,
   so the worker: can't slurp in all the messages at start-up.
@@ -220,7 +220,7 @@ News
     If `Task.track_started` is enabled the task will report its status
     as "started" when the task is executed by a worker.
 
-    The default value is `False` as the normal behaviour is to not
+    The default value is `False` as the normal behavior is to not
     report that level of granularity. Tasks are either pending, finished,
     or waiting to be retried. Having a "started" status can be useful for
     when there are long running tasks and there is a need to report which
@@ -483,7 +483,7 @@ Fixes
 * The worker now shutdowns cleanly when receiving the :sig:`SIGTERM` signal.
 
 * The worker now does a cold shutdown if the :sig:`SIGINT` signal
-  is received (Ctrl+C),
+  is received (:kbd:`Control-c`),
   this means it tries to terminate as soon as possible.
 
 * Caching of results now moved to the base backend classes, so no need
@@ -629,11 +629,11 @@ Backward incompatible changes
 -----------------------------
 
 * Celery does not support detaching anymore, so you have to use the tools
-  available on your platform, or something like Supervisord to make
+  available on your platform, or something like :pypi:`supervisor` to make
   celeryd/celerybeat/celerymon into background processes.
 
     We've had too many problems with the worker daemonizing itself, so it was
-    decided it has to be removed. Example startup scripts has been added to
+    decided it has to be removed. Example start-up scripts has been added to
     the `extra/` directory:
 
     * Debian, Ubuntu, (start-stop-daemon)
@@ -738,7 +738,7 @@ Backward incompatible changes
   instead.
 
 * The worker no longer stores errors if `Task.ignore_result` is set, to
-  revert to the previous behaviour set
+  revert to the previous behavior set
   :setting:`CELERY_STORE_ERRORS_EVEN_IF_IGNORED` to `True`.
 
 * The statistics functionality has been removed in favor of events,
@@ -746,7 +746,7 @@ Backward incompatible changes
 
 * The module `celery.task.strategy` has been removed.
 
-* `celery.discovery` has been removed, and it's `autodiscover` function is
+* `celery.discovery` has been removed, and it's ``autodiscover`` function is
   now in `celery.loaders.djangoapp`. Reason: Internal API.
 
 * The :envvar:`CELERY_LOADER` environment variable now needs loader class name
@@ -840,7 +840,7 @@ News
 * Periodic tasks are now scheduled on the clock.
 
     I.e. `timedelta(hours=1)` means every hour at :00 minutes, not every
-    hour from the server starts.  To revert to the previous behaviour you
+    hour from the server starts.  To revert to the previous behavior you
     can set `PeriodicTask.relative = True`.
 
 * Now supports passing execute options to a TaskSets list of args, e.g.:
@@ -903,7 +903,7 @@ Changes
 
 * Log level for stdout/stderr changed from INFO to ERROR
 
-* ImportErrors are now properly propagated when autodiscovering tasks.
+* ImportErrors are now properly propagated when auto-discovering tasks.
 
 * You can now use `celery.messaging.establish_connection` to establish a
   connection to the broker.
@@ -961,7 +961,7 @@ Documentation
 * Now emits a warning if the --detach argument is used.
   --detach should not be used anymore, as it has several not easily fixed
   bugs related to it. Instead, use something like start-stop-daemon,
-  Supervisord or launchd (os x).
+  :pypi:`supervisor` or launchd (os x).
 
 
 * Make sure logger class is process aware, even if running Python >= 2.6.
@@ -1270,7 +1270,7 @@ News
 * New Tutorial: Creating a click counter using carrot and celery
 
 * Database entries for periodic tasks are now created at the workers
-    startup instead of for each check (which has been a forgotten TODO/XXX
+    start-up instead of for each check (which has been a forgotten TODO/XXX
     in the code for a long time)
 
 * New settings variable: :setting:`CELERY_TASK_RESULT_EXPIRES`
@@ -1481,7 +1481,7 @@ News
   it's the term used in the documentation from now on.
 
 * Make sure the pool and periodic task worker thread is terminated
-  properly at exit. (So `Ctrl-C` works again).
+  properly at exit. (So :kbd:`Control-c` works again).
 
 * Now depends on `python-daemon`.
 
@@ -1734,7 +1734,7 @@ arguments, so be sure to flush your task queue before you upgrade.
         >>> result.result
         [4, 8, 16]
 
-* Refactored the task metadata cache and database backends, and added
+* Refactored the task meta-data cache and database backends, and added
   a new backend for Tokyo Tyrant. You can set the backend in your django
   settings file.
 
@@ -1792,7 +1792,7 @@ arguments, so be sure to flush your task queue before you upgrade.
 
 * Added some unit tests
 
-* Can now use the database for task metadata (like if the task has
+* Can now use the database for task meta-data (like if the task has
   been executed or not). Set `settings.CELERY_TASK_META`
 
 * Can now run `python setup.py test` to run the unit tests from

+ 3 - 3
docs/history/changelog-2.0.rst

@@ -76,7 +76,7 @@ Fixes
 * :class:`~celery.task.control.inspect`: Replies did not work correctly
   if no destination was specified.
 
-* Can now store result/metadata for custom states.
+* Can now store result/meta-data for custom states.
 
 * Worker: A warning is now emitted if the sending of task error
   emails fails.
@@ -718,8 +718,8 @@ News
 
 * Worker: Added `--purge` as an alias to `--discard`.
 
-* Worker: Ctrl+C (SIGINT) once does warm shutdown, hitting Ctrl+C twice
-  forces termination.
+* Worker: :kbd:`Control-c` (SIGINT) once does warm shutdown,
+  hitting :kbd:`Control-c` twice forces termination.
 
 * Added support for using complex crontab-expressions in periodic tasks. For
   example, you can now use:

+ 8 - 8
docs/history/changelog-2.1.rst

@@ -49,7 +49,7 @@ Fixes
 * worker: Now properly handles errors occurring while trying to acknowledge
   the message.
 
-* `TaskRequest.on_failure` now encodes traceback using the current filesystem
+* `TaskRequest.on_failure` now encodes traceback using the current file-system
    encoding.  (Issue #286).
 
 * `EagerResult` can now be pickled (Issue #288).
@@ -80,7 +80,7 @@ Documentation
 * `EventReceiver`: now sends heartbeat request to find workers.
 
     This means :program:`celeryev` and friends finds workers immediately
-    at startup.
+    at start-up.
 
 * celeryev cursesmon: Set screen_delay to 10ms, so the screen refreshes more
   often.
@@ -278,8 +278,8 @@ Important Notes
 * No longer depends on SQLAlchemy, this needs to be installed separately
   if the database result backend is used.
 
-* django-celery now comes with a monitor for the Django Admin interface.
-  This can also be used if you're not a Django user.
+* :pypi:`django-celery` now comes with a monitor for the Django Admin
+  interface.  This can also be used if you're not a Django user.
   (Update: Django-Admin monitor has been replaced with Flower, see the
   Monitoring guide).
 
@@ -336,8 +336,8 @@ News
     This can then be stored in a database to generate statistics
     with, or even monitoring over longer time periods.
 
-    django-celery now comes with a Celery monitor for the Django
-    Admin interface. To use this you need to run the django-celery
+    :pypi:`django-celery` now comes with a Celery monitor for the Django
+    Admin interface. To use this you need to run the :pypi:`django-celery`
     snapshot camera, which stores snapshots to the database at configurable
     intervals.
 
@@ -349,7 +349,7 @@ News
 
             $ python manage.py syncdb
 
-    2. Start the django-celery snapshot camera:
+    2. Start the :pypi:`django-celery` snapshot camera:
 
         .. code-block:: console
 
@@ -497,7 +497,7 @@ News
 * worker: Now uses `multiprocessing.freeze_support()` so that it should work
   with **py2exe**, **PyInstaller**, **cx_Freeze**, etc.
 
-* worker: Now includes more metadata for the :state:`STARTED` state: PID and
+* worker: Now includes more meta-data for the :state:`STARTED` state: PID and
   host name of the worker that started the task.
 
     See issue #181

+ 2 - 2
docs/history/changelog-2.2.rst

@@ -982,8 +982,8 @@ Fixes
 
 * Windows: Utilities no longer output ANSI color codes on Windows
 
-* camqadm: Now properly handles Ctrl+C by simply exiting instead of showing
-  confusing traceback.
+* camqadm: Now properly handles :kbd:`Control-c` by simply exiting instead
+  of showing confusing traceback.
 
 * Windows: All tests are now passing on Windows.
 

+ 2 - 2
docs/history/changelog-2.3.rst

@@ -177,8 +177,8 @@ Important Notes
 
     .. note::
 
-        For django-celery users the default backend is still ``database``,
-        and results are not disabled by default.
+        For :pypi:`django-celery` users the default backend is
+        still ``database``, and results are not disabled by default.
 
 * The Debian init scripts have been deprecated in favor of the generic-init.d
   init scripts.

+ 1 - 1
docs/history/changelog-2.4.rst

@@ -365,7 +365,7 @@ News
 * Worker logged the string representation of args and kwargs
   without safe guards (Issue #480).
 
-* RHEL init script: Changed worker startup priority.
+* RHEL init script: Changed worker start-up priority.
 
     The default start / stop priorities for MySQL on RHEL are
 

+ 2 - 2
docs/history/changelog-2.5.rst

@@ -24,7 +24,7 @@ If you're looking for versions prior to 2.5 you should visit our
 This is a dummy release performed for the following goals:
 
 - Protect against force upgrading to Kombu 2.2.0
-- Version parity with django-celery
+- Version parity with :pypi:`django-celery`
 
 .. _version-2.5.3:
 
@@ -33,7 +33,7 @@ This is a dummy release performed for the following goals:
 :release-date: 2012-04-16 07:00 P.M BST
 :release-by: Ask Solem
 
-* A bug causes messages to be sent with UTC timestamps even though
+* A bug causes messages to be sent with UTC time-stamps even though
   :setting:`CELERY_ENABLE_UTC` was not enabled (Issue #636).
 
 * celerybeat: No longer crashes if an entry's args is set to None

+ 11 - 11
docs/history/changelog-3.0.rst

@@ -166,7 +166,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - [Worker] Fixed memory leak when restarting after connection lost
   (Issue #1325).
 
-- [Worker] Fixed UnicodeDecodeError at startup (Issue #1373).
+- [Worker] Fixed UnicodeDecodeError at start-up (Issue #1373).
 
     Fix contributed by Jessica Tallon.
 
@@ -268,7 +268,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Now depends on :pypi:`billiard` 2.7.3.27.
 
-- Can now specify a whitelist of accepted serializers using
+- Can now specify a white-list of accepted serializers using
   the new :setting:`CELERY_ACCEPT_CONTENT` setting.
 
     This means that you can force the worker to discard messages
@@ -277,7 +277,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
         CELERY_ACCEPT_CONTENT = ['json']
 
-    you can also specify MIME types in the whitelist::
+    you can also specify MIME types in the white-list::
 
         CELERY_ACCEPT_CONTENT = ['application/json']
 
@@ -378,7 +378,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - Worker: Message decoding error log message now includes traceback
   information.
 
-- Worker: The startup banner now includes system platform.
+- Worker: The start-up banner now includes system platform.
 
 - ``celery inspect|status|control`` now gives an error if used
   with an SQL based broker transport.
@@ -413,7 +413,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
     Fix contributed by Simon Engledew.
 
 - Windows: Fixed problem with the worker trying to pickle the Django settings
-  module at worker startup.
+  module at worker start-up.
 
 - generic-init.d:  No longer double quotes ``$CELERYD_CHDIR`` (Issue #1235).
 
@@ -790,7 +790,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - Docs updated to include the default value for the
   :setting:`CELERY_TASK_RESULT_EXPIRES` setting.
 
-- Improvements to the django-celery tutorial.
+- Improvements to the :pypi:`django-celery` tutorial.
 
     Contributed by Locker537.
 
@@ -955,7 +955,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Now depends on billiard 2.7.3.14
 
-    - Fixes crash at startup when using Django and pre-1.4 projects
+    - Fixes crash at start-up when using Django and pre-1.4 projects
       (setup_environ).
 
     - Hard time limits now sends the KILL signal shortly after TERM,
@@ -1246,7 +1246,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
     It's important to note that crontab schedules uses UTC time by default
     unless this setting is set.
 
-    Issue #904 and django-celery #150.
+    Issue #904 and :pypi:`django-celery` #150.
 
 - ``billiard.enable_forking`` is now only set by the processes pool.
 
@@ -1318,7 +1318,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - Now depends on kombu 2.3.1 + billiard 2.7.3.11
 
 - Fixed a bug with the -B option (``cannot pickle thread.lock objects``)
-  (Issue #894 + Issue #892, + django-celery #154).
+  (Issue #894 + Issue #892, + :pypi:`django-celery` #154).
 
 - The :control:`restart_pool` control command now requires the
   :setting:`CELERYD_POOL_RESTARTS` setting to be enabled
@@ -1329,7 +1329,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - ``chain.apply`` now passes args to the first task (Issue #889).
 
-- Documented previously secret options to the Django-Celery monitor
+- Documented previously secret options to the :pypi:`django-celery` monitor
   in the monitoring userguide (Issue #396).
 
 - Old changelog are now organized in separate documents for each series,
@@ -1381,7 +1381,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
     It was discovered that the SQS transport adds objects that can't
     be pickled to the delivery info mapping, so we had to go back
-    to using the whitelist again.
+    to using the white-list again.
 
     Fixing this bug also means that the SQS transport is now working again.
 

+ 17 - 15
docs/history/changelog-3.1.rst

@@ -28,7 +28,7 @@ new in Celery 3.1.
 - **Prefork pool**: Fixes memory leak related to processes exiting
   (Issue #2927).
 
-- **Worker**: Fixes crash at startup when trying to censor passwords
+- **Worker**: Fixes crash at start-up when trying to censor passwords
   in MongoDB and Cache result backend URLs (Issue #3079, Issue #3045,
   Issue #3049, Issue #3068, Issue #3073).
 
@@ -147,7 +147,8 @@ new in Celery 3.1.
 
     Fix contributed by Colin McIntosh.
 
-- **Supervisord**: Set higher process priority in supervisord example.
+- **Supervisord**: Set higher process priority in the :pypi:`supervisord`
+    example.
 
     Contributed by George Tantiras.
 
@@ -882,7 +883,7 @@ News
   (Issue #1874).
 
 - **Worker**: Fixed ``UnicodeEncodeError`` occuring when worker is started
-  by `supervisord`.
+  by :pypi:`supervisor`.
 
     Fix contributed by Codeb Fan.
 
@@ -1015,8 +1016,8 @@ News
     Use ``result.get(callback=)`` (or ``result.iter_native()`` where available)
     instead.
 
-- **Worker**\|eventlet/gevent: A regression caused ``Ctrl+C`` to be ineffective
-  for shutdown.
+- **Worker**\|eventlet/gevent: A regression caused :kbd:`Control-c` to be
+  ineffective for shutdown.
 
 - **Redis result backend:** Now using a pipeline to store state changes
   for improved performance.
@@ -1029,7 +1030,7 @@ News
 
     Fix contributed by Brodie Rao.
 
-- **Generic init scripts:** Now runs a check at startup to verify
+- **Generic init scripts:** Now runs a check at start-up to verify
   that any configuration scripts are owned by root and that they
   are not world/group writeable.
 
@@ -1085,7 +1086,7 @@ News
     Fix contributed by Brodie Rao
 
 - **Worker:** Will no longer accept remote control commands while the
-  worker startup phase is incomplete (*Issue #1741*).
+  worker start-up phase is incomplete (*Issue #1741*).
 
 - **Commands:** The output of the event dump utility
   (:program:`celery events -d`) can now be piped into other commands.
@@ -1106,8 +1107,9 @@ News
   to path so that the current celery source code is used as a basis for
   API documentation (*Issue #1782*).
 
-- **Documentation:** Supervisord examples contained an extraneous '-' in a
-  :option:`--logfile <celery worker --logfile>` argument example.
+- **Documentation:** :pypi:`supervisor` examples contained an
+  extraneous '-' in a :option:`--logfile <celery worker --logfile>` argument
+  example.
 
     Fix contributed by Mohammad Almeer.
 
@@ -1167,8 +1169,8 @@ as this is likely to lead to resource starvation and eventually
 deadlock when using the prefork pool (see also :ref:`task-synchronous-subtasks`).
 
 If you really know what you are doing you can avoid the warning (and
-the future exception being raised) by moving the operation in a whitelist
-block:
+the future exception being raised) by moving the operation in a
+white-list block:
 
 .. code-block:: python
 
@@ -1296,7 +1298,7 @@ Fixes
 
 - Now depends on :ref:`Kombu 3.0.7 <kombu:version-3.0.7>`.
 
-- Fixed problem where Mingle caused the worker to hang at startup
+- Fixed problem where Mingle caused the worker to hang at start-up
   (Issue #1686).
 
 - Beat: Would attempt to drop privileges twice (Issue #1708).
@@ -1402,7 +1404,7 @@ Fixes
 
 - ``celery shell`` command: Fixed ``IPython.frontend`` deprecation warning.
 
-- The default app no longer includes the builtin fixups.
+- The default app no longer includes the built-in fixups.
 
     This fixes a bug where ``celery multi`` would attempt
     to load the Django settings module before entering
@@ -1457,7 +1459,7 @@ Fixes
   instead.
 
 - Worker now properly responds to ``inspect stats`` commands
-  even if received before startup is complete (Issue #1659).
+  even if received before start-up is complete (Issue #1659).
 
 - :signal:`task_postrun` is now sent within a :keyword:`finally` block,
   to make sure the signal is always sent.
@@ -1489,7 +1491,7 @@ Fixes
 - Django: Fixup now sets the default app so that threads will use
   the same app instance (e.g. for manage.py runserver).
 
-- Worker: Fixed Unicode error crash at startup experienced by some users.
+- Worker: Fixed Unicode error crash at start-up experienced by some users.
 
 - Calling ``.apply_async`` on an empty chain now works again (Issue #1650).
 

+ 2 - 0
docs/history/index.rst

@@ -14,7 +14,9 @@ version please visit :ref:`changelog`.
     :maxdepth: 2
 
     changelog-3.1
+    whatsnew-3.0
     changelog-3.0
+    whatsnew-2.5
     changelog-2.5
     changelog-2.4
     changelog-2.3

+ 34 - 33
docs/whatsnew-2.5.rst → docs/history/whatsnew-2.5.rst

@@ -108,8 +108,8 @@ lost worker processes, it should be worth it.
 
 .. _v250-optimizations:
 
-Optimizations
-=============
+Optimization
+============
 
 - The code path used when the worker executes a task has been heavily
   optimized, meaning the worker is able to process a great deal
@@ -126,8 +126,8 @@ Optimizations
 
 .. _v250-deprecations:
 
-Deprecations
-============
+Deprecation Time-line Changes
+=============================
 
 Removals
 --------
@@ -149,8 +149,8 @@ Removals
   from :mod:`celery.task.base`, please import them from :mod:`celery.task`
   instead (originally scheduled for removal in 2.4).
 
-Deprecations
-------------
+Deprecated modules
+------------------
 
 * The :mod:`celery.decorators` module has changed status
   from pending deprecation to deprecated, and is scheduled for removal
@@ -168,7 +168,7 @@ Celery can now be configured to treat all incoming and outgoing dates
 as UTC, and the local timezone can be configured.
 
 This is not yet enabled by default, since enabling
-time zone support means workers running versions pre 2.5
+time zone support means workers running versions pre-2.5
 will be out of sync with upgraded workers.
 
 To enable UTC you have to set :setting:`CELERY_ENABLE_UTC`::
@@ -188,9 +188,9 @@ UTC will enabled by default in version 3.0.
 
 .. note::
 
-    django-celery will use the local timezone as specified by the
+    :pypi:`django-celery` will use the local timezone as specified by the
     ``TIME_ZONE`` setting, it will also honor the new `USE_TZ`_ setting
-    introuced in Django 1.4.
+    introduced in Django 1.4.
 
 .. _`USE_TZ`: https://docs.djangoproject.com/en/dev/topics/i18n/timezones/
 
@@ -235,7 +235,7 @@ effectively reloading the code.
 File system notification backends are pluggable, and Celery comes with three
 implementations:
 
-* inotify (Linux)
+* ``inotify`` (Linux)
 
     Used if the :pypi:`pyinotify` library is installed.
     If you are running on Linux this is the recommended implementation,
@@ -246,9 +246,9 @@ implementations:
 
         $ pip install pyinotify
 
-* kqueue (OS X/BSD)
+* ``kqueue`` (OS X/BSD)
 
-* stat
+* ``stat``
 
     The fallback implementation simply polls the files using ``stat`` and is very
     expensive.
@@ -341,9 +341,9 @@ In Other News
 
 - Now depends on Kombu 2.1.0.
 
-- Efficient Chord support for the memcached backend (Issue #533)
+- Efficient Chord support for the Memcached backend (Issue #533)
 
-    This means memcached joins Redis in the ability to do non-polling
+    This means Memcached joins Redis in the ability to do non-polling
     chords.
 
     Contributed by Dan McGee.
@@ -352,7 +352,7 @@ In Other News
 
     The Rabbit result backend can now use the fallback chord solution.
 
-- Sending :sig:`QUIT` to celeryd will now cause it cold terminate.
+- Sending :sig:`QUIT` to ``celeryd`` will now cause it to cold terminate.
 
     That is, it will not finish executing the tasks it is currently
     working on.
@@ -375,7 +375,7 @@ In Other News
 
     Contributed by Daniel Hepper.
 
-- celerybeat can now be configured on the command-line like celeryd.
+- ``celerybeat`` can now be configured on the command-line like ``celeryd``.
 
   Additional configuration must be added at the end of the argument list
   followed by ``--``, for example:
@@ -384,7 +384,7 @@ In Other News
 
     $ celerybeat -l info -- celerybeat.max_loop_interval=10.0
 
-- Now limits the number of frames in a traceback so that celeryd does not
+- Now limits the number of frames in a traceback so that ``celeryd`` does not
   crash on maximum recursion limit exceeded exceptions (Issue #615).
 
     The limit is set to the current recursion limit divided by 8 (which
@@ -410,19 +410,19 @@ In Other News
 - ``task.retry()`` now re-raises the original exception keeping
   the original stack trace.
 
-    Suggested by ojii.
+    Suggested by ``@ojii``.
 
 - The `--uid` argument to daemons now uses ``initgroups()`` to set
   groups to all the groups the user is a member of.
 
     Contributed by Łukasz Oleś.
 
-- celeryctl: Added ``shell`` command.
+- ``celeryctl``: Added ``shell`` command.
 
     The shell will have the current_app (``celery``) and all tasks
     automatically added to locals.
 
-- celeryctl: Added ``migrate`` command.
+- ``celeryctl``: Added ``migrate`` command.
 
     The migrate command moves all tasks from one broker to another.
     Note that this is experimental and you should have a backup
@@ -444,7 +444,7 @@ In Other News
     to set them.
 
     This is useful when using routing classes which decides a destination
-    at runtime.
+    at run-time.
 
     Contributed by Akira Matsuzaki.
 
@@ -464,7 +464,7 @@ In Other News
 
     Contributed by Steeve Morin.
 
-- MongoDB result backend: Now supports save and restore taskset.
+- MongoDB result backend: Now supports save and restore ``taskset``.
 
     Contributed by Julien Poissonnier.
 
@@ -476,7 +476,8 @@ In Other News
 
 - User (tilde) is now expanded in command-line arguments.
 
-- Can now configure CELERYCTL envvar in :file:`/etc/default/celeryd`.
+- Can now configure :envvar:`CELERYCTL` environment variable
+  in :file:`/etc/default/celeryd`.
 
     While not necessary for operation, :program:`celeryctl` is used for the
     ``celeryd status`` command, and the path to :program:`celeryctl` must be
@@ -509,18 +510,18 @@ Fixes
 
 - Windows: The ``celeryd`` program can now be used.
 
-    Previously Windows users had to launch celeryd using
+    Previously Windows users had to launch ``celeryd`` using
     ``python -m celery.bin.celeryd``.
 
 - Redis result backend: Now uses ``SETEX`` command to set result key,
   and expiry atomically.
 
-    Suggested by yaniv-aknin.
+    Suggested by ``@yaniv-aknin``.
 
-- celeryd: Fixed a problem where shutdown hanged when Ctrl+C was used to
-  terminate.
+- ``celeryd``: Fixed a problem where shutdown hanged when :kbd:`Control-c`
+  was used to terminate.
 
-- celeryd: No longer crashes when channel errors occur.
+- ``celeryd``: No longer crashes when channel errors occur.
 
     Fix contributed by Roger Hu.
 
@@ -550,10 +551,10 @@ Fixes
 - ``apply_async`` now forwards the original keyword arguments to ``apply``
   when :setting:`CELERY_ALWAYS_EAGER` is enabled.
 
-- celeryev now tries to re-establish the connection if the connection
+- ``celeryev`` now tries to re-establish the connection if the connection
   to the broker is lost (Issue #574).
 
-- celeryev: Fixed a crash occurring if a task has no associated worker
+- ``celeryev``: Fixed a crash occurring if a task has no associated worker
   information.
 
     Fix contributed by Matt Williamson.
@@ -561,12 +562,12 @@ Fixes
 - The current date and time is now consistently taken from the current loader's
   ``now`` method.
 
-- Now shows helpful error message when given a config module ending in
+- Now shows a helpful error message when given a configuration module ending in
   ``.py`` that can't be imported.
 
-- celeryctl: The :option:`--expires <celery call --expires>` and
+- ``celeryctl``: The :option:`--expires <celery call --expires>` and
   :option:`--eta <celery call --eta>` arguments to the apply command
   can now be an ISO-8601 formatted string.
 
-- celeryctl now exits with exit status ``EX_UNAVAILABLE`` (69) if no replies
+- ``celeryctl`` now exits with exit status ``EX_UNAVAILABLE`` (69) if no replies
   have been received.

+ 37 - 36
docs/whatsnew-3.0.rst → docs/history/whatsnew-3.0.rst

@@ -42,7 +42,7 @@ Highlights
 
     - The worker is now thread-less, giving great performance improvements.
 
-    - The new "Canvas" makes it easy to define complex workflows.
+    - The new "Canvas" makes it easy to define complex work-flows.
 
         Ever wanted to chain tasks together? This is possible, but
         not just that, now you can even chain together groups and chords,
@@ -61,7 +61,7 @@ Highlights
 
         Celery will automatically use the :pypi:`librabbitmq` module
         if installed, which is a very fast and memory-optimized
-        replacement for the py-amqp module.
+        replacement for the :pypi:`amqp` module.
 
     - Redis support is more reliable with improved ack emulation.
 
@@ -90,8 +90,9 @@ Broadcast exchanges renamed
 ---------------------------
 
 The worker's remote control command exchanges have been renamed
-(a new pidbox name), this is because the ``auto_delete`` flag on the exchanges
-has been removed, and that makes it incompatible with earlier versions.
+(a new :term:`pidbox` name), this is because the ``auto_delete`` flag on
+the exchanges has been removed, and that makes it incompatible with
+earlier versions.
 
 You can manually delete the old exchanges if you want,
 using the :program:`celery amqp` command (previously called ``camqadm``):
@@ -101,8 +102,8 @@ using the :program:`celery amqp` command (previously called ``camqadm``):
     $ celery amqp exchange.delete celeryd.pidbox
     $ celery amqp exchange.delete reply.celeryd.pidbox
 
-Eventloop
----------
+Event-loop
+----------
 
 The worker is now running *without threads* when used with RabbitMQ (AMQP),
 or Redis as a broker, resulting in:
@@ -118,7 +119,7 @@ Hopefully this can be extended to include additional broker transports
 in the future.
 
 For increased reliability the :setting:`CELERY_FORCE_EXECV` setting is enabled
-by default if the eventloop is not used.
+by default if the event-loop is not used.
 
 New ``celery`` umbrella command
 -------------------------------
@@ -126,7 +127,7 @@ New ``celery`` umbrella command
 All Celery's command-line programs are now available from a single
 :program:`celery` umbrella command.
 
-You can see a list of subcommands and options by running:
+You can see a list of sub-commands and options by running:
 
 .. code-block:: console
 
@@ -147,7 +148,7 @@ Now depends on :pypi:`billiard`.
 --------------------------------
 
 Billiard is a fork of the multiprocessing containing
-the no-execv patch by sbt (http://bugs.python.org/issue8713),
+the no-execv patch by ``sbt`` (http://bugs.python.org/issue8713),
 and also contains the pool improvements previously located in Celery.
 
 This fork was necessary as changes to the C extension code were required
@@ -164,7 +165,7 @@ for the no-execv patch to work.
 
 The :mod:`celery.app.task` module is now a module instead of a package.
 
-The setup.py install script will try to remove the old package,
+The :file:`setup.py` install script will try to remove the old package,
 but if that doesn't work for some reason you have to remove
 it manually.  This command helps:
 
@@ -388,8 +389,8 @@ accidentally changed while switching to using blocking pop.
 `group`/`chord`/`chain` are now subtasks
 ----------------------------------------
 
-- group is no longer an alias to TaskSet, but new alltogether,
-  since it was very difficult to migrate the TaskSet class to become
+- group is no longer an alias to ``TaskSet``, but new altogether,
+  since it was very difficult to migrate the ``TaskSet`` class to become
   a subtask.
 
 - A new shortcut has been added to tasks:
@@ -425,10 +426,10 @@ accidentally changed while switching to using blocking pop.
 
         >>> chain(add.s(2, 2), pow.s(2)).apply_async().get()
 
-- A new subtask_type key has been added to the subtask dicts
+- A new ``subtask_type`` key has been added to the subtask dictionary.
 
-    This can be the string "chord", "group", "chain", "chunks",
-    "xmap", or "xstarmap".
+    This can be the string ``"chord"``, ``"group"``, ``"chain"``,
+    ``"chunks"``, ``"xmap"``, or ``"xstarmap"``.
 
 - maybe_subtask now uses subtask_type to reconstruct
   the object, to be used when using non-pickle serializers.
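
  As a rough illustration (assuming the ``add`` task used elsewhere in
  these notes), the type is stored as just another key in the signature
  dictionary, so it survives non-pickle round-trips:

  .. code-block:: pycon

      >>> from celery import group
      >>> sig = group(add.s(2, 2), add.s(4, 4))
      >>> dict(sig)['subtask_type']
      'group'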
@@ -464,7 +465,7 @@ New remote control commands
 ---------------------------
 
 These commands were previously experimental, but they have proven
-stable and is now documented as part of the offical API.
+stable and are now documented as part of the official API.
 
 - :control:`add_consumer`/:control:`cancel_consumer`
 
@@ -497,7 +498,7 @@ stable and is now documented as part of the offical API.
 
 - :control:`autoscale`
 
-    Tells workers with `--autoscale` enabled to change autoscale
+    Tells workers with ``--autoscale`` enabled to change autoscale
     max/min concurrency settings.
 
     This command is available programmatically as :meth:`@control.autoscale`:
@@ -573,15 +574,15 @@ Logging support now conforms better with best practices.
 
 - All loggers inherit from a common logger called "celery".
 
-- Before task.get_logger would setup a new logger for every task,
-  and even set the loglevel.  This is no longer the case.
+- Before ``task.get_logger`` would set up a new logger for every task,
+  and even set the log level.  This is no longer the case.
 
     - Instead all task loggers now inherit from a common "celery.task" logger
       that is set up when programs call `setup_logging_subsystem`.
 
     - Instead of using LoggerAdapter to augment the formatter with
       the task_id and task_name field, the task base logger now use
-      a special formatter adding these values at runtime from the
+      a special formatter adding these values at run-time from the
       currently executing task.
 
 - In fact, ``task.get_logger`` is no longer recommended, it is better
@@ -672,7 +673,7 @@ Lazy task decorators
 
 The ``@task`` decorator is now lazy when used with custom apps.
 
-That is, if ``accept_magic_kwargs`` is enabled (herby called "compat mode"), the task
+That is, if ``accept_magic_kwargs`` is enabled (hereby called "compat mode"), the task
 decorator executes inline like before, however for custom apps the @task
 decorator now returns a special PromiseProxy object that is only evaluated
 on access.
@@ -690,11 +691,11 @@ The :option:`--app <celery --app>` option now 'auto-detects'
       attribute named 'celery'.
 
     - If the provided path is a package it tries
-      to import a submodule named 'celery',
+      to import a sub-module named 'celery',
       and get the celery attribute from that module.
 
-E.g. if you have a project named 'proj' where the
-celery app is located in 'from proj.celery import app',
+E.g. if you have a project named ``proj`` where the
+celery app is located in ``from proj.celery import app``,
 then the following will be equivalent:
 
 .. code-block:: console
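
    # (an illustrative sketch: given a package ``proj`` with
    # ``proj/celery.py`` defining ``app``, these forms are equivalent)
    $ celery --app=proj worker
    $ celery --app=proj.celery worker
    $ celery --app=proj.celery:app worker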
@@ -752,12 +753,12 @@ In Other News
 
 - Module celery.actors has been removed, and will be part of cl instead.
 
-- Introduces new ``celery`` command, which is an entrypoint for all other
+- Introduces new ``celery`` command, which is an entry-point for all other
   commands.
 
     The main entry-point for this command can be run by calling ``celery.start()``.
 
-- Annotations now supports decorators if the key startswith '@'.
+- Annotations now support decorators if the key starts with '@'.
 
     E.g.:
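
    A plausible sketch of such an annotation (the task name and wrapper
    are illustrative):

    .. code-block:: python

        from functools import wraps

        def debug_args(fun):
            @wraps(fun)
            def _inner(*args, **kwargs):
                print('CALLED WITH: %r %r' % (args, kwargs))
                return fun(*args, **kwargs)
            return _inner

        CELERY_ANNOTATIONS = {
            'tasks.add': {'@__call__': debug_args},
        }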
 
@@ -777,9 +778,9 @@ In Other News
     Also tasks are now always bound by class so that
     annotated methods end up being bound.
 
-- Bugreport now available as a command and broadcast command
+- Bug-report now available as a command and broadcast command
 
-    - Get it from a Python repl:
+    - Get it from a Python REPL:
 
         .. code-block:: pycon
 
@@ -869,7 +870,7 @@ In Other News
 - Deprecated module ``celery.conf`` has been removed.
 
 - The :setting:`CELERY_TIMEZONE` now always requires the :pypi:`pytz`
-  library to be installed (exept if the timezone is set to `UTC`).
+  library to be installed (except if the timezone is set to `UTC`).
 
 - The Tokyo Tyrant backend has been removed and is no longer supported.
 
@@ -881,7 +882,7 @@ In Other News
 
 - Worker: now truncates very long message bodies in error reports.
 
-- No longer deepcopies exceptions when trying to serialize errors.
+- No longer deep-copies exceptions when trying to serialize errors.
 
 - :envvar:`CELERY_BENCH` environment variable, will now also list
   memory usage statistics at worker shutdown.
@@ -893,7 +894,7 @@ In Other News
 
     Contributed by Matt Long.
 
-- Worker/Celerybeat no longer logs the startup banner.
+- Worker/Beat no longer logs the start-up banner.
 
     Previously it would be logged with severity warning,
     now it's only written to stdout.
@@ -903,7 +904,7 @@ In Other News
 
 - New signal: :signal:`task_revoked`
 
-- celery.contrib.migrate: Many improvements including
+- :mod:`celery.contrib.migrate`: Many improvements including
   filtering, queue migration, and support for acking messages on the broker
   migrating from.
 
@@ -914,7 +915,7 @@ In Other News
 - Worker: No longer calls ``consume`` on the remote control command queue
   twice.
 
-    Probably didn't cause any problems, but was unecessary.
+    Probably didn't cause any problems, but was unnecessary.
 
 Internals
 ---------
@@ -923,7 +924,7 @@ Internals
 
     Both names still work.
 
-- Compat modules are now generated dynamically upon use.
+- Compatibility modules are now generated dynamically upon use.
 
     These modules are ``celery.messaging``, ``celery.log``,
     ``celery.decorators`` and ``celery.registry``.
@@ -995,8 +996,8 @@ but these removals should have no major effect.
 
 .. _v300-deprecations:
 
-Deprecations
-============
+Deprecation Time-line Changes
+=============================
 
 See the :ref:`deprecation-timeline`.
 

+ 1 - 1
docs/includes/installation.txt

@@ -94,7 +94,7 @@ Transports and Backends
     for using CouchDB as a message transport (*experimental*).
 
 :celery[couchbase]:
-    for using CouchBase as a result backend.
+    for using Couchbase as a result backend.
 
 :celery[elasticsearch]
     for using Elasticsearch as a result backend.

+ 0 - 2
docs/index.rst

@@ -51,8 +51,6 @@ Contents
     changelog
     whatsnew-4.0
     whatsnew-3.1
-    whatsnew-3.0
-    whatsnew-2.5
     reference/index
     internals/index
     history/index

+ 3 - 3
docs/internals/guide.rst

@@ -245,8 +245,8 @@ Module Overview
 
             "single-mode" uses this loader by default.
 
-    Extension loaders also exist, like ``django-celery``, ``celery-pylons``
-    and so on.
+    Extension loaders also exist, like :pypi:`django-celery`,
+    :pypi:`celery-pylons` and so on.
 
 - celery.worker
 
@@ -304,7 +304,7 @@ Module Overview
 
 - celery.contrib
 
-    Additional public code that doesn't fit into any other namespace.
+    Additional public code that doesn't fit into any other name-space.
 
 Worker overview
 ===============

+ 2 - 2
docs/internals/protocol.rst

@@ -104,7 +104,7 @@ Changes from version 1
     Worker may redirect the message to a worker that supports
     the language.
 
-- Metadata moved to headers.
+- Meta-data moved to headers.
 
     This means that workers/intermediates can inspect the message
     and make decisions based on the headers without decoding
@@ -149,7 +149,7 @@ Changes from version 1
 
 - ``correlation_id`` replaces ``task_id`` field.
 
-- ``root_id`` and ``parent_id`` fields helps keep track of workflows.
+- ``root_id`` and ``parent_id`` fields help keep track of work-flows.
 
 - ``shadow`` lets you specify a different name for logs and monitors;
   it can be used for e.g. meta tasks that call any function:

+ 1 - 1
docs/reference/celery.rst

@@ -140,7 +140,7 @@ and creating Celery applications.
 Canvas primitives
 -----------------
 
-See :ref:`guide-canvas` for more about creating task workflows.
+See :ref:`guide-canvas` for more about creating task work-flows.
 
 .. class:: group(task1[, task2[, task3[,… taskN]]])
 

+ 351 - 0
docs/spelling_wordlist.txt

@@ -0,0 +1,351 @@
+bugfix
+serializers
+yaml
+untrusted
+Kombu
+serializer
+conf
+tracebacks
+acks
+ack
+backend
+backends
+RPC
+MongoDB
+Cassandra
+Elasticsearch
+IronCache
+Couchbase
+CouchDB
+dburi
+sqlalchemy
+Memcached
+Redis
+celerymon
+supervisord
+prefork
+Malinovsky
+Dmitry
+Feanil
+Hoeve
+Allard
+deserialize
+deserialized
+AMQP
+Jaillet
+Quarta
+Davide
+Srinivasan
+Raghuram
+Katz
+Omer
+Garnero
+Gorbunov
+Oblovatniy
+Penhard
+prefork
+Mickaël
+Vanderbauwhede
+daemonizing
+Malinovsky
+Dmitry
+Pravec
+iterable
+Permana
+bootstep
+bootsteps
+regexes
+regex
+gevent
+eventlet
+async
+Cristian
+Mărieș
+Ionel
+Koshelev
+Solem
+callbacks
+errbacks
+Sterre
+der
+Môshe
+Demir
+Ahmet
+Gheem
+Alman
+Dartiguelongue
+Gilles
+Riak
+Zhavoronkov
+Yaroslav
+RSS
+Manipon
+Parncutt
+Nextdoor
+subtask
+Mallavarapu
+Aneil
+Mallavarapu
+Deane
+starmap
+refactor
+refactored
+deserialization
+deserializing
+deserializes
+serializes
+serialized
+serializing
+versa
+Django
+Jython
+CPython
+許邱翔
+Pavlovic
+Zoran
+Selivanov
+Yury
+Langford
+Wil
+Gorbunov
+Bolshakov
+Klindukh
+Kumar
+init
+pid
+Valentyn
+Tochev
+Tocho
+Schottdorf
+Tobias
+Grainger
+Sadaoui
+Tewfik
+Janež
+Tadej
+Khera
+Sukrit
+Seungha
+Tikhonov
+Sergey
+Azovskov
+Greinhofer
+Rémy
+Luckie
+Zhu
+Rongze
+Hu
+Carvalho
+Czajka
+Radek
+Maślanka
+Paulo
+Piotr
+Pearce
+Hoch
+Ori
+Korner
+Nyby
+Nik
+Attwood
+Movsisyan
+Mher
+Vdb
+Maxime
+Nikolov
+Atanasov
+Ribeiro
+Marcio
+Pomfrey
+Wiman
+Buckens
+Haskins
+Latitia
+Bujniewicz
+Krzysztof
+Ramaraju
+Kracekumar
+Maeda
+Kouhei
+Koukopoulos
+Konstantinos
+Groner
+Kai
+Patrin
+Rossi
+Whitlock
+Kirkham
+Jevnik
+Delalande
+Veatch
+Pulec
+Georgievsky
+Ilya
+Gylfason
+Hogni
+Ledesma
+Jiangmiao
+Gao
+Holop
+Frantisek
+Grossi
+Flavio
+Schwarz
+Sucu
+Fatih
+Ádám
+Dudás
+Brakhane
+Harrigan
+Baumgold
+Srinivas
+Davanum
+Bargen
+Danilo
+Marlow
+Jellick
+Farwell
+Erway
+Duryee
+Bouterse
+Peksag
+Berker
+Rouberol
+Koval
+Artyom
+Belaid
+Areski
+Yurchuk
+Andriy
+Rodionoff
+Fokau
+Rabbaglietti
+McGregor
+Bevan
+Zoë
+Bozorgkhan
+Kotlyarov
+Alexey
+Lebedev
+Rattray
+Goiri
+Gómez
+Aitor
+Guinet
+Adrien
+Renberg
+daemonization
+stdout
+stderr
+Taub
+prefetch
+prefetched
+args
+kwargs
+Rudakou
+Stas
+Aziz
+Jameel
+preload
+login
+Login
+Kamara
+Idan
+Masiero
+Alain
+Steeve
+failover
+prefetching
+prefetch
+changelog
+Cipater
+Eran
+Markey
+Johansson
+docstrings
+acking
+Brendon
+subtasks
+Crontab
+programmatically
+Germán
+tuple
+tuples
+acked
+execv
+pidbox
+Chiastic
+Seong
+Wun
+Mikalajūnas
+Ignas
+Lavin
+Metzlar
+Nagurney
+Streeter
+Poissonnier
+Julien
+Steeve
+Matsuzaki
+Akira
+Łukasz
+Cron
+Vixie
+crontab
+Clowes
+memcached
+pluggable
+reloader
+autoscale
+autoscaling
+autoscaler
+cryptographic
+subclassing
+resize
+filename
+hostname
+autocommit
+preload
+O'Reilly
+Breshears
+semipredicates
+semipredicate
+unpickled
+pickleable
+backtrace
+logfile
+loglevel
+Petrello
+Rundstein
+compat
+URI
+pre
+instantiation
+exitcode
+nodename
+misconfigure
+misconfiguration
+misconfigured
+Bridgen
+RabbitMQ
+fanout
+webhook
+webhooks
+daemonize
+optimizations
+Munin
+chunking
+subclasses
+prepend
+prepended
+arity
+msgpack
+natively
+rdb
+unmanaged
+wbits
+arg
+bufsize
+exc
+utils
+http

+ 61 - 37
docs/tutorials/daemonizing.rst

@@ -31,7 +31,7 @@ Init script: celeryd
 --------------------
 
 :Usage: `/etc/init.d/celeryd {start|stop|restart|status}`
-:Configuration file: /etc/default/celeryd
+:Configuration file: :file:`/etc/default/celeryd`
 
 To configure this script to run the worker properly you probably need to at least
 tell it where to change
@@ -147,12 +147,14 @@ as shown in the example Django project in :ref:`django-first-steps`.
 Available options
 ~~~~~~~~~~~~~~~~~~
 
-* CELERY_APP
+* ``CELERY_APP``
+
     App instance to use (value for :option:`--app <celery --app>` argument).
-    If you're still using the old API, or django-celery, then you
+    If you're still using the old API, or :pypi:`django-celery`, then you
     can omit this setting.
 
-* CELERY_BIN
+* ``CELERY_BIN``
+
     Absolute or relative path to the :program:`celery` program.
     Examples:
 
@@ -161,45 +163,56 @@ Available options
         * :file:`/virtualenvs/proj/bin/celery`
         * :file:`/virtualenvs/proj/bin/python -m celery`
 
-* CELERYD_NODES
+* ``CELERYD_NODES``
+
     List of node names to start (separated by space).
 
-* CELERYD_OPTS
+* ``CELERYD_OPTS``
+
     Additional command-line arguments for the worker, see
     `celery worker --help` for a list.  This also supports the extended
     syntax used by `multi` to configure settings for individual nodes.
     See `celery multi --help` for some multi-node configuration examples.
 
-* CELERYD_CHDIR
+* ``CELERYD_CHDIR``
+
     Path to change directory to at start. Default is to stay in the current
     directory.
 
-* CELERYD_PID_FILE
+* ``CELERYD_PID_FILE``
+
     Full path to the PID file. Default is /var/run/celery/%n.pid
 
-* CELERYD_LOG_FILE
+* ``CELERYD_LOG_FILE``
+
     Full path to the worker log file. Default is /var/log/celery/%n%I.log
     **Note**: Using `%I` is important when using the prefork pool as having
     multiple processes share the same log file will lead to race conditions.
 
-* CELERYD_LOG_LEVEL
+* ``CELERYD_LOG_LEVEL``
+
     Worker log level. Default is INFO.
 
-* CELERYD_USER
+* ``CELERYD_USER``
+
     User to run the worker as. Default is current user.
 
-* CELERYD_GROUP
+* ``CELERYD_GROUP``
+
     Group to run worker as. Default is current user.
 
-* CELERY_CREATE_DIRS
+* ``CELERY_CREATE_DIRS``
+
     Always create directories (log directory and pid file directory).
     Default is to only create directories when no custom logfile/pidfile set.
 
-* CELERY_CREATE_RUNDIR
+* ``CELERY_CREATE_RUNDIR``
+
     Always create pidfile directory.  By default only enabled when no custom
     pidfile location set.
 
-* CELERY_CREATE_LOGDIR
+* ``CELERY_CREATE_LOGDIR``
+
     Always create logfile directory.  By default only enabled when no custom
     logfile location set.
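
As a concrete illustration, a minimal :file:`/etc/default/celeryd` built
from the options above might look like this (paths, node names and user
are examples only):

.. code-block:: bash

    CELERY_APP="proj"
    CELERY_BIN="/virtualenvs/proj/bin/celery"
    CELERYD_NODES="worker1 worker2"
    CELERYD_OPTS="--time-limit=300 --concurrency=8"
    CELERYD_PID_FILE="/var/run/celery/%n.pid"
    CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
    CELERYD_LOG_LEVEL="INFO"
    CELERYD_USER="celery"
    CELERYD_GROUP="celery"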
 
@@ -208,7 +221,8 @@ Available options
 Init script: celerybeat
 -----------------------
 :Usage: `/etc/init.d/celerybeat {start|stop|restart}`
-:Configuration file: /etc/default/celerybeat or /etc/default/celeryd
+:Configuration file: :file:`/etc/default/celerybeat` or
+                     :file:`/etc/default/celeryd`
 
 .. _generic-initd-celerybeat-example:
 
@@ -256,44 +270,54 @@ You should use the same template as above, but make sure the
 Available options
 ~~~~~~~~~~~~~~~~~
 
-* CELERY_APP
+* ``CELERY_APP``
+
     App instance to use (value for :option:`--app <celery --app>` argument).
 
-* CELERYBEAT_OPTS
+* ``CELERYBEAT_OPTS``
+
     Additional arguments to celerybeat, see `celerybeat --help` for a
     list.
 
-* CELERYBEAT_PID_FILE
+* ``CELERYBEAT_PID_FILE``
+
     Full path to the PID file. Default is /var/run/celeryd.pid.
 
-* CELERYBEAT_LOG_FILE
+* ``CELERYBEAT_LOG_FILE``
+
     Full path to the celeryd log file. Default is /var/log/celeryd.log
 
-* CELERYBEAT_LOG_LEVEL
+* ``CELERYBEAT_LOG_LEVEL``
+
     Log level to use for celeryd. Default is INFO.
 
-* CELERYBEAT_USER
+* ``CELERYBEAT_USER``
+
     User to run beat as. Default is current user.
 
-* CELERYBEAT_GROUP
+* ``CELERYBEAT_GROUP``
+
     Group to run beat as. Default is current user.
 
-* CELERY_CREATE_DIRS
+* ``CELERY_CREATE_DIRS``
+
     Always create directories (log directory and pid file directory).
     Default is to only create directories when no custom logfile/pidfile set.
 
-* CELERY_CREATE_RUNDIR
+* ``CELERY_CREATE_RUNDIR``
+
     Always create pidfile directory.  By default only enabled when no custom
     pidfile location set.
 
-* CELERY_CREATE_LOGDIR
+* ``CELERY_CREATE_LOGDIR``
+
     Always create logfile directory.  By default only enabled when no custom
     logfile location set.
 
 .. _daemon-systemd-generic:
 
-Usage systemd
-=============
+Usage ``systemd``
+=================
 
 .. _generic-systemd-celery:
 
@@ -304,9 +328,10 @@ Service file: celery.service
 :Configuration file: /etc/conf.d/celery
 
 To create temporary folders for the log and pid files, change user and group in
-/usr/lib/tmpfiles.d/celery.conf.
-To configure user, group, chdir change settings User, Group and WorkingDirectory defines
-in /usr/lib/systemd/system/celery.service.
+:file:`/usr/lib/tmpfiles.d/celery.conf`.
+To configure user, group, and ``chdir``, change the
+``User``, ``Group``, and ``WorkingDirectory`` settings defined in
+:file:`/usr/lib/systemd/system/celery.service`.
 
 .. _generic-systemd-celery-example:
 
@@ -346,7 +371,7 @@ This is an example configuration for a Python project:
 Example Django configuration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-This is an example configuration for those using `django-celery`:
+This is an example configuration for those using :pypi:`django-celery`:
 
 .. code-block:: bash
 
@@ -409,19 +434,18 @@ you should :ref:`report it <reporting-bugs>`).
 
 .. _daemon-supervisord:
 
-`supervisord`_
-==============
+:pypi:`supervisor`
+==================
 
 * `extra/supervisord/`_
 
 .. _`extra/supervisord/`:
     https://github.com/celery/celery/tree/3.1/extra/supervisord/
-.. _`supervisord`: http://supervisord.org/
 
 .. _daemon-launchd:
 
-launchd (OS X)
-==============
+``launchd`` (OS X)
+==================
 
 * `extra/osx`_
 

+ 5 - 5
docs/tutorials/debugging.rst

@@ -21,11 +21,11 @@ Example usage:
     @task()
     def add(x, y):
         result = x + y
-        rdb.set_trace()  # <- set breakpoint
+        rdb.set_trace()  # <- set break-point
         return result
 
 
-:func:`~celery.contrib.rdb.set_trace` sets a breakpoint at the current
+:func:`~celery.contrib.rdb.set_trace` sets a break-point at the current
 location and creates a socket you can telnet into to remotely debug
 your task.
 
@@ -39,7 +39,7 @@ By default the debugger will only be available from the local host,
 to enable access from the outside you have to set the environment
 variable :envvar:`CELERY_RDB_HOST`.
 
-When the worker encounters your breakpoint it will log the following
+When the worker encounters your break-point it will log the following
 information:
 
 .. code-block:: text
@@ -94,8 +94,8 @@ Tips
 
 .. _breakpoint_signal:
 
-Enabling the breakpoint signal
-------------------------------
+Enabling the break-point signal
+-------------------------------
 
 If the environment variable :envvar:`CELERY_RDBSIG` is set, the worker
 will open up an rdb instance whenever the `SIGUSR2` signal is sent.

+ 2 - 2
docs/tutorials/task-cookbook.rst

@@ -21,12 +21,12 @@ It's part of an imaginary RSS feed importer called `djangofeeds`.
 The task takes a feed URL as a single argument, and imports that feed into
 a Django model called `Feed`. We ensure that it's not possible for two or
 more workers to import the same feed at the same time by setting a cache key
-consisting of the MD5 checksum of the feed URL.
+consisting of the MD5 check-sum of the feed URL.
 
 The cache key expires after some time in case something unexpected happens,
 and something always will...
 
-For this reason your tasks runtime should not exceeed the timeout.
+For this reason your task's run-time should not exceed the timeout.
 
 
 .. code-block:: python
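
    # A condensed, hedged sketch of the pattern described above;
    # ``app``, ``Feed`` and the timeout value are illustrative.
    from hashlib import md5
    from django.core.cache import cache

    LOCK_EXPIRE = 60 * 5  # lock expires in 5 minutes

    @app.task(bind=True)
    def import_feed(self, feed_url):
        # the cache key is the MD5 check-sum of the feed URL
        feed_url_hexdigest = md5(feed_url.encode('utf-8')).hexdigest()
        lock_id = 'lock-{0}'.format(feed_url_hexdigest)
        # cache.add only succeeds if the key does not already exist,
        # so it doubles as a distributed mutex
        if cache.add(lock_id, 'true', LOCK_EXPIRE):
            try:
                Feed.objects.import_feed(feed_url)
            finally:
                cache.delete(lock_id)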

+ 3 - 3
docs/userguide/application.rst

@@ -134,7 +134,7 @@ and update several keys at once by using the ``update`` method::
 The configuration object consists of multiple dictionaries
 that are consulted in order:
 
-    #. Changes made at runtime.
+    #. Changes made at run-time.
     #. The configuration module (if any)
     #. The default configuration (:mod:`celery.app.defaults`).
 
@@ -273,7 +273,7 @@ If you add custom settings containing sensitive information you should name
 the keys using a name that Celery identifies as secret.
 
 A configuration setting will be censored if the name contains any of
-these substrings:
+these sub-strings:
 
 ``API``, ``TOKEN``, ``KEY``, ``SECRET``, ``PASS``, ``SIGNATURE``, ``DATABASE``
 
@@ -449,7 +449,7 @@ chain breaks:
 
         >>> Hello.delay('world!')
 
-    Later, it was decided that passing arbitrary call-ables
+    Later, it was decided that passing arbitrary call-ables
     was an anti-pattern, since it makes it very hard to use
     serializers other than pickle, and the feature was removed
     in 2.0, replaced by task decorators:

+ 3 - 3
docs/userguide/calling.rst

@@ -320,7 +320,7 @@ change this using the :setting:`task_serializer` setting,
 or for each individual task, or even per message.
 
 There's built-in support for :mod:`pickle`, `JSON`, `YAML`
-and `msgpack`, and you can also add your own custom serializers by registering
+and ``msgpack``, and you can also add your own custom serializers by registering
 them into the Kombu serializer registry
 
 .. seealso::
@@ -335,7 +335,7 @@ json -- JSON is supported in many programming languages, is now
     using the modern Python libraries such as :pypi:`simplejson`.
 
     The primary disadvantage to JSON is that it limits you to the following
-    data types: strings, Unicode, floats, boolean, dictionaries, and lists.
+    data types: strings, Unicode, floats, Boolean, dictionaries, and lists.
     Decimals and dates are notably missing.
 
     Also, binary data will be transferred using Base64 encoding, which will
@@ -500,4 +500,4 @@ AMQP's full routing capabilities. Interested parties may read the
 
     A number between `0` and `255`, where `255` is the highest priority.
 
-    Supported by: rabbitmq, redis (priority reversed, 0 is highest), beanstalk
+    Supported by: RabbitMQ, Redis (priority reversed, 0 is highest), Beanstalk
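
    A hedged illustration of setting the priority per call (the ``add``
    task is assumed from the earlier examples in this guide):

    .. code-block:: python

        # 255 is the highest priority on RabbitMQ; on Redis the scale is
        # reversed, so test against the broker you actually deploy on.
        add.apply_async((2, 2), priority=200)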

+ 4 - 4
docs/userguide/canvas.rst

@@ -1,8 +1,8 @@
 .. _guide-canvas:
 
-=============================
- Canvas: Designing Workflows
-=============================
+==============================
+ Canvas: Designing Work-flows
+==============================
 
 .. contents::
     :local:
@@ -305,7 +305,7 @@ The Primitives
 
 
 The primitives are also signature objects themselves, so that they can be combined
-in any number of ways to compose complex workflows.
+in any number of ways to compose complex work-flows.
 
 Here's some examples:
 

+ 1 - 1
docs/userguide/concurrency/eventlet.rst

@@ -31,7 +31,7 @@ often limited to a few processes per CPU.  With Eventlet you can efficiently
 spawn hundreds, or thousands of green threads.  In an informal test with a
 feed hub system the Eventlet pool could fetch and process hundreds of feeds
 every second, while the prefork pool spent 14 seconds processing 100
-feeds.  Note that is one of the applications evented I/O is especially good
+feeds.  Note that this is one of the applications async I/O is especially good
 at (asynchronous HTTP requests).  You may want a mix of both Eventlet and
 prefork workers, and route tasks according to compatibility or
 what works best.

+ 14 - 14
docs/userguide/extending.rst

@@ -195,7 +195,7 @@ Attributes
 
     This is only defined if the ``statedb`` argument is enabled.
 
-    Your worker bootstep must require the Statedb bootstep to use this:
+    Your worker bootstep must require the ``Statedb`` bootstep to use this:
 
     .. code-block:: python
 
@@ -223,7 +223,7 @@ Attributes
 .. attribute:: autoreloader
 
     :class:`~celery.worker.autoreloder.Autoreloader` used to automatically
-    reload use code when the filesystem changes.
+    reload user code when the file-system changes.
 
     This is only defined if the ``autoreload`` argument is enabled.
     Your worker bootstep must require the `Autoreloader` bootstep to use this;
@@ -516,7 +516,7 @@ Attributes
 
 .. attribute:: task_buckets
 
-    A :class:`~collections.defaultdict` used to lookup the rate limit for
+    A :class:`~collections.defaultdict` used to look-up the rate limit for
     a task by type.
     Entries in this dict may be None (for no limit) or a
     :class:`~kombu.utils.limits.TokenBucket` instance implementing
@@ -648,7 +648,7 @@ logs:
     <Consumer: w@example.com (terminating)> is shutting down
 
 The ``print`` statements will be redirected to the logging subsystem after
-the worker has been initialized, so the "is starting" lines are timestamped.
+the worker has been initialized, so the "is starting" lines are time-stamped.
 You may notice that this does no longer happen at shutdown, this is because
 the ``stop`` and ``shutdown`` methods are called inside a *signal handler*,
 and it's not safe to use logging inside such a handler.
@@ -769,7 +769,7 @@ Preload options
 ~~~~~~~~~~~~~~~
 
 The :program:`celery` umbrella command supports the concept of 'preload
-options', which are special options passed to all subcommands and parsed
+options', which are special options passed to all sub-commands and parsed
 outside of the main parsing step.
 
 The list of default preload options can be found in the API reference:
@@ -805,11 +805,11 @@ New commands can be added to the :program:`celery` umbrella command by using
     http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html
 
 
-Entry-points is special metadata that can be added to your packages ``setup.py`` program,
+Entry-points are special meta-data that can be added to your package's ``setup.py`` program,
 and then after installation, read from the system using the :mod:`pkg_resources` module.
 
 Celery recognizes ``celery.commands`` entry-points to install additional
-subcommands, where the value of the entry-point must point to a valid subclass
+sub-commands, where the value of the entry-point must point to a valid subclass
 of :class:`celery.bin.base.Command`.  There is limited documentation,
 unfortunately, but you can find inspiration from the various commands in the
 :mod:`celery.bin` package.
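
As a hedged sketch, a third-party package could register such a
sub-command from its ``setup.py`` (distribution, module and class names
here are hypothetical):

.. code-block:: python

    from setuptools import setup

    setup(
        name='my-celery-commands',
        packages=['my_celery_commands'],
        entry_points={
            'celery.commands': [
                'mycmd = my_celery_commands.command:MyCommand',
            ],
        },
    )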
@@ -870,7 +870,7 @@ Worker API
 .. versionadded:: 3.0
 
 The worker uses asynchronous I/O when the amqp or redis broker transports are
-used.  The eventual goal is for all transports to use the eventloop, but that
+used.  The eventual goal is for all transports to use the event-loop, but that
 will take some time so other transports still use a threading-based solution.
 
 .. method:: hub.add(fd, callback, flags)
@@ -881,12 +881,12 @@ will take some time so other transports still use a threading-based solution.
     Add callback to be called when ``fd`` is readable.
 
     The callback will stay registered until explicitly removed using
-    :meth:`hub.remove(fd) <hub.remove>`, or the fd is automatically discarded
-    because it's no longer valid.
+    :meth:`hub.remove(fd) <hub.remove>`, or the file descriptor is
+    automatically discarded because it's no longer valid.
 
-    Note that only one callback can be registered for any given fd at a time,
-    so calling ``add`` a second time will remove any callback that
-    was previously registered for that fd.
+    Note that only one callback can be registered for any given
+    file descriptor at a time, so calling ``add`` a second time will remove
+    any callback that was previously registered for that file descriptor.
 
     A file descriptor is any file-like object that supports the ``fileno``
     method, or it can be the file descriptor number (int).
@@ -898,7 +898,7 @@ will take some time so other transports still use a threading-based solution.
 
 .. method:: hub.remove(fd)
 
-    Remove all callbacks for ``fd`` from the loop.
+    Remove all callbacks for file descriptor ``fd`` from the loop.
 
 Timer - Scheduling events
 -------------------------

+ 13 - 14
docs/userguide/monitoring.rst

@@ -50,8 +50,8 @@ Commands
   Also all known tasks will be automatically added to locals (unless the
   :option:`--without-tasks <celery shell --without-tasks>` flag is set).
 
-  Uses Ipython, bpython, or regular python in that order if installed.
-  You can force an implementation using
+  Uses :pypi:`IPython`, :pypi:`bpython`, or regular python in that order if
+  installed.  You can force an implementation using
   :option:`--ipython <celery shell --ipython>`,
   :option:`--bpython <celery shell --bpython>`, or
   :option:`--python <celery shell --python>`.
@@ -181,7 +181,7 @@ Flower: Real-time Celery web-monitor
 Flower is a real-time web based monitor and administration tool for Celery.
 It is under active development, but is already an essential tool.
 Being the recommended monitor for Celery, it obsoletes the Django-Admin
-monitor, celerymon and the ncurses based monitor.
+monitor, ``celerymon`` and the ``ncurses`` based monitor.
 
 Flower is pronounced like "flow", but you can also use the botanical version
 if you prefer.
@@ -192,7 +192,7 @@ Features
 - Real-time monitoring using Celery Events
 
     - Task progress and history
-    - Ability to show task details (arguments, start time, runtime, and more)
+    - Ability to show task details (arguments, start time, run-time, and more)
     - Graphs and statistics
 
 - Remote Control
@@ -438,21 +438,20 @@ Munin
 This is a list of known Munin plug-ins that can be useful when
 maintaining a Celery cluster.
 
-* rabbitmq-munin: Munin plug-ins for RabbitMQ.
+* ``rabbitmq-munin``: Munin plug-ins for RabbitMQ.
 
     https://github.com/ask/rabbitmq-munin
 
-* celery_tasks: Monitors the number of times each task type has
+* ``celery_tasks``: Monitors the number of times each task type has
   been executed (requires `celerymon`).
 
     http://exchange.munin-monitoring.org/plugins/celery_tasks-2/details
 
-* celery_task_states: Monitors the number of tasks in each state
+* ``celery_task_states``: Monitors the number of tasks in each state
   (requires `celerymon`).
 
     http://exchange.munin-monitoring.org/plugins/celery_tasks/details
 
-
 .. _monitoring-events:
 
 Events
@@ -569,7 +568,7 @@ To process events in real-time you need the following
 
   It encapsulates solutions for many common things, like checking if a
   worker is still alive (by verifying heartbeats), merging event fields
-  together as events come in, making sure timestamps are in sync, and so on.
+  together as events come in, making sure time-stamps are in sync, and so on.
 
 
 Combining these you can easily process events in real-time:
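
A sketch of what that combination can look like (the handler logic is
illustrative; the receiver and state objects are the ones described
above):

.. code-block:: python

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')

    def my_monitor(app):
        state = app.events.State()

        def announce_failed_tasks(event):
            state.event(event)
            # task names are only sent with -received events, so the
            # in-memory state keeps track of them for us
            task = state.tasks.get(event['uuid'])
            print('TASK FAILED: %s[%s]' % (task.name, task.uuid))

        with app.connection() as connection:
            recv = app.events.Receiver(connection, handlers={
                'task-failed': announce_failed_tasks,
                '*': state.event,
            })
            recv.capture(limit=None, timeout=None, wakeup=True)

    my_monitor(app)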
@@ -605,7 +604,7 @@ Combining these you can easily process events in real-time:
 
 .. note::
 
-    The wakeup argument to ``capture`` sends a signal to all workers
+    The ``wakeup`` argument to ``capture`` sends a signal to all workers
     to force them to send a heartbeat.  This way you can immediately see
     workers when the monitor starts.
 
@@ -689,7 +688,7 @@ task-succeeded
 
 Sent if the task executed successfully.
 
-Runtime is the time it took to execute the task using the pool.
+Run-time is the time it took to execute the task using the pool.
 (Starting from when the task is sent to the worker pool, and ending when
 pool result handler callback is called).
 
@@ -709,7 +708,7 @@ task-rejected
 
 :signature: ``task-rejected(uuid, requeued)``
 
-The task was rejected by the worker, possibly to be requeued or moved to a
+The task was rejected by the worker, possibly to be re-queued or moved to a
 dead letter queue.
 
 .. event:: task-revoked
@@ -751,7 +750,7 @@ worker-online
 The worker has connected to the broker and is online.
 
 - `hostname`: Nodename of the worker.
-- `timestamp`: Event timestamp.
+- `timestamp`: Event time-stamp.
 - `freq`: Heartbeat frequency in seconds (float).
 - `sw_ident`: Name of worker software (e.g. ``py-celery``).
 - `sw_ver`: Software version (e.g. 2.2.0).
@@ -769,7 +768,7 @@ Sent every minute, if the worker has not sent a heartbeat in 2 minutes,
 it is considered to be offline.
 
 - `hostname`: Nodename of the worker.
-- `timestamp`: Event timestamp.
+- `timestamp`: Event time-stamp.
 - `freq`: Heartbeat frequency in seconds (float).
 - `sw_ident`: Name of worker software (e.g. ``py-celery``).
 - `sw_ver`: Software version (e.g. 2.2.0).

+ 5 - 5
docs/userguide/optimizing.rst

@@ -73,7 +73,7 @@ The broker connection pool is enabled by default since version 2.5.
 
 You can tweak the :setting:`broker_pool_limit` setting to minimize
 contention, and the value should be based on the number of
-active threads/greenthreads using broker connections.
+active threads/green-threads using broker connections.
 
 .. _optimizing-transient-queues:
 
@@ -142,7 +142,7 @@ or that the messages may not even fit in memory.
 
 The workers' default prefetch count is the
 :setting:`worker_prefetch_multiplier` setting multiplied by the number
-of concurrency slots[*]_ (processes/threads/greenthreads).
+of concurrency slots[*]_ (processes/threads/green-threads).
 
 If you have many tasks with a long duration you want
 the multiplier value to be 1, which means it will only reserve one
@@ -163,7 +163,7 @@ Reserve one task at a time
 --------------------------
 
 The task message is only deleted from the queue after the task is
-:term:`acknowledged`, so if the worker crashes before acknowleding the task,
+:term:`acknowledged`, so if the worker crashes before acknowledging the task,
 it can be redelivered to another worker (or the same after recovery).
 
 When using the default of early acknowledgment, having a prefetch multiplier setting
@@ -180,7 +180,7 @@ there are worker processes (10 unacknowledged tasks for
 
 That is possible, but not without also enabling
 :term:`late acknowledgment`.  Using this option over the
-default beahvior means a task that has already started executing will be
+default behavior means a task that has already started executing will be
 retried in the event of a power failure or the worker instance being killed
 abruptly, so this also means the task must be :term:`idempotent`
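
A hedged sketch of that combination, using the lowercase setting names:

.. code-block:: python

    task_acks_late = True
    worker_prefetch_multiplier = 1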
 
@@ -221,7 +221,7 @@ waiting for long running tasks to complete::
 
 The worker will send tasks to the process as long as the pipe buffer is
 writable.  The pipe buffer size varies based on the operating system: some may
-have a buffer as small as 64kb but on recent Linux versions the buffer
+have a buffer as small as 64KB but on recent Linux versions the buffer
 size is 1MB (can only be changed system wide).
 
 You can disable this prefetching behavior by enabling the

+ 6 - 5
docs/userguide/periodic-tasks.rst

@@ -182,7 +182,8 @@ Available Fields
 
     By default :class:`~datetime.timedelta` schedules are scheduled
     "by the clock". This means the frequency is rounded to the nearest
-    second, minute, hour or day depending on the period of the timedelta.
+    second, minute, hour or day depending on the period of the
+    :class:`~datetime.timedelta`.
 
     If `relative` is true the frequency is not rounded and will be
     relative to the time when :program:`celery beat` was started.
@@ -260,7 +261,7 @@ The syntax of these crontab expressions are very flexible.  Some examples:
 | ``crontab(0, 0,``                       | Execute on the first and third weeks of    |
 |         ``day_of_month='1-7,15-21')``   | the month.                                 |
 +-----------------------------------------+--------------------------------------------+
-| ``crontab(0, 0, day_of_month='11',``    | Execute on 11th of May every year.         |
+| ``crontab(0, 0, day_of_month='11',``    | Execute on the 11th of May every year.     |
 |          ``month_of_year='5')``         |                                            |
 +-----------------------------------------+--------------------------------------------+
 | ``crontab(0, 0,``                       | Execute on the first month of every        |
@@ -417,12 +418,12 @@ The default scheduler is :class:`celery.beat.PersistentScheduler`,
 which is simply keeping track of the last run times in a local database file
 (a :mod:`shelve`).
 
-`django-celery` also ships with a scheduler that stores the schedule in the
-Django database:
+:pypi:`django-celery` also ships with a scheduler that stores the schedule in
+the Django database:
 
 .. code-block:: console
 
     $ celery -A proj beat -S djcelery.schedulers.DatabaseScheduler
 
-Using `django-celery`'s scheduler you can add, modify and remove periodic
+Using :pypi:`django-celery`'s scheduler you can add, modify and remove periodic
 tasks from the Django Admin.
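
As a rough illustration, a schedule entry using one of the crontab
expressions from the table earlier (entry and task names are
hypothetical):

.. code-block:: python

    from celery.schedules import crontab

    app.conf.beat_schedule = {
        'spring-cleanup': {
            'task': 'tasks.cleanup',
            # the 11th of May every year, as in the crontab table above
            'schedule': crontab(0, 0, day_of_month='11', month_of_year='5'),
        },
    }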

+ 8 - 6
docs/userguide/routing.rst

@@ -42,9 +42,11 @@ With this route enabled import feed tasks will be routed to the
 (named `"celery"` for historical reasons).
 
 Alternatively, you can use glob pattern matching, or even regular expressions,
-to match all tasks in the ``feed.tasks`` namespace::
+to match all tasks in the ``feed.tasks`` name-space:
 
-    task_routes = {'feed.tasks.*': {'queue': 'feeds'}}
+.. code-block:: python
+
+    app.conf.task_routes = {'feed.tasks.*': {'queue': 'feeds'}}
 
 If the order in which the patterns are matched is important you should
 specify a tuple as the task router instead::
@@ -222,7 +224,7 @@ Special Routing Options
 
 RabbitMQ Message Priorities
 ---------------------------
-:supported transports: rabbitmq
+:supported transports: RabbitMQ
 
 .. versionadded:: 4.0
 
@@ -258,7 +260,7 @@ the content type of the message and its content encoding.  The
 content type is usually the serialization format used to serialize the
 message. The body contains the name of the task to execute, the
 task id (UUID), the arguments to apply it with and some additional
-metadata -- like the number of retries or an ETA.
+meta-data -- like the number of retries or an ETA.
 
 This is an example task message represented as a Python dictionary:
 
@@ -349,7 +351,7 @@ Topic exchanges
 ~~~~~~~~~~~~~~~
 
 Topic exchanges matches routing keys using dot-separated words, and the
-wildcard characters: ``*`` (matches a single word), and ``#`` (matches
+wild-card characters: ``*`` (matches a single word), and ``#`` (matches
 zero or more words).
 
 With routing keys like ``usa.news``, ``usa.weather``, ``norway.news`` and
@@ -652,7 +654,7 @@ Now the ``tasks.reload_cache`` task will be sent to every
 worker consuming from this queue.
 
 Here is another example of broadcast routing, this time with
-a celerybeat schedule:
+a :program:`celery beat` schedule:
 
 .. code-block:: python
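
    # A hedged sketch (exchange and task names assumed from the
    # broadcast example above):
    from datetime import timedelta

    app.conf.beat_schedule = {
        'test-task': {
            'task': 'tasks.reload_cache',
            'schedule': timedelta(seconds=300),
            'options': {'exchange': 'broadcast_tasks'},
        },
    }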
 

+ 2 - 2
docs/userguide/security.rst

@@ -32,7 +32,7 @@ By default, workers trust that the data they get from the broker has not
 been tampered with. See `Message Signing`_ for information on how to make
 the broker connection more trustworthy.
 
-The first line of defence should be to put a firewall in front of the broker,
+The first line of defense should be to put a firewall in front of the broker,
 allowing only white-listed machines to access it.
 
 Keep in mind that both firewall misconfiguration, and temporarily disabling
@@ -222,7 +222,7 @@ open source implementations, used to keep
 cryptographic hashes of files in the file-system, so that administrators
 can be alerted when they change. This way when the damage is done and your
 system has been compromised you can tell exactly what files intruders
-have changed  (password files, logs, backdoors, rootkits and so on).
+have changed  (password files, logs, back-doors, root-kits and so on).
 Often this is the only way you will be able to detect an intrusion.
 
 Some open source implementations include:

+ 170 - 132
docs/userguide/signals.rst

@@ -55,8 +55,9 @@ is published:
             info=info,
         ))
 
-Signals use the same implementation as django.core.dispatch. As a result other
-keyword parameters (e.g. signal) are passed to all signal handlers by default.
+Signals use the same implementation as :mod:`django.core.dispatch`.  As a
+result other keyword parameters (e.g. signal) are passed to all signal
+handlers by default.
 
 The best practice for signal handlers is to accept arbitrary keyword
 arguments (i.e. ``**kwargs``).  That way new celery versions can add additional
@@ -72,8 +73,8 @@ Task Signals
 
 .. signal:: before_task_publish
 
-before_task_publish
-~~~~~~~~~~~~~~~~~~~
+``before_task_publish``
+~~~~~~~~~~~~~~~~~~~~~~~
 .. versionadded:: 3.1
 
 Dispatched before a task is published.
@@ -83,44 +84,44 @@ Sender is the name of the task being sent.
 
 Provides arguments:
 
-* body
+* ``body``
 
     Task message body.
 
     This is a mapping containing the task message fields
     (see :ref:`message-protocol-task-v1`).
 
-* exchange
+* ``exchange``
 
     Name of the exchange to send to or a :class:`~kombu.Exchange` object.
 
-* routing_key
+* ``routing_key``
 
     Routing key to use when sending the message.
 
-* headers
+* ``headers``
 
     Application headers mapping (can be modified).
 
-* properties
+* ``properties``
 
     Message properties (can be modified)
 
-* declare
+* ``declare``
 
     List of entities (:class:`~kombu.Exchange`,
     :class:`~kombu.Queue` or :class:`~kombu.binding` to declare before
     publishing the message.  Can be modified.
 
-* retry_policy
+* ``retry_policy``
 
     Mapping of retry options.  Can be any argument to
     :meth:`kombu.Connection.ensure` and can be modified.
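
As a hedged sketch, a handler for this signal can mutate the outgoing
message before it is sent, e.g. injecting a custom header (the header
name here is hypothetical):

.. code-block:: python

    from celery.signals import before_task_publish

    @before_task_publish.connect
    def stamp_message(sender=None, headers=None, body=None, **kwargs):
        # ``headers`` is the application headers mapping described above
        # and may be modified in place
        headers['x-request-id'] = 'some-correlation-id'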
 
 .. signal:: after_task_publish
 
-after_task_publish
-~~~~~~~~~~~~~~~~~~
+``after_task_publish``
+~~~~~~~~~~~~~~~~~~~~~~
 
 Dispatched when a task has been sent to the broker.
 Note that this is executed in the process that sent the task.
@@ -129,30 +130,30 @@ Sender is the name of the task being sent.
 
 Provides arguments:
 
-* headers
+* ``headers``
 
     The task message headers, see :ref:`message-protocol-task-v2`
     and :ref:`message-protocol-task-v1`
     for a reference of possible fields that can be defined.
 
-* body
+* ``body``
 
     The task message body, see :ref:`message-protocol-task-v2`
     and :ref:`message-protocol-task-v1`
     for a reference of possible fields that can be defined.
 
-* exchange
+* ``exchange``
 
     Name of the exchange or :class:`~kombu.Exchange` object used.
 
-* routing_key
+* ``routing_key``
 
     Routing key used.
 
 .. signal:: task_prerun
 
-task_prerun
-~~~~~~~~~~~
+``task_prerun``
+~~~~~~~~~~~~~~~
 
 Dispatched before a task is executed.
 
@@ -160,22 +161,26 @@ Sender is the task object being executed.
 
 Provides arguments:
 
-* task_id
+* ``task_id``
+
     Id of the task to be executed.
 
-* task
+* ``task``
+
     The task being executed.
 
-* args
-    the tasks positional arguments.
+* ``args``
+
+    The task's positional arguments.
+
+* ``kwargs``
 
-* kwargs
     The task's keyword arguments.
 
 .. signal:: task_postrun
 
-task_postrun
-~~~~~~~~~~~~
+``task_postrun``
+~~~~~~~~~~~~~~~~
 
 Dispatched after a task has been executed.
 
@@ -183,29 +188,34 @@ Sender is the task object executed.
 
 Provides arguments:
 
-* task_id
+* ``task_id``
+
     Id of the task to be executed.
 
-* task
+* ``task``
+
     The task being executed.
 
-* args
+* ``args``
+
     The task's positional arguments.
 
-* kwargs
+* ``kwargs``
+
     The task's keyword arguments.
 
-* retval
+* ``retval``
+
     The return value of the task.
 
-* state
+* ``state``
 
     Name of the resulting state.
 
 .. signal:: task_retry
 
-task_retry
-~~~~~~~~~~
+``task_retry``
+~~~~~~~~~~~~~~
 
 Dispatched when a task will be retried.
 
@@ -213,16 +223,16 @@ Sender is the task object.
 
 Provides arguments:
 
-* request
+* ``request``
 
     The current task request.
 
-* reason
+* ``reason``
 
     Reason for retry (usually an exception instance, but can always be
     coerced to :class:`str`).
 
-* einfo
+* ``einfo``
 
     Detailed exception information, including traceback
     (a :class:`billiard.einfo.ExceptionInfo` object).
@@ -230,8 +240,8 @@ Provides arguments:
 
 .. signal:: task_success
 
-task_success
-~~~~~~~~~~~~
+``task_success``
+~~~~~~~~~~~~~~~~
 
 Dispatched when a task succeeds.
 
@@ -239,13 +249,13 @@ Sender is the task object executed.
 
 Provides arguments
 
-* result
+* ``result``
     Return value of the task.
 
 .. signal:: task_failure
 
-task_failure
-~~~~~~~~~~~~
+``task_failure``
+~~~~~~~~~~~~~~~~
 
 Dispatched when a task fails.
 
@@ -253,28 +263,34 @@ Sender is the task object executed.
 
 Provides arguments:
 
-* task_id
+* ``task_id``
+
     Id of the task.
 
-* exception
+* ``exception``
+
     Exception instance raised.
 
-* args
+* ``args``
+
     Positional arguments the task was called with.
 
-* kwargs
+* ``kwargs``
+
     Keyword arguments the task was called with.
 
-* traceback
+* ``traceback``
+
     Stack trace object.
 
-* einfo
+* ``einfo``
+
     The :class:`celery.datastructures.ExceptionInfo` instance.
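
For example, a sketch of a handler forwarding failures to an external
service (``report_error`` is a hypothetical helper):

.. code-block:: python

    from celery.signals import task_failure

    @task_failure.connect
    def task_failure_handler(sender=None, task_id=None, exception=None,
                             args=None, kwargs=None, traceback=None,
                             einfo=None, **extra):
        # report_error is a placeholder for your own error-reporting hook.
        report_error('task {0} raised {1!r}'.format(task_id, exception))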
 
 .. signal:: task_revoked
 
-task_revoked
-~~~~~~~~~~~~
+``task_revoked``
+~~~~~~~~~~~~~~~~
 
 Dispatched when a task is revoked/terminated by the worker.
 
@@ -282,7 +298,7 @@ Sender is the task object revoked/terminated.
 
 Provides arguments:
 
-* request
+* ``request``
 
     This is a :class:`~celery.worker.request.Request` instance, and not
     ``task.request``.   When using the prefork pool this signal
@@ -290,20 +306,23 @@ Provides arguments:
     and should not be used.  Use this object instead, which should have many
     of the same fields.
 
-* terminated
+* ``terminated``
+
     Set to :const:`True` if the task was terminated.
 
-* signum
+* ``signum``
+
     Signal number used to terminate the task. If this is :const:`None` and
     terminated is :const:`True` then :sig:`TERM` should be assumed.
 
-* expired
+* ``expired``
+
    Set to :const:`True` if the task expired.
 
 .. signal:: task_unknown
 
-task_unknown
-~~~~~~~~~~~~
+``task_unknown``
+~~~~~~~~~~~~~~~~
 
 Dispatched when a worker receives a message for a task that is not registered.
 
@@ -311,26 +330,26 @@ Sender is the worker :class:`~celery.worker.consumer.Consumer`.
 
 Provides arguments:
 
-* name
+* ``name``
 
    Name of task not found in registry.
 
-* id
+* ``id``
 
    The task id found in the message.
 
-* message
+* ``message``
 
     Raw message object.
 
-* exc
+* ``exc``
 
     The error that occurred.
 
 .. signal:: task_rejected
 
-task_rejected
-~~~~~~~~~~~~~
+``task_rejected``
+~~~~~~~~~~~~~~~~~
 
 Dispatched when a worker receives an unknown type of message to one of its
 task queues.
@@ -339,11 +358,11 @@ Sender is the worker :class:`~celery.worker.consumer.Consumer`.
 
 Provides arguments:
 
-* message
+* ``message``
 
    Raw message object.
 
-* exc
+* ``exc``
 
     The error that occurred (if any).
 
@@ -352,8 +371,8 @@ App Signals
 
 .. signal:: import_modules
 
-import_modules
-~~~~~~~~~~~~~~
+``import_modules``
+~~~~~~~~~~~~~~~~~~
 
This signal is sent when a program (worker, beat, shell, etc.) asks
for modules in the :setting:`include` and :setting:`imports`
@@ -366,8 +385,8 @@ Worker Signals
 
 .. signal:: celeryd_after_setup
 
-celeryd_after_setup
-~~~~~~~~~~~~~~~~~~~
+``celeryd_after_setup``
+~~~~~~~~~~~~~~~~~~~~~~~
 
 This signal is sent after the worker instance is set up, but before it
 calls run.  This means that any queues from the :option:`celery worker -Q`
@@ -389,22 +408,24 @@ used to route a task to any specific worker:
 
 Provides arguments:
 
-* sender
-  Nodename of the worker.
+* ``sender``
+
+  Node name of the worker.
+
+* ``instance``
 
-* instance
     This is the :class:`celery.apps.worker.Worker` instance to be initialized.
     Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been
     set so far, and the rest of ``__init__`` has not been executed.
 
-* conf
-    The configuration of the current app.
+* ``conf``
 
+    The configuration of the current app.
 
 .. signal:: celeryd_init
 
-celeryd_init
-~~~~~~~~~~~~
+``celeryd_init``
+~~~~~~~~~~~~~~~~
 
 This is the first signal sent when :program:`celery worker` starts up.
 The ``sender`` is the host name of the worker, so this signal can be used
@@ -434,40 +455,43 @@ sender when you connect:
 
 Provides arguments:
 
-* sender
+* ``sender``
+
   Nodename of the worker.
 
-* instance
+* ``instance``
+
     This is the :class:`celery.apps.worker.Worker` instance to be initialized.
     Note that only the :attr:`app` and :attr:`hostname` (nodename) attributes have been
     set so far, and the rest of ``__init__`` has not been executed.
 
-* conf
+* ``conf``
+
     The configuration of the current app.
 
-* options
+* ``options``
 
     Options passed to the worker from command-line arguments (including
     defaults).
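
A minimal sketch that configures a single worker by connecting with a
specific sender (the node name and setting value are examples):

.. code-block:: python

    from celery.signals import celeryd_init

    @celeryd_init.connect(sender='worker12@example.com')
    def configure_worker12(conf=None, **kwargs):
        conf.task_default_rate_limit = '10/m'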
 
 .. signal:: worker_init
 
-worker_init
-~~~~~~~~~~~
+``worker_init``
+~~~~~~~~~~~~~~~
 
 Dispatched before the worker is started.
 
 .. signal:: worker_ready
 
-worker_ready
-~~~~~~~~~~~~
+``worker_ready``
+~~~~~~~~~~~~~~~~
 
 Dispatched when the worker is ready to accept work.
 
 .. signal:: worker_process_init
 
-worker_process_init
-~~~~~~~~~~~~~~~~~~~
+``worker_process_init``
+~~~~~~~~~~~~~~~~~~~~~~~
 
 Dispatched in all pool child processes when they start.
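
This is a natural place to set up per-process resources, as anything
inherited from the parent process cannot safely be shared.  A sketch
(``setup_connection_pool`` is a hypothetical helper):

.. code-block:: python

    from celery.signals import worker_process_init

    @worker_process_init.connect
    def init_worker(**kwargs):
        # re-establish connections in every child process instead of
        # sharing those inherited from the parent.
        setup_connection_pool()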
 
@@ -477,8 +501,8 @@ it failed to start.
 
 .. signal:: worker_process_shutdown
 
-worker_process_shutdown
-~~~~~~~~~~~~~~~~~~~~~~~
+``worker_process_shutdown``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Dispatched in all pool child processes just before they exit.
 
@@ -489,18 +513,18 @@ interrupted during.
 
 Provides arguments:
 
-* pid
+* ``pid``
 
    The pid of the child process that is about to shut down.
 
-* exitcode
+* ``exitcode``
 
     The exitcode that will be used when the child process exits.
 
 .. signal:: worker_shutdown
 
-worker_shutdown
-~~~~~~~~~~~~~~~
+``worker_shutdown``
+~~~~~~~~~~~~~~~~~~~
 
 Dispatched when the worker is about to shut down.
 
@@ -509,16 +533,16 @@ Beat Signals
 
 .. signal:: beat_init
 
-beat_init
-~~~~~~~~~
+``beat_init``
+~~~~~~~~~~~~~
 
 Dispatched when :program:`celery beat` starts (either standalone or embedded).
 Sender is the :class:`celery.beat.Service` instance.
 
 .. signal:: beat_embedded_init
 
-beat_embedded_init
-~~~~~~~~~~~~~~~~~~
+``beat_embedded_init``
+~~~~~~~~~~~~~~~~~~~~~~
 
 Dispatched in addition to the :signal:`beat_init` signal when :program:`celery
 beat` is started as an embedded process.  Sender is the
@@ -529,8 +553,8 @@ Eventlet Signals
 
 .. signal:: eventlet_pool_started
 
-eventlet_pool_started
-~~~~~~~~~~~~~~~~~~~~~
+``eventlet_pool_started``
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Sent when the eventlet pool has been started.
 
@@ -538,8 +562,8 @@ Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance.
 
 .. signal:: eventlet_pool_preshutdown
 
-eventlet_pool_preshutdown
-~~~~~~~~~~~~~~~~~~~~~~~~~
+``eventlet_pool_preshutdown``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
Sent when the worker shuts down, just before the eventlet pool
is requested to wait for remaining workers.
@@ -548,8 +572,8 @@ Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance.
 
 .. signal:: eventlet_pool_postshutdown
 
-eventlet_pool_postshutdown
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+``eventlet_pool_postshutdown``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
Sent when the pool has been joined and the worker is ready to shut down.
 
@@ -557,8 +581,8 @@ Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance.
 
 .. signal:: eventlet_pool_apply
 
-eventlet_pool_apply
-~~~~~~~~~~~~~~~~~~~
+``eventlet_pool_apply``
+~~~~~~~~~~~~~~~~~~~~~~~
 
 Sent whenever a task is applied to the pool.
 
@@ -566,15 +590,15 @@ Sender is the :class:`celery.concurrency.eventlet.TaskPool` instance.
 
 Provides arguments:
 
-* target
+* ``target``
 
     The target function.
 
-* args
+* ``args``
 
     Positional arguments.
 
-* kwargs
+* ``kwargs``
 
     Keyword arguments.
 
@@ -583,8 +607,8 @@ Logging Signals
 
 .. signal:: setup_logging
 
-setup_logging
-~~~~~~~~~~~~~
+``setup_logging``
+~~~~~~~~~~~~~~~~~
 
 Celery won't configure the loggers if this signal is connected,
 so you can use this to completely override the logging configuration
@@ -596,66 +620,80 @@ Celery then you can use the :signal:`after_setup_logger` and
 
 Provides arguments:
 
-* loglevel
+* ``loglevel``
+
     The level of the logging object.
 
-* logfile
+* ``logfile``
+
     The name of the logfile.
 
-* format
+* ``format``
+
     The log format string.
 
-* colorize
+* ``colorize``
+
     Specify if log messages are colored or not.
 
 .. signal:: after_setup_logger
 
-after_setup_logger
-~~~~~~~~~~~~~~~~~~
+``after_setup_logger``
+~~~~~~~~~~~~~~~~~~~~~~
 
 Sent after the setup of every global logger (not task loggers).
 Used to augment logging configuration.
 
 Provides arguments:
 
-* logger
+* ``logger``
+
     The logger object.
 
-* loglevel
+* ``loglevel``
+
     The level of the logging object.
 
-* logfile
+* ``logfile``
+
     The name of the logfile.
 
-* format
+* ``format``
+
     The log format string.
 
-* colorize
+* ``colorize``
+
     Specify if log messages are colored or not.
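
For example, a sketch attaching an extra handler to the global logger
(the file name is illustrative):

.. code-block:: python

    import logging

    from celery.signals import after_setup_logger

    @after_setup_logger.connect
    def augment_logging(logger=None, loglevel=None, logfile=None,
                        format=None, colorize=None, **kwargs):
        handler = logging.FileHandler('audit.log')
        handler.setFormatter(logging.Formatter(format))
        logger.addHandler(handler)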
 
 .. signal:: after_setup_task_logger
 
-after_setup_task_logger
-~~~~~~~~~~~~~~~~~~~~~~~
+``after_setup_task_logger``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Sent after the setup of every single task logger.
 Used to augment logging configuration.
 
 Provides arguments:
 
-* logger
+* ``logger``
+
     The logger object.
 
-* loglevel
+* ``loglevel``
+
     The level of the logging object.
 
-* logfile
+* ``logfile``
+
     The name of the logfile.
 
-* format
+* ``format``
+
     The log format string.
 
-* colorize
+* ``colorize``
+
     Specify if log messages are colored or not.
 
 Command signals
@@ -663,8 +701,8 @@ Command signals
 
 .. signal:: user_preload_options
 
-user_preload_options
-~~~~~~~~~~~~~~~~~~~~
+``user_preload_options``
+~~~~~~~~~~~~~~~~~~~~~~~~
 
This signal is sent after any of the Celery command-line programs
has finished parsing the user preload options.
@@ -696,11 +734,11 @@ a :class:`~celery.bin.celery.CeleryCommand`) object).
 
 Provides arguments:
 
-* app
+* ``app``
 
     The app instance.
 
-* options
+* ``options``
 
     Mapping of the parsed user preload options (with default values).
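
A sketch of adding a preload option and reacting to it
(``--monitoring`` and ``enable_monitoring`` are illustrative):

.. code-block:: python

    from celery import Celery, signals
    from celery.bin.base import Option

    app = Celery()
    app.user_options['preload'].add(
        Option('--monitoring', action='store_true',
               help='Enable our external monitoring utility.'),
    )

    @signals.user_preload_options.connect
    def handle_preload_options(options=None, **kwargs):
        if options['monitoring']:
            enable_monitoring()  # hypothetical helper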
 
@@ -709,7 +747,7 @@ Deprecated Signals
 
 .. signal:: task_sent
 
-task_sent
-~~~~~~~~~
+``task_sent``
+~~~~~~~~~~~~~
 
This signal is deprecated; please use :signal:`after_task_publish` instead.

+ 32 - 31
docs/userguide/tasks.rst

@@ -152,7 +152,7 @@ For example:
     >>> add.name
     'sum-of-two-numbers'
 
-A best practice is to use the module name as a namespace,
+A best practice is to use the module name as a name-space,
 this way names won't collide if there's already a task with that name
 defined in another module.
 
@@ -170,7 +170,7 @@ You can tell the name of the task by investigating its name attribute:
     'tasks.add'
 
 Which is exactly the name that would have been generated anyway,
-if the module name is "tasks.py":
+if the module name is :file:`tasks.py`:
 
 :file:`tasks.py`:
 
@@ -192,11 +192,12 @@ Automatic naming and relative imports
 Relative imports and automatic name generation do not go well together,
 so if you're using relative imports you should set the name explicitly.
 
-For example if the client imports the module "myapp.tasks" as ".tasks", and
-the worker imports the module as "myapp.tasks", the generated names won't match
-and an :exc:`~@NotRegistered` error will be raised by the worker.
+For example if the client imports the module ``"myapp.tasks"``
+as ``".tasks"``, and the worker imports the module as ``"myapp.tasks"``,
+the generated names won't match and an :exc:`~@NotRegistered` error will
+be raised by the worker.
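
A sketch of setting the name explicitly to avoid the mismatch:

.. code-block:: python

    @app.task(name='myapp.tasks.add')
    def add(x, y):
        return x + y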
 
-This is also the case when using Django and using `project.myapp`-style
+This is also the case when using Django and using ``project.myapp``-style
 naming in ``INSTALLED_APPS``:
 
 .. code-block:: python
@@ -358,7 +359,7 @@ The request defines the following attributes:
 
 :errback: A list of signatures to be called if this task fails.
 
-:utc: Set to true the caller has utc enabled (:setting:`enable_utc`).
+:utc: Set to true if the caller has UTC enabled (:setting:`enable_utc`).
 
 
 .. versionadded:: 3.1
@@ -768,14 +769,14 @@ General
 
     If :const:`True` the task will report its status as "started"
     when the task is executed by a worker.
-    The default value is :const:`False` as the normal behaviour is to not
+    The default value is :const:`False` as the normal behavior is to not
     report that level of granularity. Tasks are either pending, finished,
     or waiting to be retried.  Having a "started" status can be useful for
     when there are long running tasks and there is a need to report which
     task is currently running.
 
     The host name and process id of the worker executing the task
-    will be available in the state metadata (e.g. `result.info['pid']`)
+    will be available in the state meta-data (e.g. `result.info['pid']`).
 
     The global default can be overridden by the
     :setting:`task_track_started` setting.
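
A sketch enabling this for a single task (the task is illustrative):

.. code-block:: python

    @app.task(track_started=True)
    def process_upload(path):
        ...
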
@@ -798,7 +799,7 @@ There are several *result backends* to choose from, and they all have
 different strengths and weaknesses (see :ref:`task-result-backends`).
 
 During its lifetime a task will transition through several possible states,
-and each state may have arbitrary metadata attached to it.  When a task
+and each state may have arbitrary meta-data attached to it.  When a task
 moves into a new state the previous state is
forgotten about, but some transitions can be deduced (e.g. a task now
in the :state:`FAILED` state is implied to have been in the
@@ -821,7 +822,7 @@ Result Backends
 If you want to keep track of tasks or need the return values, then Celery
 must store or send the states somewhere so that they can be retrieved later.
 There are several built-in result backends to choose from: SQLAlchemy/Django ORM,
-Memcached, RabbitMQ/QPid (rpc), MongoDB, and Redis -- or you can define your own.
+Memcached, RabbitMQ/QPid (``rpc``), MongoDB, and Redis -- or you can define your own.
 
 No backend works well for every use case.
 You should read about the strengths and weaknesses of each backend, and choose
@@ -886,8 +887,8 @@ STARTED
 Task has been started.
 Not reported by default, to enable please see :attr:`@Task.track_started`.
 
-:metadata: `pid` and `hostname` of the worker process executing
-           the task.
+:meta-data: `pid` and `hostname` of the worker process executing
+            the task.
 
 .. state:: SUCCESS
 
@@ -896,7 +897,7 @@ SUCCESS
 
 Task has been successfully executed.
 
-:metadata: `result` contains the return value of the task.
+:meta-data: `result` contains the return value of the task.
 :propagates: Yes
 :ready: Yes
 
@@ -907,9 +908,9 @@ FAILURE
 
 Task execution resulted in failure.
 
-:metadata: `result` contains the exception occurred, and `traceback`
-           contains the backtrace of the stack at the point when the
-           exception was raised.
+:meta-data: `result` contains the exception that occurred, and `traceback`
+            contains the backtrace of the stack at the point when the
+            exception was raised.
 :propagates: Yes
 
 .. state:: RETRY
@@ -919,9 +920,9 @@ RETRY
 
 Task is being retried.
 
-:metadata: `result` contains the exception that caused the retry,
-           and `traceback` contains the backtrace of the stack at the point
-           when the exceptions was raised.
+:meta-data: `result` contains the exception that caused the retry,
+            and `traceback` contains the backtrace of the stack at the point
+            when the exception was raised.
 :propagates: No
 
 .. state:: REVOKED
@@ -958,7 +959,7 @@ Use :meth:`~@Task.update_state` to update a task's state:.
 Here I created the state `"PROGRESS"`, which tells any application
 aware of this state that the task is currently in progress, and also where
 it is in the process by having `current` and `total` counts as part of the
-state metadata.  This can then be used to create e.g. progress bars.
+state meta-data.  This can then be used to create e.g. progress bars.
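
On the caller side the custom state and its meta-data can be read from
the result object (``long_task`` is a hypothetical task that reports
progress as above):

.. code-block:: python

    result = long_task.delay()
    # while the task runs, result.state is 'PROGRESS' and result.info
    # holds the meta-data, e.g. {'current': 50, 'total': 100}.
    print(result.state, result.info)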
 
 .. _pickling_exceptions:
 
@@ -1081,7 +1082,7 @@ messages are redelivered to.
 
 .. _`Dead Letter Exchanges`: http://www.rabbitmq.com/dlx.html
 
-Reject can also be used to requeue messages, but please be very careful
+Reject can also be used to re-queue messages, but please be very careful
 when using this as it can easily result in an infinite message loop.
 
 Example using reject when a task causes an out of memory condition:
@@ -1110,7 +1111,7 @@ Example using reject when a task causes an out of memory condition:
         except Exception as exc:
             raise self.retry(exc, countdown=10)
 
-Example requeuing the message:
+Example requeueing the message:
 
 .. code-block:: python
 
@@ -1334,7 +1335,7 @@ The default loader imports any modules listed in the
 :setting:`imports` setting.
 
 The entity responsible for registering your task in the registry is the
-metaclass: :class:`~celery.task.base.TaskType`.
+meta-class: :class:`~celery.task.base.TaskType`.
 
 If you want to register your task manually you can mark the
 task as :attr:`~@Task.abstract`:
@@ -1649,8 +1650,8 @@ I have a Django blog application allowing comments
 on blog posts.  I'll describe parts of the models/views and tasks for this
 application.
 
-blog/models.py
---------------
+``blog/models.py``
+------------------
 
 The comment model looks like this:
 
@@ -1681,8 +1682,8 @@ to the database, then I launch the spam filter task in the background.
 
 .. _task-example-blog-views:
 
-blog/views.py
--------------
+``blog/views.py``
+-----------------
 
 .. code-block:: python
 
@@ -1721,7 +1722,7 @@ blog/views.py
 
 
 To filter spam in comments I use `Akismet`_, the service
-used to filter spam in comments posted to the free weblog platform
+used to filter spam in comments posted to the free blog platform
 `Wordpress`.  `Akismet`_ is free for personal use, but for commercial use you
 need to pay.  You have to sign up to their service to get an API key.
 
@@ -1730,8 +1731,8 @@ To make API calls to `Akismet`_ I use the `akismet.py`_ library written by
 
 .. _task-example-blog-tasks:
 
-blog/tasks.py
--------------
+``blog/tasks.py``
+-----------------
 
 .. code-block:: python
 

+ 22 - 20
docs/userguide/workers.rst

@@ -55,7 +55,7 @@ these will expand to:
     - ``worker1.%n`` -> ``worker1.george``
     - ``worker1.%d`` -> ``worker1.example.com``
 
-.. admonition:: Note for :program:`supervisord` users.
+.. admonition:: Note for :pypi:`supervisor` users.
 
    The ``%`` sign must be escaped by adding a second one: `%%h`.
 
@@ -177,7 +177,7 @@ filename depending on the process that will eventually need to open the file.
 This can be used to specify one log file per child process.
 
 Note that the numbers will stay within the process limit even if processes
-exit or if autoscale/maxtasksperchild/time limits are used.  I.e. the number
+exit or if autoscale/``maxtasksperchild``/time limits are used.  I.e. the number
 is the *process index* not the process count or pid.
 
 * ``%i`` - Pool process index or 0 if MainProcess.
@@ -330,7 +330,7 @@ the `terminate` option is set.
     it's for terminating the process that is executing the task, and that
     process may have already started processing another task at the point
     when the signal is sent, so for this reason you must never call this
-    programatically.
+    programmatically.
 
 If `terminate` is set the worker child process processing the task
 will be terminated.  The default signal sent is `TERM`, but you can
@@ -428,7 +428,7 @@ Time Limits
 
     The time limit is set in two values, `soft` and `hard`.
     The soft time limit allows the task to catch an exception
-    to clean up before it is killed: the hard timeout is not catchable
+    to clean up before it is killed: the hard timeout is not catch-able
     and force terminates the task.
 
 A single task can potentially run forever, if you have lots of tasks
@@ -463,8 +463,8 @@ Time limits can also be set using the :setting:`task_time_limit` /
     platforms that do not support the ``SIGUSR1`` signal.
 
 
-Changing time limits at runtime
--------------------------------
+Changing time limits at run-time
+--------------------------------
 .. versionadded:: 2.3
 
 :broker support: *amqp, redis*
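
A sketch of such a call (the task name and limits are examples):

.. code-block:: python

    >>> app.control.time_limit('tasks.crawl_the_web',
    ...                        soft=60, hard=120, reply=True)
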
@@ -491,8 +491,8 @@ Rate Limits
 
 .. control:: rate_limit
 
-Changing rate-limits at runtime
--------------------------------
+Changing rate-limits at run-time
+--------------------------------
 
 Example changing the rate limit for the `myapp.mytask` task to execute
 at most 200 tasks of that type every minute:
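
.. code-block:: python

    >>> app.control.rate_limit('myapp.mytask', '200/m')

(a sketch of the broadcast call; see the :control:`rate_limit` command
above)
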
@@ -594,7 +594,7 @@ By default it will consume from all queues defined in the
 :setting:`task_queues` setting (which if not specified defaults to the
 queue named ``celery``).
 
-You can specify what queues to consume from at startup, by giving a comma
+You can specify what queues to consume from at start-up, by giving a comma
 separated list of queues to the :option:`-Q <celery worker -Q>` option:
 
 .. code-block:: console
@@ -607,7 +607,7 @@ automatically generate a new queue for you (depending on the
 :setting:`task_create_missing_queues` option).
 
 You can also tell the worker to start and stop consuming from a queue at
-runtime using the remote control commands :control:`add_consumer` and
+run-time using the remote control commands :control:`add_consumer` and
 :control:`cancel_consumer`.
 
 .. control:: add_consumer
@@ -732,8 +732,8 @@ This can also be done programmatically by using the
 
 .. _worker-autoreloading:
 
-Autoreloading
-=============
+Auto-reloading
+==============
 
 .. versionadded:: 2.5
 
@@ -761,7 +761,7 @@ effectively reloading the code.
 File system notification backends are pluggable, and it comes with three
 implementations:
 
-* inotify (Linux)
+* ``inotify`` (Linux)
 
     Used if the :pypi:`pyinotify` library is installed.
     If you are running on Linux this is the recommended implementation,
@@ -772,9 +772,9 @@ implementations:
 
         $ pip install pyinotify
 
-* kqueue (OS X/BSD)
+* ``kqueue`` (OS X/BSD)
 
-* stat
+* ``stat``
 
     The fallback implementation simply polls the files using ``stat`` and is very
     expensive.
@@ -1011,9 +1011,11 @@ The output will include the following fields:
        Some transports expect the host name to be a URL; this applies,
        for example, to SQLAlchemy where the host name part is the connection URI:
 
+        .. code-block:: text
+
             redis+socket:///tmp/redis.sock
 
-        In this example the uri prefix will be ``redis``.
+        In this example the URI-prefix will be ``redis``.
 
     * ``userid``
 
@@ -1046,7 +1048,7 @@ The output will include the following fields:
 
     * ``processes``
 
-        List of pids (or thread-id's).
+        List of PIDs (or thread-id's).
 
     * ``put-guarded-by-semaphore``
 
@@ -1086,12 +1088,12 @@ The output will include the following fields:
 
     * ``idrss``
 
-        Amount of unshared memory used for data (in kilobytes times ticks of
+        Amount of non-shared memory used for data (in kilobytes times ticks of
         execution)
 
     * ``isrss``
 
-        Amount of unshared memory used for stack space (in kilobytes times
+        Amount of non-shared memory used for stack space (in kilobytes times
         ticks of execution)
 
     * ``ixrss``
@@ -1145,7 +1147,7 @@ The output will include the following fields:
 - ``total``
 
     Map of task names and the total number of tasks with that type
-    the worker has accepted since startup.
+    the worker has accepted since start-up.
 
 
 Additional Commands

+ 44 - 41
docs/whatsnew-3.1.rst

@@ -3,7 +3,7 @@
 ===========================================
  What's new in Celery 3.1 (Cipater)
 ===========================================
-:Author: Ask Solem (ask at celeryproject.org)
+:Author: Ask Solem (``ask at celeryproject.org``)
 
 .. sidebar:: Change history
 
@@ -102,7 +102,7 @@ requiring the ``2to3`` porting tool.
 .. note::
 
     This is also the last version to support Python 2.6! From Celery 4.0 and
-    onwards Python 2.7 or later will be required.
+    onwards Python 2.7 or later will be required.
 
 .. _last-version-to-enable-pickle:
 
@@ -210,9 +210,9 @@ implementation.
     If a child process is killed or exits mysteriously the pool previously
     had to wait for 30 seconds before marking the task with a
     :exc:`~celery.exceptions.WorkerLostError`.  It had to do this because
-    the outqueue was shared between all processes, and the pool could not
+    the out-queue was shared between all processes, and the pool could not
     be certain whether the process completed the task or not.  So an arbitrary
-    timeout of 30 seconds was chosen, as it was believed that the outqueue
+    timeout of 30 seconds was chosen, as it was believed that the out-queue
     would have been drained by this point.
 
     This timeout is no longer necessary, and so the task can be marked as
@@ -229,7 +229,7 @@ Caveats
 .. topic:: Long running tasks
 
     The new pool will send tasks to a child process as long as the process
-    inqueue is writable, and since the socket is buffered this means
+    in-queue is writable, and since the socket is buffered this means
     that the processes are, in effect, prefetching tasks.
 
     This benefits performance but it also means that other tasks may be stuck
@@ -246,7 +246,7 @@ Caveats
         # will not start until T1 returns
 
     The buffer size varies based on the operating system: some may
-    have a buffer as small as 64kb but on recent Linux versions the buffer
+    have a buffer as small as 64KB but on recent Linux versions the buffer
     size is 1MB (can only be changed system wide).
 
     You can disable this prefetching behavior by enabling the
@@ -262,7 +262,7 @@ Caveats
 .. topic:: Max tasks per child
 
     If a process exits and pool prefetch is enabled the worker may have
-    already written many tasks to the process inqueue, and these tasks
+    already written many tasks to the process in-queue, and these tasks
     must then be moved back and rewritten to a new process.
 
     This is very expensive if you have the
@@ -282,7 +282,7 @@ in core and new Django users coming to Celery are now expected
 to use the new API directly.
 
 The Django community has a convention where there's a separate
-django-x package for every library, acting like a bridge between
+``django-x`` package for every library, acting like a bridge between
 Django and the library.
 
 Having a separate project for Django users has been a pain for Celery,
@@ -318,7 +318,8 @@ but if you would like to experiment with it you should know that:
         app.config_from_object('django.conf:settings')
 
     Neither will it automatically traverse your installed apps to find task
-    modules. If you want this behavior, you must explictly pass a list of Django instances to the Celery app:
+    modules. If you want this behavior, you must explicitly pass a list of
+    Django instances to the Celery app:
 
     .. code-block:: python
 
@@ -341,8 +342,8 @@ To get started with the new API you should first read the :ref:`first-steps`
 tutorial, and then you should read the Django-specific instructions in
 :ref:`django-first-steps`.
 
-The fixes and improvements applied by the django-celery library are now
-automatically applied by core Celery when it detects that
+The fixes and improvements applied by the :pypi:`django-celery` library
+are now automatically applied by core Celery when it detects that
 the :envvar:`DJANGO_SETTINGS_MODULE` environment variable is set.
 
 The distribution ships with a new example project using Django
@@ -371,7 +372,7 @@ Events are now ordered using logical time
 -----------------------------------------
 
 Keeping physical clocks in perfect sync is impossible, so using
-timestamps to order events in a distributed system is not reliable.
+time-stamps to order events in a distributed system is not reliable.
 
 Celery event messages have included a logical clock value for some time,
 but starting with this version that field is also used to order them.
@@ -382,7 +383,7 @@ This is a signed integer telling the difference from UTC time in hours,
 so e.g. an event sent from the Europe/London timezone in daylight savings
 time will have an offset of 1.
 
-:class:`@events.Receiver` will automatically convert the timestamps
+:class:`@events.Receiver` will automatically convert the time-stamps
 to the local timezone.
 
 .. note::
@@ -400,12 +401,13 @@ to the local timezone.
     You may notice that the logical clock is an integer value and
     increases very rapidly.  Do not worry about the value overflowing
     though, as even in the most busy clusters it may take several
-    millennia before the clock exceeds a 64 bits value.
+    millennia before the clock exceeds a 64-bit value.
 
 New worker node name format (``name@host``)
 -------------------------------------------
 
-Node names are now constructed by two elements: name and hostname separated by '@'.
+Node names are now constructed from two elements: name and host-name
+separated by '@'.
 
 This change was made to more easily identify multiple instances running
 on the same machine.
@@ -432,7 +434,7 @@ a worker would identify itself as 'worker1.example.com', it will now
 use 'celery@worker1.example.com'.
 
 Remember that the :option:`-n <celery worker -n>` argument also supports
-simple variable substitutions, so if the current hostname
+simple variable substitutions, so if the current host-name
 is *george.example.com* then the ``%h`` macro will expand into that:
 
 .. code-block:: console
@@ -442,17 +444,17 @@ is *george.example.com* then the ``%h`` macro will expand into that:
 
 The available substitutions are as follows:
 
-+---------------+---------------------------------------+
-| Variable      | Substitution                          |
-+===============+=======================================+
-| ``%h``        | Full hostname (including domain name) |
-+---------------+---------------------------------------+
-| ``%d``        | Domain name only                      |
-+---------------+---------------------------------------+
-| ``%n``        | Hostname only (without domain name)   |
-+---------------+---------------------------------------+
-| ``%%``        | The character ``%``                   |
-+---------------+---------------------------------------+
++---------------+----------------------------------------+
+| Variable      | Substitution                           |
++===============+========================================+
+| ``%h``        | Full host-name (including domain name) |
++---------------+----------------------------------------+
+| ``%d``        | Domain name only                       |
++---------------+----------------------------------------+
+| ``%n``        | Host-name only (without domain name)   |
++---------------+----------------------------------------+
+| ``%%``        | The character ``%``                    |
++---------------+----------------------------------------+
 
 Bound tasks
 -----------
@@ -484,7 +486,7 @@ the same cluster.
 
 Synchronized data currently includes revoked tasks and logical clock.
 
-This only happens at startup and causes a one second startup delay
+This only happens at start-up and causes a one second start-up delay
 to collect broadcast responses from other workers.
 
 You can disable this bootstep using the
@@ -602,8 +604,8 @@ This also means that dependencies are the same for both Python 2 and
 Python 3, and that the :file:`requirements/default-py3k.txt` file has
 been removed.
 
-Support for Setuptools extra requirements
------------------------------------------
+Support for :pypi:`setuptools` extra requirements
+-------------------------------------------------
 
 Pip now supports the :pypi:`setuptools` extra requirements format,
 so we have removed the old bundles concept, and instead specify
@@ -724,7 +726,7 @@ In Other News
 
     The :attr:`@user_options` attribute can be used
     to add additional command-line arguments, and expects
-    optparse-style options:
+    :mod:`optparse`-style options:
 
     .. code-block:: python
 
@@ -783,7 +785,7 @@ In Other News
 
 - New :setting:`BROKER_FAILOVER_STRATEGY` setting.
 
-    This setting can be used to change the transport failover strategy,
+    This setting can be used to change the transport fail-over strategy; it
     can either be a callable returning an iterable or the name of a
     Kombu built-in failover strategy.  Default is "round-robin".
 
@@ -804,7 +806,7 @@ In Other News
     The `-P` option should always be used to select the eventlet/gevent pool
     to ensure that the patches are applied as early as possible.
 
-    If you start the worker in a wrapper (like Django's manage.py)
+    If you start the worker in a wrapper (like Django's :file:`manage.py`)
     then you must apply the patches manually, e.g. by creating an alternative
     wrapper that monkey patches at the start of the program before importing
     any other modules.
@@ -828,7 +830,7 @@ In Other News
     Configuration values will be converted to values supported by JSON
     where possible.
 
-    Contributed by Mher Movisyan.
+    Contributed by Mher Movsisyan.
 
 - New settings :setting:`CELERY_EVENT_QUEUE_TTL` and
   :setting:`CELERY_EVENT_QUEUE_EXPIRES`.
@@ -933,13 +935,13 @@ In Other News
     .. code-block:: console
 
         # Create graph of currently installed bootsteps in both the worker
-        # and consumer namespaces.
+        # and consumer name-spaces.
         $ celery graph bootsteps | dot -T png -o steps.png
 
-        # Graph of the consumer namespace only.
+        # Graph of the consumer name-space only.
         $ celery graph bootsteps consumer | dot -T png -o consumer_only.png
 
-        # Graph of the worker namespace only.
+        # Graph of the worker name-space only.
         $ celery graph bootsteps worker | dot -T png -o worker_only.png
 
     Or graphs of workers in a cluster:
@@ -1102,7 +1104,8 @@ In Other News
         # Consume from all queues in CELERY_QUEUES, but not the 'foo' queue.
         $ celery worker -A proj -l info -X foo
 
-- Adds :envvar:`C_FAKEFORK` envvar for simple init script/multi debugging.
+- Adds :envvar:`C_FAKEFORK` environment variable for simple
+  init-script/:program:`celery multi` debugging.
 
     This means that you can now do:
 
@@ -1189,8 +1192,8 @@ Scheduled Removals
 
 .. _v310-deprecations:
 
-Deprecations
-============
+Deprecation Time-line Changes
+=============================
 
 See the :ref:`deprecation-timeline`.
 
@@ -1234,7 +1237,7 @@ Fixes
 - Worker: Now makes sure that the shutdown process is not initiated multiple
   times.
 
-- Multi: Now properly handles both ``-f`` and
+- Programs: :program:`celery multi` now properly handles both ``-f`` and
   :option:`--logfile <celery worker --logfile>` options (Issue #1541).
 
 .. _v310-internal:

+ 64 - 67
docs/whatsnew-4.0.rst

@@ -3,7 +3,7 @@
 ===========================================
  What's new in Celery 4.0 (0Today8)
 ===========================================
-:Author: Ask Solem (ask at celeryproject.org)
+:Author: Ask Solem (``ask at celeryproject.org``)
 
 .. sidebar:: Change history
 
@@ -68,7 +68,7 @@ Joshua Harlow, Juan Rossi, Justin Patrin, Kai Groner, Kevin Harvey,
 Konstantinos Koukopoulos, Kouhei Maeda, Kracekumar Ramaraju,
 Krzysztof Bujniewicz, Latitia M. Haskins, Len Buckens, Lorenzo Mancini,
 Lucas Wiman, Luke Pomfrey, Marcio Ribeiro, Marin Atanasov Nikolov,
-Mark Parncutt, Maxime Vdb, Mher Movsisyan, Michael (michael-k),
+Mark Parncutt, Maxime Vdb, Mher Movsisyan, Michael (``@michael-k``),
 Michael Duane Mooring, Michael Permana, Mickaël Penhard, Mike Attwood,
 Morton Fox, Môshe van der Sterre, Nat Williams, Nathan Van Gheem, Nik Nyby,
 Omer Katz, Omer Korner, Ori Hoch, Paul Pearce, Paulo Bu, Philip Garnero,
@@ -79,9 +79,10 @@ Seungha Kim, Steve Peak, Sukrit Khera, Tadej Janež, Tewfik Sadaoui,
 Thomas French, Thomas Grainger, Tobias Schottdorf, Tocho Tochev,
 Valentyn Klindukh, Vic Kumar, Vladimir Bolshakov, Vladimir Gorbunov,
 Wayne Chang, Wil Langford, Will Thompson, William King, Yury Selivanov,
-Zoran Pavlovic, 許邱翔, @allenling, @bee-keeper, @ffeast, @flyingfoxlee,
-@gdw2, @gitaarik, @hankjin, @m-vdb, @mdk, @nokrik, @ocean1, @orlo666,
-@raducc, @wanglei, @worldexception.
+Zoran Pavlovic, 許邱翔, ``@allenling``, ``@bee-keeper``, ``@ffeast``,
+``@flyingfoxlee``, ``@gdw2``, ``@gitaarik``, ``@hankjin``, ``@m-vdb``,
+``@mdk``, ``@nokrik``, ``@ocean1``, ``@orlo666``, ``@raducc``,
+``@wanglei``, ``@worldexception``.
 
 .. _v400-important:
 
@@ -97,8 +98,8 @@ and also drops support for Python 3.3 so supported versions are:
 - CPython 2.7
 - CPython 3.4
 - CPython 3.5
-- PyPy 4.0 (pypy2)
-- PyPy 2.4 (pypy3)
+- PyPy 4.0 (``pypy2``)
+- PyPy 2.4 (``pypy3``)
 - Jython 2.7.0
 
 Lowercase setting names
@@ -163,7 +164,7 @@ names, are the renaming of some prefixes, like ``celerybeat_`` to ``beat_``,
 ``celeryd_`` to ``worker_``.
 
 The ``celery_`` prefix has also been removed, and task related settings
-from this namespace is now prefixed by ``task_``, worker related settings
+from this name-space are now prefixed by ``task_``, worker related settings
 with ``worker_``.
 
 Apart from this most of the settings will be the same in lowercase, apart from
@@ -231,7 +232,7 @@ then you have to configure your app before upgrading to 4.0:
The Task base class no longer automatically registers tasks
-----------------------------------------------------------
 
-The :class:`~@Task` class is no longer using a special metaclass
+The :class:`~@Task` class is no longer using a special meta-class
 that automatically registers the task in the task registry.
 
 Instead this is now handled by the :class:`@task` decorators.
@@ -303,8 +304,8 @@ to enable these settings, before upgrading to 4.0:
         'fanout_prefix': True,
     }
 
-Django: Autodiscover now supports Django app configs
-----------------------------------------------------
+Django: Auto-discover now supports Django app configurations
+------------------------------------------------------------
 
 The :meth:`@autodiscover` function can now be called without arguments,
 and the Django handler will automatically find your installed apps:
@@ -397,13 +398,13 @@ some long-requested features:
   serialized with the message body.
 
     In version 1 of the protocol the worker always had to deserialize
-    the message to be able to read task metadata like the task id,
+    the message to be able to read task meta-data like the task id,
     name, etc.  This also meant that the worker was forced to double-decode
     the data, first deserializing the message on receipt, serializing
     the message again to send to child process, then finally the child process
     deserializes the message again.
 
-    Keeping the metadata fields in the message headers means the worker
+    Keeping the meta-data fields in the message headers means the worker
     does not actually have to decode the payload before delivering
     the task to the child process, and also that it's now possible
     for the worker to reroute a task written in a language different
@@ -423,7 +424,7 @@ some long-requested features:
   terminates, deserialization errors, unregistered tasks).
 
 - A new ``origin`` header contains information about the process sending
-  the task (worker nodename, or pid and hostname information).
+  the task (worker node-name, or PID and host-name information).
 
 - A new ``shadow`` header allows you to modify the task name used in logs.
 
@@ -457,17 +458,17 @@ some long-requested features:
   a tasks relationship with other tasks.
 
     - ``parent_id`` is the task id of the task that called this task
-    - ``root_id`` is the first task in the workflow.
+    - ``root_id`` is the first task in the work-flow.
 
     These fields can be used to improve monitors like flower to group
     related messages together (like chains, groups, chords, complete
-    workflows, etc).
+    work-flows, etc).
 
- ``app.TaskProducer`` replaced by :meth:`@amqp.create_task_message` and
  :meth:`@amqp.send_task_message`.
 
     Dividing the responsibilities into creating and sending means that
-    people who want to send messages using a Python amqp client directly,
+    people who want to send messages using a Python AMQP client directly
    do not have to implement the protocol.
 
     The :meth:`@amqp.create_task_message` method calls either
@@ -489,10 +490,10 @@ actually executing the task, which means that logging utilities
 like Sentry can get full information about tasks that fail, including
 variables in the traceback.
 
-Prefork: One logfile per child process
-======================================
+Prefork: One log-file per child process
+=======================================
 
-Init scrips and :program:`celery multi` now uses the `%I` log file format
+Init-scripts and :program:`celery multi` now use the `%I` log file format
 option (e.g. :file:`/var/log/celery/%n%I.log`).
 
 This change was necessary to ensure each child
@@ -500,14 +501,14 @@ process has a separate log file after moving task logging
 to the child process, as multiple processes writing to the same
 log file can cause corruption.
 
-You are encouraged to upgrade your init scripts and multi arguments
-to use this new option.
+You are encouraged to upgrade your init-scripts and
+:program:`celery multi` arguments to use this new option.
 
 Configure broker URL for read/write separately.
 ===============================================
 
 New :setting:`broker_read_url` and :setting:`broker_write_url` settings
-have been added so that separate broker urls can be provided
+have been added so that separate broker URLs can be provided
 for connections used for consuming/publishing.
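
A sketch of how this might look in configuration (the URLs are
examples):

.. code-block:: python

    broker_read_url = 'amqp://user:pass@broker.example.com:5672//'
    broker_write_url = 'amqp://user:pass@other.example.com:5672//'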
 
 In addition to the configuration options, two new methods have been
@@ -538,7 +539,7 @@ the intent of the required connection.
 Canvas Refactor
 ===============
 
-The canvas/workflow implementation have been heavily refactored
+The canvas/work-flow implementation has been heavily refactored
 to fix some long outstanding issues.
 
 # BLALBLABLA
@@ -633,8 +634,8 @@ See :ref:`worker-maxmemperchild` for more information.
 
 Contributed by Dave Smith.
 
-Redis: Result backend optimizations
-===============================================
+Redis: Result backend optimization
+==================================
 
 RPC is now using pub/sub for streaming task results.
 ----------------------------------------------------
@@ -696,8 +697,8 @@ See :ref:`conf-elasticsearch-result-backend` for more information.
 
 Contributed by Ahmet Demir.
 
-New Filesystem result backend introduced.
-=========================================
+New File-system result backend introduced.
+==========================================
 
 See :ref:`conf-filesystem-result-backend` for more information.
 
@@ -731,24 +732,23 @@ Task.replace
 
 Task.replace changed, removes Task.replace_in_chord.
 
-The two methods had almost the same functionality, but the old Task.replace
-would force the new task to inherit the callbacks/errbacks of the existing
-task.
+The two methods had almost the same functionality, but the old
+``Task.replace`` would force the new task to inherit the
+callbacks/errbacks of the existing task.
 
 If you replace a node in a tree, then you would not expect the new node to
 inherit the children of the old node, so this seems like unexpected
 behavior.
 
-So self.replace(sig) now works for any task, in addition sig can now
+So ``self.replace(sig)`` now works for any task, in addition ``sig`` can now
 be a group.
 
 Groups are automatically converted to a chord, where the callback
 will "accumulate" the results of the group tasks.
 
-A new builtin task (`celery.accumulate` was added for this purpose)
+A new built-in task, `celery.accumulate`, was added for this purpose.
 
 Closes #817
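
A sketch of replacing a task with a group (the task names are
illustrative):

.. code-block:: python

    from celery import group

    @app.task(bind=True)
    def select_strategy(self, data):
        # the group is upgraded to a chord, so the caller still gets a
        # single result, accumulated by the built-in celery.accumulate task.
        return self.replace(group(analyze.s(data), summarize.s(data)))
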
-
 Optimized Beat implementation
 =============================
 
@@ -757,8 +757,8 @@ for millions of periodic tasks by using a heap to schedule entries.
 
 Contributed by Ask Solem and Alexander Koshelev.
 
-Task Autoretry Decorator
-========================
+Task Auto-retry Decorator
+=========================
 
 Writing custom retry handling for exception events is so common
 that we now have built-in support for it.
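
A sketch of what this looks like (the exception type and task are
examples):

.. code-block:: python

    import requests
    from requests.exceptions import RequestException

    @app.task(autoretry_for=(RequestException,),
              retry_kwargs={'max_retries': 5})
    def refresh_feed(url):
        return requests.get(url).text
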
@@ -799,7 +799,7 @@ In Other News
 
     - Now depends on :pypi:`billiard` version 3.5.
 
-    - No longer depends on :pypi:`anyjson` :sadface:
+    - No longer depends on :pypi:`anyjson` :(
 
 
 - **Tasks**: The "anon-exchange" is now used for simple name-name direct routing.
@@ -825,7 +825,7 @@ In Other News
   tasks.
 
 - **Programs**: New :program:`celery logtool`: Utility for filtering and parsing
-  celery worker logfiles
+  celery worker log-files
 
 - **Redis Transport**: The Redis transport now supports the
   :setting:`broker_use_ssl` option.
@@ -838,7 +838,7 @@ In Other News
 
 - **Worker**: Now preserves exit code (Issue #2024).
 
-- **Worker**: Loglevel for unrecoverable errors changed from ``error`` to
+- **Worker**: Log-level for unrecoverable errors changed from ``error`` to
   ``critical``.
 
 - **Worker**: Improved rate limiting accuracy.
@@ -858,28 +858,25 @@ In Other News
 
 - **Worker**: Improvements and fixes for LimitedSet
 
-    Getting rid of leaking memory + adding minlen size of the set
-    minlen is minimal residual size of set after operating for long.
-    Minlen items are kept, even if they should be expired by time, until
-    we get newer items.
+    Getting rid of leaking memory + adding ``minlen`` size of the set:
+    the minimal residual size of the set after operating for some time.
+    ``minlen`` items are kept, even if they should have been expired.
 
    Problems with the old and even older code:
 
-    1)
-       Heap would tend to grow in some scenarios
+    #. Heap would tend to grow in some scenarios
        (like adding an item multiple times).
 
-    2) Adding many items fast would not clean them soon enough (if ever).
+    #. Adding many items fast would not clean them soon enough (if ever).
 
-    3) When talking to other workers, revoked._data was sent, but
+    #. When talking to other workers, revoked._data was sent, but
       it was processed on the other side as an iterable.
        That means giving those keys new (current)
-       timestamp. By doing this workers could recycle
+       time-stamp. By doing this workers could recycle
       items forever. Combined with 1) and 2), this means that in a
       large set of workers you would eventually run out of memory.
 
-    All those problems should be fixed now,
-    also some new unittests are added.
+    All those problems should be fixed now.
 
     This should fix issues #3095, #3086.
 
@@ -958,8 +955,8 @@ In Other News
 - **Programs**: :program:`celery multi` now passes through `%i` and `%I` log
   file formats.
 
-- **Programs**: ``%p`` can now be used to expand to the full worker nodename
-  in logfile/pidfile arguments.
+- **Programs**: ``%p`` can now be used to expand to the full worker node-name
+  in log-file/pid-file arguments.
 
 - **Programs**: A new command line option
    :option:`--executable <celery worker --executable>` is now
@@ -973,7 +970,7 @@ In Other News
 
     Contributed by Mickaël Penhard.
 
-- **Deployment**: Generic init scripts now support
+- **Deployment**: Generic init-scripts now support
+  :envvar:`CELERY_SU` and :envvar:`CELERYD_SU_ARGS` environment variables
   to set the path and arguments for :command:`su` (:manpage:`su(1)`).
 
@@ -986,7 +983,7 @@ In Other News
     Contributed by Alexander Oblovatniy.
 
 - **Tasks**: New :setting:`email_charset` setting allows for changing
-  the charset used for outgoing error emails.
+  the character set used for outgoing error emails.
 
     Contributed by Vladimir Gorbunov.
 
@@ -994,7 +991,7 @@ In Other News
 
     Fix contributed by Nat Williams.
 
-- **Worker**: Autoscale did not always update keepalive when scaling down.
+- **Worker**: Auto-scale did not always update keep-alive when scaling down.
 
     Fix contributed by Philip Garnero.
 
@@ -1028,7 +1025,7 @@ In Other News
 
     Contributed by Samuel Jaillet.
 
-- **Result Backends**: Fix problem with rpc/amqp backends where exception
+- **Result Backends**: Fix problem with RPC/AMQP backends where exception
     was not deserialized properly with the json serializer (Issue #2518).
 
     Fix contributed by Allard Hoeve.
@@ -1044,7 +1041,7 @@ In Other News
 
     Fix contributed by Feanil Patel.
 
-- **Tasks**: Task error email charset now set to ``utf-8`` by default
+- **Tasks**: Task error email character set now set to ``utf-8`` by default
   (Issue #2737).
 
 - Apps can now define how tasks are named (:meth:`@gen_task_name`).
@@ -1082,11 +1079,11 @@ Unscheduled Removals
 - The experimental :mod:`celery.contrib.methods` feature has been removed,
  as there were far too many bugs in the implementation for it to be useful.
 
-- The CentOS init scripts have been removed.
+- The CentOS init-scripts have been removed.
 
-    These did not really add any features over the generic init scripts,
+    These did not really add any features over the generic init-scripts,
     so you are encouraged to use them instead, or something like
-    ``supervisord``.
+    :pypi:`supervisor`.
 
 
 .. _v400-removals:
@@ -1103,8 +1100,8 @@ Modules
     It is now part of the public API so should not change again.
 
 - Module ``celery.task.trace`` has been renamed to ``celery.app.trace``
-  as the ``celery.task`` package is being phased out.  The compat module
-  will be removed in version 4.0 so please change any import from::
+  as the ``celery.task`` package is being phased out.  The module
+  will be removed in version 5.0 so please change any import from::
 
     from celery.task.trace import X
 
@@ -1237,8 +1234,8 @@ Events
 Magic keyword arguments
 -----------------------
 
-Support for the very old magic keyword arguments accepted by tasks has finally
-been in 4.0.
+Support for the very old magic keyword arguments accepted by tasks is
+finally removed in this version.
 
 If you are still using these you have to rewrite any task still
 using the old ``celery.decorators`` module and depending
@@ -1285,13 +1282,13 @@ Task Settings
 =====================================  =====================================
 **Setting name**                       **Replace with**
 =====================================  =====================================
-``CELERY_CHORD_PROPAGATES``            N/a
+``CELERY_CHORD_PROPAGATES``            N/A
 =====================================  =====================================
 
 .. _v400-deprecations:
 
-Deprecations
-============
+Deprecation Time-line Changes
+=============================
 
 See the :ref:`deprecation-timeline`.
 

+ 1 - 1
examples/celery_http_gateway/README.rst

@@ -33,7 +33,7 @@ Then you can use the resulting task-id to get the return value::
 
 If you don't want to expose all tasks there are a few possible
 approaches. For instance you can extend the `apply` view to only
-accept a whitelist. Another possibility is to just make views for every task you want to
+accept a white-list. Another possibility is to just make views for every task you want to
expose. We made one such view for ping in `views.ping`::
 
     $ curl http://localhost:8000/ping/

+ 2 - 1
examples/django/proj/settings.py

@@ -61,7 +61,8 @@ USE_L10N = True
 # If you set this to False, Django will not use timezone-aware datetimes.
 USE_TZ = True
 
-# Absolute filesystem path to the directory that will hold user-uploaded files.
+# Absolute file-system path to the directory that will hold
+# user-uploaded files.
 # Example: '/home/media/media.lawrence.com/media/'
 MEDIA_ROOT = ''
 
