
Documentation improvements

Ask Solem, 9 years ago
commit 9193b84d98
52 changed files with 1032 additions and 503 deletions
  1. celery/__init__.py (+2 -1)
  2. celery/app/amqp.py (+2 -1)
  3. celery/app/base.py (+11 -5)
  4. celery/app/control.py (+2 -1)
  5. celery/bin/base.py (+0 -69)
  6. celery/bin/beat.py (+29 -0)
  7. celery/bin/celery.py (+251 -0)
  8. celery/bin/events.py (+33 -0)
  9. celery/bin/worker.py (+52 -16)
  10. celery/schedules.py (+14 -6)
  11. celery/worker/autoscale.py (+2 -2)
  12. celery/worker/consumer/consumer.py (+1 -1)
  13. celery/worker/state.py (+1 -1)
  14. docs/conf.py (+3 -1)
  15. docs/configuration.rst (+16 -17)
  16. docs/contributing.rst (+11 -10)
  17. docs/faq.rst (+9 -7)
  18. docs/getting-started/brokers/rabbitmq.rst (+1 -1)
  19. docs/getting-started/brokers/redis.rst (+35 -17)
  20. docs/getting-started/first-steps-with-celery.rst (+33 -17)
  21. docs/getting-started/introduction.rst (+3 -3)
  22. docs/getting-started/next-steps.rst (+12 -11)
  23. docs/history/changelog-1.0.rst (+21 -11)
  24. docs/history/changelog-2.0.rst (+166 -92)
  25. docs/history/changelog-2.1.rst (+15 -12)
  26. docs/history/changelog-2.2.rst (+16 -13)
  27. docs/history/changelog-2.3.rst (+4 -3)
  28. docs/history/changelog-2.4.rst (+4 -3)
  29. docs/history/changelog-2.5.rst (+4 -2)
  30. docs/history/changelog-3.0.rst (+18 -12)
  31. docs/history/changelog-3.1.rst (+35 -28)
  32. docs/includes/installation.txt (+14 -5)
  33. docs/internals/deprecation.rst (+24 -10)
  34. docs/internals/guide.rst (+4 -3)
  35. docs/reference/celery.rst (+24 -8)
  36. docs/tutorials/daemonizing.rst (+2 -2)
  37. docs/tutorials/debugging.rst (+17 -7)
  38. docs/userguide/calling.rst (+1 -1)
  39. docs/userguide/concurrency/eventlet.rst (+2 -2)
  40. docs/userguide/extending.rst (+7 -3)
  41. docs/userguide/monitoring.rst (+14 -9)
  42. docs/userguide/optimizing.rst (+9 -7)
  43. docs/userguide/periodic-tasks.rst (+6 -4)
  44. docs/userguide/remote-tasks.rst (+13 -9)
  45. docs/userguide/routing.rst (+2 -2)
  46. docs/userguide/signals.rst (+3 -3)
  47. docs/userguide/tasks.rst (+5 -5)
  48. docs/userguide/workers.rst (+27 -19)
  49. docs/whatsnew-2.5.rst (+7 -4)
  50. docs/whatsnew-3.0.rst (+3 -4)
  51. docs/whatsnew-3.1.rst (+27 -21)
  52. docs/whatsnew-4.0.rst (+15 -12)

+ 2 - 1
celery/__init__.py

@@ -18,7 +18,7 @@ version_info_t = namedtuple(
 )
 
 SERIES = '0today8'
-VERSION = version_info_t(4, 0, 0, 'rc2', '')
+VERSION = version_info = version_info_t(4, 0, 0, 'rc2', '')
 
 __version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
 __author__ = 'Ask Solem'
@@ -156,6 +156,7 @@ old_module, new_module = five.recreate_module(  # pragma: no cover
     __homepage__=__homepage__, __docformat__=__docformat__, five=five,
     VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
     version_info_t=version_info_t,
+    version_info=version_info,
     maybe_patch_concurrency=maybe_patch_concurrency,
     _find_option_with_arg=_find_option_with_arg,
     absolute_import=absolute_import,

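The new ``version_info`` alias points at the same named tuple as ``VERSION``.
A quick interactive sketch of how the fields relate to the version string
built by the format call above (output assumes the 4.0.0rc2 tuple):

.. code-block:: pycon

    >>> import celery
    >>> celery.version_info
    version_info_t(major=4, minor=0, micro=0, releaselevel='rc2', serial='')
    >>> '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(celery.version_info)
    '4.0.0rc2'
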
+ 2 - 1
celery/app/amqp.py

@@ -170,7 +170,8 @@ class Queues(dict):
 
     def select_add(self, queue, **kwargs):
         """Add new task queue that will be consumed from even when
-        a subset has been selected using the :option:`-Q` option."""
+        a subset has been selected using the
+        :option:`celery worker -Q` option."""
         q = self.add(queue, **kwargs)
         if self._consume_from is not None:
             self._consume_from[q.name] = q

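For illustration, a hedged sketch of what ``select_add`` documents: even when
a worker was started with ``celery worker -Q video``, adding a queue through
this method also adds it to the set being consumed from (the queue name here
is hypothetical):

.. code-block:: python

    # app.amqp.queues is the Queues mapping shown above.
    app.amqp.queues.select_add('images')  # now consuming from video and images
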
+ 11 - 5
celery/app/base.py

@@ -284,7 +284,9 @@ class Celery(object):
         """Clean up after the application.
 
         Only necessary for dynamically created apps for which you can
-        use the :keyword:`with` statement instead::
+        use the :keyword:`with` statement instead:
+
+        .. code-block:: python
 
             with Celery(set_as_current=False) as app:
                 with app.connection_for_write() as conn:
@@ -322,7 +324,7 @@ class Celery(object):
 
             @app.task
             def refresh_feed(url):
-                return …
+                store_feed(feedparser.parse(url))
 
         with setting extra options:
 
@@ -330,7 +332,7 @@ class Celery(object):
 
             @app.task(exchange='feeds')
             def refresh_feed(url):
-                return …
+                return store_feed(feedparser.parse(url))
 
         .. admonition:: App Binding
 
@@ -450,7 +452,9 @@ class Celery(object):
         as a promise, and it won't be loaded until the configuration is
         actually needed.
 
-        This method can be compared to::
+        This method can be compared to:
+
+        .. code-block:: pycon
 
             >>> celery.conf.update(d)
 
@@ -553,7 +557,9 @@ class Celery(object):
 
         If the name is empty, this will be delegated to fixups (e.g. Django).
 
-        For example if you have an (imagined) directory tree like this::
+        For example if you have an (imagined) directory tree like this:
+
+        .. code-block:: text
 
             foo/__init__.py
                tasks.py

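The docstring's ``with``-statement form, expanded into a minimal runnable
sketch (the app name and broker URL are assumptions for illustration):

.. code-block:: python

    from celery import Celery

    with Celery('tmp', set_as_current=False, broker='amqp://') as app:
        with app.connection_for_write() as conn:
            pass  # use conn; app.close() runs automatically on exit
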
+ 2 - 1
celery/app/control.py

@@ -24,7 +24,8 @@ __all__ = ['Inspect', 'Control', 'flatten_reply']
 
 W_DUPNODE = """\
 Received multiple replies from node {0}: {1}.
-Please make sure you give each node a unique nodename using the `-n` option.\
+Please make sure you give each node a unique nodename using
+the celery worker `-n` option.\
 """
 
 

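As a hedged example of the fix the new warning text suggests, give each
worker on a host its own node name (``proj`` is an assumed app module;
``%h`` expands to the hostname):

.. code-block:: console

    $ celery -A proj worker -n worker1@%h
    $ celery -A proj worker -n worker2@%h
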
+ 0 - 69
celery/bin/base.py

@@ -1,73 +1,4 @@
 # -*- coding: utf-8 -*-
-"""
-
-.. _preload-options:
-
-Preload Options
----------------
-
-These options are supported by all commands,
-and usually parsed before command-specific arguments.
-
-.. cmdoption:: -A, --app
-
-    app instance to use (e.g. module.attr_name)
-
-.. cmdoption:: -b, --broker
-
-    url to broker.  default is 'amqp://guest@localhost//'
-
-.. cmdoption:: --loader
-
-    name of custom loader class to use.
-
-.. cmdoption:: --config
-
-    Name of the configuration module
-
-.. _daemon-options:
-
-Daemon Options
---------------
-
-These options are supported by commands that can detach
-into the background (daemon).  They will be present
-in any command that also has a `--detach` option.
-
-.. cmdoption:: -f, --logfile
-
-    Path to log file. If no logfile is specified, `stderr` is used.
-
-.. cmdoption:: --pidfile
-
-    Optional file used to store the process pid.
-
-    The program will not start if this file already exists
-    and the pid is still alive.
-
-.. cmdoption:: --uid
-
-    User id, or user name of the user to run as after detaching.
-
-.. cmdoption:: --gid
-
-    Group id, or group name of the main group to change to after
-    detaching.
-
-.. cmdoption:: --umask
-
-    Effective umask (in octal) of the process after detaching.  Inherits
-    the umask of the parent process by default.
-
-.. cmdoption:: --workdir
-
-    Optional directory to change to after detaching.
-
-.. cmdoption:: --executable
-
-    Executable to use for the detached process.
-
-"""
 from __future__ import absolute_import, print_function, unicode_literals
 
 import os

+ 29 - 0
celery/bin/beat.py

@@ -37,6 +37,35 @@ The :program:`celery beat` command.
     Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
     `ERROR`, `CRITICAL`, or `FATAL`.
 
+.. cmdoption:: --pidfile
+
+    Optional file used to store the process pid.
+
+    The program will not start if this file already exists
+    and the pid is still alive.
+
+.. cmdoption:: --uid
+
+    User id, or user name of the user to run as after detaching.
+
+.. cmdoption:: --gid
+
+    Group id, or group name of the main group to change to after
+    detaching.
+
+.. cmdoption:: --umask
+
+    Effective umask (in octal) of the process after detaching.  Inherits
+    the umask of the parent process by default.
+
+.. cmdoption:: --workdir
+
+    Optional directory to change to after detaching.
+
+.. cmdoption:: --executable
+
+    Executable to use for the detached process.
+
 """
 from __future__ import absolute_import, unicode_literals
 

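A sketch of the daemon options documented above applied to
:program:`celery beat` (all paths and names are illustrative):

.. code-block:: console

    $ celery -A proj beat --detach \
        --pidfile=/var/run/celery/beat.pid \
        --logfile=/var/log/celery/beat.log \
        --uid=celery --gid=celery
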
+ 251 - 0
celery/bin/celery.py

@@ -5,6 +5,257 @@ The :program:`celery` umbrella command.
 
 .. program:: celery
 
+.. _preload-options:
+
+Preload Options
+---------------
+
+These options are supported by all commands,
+and usually parsed before command-specific arguments.
+
+.. cmdoption:: -A, --app
+
+    App instance to use (e.g. ``module.attr_name``).
+
+.. cmdoption:: -b, --broker
+
+    URL of the broker.  Default is 'amqp://guest@localhost//'.
+
+.. cmdoption:: --loader
+
+    Name of custom loader class to use.
+
+.. cmdoption:: --config
+
+    Name of the configuration module.
+
+.. cmdoption:: -C, --no-color
+
+    Disable colors in output.
+
+.. cmdoption:: -q, --quiet
+
+    Give less verbose output (behavior depends on the sub command).
+
+.. cmdoption:: --help
+
+    Show help and exit.
+
+.. _daemon-options:
+
+Daemon Options
+--------------
+
+These options are supported by commands that can detach
+into the background (daemon).  They will be present
+in any command that also has a `--detach` option.
+
+.. cmdoption:: -f, --logfile
+
+    Path to log file. If no logfile is specified, `stderr` is used.
+
+.. cmdoption:: --pidfile
+
+    Optional file used to store the process pid.
+
+    The program will not start if this file already exists
+    and the pid is still alive.
+
+.. cmdoption:: --uid
+
+    User id, or user name of the user to run as after detaching.
+
+.. cmdoption:: --gid
+
+    Group id, or group name of the main group to change to after
+    detaching.
+
+.. cmdoption:: --umask
+
+    Effective umask (in octal) of the process after detaching.  Inherits
+    the umask of the parent process by default.
+
+.. cmdoption:: --workdir
+
+    Optional directory to change to after detaching.
+
+.. cmdoption:: --executable
+
+    Executable to use for the detached process.
+
+``celery inspect``
+------------------
+
+.. program:: celery inspect
+
+.. cmdoption:: -t, --timeout
+
+    Timeout in seconds (float) waiting for reply
+
+.. cmdoption:: -d, --destination
+
+    Comma separated list of destination node names.
+
+.. cmdoption:: -j, --json
+
+    Use json as output format.
+
+``celery control``
+------------------
+
+.. program:: celery control
+
+.. cmdoption:: -t, --timeout
+
+    Timeout in seconds (float) waiting for reply
+
+.. cmdoption:: -d, --destination
+
+    Comma separated list of destination node names.
+
+.. cmdoption:: -j, --json
+
+    Use json as output format.
+
+``celery migrate``
+------------------
+
+.. program:: celery migrate
+
+.. cmdoption:: -n, --limit
+
+    Number of tasks to consume (int).
+
+.. cmdoption:: -t, --timeout
+
+    Timeout in seconds (float) waiting for tasks.
+
+.. cmdoption:: -a, --ack-messages
+
+    Ack messages from source broker.
+
+.. cmdoption:: -T, --tasks
+
+    List of task names to filter on.
+
+.. cmdoption:: -Q, --queues
+
+    List of queues to migrate.
+
+.. cmdoption:: -F, --forever
+
+    Continually migrate tasks until killed.
+
+``celery upgrade``
+------------------
+
+.. program:: celery upgrade
+
+.. cmdoption:: --django
+
+    Upgrade a Django project.
+
+.. cmdoption:: --compat
+
+    Maintain backwards compatibility.
+
+.. cmdoption:: --no-backup
+
+    Don't backup original files.
+
+``celery shell``
+----------------
+
+.. program:: celery shell
+
+.. cmdoption:: -I, --ipython
+
+    Force :pypi:`IPython` implementation.
+
+.. cmdoption:: -B, --bpython
+
+    Force :pypi:`bpython` implementation.
+
+.. cmdoption:: -P, --python
+
+    Force default Python shell.
+
+.. cmdoption:: -T, --without-tasks
+
+    Don't add tasks to locals.
+
+.. cmdoption:: --eventlet
+
+    Use :pypi:`eventlet` monkey patches.
+
+.. cmdoption:: --gevent
+
+    Use :pypi:`gevent` monkey patches.
+
+
+``celery result``
+-----------------
+
+.. program:: celery result
+
+.. cmdoption:: -t, --task
+
+    Name of task (if custom backend).
+
+.. cmdoption:: --traceback
+
+    Show traceback if any.
+
+``celery purge``
+----------------
+
+.. program:: celery purge
+
+.. cmdoption:: -f, --force
+
+    Don't prompt for verification before deleting messages (DANGEROUS)
+
+``celery call``
+---------------
+
+.. program:: celery call
+
+.. cmdoption:: -a, --args
+
+    Positional arguments (json format).
+
+.. cmdoption:: -k, --kwargs
+
+    Keyword arguments (json format).
+
+.. cmdoption:: --eta
+
+    Scheduled time in ISO-8601 format.
+
+.. cmdoption:: --countdown
+
+    ETA in seconds from now (float/int).
+
+.. cmdoption:: --expires
+
+    Expiry time in float/int seconds, or an ISO-8601 date.
+
+.. cmdoption:: --serializer
+
+    Specify serializer to use (default is json).
+
+.. cmdoption:: --queue
+
+    Destination queue.
+
+.. cmdoption:: --exchange
+
+    Destination exchange (defaults to the queue exchange).
+
+.. cmdoption:: --routing-key
+
+    Destination routing key (defaults to the queue routing key).
+
 """
 from __future__ import absolute_import, unicode_literals, print_function
 

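A few hedged examples combining the sub-commands documented above
(``proj``, the worker name and the task name are assumptions):

.. code-block:: console

    $ celery -A proj inspect active --timeout=5 --json
    $ celery -A proj control enable_events --destination=worker1@%h
    $ celery -A proj call proj.tasks.add --args='[2, 2]' --queue=hipri
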
+ 33 - 0
celery/bin/events.py

@@ -34,6 +34,39 @@ The :program:`celery events` command.
     Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
     `ERROR`, `CRITICAL`, or `FATAL`.  Default is INFO.
 
+.. cmdoption:: -f, --logfile
+
+    Path to log file. If no logfile is specified, `stderr` is used.
+
+.. cmdoption:: --pidfile
+
+    Optional file used to store the process pid.
+
+    The program will not start if this file already exists
+    and the pid is still alive.
+
+.. cmdoption:: --uid
+
+    User id, or user name of the user to run as after detaching.
+
+.. cmdoption:: --gid
+
+    Group id, or group name of the main group to change to after
+    detaching.
+
+.. cmdoption:: --umask
+
+    Effective umask (in octal) of the process after detaching.  Inherits
+    the umask of the parent process by default.
+
+.. cmdoption:: --workdir
+
+    Optional directory to change to after detaching.
+
+.. cmdoption:: --executable
+
+    Executable to use for the detached process.
+
 """
 from __future__ import absolute_import, unicode_literals
 

+ 52 - 16
celery/bin/worker.py

@@ -20,15 +20,6 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
     prefork (default), eventlet, gevent, solo or threads.
 
-.. cmdoption:: -f, --logfile
-
-    Path to log file. If no logfile is specified, `stderr` is used.
-
-.. cmdoption:: -l, --loglevel
-
-    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
-    `ERROR`, `CRITICAL`, or `FATAL`.
-
 .. cmdoption:: -n, --hostname
 
     Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
@@ -45,6 +36,12 @@ The :program:`celery worker` command (previously known as ``celeryd``)
     By default all configured queues are enabled.
     Example: `-Q video,image`
 
+.. cmdoption:: -X, --exclude-queues
+
+    List of queues to disable for this worker, separated by comma.
+    By default all configured queues are enabled.
+    Example: `-X video,image`.
+
 .. cmdoption:: -I, --include
 
     Comma separated list of additional modules to import.
@@ -60,6 +57,10 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
     Apply optimization profile.  Supported: default, fair
 
+.. cmdoption:: --prefetch-multiplier
+
+    Set custom prefetch multiplier value for this worker instance.
+
 .. cmdoption:: --scheduler
 
     Scheduler class to use. Default is celery.beat.PersistentScheduler
@@ -117,13 +118,6 @@ The :program:`celery worker` command (previously known as ``celeryd``)
     completed and the child process will be replaced afterwards.
     Default: no limit.
 
-.. cmdoption:: --pidfile
-
-    Optional file used to store the workers pid.
-
-    The worker will not start if this file already exists
-    and the pid is still alive.
-
 .. cmdoption:: --autoscale
 
     Enable autoscaling by providing
@@ -141,6 +135,48 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
     Don't do execv after multiprocessing child fork.
 
+.. cmdoption:: --detach
+
+    Start worker as a background process.
+
+.. cmdoption:: -f, --logfile
+
+    Path to log file. If no logfile is specified, `stderr` is used.
+
+.. cmdoption:: -l, --loglevel
+
+    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
+    `ERROR`, `CRITICAL`, or `FATAL`.
+
+.. cmdoption:: --pidfile
+
+    Optional file used to store the process pid.
+
+    The program will not start if this file already exists
+    and the pid is still alive.
+
+.. cmdoption:: --uid
+
+    User id, or user name of the user to run as after detaching.
+
+.. cmdoption:: --gid
+
+    Group id, or group name of the main group to change to after
+    detaching.
+
+.. cmdoption:: --umask
+
+    Effective umask (in octal) of the process after detaching.  Inherits
+    the umask of the parent process by default.
+
+.. cmdoption:: --workdir
+
+    Optional directory to change to after detaching.
+
+.. cmdoption:: --executable
+
+    Executable to use for the detached process.
+
 """
 from __future__ import absolute_import, unicode_literals
 

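For example, the new :option:`-X <celery worker -X>` and
:option:`--prefetch-multiplier <celery worker --prefetch-multiplier>`
options in use (queue names and the app module are illustrative):

.. code-block:: console

    $ celery -A proj worker -l INFO -X video,image --prefetch-multiplier=1
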
+ 14 - 6
celery/schedules.py

@@ -185,7 +185,9 @@ class crontab_parser(object):
     """Parser for crontab expressions. Any expression of the form 'groups'
     (see BNF grammar below) is accepted and expanded to a set of numbers.
     These numbers represent the units of time that the crontab needs to
-    run on::
+    run on:
+
+    .. code-block:: bnf
 
         digit   :: '0'..'9'
         dow     :: 'a'..'z'
@@ -197,7 +199,9 @@ class crontab_parser(object):
         groups  :: expr ( ',' expr ) *
 
     The parser is a general purpose one, useful for parsing hours, minutes and
-    day_of_week expressions.  Example usage::
+    day_of_week expressions.  Example usage:
+
+    .. code-block:: pycon
 
         >>> minutes = crontab_parser(60).parse('*/15')
         [0, 15, 30, 45]
@@ -207,7 +211,9 @@ class crontab_parser(object):
         [0, 1, 2, 3, 4, 5, 6]
 
     It can also parse day_of_month and month_of_year expressions if initialized
-    with an minimum of 1.  Example usage::
+    with a minimum of 1.  Example usage:
+
+    .. code-block:: pycon
 
         >>> days_of_month = crontab_parser(31, 1).parse('*/3')
         [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
@@ -216,9 +222,9 @@ class crontab_parser(object):
         >>> months_of_year = crontab_parser(12, 1).parse('2-12/2')
         [2, 4, 6, 8, 10, 12]
 
-    The maximum possible expanded value returned is found by the formula::
+    The maximum possible expanded value returned is found by the formula:
 
-        max_ + min_ - 1
+        :math:`max_ + min_ - 1`
 
     """
     ParseException = ParseException
@@ -390,7 +396,9 @@ class crontab(schedule):
 
     @staticmethod
     def _expand_cronspec(cronspec, max_, min_=0):
-        """Takes the given cronspec argument in one of the forms::
+        """Takes the given cronspec argument in one of the forms:
+
+        .. code-block:: text
 
             int         (like 7)
             str         (like '3-5,*/15', '*', or 'monday')

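An interactive sketch of the parser described above; ``parse()`` returns a
set, so the results are shown sorted here (values match the docstring's
examples):

.. code-block:: pycon

    >>> from celery.schedules import crontab_parser
    >>> sorted(crontab_parser(60).parse('*/15'))
    [0, 15, 30, 45]
    >>> sorted(crontab_parser(31, 1).parse('*/3'))
    [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
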
+ 2 - 2
celery/worker/autoscale.py

@@ -7,8 +7,8 @@
     for growing and shrinking the pool according to the
     current autoscale settings.
 
-    The autoscale thread is only enabled if :option:`--autoscale`
-    has been enabled on the command-line.
+    The autoscale thread is only enabled if
+    the :option:`celery worker --autoscale` option is used.
 
 """
 from __future__ import absolute_import, unicode_literals

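For reference, the option the autoscaler thread depends on takes
``max_concurrency,min_concurrency`` values, e.g. (app name assumed):

.. code-block:: console

    $ celery -A proj worker --autoscale=10,3
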
+ 1 - 1
celery/worker/consumer/consumer.py

@@ -249,7 +249,7 @@ class Consumer(object):
 
             Currently pool grow operations will end up with an offset
             of +1 if the initial size of the pool was 0 (e.g.
-            ``--autoscale=1,0``).
+            :option:`--autoscale=1,0 <celery worker --autoscale>`).
 
         """
         num_processes = self.pool.num_processes

+ 1 - 1
celery/worker/state.py

@@ -150,7 +150,7 @@ if C_BENCH:  # pragma: no cover
 
 class Persistent(object):
     """This is the persistent data stored by the worker when
-    :option:`--statedb` is enabled.
+    :option:`celery worker --statedb` is enabled.
 
     It currently only stores revoked task id's.
 

+ 3 - 1
docs/conf.py

@@ -19,7 +19,7 @@ import celery  # noqa
 extensions = [
     'sphinx.ext.autodoc',
     'sphinx.ext.coverage',
-    'sphinx.ext.pngmath',
+    'sphinx.ext.imgmath',
     'sphinx.ext.viewcode',
     'sphinx.ext.intersphinx',
     'sphinxcontrib.cheeseshop',
@@ -93,6 +93,8 @@ intersphinx_mapping = {
     'eventlet': ('http://eventlet.net/doc/', None),
     'gevent': ('http://gevent.org/', None),
     'pyOpenSSL': ('http://pyopenssl.readthedocs.org/en/stable/', None),
+    'nose': ('http://nose.readthedocs.org/en/latest', None),
+    'tox': ('http://tox.readthedocs.org/en/latest', None),
 }
 
 # The name of the Pygments (syntax highlighting) style to use.

+ 16 - 17
docs/configuration.rst

@@ -279,7 +279,7 @@ instead of a dict to choose which tasks to annotate:
             if task.name.startswith('tasks.'):
                 return {'rate_limit': '10/s'}
 
-    task_annotations = (MyAnnotate(), {})
+    task_annotations = (MyAnnotate(), {other,})
 
 .. setting:: task_compression
 
@@ -1285,9 +1285,10 @@ the :ref:`automatic routing facilities <routing-automatic>`.
 If you really want to configure advanced routing, this setting should
 be a list of :class:`kombu.Queue` objects the worker will consume from.
 
-Note that workers can be overriden this setting via the `-Q` option,
-or individual queues from this list (by name) can be excluded using
-the `-X` option.
+Note that workers can override this setting via the
+:option:`-Q <celery worker -Q>` option, or individual queues from this
+list (by name) can be excluded using the :option:`-X <celery worker -X>`
+option.
 
 Also see :ref:`routing-basics` for more information.
 
@@ -1860,8 +1861,7 @@ Name of the file used to stores persistent worker state (like revoked tasks).
 Can be a relative or absolute path, but be aware that the suffix `.db`
 may be appended to the file name (depending on Python version).
 
-Can also be set via the :option:`--statedb` argument to
-:mod:`~celery.bin.worker`.
+Can also be set via the :option:`celery worker --statedb` argument.
 
 Not enabled by default.
 
@@ -2016,7 +2016,8 @@ worker_send_task_events
 ~~~~~~~~~~~~~~~~~~~~~~~
 
 Send task-related events so that tasks can be monitored using tools like
-`flower`.  Sets the default value for the workers :option:`-E` argument.
+`flower`.  Sets the default value for the worker's
+:option:`-E <celery worker -E>` argument.
 
 .. setting:: task_send_sent_event
 
@@ -2199,9 +2200,9 @@ Name of the pool class used by the worker.
 .. admonition:: Eventlet/Gevent
 
     Never use this option to select the eventlet or gevent pool.
-    You must use the `-P` option to :program:`celery worker` instead, to
-    ensure the monkey patches are not applied too late, causing things
-    to break in strange ways.
+    You must use the :option:`-P <celery worker -P>` option to
+    :program:`celery worker` instead, to ensure the monkey patches
+    are not applied too late, causing things to break in strange ways.
 
 Default is ``celery.concurrency.prefork:TaskPool``.
 
@@ -2273,8 +2274,7 @@ beat_scheduler
 
 The default scheduler class.  Default is ``celery.beat:PersistentScheduler``.
 
-Can also be set via the :option:`-S` argument to
-:mod:`~celery.bin.beat`.
+Can also be set via the :option:`celery beat -S` argument.
 
 .. setting:: beat_schedule_filename
 
@@ -2285,8 +2285,7 @@ Name of the file used by `PersistentScheduler` to store the last run times
 of periodic tasks.  Can be a relative or absolute path, but be aware that the
 suffix `.db` may be appended to the file name (depending on Python version).
 
-Can also be set via the :option:`--schedule` argument to
-:mod:`~celery.bin.beat`.
+Can also be set via the :option:`celery beat --schedule` argument.
 
 .. setting:: beat_sync_every
 
@@ -2313,6 +2312,6 @@ but for e.g. the django-celery database scheduler it is 5 seconds
 because the schedule may be changed externally, and so it must take
 changes to the schedule into account.
 
-Also when running celery beat embedded (:option:`-B`) on Jython as a thread
-the max interval is overridden and set to 1 so that it's possible
-to shut down in a timely manner.
+Also when running celery beat embedded (:option:`-B <celery worker -B>`)
+on Jython as a thread the max interval is overridden and set to 1 so
+that it's possible to shut down in a timely manner.

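For completeness, a hedged sketch of the annotation-object pattern shown in
the ``task_annotations`` hunk above (the second tuple element is a literal
annotations dict; its contents here are illustrative):

.. code-block:: python

    class MyAnnotate(object):

        def annotate(self, task):
            if task.name.startswith('tasks.'):
                return {'rate_limit': '10/s'}

    app.conf.task_annotations = (MyAnnotate(), {'*': {'time_limit': 10}})
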
+ 11 - 10
docs/contributing.rst

@@ -417,7 +417,7 @@ to upstream changes:
     $ git fetch upstream
 
 If you need to pull in new changes from upstream you should
-always use the :option:`--rebase` option to ``git pull``:
+always use the ``--rebase`` option to ``git pull``:
 
 .. code-block:: console
 
@@ -463,7 +463,7 @@ dependencies, so install these next:
     $ pip install -U -r requirements/default.txt
 
 After installing the dependencies required, you can now execute
-the test suite by calling ``nosetests``:
+the test suite by calling :pypi:`nosetests <nose>`:
 
 .. code-block:: console
 
@@ -471,19 +471,19 @@ the test suite by calling ``nosetests``:
 
 Some useful options to :command:`nosetests` are:
 
-* :option:`-x`
+* ``-x``
 
     Stop running the tests at the first test that fails.
 
-* :option:`-s`
+* ``-s``
 
     Don't capture output
 
-* :option:`--nologcapture`
+* ``--nologcapture``
 
     Don't capture log output.
 
-* :option:`-v`
+* ``-v``
 
     Run with verbose output.
 
@@ -546,7 +546,7 @@ The coverage XML output will then be located at :file:`coverage.xml`
 Running the tests on all supported Python versions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-There is a ``tox`` configuration file in the top directory of the
+There is a :pypi:`tox` configuration file in the top directory of the
 distribution.
 
 To run the tests for all supported Python versions simply execute:
@@ -555,8 +555,7 @@ To run the tests for all supported Python versions simply execute:
 
     $ tox
 
-If you only want to test specific Python versions use the :option:`-e`
-option:
+Use the ``tox -e`` option if you only want to test specific Python versions:
 
 .. code-block:: console
 
@@ -1091,7 +1090,9 @@ and make a new version tag:
 Releasing
 ---------
 
-Commands to make a new public stable release::
+Commands to make a new public stable release:
+
+.. code-block:: console
 
     $ make distcheck  # checks pep8, autodoc index, runs tests and more
     $ make dist  # NOTE: Runs git clean -xdf and removes files not in the repo.

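For example, to run the suite against a single interpreter (the environment
name is illustrative):

.. code-block:: console

    $ tox -e py27
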
+ 9 - 7
docs/faq.rst

@@ -387,8 +387,9 @@ you have to use the AMQP API or the :program:`celery amqp` utility:
 
 The number 1753 is the number of messages deleted.
 
-You can also start :mod:`~celery.bin.worker` with the
-:option:`--purge` argument, to purge messages when the worker starts.
+You can also start the worker with the
+:option:`--purge <celery worker --purge>` option enabled to purge messages
+when the worker starts.
 
 .. _faq-messages-left-after-purge:
 
@@ -504,7 +505,7 @@ important that you are aware of the common pitfalls.
 
 * Events.
 
-Running :mod:`~celery.bin.worker` with the :option:`-E`/:option:`--events`
+Running :mod:`~celery.bin.worker` with the :option:`-E <celery worker -E>`
 option will send messages for events happening inside of the worker.
 
 Events should only be enabled if you have an active monitor consuming them,
@@ -527,7 +528,7 @@ If you don't use the results for a task, make sure you set the
 
     @app.task(ignore_result=True)
     def mytask():
-        …
+        pass
 
     class MyTask(Task):
         ignore_result = True
@@ -703,7 +704,8 @@ so if you have more than one worker with the same host name, the
 control commands will be received in round-robin between them.
 
 To work around this you can explicitly set the nodename for every worker
-using the :option:`-n` argument to :mod:`~celery.bin.worker`:
+using the :option:`-n <celery worker -n>` argument to
+:mod:`~celery.bin.worker`:
 
 .. code-block:: console
 
@@ -754,7 +756,7 @@ create a new schedule subclass and override
     class my_schedule(schedule):
 
         def is_due(self, last_run_at):
-            return …
+            return run_now, next_time_to_check
 
 .. _faq-task-priorities:
 
@@ -837,7 +839,7 @@ How can I safely shut down the worker?
 executing jobs and shut down as soon as possible. No tasks should be lost.
 
 You should never stop :mod:`~celery.bin.worker` with the :sig:`KILL` signal
-(:option:`-9`), unless you've tried :sig:`TERM` a few times and waited a few
+(``kill -9``), unless you've tried :sig:`TERM` a few times and waited a few
 minutes to let it get a chance to shut down.
 
 Also make sure you kill the main worker process, not its child processes.

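A concrete sketch of the shutdown sequence described above (the pid is
illustrative, and must be that of the main worker process):

.. code-block:: console

    $ kill -TERM 12345    # warm shutdown: finish executing jobs, then exit
    $ kill -9 12345       # last resort, only after TERM has had time to work
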
+ 1 - 1
docs/getting-started/brokers/rabbitmq.rst

@@ -152,7 +152,7 @@ To start the server:
 
     $ sudo rabbitmq-server
 
-you can also run it in the background by adding the :option:`-detached` option
+you can also run it in the background by adding the ``-detached`` option
 (note: only one dash):
 
 .. code-block:: console

+ 35 - 17
docs/getting-started/brokers/redis.rst

@@ -23,23 +23,31 @@ Configuration
 =============
 
 Configuration is easy, just configure the location of
-your Redis database::
+your Redis database:
 
-    broker_url = 'redis://localhost:6379/0'
+.. code-block:: python
 
-Where the URL is in the format of::
+    app.conf.broker_url = 'redis://localhost:6379/0'
+
+Where the URL is in the format of:
+
+.. code-block:: text
 
     redis://:password@hostname:port/db_number
 
 all fields after the scheme are optional, and will default to localhost on port 6379,
 using database 0.
 
-If a unix socket connection should be used, the URL needs to be in the format::
+If a unix socket connection should be used, the URL needs to be in the format:
+
+.. code-block:: text
 
     redis+socket:///path/to/redis.sock
 
 Specifying a different database number when using a unix socket is possible
-by adding the ``virtual_host`` parameter to the URL::
+by adding the ``virtual_host`` parameter to the URL:
+
+.. code-block:: text
 
     redis+socket:///path/to/redis.sock?virtual_host=db_number
 
@@ -52,9 +60,11 @@ The visibility timeout defines the number of seconds to wait
 for the worker to acknowledge the task before the message is redelivered
 to another worker.  Be sure to see :ref:`redis-caveats` below.
 
-This option is set via the :setting:`broker_transport_options` setting::
+This option is set via the :setting:`broker_transport_options` setting:
+
+.. code-block:: python
 
-    broker_transport_options = {'visibility_timeout': 3600}  # 1 hour.
+    app.conf.broker_transport_options = {'visibility_timeout': 3600}  # 1 hour.
 
 The default visibility timeout for Redis is 1 hour.
 
@@ -66,7 +76,7 @@ Results
 If you also want to store the state and return values of tasks in Redis,
 you should configure these settings::
 
-    result_backend = 'redis://localhost:6379/0'
+    app.conf.result_backend = 'redis://localhost:6379/0'
 
 For a complete list of options supported by the Redis result backend, see
 :ref:`conf-redis-result-backend`
@@ -84,9 +94,11 @@ Fanout prefix
 Broadcast messages will be seen by all virtual hosts by default.
 
 You have to set a transport option to prefix the messages so that
-they will only be received by the active virtual host::
+they will only be received by the active virtual host:
 
-    broker_transport_options = {'fanout_prefix': True}
+.. code-block:: python
+
+    app.conf.broker_transport_options = {'fanout_prefix': True}
 
 Note that you will not be able to communicate with workers running older
 versions or workers that does not have this setting enabled.
@@ -102,9 +114,11 @@ Fanout patterns
 Workers will receive all task related events by default.
 
 To avoid this you must set the ``fanout_patterns`` fanout option so that
-the workers may only subscribe to worker related events::
+the workers may only subscribe to worker related events:
+
+.. code-block:: python
 
-    broker_transport_options = {'fanout_patterns': True}
+    app.conf.broker_transport_options = {'fanout_patterns': True}
 
 Note that this change is backward incompatible so all workers in the
 cluster must have this option enabled, or else they will not be able to
@@ -134,9 +148,11 @@ Periodic tasks will not be affected by the visibility timeout,
 as this is a concept separate from ETA/countdown.
 
 You can increase this timeout by configuring a transport option
-with the same name::
+with the same name:
 
-    broker_transport_options = {'visibility_timeout': 43200}
+.. code-block:: python
+
+    app.conf.broker_transport_options = {'visibility_timeout': 43200}
 
 The value must be an int describing the number of seconds.
 
@@ -145,10 +161,12 @@ Key eviction
 
 Redis may evict keys from the database in some situations
 
-If you experience an error like::
+If you experience an error like:
+
+.. code-block:: text
 
-    InconsistencyError, Probably the key ('_kombu.binding.celery') has been
+    InconsistencyError: Probably the key ('_kombu.binding.celery') has been
     removed from the Redis database.
 
-you may want to configure the redis-server to not evict keys by setting
+then you may want to configure the redis-server to not evict keys by setting
 the ``timeout`` parameter to 0 in the redis configuration file.

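Pulling the hunks above together, a minimal Redis broker configuration might
look like this (host, database number, and timeout value are illustrative):

.. code-block:: python

    from celery import Celery

    app = Celery('tasks', broker='redis://localhost:6379/0')
    app.conf.result_backend = 'redis://localhost:6379/0'
    app.conf.broker_transport_options = {
        'visibility_timeout': 43200,  # 12 hours, for long ETA/countdown tasks
        'fanout_prefix': True,        # isolate broadcasts per virtual host
        'fanout_patterns': True,      # workers subscribe to worker events only
    }
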
+ 33 - 17
docs/getting-started/first-steps-with-celery.rst

@@ -226,12 +226,16 @@ built-in result backends to choose from: `SQLAlchemy`_/`Django`_ ORM,
 For this example you will use the `rpc` result backend, which sends states
 back as transient messages.  The backend is specified via the ``backend`` argument to
 :class:`@Celery`, (or via the :setting:`task_result_backend` setting if
-you choose to use a configuration module)::
+you choose to use a configuration module):
+
+.. code-block:: python
 
     app = Celery('tasks', backend='rpc://', broker='amqp://')
 
 Or if you want to use Redis as the result backend, but still use RabbitMQ as
-the message broker (a popular combination)::
+the message broker (a popular combination):
+
+.. code-block:: python
 
     app = Celery('tasks', backend='redis://localhost', broker='amqp://')
 
@@ -239,31 +243,41 @@ To read more about result backends please see :ref:`task-result-backends`.
 
 Now with the result backend configured, let's call the task again.
 This time you'll hold on to the :class:`~@AsyncResult` instance returned
-when you call a task::
+when you call a task:
+
+.. code-block:: pycon
 
     >>> result = add.delay(4, 4)
 
 The :meth:`~@AsyncResult.ready` method returns whether the task
-has finished processing or not::
+has finished processing or not:
+
+.. code-block:: pycon
 
     >>> result.ready()
     False
 
 You can wait for the result to complete, but this is rarely used
-since it turns the asynchronous call into a synchronous one::
+since it turns the asynchronous call into a synchronous one:
+
+.. code-block:: pycon
 
     >>> result.get(timeout=1)
     8
 
 In case the task raised an exception, :meth:`~@AsyncResult.get` will
 re-raise the exception, but you can override this by specifying
-the ``propagate`` argument::
+the ``propagate`` argument:
+
+.. code-block:: pycon
 
     >>> result.get(propagate=False)
 
 
 If the task raised an exception you can also gain access to the
-original traceback::
+original traceback:
+
+.. code-block:: pycon
 
     >>> result.traceback
@@ -407,7 +421,8 @@ Worker does not start: Permission Error
 
 - If you're using Debian, Ubuntu or other Debian-based distributions:
 
-    Debian recently renamed the ``/dev/shm`` special file to ``/run/shm``.
+    Debian recently renamed the :file:`/dev/shm` special file
+    to :file:`/run/shm`.
 
     A simple workaround is to create a symbolic link:
 
@@ -417,15 +432,16 @@ Worker does not start: Permission Error
 
 - Others:
 
-    If you provide any of the :option:`--pidfile`, :option:`--logfile` or
-    ``--statedb`` arguments, then you must make sure that they
-    point to a file/directory that is writable and readable by the
-    user starting the worker.
+    If you provide any of the :option:`--pidfile <celery worker --pidfile>`,
+    :option:`--logfile <celery worker --logfile>` or
+    :option:`--statedb <celery worker --statedb>` arguments, then you must
+    make sure that they point to a file/directory that is writable and
+    readable by the user starting the worker.
 
 Result backend does not work or tasks are always in ``PENDING`` state.
 ----------------------------------------------------------------------
 
-All tasks are ``PENDING`` by default, so the state would have been
+All tasks are :state:`PENDING` by default, so the state would have been
 better named "unknown".  Celery does not update any state when a task
 is sent, and any task with no history is assumed to be pending (you know
 the task id after all).
@@ -445,8 +461,8 @@ the task id after all).
     An old worker that is not configured with the expected result backend
     may be running and is hijacking the tasks.
 
-    The `--pidfile` argument can be set to an absolute path to make sure
-    this doesn't happen.
+    The :option:`--pidfile <celery worker --pidfile>` argument can be set to
+    an absolute path to make sure this doesn't happen.
 
 4) Make sure the client is configured with the right backend.
 
@@ -454,7 +470,7 @@ the task id after all).
     than the worker, you will not be able to receive the result,
     so make sure the backend is correct by inspecting it:
 
-    .. code-block:: python
+    .. code-block:: pycon
 
-        >>> result = task.delay()
+        >>> result = task.delay()
         >>> print(result.backend)

+ 3 - 3
docs/getting-started/introduction.rst

@@ -204,9 +204,9 @@ Features
 
         - **Resource Leak Protection**
 
-            The :option:`--maxtasksperchild` option is used for user tasks
-            leaking resources, like memory or file descriptors, that
-            are simply out of your control.
+            The :option:`--maxtasksperchild <celery worker --maxtasksperchild>`
+            option is used for user tasks leaking resources, like memory or
+            file descriptors, that are simply out of your control.
 
             :ref:`Read more… <worker-maxtasksperchild>`.
 

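For example, to recycle each pool process after 100 tasks (a hedged
illustration; the app module name is assumed):

.. code-block:: console

    $ celery -A proj worker --maxtasksperchild=100
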
+ 12 - 11
docs/getting-started/next-steps.rst

@@ -94,7 +94,7 @@ When the worker starts you should see a banner and some messages::
 
 -- The *broker* is the URL you specified in the broker argument in our ``celery``
 module, you can also specify a different broker on the command-line by using
-the :option:`-b` option.
+the :option:`-b <celery -b>` option.
 
 -- *Concurrency* is the number of prefork worker process used
 to process your tasks concurrently, when all of these are busy doing work
@@ -102,7 +102,8 @@ new tasks will have to wait for one of the tasks to finish before
 it can be processed.
 
 The default concurrency number is the number of CPU's on that machine
-(including cores), you can specify a custom number using :option:`-c` option.
+(including cores), you can specify a custom number using
+the :option:`celery worker -c` option.
 There is no recommended value, as the optimal number depends on a number of
 factors, but if your tasks are mostly I/O-bound then you can try to increase
 it, experimentation has shown that adding more than twice the number
@@ -126,7 +127,7 @@ and prioritization, all described in the :ref:`Routing Guide
 <guide-routing>`.
 
 You can get a complete list of command-line arguments
-by passing in the `--help` flag:
+by passing in the :option:`--help <celery --help>` flag:
 
 .. code-block:: console
 
@@ -217,16 +218,16 @@ reference.
 
 .. _app-argument:
 
-About the :option:`--app` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+About the :option:`--app <celery --app>` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The :option:`--app` argument specifies the Celery app instance to use,
-it must be in the form of ``module.path:attribute``
+The :option:`--app <celery --app>` argument specifies the Celery app instance
+to use, it must be in the form of ``module.path:attribute``
 
 But it also supports a shortcut form If only a package name is specified,
 where it'll try to search for the app instance, in the following order:
 
-With ``--app=proj``:
+With :option:`--app=proj <celery --app>`:
 
 1) an attribute named ``proj.app``, or
 2) an attribute named ``proj.celery``, or
@@ -625,7 +626,7 @@ with the ``queue`` argument to ``apply_async``:
     >>> add.apply_async((2, 2), queue='hipri')
 
 You can then make a worker consume from this queue by
-specifying the :option:`-Q` option:
+specifying the :option:`celery worker -Q` option:
 
 .. code-block:: console
 
@@ -662,8 +663,8 @@ This is implemented by using broadcast messaging, so all remote
 control commands are received by every worker in the cluster.
 
 You can also specify one or more workers to act on the request
-using the :option:`--destination` option, which is a comma separated
-list of worker host names:
+using the :option:`--destination <celery inspect --destination>` option,
+which is a comma separated list of worker host names:
 
 .. code-block:: console
 

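The two :option:`--app <celery --app>` forms described above, side by side
(module and attribute names are illustrative):

.. code-block:: console

    # Full module.path:attribute form:
    $ celery --app=proj.celery:app worker -l INFO

    # Package shortcut form; searches proj.app, proj.celery, and so on:
    $ celery --app=proj worker -l INFO
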
+ 21 - 11
docs/history/changelog-1.0.rst

@@ -1395,9 +1395,11 @@ News
   restarted if it crashes). To use this start the worker with the
   --supervised` option (or alternatively `-S`).
 
-* views.apply: View calling a task. Example
+* views.apply: View calling a task.
 
-    ::
+    Example:
+
+    .. code-block:: text
 
         http://e.com/celery/apply/task_name/arg1/arg2//?kwarg1=a&kwarg2=b
 
@@ -1567,13 +1569,13 @@ arguments, so be sure to flush your task queue before you upgrade.
   `celery.task.apply_async` and `celery.Task.apply_async`.
 
   This also means the AMQP configuration has changed. Some settings has
-  been renamed, while others are new::
+  been renamed, while others are new:
 
-        CELERY_AMQP_EXCHANGE
-        CELERY_AMQP_PUBLISHER_ROUTING_KEY
-        CELERY_AMQP_CONSUMER_ROUTING_KEY
-        CELERY_AMQP_CONSUMER_QUEUE
-        CELERY_AMQP_EXCHANGE_TYPE
+    - ``CELERY_AMQP_EXCHANGE``
+    - ``CELERY_AMQP_PUBLISHER_ROUTING_KEY``
+    - ``CELERY_AMQP_CONSUMER_ROUTING_KEY``
+    - ``CELERY_AMQP_CONSUMER_QUEUE``
+    - ``CELERY_AMQP_EXCHANGE_TYPE``
 
   See the entry :ref:`faq-task-routing` in the
   :ref:`FAQ <faq>` for more information.
@@ -1734,7 +1736,11 @@ arguments, so be sure to flush your task queue before you upgrade.
 
 * Refactored the task metadata cache and database backends, and added
   a new backend for Tokyo Tyrant. You can set the backend in your django
-  settings file. E.g.::
+  settings file.
+
+    Example:
+
+    .. code-block:: python
 
         CELERY_RESULT_BACKEND = 'database'; # Uses the database
         CELERY_RESULT_BACKEND = 'cache'; # Uses the django cache framework
@@ -1828,13 +1834,17 @@ arguments, so be sure to flush your task queue before you upgrade.
 
         >>> url(r'^celery/$', include('celery.urls'))
 
-  then visiting the following url,::
+  then visiting the following url:
+
+  .. code-block:: text
 
         http://mysite/celery/$task_id/done/
 
   this will return a JSON dictionary like e.g:
 
-        >>> {'task': {'id': $task_id, 'executed': true}}
+  .. code-block:: json
+
+        {"task": {"id": "TASK_ID", "executed": true}}
 
 * `delay_task` now returns string id, not `uuid.UUID` instance.
 

+ 166 - 92
docs/history/changelog-2.0.rst

@@ -42,7 +42,9 @@ Fixes
   precedence over values defined in :setting:`CELERY_QUEUES` when merging
   the two.
 
-    With the follow settings::
+    With the follow settings:
+
+    .. code-block:: python
 
         CELERY_QUEUES = {'cpubound': {'exchange': 'cpubound',
                                       'routing_key': 'cpubound'}}
@@ -51,7 +53,9 @@ Fixes
                                        'routing_key': 'tasks.add',
                                        'serializer': 'json'}}
 
-    The final routing options for `tasks.add` will become::
+    The final routing options for `tasks.add` will become:
+
+    .. code-block:: python
 
         {'exchange': 'cpubound',
          'routing_key': 'tasks.add',
@@ -201,9 +205,11 @@ Documentation
 
 * Can now define a white list of errors to send error emails for.
 
-    Example::
+    Example:
 
-        CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError')
+    .. code-block:: python
+
+        CELERY_TASK_ERROR_WHITELIST = ('myapp.MalformedInputError',)
 
     See issue #153.
 
@@ -215,7 +221,9 @@ Documentation
 
 * Added :class:`celery.task.control.inspect`: Inspects a running worker.
 
-    Examples::
+    Examples:
+
+    .. code-block:: pycon
 
         # Inspect a single worker
         >>> i = inspect('myworker.example.com')
@@ -337,7 +345,9 @@ Documentation
 
 * :setting:`CELERY_ROUTES` was broken if set to a single dict.
 
-    This example in the docs should now work again::
+    This example in the docs should now work again:
+
+    .. code-block:: python
 
         CELERY_ROUTES = {'feed.tasks.import_feed': 'feeds'}
 
@@ -348,7 +358,9 @@ Documentation
     Dumps information about the worker, like pool process ids, and
     total number of tasks executed by type.
 
-    Example reply::
+    Example reply:
+
+    .. code-block:: python
 
         [{'worker.local':
              'total': {'tasks.sleeptask': 6},
@@ -365,7 +377,9 @@ Documentation
     are arguments that is not JSON encodable. If you know
     the arguments are JSON safe, you can pass the argument `safe=True`.
 
-    Example reply::
+    Example reply:
+
+    .. code-block:: pycon
 
         >>> broadcast('dump_active', arguments={'safe': False}, reply=True)
         [{'worker.local': [
@@ -426,19 +440,25 @@ Upgrading for Django-users
 
 Django integration has been moved to a separate package: `django-celery`_.
 
-* To upgrade you need to install the `django-celery`_ module and change::
+* To upgrade you need to install the `django-celery`_ module and change:
+
+  .. code-block:: python
 
     INSTALLED_APPS = 'celery'
 
-  to::
+  to:
+
+  .. code-block:: python
 
     INSTALLED_APPS = 'djcelery'
 
 * If you use `mod_wsgi` you need to add the following line to your `.wsgi`
-  file::
+  file:
+
+    .. code-block:: python
 
-    import os
-    os.environ['CELERY_LOADER'] = 'django'
+        import os
+        os.environ['CELERY_LOADER'] = 'django'
 
 * The following modules has been moved to `django-celery`_:
 
@@ -500,10 +520,12 @@ See `SQLAlchemy Connection Strings`_ for more information about connection
 strings.
 
 To specify additional SQLAlchemy database engine options you can use
-the :setting:`CELERY_RESULT_ENGINE_OPTIONS` setting::
+the :setting:`CELERY_RESULT_ENGINE_OPTIONS` setting:
 
-    # echo enables verbose logging from SQLAlchemy.
-    CELERY_RESULT_ENGINE_OPTIONS = {'echo': True}
+    .. code-block:: python
+
+        # echo enables verbose logging from SQLAlchemy.
+        CELERY_RESULT_ENGINE_OPTIONS = {'echo': True}
 
 .. _`SQLAlchemy`:
     http://www.sqlalchemy.org
@@ -520,9 +542,11 @@ Cache result backend
 ~~~~~~~~~~~~~~~~~~~~
 
 The cache result backend is no longer using the Django cache framework,
-but it supports mostly the same configuration syntax::
+but it supports mostly the same configuration syntax:
+
+    .. code-block:: python
 
-    CELERY_CACHE_BACKEND = 'memcached://A.example.com:11211;B.example.com'
+        CELERY_CACHE_BACKEND = 'memcached://A.example.com:11211;B.example.com'
 
 To use the cache backend you must either have the `pylibmc`_ or
 `python-memcached`_ library installed, of which the former is regarded
@@ -548,7 +572,9 @@ Backward incompatible changes
     working configuration.
 
     Also this makes it possible to use the client side of celery without being
-    configured::
+    configured:
+
+    .. code-block:: pycon
 
         >>> from carrot.connection import BrokerConnection
         >>> conn = BrokerConnection('localhost', 'guest', 'guest', '/')
@@ -579,11 +605,15 @@ Backward incompatible changes
   (as scheduled by the :ref:`deprecation-timeline`):
 
     Assuming the implicit `Loader` class name is no longer supported,
-    if you use e.g.::
+    if you use e.g.:
+
+    .. code-block:: python
 
         CELERY_LOADER = 'myapp.loaders'
 
-    You need to include the loader class name, like this::
+    You need to include the loader class name, like this:
+
+    .. code-block:: python
 
         CELERY_LOADER = 'myapp.loaders.Loader'
 
@@ -608,11 +638,15 @@ Backward incompatible changes
     cPickle is broken in Python <= 2.5.
 
     It unsafely and incorrectly uses relative instead of absolute imports,
-    so e.g.::
+    so e.g.:
+
+    .. code-block:: python
 
           exceptions.KeyError
 
-    becomes::
+    becomes:
+
+    .. code-block:: python
 
           celery.exceptions.KeyError
 
@@ -688,13 +722,17 @@ News
   forces termination.
 
 * Added support for using complex crontab-expressions in periodic tasks. For
-  example, you can now use::
+  example, you can now use:
+
+    .. code-block:: pycon
 
-    >>> crontab(minute='*/15')
+        >>> crontab(minute='*/15')
 
-  or even::
+    or even:
 
-    >>> crontab(minute='*/30', hour='8-17,1-2', day_of_week='thu-fri')
+    .. code-block:: pycon
+
+        >>> crontab(minute='*/30', hour='8-17,1-2', day_of_week='thu-fri')
 
   See :ref:`guide-beat`.
 
@@ -733,7 +771,9 @@ News
     You can disable this using the :setting:`CELERY_CREATE_MISSING_QUEUES`
     setting.
 
-    The missing queues are created with the following options::
+    The missing queues are created with the following options:
+
+    .. code-block:: python
 
         CELERY_QUEUES[name] = {'exchange': name,
                                'exchange_type': 'direct',
@@ -838,19 +878,29 @@ News
     is then merged with the found route settings, where the routers settings
     have priority.
 
-    Example if :func:`~celery.execute.apply_async` has these arguments::
+    Example if :func:`~celery.execute.apply_async` has these arguments:
+
+    .. code-block:: pycon
 
        >>> Task.apply_async(immediate=False, exchange='video',
        ...                  routing_key='video.compress')
 
-    and a router returns::
+    and a router returns:
+
+    .. code-block:: python
 
         {'immediate': True,
          'exchange': 'urgent'}
 
-    the final message options will be::
+    the final message options will be:
 
-        immediate=True, exchange='urgent', routing_key='video.compress'
+    .. code-block:: pycon
+
+        >>> task.apply_async(
+        ...    immediate=True,
+        ...    exchange='urgent',
+        ...    routing_key='video.compress',
+        ... )
 
     (and any default message options defined in the
     :class:`~celery.task.base.Task` class)
@@ -863,7 +913,7 @@ News
    :meth:`~celery.task.base.Task.on_failure` as einfo keyword argument.
 
 * Worker: Added :setting:`CELERYD_MAX_TASKS_PER_CHILD` /
-  :option:`--maxtasksperchild`
+  :option:`celery worker --maxtasksperchild`
 
     Defines the maximum number of tasks a pool worker can process before
     the process is terminated and replaced by a new one.
@@ -879,8 +929,8 @@ News
 * New signal: :signal:`~celery.signals.worker_process_init`: Sent inside the
   pool worker process at init.
 
-* Worker: :option:`-Q` option: Ability to specify list of queues to use,
-  disabling other configured queues.
+* Worker: :option:`celery worker -Q` option: Ability to specify list of queues
+  to use, disabling other configured queues.
 
     For example, if :setting:`CELERY_QUEUES` defines four
     queues: `image`, `video`, `data` and `default`, the following
@@ -893,11 +943,13 @@ News
 
 * Worker: New return value for the `revoke` control command:
 
-    Now returns::
+    Now returns:
+
+    .. code-block:: python
 
         {'ok': 'task $id revoked'}
 
-    instead of `True`.
+    instead of :const:`True`.
 
 * Worker: Can now enable/disable events using remote control
 
@@ -947,62 +999,84 @@ News
 
     Some examples:
 
-    .. code-block:: console
+    - Advanced example with 10 workers:
+
+        * Three of the workers process the images and video queue
+        * Two of the workers process the data queue with loglevel DEBUG
+        * The rest process the ``default`` queue.
+
+        .. code-block:: console
+
+            $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data -Q default -L:4,5 DEBUG
+
+    - Get commands to start 10 workers, with 3 processes each
+
+        .. code-block:: console
+
+            $ celeryd-multi start 3 -c 3
+            celeryd -n celeryd1.myhost -c 3
+            celeryd -n celeryd2.myhost -c 3
+            celeryd -n celeryd3.myhost -c 3
+
+    - Start 3 named workers
+
+        .. code-block:: console
+
+            $ celeryd-multi start image video data -c 3
+            celeryd -n image.myhost -c 3
+            celeryd -n video.myhost -c 3
+            celeryd -n data.myhost -c 3
+
+    - Specify custom hostname
+
+        .. code-block:: console
+
+            $ celeryd-multi start 2 -n worker.example.com -c 3
+            celeryd -n celeryd1.worker.example.com -c 3
+            celeryd -n celeryd2.worker.example.com -c 3
+
+        Additional options are added to each ``celeryd``,
+        but you can also modify the options for ranges of or single workers
+
+    - 3 workers: Two with 3 processes, and one with 10 processes.
+
+        .. code-block:: console
+
+            $ celeryd-multi start 3 -c 3 -c:1 10
+            celeryd -n celeryd1.myhost -c 10
+            celeryd -n celeryd2.myhost -c 3
+            celeryd -n celeryd3.myhost -c 3
+
+    - Can also specify options for named workers
+
+        .. code-block:: console
+
+            $ celeryd-multi start image video data -c 3 -c:image 10
+            celeryd -n image.myhost -c 10
+            celeryd -n video.myhost -c 3
+            celeryd -n data.myhost -c 3
+
+    - Ranges and lists of workers in options are also allowed:
+      (``-c:1-3`` can also be written as ``-c:1,2,3``)
+
+        .. code-block:: console
+
+            $ celeryd-multi start 5 -c 3  -c:1-3 10
+            celeryd-multi -n celeryd1.myhost -c 10
+            celeryd-multi -n celeryd2.myhost -c 10
+            celeryd-multi -n celeryd3.myhost -c 10
+            celeryd-multi -n celeryd4.myhost -c 3
+            celeryd-multi -n celeryd5.myhost -c 3
+
+    - Lists also work with named workers:
+
+        .. code-block:: console
 
-        # Advanced example with 10 workers:
-        #   * Three of the workers processes the images and video queue
-        #   * Two of the workers processes the data queue with loglevel DEBUG
-        #   * the rest processes the default' queue.
-        $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5:data
-            -Q default -L:4,5 DEBUG
-
-        # get commands to start 10 workers, with 3 processes each
-        $ celeryd-multi start 3 -c 3
-        celeryd -n celeryd1.myhost -c 3
-        celeryd -n celeryd2.myhost -c 3
-        celeryd -n celeryd3.myhost -c 3
-
-        # start 3 named workers
-        $ celeryd-multi start image video data -c 3
-        celeryd -n image.myhost -c 3
-        celeryd -n video.myhost -c 3
-        celeryd -n data.myhost -c 3
-
-        # specify custom hostname
-        $ celeryd-multi start 2 -n worker.example.com -c 3
-        celeryd -n celeryd1.worker.example.com -c 3
-        celeryd -n celeryd2.worker.example.com -c 3
-
-        # Additionl options are added to each celeryd',
-        # but you can also modify the options for ranges of or single workers
-
-        # 3 workers: Two with 3 processes, and one with 10 processes.
-        $ celeryd-multi start 3 -c 3 -c:1 10
-        celeryd -n celeryd1.myhost -c 10
-        celeryd -n celeryd2.myhost -c 3
-        celeryd -n celeryd3.myhost -c 3
-
-        # can also specify options for named workers
-        $ celeryd-multi start image video data -c 3 -c:image 10
-        celeryd -n image.myhost -c 10
-        celeryd -n video.myhost -c 3
-        celeryd -n data.myhost -c 3
-
-        # ranges and lists of workers in options is also allowed:
-        # (-c:1-3 can also be written as -c:1,2,3)
-        $ celeryd-multi start 5 -c 3  -c:1-3 10
-        celeryd-multi -n celeryd1.myhost -c 10
-        celeryd-multi -n celeryd2.myhost -c 10
-        celeryd-multi -n celeryd3.myhost -c 10
-        celeryd-multi -n celeryd4.myhost -c 3
-        celeryd-multi -n celeryd5.myhost -c 3
-
-        # lists also works with named workers
-        $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
-        celeryd-multi -n foo.myhost -c 10
-        celeryd-multi -n bar.myhost -c 10
-        celeryd-multi -n baz.myhost -c 10
-        celeryd-multi -n xuzzy.myhost -c 3
+            $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
+            celeryd -n foo.myhost -c 10
+            celeryd -n bar.myhost -c 10
+            celeryd -n baz.myhost -c 10
+            celeryd -n xuzzy.myhost -c 3
 
 * The worker now calls the result backend's `process_cleanup` method
   *after* task execution instead of before.

+ 15 - 12
docs/history/changelog-2.1.rst

@@ -366,15 +366,15 @@ News
 
     New command-line arguments to celeryev:
 
-        * :option:`-c|--camera`: Snapshot camera class to use.
-        * :option:`--logfile|-f`: Log file
-        * :option:`--loglevel|-l`: Log level
-        * :option:`--maxrate|-r`: Shutter rate limit.
-        * :option:`--freq|-F`: Shutter frequency
+        * :option:`celery events --camera`: Snapshot camera class to use.
+        * :option:`celery events --logfile`: Log file
+        * :option:`celery events --loglevel`: Log level
+        * :option:`celery events --maxrate`: Shutter rate limit.
+        * :option:`celery events --freq`: Shutter frequency
 
-    The :option:`--camera` argument is the name of a class used to take
-    snapshots with. It must support the interface defined by
-    :class:`celery.events.snapshot.Polaroid`.
+    The :option:`--camera <celery events --camera>` argument is the name
+    of a class used to take snapshots with. It must support the interface
+    defined by :class:`celery.events.snapshot.Polaroid`.
 
     Shutter frequency controls how often the camera thread wakes up,
     while the rate limit controls how often it will actually take
@@ -389,7 +389,7 @@ News
     anything new.
 
     The rate limit is off by default, which means it will take a snapshot
-    for every :option:`--frequency` seconds.
+    for every :option:`--frequency <celery events --frequency>` seconds.
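+
+    For example (``myapp.Camera`` and the rate shown here are illustrative):
+
+    .. code-block:: console
+
+        $ celery events --camera=myapp.Camera --frequency=2.0 --maxrate=10/m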
 
 * :func:`~celery.task.control.broadcast`: Added callback argument; this can be
   used to process replies immediately as they arrive.
@@ -458,8 +458,10 @@ News
             fileConfig('logging.conf')
 
     If there are no receivers for this signal, the logging subsystem
-    will be configured using the :option:`--loglevel`/:option:`--logfile`
-    argument, this will be used for *all defined loggers*.
+    will be configured using the
+    :option:`--loglevel <celery worker --loglevel>`/
+    :option:`--logfile <celery worker --logfile>`
+    arguments; this will be used for *all defined loggers*.
 
     Remember that the worker also redirects stdout and stderr
     to the celery logger, so if you manually configure logging
@@ -476,7 +478,8 @@ News
             stdouts = logging.getLogger('mystdoutslogger')
             log.redirect_stdouts_to_logger(stdouts, loglevel=logging.WARNING)
 
-* worker Added command line option :option:`-I`/:option:`--include`:
+* worker: Added command line option
+  :option:`--include <celery worker --include>`:
 
     A comma-separated list of (task) modules to be imported.
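+
+    For example (the module names here are illustrative):
+
+    .. code-block:: console
+
+        $ celery worker --include=proj.tasks,proj.periodic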
 

+ 16 - 13
docs/history/changelog-2.2.rst

@@ -20,9 +20,10 @@ Security Fixes
 --------------
 
 * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
-  :program:`celery multi`, :program:`celeryd_detach`,
-  :program:`celery beat` and :program:`celery events` were used.
+  real id's when the :option:`--uid <celery --uid>`/
+  :option:`--gid <celery --gid>` arguments to :program:`celery multi`,
+  :program:`celeryd_detach`, :program:`celery beat` and
+  :program:`celery events` were used.
 
   This means privileges weren't properly dropped, and that it would
   be possible to regain supervisor privileges later.
@@ -46,7 +47,7 @@ Security Fixes
 
 * Redis result backend now works with Redis 2.4.4.
 
-* multi: The :option:`--gid` option now works correctly.
+* multi: The :option:`--gid <celery --gid>` option now works correctly.
 
 * worker: Retry wrongfully used the repr of the traceback instead
   of the string representation.
@@ -361,7 +362,7 @@ Fixes
   instances, not classes.
 
 * :program:`celeryev` did not create a pidfile even though the
-  :option:`--pidfile` argument was set.
+  :option:`--pidfile <celery events --pidfile>` argument was set.
 
 * Task logger format was no longer used. (Issue #317).
 
@@ -378,7 +379,7 @@ Fixes
     structure: the exchange key is now a dictionary containing the
     exchange declaration in full.
 
-* The :option:`-Q` option to :program:`celery worker` removed unused queue
+* The :option:`celery worker -Q` option removed unused queue
   declarations, so routing of tasks could fail.
 
     Queues are no longer removed, but rather `app.amqp.queues.consume_from()`
@@ -569,8 +570,8 @@ Important Notes
 
     This is great news for I/O-bound tasks!
 
-    To change pool implementations you use the :option:`-P|--pool` argument
-    to :program:`celery worker`, or globally using the
+    To change pool implementations you use the :option:`celery worker --pool`
+    argument, or globally using the
     :setting:`CELERYD_POOL` setting.  This can be the full name of a class,
     or one of the following aliases: `processes`, `eventlet`, `gevent`.
 
@@ -610,8 +611,10 @@ Important Notes
 
 * worker: Now supports Autoscaling of child worker processes.
 
-    The :option:`--autoscale` option can be used to configure the minimum
-    and maximum number of child worker processes::
+    The :option:`--autoscale <celery worker --autoscale>` option can be used
+    to configure the minimum and maximum number of child worker processes:
+
+    .. code-block:: text
 
         --autoscale=AUTOSCALE
              Enable autoscaling by providing
@@ -627,7 +630,7 @@ Important Notes
 
    Example usage:
 
-   .. code-block:: python
+   .. code-block:: python
 
         from celery.contrib import rdb
         from celery.task import task
@@ -635,10 +638,10 @@ Important Notes
         @task()
         def add(x, y):
             result = x + y
-            rdb.set_trace()  # <- set breakpoint
+            # set breakpoint
+            rdb.set_trace()
             return result
 
-
     :func:`~celery.contrib.rdb.set_trace` sets a breakpoint at the current
     location and creates a socket you can telnet into to remotely debug
     your task.

+ 4 - 3
docs/history/changelog-2.3.rst

@@ -20,9 +20,10 @@ Security Fixes
 --------------
 
 * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
-  :program:`celery multi`, :program:`celeryd_detach`,
-  :program:`celery beat` and :program:`celery events` were used.
+  real id's when the :option:`--uid <celery --uid>`/
+  :option:`--gid <celery --gid>` arguments to :program:`celery multi`,
+  :program:`celeryd_detach`, :program:`celery beat` and
+  :program:`celery events` were used.
 
   This means privileges weren't properly dropped, and that it would
   be possible to regain supervisor privileges later.

+ 4 - 3
docs/history/changelog-2.4.rst

@@ -37,7 +37,8 @@ Security Fixes
 --------------
 
 * [Security: `CELERYSA-0001`_] Daemons would set effective id's rather than
-  real id's when the :option:`--uid`/:option:`--gid` arguments to
+  real id's when the :option:`--uid <celery --uid>`/
+  :option:`--gid <celery --gid>` arguments to
   :program:`celery multi`, :program:`celeryd_detach`,
   :program:`celery beat` and :program:`celery events` were used.
 
@@ -202,8 +203,8 @@ Important Notes
     the configuration will be ignored, if a setting is not provided in the URL
     then the value from the configuration will be used as default.
 
-    Also, programs now support the :option:`-b|--broker` option to specify
-    a broker URL on the command-line:
+    Also, programs now support the :option:`--broker <celery --broker>`
+    option to specify a broker URL on the command-line:
 
     .. code-block:: console
 

+ 4 - 2
docs/history/changelog-2.5.rst

@@ -194,8 +194,10 @@ Fixes
 * Internal timer (timer2) now logs exceptions instead of swallowing them
   (Issue #626).
 
-* celery shell: can now be started with :option:`--eventlet` or
-  :option:`--gevent` options to apply their monkey patches.
+* celery shell: can now be started with
+  :option:`--eventlet <celery shell --eventlet>` or
+  :option:`--gevent <celery shell --gevent>` options to apply their
+  monkey patches.
 
 .. _version-2.5.0:
 

+ 18 - 12
docs/history/changelog-3.0.rst

@@ -57,7 +57,9 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
   debug the init scripts.
 
     Setting this will skip the daemonization step so that errors
-    printed to stderr after standard outs are closed can be seen::
+    printed to stderr after standard outs are closed can be seen:
+
+    .. code-block:: console
 
     $ C_FAKEFORK=1 /etc/init.d/celeryd start
 
@@ -158,8 +160,8 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Now depends on :pypi:`billiard` 2.7.3.30.
 
-- ``--loader`` argument no longer supported importing loaders from the
-  current directory.
+- :option:`--loader <celery --loader>` argument no longer supported
+  importing loaders from the current directory.
 
 - [Worker] Fixed memory leak when restarting after connection lost
   (Issue #1325).
@@ -306,9 +308,10 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Worker: Optimized storing/loading the revoked tasks list (Issue #1289).
 
-    After this change the ``--statedb`` file will take up more disk space,
-    but loading from and storing the revoked tasks will be considerably
-    faster (what before took 5 minutes will now take less than a second).
+    After this change the :option:`celery worker --statedb` file will
+    take up more disk space, but loading from and storing the revoked
+    tasks will be considerably faster (what before took 5 minutes will
+    now take less than a second).
 
 - Celery will now suggest alternatives if there's a typo in the
   broker transport name (e.g. ``ampq`` -> ``amqp``).
@@ -680,10 +683,10 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - Fixed a deadlock issue that could occur when the producer pool
   inherited the connection pool instance of the parent process.
 
-- The :option:`--loader` option now works again (Issue #1066).
+- The :option:`--loader <celery --loader>` option now works again (Issue #1066).
 
 - :program:`celery` umbrella command: All subcommands now support
-  the :option:`--workdir` option (Issue #1063).
+  the :option:`--workdir <celery --workdir>` option (Issue #1063).
 
 - Groups included in chains now give GroupResults (Issue #1057)
 
@@ -840,7 +843,8 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Worker: ETA scheduler now uses millisecond precision (Issue #1040).
 
-- The ``--config`` argument to programs is now supported by all loaders.
+- The :option:`--config <celery --config>` argument to programs is
+  now supported by all loaders.
 
 - The :setting:`CASSANDRA_OPTIONS` setting has now been documented.
 
@@ -929,7 +933,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
   the working directory has been changed into.
 
 - :program:`celery worker` and :program:`celery beat` commands now respect
-  the :option:`--no-color` option (Issue #999).
+  the :option:`--no-color <celery --no-color>` option (Issue #999).
 
 - Fixed typos in eventlet examples (Issue #1000)
 
@@ -1348,9 +1352,11 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 - Now supports AMQP heartbeats if using the new ``pyamqp://`` transport.
 
-    - The py-amqp transport requires the :pypi:`amqp` library to be installed::
+    - The py-amqp transport requires the :pypi:`amqp` library to be installed:
+
+        .. code-block:: console
 
-        $ pip install amqp
+            $ pip install amqp
 
     - Then you need to set the transport URL prefix to ``pyamqp://``.
 

+ 35 - 28
docs/history/changelog-3.1.rst

@@ -303,15 +303,16 @@ new in Celery 3.1.
 
     Fix contributed by Alexander.
 
-- **Commands**: Worker now supports new ``--executable`` argument that can
-  be used with ``--detach``.
+- **Commands**: Worker now supports new
+  :option:`--executable <celery worker --executable>` argument that can
+  be used with :option:`celery worker --detach`.
 
     Contributed by Bert Vanderbauwhede.
 
 - **Canvas**: Fixed crash in chord unlock fallback task (Issue #2404).
 
-- **Worker**: Fixed rare crash occurring with ``--autoscale`` enabled
-  (Issue #2411).
+- **Worker**: Fixed rare crash occurring with
+  :option:`--autoscale <celery worker --autoscale>` enabled (Issue #2411).
 
 - **Django**: Properly recycle worker Django database connections when the
   Django ``CONN_MAX_AGE`` setting is enabled (Issue #2453).
@@ -423,7 +424,8 @@ new in Celery 3.1.
 :release-date: 2014-10-03 06:00 P.M UTC
 :release-by: Ask Solem
 
-- **Worker**: 3.1.15 broke ``-Ofair`` behavior (Issue #2286).
+- **Worker**: 3.1.15 broke :option:`-Ofair <celery worker -O>`
+  behavior (Issue #2286).
 
     This regression could result in all tasks executing
     in a single child process if ``-Ofair`` was enabled.
@@ -496,8 +498,8 @@ new in Celery 3.1.
 
 - **Django**: Compatibility with Django 1.7 on Windows (Issue #2126).
 
-- **Programs**: `--umask` argument can be now specified in both octal (if starting
-  with 0) or decimal.
+- **Programs**: :option:`--umask <celery --umask>` argument can now be
+  specified in either octal (if starting with 0) or decimal.
 
 
 .. _version-3.1.13:
@@ -715,8 +717,8 @@ News
 - **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being
   ignored (Issue #1953).
 
-- **Worker**: New :option:`--heartbeat-interval` can be used to change the
-  time (in seconds) between sending event heartbeats.
+- **Worker**: The new :option:`celery worker --heartbeat-interval` option
+  can be used to change the time (in seconds) between sending event heartbeats.
 
     Contributed by Matthew Duggan and Craig Northway.
 
@@ -827,7 +829,7 @@ News
     with workers and clients not using it, so be sure to enable
     the option in all clients and workers if you decide to use it.
 
-- **Multi**: With ``-opt:index`` (e.g. :option:`-c:1`) the index now always refers
+- **Multi**: With ``-opt:index`` (e.g. ``-c:1``) the index now always refers
   to the position of a node in the argument list.
 
     This means that referring to a number will work when specifying a list
@@ -1045,7 +1047,7 @@ News
 - **Commands**: The :program:`celery purge` command now warns that the operation
   will delete all tasks and prompts the user for confirmation.
 
-    A new :option:`-f` was added that can be used to disable
+    A new :option:`-f <celery purge -f>` option was added to disable
     interactive mode.
 
 - **Task**: ``.retry()`` did not raise the value provided in the ``exc`` argument
@@ -1095,8 +1097,8 @@ News
 
 - **Commands:** The :program:`celery inspect conf` utility now works.
 
-- **Commands:** The :option:`-no-color` argument was not respected by
-  all commands (*Issue #1799*).
+- **Commands:** The :option:`--no-color <celery --no-color>` argument was
+  not respected by all commands (*Issue #1799*).
 
 - **App:** Fixed rare bug with ``autodiscover_tasks()`` (*Issue #1797*).
 
@@ -1105,7 +1107,7 @@ News
   API documentation (*Issue #1782*).
 
 - **Documentation:** Supervisord examples contained an extraneous '-' in a
-  `--logfile` argument example.
+  :option:`--logfile <celery worker --logfile>` argument example.
 
     Fix contributed by Mohammad Almeer.
 
@@ -1234,10 +1236,13 @@ Fixes
 
     Fix contributed by Ionel Cristian Mărieș.
 
-- Worker with ``-B`` argument did not properly shut down the beat instance.
+- Worker with :option:`-B <celery worker -B>` argument did not properly
+  shut down the beat instance.
 
 - Worker: The ``%n`` and ``%h`` formats are now also supported by the
-  :option:`--logfile`, :option:`--pidfile` and :option:`--statedb` arguments.
+  :option:`--logfile <celery worker --logfile>`,
+  :option:`--pidfile <celery worker --pidfile>` and
+  :option:`--statedb <celery worker --statedb>` arguments.
 
     Example:
 
@@ -1377,17 +1382,19 @@ Fixes
     this ensures that the settings object is not prepared
     prematurely.
 
-- Fixed regression for ``--app`` argument experienced by
-  some users (Issue #1653).
+- Fixed regression for :option:`--app <celery --app>` argument
+  experienced by some users (Issue #1653).
 
-- Worker: Now respects the ``--uid`` and ``--gid`` arguments
-  even if ``--detach`` is not enabled.
+- Worker: Now respects the :option:`--uid <celery worker --uid>` and
+  :option:`--gid <celery worker --gid>` arguments even if
+  :option:`--detach <celery worker --detach>` is not enabled.
 
-- Beat: Now respects the ``--uid`` and ``--gid`` arguments
-  even if ``--detach`` is not enabled.
+- Beat: Now respects the :option:`--uid <celery beat --uid>` and
+  :option:`--gid <celery beat --gid>` arguments even if
+  :option:`--detach <celery beat --detach>` is not enabled.
 
-- Python 3: Fixed unorderable error occuring with the worker ``-B``
-  argument enabled.
+- Python 3: Fixed unorderable error occurring with the worker
+  :option:`-B <celery worker -B>` argument enabled.
 
 - ``celery.VERSION`` is now a named tuple.
 
@@ -1489,8 +1496,8 @@ Fixes
 - The ``celery multi show`` command now generates the same arguments
   as the start command does.
 
-- The ``--app`` argument could end up using a module object instead
-  of an app instance (with a resulting crash).
+- The :option:`--app <celery --app>` argument could end up using a module
+  object instead of an app instance (with a resulting crash).
 
 - Fixed a syntax error in the celerybeat init script.
 
@@ -1510,8 +1517,8 @@ Fixes
     ``unpack_from`` started supporting ``memoryview`` arguments
     in Python 2.7.6.
 
-- Worker: :option:`-B` argument accidentally closed files used
-  for logging.
+- Worker: :option:`-B <celery worker -B>` argument accidentally closed
+  files used for logging.
 
 - Task decorated tasks now keep their docstring (Issue #1636)
 

+ 14 - 5
docs/includes/installation.txt

@@ -6,11 +6,15 @@ Installation
 You can install Celery either via the Python Package Index (PyPI)
 or from source.
 
-To install using `pip`,::
+To install using `pip`:
+
+.. code-block:: console
 
     $ pip install -U Celery
 
-To install using `easy_install`,::
+To install using `easy_install`:
+
+.. code-block:: console
 
     $ easy_install -U Celery
 
@@ -122,7 +126,9 @@ Downloading and installing from source
 Download the latest version of Celery from
 http://pypi.python.org/pypi/celery/
 
-You can install it by doing the following,::
+You can install it by doing the following:
+
+.. code-block:: console
 
     $ tar xvfz celery-0.0.0.tar.gz
     $ cd celery-0.0.0
@@ -141,15 +147,18 @@ With pip
 ~~~~~~~~
 
 The Celery development version also requires the development
-versions of ``kombu``, ``amqp`` and ``billiard``.
+versions of :pypi:`kombu`, :pypi:`amqp`, :pypi:`billiard` and :pypi:`vine`.
 
 You can install the latest snapshot of these using the following
-pip commands::
+pip commands:
+
+.. code-block:: console
 
     $ pip install https://github.com/celery/celery/zipball/master#egg=celery
     $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
     $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
     $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu
+    $ pip install https://github.com/celery/vine/zipball/master#egg=vine
 
 With git
 ~~~~~~~~

+ 24 - 10
docs/internals/deprecation.rst

@@ -22,29 +22,41 @@ Compat Task Modules
 
 - Module ``celery.decorators`` will be removed:
 
-  Which means you need to change::
+    Which means you need to change:
 
-    from celery.decorators import task
+    .. code-block:: python
 
-Into::
+        from celery.decorators import task
 
-    from celery import task
+    Into:
+
+    .. code-block:: python
+
+        from celery import task
 
 - Module ``celery.task`` *may* be removed (not decided)
 
-    This means you should change::
+    This means you should change:
+
+    .. code-block:: python
 
         from celery.task import task
 
-    into::
+    into:
+
+    .. code-block:: python
 
         from celery import task
 
-    -- and::
+    -- and:
+
+    .. code-block:: python
 
         from celery.task import Task
 
-    into::
+    into:
+
+    .. code-block:: python
 
         from celery import Task
 
@@ -60,7 +72,9 @@ uses classmethods for these methods:
     - subtask
 
 This also means that you can't call these methods directly
-on the class, but have to instantiate the task first::
+on the class, but have to instantiate the task first:
+
+.. code-block:: pycon
 
     >>> MyTask.delay()          # NO LONGER WORKS
 
@@ -163,7 +177,7 @@ Result
 ------
 
 Apply to: :class:`~celery.result.AsyncResult`,
-:class:`~celery.result.EagerResult`::
+:class:`~celery.result.EagerResult`:
 
 - ``Result.wait()`` -> ``Result.get()``
 

+ 4 - 3
docs/internals/guide.rst

@@ -314,8 +314,9 @@ Worker overview
    This is the command-line interface to the worker.
 
    Responsibilities:
-       * Daemonization when `--detach` set,
-       * dropping privileges when using `--uid`/`--gid` arguments
+       * Daemonization when :option:`--detach <celery worker --detach>` is set,
+       * dropping privileges when using :option:`--uid <celery worker --uid>`/
+         :option:`--gid <celery worker --gid>` arguments
        * Installs "concurrency patches" (eventlet/gevent monkey patches).
 
   ``app.worker_main(argv)`` calls
@@ -327,7 +328,7 @@ Worker overview
    * sets up logging and redirects stdouts
    * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb))
    * prints banner and warnings (e.g. pickle warning)
-   * handles the ``--purge`` argument
+   * handles the :option:`celery worker --purge` argument
 
 * `app.WorkController` -> `celery.worker.WorkController`
 

+ 24 - 8
docs/reference/celery.rst

@@ -146,7 +146,9 @@ See :ref:`guide-canvas` for more about creating task workflows.
 
     Creates a group of tasks to be executed in parallel.
 
-    Example::
+    Example:
+
+    .. code-block:: pycon
 
         >>> res = group([add.s(2, 2), add.s(4, 4)])()
         >>> res.get()
@@ -167,17 +169,23 @@ See :ref:`guide-canvas` for more about creating task workflows.
     If called with only one argument, then that argument must
     be an iterable of tasks to chain.
 
-    Example::
+    Example:
+
+    .. code-block:: pycon
 
         >>> res = chain(add.s(2, 2), add.s(4))()
 
-    is effectively :math:`(2 + 2) + 4)`::
+    is effectively :math:`(2 + 2) + 4`:
+
+    .. code-block:: pycon
 
         >>> res.get()
         8
 
     Calling a chain will return the result of the last task in the chain.
-    You can get to the other tasks by following the ``result.parent``'s::
+    You can get to the other tasks by following the ``result.parent``'s:
+
+    .. code-block:: pycon
 
         >>> res.parent.get()
         4
@@ -188,11 +196,15 @@ See :ref:`guide-canvas` for more about creating task workflows.
     The header is a group of tasks that must complete before the callback is
     called.  A chord is essentially a callback for a group of tasks.
 
-    Example::
+    Example:
+
+    .. code-block:: pycon
 
         >>> res = chord([add.s(2, 2), add.s(4, 4)])(sum_task.s())
 
-    is effectively :math:`\Sigma ((2 + 2) + (4 + 4))`::
+    is effectively :math:`\Sigma ((2 + 2) + (4 + 4))`:
+
+    .. code-block:: pycon
 
         >>> res.get()
         12
@@ -207,11 +219,15 @@ See :ref:`guide-canvas` for more about creating task workflows.
     Used as the parts in a :class:`group` or to safely pass
     tasks around as callbacks.
 
-    Signatures can also be created from tasks::
+    Signatures can also be created from tasks:
+
+    .. code-block:: pycon
 
         >>> add.signature(args=(), kwargs={}, options={})
 
-    or the ``.s()`` shortcut::
+    or the ``.s()`` shortcut:
+
+    .. code-block:: pycon
 
         >>> add.s(*args, **kwargs)
 

+ 2 - 2
docs/tutorials/daemonizing.rst

@@ -148,7 +148,7 @@ Available options
 ~~~~~~~~~~~~~~~~~~
 
 * CELERY_APP
-    App instance to use (value for ``--app`` argument).
+    App instance to use (value for :option:`--app <celery --app>` argument).
     If you're still using the old API, or django-celery, then you
     can omit this setting.
 
@@ -257,7 +257,7 @@ Available options
 ~~~~~~~~~~~~~~~~~
 
 * CELERY_APP
-    App instance to use (value for ``--app`` argument).
+    App instance to use (value for :option:`--app <celery --app>` argument).
 
 * CELERYBEAT_OPTS
     Additional arguments to celerybeat; see `celerybeat --help` for a

+ 17 - 7
docs/tutorials/debugging.rst

@@ -40,7 +40,9 @@ to enable access from the outside you have to set the environment
 variable :envvar:`CELERY_RDB_HOST`.
 
 When the worker encounters your breakpoint it will log the following
-information::
+information:
+
+.. code-block:: text
 
     [INFO/MainProcess] Received task:
         tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8]
@@ -66,7 +68,9 @@ It may be a good idea to read the `Python Debugger Manual`_ if
 you have never used `pdb` before.
 
 To demonstrate, we will read the value of the ``result`` variable,
-change it and continue execution of the task::
+change it and continue execution of the task:
+
+.. code-block:: text
 
     (Pdb) result
     4
@@ -74,7 +78,9 @@ change it and continue execution of the task::
     (Pdb) continue
     Connection closed by foreign host.
 
-The result of our vandalism can be seen in the worker logs::
+The result of our vandalism can be seen in the worker logs:
+
+.. code-block:: text
 
     [2011-01-18 14:35:36,599: INFO/MainProcess] Task
         tasks.add[d7261c71-4962-47e5-b342-2448bedd20e8] succeeded
@@ -95,10 +101,14 @@ If the environment variable :envvar:`CELERY_RDBSIG` is set, the worker
 will open up an rdb instance whenever the `SIGUSR2` signal is sent.
 This is the case for both main and worker processes.
 
-For example starting the worker with::
+For example starting the worker with:
 
-    CELERY_RDBSIG=1 celery worker -l info
+.. code-block:: console
 
-You can start an rdb session for any of the worker processes by executing::
+    $ CELERY_RDBSIG=1 celery worker -l info
+
+You can start an rdb session for any of the worker processes by executing:
+
+.. code-block:: console
 
-    kill -USR2 <pid>
+    $ kill -USR2 <pid>

+ 1 - 1
docs/userguide/calling.rst

@@ -467,7 +467,7 @@ Simple routing (name <-> name) is accomplished using the ``queue`` option::
     add.apply_async(queue='priority.high')
 
 You can then assign workers to the ``priority.high`` queue by using
-the workers :option:`-Q` argument:
+the workers :option:`-Q <celery worker -Q>` argument:
 
 .. code-block:: console
 

+ 2 - 2
docs/userguide/concurrency/eventlet.rst

@@ -39,8 +39,8 @@ what works best.
 Enabling Eventlet
 =================
 
-You can enable the Eventlet pool by using the ``-P`` option to
-:program:`celery worker`:
+You can enable the Eventlet pool by using the :option:`celery worker -P`
+option:
 
 .. code-block:: console
 

+ 7 - 3
docs/userguide/extending.rst

@@ -633,7 +633,9 @@ It can be added both as a worker and consumer bootstep:
         app.steps['consumer'].add(InfoStep)
 
 Starting the worker with this step installed will give us the following
-logs::
+logs:
+
+.. code-block:: text
 
     <Worker: w@example.com (initializing)> is in init
     <Consumer: w@example.com (initializing)> is in init
@@ -655,8 +657,10 @@ which means that you cannot interrupt the function and
 call it again later.  It's important that the ``stop`` and ``shutdown`` methods
 you write are also :term:`reentrant`.
 
-Starting the worker with ``--loglevel=debug`` will show us more
-information about the boot process::
+Starting the worker with :option:`--loglevel=debug <celery worker --loglevel>`
+will show us more information about the boot process:
+
+.. code-block:: text
 
     [2013-05-29 16:18:20,509: DEBUG/MainProcess] | Worker: Preparing bootsteps.
     [2013-05-29 16:18:20,511: DEBUG/MainProcess] | Worker: Building graph...

+ 14 - 9
docs/userguide/monitoring.rst

@@ -48,11 +48,13 @@ Commands
 
   The locals will include the ``celery`` variable, which is the current app.
   Also all known tasks will be automatically added to locals (unless the
-  ``--without-tasks`` flag is set).
+  :option:`--without-tasks <celery shell --without-tasks>` flag is set).
 
   Uses IPython, bpython, or regular Python in that order if installed.
-  You can force an implementation using ``--force-ipython|-I``,
-  ``--force-bpython|-B``, or ``--force-python|-P``.
+  You can force an implementation using
+  :option:`--ipython <celery shell --ipython>`,
+  :option:`--bpython <celery shell --bpython>`, or
+  :option:`--python <celery shell --python>`.
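+
+  For example, to force a specific implementation:
+
+  .. code-block:: console
+
+      $ celery -A proj shell --ipython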
 
 * **status**: List active nodes in this cluster
 
@@ -149,7 +151,8 @@ Commands
 
 .. note::
 
-    All ``inspect`` and ``control`` commands supports a ``--timeout`` argument,
+    All ``inspect`` and ``control`` commands supports a
+    :option:`--timeout <celery inspect --timeout>` argument,
     This is the number of seconds to wait for responses.
     You may have to increase this timeout if you're not getting a response
     due to latency.
@@ -161,7 +164,7 @@ Specifying destination nodes
 
 By default the inspect and control commands operate on all workers.
 You can specify a single, or a list of workers by using the
-`--destination` argument:
+:option:`--destination <celery inspect --destination>` argument:
 
 .. code-block:: console
 
@@ -254,13 +257,15 @@ Running the flower command will start a web-server that you can visit:
 
     $ celery -A proj flower
 
-The default port is http://localhost:5555, but you can change this using the `--port` argument:
+The default address is http://localhost:5555; you can change the port using
+the :option:`--port <flower --port>` argument:
 
 .. code-block:: console
 
     $ celery -A proj flower --port=5555
 
-Broker URL can also be passed through the `--broker` argument :
+The broker URL can also be passed through the
+:option:`--broker <celery --broker>` argument:
 
 .. code-block:: console
 
@@ -318,7 +323,7 @@ and it includes a tool to dump events to :file:`stdout`:
 
     $ celery -A proj events --dump
 
-For a complete list of options use ``--help``:
+For a complete list of options use :option:`--help <celery --help>`:
 
 .. code-block:: console
 
@@ -519,7 +524,7 @@ See the API reference for :mod:`celery.events.state` to read more
 about state objects.
 
 Now you can use this cam with :program:`celery events` by specifying
-it with the :option:`-c` option:
+it with the :option:`-c <celery events -c>` option:
 
 .. code-block:: console
 

+ 9 - 7
docs/userguide/optimizing.rst

@@ -170,7 +170,7 @@ the tasks according to the run-time. (see :ref:`guide-routing`).
        all messages will be delivered to the active node.
 
 .. [*] This is the concurrency setting; :setting:`worker_concurrency` or the
-       :option:`-c` option to the :program:`celery worker` program.
+       :option:`celery worker -c` option.
 
 
 Reserve one task at a time
@@ -182,13 +182,15 @@ it can be redelivered to another worker (or the same after recovery).
 
 When using the default of early acknowledgment, having a prefetch multiplier setting
 of 1 means the worker will reserve at most one extra task for every
-worker process: or in other words, if the worker is started with `-c 10`,
-the worker may reserve at most 20 tasks (10 unacknowledged tasks executing, and 10
-unacknowledged reserved tasks) at any time.
+worker process; in other words, if the worker is started with
+:option:`-c 10 <celery worker -c>`, the worker may reserve at most 20
+tasks (10 unacknowledged tasks executing, and 10 unacknowledged reserved
+tasks) at any time.
 
 Often users ask if disabling "prefetching of tasks" is possible, but what
 they really mean by that is to have a worker only reserve as many tasks as
-there are worker processes (10 unacknowledged tasks for `-c 10`)
+there are worker processes (10 unacknowledged tasks for
+:option:`-c 10 <celery worker -c>`).
 
 That is possible, but not without also enabling
 :term:`late acknowledgment`.  Using this option over the
@@ -236,8 +238,8 @@ writable.  The pipe buffer size varies based on the operating system: some may
 have a buffer as small as 64kb but on recent Linux versions the buffer
 size is 1MB (can only be changed system wide).
 
-You can disable this prefetching behavior by enabling the :option:`-Ofair`
-worker option:
+You can disable this prefetching behavior by enabling the
+:option:`-Ofair <celery worker -O>` worker option:
 
 .. code-block:: console
 

+ 6 - 4
docs/userguide/periodic-tasks.rst

@@ -383,8 +383,8 @@ To start the :program:`celery beat` service:
     $ celery -A proj beat
 
 You can also embed `beat` inside the worker by enabling the
-workers `-B` option, this is convenient if you will never run
-more than one worker node, but it's not commonly used and for that
+worker's :option:`-B <celery worker -B>` option. This is convenient if you'll
+never run more than one worker node, but it's not commonly used and for that
 reason is not recommended for production use:
 
 .. code-block:: console
@@ -410,8 +410,10 @@ location for this file:
 Using custom scheduler classes
 ------------------------------
 
-Custom scheduler classes can be specified on the command-line (the `-S`
-argument).  The default scheduler is :class:`celery.beat.PersistentScheduler`,
+Custom scheduler classes can be specified on the command-line (the
+:option:`-S <celery beat -S>` argument).
+
+The default scheduler is :class:`celery.beat.PersistentScheduler`,
 which is simply keeping track of the last run times in a local database file
 (a :mod:`shelve`).
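+
+For example (``myapp.schedulers.MyScheduler`` is an illustrative name):
+
+.. code-block:: console
+
+    $ celery -A proj beat -S myapp.schedulers.MyScheduler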
 

+ 13 - 9
docs/userguide/remote-tasks.rst

@@ -22,13 +22,15 @@ result as a JSON response. The scheme to call a task is:
 
 .. code-block:: http
 
-    GET HTTP/1.1 http://example.com/mytask/?arg1=a&arg2=b&arg3=c
+    GET /mytask/?arg1=a&arg2=b&arg3=c HTTP/1.1
+    Host: example.com
 
 or using POST:
 
 .. code-block:: http
 
-    POST HTTP/1.1 http://example.com/mytask
+    POST /mytask HTTP/1.1
+    Host: example.com
 
 .. note::
 
@@ -39,22 +41,22 @@ Whether to use GET or POST is up to you and your requirements.
 The web page should then return a response in the following format
 if the execution was successful:
 
-.. code-block:: javascript
+.. code-block:: json
 
-    {'status': 'success', 'retval': …}
+    {"status": "success", "retval": "RETVAL"}
 
 or if there was an error:
 
-.. code-block:: javascript
+.. code-block:: json
 
-    {'status': 'failure', 'reason': 'Invalid moon alignment.'}
+    {"status": "failure", "reason": "Invalid moon alignment."}
 
 Enabling the HTTP task
 ----------------------
 
 To enable the HTTP dispatch task you have to add :mod:`celery.task.http`
-to :setting:`imports`, or start the worker with ``-I
-celery.task.http``.
+to :setting:`imports`, or start the worker with
+:option:`-I celery.task.http <celery worker -I>`.
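+
+For example, to start a worker with the module enabled:
+
+.. code-block:: console
+
+    $ celery -A proj worker -I celery.task.http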
 
 
 .. _webhook-django-example:
@@ -125,7 +127,9 @@ functionality:
     100
 
 The output of :program:`celery worker` (or the log file if enabled) should show the
-task being executed::
+task being executed:
+
+.. code-block:: text
 
     [INFO/MainProcess] Task celery.task.http.HttpDispatchTask
             [f2cc8efc-2a14-40cd-85ad-f1c77c94beeb] processed: 100

+ 2 - 2
docs/userguide/routing.rst

@@ -164,7 +164,7 @@ You can also override this using the `routing_key` argument to
 
 
 To make server `z` consume from the feed queue exclusively you can
-start it with the ``-Q`` option:
+start it with the :option:`celery worker -Q` option:
 
 .. code-block:: console
 
@@ -218,7 +218,7 @@ If you're confused about these terms, you should read up on AMQP.
 Special Routing Options
 =======================
 
-.. _routing-option-rabbitmq-priorities:
+.. _routing-options-rabbitmq-priorities:
 
 RabbitMQ Message Priorities
 ---------------------------

+ 3 - 3
docs/userguide/signals.rst

@@ -369,12 +369,12 @@ Worker Signals
 celeryd_after_setup
 ~~~~~~~~~~~~~~~~~~~
 
-This signal is sent after the worker instance is set up,
-but before it calls run.  This means that any queues from the :option:`-Q`
+This signal is sent after the worker instance is set up, but before it
+calls run.  This means that any queues from the :option:`celery worker -Q`
 option are enabled, logging has been set up and so on.
 
 It can be used to e.g. add custom queues that should always be consumed
-from, disregarding the :option:`-Q` option.  Here's an example
+from, disregarding the :option:`celery worker -Q` option.  Here's an example
 that sets up a direct queue for each worker, these queues can then be
 used to route a task to any specific worker:
 

+ 5 - 5
docs/userguide/tasks.rst

@@ -530,7 +530,7 @@ override this default.
     @app.task(bind=True, default_retry_delay=30 * 60)  # retry in 30 minutes.
     def add(self, x, y):
         try:
-            
+            something_raising()
         except Exception as exc:
             # overrides the default delay to retry after 1 minute
             raise self.retry(exc=exc, countdown=60)
@@ -1218,7 +1218,7 @@ that can be added to tasks like this:
     @app.task(base=DatabaseTask)
     def process_rows():
         for row in process_rows.db.table.all():
-            
+            process_row(row)
 
 The ``db`` attribute of the ``process_rows`` task will then
 always stay the same in each process.
@@ -1372,7 +1372,7 @@ wastes time and resources.
 .. code-block:: python
 
     @app.task(ignore_result=True)
-    def mytask():
+    def mytask():
         something()
 
 Results can even be disabled globally using the :setting:`task_ignore_result`
@@ -1594,7 +1594,7 @@ Let's have a look at another example:
 
     @transaction.commit_on_success
     def create_article(request):
-        article = Article.objects.create()
+        article = Article.objects.create()
         expand_abbreviations.delay(article.pk)
 
 This is a Django view creating an article object in the database,
@@ -1614,7 +1614,7 @@ depending on state from the current transaction*:
     @transaction.commit_manually
     def create_article(request):
         try:
-            article = Article.objects.create()
+            article = Article.objects.create()
         except:
             transaction.rollback()
             raise

+ 27 - 19
docs/userguide/workers.rst

@@ -34,7 +34,7 @@ For a full list of available command-line options see
 
 You can also start multiple workers on the same machine. If you do so
 be sure to give a unique name to each individual worker by specifying a
-node name with the :option:`--hostname|-n` argument:
+node name with the :option:`--hostname <celery worker --hostname>` argument:
 
 .. code-block:: console
 
@@ -143,8 +143,10 @@ The worker's main process overrides the following signals:
 Variables in file paths
 =======================
 
-The file path arguments for :option:`--logfile`, :option:`--pidfile` and :option:`--statedb`
-can contain variables that the worker will expand:
+The file path arguments for :option:`--logfile <celery worker --logfile>`,
+:option:`--pidfile <celery worker --pidfile>` and
+:option:`--statedb <celery worker --statedb>` can contain variables that the
+worker will expand:
 
 Node name replacements
 ----------------------
@@ -203,8 +205,9 @@ Concurrency
 
 By default multiprocessing is used to perform concurrent execution of tasks,
 but you can also use :ref:`Eventlet <concurrency-eventlet>`.  The number
-of worker processes/threads can be changed using the :option:`--concurrency`
-argument and defaults to the number of CPUs available on the machine.
+of worker processes/threads can be changed using the
+:option:`--concurrency <celery worker --concurrency>` argument and defaults
+to the number of CPUs available on the machine.
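+
+For example, to start a worker with ten pool processes:
+
+.. code-block:: console
+
+    $ celery -A proj worker --concurrency=10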
 
 .. admonition:: Number of processes (multiprocessing/prefork pool)
 
@@ -527,7 +530,8 @@ a worker can execute before it's replaced by a new process.
 This is useful if you have memory leaks you have no control over,
 for example from closed source C extensions.
 
-The option can be set using the workers :option:`--maxtasksperchild` argument
+The option can be set using the worker's
+:option:`--maxtasksperchild <celery worker --maxtasksperchild>` argument
 or using the :setting:`worker_max_tasks_per_child` setting.
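+
+For example, to replace each child process after a hundred tasks:
+
+.. code-block:: console
+
+    $ celery -A proj worker --maxtasksperchild=100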
 
 .. _worker-maxmemperchild:
@@ -545,7 +549,8 @@ memory a worker can execute before it's replaced by a new process.
 This is useful if you have memory leaks you have no control over,
 for example from closed source C extensions.
 
-The option can be set using the workers :option:`--maxmemperchild` argument
+The option can be set using the worker's
+:option:`--maxmemperchild <celery worker --maxmemperchild>` argument
 or using the :setting:`worker_max_memory_per_child` setting.
 
 .. _worker-autoscaling:
@@ -563,8 +568,10 @@ based on load:
 - The autoscaler adds more pool processes when there is work to do,
     - and starts removing processes when the workload is low.
 
-It's enabled by the :option:`--autoscale` option, which needs two
-numbers: the maximum and minimum number of pool processes::
+It's enabled by the :option:`--autoscale <celery worker --autoscale>` option,
+which needs two numbers: the maximum and minimum number of pool processes:
+
+.. code-block:: text
 
         --autoscale=AUTOSCALE
              Enable autoscaling by providing
@@ -587,8 +594,8 @@ By default it will consume from all queues defined in the
 :setting:`task_queues` setting (which if not specified defaults to the
 queue named ``celery``).
 
-You can specify what queues to consume from at startup,
-by giving a comma separated list of queues to the :option:`-Q` option:
+You can specify what queues to consume from at startup by giving a
+comma-separated list of queues to the :option:`-Q <celery worker -Q>` option:
 
 .. code-block:: console
 
@@ -621,7 +628,7 @@ named "``foo``" you can use the :program:`celery control` program:
         started consuming from u'foo'
 
 If you want to target a specific worker you can use the
-:option:`--destination`` argument:
+:option:`--destination <celery control --destination>` argument:
 
 .. code-block:: console
 
@@ -673,8 +680,8 @@ you can use the :program:`celery control` program:
 
     $ celery -A proj control cancel_consumer foo
 
-The :option:`--destination` argument can be used to specify a worker, or a
-list of workers, to act on the command:
+The :option:`--destination <celery control --destination>` argument can be
+used to specify a worker, or a list of workers, to act on the command:
 
 .. code-block:: console
 
@@ -703,8 +710,8 @@ the :control:`active_queues` control command:
     [...]
 
 Like all other remote control commands this also supports the
-:option:`--destination` argument used to specify which workers should
-reply to the request:
+:option:`--destination <celery inspect --destination>` argument used
+to specify which workers should reply to the request:
 
 .. code-block:: console
 
@@ -732,10 +739,11 @@ Autoreloading
 
 :pool support: *prefork, eventlet, gevent, threads, solo*
 
-Starting :program:`celery worker` with the :option:`--autoreload` option will
+Starting :program:`celery worker` with the
+:option:`--autoreload <celery worker --autoreload>` option will
 enable the worker to watch for file system changes to all imported task
-modules (and also any non-task modules added to the
-:setting:`imports` setting or the :option:`-I|--include` option).
+modules (and also any non-task modules added to the :setting:`imports`
+setting or the :option:`--include <celery worker --include>` option).
 
 This is an experimental feature intended for use in development only,
 using auto-reload in production is discouraged as the behavior of reloading

+ 7 - 4
docs/whatsnew-2.5.rst

@@ -212,10 +212,12 @@ Contributed by Mher Movsisyan.
 Experimental support for automatic module reloading
 ---------------------------------------------------
 
-Starting :program:`celeryd` with the :option:`--autoreload` option will
+Starting :program:`celeryd` with the
+:option:`--autoreload <celery worker --autoreload>` option will
 enable the worker to watch for file system changes to all imported task
 modules (and also any non-task modules added to the
-:setting:`CELERY_IMPORTS` setting or the :option:`-I|--include` option).
+:setting:`CELERY_IMPORTS` setting or the
+:option:`celery worker --include` option).
 
 This is an experimental feature intended for use in development only,
 using auto-reload in production is discouraged as the behavior of reloading
@@ -303,7 +305,7 @@ that filter for tasks to annotate:
             if task.name.startswith('tasks.'):
                 return {'rate_limit': '10/s'}
 
-    CELERY_ANNOTATIONS = (MyAnnotate(), {})
+    CELERY_ANNOTATIONS = (MyAnnotate(), {other_annotations,})
 
 ``current`` provides the currently executing task
 -------------------------------------------------
@@ -562,7 +564,8 @@ Fixes
 - Now shows helpful error message when given a config module ending in
   ``.py`` that can't be imported.
 
-- celeryctl: The ``--expires`` and ``-eta`` arguments to the apply command
+- celeryctl: The :option:`--expires <celery call --expires>` and
+  :option:`--eta <celery call --eta>` arguments to the apply command
   can now be an ISO-8601 formatted string.
 
 - celeryctl now exits with exit status ``EX_UNAVAILABLE`` (69) if no replies

+ 3 - 4
docs/whatsnew-3.0.rst

@@ -170,8 +170,7 @@ it manually.  This command helps:
 
 .. code-block:: console
 
-    $ rm -r $(dirname $(python -c '
-        import celery;print(celery.__file__)'))/app/task/
+    $ rm -r $(dirname $(python -c 'import celery;print(celery.__file__)'))/app/task/
 
 If you experience an error like ``ImportError: cannot import name _unpickle_task``,
 you just have to remove the old package and everything is fine.
@@ -685,7 +684,7 @@ when the task registry is first used.
 Smart `--app` option
 --------------------
 
-The :option:`--app` option now 'auto-detects'
+The :option:`--app <celery --app>` option now 'auto-detects'
 
     - If the provided path is a module it tries to get an
       attribute named 'celery'.
@@ -865,7 +864,7 @@ In Other News
 - :setting:`CELERY_FORCE_EXECV` is now enabled by default.
 
     If the old behavior is wanted the setting can be set to False,
-    or the new :option:`--no-execv` to :program:`celery worker`.
+    or the new :option:`celery worker --no-execv` option.
 
 - Deprecated module ``celery.conf`` has been removed.
 

+ 27 - 21
docs/whatsnew-3.1.rst

@@ -249,8 +249,8 @@ Caveats
     have a buffer as small as 64kb but on recent Linux versions the buffer
     size is 1MB (can only be changed system wide).
 
-    You can disable this prefetching behavior by enabling the :option:`-Ofair`
-    worker option:
+    You can disable this prefetching behavior by enabling the
+    :option:`-Ofair <celery worker -O>` worker option:
 
     .. code-block:: console
 
@@ -265,9 +265,11 @@ Caveats
     already written many tasks to the process inqueue, and these tasks
     must then be moved back and rewritten to a new process.
 
-    This is very expensive if you have ``--maxtasksperchild`` set to a low
-    value (e.g. less than 10), so if you need to enable this option
-    you should also enable ``-Ofair`` to turn off the prefetching behavior.
+    This is very expensive if you have the
+    :option:`--maxtasksperchild <celery worker --maxtasksperchild>` option
+    set to a low value (e.g. less than 10), so if you need to enable this option
+    you should also enable :option:`-Ofair <celery worker -O>` to turn off the
+    prefetching behavior.
 
 Django supported out of the box
 -------------------------------
@@ -391,9 +393,9 @@ to the local timezone.
     starts.
 
     If all of the workers are shutdown the clock value will be lost
-    and reset to 0. To protect against this, you should specify
-    :option:`--statedb` so that the worker can persist the clock
-    value at shutdown.
+    and reset to 0. To protect against this, you should specify the
+    :option:`celery worker --statedb` option so that the worker can
+    persist the clock value at shutdown.
 
     You may notice that the logical clock is an integer value and
     increases very rapidly.  Do not worry about the value overflowing
@@ -429,9 +431,9 @@ node name in events and broadcast messages, so where before
 a worker would identify itself as 'worker1.example.com', it will now
 use 'celery@worker1.example.com'.
 
-Remember that the ``-n`` argument also supports simple variable
-substitutions, so if the current hostname is *george.example.com*
-then the ``%h`` macro will expand into that:
+Remember that the :option:`-n <celery worker -n>` argument also supports
+simple variable substitutions, so if the current hostname
+is *george.example.com* then the ``%h`` macro will expand into that:
 
 .. code-block:: console
 
@@ -485,7 +487,8 @@ Synchronized data currently includes revoked tasks and logical clock.
 This only happens at startup and causes a one second startup delay
 to collect broadcast responses from other workers.
 
-You can disable this bootstep using the ``--without-mingle`` argument.
+You can disable this bootstep using the
+:option:`celery worker --without-mingle` option.
 
 Gossip: Worker <-> Worker communication
 ---------------------------------------
@@ -504,7 +507,8 @@ resource usage or data locality) or restarting workers when they crash.
 We believe that although this is a small addition, it opens
 amazing possibilities.
 
-You can disable this bootstep using the ``--without-gossip`` argument.
+You can disable this bootstep using the
+:option:`celery worker --without-gossip` option.
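+
+For example, to start a worker with both gossip and mingle disabled:
+
+.. code-block:: console
+
+    $ celery -A proj worker --without-gossip --without-mingle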
 
 Bootsteps: Extending the worker
 -------------------------------
@@ -754,7 +758,8 @@ In Other News
     The monotonic clock function is built-in starting from Python 3.4,
     but we also have fallback implementations for Linux and OS X.
 
-- :program:`celery worker` now supports a ``--detach`` argument to start
+- :program:`celery worker` now supports a new
+  :option:`--detach <celery worker --detach>` argument to start
   the worker as a daemon in the background.
 
 - :class:`@events.Receiver` now sets a ``local_received`` field for incoming
@@ -909,7 +914,7 @@ In Other News
 
         from multiprocessing.util import register_after_fork
 
-        engine = create_engine()
+        engine = create_engine(*engine_args)
         register_after_fork(engine, engine.dispose)
 
 - A stress test suite for the Celery worker has been written.
@@ -1085,11 +1090,12 @@ In Other News
     :class:`~celery.worker.request.Request` object to get information
     about the task.
 
-- Worker: New :option:`-X` command line argument to exclude queues
-  (Issue #1399).
+- Worker: New :option:`-X <celery worker -X>` command line argument to
+  exclude queues (Issue #1399).
 
-    The :option:`-X` argument is the inverse of the :option:`-Q` argument
-    and accepts a list of queues to exclude (not consume from):
+    The :option:`-X <celery worker -X>` argument is the inverse of the
+    :option:`-Q <celery worker -Q>` argument and accepts a list of queues
+    to exclude (not consume from):
 
     .. code-block:: console
 
@@ -1228,8 +1234,8 @@ Fixes
 - Worker: Now makes sure that the shutdown process is not initiated multiple
   times.
 
-- Multi: Now properly handles both ``-f`` and ``--logfile`` options
-  (Issue #1541).
+- Multi: Now properly handles both ``-f`` and
+  :option:`--logfile <celery worker --logfile>` options (Issue #1541).
 
 .. _v310-internal:
 

+ 15 - 12
docs/whatsnew-4.0.rst

@@ -620,7 +620,8 @@ Prefork: Limit child process resident memory size.
 # 5cae0e754128750a893524dcba4ae030c414de33
 
 You can now limit the maximum amount of memory allocated per prefork
-pool child process by setting the worker :option:`--maxmemperchild` option,
+pool child process by setting the worker
+:option:`--maxmemperchild <celery worker --maxmemperchild>` option,
 or the :setting:`worker_max_memory_per_child` setting.
 
 The limit is for RSS/resident memory size and is specified in kilobytes.
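+
+For example, to recycle a child process once it exceeds roughly 12MB
+(12,000 kilobytes) of resident memory:
+
+.. code-block:: console
+
+    $ celery -A proj worker --maxmemperchild=12000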
@@ -808,8 +809,8 @@ In Other News
 - **Programs**: ``%n`` format for :program:`celery multi` is now synonym with
   ``%N`` to be consistent with :program:`celery worker`.
 
-- **Programs**: celery inspect/control now supports ``--json`` argument to
-  give output in json format.
+- **Programs**: celery inspect/control now supports a new
+  :option:`--json <celery inspect --json>` option to give output in json format.
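+
+  For example, to list active tasks as JSON:
+
+  .. code-block:: console
+
+      $ celery -A proj inspect active --json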
 
 - **Programs**: :program:`celery inspect registered` now ignores built-in
   tasks.
@@ -951,13 +952,15 @@ In Other News
 - **Programs**: ``%p`` can now be used to expand to the full worker nodename
   in logfile/pidfile arguments.
 
-- **Programs**: A new command line option :option:``--executable`` is now
-  available for daemonizing programs.
+- **Programs**: A new command line option
+  :option:`--executable <celery worker --executable>` is now
+  available for daemonizing programs (:program:`celery worker` and
+  :program:`celery beat`).
 
     Contributed by Bert Vanderbauwhede.
 
 - **Programs**: :program:`celery worker` supports new
-  :option:`--prefetch-multiplier` option.
+  :option:`--prefetch-multiplier <celery worker --prefetch-multiplier>` option.
 
     Contributed by Mickaël Penhard.
 
@@ -1094,11 +1097,11 @@ Modules
   as the ``celery.task`` package is being phased out.  The compat module
   will be removed in version 4.0 so please change any import from::
 
-    from celery.task.trace import 
+    from celery.task.trace import X
 
   to::
 
-    from celery.app.trace import 
+    from celery.app.trace import X
 
 - Old compatibility aliases in the :mod:`celery.loaders` module
   has been removed.
@@ -1258,10 +1261,10 @@ Logging Settings
 =====================================  =====================================
 **Setting name**                       **Replace with**
 =====================================  =====================================
-``CELERYD_LOG_LEVEL``                  :option:`--loglevel`
-``CELERYD_LOG_FILE``                   :option:`--logfile``
-``CELERYBEAT_LOG_LEVEL``               :option:`--loglevel`
-``CELERYBEAT_LOG_FILE``                :option:`--loglevel``
+``CELERYD_LOG_LEVEL``                  :option:`celery worker --loglevel`
+``CELERYD_LOG_FILE``                   :option:`celery worker --logfile`
+``CELERYBEAT_LOG_LEVEL``               :option:`celery beat --loglevel`
+``CELERYBEAT_LOG_FILE``                :option:`celery beat --logfile`
 ``CELERYMON_LOG_LEVEL``                celerymon is deprecated, use flower.
 ``CELERYMON_LOG_FILE``                 celerymon is deprecated, use flower.
 ``CELERYMON_LOG_FORMAT``               celerymon is deprecated, use flower.