
Docs: I.e., E.g.

Ask Solem 8 years ago
parent commit 68d003ae2b
68 changed files with 227 additions and 216 deletions
  1. celery/__init__.py (+1 -1)
  2. celery/app/amqp.py (+1 -1)
  3. celery/app/base.py (+6 -5)
  4. celery/app/task.py (+1 -1)
  5. celery/app/utils.py (+1 -1)
  6. celery/apps/worker.py (+1 -1)
  7. celery/bin/amqp.py (+1 -2)
  8. celery/bin/base.py (+2 -2)
  9. celery/bin/celery.py (+2 -2)
  10. celery/bin/events.py (+1 -1)
  11. celery/bin/worker.py (+1 -1)
  12. celery/canvas.py (+1 -1)
  13. celery/concurrency/__init__.py (+1 -1)
  14. celery/concurrency/asynpool.py (+1 -1)
  15. celery/events/__init__.py (+1 -2)
  16. celery/events/state.py (+3 -3)
  17. celery/five.py (+1 -1)
  18. celery/loaders/base.py (+3 -3)
  19. celery/platforms.py (+1 -1)
  20. celery/result.py (+1 -1)
  21. celery/schedules.py (+2 -2)
  22. celery/task/base.py (+1 -1)
  23. celery/utils/collections.py (+2 -4)
  24. celery/utils/debug.py (+2 -2)
  25. celery/utils/dispatch/saferef.py (+4 -4)
  26. celery/utils/nodenames.py (+1 -1)
  27. celery/utils/serialization.py (+1 -1)
  28. celery/utils/timeutils.py (+6 -6)
  29. celery/worker/__init__.py (+1 -1)
  30. celery/worker/components.py (+2 -2)
  31. celery/worker/control.py (+2 -2)
  32. docs/contributing.rst (+14 -10)
  33. docs/django/first-steps-with-django.rst (+5 -5)
  34. docs/faq.rst (+1 -1)
  35. docs/getting-started/brokers/rabbitmq.rst (+2 -2)
  36. docs/getting-started/first-steps-with-celery.rst (+2 -2)
  37. docs/getting-started/next-steps.rst (+10 -6)
  38. docs/glossary.rst (+1 -1)
  39. docs/history/changelog-1.0.rst (+22 -18)
  40. docs/history/changelog-2.0.rst (+3 -3)
  41. docs/history/changelog-2.1.rst (+3 -1)
  42. docs/history/changelog-2.2.rst (+5 -5)
  43. docs/history/changelog-3.0.rst (+3 -3)
  44. docs/history/changelog-3.1.rst (+4 -4)
  45. docs/history/whatsnew-2.5.rst (+3 -3)
  46. docs/history/whatsnew-3.0.rst (+6 -6)
  47. docs/internals/deprecation.rst (+1 -1)
  48. docs/internals/guide.rst (+2 -2)
  49. docs/internals/protocol.rst (+6 -5)
  50. docs/reference/celery.utils.debug.rst (+1 -1)
  51. docs/userguide/calling.rst (+3 -3)
  52. docs/userguide/canvas.rst (+4 -3)
  53. docs/userguide/configuration.rst (+10 -12)
  54. docs/userguide/daemonizing.rst (+2 -2)
  55. docs/userguide/extending.rst (+8 -7)
  56. docs/userguide/monitoring.rst (+9 -9)
  57. docs/userguide/periodic-tasks.rst (+5 -5)
  58. docs/userguide/routing.rst (+2 -2)
  59. docs/userguide/security.rst (+1 -1)
  60. docs/userguide/signals.rst (+4 -4)
  61. docs/userguide/tasks.rst (+10 -10)
  62. docs/userguide/workers.rst (+4 -4)
  63. docs/whatsnew-3.1.rst (+4 -4)
  64. docs/whatsnew-4.0.rst (+2 -2)
  65. extra/generic-init.d/celerybeat (+1 -1)
  66. extra/generic-init.d/celeryd (+3 -4)
  67. extra/zsh-completion/celery.zsh (+4 -4)
  68. funtests/suite/test_leak.py (+2 -2)

+ 1 - 1
celery/__init__.py

@@ -125,7 +125,7 @@ def maybe_patch_concurrency(argv=sys.argv,
     """With short and long opt alternatives that specify the command line
     """With short and long opt alternatives that specify the command line
     option to set the pool, this makes sure that anything that needs
     option to set the pool, this makes sure that anything that needs
     to be patched is completed as early as possible.
     to be patched is completed as early as possible.
-    (e.g. eventlet/gevent monkey patches)."""
+    (e.g., eventlet/gevent monkey patches)."""
     try:
     try:
         pool = _find_option_with_arg(argv, short_opts, long_opts)
         pool = _find_option_with_arg(argv, short_opts, long_opts)
     except KeyError:
     except KeyError:
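
For context, ``celery.maybe_patch_concurrency`` is the function this docstring
belongs to; it applies the monkey patches before the rest of the program is
imported. A minimal usage sketch (argv values are hypothetical):

.. code-block:: python

    from celery import maybe_patch_concurrency

    # If '-P eventlet' (or '--pool=eventlet') appears in argv, the eventlet
    # monkey patches are applied as early as possible (requires eventlet
    # to be installed).
    maybe_patch_concurrency(['celery', 'worker', '-P', 'eventlet'])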

+ 1 - 1
celery/app/amqp.py

@@ -227,7 +227,7 @@ class AMQP(object):
     _producer_pool = None

     # Exchange class/function used when defining automatic queues.
-    # E.g. you can use ``autoexchange = lambda n: None`` to use the
+    # For example, you can use ``autoexchange = lambda n: None`` to use the
     # AMQP default exchange: a shortcut to bypass routing
     # and instead send directly to the queue named in the routing key.
     autoexchange = None
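
A hedged sketch of the ``autoexchange`` override mentioned in the comment
above, wired in through a custom AMQP subclass (one possible approach, not
the only one):

.. code-block:: python

    from celery import Celery
    from celery.app.amqp import AMQP

    class DirectAMQP(AMQP):
        # Returning None selects the AMQP default exchange, so messages go
        # straight to the queue named in the routing key.
        autoexchange = staticmethod(lambda name: None)

    app = Celery('proj', broker='amqp://', amqp=DirectAMQP)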

+ 6 - 5
celery/app/base.py

@@ -126,7 +126,7 @@ class Celery(object):
         set_as_current (bool):  Make this the global current app.
         tasks (str, type): A task registry or the name of a registry class.
         include (List[str]): List of modules every worker should import.
-        fixups (List[str]): List of fix-up plug-ins (see e.g.
+        fixups (List[str]): List of fix-up plug-ins (e.g., see
             :mod:`celery.fixups.django`).
         autofinalize (bool): If set to False a :exc:`RuntimeError`
             will be raised if the task registry or tasks are used before
@@ -550,7 +550,7 @@ class Celery(object):
         """Try to auto-discover and import modules with a specific name (by
         """Try to auto-discover and import modules with a specific name (by
         default 'tasks').
         default 'tasks').
 
 
-        If the name is empty, this will be delegated to fix-ups (e.g. Django).
+        If the name is empty, this will be delegated to fix-ups (e.g., Django).
 
 
         For example if you have an directory layout like this:
         For example if you have an directory layout like this:
 
 
@@ -619,7 +619,7 @@ class Celery(object):
         Supports the same arguments as :meth:`@-Task.apply_async`.

         Arguments:
-            name (str): Name of task to call (e.g. `"tasks.add"`).
+            name (str): Name of task to call (e.g., `"tasks.add"`).
             result_cls (~@AsyncResult): Specify custom result class.
         """
         parent = have_parent = None
@@ -905,7 +905,7 @@ class Celery(object):
         to be this app instance.

         App-compatible means that the class has a class attribute that
-        provides the default app it should use, e.g.
+        provides the default app it should use, for example:
         ``class Foo: app = None``.

         Arguments:
@@ -914,7 +914,8 @@ class Celery(object):
             attribute (str): Name of the attribute holding the app,
                 Default is 'app'.
             reverse (str): Reverse path to this object used for pickling
-                purposes.  E.g. for ``app.AsyncResult`` use ``"AsyncResult"``.
+                purposes. For example, to get ``app.AsyncResult``,
+                use ``"AsyncResult"``.
             keep_reduce (bool): If enabled a custom ``__reduce__``
                 implementation won't be provided.
         """

+ 1 - 1
celery/app/task.py

@@ -233,7 +233,7 @@ class Task(object):
 
 
     #: Even if :attr:`acks_late` is enabled, the worker will
     #: acknowledge tasks when the worker process executing them abruptly
-    #: exits or is signaled (e.g. :sig:`KILL`/:sig:`INT`, etc).
+    #: exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc).
     #:
     #: Setting this to true allows the message to be re-queued instead,
     #: so that the task will execute again by the same worker, or another
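
The attribute documented in this hunk is ``Task.reject_on_worker_lost``.
A short sketch of enabling it together with late acknowledgment (the task
body is a placeholder):

.. code-block:: python

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    @app.task(acks_late=True, reject_on_worker_lost=True)
    def process(item):
        # If the worker process executing this task exits abruptly,
        # the message is re-queued instead of being acknowledged.
        ...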

+ 1 - 1
celery/app/utils.py

@@ -217,7 +217,7 @@ def detect_settings(conf, preconf={}, ignore_keys=set(), prefix=None,
         info, left = _settings_info, set()

     # only raise error for keys that the user didn't provide two keys
-    # for (e.g. both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
+    # for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
     really_left = {key for key in left if info.convert[key] not in have}
     if really_left:
         # user is mixing old/new, or new/old settings, give renaming

+ 1 - 1
celery/apps/worker.py

@@ -125,7 +125,7 @@ class Worker(WorkController):
         app = self.app
         WorkController.on_start(self)

-        # this signal can be used to e.g. change queues after
+        # this signal can be used to, for example, change queues after
         # the -Q option has been applied.
         signals.celeryd_after_setup.send(
             sender=self.hostname, instance=self, conf=app.conf,
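
For reference, connecting to the ``celeryd_after_setup`` signal mentioned in
this comment looks roughly like this (the queue name is illustrative):

.. code-block:: python

    from celery.signals import celeryd_after_setup

    @celeryd_after_setup.connect
    def setup_direct_queue(sender, instance, **kwargs):
        # 'sender' is the nodename; add an extra queue after -Q is applied.
        instance.app.amqp.queues.select_add('{0}.dq'.format(sender))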

+ 1 - 2
celery/bin/amqp.py

@@ -73,8 +73,7 @@ class Spec(object):
     def str_args_to_python(self, arglist):
         """Process list of string arguments to values according to spec.

-        e.g::
-
+        Example:
             >>> spec = Spec([('queue', str), ('if_unused', bool)])
             >>> spec.str_args_to_python('pobox', 'true')
             ('pobox', True)

+ 2 - 2
celery/bin/base.py

@@ -535,8 +535,8 @@ class Command(object):
         supports a pool argument, and used to monkey patch eventlet/gevent
         environments as early as possible.

-        E.g::
-              has_pool_option = (['-P'], ['--pool'])
+        Example:
+              >>> has_pool_option = (['-P'], ['--pool'])
         """
         pass


+ 2 - 2
celery/bin/celery.py

@@ -13,7 +13,7 @@ and usually parsed before command-specific arguments.
 
 
 .. cmdoption:: -A, --app

-    app instance to use (e.g. ``module.attr_name``)
+    app instance to use (e.g., ``module.attr_name``)

 .. cmdoption:: -b, --broker

@@ -994,7 +994,7 @@ class upgrade(Command):
         return lines

     def _to_new_key(self, line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
-        # sort by length to avoid e.g. broker_transport overriding
+        # sort by length to avoid, for example, broker_transport overriding
         # broker_transport_options.
         for old_key in reversed(sorted(source, key=lambda x: len(x))):
             new_line = line.replace(old_key, keyfilter(source[old_key]))

+ 1 - 1
celery/bin/events.py

@@ -25,7 +25,7 @@
 
 
 .. cmdoption:: -r, --maxrate

-    Camera: Optional shutter rate limit (e.g. 10/m).
+    Camera: Optional shutter rate limit (e.g., 10/m).

 .. cmdoption:: -l, --loglevel


+ 1 - 1
celery/bin/worker.py

@@ -22,7 +22,7 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 
 
 .. cmdoption:: -n, --hostname

-    Set custom hostname, e.g. 'w1.%h'.  Expands: %h (hostname),
+    Set custom hostname (e.g., 'w1@%h').  Expands: %h (hostname),
     %n (name) and %d, (domain).

 .. cmdoption:: -B, --beat

+ 1 - 1
celery/canvas.py

@@ -337,7 +337,7 @@ class Signature(dict):
 
 
         Returns:
             Signature: This is a chaining method call
-                (i.e. it will return ``self``).
+                (i.e., it will return ``self``).
         """
         if immutable is not None:
             self.set_immutable(immutable)

+ 1 - 1
celery/concurrency/__init__.py

@@ -4,7 +4,7 @@ from __future__ import absolute_import, unicode_literals
 
 
 # Import from kombu directly as it's used
 # early in the import stage, where celery.utils loads
-# too much (e.g. for eventlet patching)
+# too much (e.g., for eventlet patching)
 from kombu.utils.imports import symbol_by_name

 __all__ = ['get_implementation']

+ 1 - 1
celery/concurrency/asynpool.py

@@ -744,7 +744,7 @@ class AsynPool(_pool.Pool):
             # with many processes, and also leans more towards fairness
             # in write stats when used with many processes
             # [XXX On macOS, this may vary depending
-            # on event loop implementation (i.e select vs epoll), so
+            # on event loop implementation (i.e., select/poll vs epoll), so
             # have to test further]
             num_ready = len(ready_fds)


+ 1 - 2
celery/events/__init__.py

@@ -62,8 +62,7 @@ def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
 def group_from(type):
     """Get the group part of an event type name.

-    E.g.::
-
+    Example:
         >>> group_from('task-sent')
         'task'


+ 3 - 3
celery/events/state.py

@@ -11,7 +11,7 @@ at the time of the last event.
 
 
 Snapshots (:mod:`celery.events.snapshot`) can be used to
 take "pictures" of this state at regular intervals
-to e.g. store that in a database.
+to, for example, store that in a database.
 """
 from __future__ import absolute_import, unicode_literals

@@ -260,7 +260,7 @@ class Task(object):
         __slots__ = ('__dict__', '__weakref__')

     #: How to merge out of order events.
-    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
+    #: Disorder is detected by logical ordering (e.g., :event:`task-received`
     #: must've happened before a :event:`task-failed` event).
     #:
     #: A merge rule consists of a state and a list of fields to keep from
@@ -304,7 +304,7 @@ class Task(object):
         # using .get is faster than catching KeyError in this case.
         state = task_event_to_state(type_)
         if state is not None:
-            # sets e.g. self.succeeded to the timestamp.
+            # sets, for example, self.succeeded to the timestamp.
             setattr(self, type_, timestamp)
         else:
             state = type_.upper()  # custom state

+ 1 - 1
celery/five.py

@@ -55,7 +55,7 @@ else:
 
 
 def getappattr(path):
     """Gets attribute from the current_app recursively,
-    e.g. getappattr('amqp.get_task_consumer')``."""
+    (e.g., ``getappattr('amqp.get_task_consumer')``)."""
     from celery import current_app
     return current_app._rgetattr(path)


+ 3 - 3
celery/loaders/base.py

@@ -142,12 +142,12 @@ class BaseLoader(object):
     def _smart_import(self, path, imp=None):
         imp = self.import_module if imp is None else imp
         if ':' in path:
-            # Path includes attribute so can just jump here.
-            # e.g. ``os.path:abspath``.
+            # Path includes attribute so can just jump
+            # here (e.g., ``os.path:abspath``).
             return symbol_by_name(path, imp=imp)

         # Not sure if path is just a module name or if it includes an
-        # attribute name (e.g. ``os.path``, vs, ``os.path.abspath``).
+        # attribute name (e.g., ``os.path`` vs. ``os.path.abspath``).
         try:
             return imp(path)
         except ImportError:
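
The ``module:attribute`` convention handled here comes from Kombu's
``symbol_by_name``; a quick illustration:

.. code-block:: python

    from kombu.utils.imports import symbol_by_name

    # 'module:attribute' jumps straight to the attribute...
    abspath = symbol_by_name('os.path:abspath')
    # ...while a plain dotted path is first tried as a module import.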

+ 1 - 1
celery/platforms.py

@@ -715,7 +715,7 @@ else:
 
 
 
 
 def get_errno_name(n):
-    """Get errno for string, e.g. ``ENOENT``."""
+    """Get errno for string (e.g., ``ENOENT``)."""
     if isinstance(n, string_t):
         return getattr(errno, n)
     return n
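
A small usage sketch for the function above:

.. code-block:: python

    import errno
    from celery.platforms import get_errno_name

    # A symbolic name is resolved; numbers pass through unchanged.
    assert get_errno_name('ENOENT') == errno.ENOENT
    assert get_errno_name(errno.EPERM) == errno.EPERM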

+ 1 - 1
celery/result.py

@@ -624,7 +624,7 @@ class ResultSet(ResultBase):
 
 
         Note:
             This can be an expensive operation for result store
-            backends that must resort to polling (e.g. database).
+            backends that must resort to polling (e.g., database).

             You should consider using :meth:`join_native` if your backend
             supports it.

+ 2 - 2
celery/schedules.py

@@ -115,7 +115,7 @@ class schedule(object):
         .. admonition:: Scheduler max interval variance

             The default max loop interval may vary for different schedulers.
-            For the default scheduler the value is 5 minutes, but for e.g.
+            For the default scheduler the value is 5 minutes, but for example
             the :pypi:`django-celery` database scheduler the value
             is 5 seconds.
         """
@@ -728,7 +728,7 @@ class solar(schedule):
 
 
     def remaining_estimate(self, last_run_at):
         """Returns when the periodic task should run next as a
-        :class:`~datetime.timedelta`, or if it shouldn't run today (e.g.
+        :class:`~datetime.timedelta`, or if it shouldn't run today (e.g.,
         the sun does not rise today), returns the time when the next check
         should take place."""
         last_run_at = self.maybe_make_aware(last_run_at)

+ 1 - 1
celery/task/base.py

@@ -152,7 +152,7 @@ class Task(BaseTask):
     )

     # In old Celery the @task decorator didn't exist, so one would create
-    # classes instead and use them directly (e.g. MyTask.apply_async()).
+    # classes instead and use them directly (e.g., MyTask.apply_async()).
     # the use of classmethods was a hack so that it was not necessary
     # to instantiate the class before using it, but it has only
     # given us pain (like all magic).

+ 2 - 4
celery/utils/collections.py

@@ -109,10 +109,8 @@ class OrderedDict(_OrderedDict):
 
 
 
 
 class AttributeDictMixin(object):
-    """Augment classes with a Mapping interface by adding attribute access.
-
-    I.e. `d.key -> d[key]`.
-    """
+    """Augment classes with a Mapping interface by adding attribute
+    access (i.e., `d.key -> d[key]`)."""

     def __getattr__(self, k):
         """`d.key -> d[key]`"""

+ 2 - 2
celery/utils/debug.py

@@ -106,7 +106,7 @@ def memdump(samples=10, file=None):  # pragma: no cover
 def sample(x, n, k=0):
     """Given a list `x` a sample of length ``n`` of that list is returned.

-    E.g. if `n` is 10, and `x` has 100 items, a list of every tenth.
+    For example, if `n` is 10, and `x` has 100 items, a list of every tenth
     item is returned.

     ``k`` can be used as offset.
@@ -132,7 +132,7 @@ def hfloat(f, p=5):
 
 
 
 
 def humanbytes(s):
-    """Convert bytes to human-readable form (e.g. KB, MB)."""
+    """Convert bytes to human-readable form (e.g., KB, MB)."""
     return next(
         '{0}{1}'.format(hfloat(s / div if div else s), unit)
         for div, unit in UNITS if s >= div
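
A usage sketch for the two helpers above (the output values are assumed from
the docstrings, not verified):

.. code-block:: python

    from celery.utils.debug import humanbytes, sample

    sample(list(range(100)), 10)  # -> [0, 10, 20, ..., 90]
    humanbytes(2 ** 20)           # -> '1MB'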

+ 4 - 4
celery/utils/dispatch/saferef.py

@@ -64,7 +64,7 @@ class BoundMethodWeakref(object):  # pragma: no cover
         deletion_methods (Sequence[Callable]): Callables taking
             single argument, a reference to this object which
             will be called when *either* the target object or
-            target function is garbage collected (i.e. when
+            target function is garbage collected (i.e., when
             this object becomes invalid).  These are specified
             as the on_delete parameters of :func:`safe_ref` calls.

@@ -122,7 +122,7 @@ class BoundMethodWeakref(object):  # pragma: no cover
 
 
             on_delete (Callable): Optional callback which will be called
                 when this weak reference ceases to be valid
-                (i.e. either the object or the function is garbage
+                (i.e., either the object or the function is garbage
                 collected).  Should take a single argument,
                 which will be passed a pointer to this object.
         """
@@ -222,7 +222,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):  # pragma: no cover
             >>> A.bar = foo

         This shouldn't be a common use case.  So, on platforms where methods
-        aren't descriptors (e.g. Jython) this implementation has the
+        aren't descriptors (e.g., Jython) this implementation has the
         advantage of working in the most cases.
     """
     def __init__(self, target, on_delete=None):
@@ -239,7 +239,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):  # pragma: no cover
 
 
             on_delete (Callable): Optional callback which will be called
                 when this weak reference ceases to be valid
-                (i.e. either the object or the function is garbage
+                (i.e., either the object or the function is garbage
                 collected).  Should take a single argument,
                 which will be passed a pointer to this object.
         """

+ 1 - 1
celery/utils/nodenames.py

@@ -38,7 +38,7 @@ def worker_direct(hostname):
 
 
     Arguments:
         hostname (str, ~kombu.Queue): The fully qualified node name of
-            a worker (e.g. ``w1@example.com``).  If passed a
+            a worker (e.g., ``w1@example.com``).  If passed a
             :class:`kombu.Queue` instance it will simply return
             that instead.
     """

+ 1 - 1
celery/utils/serialization.py

@@ -46,7 +46,7 @@ def find_pickleable_exception(exc, loads=pickle.loads,
                               dumps=pickle.dumps):
     """With an exception instance, iterate over its super classes (by MRO)
     and find the first super exception that's pickleable.  It does
-    not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
+    not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,
     :class:`BaseException` and :class:`object`).  If that happens
     you should use :exc:`UnpickleableException` instead.


+ 6 - 6
celery/utils/timeutils.py

@@ -192,13 +192,13 @@ def remaining(start, ends_in, now=None, relative=False):
     """Calculate the remaining time for a start date and a
     """Calculate the remaining time for a start date and a
     :class:`~datetime.timedelta`.
     :class:`~datetime.timedelta`.
 
 
-    e.g. "how many seconds left for 30 seconds after start?"
+    For example, "how many seconds left for 30 seconds after start?"
 
 
     Arguments:
     Arguments:
         start (~datetime.datetime): Starting date.
         start (~datetime.datetime): Starting date.
         ends_in (~datetime.timedelta): The end delta.
         ends_in (~datetime.timedelta): The end delta.
         relative (bool): If enabled the end time will be calculated
         relative (bool): If enabled the end time will be calculated
-            using :func:`delta_resolution` (i.e. rounded to the
+            using :func:`delta_resolution` (i.e., rounded to the
             resolution of `ends_in`).
             resolution of `ends_in`).
         now (Callable): Function returning the current time and date.
         now (Callable): Function returning the current time and date.
             Defaults to :func:`datetime.utcnow`.
             Defaults to :func:`datetime.utcnow`.
@@ -244,12 +244,12 @@ def weekday(name):
 
 
 
 
 def humanize_seconds(secs, prefix='', sep='', now='now', microseconds=False):
-    """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2
-    hours".
+    """Show seconds in human form (e.g., 60 is "1 minute", 7200 is "2
+    hours").

     Arguments:
-        prefix (str): can be used to add a preposition to the output,
-            e.g. 'in' will give 'in 1 second', but add nothing to 'now'.
+        prefix (str): can be used to add a preposition to the output
+            (e.g., 'in' will give 'in 1 second', but add nothing to 'now').
         now (str): Literal 'now'.
         microseconds (bool): Include microseconds.
     """

+ 1 - 1
celery/worker/__init__.py

@@ -6,7 +6,7 @@ while the worker program is in :mod:`celery.apps.worker`.
 
 
 The worker program is responsible for adding signal handlers,
 setting up logging, etc.  This is a bare-bones worker without
-global side-effects (i.e. except for the global state stored in
+global side-effects (i.e., except for the global state stored in
 :mod:`celery.worker.state`).

 The worker consists of several components, all managed by bootsteps

+ 2 - 2
celery/worker/components.py

@@ -39,8 +39,8 @@ class Timer(bootsteps.Step):
             w.timer = _Timer(max_interval=10.0)
         else:
             if not w.timer_cls:
-                # Default Timer is set by the pool, as e.g. eventlet
-                # needs a custom implementation.
+                # Default Timer is set by the pool, as, for example, the
+                # eventlet pool needs a custom timer implementation.
                 w.timer_cls = w.pool_cls.Timer
             w.timer = self.instantiate(w.timer_cls,
                                        max_interval=w.timer_precision,

+ 2 - 2
celery/worker/control.py

@@ -144,7 +144,7 @@ def revoke(state, task_id, terminate=False, signal=None, **kwargs):
 
 
     Keyword Arguments:
         terminate (bool): Also terminate the process if the task is active.
-        signal (str): Name of signal to use for terminate.  E.g. ``KILL``.
+        signal (str): Name of signal to use for terminate (e.g., ``KILL``).
     """
     # supports list argument since 3.1
     task_ids, task_id = set(maybe_list(task_id) or []), None
@@ -183,7 +183,7 @@ def terminate(state, signal, task_id, **kwargs):
 
 
 @control_command(
     args=[('task_name', text_t), ('rate_limit', text_t)],
-    signature='<task_name> <rate_limit (e.g. 5/s | 5/m | 5/h)>',
+    signature='<task_name> <rate_limit (e.g., 5/s | 5/m | 5/h)>',
 )
 def rate_limit(state, task_name, rate_limit, **kwargs):
     """Tell worker(s) to modify the rate limit for a task by type.

+ 14 - 10
docs/contributing.rst

@@ -327,9 +327,10 @@ The master branch is where development of the next version happens.
 Maintenance branches
 --------------------

-Maintenance branches are named after the version, e.g. the maintenance branch
-for the 2.2.x series is named ``2.2``. Previously these were named
-``releaseXX-maint``.
+Maintenance branches are named after the version -- for example,
+the maintenance branch for the 2.2.x series is named ``2.2``.
+
+Previously these were named ``releaseXX-maint``.

 The versions we currently maintain is:

@@ -375,10 +376,13 @@ Feature branches are removed once they've been merged into a release branch.
 Tags
 ====

-Tags are used exclusively for tagging releases. A release tag is
-named with the format ``vX.Y.Z``, e.g. ``v2.3.1``.
-Experimental releases contain an additional identifier ``vX.Y.Z-id``, e.g.
-``v3.0.0-rc1``. Experimental tags may be removed after the official release.
+- Tags are used exclusively for tagging releases. A release tag is
+  named with the format ``vX.Y.Z`` -- for example ``v2.3.1``.
+
+- Experimental releases contain an additional identifier ``vX.Y.Z-id`` --
+  for example ``v3.0.0-rc1``.
+
+- Experimental tags may be removed after the official release.

 .. _contributing-changes:

@@ -814,7 +818,7 @@ that require third-party libraries must be added.
 
 
 1) Add a new requirements file in `requirements/extras`

-    E.g. for the Cassandra backend this is
+    For the Cassandra backend this is
     :file:`requirements/extras/cassandra.txt`, and the file looks like this:

     .. code-block:: text
@@ -1123,7 +1127,7 @@ following:
 
 
 * Enter "Edit project"
 * Enter "Edit project"
 
 
-    Change default branch to the branch of this series, e.g. ``2.4``
-    for series 2.4.
+    Change default branch to the branch of this series, for example, use
+    the ``2.4`` branch for the 2.4 series.
 
 
 * Also add the previous version under the "versions" tab.
 * Also add the previous version under the "versions" tab.

+ 5 - 5
docs/django/first-steps-with-django.rst

@@ -80,7 +80,7 @@ from the Django settings; but you can also separate them if wanted.
 
 
 The uppercase name-space means that all Celery configuration options
 must be specified in uppercase instead of lowercase, and start with
-``CELERY_``, so e.g. the :setting:`task_always_eager`` setting
+``CELERY_``, so, for example, the :setting:`task_always_eager` setting
 becomes ``CELERY_TASK_ALWAYS_EAGER``, and the :setting:`broker_url`
 setting becomes ``CELERY_BROKER_URL``.

@@ -192,10 +192,10 @@ To use this with your project you need to follow these four steps:
 
 
 .. admonition:: Relative Imports

-    You have to be consistent in how you import the task module, e.g. if
-    you have ``project.app`` in ``INSTALLED_APPS`` then you also
-    need to import the tasks ``from project.app`` or else the names
-    of the tasks will be different.
+    You have to be consistent in how you import the task module.
+    For example, if you have ``project.app`` in ``INSTALLED_APPS``, then you
+    must also import the tasks ``from project.app`` or else the names
+    of the tasks will end up being different.

     See :ref:`task-naming-relative-imports`

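
To make the uppercase namespace concrete, a hypothetical Django
``settings.py`` fragment using the ``CELERY_`` prefix described above:

.. code-block:: python

    # settings.py -- values are illustrative only
    CELERY_BROKER_URL = 'amqp://localhost'  # maps to broker_url
    CELERY_TASK_ALWAYS_EAGER = False        # maps to task_always_eager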

+ 1 - 1
docs/faq.rst

@@ -403,7 +403,7 @@ as they're actually executed. After the worker has received a task, it will
 take some time until it's actually executed, especially if there are a lot
 of tasks already waiting for execution. Messages that aren't acknowledged are
 held on to by the worker until it closes the connection to the broker (AMQP
-server). When that connection is closed (e.g. because the worker was stopped)
+server). When that connection is closed (e.g., because the worker was stopped)
 the tasks will be re-sent by the broker to the next available worker (or the
 same worker when it has been restarted), so to properly purge the queue of
 waiting tasks you have to stop all the workers, and then purge the tasks

+ 2 - 2
docs/getting-started/brokers/rabbitmq.rst

@@ -98,7 +98,7 @@ Finally, we can install RabbitMQ using :command:`brew`:
 
 
 After you've installed RabbitMQ with :command:`brew` you need to add the following to
 your path to be able to start and stop the broker: add it to the start-up file for your
-shell (e.g. :file:`.bash_profile` or :file:`.profile`).
+shell (e.g., :file:`.bash_profile` or :file:`.profile`).

 .. code-block:: bash

@@ -140,7 +140,7 @@ be `rabbit@myhost`, as verified by :command:`rabbitmqctl`:
     ...done.

 This is especially important if your DHCP server gives you a host name
-starting with an IP address, (e.g. `23.10.112.31.comcast.net`).  In this
+starting with an IP address (e.g., `23.10.112.31.comcast.net`).  In this
 case RabbitMQ will try to use `rabbit@23`: an illegal host name.

 .. _rabbitmq-macOS-start-stop:

+ 2 - 2
docs/getting-started/first-steps-with-celery.rst

@@ -131,8 +131,8 @@ defined in the `__main__` module.
 The second argument is the broker keyword argument, specifying the URL of the
 message broker you want to use. Here using RabbitMQ (also the default option).

-See :ref:`celerytut-broker` above for more choices,
-e.g. for RabbitMQ you can use ``amqp://localhost``, or for Redis you can
+See :ref:`celerytut-broker` above for more choices --
+for RabbitMQ you can use ``amqp://localhost``, or for Redis you can
 use ``redis://localhost``.

 You defined a single task, called ``add``, returning the sum of two numbers.

+ 10 - 6
docs/getting-started/next-steps.rst

@@ -207,7 +207,7 @@ you're encouraged to put these in a dedicated directory:
 
 
 With the multi command you can start multiple workers, and there's a powerful
 command-line syntax to specify arguments for different workers too,
-e.g:
+for example:

 .. code-block:: console

@@ -242,8 +242,8 @@ If none of these are found it'll try a submodule named ``proj.celery``:
 6) Any attribute in the module ``proj.celery`` where the value is a Celery
    application.

-This scheme mimics the practices used in the documentation,
-i.e. ``proj:app`` for a single contained module, and ``proj.celery:app``
+This scheme mimics the practices used in the documentation -- that is,
+``proj:app`` for a single contained module, and ``proj.celery:app``
 for larger projects.


@@ -388,8 +388,10 @@ from this example:
     >>> res.state
     'PENDING'

-If the task is retried the stages can become even more complex,
-e.g, for a task that's retried two times the stages would be::
+If the task is retried the stages can become even more complex.
+To demonstrate, for a task that's retried two times the stages would be:
+
+.. code-block:: text

     PENDING -> STARTED -> RETRY -> STARTED -> RETRY -> STARTED -> SUCCESS

@@ -596,7 +598,9 @@ to a chord:
 
 
 
 
 Since these primitives are all of the signature type they
-can be combined almost however you want, e.g::
+can be combined almost however you want, for example:
+
+.. code-block:: pycon

     >>> upload_document.s(file) | group(apply_filter.s() for filter in filters)


+ 1 - 1
docs/glossary.rst

@@ -90,7 +90,7 @@ Glossary
 
 
     reentrant
         describes a function that can be interrupted in the middle of
-        execution (e.g. by hardware interrupt or signal), and then safely
+        execution (e.g., by hardware interrupt or signal), and then safely
         called again later. Reentrancy isn't the same as
         :term:`idempotence <idempotent>` as the return value doesn't have to
         be the same given the same inputs, and a reentrant function may have

+ 22 - 18
docs/history/changelog-1.0.rst

@@ -200,7 +200,7 @@ News
 
 
         @periodic_task(run_every=crontab(minutes=30))
         def every_hour():
-            print('Runs every hour on the clock. e.g. 1:30, 2:30, 3:30 etc.')
+            print('Runs every hour on the clock (e.g., 1:30, 2:30, 3:30 etc.).')

     .. note::
         This a late addition. While we have unit tests, due to the
@@ -575,8 +575,8 @@ Fixes
 * The ETA scheduler now deletes any revoked tasks it might encounter.

     As revokes aren't yet persistent, this is done to make sure the task
-    is revoked even though it's currently being hold because its ETA is e.g.
-    a week into the future.
+    is revoked even though, for example, it's currently being held because
+    its ETA is a week into the future.

 * The `task_id` argument is now respected even if the task is executed
   eagerly (either using apply, or :setting:`CELERY_ALWAYS_EAGER`).
@@ -611,7 +611,7 @@ Fixes
 * Added `Task.delivery_mode` and the :setting:`CELERY_DEFAULT_DELIVERY_MODE`
   setting.

-    These can be used to mark messages non-persistent (i.e. so they're
+    These can be used to mark messages non-persistent (i.e., so they're
     lost if the broker is restarted).

 * Now have our own `ImproperlyConfigured` exception, instead of using the
@@ -757,8 +757,8 @@ Backward incompatible changes
 * The :envvar:`CELERY_LOADER` environment variable now needs loader class name
   in addition to module name,

-    E.g. where you previously had: `"celery.loaders.default"`, you now need
-    `"celery.loaders.default.Loader"`, using the previous syntax will result
+    For example, where you previously had: `"celery.loaders.default"`, you now
+    need `"celery.loaders.default.Loader"`, using the previous syntax will result
     in a `DeprecationWarning`.

 * Detecting the loader is now lazy, and so isn't done when importing
@@ -844,16 +844,20 @@ News
 
 
 * Periodic tasks are now scheduled on the clock.

-    I.e. `timedelta(hours=1)` means every hour at :00 minutes, not every
+    That is, `timedelta(hours=1)` means every hour at :00 minutes, not every
     hour from the server starts. To revert to the previous behavior you
     can set `PeriodicTask.relative = True`.

-* Now supports passing execute options to a TaskSets list of args, e.g.:
+* Now supports passing execute options to a TaskSets list of args.

-    >>> ts = TaskSet(add, [([2, 2], {}, {'countdown': 1}),
-    ...                   ([4, 4], {}, {'countdown': 2}),
-    ...                   ([8, 8], {}, {'countdown': 3})])
-    >>> ts.run()
+    Example:
+
+    .. code-block:: pycon
+
+        >>> ts = TaskSet(add, [([2, 2], {}, {'countdown': 1}),
+        ...                    ([4, 4], {}, {'countdown': 2}),
+        ...                    ([8, 8], {}, {'countdown': 3})])
+        >>> ts.run()

 * Got a 3x performance gain by setting the prefetch count to four times the
   concurrency, (from an average task round-trip of 0.1s to 0.03s!).
@@ -1263,10 +1267,10 @@ Important changes
     goes away or stops responding, it is automatically replaced with
     a new one.

-* Task.name is now automatically generated out of class module+name, e.g.
-    `"djangotwitter.tasks.UpdateStatusesTask"`. Very convenient. No idea why
-    we didn't do this before. Some documentation is updated to not manually
-    specify a task name.
+* Task.name is now automatically generated out of class module+name, for
+  example `"djangotwitter.tasks.UpdateStatusesTask"`. Very convenient.
+  No idea why we didn't do this before. Some documentation is updated to not
+  manually specify a task name.

 .. _v060-news:

 
 
@@ -1399,7 +1403,7 @@ News
   by running `python manage.py celerystats`. See
   `celery.monitoring` for more information.

-* The Celery daemon can now be supervised (i.e. it is automatically
+* The Celery daemon can now be supervised (i.e., it is automatically
   restarted if it crashes). To use this start the worker with the
   --supervised` option (or alternatively `-S`).

@@ -1850,7 +1854,7 @@ arguments, so be sure to flush your task queue before you upgrade.
 
 
         http://mysite/celery/$task_id/done/

-  this will return a JSON dictionary like e.g:
+  this will return a JSON dictionary, for example:

   .. code-block:: json


+ 3 - 3
docs/history/changelog-2.0.rst

@@ -600,7 +600,7 @@ Backward incompatible changes
   (as scheduled by the :ref:`deprecation-timeline`):

     Assuming the implicit `Loader` class name is no longer supported,
-    if you use e.g.:
+    for example, if you use:

     .. code-block:: python

@@ -633,7 +633,7 @@ Backward incompatible changes
     cPickle is broken in Python <= 2.5.

     It unsafely and incorrectly uses relative instead of absolute imports,
-    so e.g.:
+    so for example:

     .. code-block:: python

@@ -803,7 +803,7 @@ News
 
 
         Soft time limit. The :exc:`~@SoftTimeLimitExceeded`
         exception will be raised when this is exceeded. The task can catch
-        this to e.g. clean up before the hard time limit comes.
+        this to, for example, clean up before the hard time limit comes.

     New command-line arguments to ``celeryd`` added:
     `--time-limit` and `--soft-time-limit`.

+ 3 - 1
docs/history/changelog-2.1.rst

@@ -505,7 +505,9 @@ News
 * subtask: Merge additional keyword arguments to `subtask()` into task keyword
   arguments.

-    e.g.:
+    For example:
+
+    .. code-block:: pycon

         >>> s = subtask((1, 2), {'foo': 'bar'}, baz=1)
         >>> s.args

+ 5 - 5
docs/history/changelog-2.2.rst

@@ -696,7 +696,7 @@ Important Notes
     events will be gone as soon as the consumer stops. Also it means there
     can be multiple monitors running at the same time.

-    The routing key of an event is the type of event (e.g. `worker.started`,
+    The routing key of an event is the type of event (e.g., `worker.started`,
     `worker.heartbeat`, `task.succeeded`, etc. This means a consumer can
     filter on specific types, to only be alerted of the events it cares about.

@@ -898,9 +898,9 @@ News
 
 
 * The following fields have been added to all events in the worker class:

-    * `sw_ident`: Name of worker software (e.g. ``"py-celery"``).
-    * `sw_ver`: Software version (e.g. 2.2.0).
-    * `sw_sys`: Operating System (e.g. Linux, Windows, Darwin).
+    * `sw_ident`: Name of worker software (e.g., ``"py-celery"``).
+    * `sw_ver`: Software version (e.g., 2.2.0).
+    * `sw_sys`: Operating System (e.g., Linux, Windows, Darwin).

 * For better accuracy the start time reported by the multiprocessing worker
   process is used when calculating task duration.
@@ -1005,7 +1005,7 @@ Experimental
 * PyPy: worker now runs on PyPy.

     It runs without any pool, so to get parallel execution you must start
-    multiple instances (e.g. using :program:`multi`).
+    multiple instances (e.g., using :program:`multi`).

     Sadly an initial benchmark seems to show a 30% performance decrease on
     ``pypy-1.4.1`` + JIT. We would like to find out why this is, so stay tuned.

+ 3 - 3
docs/history/changelog-3.0.rst

@@ -314,7 +314,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
     now take less than a second).

 - Celery will now suggest alternatives if there's a typo in the
-  broker transport name (e.g. ``ampq`` -> ``amqp``).
+  broker transport name (e.g., ``ampq`` -> ``amqp``).

 - Worker: The auto-reloader would cause a crash if a monitored file
   was unlinked.
@@ -1116,7 +1116,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
     cleans up after the app like closing pool connections.

     Note that this is only necessary when dynamically creating apps,
-    e.g. for "temporary" apps.
+    for example "temporary" apps.

 - Support for piping a subtask to a chain.

@@ -1503,7 +1503,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
   (Issue #859).

 - Extension commands are now loaded after concurrency is set up
-  so that they don't interfere with e.g. eventlet patching.
+  so that they don't interfere with things like eventlet patching.

 - Fixed bug in the threaded pool (Issue #863)


+ 4 - 4
docs/history/changelog-3.1.rst

@@ -214,7 +214,7 @@ new in Celery 3.1.
 - **Results**: Redis result backend now allows for timeout to be set in the
   query portion of the result backend URL.

-    E.g. ``CELERY_RESULT_BACKEND = 'redis://?timeout=10'``
+    For example ``CELERY_RESULT_BACKEND = 'redis://?timeout=10'``

     Contributed by Justin Patrin.

@@ -834,7 +834,7 @@ News
     with workers and clients not using it, so be sure to enable
     the option in all clients and workers if you decide to use it.

-- **Multi**: With ``-opt:index`` (e.g. ``-c:1``) the index now always refers
+- **Multi**: With ``-opt:index`` (e.g., ``-c:1``) the index now always refers
   to the position of a node in the argument list.

     This means that referring to a number will work when specifying a list
@@ -1210,7 +1210,7 @@ Fixes
   not just by type (``all_active_count``).

 - Init-scripts:  Fixed problem with reading configuration file
-  when the init-script is symlinked to a runlevel (e.g. ``S02celeryd``).
+  when the init-script is symlinked to a runlevel (e.g., ``S02celeryd``).
   (Issue #1740).

     This also removed a rarely used feature where you can symlink the script
@@ -1493,7 +1493,7 @@ Fixes
 - Worker accidentally set a default socket timeout of 5 seconds.
 - Worker accidentally set a default socket timeout of 5 seconds.
 
 
 - Django: Fix-up now sets the default app so that threads will use
 - Django: Fix-up now sets the default app so that threads will use
-  the same app instance (e.g. for :command:`manage.py runserver`).
+  the same app instance (e.g., for :command:`manage.py runserver`).
 
 
 - Worker: Fixed Unicode error crash at start-up experienced by some users.
 - Worker: Fixed Unicode error crash at start-up experienced by some users.
 
 

+ 3 - 3
docs/history/whatsnew-2.5.rst

@@ -276,8 +276,8 @@ Previously you'd've to type ``update_twitter_status.retry(…)``
 here, which can be annoying for long task names.
 here, which can be annoying for long task names.
 
 
 .. note::
 .. note::
-    This won't work if the task function is called directly, i.e:
-    ``update_twitter_status(a, b)``. For that to work ``apply`` must
+    This won't work if the task function is called directly (i.e.,
+    ``update_twitter_status(a, b)``). For that to work ``apply`` must
     be used: ``update_twitter_status.apply((a, b))``.
     be used: ``update_twitter_status.apply((a, b))``.
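
    A minimal sketch of the distinction the note draws (the task body here
    is a placeholder, not the original example):

    .. code-block:: python

        @app.task
        def update_twitter_status(a, b):
            ...

        update_twitter_status(a, b)          # plain call: no task machinery
        update_twitter_status.apply((a, b))  # executed locally as a task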
 
 
 In Other News
 In Other News
@@ -315,7 +315,7 @@ In Other News
     Contributed by Steeve Morin.
     Contributed by Steeve Morin.
 
 
 - The Crontab parser now matches Vixie Cron behavior when parsing ranges
 - The Crontab parser now matches Vixie Cron behavior when parsing ranges
-  with steps (e.g. 1-59/2).
+  with steps (e.g., 1-59/2).
 
 
     Contributed by Daniel Hepper.
     Contributed by Daniel Hepper.
 
 

+ 6 - 6
docs/history/whatsnew-3.0.rst

@@ -364,7 +364,7 @@ priorities on the server side, which is why
 the feature is nicknamed "quasi-priorities";
 the feature is nicknamed "quasi-priorities";
 **Using routing is still the suggested way of ensuring
 **Using routing is still the suggested way of ensuring
 quality of service**, as client implemented priorities
 quality of service**, as client implemented priorities
-fall short in a number of ways, e.g. if the worker
+fall short in a number of ways, for example if the worker
 is busy with long running tasks, has prefetched many messages,
 is busy with long running tasks, has prefetched many messages,
 or the queues are congested.
 or the queues are congested.
 
 
@@ -694,7 +694,7 @@ The :option:`--app <celery --app>` option now 'auto-detects'
       to import a sub module named celery',
       to import a sub module named celery',
       and get the celery attribute from that module.
       and get the celery attribute from that module.
 
 
-E.g. if you have a project named ``proj`` where the
+For example, if you have a project named ``proj`` where the
 celery app is located in ``from proj.celery import app``,
 celery app is located in ``from proj.celery import app``,
 then the following will be equivalent:
 then the following will be equivalent:
 
 
@@ -718,7 +718,7 @@ In Other News
 - App instance factory methods have been converted to be cached
 - App instance factory methods have been converted to be cached
   descriptors that create a new subclass on access.
   descriptors that create a new subclass on access.
 
 
-    This means that e.g. ``app.Worker`` is an actual class
+    For example, this means that ``app.Worker`` is an actual class
     and will work as expected when:
     and will work as expected when:
 
 
     .. code-block:: python
     .. code-block:: python
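
        # Sketch of the usage the changelog implies (assumed, since the
        # original example isn't shown here): because app.Worker is now a
        # real class, it can be subclassed directly.
        class Worker(app.Worker):
            ...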
@@ -760,7 +760,7 @@ In Other News
 
 
 - Annotations now support decorators if the key starts with '@'.
 - Annotations now support decorators if the key starts with '@'.
 
 
-    E.g.:
+    For example:
 
 
     .. code-block:: python
     .. code-block:: python
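
        # Hedged sketch; the names here are illustrative, not the original
        # example.  A key starting with '@' is applied as a decorator to
        # the named task attribute.
        from functools import wraps

        def log_call(fun):
            @wraps(fun)
            def _inner(*args, **kwargs):
                print('calling %r' % (fun,))
                return fun(*args, **kwargs)
            return _inner

        CELERY_ANNOTATIONS = {'tasks.add': {'@__call__': log_call}}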
 
 
@@ -829,8 +829,8 @@ In Other News
 
 
 - ``group.skew(start=, stop=, step=)``
 - ``group.skew(start=, stop=, step=)``
 
 
-    Skew will skew the countdown for the individual tasks in a group,
-    e.g. with a group:
+    Skew staggers the countdown for the individual tasks in a group -- for
+    example, with this group:
 
 
     .. code-block:: pycon
     .. code-block:: pycon
 
 

+ 1 - 1
docs/internals/deprecation.rst

@@ -219,7 +219,7 @@ Removals for version 2.0
 
 
 * :envvar:`CELERY_LOADER` definitions without class name.
 * :envvar:`CELERY_LOADER` definitions without class name.
 
 
-    E.g. `celery.loaders.default`, needs to include the class name:
+    For example, `celery.loaders.default` needs to include the class name:
     `celery.loaders.default.Loader`.
     `celery.loaders.default.Loader`.
 
 
 * :meth:`TaskSet.run`. Use :meth:`celery.task.base.TaskSet.apply_async`
 * :meth:`TaskSet.run`. Use :meth:`celery.task.base.TaskSet.apply_async`

+ 2 - 2
docs/internals/guide.rst

@@ -62,7 +62,7 @@ Naming
     .. note::
     .. note::
 
 
         Sometimes it makes sense to have a class mask as a function,
         Sometimes it makes sense to have a class mask as a function,
-        and there's precedence for this in the Python standard library (e.g.
+        and there's precedent for this in the Python standard library (e.g.,
         :class:`~contextlib.contextmanager`). Celery examples include
         :class:`~contextlib.contextmanager`). Celery examples include
         :class:`~celery.signature`, :class:`~celery.chord`,
         :class:`~celery.signature`, :class:`~celery.chord`,
        ``inspect``, :class:`~kombu.utils.functional.promise` and more.
        ``inspect``, :class:`~kombu.utils.functional.promise` and more.
@@ -327,7 +327,7 @@ Worker overview
    Responsibilities:
    Responsibilities:
    * sets up logging and redirects standard outs
    * sets up logging and redirects standard outs
    * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb))
    * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb))
-   * prints banner and warnings (e.g. pickle warning)
+   * prints banner and warnings (e.g., pickle warning)
    * handles the :option:`celery worker --purge` argument
    * handles the :option:`celery worker --purge` argument
 
 
 * `app.WorkController` -> `celery.worker.WorkController`
 * `app.WorkController` -> `celery.worker.WorkController`

+ 6 - 5
docs/internals/protocol.rst

@@ -108,7 +108,7 @@ Changes from version 1
 
 
     This means that workers/intermediates can inspect the message
     This means that workers/intermediates can inspect the message
     and make decisions based on the headers without decoding
     and make decisions based on the headers without decoding
-    the payload (that may be language specific, e.g. serialized by the
+    the payload (that may be language specific, for example serialized by the
     Python specific pickle serializer).
     Python specific pickle serializer).
 
 
 - Always UTC
 - Always UTC
@@ -154,7 +154,8 @@ Changes from version 1
 - ``root_id`` and ``parent_id`` fields help keep track of work-flows.
 - ``root_id`` and ``parent_id`` fields help keep track of work-flows.
 
 
 - ``shadow`` lets you specify a different name for logs and monitors;
 - ``shadow`` lets you specify a different name for logs and monitors;
-  can be used for e.g. meta tasks that calls any function:
+  can be used for concepts like meta tasks that call a function
+  specified as an argument:
 
 
     .. code-block:: python
     .. code-block:: python
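
        # Illustrative sketch only; the task and function names below are
        # assumptions, not the original example.  A generic task that calls
        # a function given as an argument, with ``shadow`` naming it in
        # logs and monitors.
        @app.task(bind=True)
        def call(self, fun, *args, **kwargs):
            return fun(*args, **kwargs)

        def refresh_feed():
            ...

        # assumes the pickle serializer, since a function object is sent
        call.apply_async((refresh_feed,), shadow='proj.refresh_feed')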
 
 
@@ -335,7 +336,7 @@ Standard body fields
 - *string* ``type``
 - *string* ``type``
 
 
     The type of event. This is a string containing the *category* and
     The type of event. This is a string containing the *category* and
-    *action* separated by a dash delimiter (e.g. ``task-succeeded``).
+    *action* separated by a dash delimiter (e.g., ``task-succeeded``).
 
 
 - *string* ``hostname``
 - *string* ``hostname``
 
 
@@ -352,8 +353,8 @@ Standard body fields
 - *signed short* ``utcoffset``
 - *signed short* ``utcoffset``
 
 
     This field describes the timezone of the originating host, and is
     This field describes the timezone of the originating host, and is
-    specified as the number of hours ahead of/behind UTC. E.g. ``-2`` or
-    ``+1``.
+    specified as the number of hours ahead of/behind UTC (e.g., ``-2`` or
+    ``+1``).
 
 
 - *unsigned long long* ``pid``
 - *unsigned long long* ``pid``
 
 

+ 1 - 1
docs/reference/celery.utils.debug.rst

@@ -11,7 +11,7 @@ Sampling Memory Usage
 This module can be used to diagnose and sample the memory usage
 This module can be used to diagnose and sample the memory usage
 used by parts of your application.
 used by parts of your application.
 
 
-E.g to sample the memory usage of calling tasks you can do this:
+For example, to sample the memory usage of calling tasks you can do this:
 
 
 .. code-block:: python
 .. code-block:: python
 
 

+ 3 - 3
docs/userguide/calling.rst

@@ -30,7 +30,7 @@ The API defines a standard set of execution options, as well as three methods:
 
 
     - *calling* (``__call__``)
     - *calling* (``__call__``)
 
 
-        Applying an object supporting the calling API (e.g. ``add(2, 2)``)
+        Applying an object supporting the calling API (e.g., ``add(2, 2)``)
         means that the task will not be executed by a worker, but in the current
         means that the task will not be executed by a worker, but in the current
         process instead (a message won't be sent).
         process instead (a message won't be sent).
 
 
@@ -295,8 +295,8 @@ For example, the default policy correlates to:
 
 
 the maximum time spent retrying will be 0.4 seconds. It's set relatively
 the maximum time spent retrying will be 0.4 seconds. It's set relatively
 short by default because a connection failure could lead to a retry pile effect
 short by default because a connection failure could lead to a retry pile effect
-if the broker connection is down: e.g. many web server processes waiting
-to retry blocking other incoming requests.
+if the broker connection is down -- for example, many web server processes waiting
+to retry, blocking other incoming requests.
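
A sketch of a retry policy matching the 0.4-second figure above (``add``
is a stand-in task name):

.. code-block:: python

    add.apply_async((2, 2), retry=True, retry_policy={
        'max_retries': 3,
        'interval_start': 0,
        'interval_step': 0.2,
        'interval_max': 0.2,
    })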
 
 
 .. _calling-connection-errors:
 .. _calling-connection-errors:
 
 

+ 4 - 3
docs/userguide/canvas.rst

@@ -273,7 +273,7 @@ The Primitives
 
 
         The map primitive works like the built-in ``map`` function, but creates
         The map primitive works like the built-in ``map`` function, but creates
         a temporary task where a list of arguments is applied to the task.
         a temporary task where a list of arguments is applied to the task.
-        E.g. ``task.map([1, 2])`` results in a single task
+        For example, ``task.map([1, 2])`` results in a single task
         being called, applying the arguments in order to the task function so
         being called, applying the arguments in order to the task function so
         that the result is:
         that the result is:
 
 
@@ -293,7 +293,8 @@ The Primitives
 
 
     - ``chunks``
     - ``chunks``
 
 
-        Chunking splits a long list of arguments into parts, e.g the operation:
+        Chunking splits a long list of arguments into parts, for example
+        the operation:
 
 
         .. code-block:: pycon
         .. code-block:: pycon
 
 
@@ -725,7 +726,7 @@ It supports the following operations:
 * :meth:`~celery.result.GroupResult.successful`
 * :meth:`~celery.result.GroupResult.successful`
 
 
     Return :const:`True` if all of the subtasks finished
     Return :const:`True` if all of the subtasks finished
-    successfully (e.g. didn't raise an exception).
+    successfully (i.e., didn't raise an exception).
 
 
 * :meth:`~celery.result.GroupResult.failed`
 * :meth:`~celery.result.GroupResult.failed`
 
 

+ 10 - 12
docs/userguide/configuration.rst

@@ -450,10 +450,8 @@ Default: No soft time limit.
 Task soft time limit in seconds.
 Task soft time limit in seconds.
 
 
 The :exc:`~@SoftTimeLimitExceeded` exception will be
 The :exc:`~@SoftTimeLimitExceeded` exception will be
-raised when this is exceeded. The task can catch this to
-e.g. clean up before the hard time limit comes.
-
-Example:
+raised when this is exceeded. For example, the task can catch this to
+clean up before the hard time limit comes:
 
 
 .. code-block:: python
 .. code-block:: python
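
    # Sketch of the pattern described above; do_work/cleanup_in_a_hurry
    # are placeholders, not part of the original example.
    from celery.exceptions import SoftTimeLimitExceeded

    @app.task
    def mytask():
        try:
            return do_work()
        except SoftTimeLimitExceeded:
            cleanup_in_a_hurry()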
 
 
@@ -489,7 +487,7 @@ Default: Disabled.
 
 
 Even if :setting:`task_acks_late` is enabled, the worker will
 Even if :setting:`task_acks_late` is enabled, the worker will
 acknowledge tasks when the worker process executing them abruptly
 acknowledge tasks when the worker process executing them abruptly
-exits or is signaled (e.g. :sig:`KILL`/:sig:`INT`, etc).
+exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`).
 
 
 Setting this to true allows the message to be re-queued instead,
 Setting this to true allows the message to be re-queued instead,
 so that the task will execute again by the same worker, or another
 so that the task will execute again by the same worker, or another
@@ -888,7 +886,7 @@ The fields of the URL are defined as follows:
 
 
 #. ``host``
 #. ``host``
 
 
-    Host name or IP address of the Redis server. e.g. `localhost`.
+    Host name or IP address of the Redis server (e.g., `localhost`).
 
 
 #. ``port``
 #. ``port``
 
 
@@ -943,7 +941,7 @@ This backend requires the following configuration directives to be set.
 
 
 Default: ``[]`` (empty list).
 Default: ``[]`` (empty list).
 
 
-List of ``host`` Cassandra servers. e.g.::
+List of ``host`` Cassandra servers. For example::
 
 
     cassandra_servers = ['localhost']
     cassandra_servers = ['localhost']
 
 
@@ -963,7 +961,7 @@ Port to contact the Cassandra servers on.
 
 
 Default: None.
 Default: None.
 
 
-The key-space in which to store the results. e.g.::
+The key-space in which to store the results. For example::
 
 
     cassandra_keyspace = 'tasks_keyspace'
     cassandra_keyspace = 'tasks_keyspace'
 
 
@@ -974,7 +972,7 @@ The key-space in which to store the results. e.g.::
 
 
 Default: None.
 Default: None.
 
 
-The table (column family) in which to store the results. e.g.::
+The table (column family) in which to store the results. For example::
 
 
     cassandra_table = 'tasks'
     cassandra_table = 'tasks'
 
 
@@ -1025,7 +1023,7 @@ AuthProvider class within ``cassandra.auth`` module to use. Values can be
 
 
 Default: ``{}`` (empty mapping).
 Default: ``{}`` (empty mapping).
 
 
-Named arguments to pass into the authentication provider. e.g.:
+Named arguments to pass into the authentication provider. For example:
 
 
 .. code-block:: python
 .. code-block:: python
 
 
@@ -1093,7 +1091,7 @@ The fields of the URL are defined as follows:
 
 
 #. ``host``
 #. ``host``
 
 
-    Host name or IP address of the Riak server. e.g. `'localhost'`.
+    Host name or IP address of the Riak server (e.g., `'localhost'`).
 
 
 #. ``port``
 #. ``port``
 
 
@@ -2274,7 +2272,7 @@ between checking the schedule.
 
 
 The default for this value is scheduler specific.
 The default for this value is scheduler specific.
 For the default Celery beat scheduler the value is 300 (5 minutes),
 For the default Celery beat scheduler the value is 300 (5 minutes),
-but for e.g. the :pypi:`django-celery` database scheduler it's 5 seconds
+but for the :pypi:`django-celery` database scheduler, for example, it's 5 seconds
 because the schedule may be changed externally, and so it must take
 because the schedule may be changed externally, and so it must take
 changes to the schedule into account.
 changes to the schedule into account.
 
 

+ 2 - 2
docs/userguide/daemonizing.rst

@@ -38,7 +38,7 @@ configuration module).
 The daemonization script is configured by the file :file:`/etc/default/celeryd`.
 The daemonization script is configured by the file :file:`/etc/default/celeryd`.
 This is a shell (:command:`sh`) script where you can add environment variables like
 This is a shell (:command:`sh`) script where you can add environment variables like
 the configuration options below.  To add real environment variables affecting
 the configuration options below.  To add real environment variables affecting
-the worker you must also export them (e.g. :command:`export DISPLAY=":0"`)
+the worker, you must also export them (e.g., :command:`export DISPLAY=":0"`).
 
 
 .. Admonition:: Superuser privileges required
 .. Admonition:: Superuser privileges required
 
 
@@ -110,7 +110,7 @@ This is an example configuration for a Python project.
 
 
     # Workers should run as an unprivileged user.
     # Workers should run as an unprivileged user.
     #   You need to create this user manually (or you can choose
     #   You need to create this user manually (or you can choose
-    #   a user/group combination that already exists, e.g. nobody).
+    #   a user/group combination that already exists, e.g., nobody).
     CELERYD_USER="celery"
     CELERYD_USER="celery"
     CELERYD_GROUP="celery"
     CELERYD_GROUP="celery"
 
 

+ 8 - 7
docs/userguide/extending.rst

@@ -136,7 +136,7 @@ Attributes
 
 
 .. attribute:: hostname
 .. attribute:: hostname
 
 
-    The workers node name (e.g. `worker1@example.com`)
+    The worker's node name (e.g., `worker1@example.com`).
 
 
 .. _extending-worker-blueprint:
 .. _extending-worker-blueprint:
 
 
@@ -306,7 +306,7 @@ Attributes
 
 
 .. attribute:: hostname
 .. attribute:: hostname
 
 
-    The workers node name (e.g. `worker1@example.com`)
+    The worker's node name (e.g., `worker1@example.com`).
 
 
 .. _extending-consumer-blueprint:
 .. _extending-consumer-blueprint:
 
 
@@ -504,7 +504,7 @@ Attributes
 .. attribute:: qos
 .. attribute:: qos
 
 
     The :class:`~kombu.common.QoS` object can be used to change the
     The :class:`~kombu.common.QoS` object can be used to change the
-    task channels current prefetch_count value, e.g:
+    task channel's current ``prefetch_count`` value:
 
 
     .. code-block:: python
     .. code-block:: python
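
        # Hedged sketch: these methods exist on kombu.common.QoS and apply
        # the change safely at the next opportunity (``consumer`` stands in
        # for the Consumer instance described in this section).
        consumer.qos.increment_eventually(1)   # raise prefetch count by one
        consumer.qos.decrement_eventually(1)   # lower it again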
 
 
@@ -589,9 +589,9 @@ It can be added both as a worker and consumer bootstep:
             print('{0!r} is starting'.format(parent))
             print('{0!r} is starting'.format(parent))
 
 
         def stop(self, parent):
         def stop(self, parent):
-            # the Consumer calls stop every time the consumer is restarted
-            # (i.e. connection is lost) and also at shutdown. The Worker
-            # will call stop at shutdown only.
+            # the Consumer calls stop every time the consumer is
+            # restarted (i.e., connection is lost) and also at shutdown.
+            # The Worker will call stop at shutdown only.
             print('{0!r} is stopping'.format(parent))
             print('{0!r} is stopping'.format(parent))
 
 
         def shutdown(self, parent):
         def shutdown(self, parent):
@@ -745,7 +745,8 @@ outside of the main parsing step.
 The list of default preload options can be found in the API reference:
 The list of default preload options can be found in the API reference:
 :mod:`celery.bin.base`.
 :mod:`celery.bin.base`.
 
 
-You can add new preload options too, e.g. to specify a configuration template:
+You can add new preload options too, for example to specify a configuration
+template:
 
 
 .. code-block:: python
 .. code-block:: python
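
    # A sketch against the 3.1/4.0 user_options API; the option name and
    # the template semantics are assumptions for illustration.
    from celery import Celery
    from celery.bin import Option

    app = Celery()
    app.user_options['preload'].add(Option(
        '-Z', '--template', default='default',
        help='Configuration template to use.',
    ))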
 
 

+ 9 - 9
docs/userguide/monitoring.rst

@@ -378,7 +378,7 @@ as manage users, virtual hosts and their permissions.
 
 
     The default virtual host (``"/"``) is used in these
     The default virtual host (``"/"``) is used in these
    examples; if you use a custom virtual host you have to add
    examples; if you use a custom virtual host you have to add
-    the ``-p`` argument to the command, e.g:
+    the ``-p`` argument to the command, for example:
     ``rabbitmqctl list_queues -p my_vhost …``
     ``rabbitmqctl list_queues -p my_vhost …``
 
 
 .. _`rabbitmqctl(1)`: http://www.rabbitmq.com/man/rabbitmqctl.1.man.html
 .. _`rabbitmqctl(1)`: http://www.rabbitmq.com/man/rabbitmqctl.1.man.html
@@ -457,8 +457,8 @@ The default queue is named `celery`. To get all available queues, invoke:
     the database. The recommended way around this is to use a
     the database. The recommended way around this is to use a
    dedicated `DATABASE_NUMBER` for Celery; you can also use
    dedicated `DATABASE_NUMBER` for Celery; you can also use
     database numbers to separate Celery applications from each other (virtual
     database numbers to separate Celery applications from each other (virtual
-    hosts), but this won't affect the monitoring events used by e.g. Flower
-    as Redis pub/sub commands are global rather than database based.
+    hosts), but this won't affect the monitoring events used by, for example,
+    Flower, as Redis pub/sub commands are global rather than database based.
 
 
 .. _monitoring-munin:
 .. _monitoring-munin:
 
 
@@ -782,9 +782,9 @@ The worker has connected to the broker and is online.
 - `hostname`: Nodename of the worker.
 - `hostname`: Nodename of the worker.
 - `timestamp`: Event time-stamp.
 - `timestamp`: Event time-stamp.
 - `freq`: Heartbeat frequency in seconds (float).
 - `freq`: Heartbeat frequency in seconds (float).
-- `sw_ident`: Name of worker software (e.g. ``py-celery``).
-- `sw_ver`: Software version (e.g. 2.2.0).
-- `sw_sys`: Operating System (e.g. Linux/Darwin).
+- `sw_ident`: Name of worker software (e.g., ``py-celery``).
+- `sw_ver`: Software version (e.g., 2.2.0).
+- `sw_sys`: Operating System (e.g., Linux/Darwin).
 
 
 .. event:: worker-heartbeat
 .. event:: worker-heartbeat
 
 
@@ -800,9 +800,9 @@ it is considered to be offline.
 - `hostname`: Nodename of the worker.
 - `hostname`: Nodename of the worker.
 - `timestamp`: Event time-stamp.
 - `timestamp`: Event time-stamp.
 - `freq`: Heartbeat frequency in seconds (float).
 - `freq`: Heartbeat frequency in seconds (float).
-- `sw_ident`: Name of worker software (e.g. ``py-celery``).
-- `sw_ver`: Software version (e.g. 2.2.0).
-- `sw_sys`: Operating System (e.g. Linux/Darwin).
+- `sw_ident`: Name of worker software (e.g., ``py-celery``).
+- `sw_ver`: Software version (e.g., 2.2.0).
+- `sw_sys`: Operating System (e.g., Linux/Darwin).
 - `active`: Number of currently executing tasks.
 - `active`: Number of currently executing tasks.
 - `processed`: Total number of tasks processed by this worker.
 - `processed`: Total number of tasks processed by this worker.
 
 

+ 5 - 5
docs/userguide/periodic-tasks.rst

@@ -44,7 +44,7 @@ more information about configuration options.
 
 
 The default scheduler (storing the schedule in the :file:`celerybeat-schedule`
 The default scheduler (storing the schedule in the :file:`celerybeat-schedule`
 file) will automatically detect that the time zone has changed, and so will
 file) will automatically detect that the time zone has changed, and so will
-reset the schedule itself, but other schedulers may not be so smart (e.g. the
+reset the schedule itself, but other schedulers may not be so smart (e.g., the
 Django database scheduler, see below) and in that case you'll have to reset the
 Django database scheduler, see below) and in that case you'll have to reset the
 schedule manually.
 schedule manually.
 
 
@@ -174,8 +174,8 @@ Available Fields
     Execution options (:class:`dict`).
     Execution options (:class:`dict`).
 
 
     This can be any argument supported by
     This can be any argument supported by
-    :meth:`~celery.task.base.Task.apply_async`,
-    e.g. `exchange`, `routing_key`, `expires`, and so on.
+    :meth:`~celery.task.base.Task.apply_async` --
+    `exchange`, `routing_key`, `expires`, and so on (see the sketch below).
 
 
 * `relative`
 * `relative`
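
A sketch of how the `options` field above appears in a schedule entry
(Celery 4.0 setting names; the task and values are placeholders):

.. code-block:: python

    app.conf.beat_schedule = {
        'add-every-30-seconds': {
            'task': 'tasks.add',
            'schedule': 30.0,
            'args': (16, 16),
            'options': {'expires': 60.0, 'routing_key': 'default'},
        },
    }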
 
 
@@ -360,8 +360,8 @@ All solar events are calculated using UTC, and are therefore
 unaffected by your timezone setting.
 unaffected by your timezone setting.
 
 
 In polar regions, the sun may not rise or set every day. The scheduler
 In polar regions, the sun may not rise or set every day. The scheduler
-is able to handle these cases, i.e. a ``sunrise`` event won't run on a day
-when the sun doesn't rise. The one exception is ``solar_noon``, which is
+is able to handle these cases (i.e., a ``sunrise`` event won't run on a day
+when the sun doesn't rise). The one exception is ``solar_noon``, which is
 formally defined as the moment the sun transits the celestial meridian,
 formally defined as the moment the sun transits the celestial meridian,
 and will occur every day even if the sun is below the horizon.
 and will occur every day even if the sun is below the horizon.
 
 

+ 2 - 2
docs/userguide/routing.rst

@@ -375,8 +375,8 @@ Related API commands
     :keyword passive: Passive means the exchange won't be created, but you
     :keyword passive: Passive means the exchange won't be created, but you
         can use this to check if the exchange already exists.
         can use this to check if the exchange already exists.
 
 
-    :keyword durable: Durable exchanges are persistent. I.e. they survive
-        a broker restart.
+    :keyword durable: Durable exchanges are persistent (i.e., they survive
+        a broker restart).
 
 
     :keyword auto_delete: This means the queue will be deleted by the broker
     :keyword auto_delete: This means the queue will be deleted by the broker
         when there are no more queues using it.
         when there are no more queues using it.

+ 1 - 1
docs/userguide/security.rst

@@ -53,7 +53,7 @@ Client
 ------
 ------
 
 
 In Celery, "client" refers to anything that sends messages to the
 In Celery, "client" refers to anything that sends messages to the
-broker, e.g. web-servers that apply tasks.
+broker, for example web-servers that apply tasks.
 
 
 Having the broker properly secured doesn't matter if arbitrary messages
 Having the broker properly secured doesn't matter if arbitrary messages
 can be sent through a client.
 can be sent through a client.

+ 4 - 4
docs/userguide/signals.rst

@@ -56,11 +56,11 @@ is published:
         ))
         ))
 
 
 Signals use the same implementation as :mod:`django.core.dispatch`. As a
 Signals use the same implementation as :mod:`django.core.dispatch`. As a
-result other keyword parameters (e.g. signal) are passed to all signal
+result other keyword parameters (e.g., signal) are passed to all signal
 handlers by default.
 handlers by default.
 
 
 The best practice for signal handlers is to accept arbitrary keyword
 The best practice for signal handlers is to accept arbitrary keyword
-arguments (i.e. ``**kwargs``). That way new Celery versions can add additional
+arguments (i.e., ``**kwargs``). That way new Celery versions can add additional
 arguments without breaking user code.
 arguments without breaking user code.
 
 
 .. _signal-ref:
 .. _signal-ref:
@@ -392,7 +392,7 @@ This signal is sent after the worker instance is set up, but before it
 calls run. This means that any queues from the :option:`celery worker -Q`
 calls run. This means that any queues from the :option:`celery worker -Q`
 option are enabled, logging has been set up and so on.
 option are enabled, logging has been set up and so on.
 
 
-It can be used to e.g. add custom queues that should always be consumed
+It can be used to add custom queues that should always be consumed
 from, disregarding the :option:`celery worker -Q` option. Here's an example
 from, disregarding the :option:`celery worker -Q` option. Here's an example
 that sets up a direct queue for each worker; these queues can then be
 that sets up a direct queue for each worker; these queues can then be
 used to route a task to any specific worker:
 used to route a task to any specific worker:
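
A sketch consistent with the behavior described (the signal matches this
section; the queue-name format is an assumption):

.. code-block:: python

    from celery.signals import celeryd_after_setup

    @celeryd_after_setup.connect
    def setup_direct_queue(sender, instance, **kwargs):
        queue_name = '{0}.dq'.format(sender)  # sender is the worker nodename
        instance.app.amqp.queues.select_add(queue_name)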
@@ -740,7 +740,7 @@ It can be used to add additional command-line arguments to the
 
 
 
 
 Sender is the :class:`~celery.bin.base.Command` instance, and the value depends
 Sender is the :class:`~celery.bin.base.Command` instance, and the value depends
-on the program that was called (e.g. for the umbrella command it'll be
+on the program that was called (e.g., for the umbrella command it'll be
 a :class:`~celery.bin.celery.CeleryCommand` object).
 a :class:`~celery.bin.celery.CeleryCommand` object).
 
 
 Provides arguments:
 Provides arguments:

+ 10 - 10
docs/userguide/tasks.rst

@@ -388,7 +388,7 @@ The request defines the following attributes:
 :chord: The unique id of the chord this task belongs to (if the task
 :chord: The unique id of the chord this task belongs to (if the task
         is part of the header).
         is part of the header).
 
 
-:correlation_id: Custom ID used for e.g. de-duplication.
+:correlation_id: Custom ID used for things like de-duplication.
 
 
 :args: Positional arguments.
 :args: Positional arguments.
 
 
@@ -414,13 +414,13 @@ The request defines the following attributes:
 
 
 :delivery_info: Additional message delivery information. This is a mapping
 :delivery_info: Additional message delivery information. This is a mapping
                 containing the exchange and routing key used to deliver this
                 containing the exchange and routing key used to deliver this
-                task. Used by e.g. :meth:`Task.retry() <@Task.retry>`
+                task. Used by, for example, :meth:`Task.retry() <@Task.retry>`
                 to resend the task to the same destination queue.
                 to resend the task to the same destination queue.
                 Availability of keys in this dict depends on the
                 Availability of keys in this dict depends on the
                 message broker used.
                 message broker used.
 
 
-:reply-to: Name of queue to send replies back to (used with e.g. RPC result
-           backend).
+:reply-to: Name of queue to send replies back to (used with the RPC result
+           backend, for example).
 
 
 :called_directly: This flag is set to true if the task wasn't
 :called_directly: This flag is set to true if the task wasn't
                   executed by the worker.
                   executed by the worker.
@@ -682,7 +682,7 @@ call, pass `retry_kwargs` argument to `~@Celery.task` decorator:
 
 
 This is provided as an alternative to manually handling the exceptions,
 This is provided as an alternative to manually handling the exceptions,
 and the example above will do the same as wrapping the task body
 and the example above will do the same as wrapping the task body
-in a :keyword:`try` ... :keyword:`except` statement, i.e.:
+in a :keyword:`try` ... :keyword:`except` statement:
 
 
 .. code-block:: python
 .. code-block:: python
 
 
@@ -799,7 +799,7 @@ General
     if not specified means rate limiting for tasks is disabled by default.
     if not specified means rate limiting for tasks is disabled by default.
 
 
     Note that this is a *per worker instance* rate limit, and not a global
     Note that this is a *per worker instance* rate limit, and not a global
-    rate limit. To enforce a global rate limit (e.g. for an API with a
+    rate limit. To enforce a global rate limit (e.g., for an API with a
    maximum number of requests per second), you must restrict to a given
    maximum number of requests per second), you must restrict to a given
     queue.
     queue.
 
 
@@ -876,7 +876,7 @@ General
     task is currently running.
     task is currently running.
 
 
     The host name and process id of the worker executing the task
     The host name and process id of the worker executing the task
-    will be available in the state meta-data (e.g. `result.info['pid']`)
+    will be available in the state meta-data (e.g., `result.info['pid']`)
 
 
     The global default can be overridden by the
     The global default can be overridden by the
     :setting:`task_track_started` setting.
     :setting:`task_track_started` setting.
@@ -901,7 +901,7 @@ different strengths and weaknesses (see :ref:`task-result-backends`).
 During its lifetime a task will transition through several possible states,
 During its lifetime a task will transition through several possible states,
 and each state may have arbitrary meta-data attached to it. When a task
 and each state may have arbitrary meta-data attached to it. When a task
 moves into a new state the previous state is
 moves into a new state the previous state is
-forgotten about, but some transitions can be deducted, (e.g. a task now
+forgotten about, but some transitions can be deduced (e.g., a task now
 in the :state:`FAILED` state, is implied to have been in the
 in the :state:`FAILED` state, is implied to have been in the
 :state:`STARTED` state at some point).
 :state:`STARTED` state at some point).
 
 
@@ -1060,7 +1060,7 @@ Use :meth:`~@Task.update_state` to update a task's state:.
 Here I created the state `"PROGRESS"`, telling any application
 Here I created the state `"PROGRESS"`, telling any application
 aware of this state that the task is currently in progress, and also where
 aware of this state that the task is currently in progress, and also where
 it is in the process by having `current` and `total` counts as part of the
 it is in the process by having `current` and `total` counts as part of the
-state meta-data. This can then be used to create e.g. progress bars.
+state meta-data. This can then be used, for example, to create progress bars.
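
A sketch of the pattern (``process`` is a placeholder for real work):

.. code-block:: python

    @app.task(bind=True)
    def long_task(self, items):
        for i, item in enumerate(items):
            process(item)
            self.update_state(state='PROGRESS',
                              meta={'current': i + 1, 'total': len(items)})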
 
 
 .. _pickling_exceptions:
 .. _pickling_exceptions:
 
 
@@ -1295,7 +1295,7 @@ And you route every request to the same process, then it
 will keep state between requests.
 will keep state between requests.
 
 
 This can also be useful to cache resources,
 This can also be useful to cache resources,
-e.g. a base Task class that caches a database connection:
+for example, a base Task class that caches a database connection:
 
 
 .. code-block:: python
 .. code-block:: python
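
    # Sketch of the cached-resource pattern; ``Database`` is a placeholder
    # for a real connection factory.
    from celery import Task

    class DatabaseTask(Task):
        _db = None

        @property
        def db(self):
            if self._db is None:
                self._db = Database.connect()
            return self._db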
 
 

+ 4 - 4
docs/userguide/workers.rst

@@ -79,7 +79,7 @@ signal.
 If the worker won't shut down after a considerable time, because it's
 If the worker won't shut down after a considerable time, because it's
 stuck in an infinite-loop or similar, you can use the :sig:`KILL` signal to
 stuck in an infinite-loop or similar, you can use the :sig:`KILL` signal to
 force terminate the worker, but be aware that currently executing tasks will
 force terminate the worker, but be aware that currently executing tasks will
-be lost (i.e. unless the tasks have the :attr:`~@Task.acks_late`
+be lost (i.e., unless the tasks have the :attr:`~@Task.acks_late`
 option set).
 option set).
 
 
 Also as processes can't override the :sig:`KILL` signal, the worker will
 Also as processes can't override the :sig:`KILL` signal, the worker will
@@ -170,7 +170,7 @@ Node name replacements
 - ``%i``:  Prefork pool process index or 0 if MainProcess.
 - ``%i``:  Prefork pool process index or 0 if MainProcess.
 - ``%I``:  Prefork pool process index with separator.
 - ``%I``:  Prefork pool process index with separator.
 
 
-E.g. if the current hostname is ``george@foo.example.com`` then
+For example, if the current hostname is ``george@foo.example.com`` then
 these will expand to:
 these will expand to:
 
 
 - ``--logfile-%p.log`` -> :file:`george@foo.example.com.log`
 - ``--logfile-%p.log`` -> :file:`george@foo.example.com.log`
@@ -189,7 +189,7 @@ filename depending on the process that'll eventually need to open the file.
 This can be used to specify one log file per child process.
 This can be used to specify one log file per child process.
 
 
 Note that the numbers will stay within the process limit even if processes
 Note that the numbers will stay within the process limit even if processes
-exit or if ``maxtasksperchild``/time limits are used. I.e. the number
+exit or if ``maxtasksperchild``/time limits are used. That is, the number
 is the *process index*, not the process count or pid.
 is the *process index*, not the process count or pid.
 
 
 * ``%i`` - Pool process index or 0 if MainProcess.
 * ``%i`` - Pool process index or 0 if MainProcess.
@@ -864,7 +864,7 @@ The output will include the following fields:
 
 
     * ``transport``
     * ``transport``
 
 
-        Name of transport used (e.g. ``amqp`` or ``redis``)
+        Name of transport used (e.g., ``amqp`` or ``redis``)
 
 
     * ``transport_options``
     * ``transport_options``
 
 

+ 4 - 4
docs/whatsnew-3.1.rst

@@ -267,7 +267,7 @@ Caveats
 
 
     This is very expensive if you have the
     This is very expensive if you have the
     :option:`--maxtasksperchild <celery worker --maxtasksperchild>` option
     :option:`--maxtasksperchild <celery worker --maxtasksperchild>` option
-    set to a low value (e.g. less than 10), so if you need to enable this option
+    set to a low value (e.g., less than 10), so if you need to enable this option
     you should also enable :option:`-Ofair <celery worker -O>` to turn off the
     you should also enable :option:`-Ofair <celery worker -O>` to turn off the
     prefetching behavior.
     prefetching behavior.
 
 
@@ -380,7 +380,7 @@ but starting with this version that field is also used to order them.
 Also, events now record timezone information
 Also, events now record timezone information
 by including a new ``utcoffset`` field in the event message.
 by including a new ``utcoffset`` field in the event message.
 This is a signed integer telling the difference from UTC time in hours,
 This is a signed integer telling the difference from UTC time in hours,
-so e.g. an event sent from the Europe/London timezone in daylight savings
+so, for example, an event sent from the Europe/London timezone in daylight savings
 time will have an offset of 1.
 time will have an offset of 1.
 
 
 :class:`@events.Receiver` will automatically convert the time-stamps
 :class:`@events.Receiver` will automatically convert the time-stamps
@@ -807,7 +807,7 @@ In Other News
     to ensure that the patches are applied as early as possible.
     to ensure that the patches are applied as early as possible.
 
 
     If you start the worker in a wrapper (like Django's :file:`manage.py`)
     If you start the worker in a wrapper (like Django's :file:`manage.py`)
-    then you must apply the patches manually, e.g. by creating an alternative
+    then you must apply the patches manually, for example by creating an alternative
     wrapper that monkey patches at the start of the program before importing
     wrapper that monkey patches at the start of the program before importing
     any other modules.
     any other modules.
 
 
@@ -967,7 +967,7 @@ In Other News
 - Changed the way that app instances are pickled.
 - Changed the way that app instances are pickled.
 
 
     Apps can now define a ``__reduce_keys__`` method that's used instead
     Apps can now define a ``__reduce_keys__`` method that's used instead
-    of the old ``AppPickler`` attribute. E.g. if your app defines a custom
+    of the old ``AppPickler`` attribute. For example, if your app defines a custom
     'foo' attribute that needs to be preserved when pickling you can define
     'foo' attribute that needs to be preserved when pickling you can define
     a ``__reduce_keys__`` as such:
     a ``__reduce_keys__`` as such:
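
    A sketch of such a ``__reduce_keys__`` (the subclass and the ``foo``
    attribute follow the prose above; treat the details as illustrative):

    .. code-block:: python

        import celery

        class Celery(celery.Celery):

            def __init__(self, *args, **kwargs):
                super(Celery, self).__init__(*args, **kwargs)
                self.foo = kwargs.get('foo')

            def __reduce_keys__(self):
                keys = super(Celery, self).__reduce_keys__()
                keys.update(foo=self.foo)
                return keys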
 
 

+ 2 - 2
docs/whatsnew-4.0.rst

@@ -540,7 +540,7 @@ some long-requested features:
   task errors.
   task errors.
 
 
 - Worker calls callbacks/errbacks even when the result is sent by the
 - Worker calls callbacks/errbacks even when the result is sent by the
-  parent process (e.g. :exc:`WorkerLostError` when a child process
+  parent process (e.g., :exc:`WorkerLostError` when a child process
   terminates, deserialization errors, unregistered tasks).
   terminates, deserialization errors, unregistered tasks).
 
 
 - A new ``origin`` header contains information about the process sending
 - A new ``origin`` header contains information about the process sending
@@ -614,7 +614,7 @@ Prefork: One log-file per child process
 ---------------------------------------
 ---------------------------------------
 
 
Init-scripts and :program:`celery multi` now use the `%I` log file format
Init-scripts and :program:`celery multi` now use the `%I` log file format
-option (e.g. :file:`/var/log/celery/%n%I.log`).
+option (e.g., :file:`/var/log/celery/%n%I.log`).
 
 
 This change was necessary to ensure each child
 This change was necessary to ensure each child
 process has a separate log file after moving task logging
 process has a separate log file after moving task logging

+ 1 - 1
extra/generic-init.d/celerybeat

@@ -35,7 +35,7 @@ origin_is_runlevel_dir () {
     echo $?
     echo $?
 }
 }
 
 
-# Can be a runlevel symlink (e.g. S02celeryd)
+# Can be a runlevel symlink (e.g., S02celeryd)
 if [ $(origin_is_runlevel_dir) -eq 0 ]; then
 if [ $(origin_is_runlevel_dir) -eq 0 ]; then
     SCRIPT_FILE=$(readlink "$0")
     SCRIPT_FILE=$(readlink "$0")
 else
 else

+ 3 - 4
extra/generic-init.d/celeryd

@@ -20,9 +20,8 @@
 #
 #
 #
 #
 # To implement separate init-scripts, copy this script and give it a different
 # To implement separate init-scripts, copy this script and give it a different
-# name:
-# I.e., if my new application, "little-worker" needs an init, I
-# should just use:
+# name.  That is, if your new application named "little-worker" needs an init,
+# you should use:
 #
 #
 #   cp /etc/init.d/celeryd /etc/init.d/little-worker
 #   cp /etc/init.d/celeryd /etc/init.d/little-worker
 #
 #
@@ -43,7 +42,7 @@ origin_is_runlevel_dir () {
     echo $?
     echo $?
 }
 }
 
 
-# Can be a runlevel symlink (e.g. S02celeryd)
+# Can be a runlevel symlink (e.g., S02celeryd)
 if [ $(origin_is_runlevel_dir) -eq 0 ]; then
 if [ $(origin_is_runlevel_dir) -eq 0 ]; then
     SCRIPT_FILE=$(readlink "$0")
     SCRIPT_FILE=$(readlink "$0")
 else
 else

+ 4 - 4
extra/zsh-completion/celery.zsh

@@ -5,7 +5,7 @@
 #       ``/usr/share/zsh/site-functions``) and name the script ``_celery``
 #       ``/usr/share/zsh/site-functions``) and name the script ``_celery``
 #
 #
 #   Alternative B). Or, use this file as a oh-my-zsh plugin (rename the script
 #   Alternative B). Or, use this file as a oh-my-zsh plugin (rename the script
-#   to ``_celery``), and add it to .zshrc e.g. plugins=(celery git osx ruby)
+#   to ``_celery``), and add it to .zshrc: plugins=(celery git osx ruby)
 #
 #
 
 
 _celery () {
 _celery () {
@@ -20,7 +20,7 @@ ifargs=('--app=' '--broker=' '--loader=' '--config=' '--version')
 dopts=('--detach' '--umask=' '--gid=' '--uid=' '--pidfile=' '--logfile=' '--loglevel=')
 dopts=('--detach' '--umask=' '--gid=' '--uid=' '--pidfile=' '--logfile=' '--loglevel=')
 controlargs=('--timeout' '--destination')
 controlargs=('--timeout' '--destination')
 _arguments \
 _arguments \
-        '(-A --app=)'{-A,--app}'[app instance to use (e.g. module.attr_name):APP]' \
+        '(-A --app=)'{-A,--app}'[app instance to use (e.g., module.attr_name):APP]' \
         '(-b --broker=)'{-b,--broker}'[url to broker.  default is "amqp://guest@localhost//":BROKER]' \
         '(-b --broker=)'{-b,--broker}'[url to broker.  default is "amqp://guest@localhost//":BROKER]' \
         '(--loader)--loader[name of custom loader class to use.:LOADER]' \
         '(--loader)--loader[name of custom loader class to use.:LOADER]' \
         '(--config)--config[Name of the configuration module:CONFIG]' \
         '(--config)--config[Name of the configuration module:CONFIG]' \
@@ -44,7 +44,7 @@ case "$words[1]" in
     '(--purge --discard)'{--discard,--purge}'[Purges all waiting tasks before the daemon is started.]' \
     '(--purge --discard)'{--discard,--purge}'[Purges all waiting tasks before the daemon is started.]' \
     '(-f --logfile=)'{-f,--logfile=}'[Path to log file. If no logfile is specified, stderr is used.]' \
     '(-f --logfile=)'{-f,--logfile=}'[Path to log file. If no logfile is specified, stderr is used.]' \
     '(--loglevel=)--loglevel=:::(critical error warning info debug)' \
     '(--loglevel=)--loglevel=:::(critical error warning info debug)' \
-    '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g. "foo@example.com".]' \
+    '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g., "foo@example.com".]' \
     '(-B --beat)'{-B,--beat}'[Also run the celerybeat periodic task scheduler.]' \
     '(-B --beat)'{-B,--beat}'[Also run the celerybeat periodic task scheduler.]' \
     '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database if running with the -B option. Defaults to celerybeat-schedule.]' \
     '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database if running with the -B option. Defaults to celerybeat-schedule.]' \
    '(-S --statedb=)'{-S,--statedb=}'[Path to the state database. Default: None]' \
    '(-S --statedb=)'{-S,--statedb=}'[Path to the state database. Default: None]' \
@@ -122,7 +122,7 @@ case "$words[1]" in
     '(-d --dump)'{-d,--dump}'[Dump events to stdout.]' \
     '(-d --dump)'{-d,--dump}'[Dump events to stdout.]' \
     '(-c --camera=)'{-c,--camera=}'[Take snapshots of events using this camera.]' \
     '(-c --camera=)'{-c,--camera=}'[Take snapshots of events using this camera.]' \
     '(-F --frequency=)'{-F,--frequency=}'[Camera: Shutter frequency.  Default is every 1.0 seconds.]' \
     '(-F --frequency=)'{-F,--frequency=}'[Camera: Shutter frequency.  Default is every 1.0 seconds.]' \
-    '(-r --maxrate=)'{-r,--maxrate=}'[Camera: Optional shutter rate limit (e.g. 10/m).]'
+    '(-r --maxrate=)'{-r,--maxrate=}'[Camera: Optional shutter rate limit (e.g., 10/m).]'
     compadd -a dopts fargs
     compadd -a dopts fargs
     ;;
     ;;
     *)
     *)

+ 2 - 2
funtests/suite/test_leak.py

@@ -50,8 +50,8 @@ class LeakFunCase(unittest.TestCase):
         return before, after
         return before, after
 
 
     def appx(self, s, r=1):
     def appx(self, s, r=1):
-        """r==1 (10e1): Keep up to hundred kB,
-        e.g. 16,268MB becomes 16,2MB."""
+        """r==1 (10e1): Keep up to hundred kB (e.g., 16,268MB
+        becomes 16,2MB)."""
         return int(s / 10.0 ** (r + 1)) / 10.0
         return int(s / 10.0 ** (r + 1)) / 10.0
 
 
     def assertFreed(self, n, fun, *args, **kwargs):
     def assertFreed(self, n, fun, *args, **kwargs):