Merge branch '3.0'

Conflicts:
	celery/__main__.py
	celery/app/base.py
	celery/backends/cache.py
	celery/bin/celery.py
	celery/bin/celeryd.py
	celery/datastructures.py
	celery/events/dumper.py
	celery/local.py
	celery/utils/imports.py
	celery/utils/threads.py
	celery/utils/timer2.py
	celery/worker/control.py
	docs/userguide/tasks.rst
	setup.py
Author: Ask Solem (committed 12 years ago)
Commit: 1ee6603703

69 changed files with 976 additions and 437 deletions
  celery/__compat__.py (+1 / -1)
  celery/__init__.py (+2 / -2)
  celery/__main__.py (+12 / -1)
  celery/app/abstract.py (+1 / -2)
  celery/app/amqp.py (+1 / -1)
  celery/app/base.py (+2 / -2)
  celery/apps/worker.py (+1 / -1)
  celery/backends/cache.py (+12 / -7)
  celery/bin/camqadm.py (+1 / -1)
  celery/bin/celery.py (+7 / -5)
  celery/bin/celeryd.py (+2 / -6)
  celery/bin/celeryd_multi.py (+1 / -1)
  celery/concurrency/__init__.py (+1 / -1)
  celery/concurrency/eventlet.py (+21 / -2)
  celery/concurrency/gevent.py (+4 / -1)
  celery/datastructures.py (+7 / -7)
  celery/events/dumper.py (+2 / -2)
  celery/events/state.py (+2 / -2)
  celery/local.py (+63 / -13)
  celery/tests/bin/test_celery.py (+1 / -1)
  celery/tests/concurrency/test_processes.py (+4 / -8)
  celery/tests/security/test_security.py (+3 / -1)
  celery/tests/worker/test_control.py (+0 / -2)
  celery/utils/functional.py (+4 / -4)
  celery/utils/imports.py (+5 / -60)
  celery/utils/timer2.py (+7 / -8)
  celery/worker/autoreload.py (+2 / -2)
  celery/worker/buckets.py (+2 / -2)
  docs/conf.py (+1 / -1)
  docs/configuration.rst (+13 / -5)
  docs/contributing.rst (+73 / -20)
  docs/django/first-steps-with-django.rst (+24 / -10)
  docs/faq.rst (+28 / -14)
  docs/getting-started/brokers/beanstalk.rst (+3 / -1)
  docs/getting-started/brokers/couchdb.rst (+3 / -1)
  docs/getting-started/brokers/django.rst (+3 / -1)
  docs/getting-started/brokers/mongodb.rst (+3 / -1)
  docs/getting-started/brokers/rabbitmq.rst (+34 / -13)
  docs/getting-started/brokers/redis.rst (+3 / -1)
  docs/getting-started/brokers/sqs.rst (+3 / -1)
  docs/getting-started/first-steps-with-celery.rst (+21 / -7)
  docs/getting-started/introduction.rst (+1 / -1)
  docs/getting-started/next-steps.rst (+63 / -21)
  docs/history/changelog-1.0.rst (+28 / -10)
  docs/history/changelog-2.0.rst (+36 / -12)
  docs/history/changelog-2.1.rst (+39 / -14)
  docs/history/changelog-2.2.rst (+12 / -4)
  docs/history/changelog-2.3.rst (+2 / -0)
  docs/history/changelog-2.4.rst (+3 / -1)
  docs/history/changelog-2.5.rst (+3 / -1)
  docs/includes/introduction.txt (+1 / -1)
  docs/internals/app-overview.rst (+1 / -1)
  docs/internals/guide.rst (+2 / -2)
  docs/reference/celery.rst (+1 / -1)
  docs/tutorials/daemonizing.rst (+9 / -5)
  docs/tutorials/debugging.rst (+3 / -1)
  docs/userguide/application.rst (+11 / -7)
  docs/userguide/calling.rst (+5 / -3)
  docs/userguide/canvas.rst (+8 / -6)
  docs/userguide/concurrency/eventlet.rst (+3 / -1)
  docs/userguide/monitoring.rst (+198 / -46)
  docs/userguide/optimizing.rst (+3 / -1)
  docs/userguide/periodic-tasks.rst (+12 / -4)
  docs/userguide/routing.rst (+34 / -15)
  docs/userguide/tasks.rst (+18 / -19)
  docs/userguide/workers.rst (+46 / -16)
  docs/whatsnew-2.5.rst (+16 / -6)
  docs/whatsnew-3.0.rst (+33 / -14)
  setup.py (+2 / -2)

+ 1 - 1
celery/__compat__.py

@@ -189,7 +189,7 @@ def get_compat_module(pkg, name):
         fqdn = '.'.join([pkg.__name__, name])
         module = sys.modules[fqdn] = import_module(attrs)
         return module
-    attrs['__all__'] = attrs.keys()
+    attrs['__all__'] = list(attrs)
     return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)
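
A side note on this hunk, which recurs throughout the merge: on Python 2
``dict.keys()`` returns a list, but on Python 3 it returns a view, so code
that wants a real list (or just iteration) is better written against the
mapping itself.  A minimal, runnable sketch of the pattern:

    attrs = {'spam': lambda: 1, 'eggs': lambda: 2}

    # portable: iterating a dict yields its keys on both Python 2 and 3
    __all__ = list(attrs)

    # also portable, and avoids building an intermediate list on Python 2:
    names = ', '.join(sorted(attrs))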
 
 

+ 2 - 2
celery/__init__.py

@@ -22,6 +22,8 @@ __all__ = [
 ]
 VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)
 
+# -eof meta-
+
 # This is for static analyzers
 Celery = object
 bugreport = lambda *a, **kw: None
@@ -38,8 +40,6 @@ xmap = lambda *a, **kw: None
 xstarmap = lambda *a, **kw: None
 uuid = lambda: None
 
-# -eof meta-
-
 # Lazy loading
 from .__compat__ import recreate_module
 

+ 12 - 1
celery/__main__.py

@@ -3,11 +3,22 @@ from __future__ import absolute_import
 import sys
 
 
-def main():
+def maybe_patch_concurrency():
     from celery.platforms import maybe_patch_concurrency
     maybe_patch_concurrency(sys.argv, ['-P'], ['--pool'])
+
+
+def main():
+    maybe_patch_concurrency()
     from celery.bin.celery import main
     main()
 
+
+def _compat_worker():
+    maybe_patch_concurrency()
+    from celery.bin.celeryd import main
+    main()
+
+
 if __name__ == '__main__':
     main()
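
The ordering here is the whole point: eventlet/gevent must monkey-patch the
standard library before anything else imports it, so the entry points peek
at ``sys.argv`` for ``-P``/``--pool`` before importing the rest of Celery.
A hedged sketch of how ``_compat_worker`` could be wired up as the old
``celeryd`` console script (the entry-point mapping below is illustrative,
not taken from this diff):

    # setup.py (sketch)
    entry_points = {
        'console_scripts': [
            'celery = celery.__main__:main',
            # hypothetical compat alias for the legacy celeryd binary:
            'celeryd = celery.__main__:_compat_worker',
        ],
    }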

+ 1 - 2
celery/app/abstract.py

@@ -60,5 +60,4 @@ class configurated(object):
                 setattr(self, attr_name, attr_value)
 
     def confopts_as_dict(self):
-        return dict((key, getattr(self, key))
-                        for key in self.__confopts__.iterkeys())
+        return dict((key, getattr(self, key)) for key in self.__confopts__)

+ 1 - 1
celery/app/amqp.py

@@ -128,7 +128,7 @@ class Queues(dict):
 
     def select_remove(self, queue):
         if self._consume_from is None:
-            self.select_subset(k for k in self.keys() if k != queue)
+            self.select_subset(k for k in self if k != queue)
         else:
             self._consume_from.pop(queue, None)
 

+ 2 - 2
celery/app/base.py

@@ -8,6 +8,7 @@
 """
 from __future__ import absolute_import
 
+import threading
 import warnings
 
 from collections import deque
@@ -15,7 +16,6 @@ from contextlib import contextmanager
 from copy import deepcopy
 from functools import wraps
 from operator import attrgetter
-from threading import Lock
 
 from billiard.util import register_after_fork
 from kombu.clocks import LamportClock
@@ -77,7 +77,7 @@ class Celery(object):
         self._pending_defaults = deque()
 
         self.finalized = False
-        self._finalize_mutex = Lock()
+        self._finalize_mutex = threading.Lock()
         self._pending = deque()
         self._tasks = tasks
         if not isinstance(self._tasks, TaskRegistry):

+ 1 - 1
celery/apps/worker.py

@@ -130,7 +130,7 @@ class Worker(WorkController):
                 count, pluralize(count, 'message')))
 
     def tasklist(self, include_builtins=True):
-        tasks = self.app.tasks.keys()
+        tasks = self.app.tasks
         if not include_builtins:
             tasks = [t for t in tasks if not t.startswith('celery.')]
         return '\n'.join('  . {0}'.format(task) for task in sorted(tasks))

+ 12 - 7
celery/backends/cache.py

@@ -16,6 +16,15 @@ from .base import KeyValueStoreBackend
 
 _imp = [None]
 
+REQUIRES_BACKEND = """\
+The memcached backend requires either pylibmc or python-memcached.\
+"""
+
+UNKNOWN_BACKEND = """\
+The cache backend {0!r} is unknown,
+Please use one of the following backends instead: {1}\
+"""
+
 
 def import_best_memcache():
     if _imp[0] is None:
@@ -27,9 +36,7 @@ def import_best_memcache():
             try:
                 import memcache  # noqa
             except ImportError:
-                raise ImproperlyConfigured(
-                    'Memcached backend requires either the pylibmc '
-                    'or memcache library')
+                raise ImproperlyConfigured(REQUIRES_BACKEND)
         _imp[0] = (is_pylibmc, memcache)
     return _imp[0]
 
@@ -90,10 +97,8 @@ class CacheBackend(KeyValueStoreBackend):
         try:
             self.Client = backends[self.backend]()
         except KeyError:
-            raise ImproperlyConfigured(
-                    'Unknown cache backend: {0}. Please use one of the '
-                    'following backends: {1}'.format(self.backend,
-                                        ', '.join(backends.keys())))
+            raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
+                self.backend, ', '.join(backends)))
 
     def get(self, key):
         return self.client.get(key)
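
Hoisting the messages into module-level constants keeps the ``raise`` sites
to one or two lines.  The same pattern as a self-contained sketch (the
names below are illustrative, not part of celery):

    UNKNOWN_THING = """\
    The thing {0!r} is unknown,
    Please use one of the following instead: {1}\
    """

    def get_thing(name, registry):
        try:
            return registry[name]
        except KeyError:
            # join iterates the dict's keys directly, as in the hunk above
            raise ValueError(UNKNOWN_THING.format(
                name, ', '.join(registry)))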

+ 1 - 1
celery/bin/camqadm.py

@@ -238,7 +238,7 @@ class AMQShell(cmd.Cmd):
     def do_help(self, *args):
         if not args:
             self.say(HELP_HEADER)
-            for cmd_name in self.amqp.keys():
+            for cmd_name in self.amqp:
                 self.display_command_help(cmd_name, short=True)
             self.say(EXAMPLE_TEXT)
         else:

+ 7 - 5
celery/bin/celery.py

@@ -83,7 +83,6 @@ def load_extension_commands(namespace='celery.commands'):
         return
 
     for ep in iter_entry_points(namespace):
-        _get_extension_classes().append(ep.name)
         sym = ':'.join([ep.module_name, ep.attrs[0]])
         try:
             cls = symbol_by_name(sym)
@@ -91,6 +90,7 @@ def load_extension_commands(namespace='celery.commands'):
             warnings.warn(
                 'Cannot load extension {0!r}: {1!r}'.format(sym, exc))
         else:
+            _get_extension_classes().append(ep.name)
             command(cls, name=ep.name)
 
 
@@ -169,7 +169,7 @@ class Command(BaseCommand):
 
     def say_remote_command_reply(self, replies):
         c = self.colored
-        node = replies.keys()[0]
+        node = iter(replies).next()  # <-- take first.
         reply = replies[node]
         status, preply = self.prettify(reply)
         self.say_chat('->', c.cyan(node, ': ') + status,
@@ -333,7 +333,7 @@ class list_(Command):
 
     def run(self, what=None, *_, **kw):
         topics = {'bindings': self.list_bindings}
-        available = ', '.join(topics.keys())
+        available = ', '.join(topics)
         if not what:
             raise Error('You must specify one of {0}'.format(available))
         if what not in topics:
@@ -411,7 +411,7 @@ class purge(Command):
     fmt_empty = 'No messages purged from {qnum} {queues}'
 
     def run(self, *args, **kwargs):
-        queues = len(self.app.amqp.queues.keys())
+        queues = len(self.app.amqp.queues)
         messages = self.app.control.purge()
         fmt = self.fmt_purged if messages else self.fmt_empty
         self.out(fmt.format(
@@ -717,7 +717,9 @@ class shell(Command):  # pragma: no cover
           xmap, xstarmap subtask, Task
         - all registered tasks.
 
-    Example Session::
+    Example Session:
+
+    .. code-block:: bash
 
         $ celery shell
 

+ 2 - 6
celery/bin/celeryd.py

@@ -117,11 +117,6 @@ The :program:`celery worker` command (previously known as ``celeryd``)
 from __future__ import absolute_import
 
 import sys
-import sys
-from celery.platforms import maybe_patch_concurrency
-maybe_patch_concurrency(sys.argv, ['-P'], ['--pool'])
-
-from billiard import freeze_support
 
 from celery import concurrency
 from celery.bin.base import Command, Option
@@ -154,7 +149,7 @@ class WorkerCommand(Command):
                 loglevel = mlevel(loglevel)
             except KeyError:  # pragma: no cover
                 self.die('Unknown level {0!r}. Please use one of {1}.'.format(
-                    loglevel, '|'.join(l for l in LOG_LEVELS.keys()
+                    loglevel, '|'.join(l for l in LOG_LEVELS
                       if isinstance(l, basestring))))
         return self.app.Worker(
             hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, **kwargs
@@ -204,6 +199,7 @@ def main():
     # (see multiprocessing.forking.get_preparation_data())
     if __name__ != '__main__':  # pragma: no cover
         sys.modules['__main__'] = sys.modules[__name__]
+    from billiard import freeze_support
     freeze_support()
     worker = WorkerCommand()
     worker.execute_from_commandline()

+ 1 - 1
celery/bin/celeryd_multi.py

@@ -6,7 +6,7 @@
 Examples
 ========
 
-::
+.. code-block:: bash
 
     # Single worker with explicit name and events enabled.
     $ celeryd-multi start Leslie -E

+ 1 - 1
celery/concurrency/__init__.py

@@ -8,7 +8,7 @@
 """
 from __future__ import absolute_import
 
-from celery.utils.imports import symbol_by_name
+from celery.local import symbol_by_name
 
 ALIASES = {
     'processes': 'celery.concurrency.processes:TaskPool',

+ 21 - 2
celery/concurrency/eventlet.py

@@ -9,11 +9,30 @@
 from __future__ import absolute_import
 
 import os
-if not os.environ.get('EVENTLET_NOPATCH'):
+
+EVENTLET_NOPATCH = os.environ.get('EVENTLET_NOPATCH', False)
+EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0))
+W_RACE = """\
+Celery module with %s imported before eventlet patched\
+"""
+
+def _racedetect():
+    import sys
+    import warnings
+    # sys.modules maps names to module objects: inspect the modules
+    # themselves, not the name strings.
+    for name, mod in sys.modules.items():
+        if name.startswith('celery.'):
+            for side in ('thread', 'threading', 'socket'):
+                if getattr(mod, side, None):
+                    warnings.warn(RuntimeWarning(W_RACE % side))
+_racedetect()
+
+
+PATCHED = [0]
+if not EVENTLET_NOPATCH and not PATCHED[0]:
+    PATCHED[0] += 1
     import eventlet
     import eventlet.debug
     eventlet.monkey_patch()
-    eventlet.debug.hub_prevent_multiple_readers(False)
+    eventlet.debug.hub_prevent_multiple_readers(True)
+    eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK)
 
 from time import time
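
Condensed, the guard added above amounts to the following idiom, runnable
on its own if eventlet is installed (the environment variable names come
from this hunk):

    import os

    if not os.environ.get('EVENTLET_NOPATCH'):
        import eventlet
        import eventlet.debug
        eventlet.monkey_patch()
        # enable eventlet's blocking-detection; 0 leaves it disabled
        eventlet.debug.hub_blocking_detection(
            int(os.environ.get('EVENTLET_NOBLOCK', 0)))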
 

+ 4 - 1
celery/concurrency/gevent.py

@@ -9,7 +9,10 @@
 from __future__ import absolute_import
 
 import os
-if not os.environ.get('GEVENT_NOPATCH'):
+
+PATCHED = [0]
+if not os.environ.get('GEVENT_NOPATCH') and not PATCHED[0]:
+    PATCHED[0] += 1
     from gevent import monkey
     monkey.patch_all()
 

+ 7 - 7
celery/datastructures.py

@@ -174,7 +174,7 @@ class DependencyGraph(object):
         P('}')
 
     def __iter__(self):
-        return self.adjacent.iterkeys()
+        return iter(self.adjacent)
 
     def __getitem__(self, node):
         return self.adjacent[node]
@@ -264,11 +264,11 @@ class DictAttribute(object):
         return hasattr(self.obj, key)
 
     def _iterate_keys(self):
-        return vars(self.obj).iterkeys()
+        return iter(vars(self.obj))
     iterkeys = _iterate_keys
 
     def __iter__(self):
-        return self.iterkeys()
+        return self._iterate_keys()
 
     def _iterate_items(self):
         return vars(self.obj).iteritems()
@@ -280,7 +280,7 @@ class DictAttribute(object):
     else:
 
         def keys(self):
-            return list(self._iterate_keys())
+            return list(self)
 
         def items(self):
             return list(self._iterate_items())
@@ -348,7 +348,7 @@ class ConfigurationView(AttributeDictMixin):
         return repr(dict(self.iteritems()))
 
     def __iter__(self):
-        return self.iterkeys()
+        return self._iterate_keys()
 
     def _iter(self, op):
         # defaults must be first in the stream, so values in
@@ -356,7 +356,7 @@ class ConfigurationView(AttributeDictMixin):
         return chain(*[op(d) for d in reversed(self._order)])
 
     def _iterate_keys(self):
-        return uniq(self._iter(lambda d: d.iterkeys()))
+        return uniq(self._iter(lambda d: d))
     iterkeys = _iterate_keys
 
     def _iterate_items(self):
@@ -439,7 +439,7 @@ class LimitedSet(object):
         return iter(self._data)
 
     def __repr__(self):
-        return 'LimitedSet({0!r})'.format(self._data.keys())
+        return 'LimitedSet({0!r})'.format(list(self._data))
 
     @property
     def chronologically(self):

+ 2 - 2
celery/events/dumper.py

@@ -56,14 +56,14 @@ class Dumper(object):
             return self.format_task_event(hostname, timestamp,
                                           type, task, event)
         fields = ', '.join('{0}={1}'.format(key, event[key])
-                        for key in sorted(event.keys()))
+                        for key in sorted(event))
         sep = fields and ':' or ''
         self.say('{0} [{1}] {2}{3} {4}'.format(hostname, timestamp,
                                             humanize_type(type), sep, fields))
 
     def format_task_event(self, hostname, timestamp, type, task, event):
         fields = ', '.join('{0}={1}'.format(key, event[key])
-                        for key in sorted(event.keys()))
+                        for key in sorted(event))
         sep = fields and ':' or ''
         self.say('{0} [{1}] {2}{3} {4} {5}'.format(hostname, timestamp,
                     humanize_type(type), sep, task, fields))

+ 2 - 2
celery/events/state.py

@@ -19,8 +19,8 @@
 from __future__ import absolute_import
 
 import heapq
+import threading
 
-from threading import Lock
 from time import time
 
 from kombu.utils import kwdict
@@ -218,7 +218,7 @@ class State(object):
         self.event_callback = callback
         self.group_handlers = {'worker': self.worker_event,
                                'task': self.task_event}
-        self._mutex = Lock()
+        self._mutex = threading.Lock()
 
     def freeze_while(self, fun, *args, **kwargs):
         clear_after = kwargs.pop('clear_after', False)

+ 63 - 13
celery/local.py

@@ -12,27 +12,74 @@
 """
 from __future__ import absolute_import
 
-# since each thread has its own greenlet we can just use those as identifiers
-# for the context.  If greenlets are not available we fall back to the
-# current thread ident.
-try:
-    from greenlet import getcurrent as get_ident
-except ImportError:  # pragma: no cover
+import importlib
+import sys
+
+
+def symbol_by_name(name, aliases={}, imp=None, package=None,
+        sep='.', default=None, **kwargs):
+    """Get symbol by qualified name.
+
+    The name should be the full dot-separated path to the class::
+
+        modulename.ClassName
+
+    Example::
+
+        celery.concurrency.processes.TaskPool
+                                    ^- class name
+
+    or using ':' to separate module and symbol::
+
+        celery.concurrency.processes:TaskPool
+
+    If `aliases` is provided, a dict containing short name/long name
+    mappings, the name is looked up in the aliases first.
+
+    Examples:
+
+        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        >>> symbol_by_name('default', {
+        ...     'default': 'celery.concurrency.processes.TaskPool'})
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        # Does not try to look up non-string names.
+        >>> from celery.concurrency.processes import TaskPool
+        >>> symbol_by_name(TaskPool) is TaskPool
+        True
+
+    """
+    if imp is None:
+        imp = importlib.import_module
+
+    if not isinstance(name, basestring):
+        return name                                 # already a class
+
+    name = aliases.get(name) or name
+    sep = ':' if ':' in name else sep
+    module_name, _, cls_name = name.rpartition(sep)
+    if not module_name:
+        cls_name, module_name = None, package if package else cls_name
     try:
-        from thread import get_ident  # noqa
-    except ImportError:  # pragma: no cover
         try:
-            from dummy_thread import get_ident  # noqa
-        except ImportError:  # pragma: no cover
-            from _thread import get_ident  # noqa
+            module = imp(module_name, package=package, **kwargs)
+        except ValueError, exc:
+            raise ValueError, ValueError(
+                    "Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
+        return getattr(module, cls_name) if cls_name else module
+    except (ImportError, AttributeError):
+        if default is None:
+            raise
+    return default
 
 
 def try_import(module, default=None):
     """Try to import and return module, or return
     None if the module does not exist."""
-    from importlib import import_module
     try:
-        return import_module(module)
+        return importlib.import_module(module)
     except ImportError:
         return default
 
@@ -221,6 +268,7 @@ def maybe_evaluate(obj):
         return obj.__maybe_evaluate__()
     except AttributeError:
         return obj
+<<<<<<< HEAD
 
 
 def release_local(local):
@@ -427,3 +475,5 @@ class LocalManager(object):
     def __repr__(self):
         return '<{0} storages: {1}>'.format(
             self.__class__.__name__, len(self.locals))
+=======
+>>>>>>> 3.0

+ 1 - 1
celery/tests/bin/test_celery.py

@@ -337,4 +337,4 @@ class test_main(AppCase):
     def test_main(self, Command):
         command = Command.return_value = Mock()
         main()
-        command.execute_from_commandline.assert_called_with()
+        command.execute_from_commandline.assert_called_with(None)

+ 4 - 8
celery/tests/concurrency/test_processes.py

@@ -160,14 +160,10 @@ class test_TaskPool(Case):
         pool.apply_async(lambda x: x, (2, ), {})
 
     def test_terminate_job(self):
-
-        @patch('celery.concurrency.processes._kill')
-        def _do_test(_kill):
-            pool = TaskPool(10)
-            pool.terminate_job(1341)
-            _kill.assert_called_with(1341, signal.SIGTERM)
-
-        _do_test()
+        pool = TaskPool(10)
+        pool._pool = Mock()
+        pool.terminate_job(1341)
+        pool._pool.terminate_job.assert_called_with(1341, None)
 
     def test_grow_shrink(self):
         pool = TaskPool(10)

+ 3 - 1
celery/tests/security/test_security.py

@@ -1,7 +1,9 @@
 """
 Keys and certificates for tests (KEY1 is a private key of CERT1, etc.)
 
-Generated with::
+Generated with:
+
+.. code-block:: bash
 
     $ openssl genrsa -des3 -passout pass:test -out key1.key 1024
     $ openssl req -new -key key1.key -out key1.csr -passin pass:test

+ 0 - 2
celery/tests/worker/test_control.py

@@ -155,8 +155,6 @@ class test_ControlPanel(Case):
         self.panel.handle('report')
 
     def test_active(self):
-        from celery.worker.job import TaskRequest
-
         r = TaskRequest(mytask.name, 'do re mi', (), {})
         state.active_requests.add(r)
         try:

+ 4 - 4
celery/utils/functional.py

@@ -9,10 +9,10 @@
 from __future__ import absolute_import
 
 import operator
+import threading
 
 from functools import partial, wraps
 from itertools import islice
-from threading import Lock, RLock
 
 from kombu.utils import cached_property
 from kombu.utils.functional import promise, maybe_promise
@@ -35,7 +35,7 @@ class LRUCache(UserDict):
 
     def __init__(self, limit=None):
         self.limit = limit
-        self.mutex = RLock()
+        self.mutex = threading.RLock()
         self.data = OrderedDict()
 
     def __getitem__(self, key):
@@ -61,7 +61,7 @@ class LRUCache(UserDict):
             self.data[key] = value
 
     def __iter__(self):
-        return self.data.iterkeys()
+        return iter(self.data)
 
     def _iterate_items(self):
         for k in self:
@@ -101,7 +101,7 @@ def maybe_list(l):
 def memoize(maxsize=None, Cache=LRUCache):
 
     def _memoize(fun):
-        mutex = Lock()
+        mutex = threading.Lock()
         cache = Cache(limit=maxsize)
 
         @wraps(fun)
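
A usage sketch of ``memoize`` as defined above (``memoize`` and
``LRUCache`` are both in this hunk; only the call site is illustrative):

    computed = []

    @memoize(maxsize=100)
    def expensive(x, y):
        computed.append((x, y))  # records actual computations only
        return x * y

    expensive(2, 2)  # computed, appended
    expensive(2, 2)  # served from the LRUCache, nothing appended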

+ 5 - 60
celery/utils/imports.py

@@ -15,6 +15,11 @@ import sys
 
 from contextlib import contextmanager
 
+# symbol_by_name was moved to local because it's used
+# early in the import stage, where celery.utils loads
+# too much (e.g. for eventlet patching)
+from celery.local import symbol_by_name
+
 from .compat import reload
 
 
@@ -36,66 +41,6 @@ else:
         return '%s.%s' % (obj.__module__, obj.__name__)
 
 
-def symbol_by_name(name, aliases={}, imp=None, package=None,
-        sep='.', default=None, **kwargs):
-    """Get symbol by qualified name.
-
-    The name should be the full dot-separated path to the class::
-
-        modulename.ClassName
-
-    Example::
-
-        celery.concurrency.processes.TaskPool
-                                    ^- class name
-
-    or using ':' to separate module and symbol::
-
-        celery.concurrency.processes:TaskPool
-
-    If `aliases` is provided, a dict containing short name/long name
-    mappings, the name is looked up in the aliases first.
-
-    Examples:
-
-        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
-        <class 'celery.concurrency.processes.TaskPool'>
-
-        >>> symbol_by_name('default', {
-        ...     'default': 'celery.concurrency.processes.TaskPool'})
-        <class 'celery.concurrency.processes.TaskPool'>
-
-        # Does not try to look up non-string names.
-        >>> from celery.concurrency.processes import TaskPool
-        >>> symbol_by_name(TaskPool) is TaskPool
-        True
-
-    """
-    if imp is None:
-        imp = importlib.import_module
-
-    if not isinstance(name, basestring):
-        return name                                 # already a class
-
-    name = aliases.get(name) or name
-    sep = ':' if ':' in name else sep
-    module_name, _, cls_name = name.rpartition(sep)
-    if not module_name:
-        cls_name, module_name = None, package if package else cls_name
-    try:
-        try:
-            module = imp(module_name, package=package, **kwargs)
-        except ValueError as exc:
-            raise ValueError, ValueError(
-                    "Couldn't import {0!r}: {1}".format(
-                        name, exc)), sys.exc_info()[2]
-        return getattr(module, cls_name) if cls_name else module
-    except (ImportError, AttributeError):
-        if default is None:
-            raise
-    return default
-
-
 def instantiate(name, *args, **kwargs):
     """Instantiate class by name.
 

+ 7 - 8
celery/utils/timer2.py

@@ -12,11 +12,11 @@ import atexit
 import heapq
 import os
 import sys
+import threading
 
 from functools import wraps
 from future_builtins import map
 from itertools import count
-from threading import Condition, Event, Lock, Thread
 from time import time, sleep, mktime
 
 from datetime import datetime, timedelta
@@ -209,7 +209,7 @@ class Schedule(object):
         return [heapq.heappop(x) for x in [events] * len(events)]
 
 
-class Timer(Thread):
+class Timer(threading.Thread):
     Entry = Entry
     Schedule = Schedule
 
@@ -229,12 +229,11 @@ class Timer(Thread):
         self.schedule = schedule or self.Schedule(on_error=on_error,
                                                   max_interval=max_interval)
         self.on_tick = on_tick or self.on_tick
-
-        Thread.__init__(self)
-        self._is_shutdown = Event()
-        self._is_stopped = Event()
-        self.mutex = Lock()
-        self.not_empty = Condition(self.mutex)
+        threading.Thread.__init__(self)
+        self._is_shutdown = threading.Event()
+        self._is_stopped = threading.Event()
+        self.mutex = threading.Lock()
+        self.not_empty = threading.Condition(self.mutex)
         self.daemon = True
         self.name = 'Timer-{0}'.format(self._timer_count())
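
``Timer`` is a daemon thread draining a heap-based ``Schedule``.  A brief
usage sketch (``apply_after`` and ``stop`` are part of timer2's existing
API; the millisecond delay unit is an assumption worth verifying):

    from celery.utils import timer2

    timer = timer2.Timer()
    entry = timer.apply_after(1000, lambda: None)  # fire in ~1 second
    # later, on shutdown:
    timer.stop()  # sets _is_shutdown and waits for the thread to exit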
 

+ 2 - 2
celery/worker/autoreload.py

@@ -102,7 +102,7 @@ class StatMonitor(BaseMonitor):
             modified = dict((f, mt) for f, mt in self._mtimes()
                                 if self._maybe_modified(f, mt))
             if modified:
-                self.on_change(modified.keys())
+                self.on_change(modified)
                 self.modify_times.update(modified)
             time.sleep(self.interval)
 
@@ -232,7 +232,7 @@ class Autoreloader(bgThread):
         files.update(dict((module_file(sys.modules[m]), m)
                         for m in self.modules))
 
-        self._monitor = self.Monitor(files.keys(), self.on_change,
+        self._monitor = self.Monitor(files, self.on_change,
                 shutdown_event=self._is_shutdown, **self.options)
         self._hashes = dict([(f, file_hash(f)) for f in files])
 

+ 2 - 2
celery/worker/buckets.py

@@ -149,12 +149,12 @@ class TaskBucket(object):
 
     def init_with_registry(self):
         """Initialize with buckets for all the task types in the registry."""
-        for task in self.task_registry.keys():
+        for task in self.task_registry:
             self.add_bucket_for_type(task)
 
     def refresh(self):
         """Refresh rate limits for all task types in the registry."""
-        for task in self.task_registry.keys():
+        for task in self.task_registry:
             self.update_bucket_for_type(task)
 
     def get_bucket_for_type(self, task_name):

+ 1 - 1
docs/conf.py

@@ -75,7 +75,7 @@ intersphinx_mapping = {
 }
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'trac'
+pygments_style = 'colorful'
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,

+ 13 - 5
docs/configuration.rst

@@ -76,7 +76,9 @@ If not set then the systems default local time zone is used.
 
     Celery requires the :mod:`pytz` library to be installed,
     when using custom time zones (other than UTC).  You can
-    install it using :program:`pip` or :program:`easy_install`::
+    install it using :program:`pip` or :program:`easy_install`:
+
+    .. code-block:: bash
 
         $ pip install pytz
 
@@ -404,7 +406,9 @@ Configuring the backend URL
     The Redis backend requires the :mod:`redis` library:
     http://pypi.python.org/pypi/redis/
 
-    To install the redis package use `pip` or `easy_install`::
+    To install the redis package use `pip` or `easy_install`:
+
+    .. code-block:: bash
 
         $ pip install redis
 
@@ -509,7 +513,9 @@ Cassandra backend settings
     The Cassandra backend requires the :mod:`pycassa` library:
     http://pypi.python.org/pypi/pycassa/
 
-    To install the pycassa package use `pip` or `easy_install`::
+    To install the pycassa package use `pip` or `easy_install`:
+
+    .. code-block:: bash
 
         $ pip install pycassa
 
@@ -741,7 +747,9 @@ that's is used both by the client and the broker to detect if
 a connection was closed.
 
 Heartbeats are currently only supported by the ``pyamqp://`` transport,
-and this requires the :mod:`amqp` module::
+and this requires the :mod:`amqp` module:
+
+.. code-block:: bash
 
     $ pip install amqp
 
@@ -1065,7 +1073,7 @@ Example:
 
     from celery.exceptions import SoftTimeLimitExceeded
 
-    @celery.task()
+    @celery.task
     def mytask():
         try:
             return do_work()

+ 73 - 20
docs/contributing.rst

@@ -296,19 +296,31 @@ First you need to fork the Celery repository, a good introduction to this
 is in the Github Guide: `Fork a Repo`_.
 
 After you have cloned the repository you should checkout your copy
-to a directory on your machine::
+to a directory on your machine:
+
+.. code-block:: bash
 
     $ git clone git@github.com:username/celery.git
 
 When the repository is cloned enter the directory to set up easy access
-to upstream changes::
+to upstream changes:
+
+.. code-block:: bash
 
     $ cd celery
+
+.. code-block:: bash
+
     $ git remote add upstream git://github.com/celery/celery.git
+
+.. code-block:: bash
+
     $ git fetch upstream
 
 If you need to pull in new changes from upstream you should
-always use the :option:`--rebase` option to ``git pull``::
+always use the :option:`--rebase` option to ``git pull``:
+
+.. code-block:: bash
 
     git pull --rebase upstream master
 
@@ -338,12 +350,16 @@ To run the Celery test suite you need to install a few dependencies.
 A complete list of the dependencies needed are located in
 :file:`requirements/test.txt`.
 
-Installing the test requirements::
+Installing the test requirements:
+
+.. code-block:: bash
 
     $ pip -E $VIRTUAL_ENV install -U -r requirements/test.txt
 
 When installation of dependencies is complete you can execute
-the test suite by calling ``nosetests``::
+the test suite by calling ``nosetests``:
+
+.. code-block:: bash
 
     $ nosetests
 
@@ -366,7 +382,9 @@ Some useful options to :program:`nosetests` are:
     Run with verbose output.
 
 If you want to run the tests for a single test file only
-you can do so like this::
+you can do so like this:
+
+.. code-block:: bash
 
     $ nosetests celery.tests.test_worker.test_worker_job
 
@@ -392,14 +410,18 @@ the steps outlined here: http://bit.ly/koJoso
 Calculating test coverage
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Code coverage in HTML::
+Code coverage in HTML:
+
+.. code-block:: bash
 
     $ nosetests --with-coverage3 --cover3-html
 
 The coverage output will then be located at
 :file:`celery/tests/cover/index.html`.
 
-Code coverage in XML (Cobertura-style)::
+Code coverage in XML (Cobertura-style):
+
+.. code-block:: bash
 
     $ nosetests --with-coverage3 --cover3-xml --cover3-xml-file=coverage.xml
 
@@ -413,12 +435,16 @@ Running the tests on all supported Python versions
 There is a ``tox`` configuration file in the top directory of the
 distribution.
 
-To run the tests for all supported Python versions simply execute::
+To run the tests for all supported Python versions simply execute:
+
+.. code-block:: bash
 
     $ tox
 
 If you only want to test specific Python versions use the :option:`-e`
-option::
+option:
+
+.. code-block:: bash
 
     $ tox -e py26
 
@@ -426,12 +452,16 @@ Building the documentation
 --------------------------
 
 To build the documentation you need to install the dependencies
-listed in :file:`requirements/docs.txt`::
+listed in :file:`requirements/docs.txt`:
+
+.. code-block:: bash
 
     $ pip -E $VIRTUAL_ENV install -U -r requirements/docs.txt
 
 After these dependencies are installed you should be able to
-build the docs by running::
+build the docs by running:
+
+.. code-block:: bash
 
     $ cd docs
     $ rm -rf .build
@@ -448,7 +478,9 @@ Verifying your contribution
 To use these tools you need to install a few dependencies.  These dependencies
 can be found in :file:`requirements/pkgutils.txt`.
 
-Installing the dependencies::
+Installing the dependencies:
+
+.. code-block:: bash
 
     $ pip -E $VIRTUAL_ENV install -U -r requirements/pkgutils.txt
 
@@ -456,12 +488,16 @@ pyflakes & PEP8
 ~~~~~~~~~~~~~~~
 
 To ensure that your changes conform to PEP8 and to run pyflakes
-execute::
+execute:
+
+.. code-block:: bash
 
     $ paver flake8
 
 To not return a negative exit code when this command fails use the
-:option:`-E` option, this can be convenient while developing::
+:option:`-E` option, this can be convenient while developing:
+
+.. code-block:: bash
 
     $ paver flake8 -E
 
@@ -469,7 +505,9 @@ API reference
 ~~~~~~~~~~~~~
 
 To make sure that all modules have a corresponding section in the API
-reference please execute::
+reference please execute:
+
+.. code-block:: bash
 
     $ paver autodoc
     $ paver verifyindex
@@ -481,19 +519,28 @@ located in :file:`docs/internals/reference/`.  If the module is public
 it should be located in :file:`docs/reference/`.
 
 For example if reference is missing for the module ``celery.worker.awesome``
-and this module is considered part of the public API, use the following steps::
+and this module is considered part of the public API, use the following steps:
+
+.. code-block:: bash
 
     $ cd docs/reference/
     $ cp celery.schedules.rst celery.worker.awesome.rst
+
+.. code-block:: bash
+
     $ vim celery.worker.awesome.rst
 
         # change every occurance of ``celery.schedules`` to
         # ``celery.worker.awesome``
 
+.. code-block:: bash
+
     $ vim index.rst
 
         # Add ``celery.worker.awesome`` to the index.
 
+.. code-block:: bash
+
     # Add the file to git
     $ git add celery.worker.awesome.rst
     $ git add index.rst
@@ -809,15 +856,21 @@ The version number must be updated two places:
 After you have changed these files you must render
 the :file:`README` files.  There is a script to convert sphinx syntax
 to generic reStructured Text syntax, and the paver task `readme`
-does this for you::
+does this for you:
+
+.. code-block:: bash
 
     $ paver readme
 
-Now commit the changes::
+Now commit the changes:
+
+.. code-block:: bash
 
     $ git commit -a -m "Bumps version to X.Y.Z"
 
-and make a new version tag::
+and make a new version tag:
+
+.. code-block:: bash
 
     $ git tag vX.Y.Z
     $ git push --tags

+ 24 - 10
docs/django/first-steps-with-django.rst

@@ -7,24 +7,32 @@ Configuring your Django project to use Celery
 
 You need four simple steps to use celery with your Django project.
 
-    1. Install the ``django-celery`` library::
+    1. Install the ``django-celery`` library:
 
-        $ pip install django-celery
+        .. code-block:: bash
 
-    2. Add the following lines to ``settings.py``::
+            $ pip install django-celery
 
-        import djcelery
-        djcelery.setup_loader()
+    2. Add the following lines to ``settings.py``:
+
+        .. code-block:: python
+
+            import djcelery
+            djcelery.setup_loader()
 
     3. Add ``djcelery`` to ``INSTALLED_APPS``.
 
     4. Create the celery database tables.
 
-        If you are using south_ for schema migrations, you'll want to::
+        If you are using south_ for schema migrations, you'll want to:
+
+        .. code-block:: bash
 
             $ python manage.py migrate djcelery
 
-        For those who are not using south, a normal ``syncdb`` will work::
+        For those who are not using south, a normal ``syncdb`` will work:
+
+        .. code-block:: bash
 
             $ python manage.py syncdb
 
@@ -63,7 +71,9 @@ to import these modules.
 
 For a simple demonstration create a new Django app called
 ``celerytest``.  To create this app you need to be in the directory
-of your Django project where ``manage.py`` is located and execute::
+of your Django project where ``manage.py`` is located and execute:
+
+.. code-block:: bash
 
     $ python manage.py startapp celerytest
 
@@ -88,12 +98,16 @@ Starting the worker process
 In a production environment you will want to run the worker in the background
 as a daemon - see :ref:`daemonizing` - but for testing and
 development it is useful to be able to start a worker instance by using the
-``celery worker`` manage command, much as you would use Django's runserver::
+``celery worker`` manage command, much as you would use Django's runserver:
+
+.. code-block:: bash
 
     $ python manage.py celery worker --loglevel=info
 
 For a complete listing of the command line options available,
-use the help command::
+use the help command:
+
+.. code-block:: bash
 
     $ python manage.py celery help
 

+ 28 - 14
docs/faq.rst

@@ -315,7 +315,9 @@ Why aren't my tasks processed?
 ------------------------------
 
 **Answer:** With RabbitMQ you can see how many consumers are currently
-receiving tasks by running the following command::
+receiving tasks by running the following command:
+
+.. code-block:: bash
 
     $ rabbitmqctl list_queues -p <myvhost> name messages consumers
     Listing queues ...
@@ -373,18 +375,24 @@ How do I purge all waiting tasks?
 ---------------------------------
 
 **Answer:** You can use the ``celery purge`` command to purge
-all configured task queues::
+all configured task queues:
+
+.. code-block:: bash
 
-        $ celery purge
+    $ celery purge
 
-or programatically::
+or programmatically:
 
-        >>> from celery import current_app as celery
-        >>> celery.control.purge()
-        1753
+.. code-block:: python
+
+    >>> from celery import current_app as celery
+    >>> celery.control.purge()
+    1753
 
 If you only want to purge messages from a specific queue
-you have to use the AMQP API or the :program:`celery amqp` utility::
+you have to use the AMQP API or the :program:`celery amqp` utility:
+
+.. code-block:: bash
 
     $ celery amqp queue.purge <queue name>
 
@@ -619,7 +627,7 @@ How can I get the task id of the current task?
 
 **Answer**: The current id and more is available in the task request::
 
-    @celery.task()
+    @celery.task
     def mytask():
         cache.set(mytask.request.id, "Running")
 
@@ -665,7 +673,7 @@ Also, a common pattern is to add callbacks to tasks:
 
     logger = get_task_logger(__name__)
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 
@@ -704,7 +712,9 @@ so if you have more than one worker with the same host name, the
 control commands will be received in round-robin between them.
 
 To work around this you can explicitly set the host name for every worker
-using the :option:`--hostname` argument to :mod:`~celery.bin.celeryd`::
+using the :option:`--hostname` argument to :mod:`~celery.bin.celeryd`:
+
+.. code-block:: bash
 
     $ celeryd --hostname=$(hostname).1
     $ celeryd --hostname=$(hostname).2
@@ -777,7 +787,7 @@ this is rarely the case. Imagine the following task:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def process_upload(filename, tmpfile):
         # Increment a file count stored in a database
         increment_file_counter()
@@ -888,7 +898,9 @@ celeryd keeps spawning processes at startup
 -------------------------------------------
 
 **Answer**: This is a known issue on Windows.
-You have to start celeryd with the command::
+You have to start celeryd with the command:
+
+.. code-block:: bash
 
     $ python -m celery.bin.celeryd
 
@@ -909,7 +921,9 @@ services instead.
 --------------------------------------
 
 **Answer**: You need to specify the :option:`--settings` argument to
-:program:`manage.py`::
+:program:`manage.py`:
+
+.. code-block:: bash
 
     $ python manage.py celeryd start --settings=settings
 

+ 3 - 1
docs/getting-started/brokers/beanstalk.rst

@@ -12,7 +12,9 @@ Installation
 For the Beanstalk support you have to install additional dependencies.
 You can install both Celery and these dependencies in one go using
 either the `celery-with-beanstalk`_, or the `django-celery-with-beanstalk`
-bundles::
+bundles:
+
+.. code-block:: bash
 
     $ pip install -U celery-with-beanstalk
 

+ 3 - 1
docs/getting-started/brokers/couchdb.rst

@@ -11,7 +11,9 @@ Installation
 
 For the CouchDB support you have to install additional dependencies.
 You can install both Celery and these dependencies in one go using
-either the `celery-with-couchdb`_, or the `django-celery-with-couchdb` bundles::
+either the `celery-with-couchdb`_, or the `django-celery-with-couchdb` bundles:
+
+.. code-block:: bash
 
     $ pip install -U celery-with-couchdb
 

+ 3 - 1
docs/getting-started/brokers/django.rst

@@ -25,7 +25,9 @@ configuration values.
 
     INSTALLED_APPS = ('djcelery.transport', )
 
-#. Sync your database schema::
+#. Sync your database schema:
+
+.. code-block:: bash
 
     $ python manage.py syncdb
 

+ 3 - 1
docs/getting-started/brokers/mongodb.rst

@@ -11,7 +11,9 @@ Installation
 
 For the MongoDB support you have to install additional dependencies.
 You can install both Celery and these dependencies in one go using
-either the `celery-with-mongodb`_, or the `django-celery-with-mongodb` bundles::
+either the `celery-with-mongodb`_, or the `django-celery-with-mongodb` bundles:
+
+.. code-block:: bash
 
     $ pip install -U celery-with-mongodb
 

+ 34 - 13
docs/getting-started/brokers/rabbitmq.rst

@@ -44,12 +44,18 @@ Setting up RabbitMQ
 -------------------
 
 To use celery we need to create a RabbitMQ user, a virtual host and
-allow that user access to that virtual host::
+allow that user access to that virtual host:
+
+.. code-block:: bash
 
     $ rabbitmqctl add_user myuser mypassword
 
+.. code-block:: bash
+
     $ rabbitmqctl add_vhost myvhost
 
+.. code-block:: bash
+
     $ rabbitmqctl set_permissions -p myvhost myuser ".*" ".*" ".*"
 
 See the RabbitMQ `Admin Guide`_ for more information about `access control`_.
@@ -75,27 +81,32 @@ install git. Download and install from the disk image at
 http://code.google.com/p/git-osx-installer/downloads/list?can=3
 
 When git is installed you can finally clone the repository, storing it at the
-:file:`/lol` location::
+:file:`/lol` location:
 
-    $ git clone git://github.com/mxcl/homebrew /lol
+.. code-block:: bash
 
+    $ git clone git://github.com/mxcl/homebrew /lol
 
 Brew comes with a simple utility called :program:`brew`, used to install, remove and
 query packages. To use it you first have to add it to :envvar:`PATH`, by
-adding the following line to the end of your :file:`~/.profile`::
+adding the following line to the end of your :file:`~/.profile`:
+
+.. code-block:: bash
 
     export PATH="/lol/bin:/lol/sbin:$PATH"
 
-Save your profile and reload it::
+Save your profile and reload it:
+
+.. code-block:: bash
 
     $ source ~/.profile
 
+Finally, we can install rabbitmq using :program:`brew`:
 
-Finally, we can install rabbitmq using :program:`brew`::
+.. code-block:: bash
 
     $ brew install rabbitmq
 
-
 .. _`Homebrew`: http://github.com/mxcl/homebrew/
 .. _`git`: http://git-scm.org
 
@@ -108,9 +119,11 @@ If you're using a DHCP server that is giving you a random host name, you need
 to permanently configure the host name. This is because RabbitMQ uses the host name
 to communicate with nodes.
 
-Use the :program:`scutil` command to permanently set your host name::
+Use the :program:`scutil` command to permanently set your host name:
+
+.. code-block:: bash
 
-    sudo scutil --set HostName myhost.local
+    $ sudo scutil --set HostName myhost.local
 
 Then add that host name to :file:`/etc/hosts` so it's possible to resolve it
 back into an IP address::
@@ -118,7 +131,9 @@ back into an IP address::
     127.0.0.1       localhost myhost myhost.local
 
 If you start the rabbitmq server, your rabbit node should now be `rabbit@myhost`,
-as verified by :program:`rabbitmqctl`::
+as verified by :program:`rabbitmqctl`:
+
+.. code-block:: bash
 
     $ sudo rabbitmqctl status
     Status of node rabbit@myhost ...
@@ -141,17 +156,23 @@ then RabbitMQ will try to use `rabbit@23`, which is an illegal host name.
 Starting/Stopping the RabbitMQ server
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To start the server::
+To start the server:
+
+.. code-block:: bash
 
     $ sudo rabbitmq-server
 
 you can also run it in the background by adding the :option:`-detached` option
-(note: only one dash)::
+(note: only one dash):
+
+.. code-block:: bash
 
     $ sudo rabbitmq-server -detached
 
 Never use :program:`kill` to stop the RabbitMQ server, but rather use the
-:program:`rabbitmqctl` command::
+:program:`rabbitmqctl` command:
+
+.. code-block:: bash
 
     $ sudo rabbitmqctl stop
 

+ 3 - 1
docs/getting-started/brokers/redis.rst

@@ -11,7 +11,9 @@ Installation
 
 For the Redis support you have to install additional dependencies.
 You can install both Celery and these dependencies in one go using
-either the `celery-with-redis`_, or the `django-celery-with-redis` bundles::
+either the `celery-with-redis`_, or the `django-celery-with-redis` bundles:
+
+.. code-block:: bash
 
     $ pip install -U celery-with-redis
 

+ 3 - 1
docs/getting-started/brokers/sqs.rst

@@ -9,7 +9,9 @@
 Installation
 ============
 
-For the Amazon SQS support you have to install the `boto`_ library::
+For the Amazon SQS support you have to install the `boto`_ library:
+
+.. code-block:: bash
 
     $ pip install -U boto
 

+ 21 - 7
docs/getting-started/first-steps-with-celery.rst

@@ -54,7 +54,9 @@ Detailed information about using RabbitMQ with Celery:
 .. _`RabbitMQ`: http://www.rabbitmq.com/
 
 If you are using Ubuntu or Debian install RabbitMQ by executing this
-command::
+command:
+
+.. code-block:: bash
 
     $ sudo apt-get install rabbitmq-server
 
@@ -111,7 +113,9 @@ Installing Celery
 =================
 
 Celery is on the Python Package Index (PyPI), so it can be installed
-with standard Python tools like ``pip`` or ``easy_install``::
+with standard Python tools like ``pip`` or ``easy_install``:
+
+.. code-block:: bash
 
     $ pip install celery
 
@@ -158,7 +162,9 @@ Running the celery worker server
 ================================
 
 We now run the worker by executing our program with the ``worker``
-argument::
+argument:
+
+.. code-block:: bash
 
     $ celery -A tasks worker --loglevel=info
 
@@ -167,11 +173,15 @@ background as a daemon.  To do this you need to use the tools provided
 by your platform, or something like `supervisord`_ (see :ref:`daemonizing`
 for more information).
 
-For a complete listing of the command line options available, do::
+For a complete listing of the command line options available, do:
+
+.. code-block:: bash
 
     $  celery worker --help
 
-There also several other commands available, and help is also available::
+There are also several other commands available, and help is also available:
+
+.. code-block:: bash
 
     $ celery help
 
@@ -331,7 +341,9 @@ current directory or on the Python path, it could look like this:
     CELERY_ENABLE_UTC = True
 
 To verify that your configuration file works properly, and doesn't
-contain any syntax errors, you can try to import it::
+contain any syntax errors, you can try to import it:
+
+.. code-block:: bash
 
     $ python -m celeryconfig
 
@@ -362,7 +374,9 @@ instead, so that only 10 tasks of this type can be processed in a minute
 
 If you are using RabbitMQ, Redis or MongoDB as the
 broker then you can also direct the workers to set a new rate limit
-for the task at runtime::
+for the task at runtime:
+
+.. code-block:: bash
 
     $ celery control rate_limit tasks.add 10/m
     worker.example.com: OK

+ 1 - 1
docs/getting-started/introduction.rst

@@ -88,7 +88,7 @@ Celery is…
 
             celery = Celery('hello', broker='amqp://guest@localhost//')
 
-            @celery.task()
+            @celery.task
             def hello():
                 return 'hello world'
 

+ 63 - 21
docs/getting-started/next-steps.rst

@@ -70,7 +70,9 @@ you simply import this instance.
 Starting the worker
 -------------------
 
-The :program:`celery` program can be used to start the worker::
+The :program:`celery` program can be used to start the worker:
+
+.. code-block:: bash
 
     $ celery worker --app=proj -l info
 
@@ -124,7 +126,9 @@ and emulating priorities, all described in the :ref:`Routing Guide
 <guide-routing>`.
 
 You can get a complete list of command line arguments
-by passing in the `--help` flag::
+by passing in the `--help` flag:
+
+.. code-block:: bash
 
     $ celery worker --help
 
@@ -143,14 +147,18 @@ In production you will want to run the worker in the background, this is
 described in detail in the :ref:`daemonization tutorial <daemonizing>`.
 
 The daemonization scripts uses the :program:`celery multi` command to
-start one or more workers in the background::
+start one or more workers in the background:
+
+.. code-block:: bash
 
     $ celery multi start w1 -A proj -l info
     celeryd-multi v3.0.0 (Chiastic Slide)
     > Starting nodes...
         > w1.halcyon.local: OK
 
-You can restart it too::
+You can restart it too:
+
+.. code-block:: bash
 
     $ celery multi restart w1 -A proj -l info
     celeryd-multi v3.0.0 (Chiastic Slide)
@@ -163,7 +171,9 @@ You can restart it too::
     > Stopping nodes...
         > w1.halcyon.local: TERM -> 64052
 
-or stop it::
+or stop it:
+
+.. code-block:: bash
 
     $ celery multi stop -w1 -A proj -l info
 
@@ -176,7 +186,9 @@ or stop it::
 
 By default it will create pid and log files in the current directory,
 to protect against multiple workers launching on top of each other
-you are encouraged to put these in a dedicated directory::
+you are encouraged to put these in a dedicated directory:
+
+.. code-block:: bash
 
     $ mkdir -p /var/run/celery
     $ mkdir -p /var/log/celery
@@ -185,7 +197,9 @@ you are encouraged to put these in a dedicated directory::
 
 With the multi command you can start multiple workers, and there is a powerful
 command line syntax to specify arguments for different workers too,
-e.g::
+e.g:
+
+.. code-block:: bash
 
     $ celeryd multi start 10 -A proj -l info -Q:1-3 images,video -Q:4,5 data \
         -Q default -L:4,5 debug
@@ -204,7 +218,9 @@ is the name of the module, and the attribute name comes last.
 If a package name is specified instead it will automatically
 try to find a ``celery`` module in that package, and if the name
 is a module it will try to find a ``celery`` attribute in that module.
-This means that these are all equal::
+This means that these are all equal:
+
+.. code-block:: bash
 
     $ celery --app=proj
     $ celery --app=proj.celery:
@@ -552,14 +568,18 @@ with the ``queue`` argument to ``apply_async``::
     >>> add.apply_async((2, 2), queue='hipri')
 
 You can then make a worker consume from this queue by
-specifying the :option:`-Q` option::
+specifying the :option:`-Q` option:
+
+.. code-block:: bash
 
     $ celery -A proj worker -Q hipri
 
 You may specify multiple queues by using a comma separated list,
 for example you can make the worker consume from both the default
 queue, and the ``hipri`` queue, where
-the default queue is named ``celery`` for historical reasons::
+the default queue is named ``celery`` for historical reasons:
+
+.. code-block:: bash
 
     $ celery -A proj worker -Q hipri,celery
 
@@ -575,7 +595,9 @@ Remote Control
 If you're using RabbitMQ (AMQP), Redis or MongoDB as the broker then
 you can control and inspect the worker at runtime.
 
-For example you can see what tasks the worker is currently working on::
+For example you can see what tasks the worker is currently working on:
+
+.. code-block:: bash
 
     $ celery -A proj inspect active
 
@@ -584,7 +606,9 @@ control commands are received by every worker in the cluster.
 
 You can also specify one or more workers to act on the request
 using the :option:`--destination` option, which is a comma separated
-list of worker host names::
+list of worker host names:
+
+.. code-block:: bash
 
     $ celery -A proj inspect active --destination=worker1.example.com
 
@@ -594,35 +618,49 @@ to the request.
 The :program:`celery inspect` command contains commands that
 does not change anything in the worker, it only replies information
 and statistics about what is going on inside the worker.
-For a list of inspect commands you can execute::
+For a list of inspect commands you can execute:
+
+.. code-block:: bash
 
     $ celery -A proj inspect --help
 
 Then there is the :program:`celery control` command, which contains
-commands that actually changes things in the worker at runtime::
+commands that actually change things in the worker at runtime:
+
+.. code-block:: bash
 
     $ celery -A proj control --help
 
 For example you can force workers to enable event messages (used
-for monitoring tasks and workers)::
+for monitoring tasks and workers):
+
+.. code-block:: bash
 
     $ celery -A proj control enable_events
 
 When events are enabled you can then start the event dumper
-to see what the workers are doing::
+to see what the workers are doing:
+
+.. code-block:: bash
 
     $ celery -A proj events --dump
 
-or you can start the curses interface::
+or you can start the curses interface:
+
+.. code-block:: bash
 
     $ celery -A proj events
 
-when you're finished monitoring you can disable events again::
+when you're finished monitoring you can disable events again:
+
+.. code-block:: bash
 
     $ celery -A proj control disable_events
 
 The :program:`celery status` command also uses remote control commands
-and shows a list of online workers in the cluster::
+and shows a list of online workers in the cluster:
+
+.. code-block:: bash
 
     $ celery -A proj status
 
@@ -639,7 +677,9 @@ converts that UTC time to local time.  If you wish to use
 a different timezone than the system timezone then you must
 configure that using the :setting:`CELERY_TIMEZONE` setting.
 
-To use custom timezones you also have to install the :mod:`pytz` library::
+To use custom timezones you also have to install the :mod:`pytz` library:
+
+.. code-block:: bash
 
     $ pip install pytz
 
@@ -659,7 +699,9 @@ for throughput then you should read the :ref:`Optimizing Guide
 <guide-optimizing>`.
 
 If you're using RabbitMQ then you should install the :mod:`librabbitmq`
-module, which is an AMQP client implemented in C::
+module, which is an AMQP client implemented in C:
+
+.. code-block:: bash
 
     $ pip install librabbitmq
 

+ 28 - 10
docs/history/changelog-1.0.rst

@@ -17,11 +17,15 @@
   include `auto_delete` and `durable`. This broke the AMQP backend.
 
   If you've already used the AMQP backend this means you have to
-  delete the previous definitions::
+  delete the previous definitions:
+
+  .. code-block:: bash
 
       $ camqadm exchange.delete celeryresults
 
-  or::
+  or:
+
+  .. code-block:: bash
 
       $ python manage.py camqadm exchange.delete celeryresults
 
@@ -493,9 +497,11 @@ Fixes
     .. warning::
 
         If you're using Celery with Django, you can't use `project.settings`
-        as the settings module name, but the following should work::
+        as the settings module name, but the following should work:
+
+        .. code-block:: bash
 
-        $ python manage.py celeryd --settings=settings
+            $ python manage.py celeryd --settings=settings
 
 * Execution: `.messaging.TaskPublisher.send_task` now
   incorporates all the functionality apply_async previously did.
@@ -519,14 +525,18 @@ Fixes
 * `camqadm`: This is a new utility for command line access to the AMQP API.
 
     Excellent for deleting queues/bindings/exchanges, experimentation and
-    testing::
+    testing:
+
+    .. code-block:: bash
 
         $ camqadm
         1> help
 
     Gives an interactive shell, type `help` for a list of commands.
 
-    When using Django, use the management command instead::
+    When using Django, use the management command instead:
+
+    .. code-block:: bash
 
         $ python manage.py camqadm
         1> help
@@ -691,14 +701,18 @@ Backward incompatible changes
     This means `celeryd` no longer schedules periodic tasks by default,
     but a new daemon has been introduced: `celerybeat`.
 
-    To launch the periodic task scheduler you have to run celerybeat::
+    To launch the periodic task scheduler you have to run celerybeat:
+
+    .. code-block:: bash
 
         $ celerybeat
 
     Make sure this is running on one server only; if you run it twice, all
     periodic tasks will also be executed twice.
 
-    If you only have one worker server you can embed it into celeryd like this::
+    If you only have one worker server you can embed it into celeryd like this:
+
+    .. code-block:: bash
 
         $ celeryd --beat # Embed celerybeat in celeryd.
 
@@ -1517,7 +1531,9 @@ arguments, so be sure to flush your task queue before you upgrade.
   To turn off this feature, set `SEND_CELERY_TASK_ERROR_EMAILS` to
   `False` in your `settings.py`. Thanks to Grégoire Cachet.
 
-* You can now run the celery daemon by using `manage.py`::
+* You can now run the celery daemon by using `manage.py`:
+
+  .. code-block:: bash
 
         $ python manage.py celeryd
 
@@ -1653,7 +1669,9 @@ arguments, so be sure to flush your task queue before you upgrade.
 * Improved API documentation
 
 * Now using the Sphinx documentation system, you can build
-  the html documentation by doing ::
+  the html documentation by doing:
+
+    .. code-block:: bash
 
         $ cd docs
         $ make html

+ 36 - 12
docs/history/changelog-2.0.rst

@@ -273,11 +273,15 @@ Documentation
   stricter equivalence checks.
 
     If you've already hit this problem you may have to delete the
-    declaration::
+    declaration:
+
+    .. code-block:: bash
 
         $ camqadm exchange.delete celerycrq
 
-    or::
+    or:
+
+    .. code-block:: bash
 
         $ python manage.py camqadm exchange.delete celerycrq
 
@@ -378,7 +382,9 @@ Documentation
 
 * Added experimental support for persistent revokes.
 
-    Use the `-S|--statedb` argument to celeryd to enable it::
+    Use the `-S|--statedb` argument to celeryd to enable it:
+
+    .. code-block:: bash
 
         $ celeryd --statedb=/var/run/celeryd
 
@@ -590,7 +596,9 @@ Backward incompatible changes
     allows conflicting declarations for the auto_delete and durable settings.
 
     If you've already used celery with this backend chances are you
-    have to delete the previous declaration::
+    have to delete the previous declaration:
+
+    .. code-block:: bash
 
         $ camqadm exchange.delete celeryresults
 
@@ -627,7 +635,9 @@ News
     .. figure:: ../images/celeryevshotsm.jpg
 
     If you run `celeryev` with the `-d` switch it will act as an event
-    dumper, simply dumping the events it receives to standard out::
+    dumper, simply dumping the events it receives to standard out:
+
+    .. code-block:: bash
 
         $ celeryev -d
         -> celeryev: starting capture...
@@ -729,7 +739,9 @@ News
                                "routing_key": "name}
 
    This feature is added for easily setting up routing using the `-Q`
-   option to `celeryd`::
+   option to `celeryd`:
+
+   .. code-block:: bash
 
        $ celeryd -Q video, image
 
@@ -872,7 +884,9 @@ News
     For example, if :setting:`CELERY_QUEUES` defines four
     queues: `image`, `video`, `data` and `default`, the following
     command would make celeryd only consume from the `image` and `video`
-    queues::
+    queues:
+
+    .. code-block:: bash
 
         $ celeryd -Q image,video
 
@@ -899,19 +913,27 @@ News
     and :envvar:`CELERY_LOADER` environment variables, so when `nosetests`
     imports that, the unit test environment is all set up.
 
-    Before you run the tests you need to install the test requirements::
+    Before you run the tests you need to install the test requirements:
+
+    .. code-block:: bash
 
         $ pip install -r requirements/test.txt
 
-    Running all tests::
+    Running all tests:
+
+    .. code-block:: bash
 
         $ nosetests
 
-    Specifying the tests to run::
+    Specifying the tests to run:
+
+    .. code-block:: bash
 
         $ nosetests celery.tests.test_task
 
-    Producing HTML coverage::
+    Producing HTML coverage:
+
+    .. code-block:: bash
 
         $ nosetests --with-coverage3
 
@@ -922,7 +944,9 @@ News
 * :mod:`celeryd-multi <celeryd.bin.celeryd_multi>`: Tool for shell scripts
   to start multiple workers.
 
- Some examples::
+    Some examples:
+
+    .. code-block:: bash
 
         # Advanced example with 10 workers:
         #   * Three of the workers process the images and video queue

+ 39 - 14
docs/history/changelog-2.1.rst

@@ -218,7 +218,9 @@ News
 
 
     Example using celeryctl to start consuming from queue "queue", in
-    exchange "exchange", of type "direct" using binding key "key"::
+    exchange "exchange", of type "direct" using binding key "key":
+
+    .. code-block:: bash
 
         $ celeryctl inspect add_consumer queue exchange direct key
         $ celeryctl inspect cancel_consumer queue
@@ -287,7 +289,9 @@ Important Notes
     :file:`platform.pyc`) file from your previous Celery installation.
 
     To do this use :program:`python` to find the location
-    of this module::
+    of this module:
+
+    .. code-block:: bash
 
         $ python
         >>> import celery.platform
@@ -295,7 +299,9 @@ Important Notes
         <module 'celery.platform' from '/opt/devel/celery/celery/platform.pyc'>
 
     Here the compiled module is in :file:`/opt/devel/celery/celery/`,
-    to remove the offending files do::
+    to remove the offending files do:
+
+    .. code-block:: bash
 
         $ rm -f /opt/devel/celery/celery/platform.py*
 
@@ -333,13 +339,17 @@ News
 
     To use the Django admin monitor you need to do the following:
 
-    1. Create the new database tables.
+    1. Create the new database tables:
+
+        .. code-block:: bash
 
-        $ python manage.py syncdb
+            $ python manage.py syncdb
 
-    2. Start the django-celery snapshot camera::
+    2. Start the django-celery snapshot camera:
 
-        $ python manage.py celerycam
+        .. code-block:: bash
+
+            $ python manage.py celerycam
 
     3. Open up the django admin to monitor your cluster.
 
@@ -388,9 +398,12 @@ News
   apply tasks and inspect the results of tasks.
 
     .. seealso::
+
         The :ref:`monitoring-celeryctl` section in the :ref:`guide`.
 
-    Some examples::
+    Some examples:
+
+    .. code-block:: bash
 
         $ celeryctl apply tasks.add -a '[2, 2]' --countdown=10
 
@@ -467,7 +480,9 @@ News
 
     A comma separated list of (task) modules to be imported.
 
-    Example::
+    Example:
+
+    .. code-block:: bash
 
         $ celeryd -I app1.tasks,app2.tasks
 
@@ -675,29 +690,39 @@ Experimental
 
 * celeryd-multi: Added daemonization support.
 
-    celeryd-multi can now be used to start, stop and restart worker nodes.
+    celeryd-multi can now be used to start, stop and restart worker nodes:
+
+    .. code-block:: bash
 
         $ celeryd-multi start jerry elaine george kramer
 
     This also creates PID files and log files (:file:`celeryd@jerry.pid`,
     ..., :file:`celeryd@jerry.log`).  To specify a location for these files
     use the `--pidfile` and `--logfile` arguments with the `%n`
-    format::
+    format:
+
+    .. code-block:: bash
 
         $ celeryd-multi start jerry elaine george kramer \
                         --logfile=/var/log/celeryd@%n.log \
                         --pidfile=/var/run/celeryd@%n.pid
 
-    Stopping::
+    Stopping:
+
+    .. code-block:: bash
 
         $ celeryd-multi stop jerry elaine george kramer
 
     Restarting. The nodes will be restarted one by one as the old ones
-    are shutdown::
+    are shut down:
+
+    .. code-block:: bash
 
         $ celeryd-multi restart jerry elaine george kramer
 
-    Killing the nodes (**WARNING**: Will discard currently executing tasks)::
+    Killing the nodes (**WARNING**: Will discard currently executing tasks):
+
+    .. code-block:: bash
 
         $ celeryd-multi kill jerry elaine george kramer
 

+ 12 - 4
docs/history/changelog-2.2.rst

@@ -656,7 +656,9 @@ Important Notes
             Waiting for client...
 
     If you telnet to the specified port you will be presented
-    with a ``pdb`` shell::
+    with a ``pdb`` shell:
+
+    .. code-block:: bash
 
         $ telnet localhost 6900
         Connected to localhost.
@@ -699,7 +701,9 @@ Important Notes
         so it does not collide with older versions.
 
         If you would like to remove the old exchange you can do so
-        by executing the following command::
+        by executing the following command:
+
+        .. code-block:: bash
 
             $ camqadm exchange.delete celeryevent
 
@@ -707,7 +711,9 @@ Important Notes
   specified directly on the command line.
 
   Configuration options must appear after the last argument, separated
-  by two dashes::
+  by two dashes:
+
+  .. code-block:: bash
 
       $ celeryd -l info -I tasks -- broker.host=localhost broker.vhost=/app
 
@@ -908,7 +914,9 @@ News
 * The configuration module and loader to use can now be specified on
   the command line.
 
-    For example::
+    For example:
+
+    .. code-block:: bash
 
         $ celeryd --config=celeryconfig.py --loader=myloader.Loader
 

+ 2 - 0
docs/history/changelog-2.3.rst

@@ -284,6 +284,8 @@ News
 
     Example use:
 
+    .. code-block:: bash
+
         $ celeryd-multi start 4  -c 2  -- broker.host=amqp.example.com \
                                           broker.vhost=/               \
                                           celery.disable_rate_limits=yes

+ 3 - 1
docs/history/changelog-2.4.rst

@@ -203,7 +203,9 @@ Important Notes
     then the value from the configuration will be used as default.
 
     Also, programs now support the :option:`-b|--broker` option to specify
-    a broker URL on the command line::
+    a broker URL on the command line:
+
+    .. code-block:: bash
 
         $ celeryd -b redis://localhost
 

+ 3 - 1
docs/history/changelog-2.5.rst

@@ -136,7 +136,9 @@ Fixes
 - celeryctl can now be configured on the command line.
 
     Like with celeryd it is now possible to configure celery settings
-    on the command line for celeryctl::
+    on the command line for celeryctl:
+
+    .. code-block:: bash
 
         $ celeryctl -- broker.pool_limit=30
 

+ 1 - 1
docs/includes/introduction.txt

@@ -91,7 +91,7 @@ Celery is...
 
         celery = Celery('hello', broker='amqp://guest@localhost//')
 
-        @celery.task()
+        @celery.task
         def hello():
             return 'hello world'
 

+ 1 - 1
docs/internals/app-overview.rst

@@ -25,7 +25,7 @@ Creating tasks:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 

+ 2 - 2
docs/internals/guide.rst

@@ -191,7 +191,7 @@ Here's an example using Celery in single-mode:
 
     from .models import CeleryStats
 
-    @task()
+    @task
     def write_stats_to_db():
         stats = inspect().stats(timeout=1)
         for node_name, reply in stats:
@@ -205,7 +205,7 @@ and here's the same using Celery app objects:
     from .celery import celery
     from .models import CeleryStats
 
-    @celery.task()
+    @celery.task
     def write_stats_to_db():
         stats = celery.control.inspect().stats(timeout=1)
         for node_name, reply in stats:

+ 1 - 1
docs/reference/celery.rst

@@ -145,7 +145,7 @@ Application
 
         .. code-block:: python
 
-            @celery.task()
+            @celery.task
             def refresh_feed(url):
                 return ...
 

+ 9 - 5
docs/tutorials/daemonizing.rst

@@ -274,17 +274,21 @@ This can reveal hints as to why the service won't start.
 Also you will see the commands generated, so you can try to run the celeryd
 command manually to read the resulting error output.
 
-For example my `sh -x` output does this::
+For example my `sh -x` output does this:
 
-    ++ start-stop-daemon --start --chdir /opt/Opal/release/opal --quiet \
+.. code-block:: bash
+
+    ++ start-stop-daemon --start --chdir /opt/App/release/app --quiet \
         --oknodo --background --make-pidfile --pidfile /var/run/celeryd.pid \
-        --exec /opt/Opal/release/opal/manage.py celeryd -- --time-limit=300 \
+        --exec /opt/App/release/app/manage.py celeryd -- --time-limit=300 \
         -f /var/log/celeryd.log -l INFO
 
 Run the celeryd command after `--exec` (without the `--`) to show the
-actual resulting output::
+actual resulting output:
+
+.. code-block:: bash
 
-    $ /opt/Opal/release/opal/manage.py celeryd --time-limit=300 \
+    $ /opt/App/release/app/manage.py celeryd --time-limit=300 \
         -f /var/log/celeryd.log -l INFO
 
 .. _daemon-supervisord:

+ 3 - 1
docs/tutorials/debugging.rst

@@ -50,7 +50,9 @@ information::
         Waiting for client...
 
 If you telnet to the specified port you will be presented
-with a `pdb` shell::
+with a `pdb` shell:
+
+.. code-block:: bash
 
     $ telnet localhost 6900
     Connected to localhost.
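+
+To reach that prompt you first set a breakpoint inside a task using
+:mod:`celery.contrib.rdb`; a minimal sketch:
+
+.. code-block:: python
+
+    from celery import task
+    from celery.contrib import rdb
+
+    @task
+    def add(x, y):
+        result = x + y
+        rdb.set_trace()  # <- opens the remote debugger session
+        return result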

+ 11 - 7
docs/userguide/application.rst

@@ -45,7 +45,7 @@ Whenever you define a task, that task will also be added to the local registry:
 
 .. code-block:: python
 
-    >>> @celery.task()
+    >>> @celery.task
     ... def add(x, y):
     ...     return x + y
 
@@ -76,7 +76,7 @@ For example here, where the tasks module is also used to start a worker:
     from celery import Celery
     celery = Celery()
 
-    @celery.task()
+    @celery.task
     def add(x, y): return x + y
 
     if __name__ == '__main__':
@@ -98,7 +98,7 @@ You can specify another name for the main module:
     >>> celery.main
     'tasks'
 
-    >>> @celery.task()
+    >>> @celery.task
     ... def add(x, y):
     ...     return x + y
 
@@ -228,7 +228,9 @@ environment variable named :envvar:`CELERY_CONFIG_MODULE`:
     celery = Celery()
     celery.config_from_envvar('CELERY_CONFIG_MODULE')
 
-You can then specify the configuration module to use via the environment::
+You can then specify the configuration module to use via the environment:
+
+.. code-block:: bash
 
     $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l info
 
@@ -256,7 +258,7 @@ we use the task, or access an attribute (in this case :meth:`repr`):
 
 .. code-block:: python
 
-    >>> @celery.task()
+    >>> @celery.task
     ... def add(x, y):
     ...    return x + y
 
@@ -363,7 +365,9 @@ so that everything also works in the module-based compatibility API
 
 In development you can set the :envvar:`CELERY_TRACE_APP`
 environment variable to raise an exception if the app
-chain breaks::
+chain breaks:
+
+.. code-block:: bash
 
     $ CELERY_TRACE_APP=1 celery worker -l info
 
@@ -468,7 +472,7 @@ by changing its :meth:`@Celery.Task` attribute:
     >>> celery.Task
     <unbound MyBaseTask>
 
-    >>> @x.task()
+    >>> @x.task
     ... def add(x, y):
     ...     return x + y
 

+ 5 - 3
docs/userguide/calling.rst

@@ -87,7 +87,7 @@ called `add`, returning the sum of two arguments:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 
@@ -144,7 +144,7 @@ This is an example error callback:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def error_handler(uuid):
         result = AsyncResult(uuid)
         exc = result.get(propagate=False)
@@ -462,7 +462,9 @@ Simple routing (name <-> name) is accomplished using the ``queue`` option::
     add.apply_async(queue='priority.high')
 
 You can then assign workers to the ``priority.high`` queue by using
-the workers :option:`-Q` argument::
+the workers :option:`-Q` argument:
+
+.. code-block:: bash
 
     $ celery worker -l info -Q celery,priority.high
 

+ 8 - 6
docs/userguide/canvas.rst

@@ -461,7 +461,7 @@ the error callbacks take the id of the parent task as argument instead:
     import os
     from proj.celery import celery
 
-    @celery.task()
+    @celery.task
     def log_error(task_id):
         result = celery.AsyncResult(task_id)
         result.get(propagate=False)  # make sure result written.
@@ -538,7 +538,9 @@ You can even convert these graphs to *dot* format::
     ...     res.parent.parent.graph.to_dot(fh)
 
 
-and create images::
+and create images:
+
+.. code-block:: bash
 
     $ dot -Tpng graph.dot -o graph.png
 
@@ -676,11 +678,11 @@ already a standard function):
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 
-    @celery.task()
+    @celery.task
     def tsum(numbers):
         return sum(numbers)
 
@@ -789,7 +791,7 @@ is the same as having a task doing:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def temp():
         return [xsum(range(10)), xsum(range(100))]
 
@@ -802,7 +804,7 @@ is the same as having a task doing:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def temp():
         return [add(i, i) for i in range(10)]
 

+ 3 - 1
docs/userguide/concurrency/eventlet.rst

@@ -40,7 +40,9 @@ Enabling Eventlet
 =================
 
 You can enable the Eventlet pool by using the ``-P`` option to
-:program:`celery worker`::
+:program:`celery worker`:
+
+.. code-block:: bash
 
     $ celery worker -P eventlet -c 1000
 

+ 198 - 46
docs/userguide/monitoring.rst

@@ -31,11 +31,15 @@ Workers
 :program:`celery` can also be used to inspect
 and manage worker nodes (and to some degree tasks).
 
-To list all the commands available do::
+To list all the commands available do:
+
+.. code-block:: bash
 
     $ celery help
 
-or to get help for a specific command do::
+or to get help for a specific command do:
+
+.. code-block:: bash
 
     $ celery <command> --help
 
@@ -53,12 +57,14 @@ Commands
   ``--force-bpython|-B``, or ``--force-python|-P``.
 
 * **status**: List active nodes in this cluster
-    ::
 
-    $ celery status
+    .. code-block:: bash
+
+        $ celery status
 
 * **result**: Show the result of a task
-    ::
+
+    .. code-block:: bash
 
         $ celery result -t tasks.add 4e196aa4-0141-4601-8138-7aa33db0f577
 
@@ -66,7 +72,8 @@ Commands
     task doesn't use a custom result backend.
 
 * **purge**: Purge messages from all configured task queues.
-    ::
+
+    .. code-block:: bash
 
         $ celery purge
 
@@ -75,14 +82,16 @@ Commands
         be permanently deleted!
 
 * **inspect active**: List active tasks
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect active
 
     These are all the tasks that are currently being executed.
 
 * **inspect scheduled**: List scheduled ETA tasks
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect scheduled
 
@@ -90,7 +99,8 @@ Commands
     `eta` or `countdown` argument set.
 
 * **inspect reserved**: List reserved tasks
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect reserved
 
@@ -99,32 +109,38 @@ Commands
     with an eta).
 
 * **inspect revoked**: List history of revoked tasks
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect revoked
 
 * **inspect registered**: List registered tasks
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect registered
 
 * **inspect stats**: Show worker statistics
-    ::
+
+    .. code-block:: bash
 
         $ celery inspect stats
 
 * **control enable_events**: Enable events
-    ::
+
+    .. code-block:: bash
 
         $ celery control enable_events
 
 * **control disable_events**: Disable events
-    ::
+
+    .. code-block:: bash
 
         $ celery control disable_events
 
 * **migrate**: Migrate tasks from one broker to another (**EXPERIMENTAL**).
-  ::
+
+    .. code-block:: bash
 
         $ celery migrate redis://localhost amqp://localhost
 
@@ -146,7 +162,9 @@ Specifying destination nodes
 
 By default the inspect commands operate on all workers.
 You can specify a single worker, or a list of workers, by using the
-`--destination` argument::
+`--destination` argument:
+
+.. code-block:: bash
 
     $ celery inspect -d w1,w2 reserved
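+
+The same ``destination`` argument is accepted by
+:meth:`@control.inspect`, so a rough programmatic equivalent is:
+
+.. code-block:: python
+
+    >>> celery.control.inspect(destination=['w1', 'w2']).reserved()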
 
@@ -180,11 +198,15 @@ More screenshots_:
 Usage
 ~~~~~
 
-Install Celery Flower: ::
+Install Celery Flower:
+
+.. code-block:: bash
 
     $ pip install flower
 
-Launch Celery Flower and open http://localhost:8008 in browser: ::
+Launch Celery Flower and open http://localhost:8008 in your browser:
+
+.. code-block:: bash
 
     $ celery flower
 
@@ -218,11 +240,15 @@ but you won't see any data appearing until you start the snapshot camera.
 The camera takes snapshots of the events your workers send at regular
 intervals, storing them in your database (See :ref:`monitoring-snapshots`).
 
-To start the camera run::
+To start the camera run:
+
+.. code-block:: bash
 
     $ python manage.py celerycam
 
-If you haven't already enabled the sending of events you need to do so::
+If you haven't already enabled the sending of events you need to do so:
+
+.. code-block:: bash
 
     $ python manage.py celery control enable_events
 
@@ -244,7 +270,9 @@ Shutter frequency
 By default the camera takes a snapshot every second; if this is too frequent
 or you want to have higher precision, then you can change this using the
 ``--frequency`` argument.  This is a float describing how often, in seconds,
-it should wake up to check if there are any new events::
+it should wake up to check if there are any new events:
+
+.. code-block:: bash
 
     $ python manage.py celerycam --frequency=3.0
 
@@ -307,18 +335,24 @@ camera in the same process.
 
 **Installing**
 
-Using :program:`pip`::
+Using :program:`pip`:
+
+.. code-block:: bash
 
     $ pip install -U django-celery
 
-or using :program:`easy_install`::
+or using :program:`easy_install`:
+
+.. code-block:: bash
 
     $ easy_install -U django-celery
 
 **Running**
 
 :program:`djcelerymon` reads configuration from your Celery configuration
-module, and sets up the Django environment using the same settings::
+module, and sets up the Django environment using the same settings:
+
+.. code-block:: bash
 
     $ djcelerymon
 
@@ -371,7 +405,9 @@ task and worker history.  You can inspect the result and traceback of tasks,
 and it also supports some management commands like rate limiting and shutting
 down workers.
 
-Starting::
+Starting:
+
+.. code-block:: bash
 
     $ celery events
 
@@ -381,15 +417,21 @@ You should see a screen like:
 
 
 `celery events` is also used to start snapshot cameras (see
-:ref:`monitoring-snapshots`::
+:ref:`monitoring-snapshots`):
+
+.. code-block:: bash
 
     $ celery events --camera=<camera-class> --frequency=1.0
 
-and it includes a tool to dump events to :file:`stdout`::
+and it includes a tool to dump events to :file:`stdout`:
+
+.. code-block:: bash
 
     $ celery events --dump
 
-For a complete list of options use ``--help``::
+For a complete list of options use ``--help``:
+
+.. code-block:: bash
 
     $ celery events --help
 
@@ -437,8 +479,9 @@ as manage users, virtual hosts and their permissions.
 Inspecting queues
 -----------------
 
-Finding the number of tasks in a queue::
+Finding the number of tasks in a queue:
 
+.. code-block:: bash
 
     $ rabbitmqctl list_queues name messages messages_ready \
                               messages_unacknowledged
@@ -451,11 +494,15 @@ not acknowledged yet (meaning it is in progress, or has been reserved).
 `messages` is the sum of ready and unacknowledged messages.
 
 
-Finding the number of workers currently consuming from a queue::
+Finding the number of workers currently consuming from a queue:
+
+.. code-block:: bash
 
     $ rabbitmqctl list_queues name consumers
 
-Finding the amount of memory allocated to a queue::
+Finding the amount of memory allocated to a queue:
+
+.. code-block:: bash
 
     $ rabbitmqctl list_queues name memory
 
@@ -476,11 +523,15 @@ the `redis-cli(1)` command to list lengths of queues.
 Inspecting queues
 -----------------
 
-Finding the number of tasks in a queue::
+Finding the number of tasks in a queue:
+
+.. code-block:: bash
 
     $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER llen QUEUE_NAME
 
-The default queue is named `celery`. To get all available queues, invoke::
+The default queue is named `celery`. To get all available queues, invoke:
+
+.. code-block:: bash
 
     $ redis-cli -h HOST -p PORT -n DATABASE_NUMBER keys \*
 
@@ -547,7 +598,9 @@ write it to a database, send it by email or something else entirely.
 :program:`celery events` is then used to take snapshots with the camera,
 for example if you want to capture state every 2 seconds using the
 camera ``myapp.Camera`` you run :program:`celery events` with the following
-arguments::
+arguments:
+
+.. code-block:: bash
 
     $ celery events -c myapp.Camera --frequency=2.0
 
@@ -557,6 +610,11 @@ arguments::
 Custom Camera
 ~~~~~~~~~~~~~
 
+Cameras can be useful if you need to capture events and do something
+with those events at an interval.  For real-time event processing
+you should use :class:`@events.Receiver` directly, like in
+:ref:`event-real-time-example`.
+
 Here is an example camera, dumping the snapshot to screen:
 
 .. code-block:: python
@@ -565,7 +623,6 @@ Here is an example camera, dumping the snapshot to screen:
 
     from celery.events.snapshot import Polaroid
 
-
     class DumpCam(Polaroid):
 
         def on_shutter(self, state):
@@ -581,26 +638,121 @@ See the API reference for :mod:`celery.events.state` to read more
 about state objects.
 
 Now you can use this cam with :program:`celery events` by specifying
-it with the `-c` option::
+it with the `-c` option:
+
+.. code-block:: bash
 
     $ celery events -c myapp.DumpCam --frequency=2.0
 
-Or you can use it programmatically like this::
+Or you can use it programmatically like this:
 
-    from celery.events import EventReceiver
-    from celery.messaging import establish_connection
-    from celery.events.state import State
+.. code-block:: python
+
+    from celery import Celery
     from myapp import DumpCam
 
-    def main():
-        state = State()
-        with establish_connection() as connection:
-            recv = EventReceiver(connection, handlers={'*': state.event})
-            with DumpCam(state, freq=1.0):
+    def main(app, freq=1.0):
+        state = app.events.State()
+        with app.connection() as connection:
+            recv = app.events.Receiver(connection, handlers={'*': state.event})
+            with DumpCam(state, freq=freq):
                 recv.capture(limit=None, timeout=None)
 
     if __name__ == '__main__':
-        main()
+        celery = Celery(broker='amqp://guest@localhost//')
+        main(celery)
+
+.. _event-real-time-example:
+
+Real-time processing
+--------------------
+
+To process events in real-time we need the following:
+
+- An event consumer (this is the ``Receiver``)
+
+- A set of handlers called when events come in.
+
+  You can have different handlers for each event type,
+  or a catch-all handler can be used ('*').
+
+- State (optional)
+
+  :class:`@events.State` is a convenient in-memory representation
+  of tasks and workers in the cluster that is updated as events come in.
+
+  It encapsulates solutions for many common things, like checking if a
+  worker is still alive (by verifying heartbeats), merging event fields
+  together as events come in, making sure timestamps are in sync, and so on.
+
+Combining these we can easily process events in real-time:
+
+.. code-block:: python
+
+    from celery import Celery
+
+    def monitor_events(app):
+        state = app.events.State()
+
+        def on_event(event):
+            state.event(event)   # <-- updates in-memory cluster state
+
+            print('Workers online: %s' % ', '.join(
+                worker.hostname for worker in state.workers.values()
+                if worker.alive
+            ))
+
+        with app.connection() as connection:
+            recv = app.events.Receiver(connection, handlers={'*': on_event})
+            recv.capture(limit=None, timeout=None, wakeup=True)
+
+.. note::
+
+    The wakeup argument to ``capture`` sends a signal to all workers
+    to force them to send a heartbeat.  This way we can immediately see
+    workers when the monitor starts.
+
+We can listen to specific events by specifying the handlers:
+
+.. code-block:: python
+
+    from celery import Celery
+
+    def my_monitor(app):
+        state = app.events.State()
+
+        def announce_failed_tasks(event):
+            state.event(event)
+            # the task name is sent only with -received events;
+            # ``state`` keeps track of it for us.
+            task = state.tasks.get(event['uuid'])
+
+            print('TASK FAILED: %s[%s] %s' % (
+                task.name, task.uuid, task.info(), ))
+
+        def announce_dead_workers(event):
+            state.event(event)
+            hostname = event['hostname']
+
+            if not state.workers[hostname].alive:
+                print('Worker %s missed heartbeats' % (hostname, ))
+
+        with app.connection() as connection:
+            recv = app.events.Receiver(connection, handlers={
+                    'task-failed': announce_failed_tasks,
+                    'worker-heartbeat': announce_dead_workers,
+            })
+            recv.capture(limit=None, timeout=None, wakeup=True)
+
+    if __name__ == '__main__':
+        celery = Celery(broker='amqp://guest@localhost//')
+        my_monitor(celery)
+
 
 
 .. _event-reference:

+ 3 - 1
docs/userguide/optimizing.rst

@@ -58,7 +58,9 @@ librabbitmq
 -----------
 
 If you're using RabbitMQ (AMQP) as the broker then you can install the
-:mod:`librabbitmq` module to use an optimized client written in C::
+:mod:`librabbitmq` module to use an optimized client written in C:
+
+.. code-block:: bash
 
     $ pip install librabbitmq
 

+ 12 - 4
docs/userguide/periodic-tasks.rst

@@ -215,20 +215,26 @@ the :setting:`CELERY_TIMEZONE` setting:
 Starting the Scheduler
 ======================
 
-To start the :program:`celery beat` service::
+To start the :program:`celery beat` service:
+
+.. code-block:: bash
 
     $ celery beat
 
 You can also embed `beat` inside the worker by enabling the
 worker's `-B` option; this is convenient if you only intend to
-use one worker node::
+use one worker node:
+
+.. code-block:: bash
 
     $ celery worker -B
 
 Beat needs to store the last run times of the tasks in a local database
 file (named `celerybeat-schedule` by default), so it needs access to
 write in the current directory, or alternatively you can specify a custom
-location for this file::
+location for this file:
+
+.. code-block:: bash
 
     $ celery beat -s /home/celery/var/run/celerybeat-schedule
 
@@ -248,7 +254,9 @@ which is simply keeping track of the last run times in a local database file
 (a :mod:`shelve`).
 
 `django-celery` also ships with a scheduler that stores the schedule in the
-Django database::
+Django database:
+
+.. code-block:: bash
 
     $ celery beat -S djcelery.schedulers.DatabaseScheduler
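+
+Whichever scheduler stores the run times, the schedule itself is
+plain configuration.  A minimal sketch, assuming a task named
+``tasks.add``:
+
+.. code-block:: python
+
+    from datetime import timedelta
+
+    CELERYBEAT_SCHEDULE = {
+        'add-every-30-seconds': {
+            'task': 'tasks.add',
+            'schedule': timedelta(seconds=30),
+            'args': (16, 16),
+        },
+    }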
 

+ 34 - 15
docs/userguide/routing.rst

@@ -44,14 +44,18 @@ With this route enabled import feed tasks will be routed to the
 `"feeds"` queue, while all other tasks will be routed to the default queue
 (named `"celery"` for historical reasons).
 
-Now you can start server `z` to only process the feeds queue like this::
+Now you can start server `z` to only process the feeds queue like this:
 
-    (z)$ celery worker -Q feeds
+.. code-block:: bash
+
+    user@z:/$ celery worker -Q feeds
 
 You can specify as many queues as you want, so you can make this server
-process the default queue as well::
+process the default queue as well:
+
+.. code-block:: bash
 
-    (z)$ celery worker -Q feeds,celery
+    user@z:/$ celery worker -Q feeds,celery
 
 .. _routing-changing-default-queue:
 
@@ -142,19 +146,25 @@ You can also override this using the `routing_key` argument to
 
 
 To make server `z` consume from the feed queue exclusively you can
-start it with the ``-Q`` option::
+start it with the ``-Q`` option:
+
+.. code-block:: bash
 
-    (z)$ celery worker -Q feed_tasks --hostname=z.example.com
+    user@z:/$ celery worker -Q feed_tasks --hostname=z.example.com
 
-Servers `x` and `y` must be configured to consume from the default queue::
+Servers `x` and `y` must be configured to consume from the default queue:
 
-    (x)$ celery worker -Q default --hostname=x.example.com
-    (y)$ celery worker -Q default --hostname=y.example.com
+.. code-block:: bash
+
+    user@x:/$ celery worker -Q default --hostname=x.example.com
+    user@y:/$ celery worker -Q default --hostname=y.example.com
 
 If you want, you can even have your feed processing worker handle regular
-tasks as well, maybe in times when there's a lot of work to do::
+tasks as well, maybe in times when there's a lot of work to do:
+
+.. code-block:: bash
 
-    (z)$ celery worker -Q feed_tasks,default --hostname=z.example.com
+    user@z:/$ celery worker -Q feed_tasks,default --hostname=z.example.com
 
 If you have another queue but on another exchange you want to add,
 just specify a custom exchange and exchange type:
@@ -356,7 +366,9 @@ queues or sending messages.  It can also be used for non-AMQP brokers,
 but different implementations may not implement all commands.
 
 You can write commands directly in the arguments to :program:`celery amqp`,
-or just start with no arguments to start it in shell-mode::
+or just start with no arguments to start it in shell-mode:
+
+.. code-block:: bash
 
     $ celery amqp
     -> connecting to amqp://guest@localhost:5672/.
@@ -368,8 +380,11 @@ have executed so far.  Type ``help`` for a list of commands available.
 It also supports auto-completion, so you can start typing a command and then
 hit the `tab` key to show a list of possible matches.
 
-Let's create a queue we can send messages to::
+Let's create a queue we can send messages to:
+
+.. code-block:: bash
 
+    $ celery amqp
     1> exchange.declare testexchange direct
     ok.
     2> queue.declare testqueue
@@ -502,11 +517,15 @@ All you need to define a new router is to create a class with a
             return None
 
 If you return the ``queue`` key, it will expand with the defined settings of
-that queue in :setting:`CELERY_QUEUES`::
+that queue in :setting:`CELERY_QUEUES`:
+
+.. code-block:: javascript
 
     {'queue': 'video', 'routing_key': 'video.compress'}
 
-    becomes -->
+becomes -->
+
+.. code-block:: javascript
 
         {'queue': 'video',
          'exchange': 'video',
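+
+A complete router is then just a class whose ``route_for_task``
+method returns such a dict, or ``None`` to fall through to the next
+router.  A sketch, using a hypothetical ``video`` queue and task name:
+
+.. code-block:: python
+
+    class VideoRouter(object):
+
+        def route_for_task(self, task, args=None, kwargs=None):
+            if task == 'myapp.tasks.compress_video':
+                # expanded with the 'video' entry in CELERY_QUEUES
+                return {'queue': 'video',
+                        'routing_key': 'video.compress'}
+            return None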

+ 18 - 19
docs/userguide/tasks.rst

@@ -53,7 +53,7 @@ the :meth:`~@Celery.task` decorator:
 
     from .models import User
 
-    @celery.task()
+    @celery.task
     def create_user(username, password):
         User.objects.create(username=username, password=password)
 
@@ -79,7 +79,7 @@ these can be specified as arguments to the decorator:
 
         from celery import task
 
-        @task()
+        @task
         def add(x, y):
             return x + y
 
@@ -92,7 +92,7 @@ these can be specified as arguments to the decorator:
 
     .. code-block:: python
 
-        @celery.task()
+        @celery.task
         @decorator2
         @decorator1
         def add(x, y):
@@ -139,7 +139,7 @@ if the module name is "tasks.py":
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 
@@ -227,7 +227,7 @@ An example task accessing information in the context is:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def dump_context(x, y):
         print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format(
                 dump_context.request))
@@ -239,12 +239,11 @@ An example task accessing information in the context is:
 
     from celery import current_task
 
-    @celery.task()
+    @celery.task
     def dump_context(x, y):
         print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format(
                 current_task.request))
 
-
 .. _task-logging:
 
 Logging
@@ -266,7 +265,7 @@ for all of your tasks at the top of your module:
 
     logger = get_task_logger(__name__)
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         logger.info('Adding {0} + {1}'.format(x, y))
         return x + y
@@ -299,7 +298,7 @@ Here's an example using ``retry``:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def send_twitter_status(oauth, tweet):
         try:
             twitter = Twitter(oauth)
@@ -683,7 +682,7 @@ Use :meth:`~@Task.update_state` to update a task's state::
 
     from celery import current_task
 
-    @celery.task()
+    @celery.task
     def upload_files(filenames):
         for i, file in enumerate(filenames):
             current_task.update_state(state='PROGRESS',
@@ -765,7 +764,7 @@ As an example, the following code,
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def add(x, y):
         return x + y
 
@@ -774,7 +773,7 @@ will do roughly this behind the scenes:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     class AddTask(Task):
 
         def run(self, x, y):
@@ -1014,21 +1013,21 @@ Make your design asynchronous instead, for example by using *callbacks*.
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def update_page_info(url):
         page = fetch_page.delay(url).get()
         info = parse_page.delay(url, page).get()
         store_page_info.delay(url, info)
 
-    @celery.task()
+    @celery.task
     def fetch_page(url):
         return myhttplib.get(url)
 
-    @celery.task()
+    @celery.task
     def parse_page(url, page):
         return myparser.parse_document(page)
 
-    @celery.task()
+    @celery.task
     def store_page_info(url, info):
         return PageInfo.objects.create(url, info)
 
@@ -1145,7 +1144,7 @@ that automatically expands some abbreviations in it:
         title = models.CharField()
         body = models.TextField()
 
-    @celery.task()
+    @celery.task
     def expand_abbreviations(article):
         article.body.replace('MyCorp', 'My Corporation')
         article.save()
@@ -1166,7 +1165,7 @@ re-fetch the article in the task body:
 
 .. code-block:: python
 
-    @celery.task()
+    @celery.task
     def expand_abbreviations(article_id):
         article = Article.objects.get(id=article_id)
         article.body.replace('MyCorp', 'My Corporation')
@@ -1327,7 +1326,7 @@ blog/tasks.py
     from blog.models import Comment
 
 
-    @celery.task()
+    @celery.task
     def spam_filter(comment_id, remote_addr=None):
         logger = spam_filter.get_logger()
         logger.info('Running spam filter for comment %s', comment_id)

+ 46 - 16
docs/userguide/workers.rst

@@ -19,18 +19,24 @@ Starting the worker
     in the background.  See :ref:`daemonizing` for help
     detaching the worker using popular daemonization tools.
 
-You can start the worker in the foreground by executing the command::
+You can start the worker in the foreground by executing the command:
+
+.. code-block:: bash
 
     $ celery worker --app=app -l info
 
 For a full list of available command line options see
-:mod:`~celery.bin.celeryd`, or simply do::
+:mod:`~celery.bin.celeryd`, or simply do:
+
+.. code-block:: bash
 
     $ celery worker --help
 
 You can also start multiple workers on the same machine. If you do so
 be sure to give a unique name to each individual worker by specifying a
-host name with the :option:`--hostname|-n` argument::
+host name with the :option:`--hostname|-n` argument:
+
+.. code-block:: bash
 
     $ celery worker --loglevel=INFO --concurrency=10 -n worker1.%h
     $ celery worker --loglevel=INFO --concurrency=10 -n worker2.%h
@@ -69,7 +75,9 @@ option set).
 
 Also as processes can't override the :sig:`KILL` signal, the worker will
 not be able to reap its children, so make sure to do so manually.  This
-command usually does the trick::
+command usually does the trick:
+
+.. code-block:: bash
 
     $ ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill -9
 
@@ -79,7 +87,9 @@ Restarting the worker
 =====================
 
 Other than stopping then starting the worker to restart, you can also
-restart the worker using the :sig:`HUP` signal::
+restart the worker using the :sig:`HUP` signal:
+
+.. code-block:: bash
 
     $ kill -HUP $pid
 
@@ -296,7 +306,7 @@ time limit kills it:
     from myapp import celery
     from celery.exceptions import SoftTimeLimitExceeded
 
-    @celery.task()
+    @celery.task
     def mytask():
         try:
             do_work()
@@ -415,7 +425,9 @@ By default it will consume from all queues defined in the
 queue named ``celery``).
 
 You can specify what queues to consume from at startup,
-by giving a comma separated list of queues to the :option:`-Q` option::
+by giving a comma separated list of queues to the :option:`-Q` option:
+
+.. code-block:: bash
 
     $ celery worker -l info -Q foo,bar,baz
 
@@ -437,14 +449,18 @@ The :control:`add_consumer` control command will tell one or more workers
 to start consuming from a queue. This operation is idempotent.
 
 To tell all workers in the cluster to start consuming from a queue
-named "``foo``" you can use the :program:`celery control` program::
+named "``foo``" you can use the :program:`celery control` program:
+
+.. code-block:: bash
 
     $ celery control add_consumer foo
     -> worker1.local: OK
         started consuming from u'foo'
 
 If you want to specify a specific worker you can use the
-:option:`--destination`` argument::
+:option:`--destination` argument:
+
+.. code-block:: bash
 
     $ celery control add_consumer foo -d worker1.local
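+
+Programmatically the same is done with :meth:`@control.add_consumer`;
+a short sketch:
+
+.. code-block:: python
+
+    >>> myapp.control.add_consumer('foo', reply=True,
+    ...                            destination=['worker1.local'])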
 
@@ -484,18 +500,24 @@ You can cancel a consumer by queue name using the :control:`cancel_consumer`
 control command.
 
 To force all workers in the cluster to cancel consuming from a queue
-you can use the :program:`celery control` program::
+you can use the :program:`celery control` program:
+
+.. code-block:: bash
 
     $ celery control cancel_consumer foo
 
 The :option:`--destination` argument can be used to specify a worker, or a
-list of workers, to act on the command::
+list of workers, to act on the command:
+
+.. code-block:: bash
 
     $ celery control cancel_consumer foo -d worker1.local
 
 
 You can also cancel consumers programmatically using the
-:meth:`@control.cancel_consumer` method::
+:meth:`@control.cancel_consumer` method:
+
+.. code-block:: python
 
     >>> myapp.control.cancel_consumer('foo', reply=True)
     [{u'worker1.local': {u'ok': u"no longer consuming from u'foo'"}}]
@@ -506,14 +528,18 @@ Queues: List of active queues
 -----------------------------
 
 You can get a list of queues that a worker consumes from by using
-the :control:`active_queues` control command::
+the :control:`active_queues` control command:
+
+.. code-block:: bash
 
     $ celery inspect active_queues
     [...]
 
 Like all other remote control commands this also supports the
 :option:`--destination` argument used to specify which workers should
-reply to the request::
+reply to the request:
+
+.. code-block:: bash
 
     $ celery inspect active_queues -d worker1.local
     [...]
@@ -563,7 +589,9 @@ implementations:
     Used if the :mod:`pyinotify` library is installed.
     If you are running on Linux this is the recommended implementation,
     to install the :mod:`pyinotify` library you have to run the following
-    command::
+    command:
+
+    .. code-block:: bash
 
         $ pip install pyinotify
 
@@ -575,7 +603,9 @@ implementations:
     expensive.
 
 You can force an implementation by setting the :envvar:`CELERYD_FSNOTIFY`
-environment variable::
+environment variable:
+
+.. code-block:: bash
 
     $ env CELERYD_FSNOTIFY=stat celery worker -l info --autoreload
 

+ 16 - 6
docs/whatsnew-2.5.rst

@@ -63,9 +63,11 @@ race condition leading to an annoying warning.
     to be removed.
 
     The :program:`camqadm` command can be used to delete the
-    previous exchange::
+    previous exchange:
 
-        $ camqadm exchange.delete celeryresults
+    .. code-block:: bash
+
+        $ camqadm exchange.delete celeryresults
 
     As an alternative to deleting the old exchange you can
     configure a new name for the exchange::
@@ -237,7 +239,9 @@ implementations:
     Used if the :mod:`pyinotify` library is installed.
     If you are running on Linux this is the recommended implementation,
     to install the :mod:`pyinotify` library you have to run the following
-    command::
+    command:
+
+    .. code-block:: bash
 
         $ pip install pyinotify
 
@@ -249,7 +253,9 @@ implementations:
     expensive.
 
 You can force an implementation by setting the :envvar:`CELERYD_FSNOTIFY`
-environment variable::
+environment variable:
+
+.. code-block:: bash
 
     $ env CELERYD_FSNOTIFY=stat celeryd -l info --autoreload
 
@@ -371,7 +377,9 @@ In Other News
 - celerybeat can now be configured on the command line like celeryd.
 
   Additional configuration must be added at the end of the argument list
-  followed by ``--``, for example::
+  followed by ``--``, for example:
+
+  .. code-block:: bash
 
     $ celerybeat -l info -- celerybeat.max_loop_interval=10.0
 
@@ -419,7 +427,9 @@ In Other News
     Note that this is experimental and you should have a backup
     of the data before proceeding.
 
-    **Examples**::
+    **Examples**:
+
+    .. code-block:: bash
 
         $ celeryctl migrate redis://localhost amqp://localhost
         $ celeryctl migrate amqp://localhost//v1 amqp://localhost//v2

+ 33 - 14
docs/whatsnew-3.0.rst

@@ -94,7 +94,9 @@ The workers remote control command exchanges has been renamed
 has been removed, and that makes it incompatible with earlier versions.
 
 You can manually delete the old exchanges if you want,
-using the :program:`celery amqp` command (previously called ``camqadm``)::
+using the :program:`celery amqp` command (previously called ``camqadm``):
+
+.. code-block:: bash
 
     $ celery amqp exchange.delete celeryd.pidbox
     $ celery amqp exchange.delete reply.celeryd.pidbox
@@ -123,10 +125,11 @@ New ``celery`` umbrella command
 All Celery's command line programs are now available from a single
 :program:`celery` umbrella command.
 
-You can see a list of subcommands and options by running::
+You can see a list of subcommands and options by running:
 
-    $ celery help
+.. code-block:: bash
 
+    $ celery help
 
 Commands include:
 
@@ -162,7 +165,9 @@ The :mod:`celery.app.task` module is now a module instead of a package.
 
 The setup.py install script will try to remove the old package,
 but if that doesn't work for some reason you have to remove
-it manually.  This command helps::
+it manually.  This command helps:
+
+.. code-block:: bash
 
     $ rm -r $(dirname $(python -c '
         import celery;print(celery.__file__)'))/app/task/
@@ -295,7 +300,9 @@ Tasks can now have callbacks and errbacks, and dependencies are recorded
                 with open('graph.dot') as fh:
                     result.graph.to_dot(fh)
 
-            which can than be used to produce an image::
+            which can then be used to produce an image:
+
+            .. code-block:: bash
 
                 $ dot -Tpng graph.dot -o graph.png
 
@@ -466,7 +473,9 @@ stable and is now documented as part of the offical API.
         >>> celery.control.cancel_consumer(queue_name,
         ...     destination=['w1.example.com'])
 
-    or using the :program:`celery control` command::
+    or using the :program:`celery control` command:
+
+    .. code-block:: bash
 
         $ celery control -d w1.example.com add_consumer queue
         $ celery control -d w1.example.com cancel_consumer queue
@@ -488,7 +497,9 @@ stable and is now documented as part of the offical API.
         >>> celery.control.autoscale(max=10, min=5,
         ...     destination=['w1.example.com'])
 
-    or using the :program:`celery control` command::
+    or using the :program:`celery control` command:
+
+    .. code-block:: bash
 
         $ celery control -d w1.example.com autoscale 10 5
 
@@ -504,7 +515,9 @@ stable and is now documented as part of the offical API.
         >>> celery.control.pool_grow(2, destination=['w1.example.com'])
         >>> celery.control.pool_shrink(2, destination=['w1.example.com'])
 
-    or using the :program:`celery control` command::
+    or using the :program:`celery control` command:
+
+    .. code-block:: bash
 
         $ celery control -d w1.example.com pool_grow 2
         $ celery control -d w1.example.com pool_shrink 2
@@ -568,7 +581,7 @@ Logging support now conforms better with best practices.
 
         logger = get_task_logger(__name__)
 
-        @celery.task()
+        @celery.task
         def add(x, y):
             logger.debug('Adding %r + %r' % (x, y))
             return x + y
@@ -660,7 +673,9 @@ The :option:`--app` option now 'auto-detects'
 
 E.g. if you have a project named 'proj' where the
 celery app is located in 'from proj.celery import celery',
-then the following will be equivalent::
+then the following will be equivalent:
+
+.. code-block:: bash
 
         $ celery worker --app=proj
         $ celery worker --app=proj.celery:
@@ -743,13 +758,17 @@ In Other News
         >>> import celery
         >>> print(celery.bugreport())
 
-    - Using the ``celery`` command-line program::
+    - Using the ``celery`` command-line program:
+
+        .. code-block:: bash
+
+            $ celery report
 
-        $ celery report
+    - Get it from remote workers:
 
-    - Get it from remote workers::
+        .. code-block:: bash
 
-        $ celery inspect report
+            $ celery inspect report
 
 - Module ``celery.log`` moved to :mod:`celery.app.log`.
 

+ 2 - 2
setup.py

@@ -15,7 +15,7 @@ import os
 import sys
 import codecs
 
-CELERY_COMPAT_PROGRAMS = os.environ.get('CELERY_COMPAT_PROGRAMS')
+CELERY_COMPAT_PROGRAMS = int(os.environ.get('CELERY_COMPAT_PROGRAMS', 1))
 
 if sys.version_info < (2, 6):
     raise Exception('Celery 3.1 requires Python 2.6 or higher.')
@@ -182,7 +182,7 @@ console_scripts = entrypoints['console_scripts'] = [
 
 if CELERY_COMPAT_PROGRAMS:
     console_scripts.extend([
-        'celeryd = celery.bin.celeryd:main',
+        'celeryd = celery.__main__:_compat_worker',
         'celerybeat = celery.bin.celerybeat:main',
         'camqadm = celery.bin.camqadm:main',
         'celeryev = celery.bin.celeryev:main',