
Doc improvements

Ask Solem committed 12 years ago
commit 4f784187e6

+ 5 - 4
celery/app/builtins.py

@@ -183,12 +183,16 @@ def add_chain_task(app):
         accept_magic_kwargs = False

         def prepare_steps(self, args, tasks):
+            print('ARGS: %r' % (args, ))
             steps = deque(tasks)
             next_step = prev_task = prev_res = None
             tasks, results = [], []
+            i = 0
             while steps:
                 # First task get partial args from chain.
-                task = maybe_subtask(steps.popleft()).clone()
+                task = maybe_subtask(steps.popleft())
+                task = task.clone() if i else task.clone(args)
+                i += 1
                 tid = task.options.get('task_id')
                 if tid is None:
                     tid = task.options['task_id'] = uuid()
@@ -212,9 +216,6 @@ def add_chain_task(app):
                 tasks.append(task)
                 prev_task, prev_res = task, res

-            # First task receives partial args for chain()
-            if args and not tasks[0].immutable:
-                tasks[0].args = tuple(args) + tuple(tasks[0].args or ())
             return tasks, results

         def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
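Read from the caller's side, this hunk changes where a chain's partial
arguments are applied: they are now folded into the first task's signature
while the steps are prepared (``task.clone(args)``) instead of being patched
onto ``tasks[0]`` afterwards.  A minimal sketch of the calling pattern this
serves, assuming a reachable broker; the task names are illustrative:

.. code-block:: python

    from celery import Celery

    celery = Celery('demo', broker='amqp://guest@localhost//')

    @celery.task()
    def add(x, y):
        return x + y

    @celery.task()
    def mul(x, y):
        return x * y

    # A partial chain: the first signature is still missing one argument.
    c = add.s(4) | mul.s(8)

    # Calling the chain with 16 completes the first signature, so the
    # chain evaluates as (16 + 4) * 8 == 160 once a worker runs it.
    res = c(16)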

+ 5 - 4
celery/app/control.py

@@ -155,7 +155,7 @@ class Control(object):
                               **kwargs)

     def add_consumer(self, queue, exchange=None, exchange_type='direct',
-            routing_key=None, **options):
+            routing_key=None, options=None, **kwargs):
         """Tell all (or specific) workers to start consuming from a new queue.

         Only the queue name is required as if only the queue is specified
@@ -177,9 +177,10 @@ class Control(object):

         """
         return self.broadcast('add_consumer',
-                arguments={'queue': queue, 'exchange': exchange,
-                           'exchange_type': exchange_type,
-                           'routing_key': routing_key}, **options)
+                arguments=dict({'queue': queue, 'exchange': exchange,
+                                'exchange_type': exchange_type,
+                                'routing_key': routing_key}, **options),
+                **kwargs)

     def cancel_consumer(self, queue, **kwargs):
         """Tell all (or specific) workers to stop consuming from ``queue``.

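With the new signature, queue and exchange declaration options travel in the
``options`` dict while broadcast options such as ``reply`` and ``destination``
are forwarded through ``**kwargs``.  A hedged usage sketch (the app instance,
worker name and option values are illustrative; the same call shape appears in
the workers guide hunk further down):

.. code-block:: python

    from celery import Celery

    myapp = Celery('proj', broker='amqp://guest@localhost//')

    reply = myapp.control.add_consumer(
        'baz',                       # queue name
        exchange='ex',
        exchange_type='topic',
        routing_key='media.*',
        options={                    # extra declaration options for the queue
            'queue_durable': False,
            'exchange_durable': False,
        },
        reply=True,                  # broadcast option, passed on via **kwargs
        destination=['worker1.local'],
    )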
+ 1 - 1
celery/canvas.py

@@ -206,7 +206,7 @@ class chain(Signature):
         self.subtask_type = 'chain'

     def __call__(self, *args, **kwargs):
-        return self.apply_async(*args, **kwargs)
+        return self.apply_async(args, kwargs)

     @classmethod
     def from_dict(self, d):
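After this change, arguments given when a chain instance is called are handed
to ``apply_async`` as the ``args`` tuple and ``kwargs`` dict (partial
arguments for the first step) rather than being unpacked as ``apply_async``'s
own parameters.  A small sketch, reusing the illustrative ``add``/``mul``
tasks from the sketch above:

.. code-block:: python

    c = add.s(4) | mul.s(8)

    # These two calls are now equivalent: the positional 16 becomes a
    # partial argument for the first task in the chain.
    res1 = c(16)
    res2 = c.apply_async((16,), {})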

+ 2 - 2
docs/getting-started/first-steps-with-celery.rst

@@ -1,9 +1,9 @@
 .. _tut-celery:
 .. _first-steps:

-========================
+=========================
  First Steps with Celery
-========================
+=========================

 Celery is a task queue with batteries included.
 It is easy to use so that you can get started without learning

+ 0 - 1
docs/getting-started/index.rst

@@ -8,7 +8,6 @@
 .. toctree::
     :maxdepth: 2

-    intro
     introduction
     brokers/index
     first-steps-with-celery

+ 0 - 233
docs/getting-started/intro.rst

@@ -1,233 +0,0 @@
-=============================
- Introduction to Task Queues
-=============================
-
-.. contents::
-    :local:
-    :depth: 1
-
-What are Task Queues?
-=====================
-
-HELLO
-
-What do I need?
-===============
-
-.. sidebar:: Version Requirements
-    :subtitle: Celery version 2.6 runs on
-
-    - Python ❨2.5, 2.6, 2.7, 3.2, 3.3❩
-    - PyPy ❨1.8, 1.9❩
-    - Jython ❨2.5, 2.7❩.
-
-    This is the last version to support Python 2.5,
-    and from the next version Python 2.6 or newer is required.
-    The last version to support Python 2.4 was Celery series 2.2.
-
-*Celery* requires a message broker to send and receive messages,
-but this term has been stretched to include everything from
-your fridge to financial-grade messaging systems.
-
-*Celery* can run on a single machine, on multiple machines, or even
-across datacenters.
-
-Celery is…
-==========
-
-.. topic:: ”
-
-    - **Simple**
-
-        Celery is easy to use and maintain, and does *not need configuration files*.
-
-        It has an active, friendly community you can talk to for support,
-        including a :ref:`mailing-list <mailing-list>` and and :ref:`IRC
-        channel <irc-channel>`.
-
-        Here's one of the simplest applications you can make:
-
-        .. code-block:: python
-
-            from celery import Celery
-
-            celery = Celery('hello', broker='amqp://guest@localhost//')
-
-            @celery.task()
-            def hello():
-                return 'hello world'
-
-    - **Highly Available**
-
-        Workers and clients will automatically retry in the event
-        of connection loss or failure, and some brokers support
-        HA in way of *Master/Master* or *Master/Slave* replication.
-
-    - **Fast**
-
-        A single Celery process can process millions of tasks a minute,
-        with sub-millisecond round-trip latency (using RabbitMQ,
-        py-librabbitmq, and optimized settings).
-
-    - **Flexible**
-
-        Almost every part of *Celery* can be extended or used on its own,
-        Custom pool implementations, serializers, compression schemes, logging,
-        schedulers, consumers, producers, autoscalers, broker transports and much more.
-
-
-.. topic:: It supports
-
-    .. hlist::
-        :columns: 2
-
-        - **Brokers**
-
-            - :ref:`RabbitMQ <broker-rabbitmq>`
-            - :ref:`Redis <broker-redis>`,
-            - :ref:`MongoDB <broker-mongodb>` 
-            - :ref:`Beanstalk <broker-beanstalk>`
-            - :ref:`CouchDB <broker-couchdb>` 
-            - :ref:`SQLAlchemy <broker-sqlalchemy>`
-            - :ref:`Django ORM <broker-django>` 
-            - :ref:`Amazon SQS <broker-sqs>`
-            - and more…
-
-        - **Concurrency**
-
-            - multiprocessing,
-            - Eventlet_, gevent_
-            - threads
-            - single
-
-        - **Result Stores**
-
-            - AMQP 
-            - Redis
-            - memcached 
-            - MongoDB
-            - SQLAlchemy
-            - Django ORM
-            - Apache Cassandra
-
-        - **Serialization**
-
-            - *pickle*, *json*, *yaml*, *msgpack*.
-            - Fine-grained serialization settings.
-
-        - **Compression**
-
-            - *zlib*, *bzip2*, or uncompressed.
-
-        - **Crypto**
-
-            - Cryptographic message signing.
-
-
-
-.. topic:: Features
-
-    .. hlist::
-        :columns: 2
-
-        - **Monitoring**
-
-            The stream of monitoring events emitted by the worker are used
-            by built-in and external tools to tell you what your cluster
-            is doing in real-time.
-
-            :ref:`Read more… <guide-monitoring>`.
-
-        - **Time Limits & Rate Limits**
-
-            You can control how many tasks can be executed per second/minute/hour,
-            or how long a task can be allowed to run, and this can be set as
-            a default, for a specific worker or individually for each task type.
-
-            :ref:`Read more… <worker-time-limits>`.
-
-        - **Autoreloading**
-
-            While in development workers can be configured to automatically reload source
-            code as it changes.
-
-            :ref:`Read more… <worker-autoreloading>`.
-
-        - **Autoscaling**
-
-            Dynamically resizing the worker pool depending on load,
-            or custom metrics specified by the user, used to limit
-            memory usage in shared hosting/cloud environment or to
-            enforce a given quality of service.
-
-            :ref:`Read more… <worker-autoscaling>`.
-
-        - **Resource Leak Protection**
-
-            The :option:`--maxtasksperchild` option is used for user tasks
-            leaking resources, like memory or file descriptors, that
-            are simply out of your control.
-
-            :ref:`Read more… <worker-maxtasksperchild>`.
-
-        - **User Components**
-
-            Each worker component can be customized, and additional components
-            can be defined by the user.  The worker is built up using "boot steps" — a
-            dependency graph enabling fine grained control of the worker's
-            internals.
-
-.. _`Eventlet`: http://eventlet.net/
-.. _`gevent`: http://gevent.org/
-
-
-.. topic:: I want to ⟶
-
-    .. hlist::
-        :columns: 2
-
-        - :ref:`get the return value of a task <task-states>`
-        - :ref:`use logging from my task <task-logging>`
-        - :ref:`learn about best practices <task-best-practices>`
-        - :ref:`create a custom task base class <task-custom-classes>`
-        - :ref:`add a callback to a group of tasks <chords>`
-        - :ref:`split a task into several chunks <chunking>`
-        - :ref:`optimize the worker <guide-optimizing>`
-        - :ref:`see a list of built-in task states <task-builtin-states>`
-        - :ref:`create custom task states <custom-states>`
-        - :ref:`set a custom task name <task-names>`
-        - :ref:`track when a task starts <task-track-started>`
-        - :ref:`retry a task when it fails <task-retry>`
-        - :ref:`get the id of the current task <task-request-info>`
-        - :ref:`know what queue a task was delivered to <task-request-info>`
-        - :ref:`see a list of running workers <monitoring-celeryctl>`
-        - :ref:`purge all messages <monitoring-celeryctl>`
-        - :ref:`inspect what the workers are doing <monitoring-celeryctl>`
-        - :ref:`see what tasks a worker has registerd <monitoring-celeryctl>`
-        - :ref:`migrate tasks to a new broker <monitoring-celeryctl>`
-        - :ref:`see a list of event message types <event-reference>`
-        - :ref:`contribute to Celery <contributing>`
-        - :ref:`learn about available configuration settings <configuration>`
-        - :ref:`receive email when a task fails <conf-error-mails>`
-        - :ref:`get a list of people and companies using Celery <res-using-celery>`
-        - :ref:`write my own remote control command <worker-custom-control-commands>`
-        - change worker queues at runtime
-
-.. topic:: Jump to ⟶
-
-    .. hlist::
-        :columns: 4
-
-        - :ref:`Brokers <brokers>`
-        - :ref:`Tasks <guide-tasks>`
-        - :ref:`Calling <guide-calling>`
-        - :ref:`Workers <guide-workers>`
-        - :ref:`Monitoring <guide-monitoring>`
-        - :ref:`Optimizing <guide-optimizing>`
-        - :ref:`Security <guide-security>`
-        - :ref:`Routing <guide-routing>`
-        - :ref:`Configuration Reference <configuration>`
-        - :ref:`Django <django>`
-        - :ref:`Contributing <contributing>`
-        - :ref:`Signals <signals>`
-        - :ref:`FAQ <faq>`

+ 312 - 4
docs/getting-started/introduction.rst

@@ -1,7 +1,315 @@
 .. _intro:

-==============
- Introduction
-==============
+========================
+ Introduction to Celery
+========================

-.. include:: ../includes/introduction.txt
+.. contents::
+    :local:
+    :depth: 1
+
+What is a Task Queue?
+=====================
+
+Task queues are used as a mechanism to distribute work across threads or
+machines.
+
+A task queue's input is a unit of work called a task.  Dedicated worker
+processes then constantly monitor the queue for new work to perform.
+
+Celery communicates via messages using a broker
+to mediate between clients and workers.  To initiate a task a client puts a
+message on the queue, the broker then delivers the message to a worker.
+
+A Celery system can consist of multiple workers and brokers, giving way
+to high availability and horizontal scaling.
+
+Celery is written in Python, but the protocol can be implemented in any
+language.  So far there's RCelery_ for the Ruby programming language, and a
+`PHP client`_, but language interoperability can also be achieved
+by :ref:`using webhooks <guide-webhooks>`.
+
+.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/
+.. _`PHP client`: https://github.com/gjedeer/celery-php
+.. _`using webhooks`:
+    http://celery.github.com/celery/userguide/remote-tasks.html
+
+What do I need?
+===============
+
+.. sidebar:: Version Requirements
+    :subtitle: Celery version 2.6 runs on
+
+    - Python ❨2.5, 2.6, 2.7, 3.2, 3.3❩
+    - PyPy ❨1.8, 1.9❩
+    - Jython ❨2.5, 2.7❩.
+
+    This is the last version to support Python 2.5,
+    and from the next version Python 2.6 or newer is required.
+    The last version to support Python 2.4 was Celery series 2.2.
+
+*Celery* requires a message broker to send and receive messages.
+The RabbitMQ, Redis and MongoDB broker transports are feature complete,
+but there is also support for a myriad of other solutions, including
+using SQLite for local development.
+
+*Celery* can run on a single machine, on multiple machines, or even
+across datacenters.
+
+Get Started
+===========
+
+If this is the first time you're trying to use Celery, or you are
+new to Celery 2.6 coming from previous versions, then you should read our
+getting started tutorials:
+
+- :ref:`first-steps`
+- :ref:`next-steps`
+
+Celery is…
+==========
+
+.. topic:: ”
+
+    - **Simple**
+
+        Celery is easy to use and maintain, and does *not need configuration files*.
+
+        It has an active, friendly community you can talk to for support,
+        including a :ref:`mailing-list <mailing-list>` and an :ref:`IRC
+        channel <irc-channel>`.
+
+        Here's one of the simplest applications you can make:
+
+        .. code-block:: python
+
+            from celery import Celery
+
+            celery = Celery('hello', broker='amqp://guest@localhost//')
+
+            @celery.task()
+            def hello():
+                return 'hello world'
+
+    - **Highly Available**
+
+        Workers and clients will automatically retry in the event
+        of connection loss or failure, and some brokers support
+        HA by way of *Master/Master* or *Master/Slave* replication.
+
+    - **Fast**
+
+        A single Celery process can process millions of tasks a minute,
+        with sub-millisecond round-trip latency (using RabbitMQ,
+        py-librabbitmq, and optimized settings).
+
+    - **Flexible**
+
+        Almost every part of *Celery* can be extended or used on its own:
+        custom pool implementations, serializers, compression schemes, logging,
+        schedulers, consumers, producers, autoscalers, broker transports and much more.
+
+
+.. topic:: It supports
+
+    .. hlist::
+        :columns: 2
+
+        - **Brokers**
+
+            - :ref:`RabbitMQ <broker-rabbitmq>`, :ref:`Redis <broker-redis>`,
+            - :ref:`MongoDB <broker-mongodb>`, :ref:`Beanstalk <broker-beanstalk>`
+            - :ref:`CouchDB <broker-couchdb>`, :ref:`SQLAlchemy <broker-sqlalchemy>`
+            - :ref:`Django ORM <broker-django>`, :ref:`Amazon SQS <broker-sqs>`,
+            - and more…
+
+        - **Concurrency**
+
+            - multiprocessing,
+            - Eventlet_, gevent_
+            - threads/single threaded
+
+        - **Result Stores**
+
+            - AMQP, Redis
+            - memcached, MongoDB
+            - SQLAlchemy, Django ORM
+            - Apache Cassandra
+
+        - **Serialization**
+
+            - *pickle*, *json*, *yaml*, *msgpack*.
+            - *zlib*, *bzip2* compression.
+            - Cryptographic message signing.
+
+
+Features
+========
+
+.. topic:: \ 
+
+    .. hlist::
+        :columns: 2
+
+        - **Monitoring**
+
+            The stream of monitoring events emitted by the worker is used
+            by built-in and external tools to tell you what your cluster
+            is doing in real-time.
+
+            :ref:`Read more… <guide-monitoring>`.
+
+        - **Workflows**
+
+            Simple and complex workflows can be composed using
+            a set of powerful primitives we call the "canvas",
+            including grouping, chaining, chunking and more.
+
+            :ref:`Read more… <guide-canvas>`.
+
+        - **Time & Rate Limits**
+
+            You can control how many tasks can be executed per second/minute/hour,
+            or how long a task can be allowed to run, and this can be set as
+            a default, for a specific worker or individually for each task type.
+
+            :ref:`Read more… <worker-time-limits>`.
+
+        - **Scheduling**
+
+            You can specify the time to run a task in seconds or a
+            :class:`~datetime.datetime`, or you can use
+            periodic tasks for recurring events based on a
+            simple interval, or crontab expressions
+            supporting minute, hour, day of week, day of month, and
+            month of year.
+
+            :ref:`Read more… <guide-beat>`.
+
+        - **Autoreloading**
+
+            In development workers can be configured to automatically reload source
+            code as it changes, including inotify support on Linux.
+
+            :ref:`Read more… <worker-autoreloading>`.
+
+        - **Autoscaling**
+
+            Dynamically resizing the worker pool depending on load,
+            or custom metrics specified by the user, used to limit
+            memory usage in shared hosting/cloud environments or to
+            enforce a given quality of service.
+
+            :ref:`Read more… <worker-autoscaling>`.
+
+        - **Resource Leak Protection**
+
+            The :option:`--maxtasksperchild` option is used for user tasks
+            leaking resources, like memory or file descriptors, that
+            are simply out of your control.
+
+            :ref:`Read more… <worker-maxtasksperchild>`.
+
+        - **User Components**
+
+            Each worker component can be customized, and additional components
+            can be defined by the user.  The worker is built up using "boot steps" — a
+            dependency graph enabling fine grained control of the worker's
+            internals.
+
+.. _`Eventlet`: http://eventlet.net/
+.. _`gevent`: http://gevent.org/
+
+Framework Integration
+=====================
+
+Celery is easy to integrate with web frameworks, some of which even have
+integration packages:
+
+    +--------------------+------------------------+
+    | `Django`_          | `django-celery`_       |
+    +--------------------+------------------------+
+    | `Pyramid`_         | `pyramid_celery`_      |
+    +--------------------+------------------------+
+    | `Pylons`_          | `celery-pylons`_       |
+    +--------------------+------------------------+
+    | `Flask`_           | not needed             |
+    +--------------------+------------------------+
+    | `web2py`_          | `web2py-celery`_       |
+    +--------------------+------------------------+
+    | `Tornado`_         | `tornado-celery`_      |
+    +--------------------+------------------------+
+
+The integration packages are not strictly necessary, but they can make
+development easier, and sometimes they add important hooks like closing
+database connections at ``fork``.
+
+.. _`Django`: http://djangoproject.com/
+.. _`Pylons`: http://pylonshq.com/
+.. _`Flask`: http://flask.pocoo.org/
+.. _`web2py`: http://web2py.com/
+.. _`Bottle`: http://bottlepy.org/
+.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
+.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
+.. _`django-celery`: http://pypi.python.org/pypi/django-celery
+.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
+.. _`web2py-celery`: http://code.google.com/p/web2py-celery/
+.. _`Tornado`: http://www.tornadoweb.org/
+.. _`tornado-celery`: http://github.com/mher/tornado-celery/
+
+Quickjump
+=========
+
+.. topic:: I want to ⟶
+
+    .. hlist::
+        :columns: 2
+
+        - :ref:`get the return value of a task <task-states>`
+        - :ref:`use logging from my task <task-logging>`
+        - :ref:`learn about best practices <task-best-practices>`
+        - :ref:`create a custom task base class <task-custom-classes>`
+        - :ref:`add a callback to a group of tasks <chords>`
+        - :ref:`split a task into several chunks <chunking>`
+        - :ref:`optimize the worker <guide-optimizing>`
+        - :ref:`see a list of built-in task states <task-builtin-states>`
+        - :ref:`create custom task states <custom-states>`
+        - :ref:`set a custom task name <task-names>`
+        - :ref:`track when a task starts <task-track-started>`
+        - :ref:`retry a task when it fails <task-retry>`
+        - :ref:`get the id of the current task <task-request-info>`
+        - :ref:`know what queue a task was delivered to <task-request-info>`
+        - :ref:`see a list of running workers <monitoring-celeryctl>`
+        - :ref:`purge all messages <monitoring-celeryctl>`
+        - :ref:`inspect what the workers are doing <monitoring-celeryctl>`
+        - :ref:`see what tasks a worker has registered <monitoring-celeryctl>`
+        - :ref:`migrate tasks to a new broker <monitoring-celeryctl>`
+        - :ref:`see a list of event message types <event-reference>`
+        - :ref:`contribute to Celery <contributing>`
+        - :ref:`learn about available configuration settings <configuration>`
+        - :ref:`receive email when a task fails <conf-error-mails>`
+        - :ref:`get a list of people and companies using Celery <res-using-celery>`
+        - :ref:`write my own remote control command <worker-custom-control-commands>`
+        - :ref:`change worker queues at runtime <worker-queues>`
+
+.. topic:: Jump to ⟶
+
+    .. hlist::
+        :columns: 4
+
+        - :ref:`Brokers <brokers>`
+        - :ref:`Applications <guide-app>`
+        - :ref:`Tasks <guide-tasks>`
+        - :ref:`Calling <guide-calling>`
+        - :ref:`Workers <guide-workers>`
+        - :ref:`Daemonizing <daemonizing>`
+        - :ref:`Monitoring <guide-monitoring>`
+        - :ref:`Optimizing <guide-optimizing>`
+        - :ref:`Security <guide-security>`
+        - :ref:`Routing <guide-routing>`
+        - :ref:`Configuration <configuration>`
+        - :ref:`Django <django>`
+        - :ref:`Contributing <contributing>`
+        - :ref:`Signals <signals>`
+        - :ref:`FAQ <faq>`
+        - :ref:`API Reference <apiref>`

+ 407 - 0
docs/getting-started/next-steps.rst

@@ -141,7 +141,414 @@ These options are described in more detailed in the :ref:`Workers Guide <guide-w
         $ celery --app=proj.celery:celery

+.. _calling-tasks:
+
+Calling Tasks
+=============
+
+You can call a task using the :meth:`delay` method::
+
+    >>> add.delay(2, 2)
+
+This method is actually a star-argument shortcut to another method called
+:meth:`apply_async`::
+
+    >>> add.apply_async((2, 2))
+
+The latter enables you to specify execution options like the time to run
+(countdown), the queue it should be sent to and so on::
+
+    >>> add.apply_async((2, 2), queue='lopri', countdown=10)
+
+In the above example the task will be sent to a queue named ``lopri`` and the
+task will execute, at the earliest, 10 seconds after the message was sent.
+
+Applying the task directly will execute the task in the current process,
+so that no message is sent::
+
+    >>> add(2, 2)
+    4
+
+These three methods - :meth:`delay`, :meth:`apply_async`, and applying
+(``__call__``) - represent the Celery calling API, which is also used for
+subtasks.
+
+A more detailed overview of the Calling API can be found in the
+:ref:`Calling User Guide <guide-calling>`.
+
+Every task invocation will be given a unique identifier (a UUID); this
+is the task id.
+
+The ``delay`` and ``apply_async`` methods return an :class:`~@AsyncResult`
+instance, which can be used to keep track of the task's execution state.
+But for this you need to enable a :ref:`result backend <task-result-backend>` so that
+the state can be stored somewhere.
+
+Results are disabled by default because there is no result
+backend that suits every application, so to choose one you need to consider
+the drawbacks of each individual backend.  For many tasks
+keeping the return value isn't even very useful, so it's a sensible default to
+have.  Also note that result backends are not used for monitoring tasks and workers;
+for that Celery uses dedicated event messages (see :ref:`guide-monitoring`).
+
+If you have a result backend configured you can retrieve the return
+value of a task::
+
+    >>> res = add.delay(2, 2)
+    >>> res.get(timeout=1)
+    4
+
+You can find the task's id by looking at the :attr:`id` attribute::
+
+    >>> res.id
+    d6b3aea2-fb9b-4ebc-8da4-848818db9114
+
+You can also inspect the exception and traceback if the task raised an
+exception; in fact, ``result.get()`` will propagate any errors by default::
+
+    >>> res = add.delay(2)
+    >>> res.get(timeout=1)
+    Traceback (most recent call last):
+    File "<stdin>", line 1, in <module>
+    File "/opt/devel/celery/celery/result.py", line 113, in get
+        interval=interval)
+    File "/opt/devel/celery/celery/backends/amqp.py", line 138, in wait_for
+        raise self.exception_to_python(meta['result'])
+    TypeError: add() takes exactly 2 arguments (1 given)
+
+If you don't wish for the errors to propagate then you can disable that
+by passing the ``propagate`` argument::
+
+    >>> res.get(propagate=False)
+    TypeError('add() takes exactly 2 arguments (1 given)',)
+
+In this case it will return the exception instance raised instead,
+and so to check whether the task succeeded or failed you will have to
+use the corresponding methods on the result instance::
+
+    >>> res.failed()
+    True
+
+    >>> res.successful()
+    False
+
+So how does it know if the task has failed or not?  It can find out by looking
+at the task's *state*::
+
+    >>> res.state
+    'FAILURE'
+
+A task can only be in a single state, but it can progress through several
+states. The stages of a typical task can be::
+
+    PENDING -> STARTED -> SUCCESS
+
+The started state is a special state that is only recorded if the
+:setting:`CELERY_TRACK_STARTED` setting is enabled, or if the
+``@task(track_started=True)`` option is set for the task.
+
+The pending state is actually not a recorded state, but rather
+the default state for any task id that is unknown, which you can see
+from this example::
+
+    >>> from proj.celery import celery
+
+    >>> res = celery.AsyncResult('this-id-does-not-exist')
+    >>> res.state
+    'PENDING'
+
+If the task is retried the stages can become even more complex,
+e.g. for a task that is retried two times the stages would be::
+
+    PENDING -> STARTED -> RETRY -> STARTED -> RETRY -> STARTED -> SUCCESS
+
+To read more about task states you should see the :ref:`task-states` section
+in the tasks user guide.
+
 .. _designing-workflows:

 *Canvas*: Designing Workflows
 =============================
+
+We just learned how to call a task using the task's ``delay`` method,
+and this is often all you need, but sometimes you may want to pass the
+signature of a task invocation to another process or as an argument to another
+function; for this Celery uses something called *subtasks*.
+
+A subtask wraps the arguments and execution options of a single task
+invocation in a way such that it can be passed to functions or even serialized
+and sent across the wire.
+
+You can create a subtask for the ``add`` task using the arguments ``(2, 2)``,
+and a countdown of 10 seconds like this::
+
+    >>> add.subtask((2, 2), countdown=10)
+    tasks.add(2, 2)
+
+There is also a shortcut using star arguments::
+
+    >>> add.s(2, 2)
+    tasks.add(2, 2)
+
+and it also supports keyword arguments::
+
+    >>> add.s(2, 2, debug=True)
+    tasks.add(2, 2, debug=True)
+
+From any subtask instance we can inspect the different fields::
+
+    >>> s = add.subtask((2, 2), {'debug': True}, countdown=10)
+    >>> s.args
+    (2, 2)
+    >>> s.kwargs
+    {'debug': True}
+    >>> s.options
+    {'countdown': 10}
+
+And there's that calling API again...
+-------------------------------------
+
+Subtask instances also support the calling API, which means you can use
+``delay``, ``apply_async``, or *calling* it directly.
+
+But there is a difference in that the subtask may already have
+an argument signature specified.  The ``add`` task takes two arguments,
+so a subtask specifying two arguments would make a complete signature::
+
+    >>> s1 = add.s(2, 2)
+    >>> res = s1.delay()
+    >>> res.get()
+    4
+
+But, you can also make incomplete signatures to create what we call
+*partials*::
+
+    # incomplete partial:  add(?, 2)
+    >>> s2 = add.s(2)
+
+``s2`` is now a partial subtask that needs another argument to be complete,
+and this can actually be resolved when calling the subtask::
+
+    # resolves the partial: add(8, 2)
+    >>> res = s2.delay(8)
+    >>> res.get()
+    10
+
+Here we added the argument 8, which was prepended to the existing argument 2
+forming a complete signature of ``add(8, 2)``.
+
+Keyword arguments can also be added later; these are then merged with any
+existing keyword arguments, but with new arguments taking precedence::
+
+    >>> s3 = add.s(2, 2, debug=True)
+    >>> s3.delay(debug=False)   # debug is now False.
+
+As stated, subtasks support the calling API, and combined with partial
+arguments this means that:
+
+- ``subtask.apply_async(args=(), kwargs={}, **options)``
+
+    Calls the subtask with optional partial arguments and partial
+    keyword arguments.  Also supports partial execution options.
+
+- ``subtask.delay(*args, **kwargs)``
+
+  Star argument version of ``apply_async``.  Any arguments will be prepended
+  to the arguments in the signature, and keyword arguments are merged with any
+  existing keys.
+
+So this all seems very useful, but what can we actually do with these?
+To get to that we must introduce the canvas primitives...
+
+The Primitives
+--------------
+
+- ``group``
+
+    The group primitive is a subtask that takes a list of tasks that should
+    be applied in parallel.
+
+- ``chain``
+
+    The chain primitive lets us link together subtasks so that one is called
+    after the other, essentially forming a *chain* of callbacks.
+
+- ``chord``
+
+    A chord is just like a group but with a callback.  A chord consists
+    of a header group and a body, where the body is a task that should execute
+    after all of the tasks in the header are complete.
+
+- ``map``
+
+    The map primitive works like the built-in ``map`` function, but creates
+    a temporary task where a list of arguments is applied to the task.
+    E.g. ``task.map([1, 2])`` results in a single task
+    being called, applying the arguments in order to the task function so
+    that the result is::
+
+        res = [task(1), task(2)]
+
+- ``starmap``
+
+    Works exactly like map except the arguments are applied as ``*args``.
+    For example ``add.starmap([(2, 2), (4, 4)])`` results in a single
+    task calling::
+
+        res = [add(2, 2), add(4, 4)]
+
+- ``chunks``
+
+    Chunking splits a long list of arguments into parts, e.g. the operation::
+
+        >>> add.chunks(zip(xrange(1000), xrange(1000)), 10)
+
+    will create 10 tasks that apply 100 items each.
+
+
+The primitives are also subtasks themselves, so that they can be combined
+in any number of ways to compose complex workflows.
+
+Here are some examples:
+
+- Simple chain
+
+    Here's a simple chain; the first task executes, passing its return value
+    to the next task in the chain, and so on.
+
+    .. code-block:: python
+
+        # 2 + 2 + 4 + 8
+        >>> res = chain(add.s(2, 2), add.s(4), add.s(8))()
+        >>> res.get()
+        16
+
+    This can also be written using pipes::
+
+        >>> (add.s(2, 2) | add.s(4) | add.s(8))().get()
+        16
+
+- Immutable subtasks
+
+    As we have learned, signatures can be partial, so that arguments can be
+    added to the existing arguments, but you may not always want that,
+    for example if you don't want the result of the previous task in a chain.
+
+    In that case you can mark the subtask as immutable, so that the arguments
+    cannot be changed::
+
+        >>> add.subtask((2, 2), immutable=True)
+
+    There's also an ``.si`` shortcut for this::
+
+        >>> add.si(2, 2)
+
+    Now we can create a chain of independent tasks instead::
+
+        >>> res = (add.si(2, 2) | add.si(4, 4) | add.s(8, 8))()
+        >>> res.get()
+        16
+
+        >>> res.parent.get()
+        8
+
+        >>> res.parent.parent.get()
+        4
+
+- Simple group
+
+    We can easily create a group of tasks to execute in parallel::
+
+        >>> res = group(add.s(i, i) for i in xrange(10))()
+        >>> res.get(timeout=1)
+        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
+
+    - For primitives `.apply_async` is special...
+
+        as it will create a temporary task to apply the tasks in,
+        for example by *applying the group*::
+
+            >>> g = group(add.s(i, i) for i in xrange(10))
+            >>> g()  # << applying
+
+        the act of sending the messages for the tasks in the group
+        will happen in the current process,
+        but with ``.apply_async`` this happens in a temporary task
+        instead::
+
+            >>> g = group(add.s(i, i) for i in xrange(10))
+            >>> g.apply_async()
+
+        This is useful because we can e.g. specify a time for the
+        messages in the group to be called::
+
+            >>> g.apply_async(countdown=10)
+
+- Simple chord
+
+    The chord primitive enables us to add a callback to be called when
+    all of the tasks in a group have finished executing, which is often
+    required for algorithms that aren't embarrassingly parallel::
+
+        >>> res = chord((add.s(i, i) for i in xrange(10)), xsum.s())()
+        >>> res.get()
+        90
+
+    The above example creates 10 tasks that all start in parallel,
+    and when all of them are complete the return values are combined
+    into a list and sent to the ``xsum`` task.
+
+    The body of a chord can also be immutable, so that the return value
+    of the group is not passed on to the callback::
+
+        >>> chord((import_contact.s(c) for c in contacts),
+        ...       notify_complete.si(import_id)).apply_async()
+
+    Note the use of ``.si`` above which creates an immutable subtask.
+
+- Blow your mind by combining
+
+    Chains can be partial too::
+
+        >>> c1 = (add.s(4) | mul.s(8))
+
+        # (16 + 4) * 8
+        >>> res = c1(16)
+        >>> res.get()
+        160
+
+    Which means that you can combine chains too::
+
+        # ((4 + 16) * 2 + 4) * 8
+        >>> c2 = (add.s(4, 16) | mul.s(2) | (add.s(4) | mul.s(8)))
+        >>> c2
+        tasks.add(4, 16) | tasks.mul(2) | tasks.add(4) | tasks.mul(8)
+
+        >>> res = c2()
+        >>> res.get()
+        352
+
+    Chaining a group together with another task will automatically
+    upgrade it to be a chord::
+
+        >>> c3 = (group(add.s(i, i) for i in xrange(10)) | xsum.s())
+        >>> res = c3()
+        >>> res.get()
+        90
+
+    Groups and chords accept partial arguments too, in which case
+    the return value of the previous task is forwarded to all tasks in the group::
+
+
+        >>> new_user_workflow = (create_user.s() | group(
+        ...                      import_contacts.s(),
+        ...                      send_welcome_email.s()))
+        >>> new_user_workflow.delay(username='artv',
+        ...                         first='Art',
+        ...                         last='Vandelay',
+        ...                         email='art@vandelay.com')
+
+
+Be sure to read more about workflows in the :ref:`Canvas <guide-canvas>` user
+guide.
+

+ 10 - 3
docs/index.rst

@@ -2,9 +2,16 @@
  Celery - Distributed Task Queue
 =================================

-Celery aims to be a fast, reliable and flexible, best-of-breed solution
-to process vast amounts of messages in a distributed fashion, while
-providing operations with the tools to maintain such a system.
+Celery is a simple, flexible and reliable distributed system to
+process vast amounts of messages, while providing operations with
+the tools required to maintain such a system.
+
+It's a task queue with focus on real-time processing, while also
+supporting task scheduling.
+
+Celery has a large and diverse community of users and contributors,
+you should come join us :ref:`on IRC <irc-channel>`
+or :ref:`our mailing-list <mailing-list>`.

 Celery is Open Source and licensed under the `BSD License`_.
 
 

+ 2 - 0
docs/reference/index.rst

@@ -1,3 +1,5 @@
+.. _apiref:
+
 ===============
  API Reference
 ===============

+ 124 - 0
docs/userguide/workers.rst

@@ -391,6 +391,130 @@ You can also define your own rules for the autoscaler by subclassing
 Some ideas for metrics include load average or the amount of memory available.
 You can specify a custom autoscaler with the :setting:`CELERYD_AUTOSCALER` setting.

+.. _worker-queues:
+
+Queues
+======
+
+A worker instance can consume from any number of queues.
+By default it will consume from all queues defined in the
+:setting:`CELERY_QUEUES` setting (which if not specified defaults to the
+queue named ``celery``).
+
+You can specify what queues to consume from at startup,
+by giving a comma separated list of queues to the :option:`-Q` option::
+
+    $ celery worker -l info -Q foo,bar,baz
+
+If the queue name is defined in :setting:`CELERY_QUEUES` it will use that
+configuration, but if it's not defined in the list of queues Celery will
+automatically generate a new queue for you (depending on the
+:setting:`CELERY_CREATE_MISSING_QUEUES` option).
+
+You can also tell the worker to start and stop consuming from a queue at
+runtime using the remote control commands :control:`add_consumer` and
+:control:`cancel_consumer`.
+
+.. control:: add_consumer
+
+Queues: Adding consumers
+------------------------
+
+The :control:`add_consumer` control command will tell one or more workers
+to start consuming from a queue. This operation is idempotent.
+
+To tell all workers in the cluster to start consuming from a queue
+named "``foo``" you can use the :program:`celery control` program::
+
+    $ celery control add_consumer foo
+    -> worker1.local: OK
+        started consuming from u'foo'
+
+If you want to target a specific worker you can use the
+:option:`--destination` argument::
+
+    $ celery control add_consumer foo -d worker1.local
+
+The same can be accomplished dynamically using the :meth:`@control.add_consumer` method::
+
+    >>> myapp.control.add_consumer('foo', reply=True)
+    [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]
+
+    >>> myapp.control.add_consumer('foo', reply=True,
+    ...                            destination=['worker1.local'])
+    [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]
+
+
+By now we have only used automatic queues, defined by nothing more than
+a queue name.  If you need more control you can also specify the exchange,
+routing_key and other options::
+
+    >>> myapp.control.add_consumer(
+    ...     queue='baz',
+    ...     exchange='ex',
+    ...     exchange_type='topic',
+    ...     routing_key='media.*',
+    ...     options={
+    ...         'queue_durable': False,
+    ...         'exchange_durable': False,
+    ...     },
+    ...     reply=True,
+    ...     destination=['worker1.local', 'worker2.local'])
+
+
+.. control:: cancel_consumer
+
+Queues: Cancelling consumers
+----------------------------
+
+You can cancel a consumer by queue name using the :control:`cancel_consumer`
+control command.
+
+To force all workers in the cluster to cancel consuming from a queue
+you can use the :program:`celery control` program::
+
+    $ celery control cancel_consumer foo
+
+The :option:`--destination` argument can be used to specify a worker, or a
+list of workers, to act on the command::
+
+    $ celery control cancel_consumer foo -d worker1.local
+
+
+You can also cancel consumers programmatically using the
+:meth:`@control.cancel_consumer` method::
+
+    >>> myapp.control.cancel_consumer('foo', reply=True)
+    [{u'worker1.local': {u'ok': u"no longer consuming from u'foo'"}}]
+
+.. control:: active_queues
+
+Queues: List of active queues
+-----------------------------
+
+You can get a list of queues that a worker consumes from by using
+the :control:`active_queues` control command::
+
+    $ celery inspect active_queues
+    [...]
+
+Like all other remote control commands this also supports the
+:option:`--destination` argument used to specify which workers should
+reply to the request::
+
+    $ celery inspect active_queues -d worker1.local
+    [...]
+
+
+This can also be done programmatically by using the
+:meth:`@control.inspect.active_queues` method::
+
+    >>> myapp.inspect().active_queues()
+    [...]
+
+    >>> myapp.inspect(['worker1.local']).active_queues()
+    [...]
+
 .. _worker-autoreloading:

 Autoreloading

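The Queues section added above refers to :setting:`CELERY_QUEUES` without
showing one; a hedged configuration sketch (queue and exchange names are
illustrative, and the setting can equally be left unset so that missing queues
are created automatically):

.. code-block:: python

    from kombu import Exchange, Queue

    # Illustrative static queue declarations; a worker started with
    # `celery worker -Q foo,images` would consume from these two queues.
    CELERY_QUEUES = (
        Queue('foo', Exchange('foo'), routing_key='foo'),
        Queue('images', Exchange('media', type='topic'),
              routing_key='media.image'),
    )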
+ 17 - 11
docs/whatsnew-2.6.rst

@@ -4,18 +4,21 @@
  What's new in Celery 2.6
 ==========================

-Celery aims to be a flexible and reliable, best-of-breed solution
-to process vast amounts of messages in a distributed fashion, while
-providing operations with the tools to maintain such a system.
+Celery is a simple, flexible and reliable distributed system to
+process vast amounts of messages, while providing operations with
+the tools required to maintain such a system.
+
+It's a task queue with focus on real-time processing, while also
+supporting task scheduling.

 Celery has a large and diverse community of users and contributors,
 you should come join us :ref:`on IRC <irc-channel>`
 or :ref:`our mailing-list <mailing-list>`.

-To read more about Celery you should visit our `website`_.
+To read more about Celery you should go read the :ref:`introduction <intro>`.

 While this version is backward compatible with previous versions
-it is important that you read the following section.
+it's important that you read the following section.

 If you use Celery in combination with Django you must also
 read the `django-celery changelog`_ and upgrade to `django-celery 2.6`_.
@@ -27,10 +30,11 @@ as well as PyPy and Jython.

     - A new and improved API, that is both simpler and more powerful.

-        Everyone should read the new :ref:`first-steps` tutorial,
+        Everyone must read the new :ref:`first-steps` tutorial,
         and the new :ref:`next-steps` tutorial.

-    - Documentation rewritten and updated to use the new API
+        There are no plans to deprecate the old API, so you don't have
+        to be in a hurry to port your applications to the new API.
 
 
     - The worker is now thread-less, giving great performance improvements.

@@ -38,6 +42,11 @@ as well as PyPy and Jython.

     - The new "Canvas" makes it easy to define complex workflows.

+        Ever wanted to chain tasks together? This is possible, but
+        not just that, now you can even chain together groups and chords,
+        or even combine multiple chains.
+
+        Read more in the :ref:`Canvas <guide-canvas>` user guide.

 .. _`website`: http://celeryproject.org/
 .. _`django-celery changelog`: http://bit.ly/djcelery-26-changelog
@@ -201,9 +210,6 @@ Tasks can now have callbacks and errbacks, and dependencies are recorded
 `group`/`chord`/`chain` are now subtasks
 ----------------------------------------

-- The source code for these, including subtask, has been moved
-  to new module celery.canvas.
-
 - group is no longer an alias to TaskSet, but new alltogether,
   since it was very difficult to migrate the TaskSet class to become
   a subtask.
@@ -429,7 +435,7 @@ by default, it will first be bound (and configured) when
 a concrete subclass is created.

 This means that you can safely import and make task base classes,
-without also initializing the default app environment::
+without also initializing the app environment::

     from celery.task import Task
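A hedged sketch of what the lazily-bound base classes allow: a reusable base
class can be defined and imported without configuring an app, and is only
bound once a concrete task uses it (the names and the callback body are
illustrative):

.. code-block:: python

    from celery import Celery
    from celery.task import Task

    class DebugTask(Task):
        """Reusable base class; importing the module defining it does
        not initialize any app environment."""
        abstract = True

        def after_return(self, *args, **kwargs):
            print('task returned: {0!r}'.format(self.request))

    celery = Celery('proj', broker='amqp://guest@localhost//')

    @celery.task(base=DebugTask)
    def add(x, y):
        return x + y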