Merge branch '3.0'

Conflicts:
	Changelog
	README.rst
	celery/__init__.py
	celery/app/amqp.py
	celery/app/utils.py
	celery/apps/worker.py
	celery/backends/base.py
	celery/bin/celery.py
	celery/datastructures.py
	celery/events/__init__.py
	celery/local.py
	celery/tests/utilities/test_info.py
	celery/tests/worker/test_control.py
	celery/utils/log.py
	celery/utils/threads.py
	celery/worker/consumer.py
	celery/worker/job.py
	docs/getting-started/brokers/index.rst
	docs/getting-started/brokers/ironmq.rst
	docs/getting-started/first-steps-with-celery.rst
	docs/includes/introduction.txt
	requirements/default.txt
	setup.cfg
Ask Solem committed 12 years ago
commit c58f15461f

+ 4 - 3
README.rst

@@ -8,7 +8,7 @@
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/celery/celery/
-:Keywords: task queue, job queue, asynchronous, rabbitmq, amqp, redis,
+:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis,
   python, webhooks, queue, distributed
 
 --
@@ -127,7 +127,7 @@ It supports...
         - RabbitMQ_, Redis_,
         - MongoDB_, Beanstalk_,
         - CouchDB_, SQLAlchemy_,
-        - Django ORM, Amazon SQS,
+        - Django ORM, Amazon SQS, `IronMQ`_
         - and more...
 
     - **Concurrency**
@@ -139,7 +139,7 @@ It supports...
         - AMQP, Redis
         - memcached, MongoDB
         - SQLAlchemy, Django ORM
-        - Apache Cassandra
+        - Apache Cassandra, IronCache
 
     - **Serialization**
 
@@ -156,6 +156,7 @@ It supports...
 .. _Beanstalk: http://kr.github.com/beanstalkd
 .. _CouchDB: http://couchdb.apache.org
 .. _SQLAlchemy: http://sqlalchemy.org
+.. _`IronMQ`: http://iron.io
 
 Framework Integration
 =====================

+ 1 - 1
celery/app/__init__.py

@@ -134,4 +134,4 @@ def shared_task(*args, **kwargs):
 
     if len(args) == 1 and isinstance(args[0], Callable):
        return create_shared_task(**kwargs)(args[0])
-    return create_shared_task(**kwargs)
+    return create_shared_task(*args, **kwargs)

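The ``shared_task`` fix above forwards positional arguments instead of silently dropping them. For context, a minimal sketch of the two decorator call styles this function supports (the task bodies are illustrative, not from this commit)::

    from celery import shared_task

    @shared_task  # bare decorator: args[0] is the function itself
    def add(x, y):
        return x + y

    @shared_task(ignore_result=True)  # called with options: returns a decorator
    def notify(message):
        print(message)
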
+ 7 - 3
celery/app/amqp.py

@@ -26,7 +26,7 @@ from . import routes as _routes
 
 #: Human readable queue declaration.
 QUEUE_FORMAT = """
-. {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
+.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
 key={0.routing_key}
 """
 
@@ -301,11 +301,15 @@ class TaskPublisher(TaskProducer):
 class TaskConsumer(Consumer):
     app = None
 
-    def __init__(self, channel, queues=None, app=None, **kw):
+    def __init__(self, channel, queues=None, app=None, accept=None, **kw):
         self.app = app or self.app
+        if accept is None:
+            accept = self.app.conf.CELERY_ACCEPT_CONTENT
         super(TaskConsumer, self).__init__(
             channel,
-            queues or list(self.app.amqp.queues.consume_from.values()), **kw
+            queues or list(self.app.amqp.queues.consume_from.values()),
+            accept=accept,
+            **kw
         )
 

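With this change a ``TaskConsumer`` created without an explicit ``accept`` argument falls back to the new CELERY_ACCEPT_CONTENT setting (documented later in this commit). A minimal sketch of how an application opts in — the app name and broker URL are illustrative::

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    # Consumers will now only accept JSON-serialized messages;
    # anything else (e.g. pickle) is discarded with an error.
    app.conf.CELERY_ACCEPT_CONTENT = ['json']
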
+ 6 - 2
celery/app/base.py

@@ -177,6 +177,7 @@ class Celery(object):
             return proxies_to_curapp(*args, **opts)
 
         def inner_create_task_cls(shared=True, filter=None, **opts):
+            _filt = filter  # stupid 2to3
 
             def _create_task_cls(fun):
                 if shared:
@@ -192,14 +193,17 @@ class Celery(object):
                 # return a proxy object that is only evaluated when first used
                 promise = PromiseProxy(self._task_from_fun, (fun, ), opts)
                 self._pending.append(promise)
-                if filter:
-                    return filter(promise)
+                if _filt:
+                    return _filt(promise)
                 return promise
 
             return _create_task_cls
 
         if len(args) == 1 and isinstance(args[0], Callable):
             return inner_create_task_cls(**opts)(*args)
+        if args:
+            raise TypeError(
+                'task() takes no arguments (%s given)' % (len(args, )))
         return inner_create_task_cls(**opts)
 
     def _task_from_fun(self, fun, **options):

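A short illustration of the stricter signature check added above (the task body is illustrative)::

    from celery import Celery

    app = Celery()

    @app.task  # a single callable argument still works
    def ping():
        return 'pong'

    app.task('oops')  # now raises TypeError instead of being silently ignored
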
+ 2 - 1
celery/app/control.py

@@ -97,7 +97,8 @@ class Control(object):
 
     def __init__(self, app=None):
         self.app = app_or_default(app)
-        self.mailbox = self.Mailbox('celery', type='fanout')
+        self.mailbox = self.Mailbox('celery', type='fanout',
+                                    accept=self.app.conf.CELERY_ACCEPT_CONTENT)
 
     @cached_property
     def inspect(self):

+ 1 - 0
celery/app/defaults.py

@@ -90,6 +90,7 @@ NAMESPACES = {
         'WRITE_CONSISTENCY': Option(type='string'),
     },
     'CELERY': {
+        'ACCEPT_CONTENT': Option(None, type='any'),
         'ACKS_LATE': Option(False, type='bool'),
         'ALWAYS_EAGER': Option(False, type='bool'),
         'ANNOTATIONS': Option(type='any'),

+ 10 - 3
celery/app/task.py

@@ -509,9 +509,16 @@ class Task(object):
 
         :param args: Positional arguments to retry with.
         :param kwargs: Keyword arguments to retry with.
-        :keyword exc: Optional exception to raise instead of
-                      :exc:`~celery.exceptions.MaxRetriesExceededError`
-                      when the max restart limit has been exceeded.
+        :keyword exc: Custom exception to report when the max restart
+            limit has been exceeded (default:
+            :exc:`~celery.exceptions.MaxRetriesExceededError`).
+
+            If this argument is set and retry is called while
+            an exception was raised (``sys.exc_info()`` is set)
+            it will attempt to reraise the current exception.
+
+            If no exception was raised it will raise the ``exc``
+            argument provided.
         :keyword countdown: Time in seconds to delay the retry for.
         :keyword eta: Explicit time and date to run the retry at
                       (must be a :class:`~datetime.datetime` instance).

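A sketch of the documented ``exc`` behaviour, in the same decorator style used in ``docs/userguide/tasks.rst`` below (``download`` is a hypothetical helper)::

    @app.task(max_retries=3)
    def fetch(url):
        try:
            return download(url)  # hypothetical helper that may fail
        except IOError as exc:
            # While an exception is being handled, retry re-raises it once
            # max_retries is exceeded; with no exception in flight, the
            # `exc` argument itself would be raised instead.
            raise fetch.retry(exc=exc, countdown=5)
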
+ 15 - 0
celery/apps/worker.py

@@ -14,6 +14,7 @@ from __future__ import absolute_import, print_function
 
 import logging
 import os
+import platform as _platform
 import sys
 import warnings
 
@@ -66,6 +67,7 @@ ARTLINES = [
 BANNER = """\
 {hostname} v{version}
 
+<<<<<<< HEAD
 [Configuration]
 . broker:      {conninfo}
 . app:         {app}
@@ -74,6 +76,18 @@ BANNER = """\
 
 [Queues]
 {queues}
+=======
+%(platform)s
+
+[config]
+.> broker:      %(conninfo)s
+.> app:         %(app)s
+.> concurrency: %(concurrency)s
+.> events:      %(events)s
+
+[queues]
+%(queues)s
+>>>>>>> 3.0
 """
 
 EXTRA_INFO_FMT = """
@@ -193,6 +207,7 @@ class Worker(WorkController):
             version=VERSION_BANNER,
             conninfo=self.app.connection().as_uri(),
             concurrency=concurrency,
+            platform=_platform.platform(),
             events=events,
             queues=app.amqp.queues.format(indent=0, indent_first=False),
         ).splitlines()

+ 1 - 0
celery/backends/amqp.py

@@ -48,6 +48,7 @@ class AMQPBackend(BaseBackend):
 
     BacklogLimitExceeded = BacklogLimitExceeded
 
+    supports_autoexpire = True
     supports_native_join = True
 
     retry_policy = {

+ 5 - 0
celery/backends/base.py

@@ -59,6 +59,11 @@ class BaseBackend(object):
     #: If true the backend must implement :meth:`get_many`.
     supports_native_join = False
 
+    #: If true the backend must automatically expire results.
+    #: The daily backend_cleanup periodic task will not be triggered
+    #: in this case.
+    supports_autoexpire = False
+
     def __init__(self, app=None, serializer=None,
                  max_cached_results=None, **kwargs):
         from celery.app import app_or_default

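A minimal sketch of a backend opting in to the new flag (the class is hypothetical)::

    class TTLBackend(BaseBackend):
        # Results stored by this backend expire on their own, so beat's
        # daily celery.backend_cleanup task is not scheduled for it
        # (see the celery/beat.py change below).
        supports_autoexpire = True
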
+ 1 - 0
celery/backends/cache.py

@@ -81,6 +81,7 @@ backends = {'memcache': lambda: get_best_memcache,
 
 class CacheBackend(KeyValueStoreBackend):
     servers = None
+    supports_autoexpire = True
     supports_native_join = True
     implements_incr = True
 

+ 1 - 0
celery/backends/cassandra.py

@@ -45,6 +45,7 @@ class CassandraBackend(BaseBackend):
     detailed_mode = False
     _retry_timeout = 300
     _retry_wait = 3
+    supports_autoexpire = True
 
     def __init__(self, servers=None, keyspace=None, column_family=None,
                  cassandra_options=None, detailed_mode=False, **kwargs):

+ 2 - 0
celery/backends/mongodb.py

@@ -48,6 +48,8 @@ class MongoBackend(BaseBackend):
     mongodb_taskmeta_collection = 'celery_taskmeta'
     mongodb_max_pool_size = 10
 
+    supports_autoexpire = False
+
     def __init__(self, *args, **kwargs):
         """Initialize MongoDB backend instance.
 

+ 1 - 0
celery/backends/redis.py

@@ -48,6 +48,7 @@ class RedisBackend(KeyValueStoreBackend):
     #: Maximium number of connections in the pool.
     max_connections = None
 
+    supports_autoexpire = True
     supports_native_join = True
     implements_incr = True
 

+ 2 - 1
celery/beat.py

@@ -164,7 +164,8 @@ class Scheduler(object):
 
     def install_default_entries(self, data):
         entries = {}
-        if self.app.conf.CELERY_TASK_RESULT_EXPIRES:
+        if self.app.conf.CELERY_TASK_RESULT_EXPIRES and \
+                not self.app.backend.supports_autoexpire:
             if 'celery.backend_cleanup' not in data:
                 entries['celery.backend_cleanup'] = {
                     'task': 'celery.backend_cleanup',

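The effect of the guard, mirroring the test added to ``celery/tests/app/test_beat.py`` below (a sketch; assumes an ``app`` whose backend sets ``supports_autoexpire = True``, such as Redis)::

    s = Scheduler(app=app)
    s.install_default_entries({})
    # No cleanup entry is installed even though
    # CELERY_TASK_RESULT_EXPIRES is set:
    assert 'celery.backend_cleanup' not in s.data
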
+ 2 - 0
celery/bin/celery.py

@@ -312,6 +312,8 @@ class _RemoteControl(Command):
 
         ensure_broadcast_supported(self.app)
 
+        ensure_broadcast_supported(self.app)
+
         destination = kwargs.get('destination')
         timeout = kwargs.get('timeout') or self.choices[method][0]
         if destination and isinstance(destination, string_t):

+ 2 - 1
celery/events/__init__.py

@@ -278,7 +278,8 @@ class EventReceiver(ConsumerMixin):
 
     def get_consumers(self, Consumer, channel):
         return [Consumer(queues=[self.queue],
-                         callbacks=[self._receive], no_ack=True)]
+                         callbacks=[self._receive], no_ack=True,
+                         accept=['application/json'])]
 
     def on_consume_ready(self, connection, channel, consumers,
                          wakeup=True, **kwargs):

+ 44 - 3
celery/local.py

@@ -16,6 +16,47 @@ import importlib
 
 from .five import long_t, string, string_t
 
+__module__ = __name__  # used by Proxy class body
+
+
+def _default_cls_attr(name, type_, cls_value):
+    # Proxy uses properties to forward the standard
+    # class attributes __module__, __name__ and __doc__ to the real
+    # object, but these need to be a string when accessed from
+    # the Proxy class directly.  This is a hack to make that work.
+    # -- See Issue #1087.
+
+    def __new__(cls, getter):
+        instance = type_.__new__(cls, cls_value)
+        instance.__getter = getter
+        return instance
+
+    def __get__(self, obj, cls=None):
+        return self.__getter(obj) if obj is not None else self
+
+    def __set__(self, obj, value):
+        raise AttributeError('readonly attribute')
+
+    return type(name, (type_, ), {
+        '__new__': __new__, '__get__': __get__, '__set__': __set__,
+    })
+
+
+class _cls_spec(str):
+
+    def __new__(cls, getter):
+        s = str.__new__(cls, getter.__module__)
+        s.__getter = getter
+        return s
+
+    def __get__(self, obj, cls=None):
+        if obj is not None:
+            return self.__getter(obj)
+        return self
+
+    def __set__(self, obj, value):
+        raise AttributeError('cannot set attribute')
+
 
 def symbol_by_name(name, aliases={}, imp=None, package=None,
                    sep='.', default=None, **kwargs):
@@ -97,18 +138,18 @@ class Proxy(object):
         if name is not None:
             object.__setattr__(self, '__custom_name__', name)
 
-    @property
+    @_default_cls_attr('name', str, __name__)
     def __name__(self):
         try:
             return self.__custom_name__
         except AttributeError:
             return self._get_current_object().__name__
 
-    @property
+    @_default_cls_attr('module', str, __module__)
     def __module__(self):
         return self._get_current_object().__module__
 
-    @property
+    @_default_cls_attr('doc', str, __doc__)
     def __doc__(self):
         return self._get_current_object().__doc__
 

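The practical effect of ``_default_cls_attr``, matching the test added in ``celery/tests/utilities/test_local.py`` below::

    from celery.local import Proxy

    # On the class itself the attributes are plain strings again:
    assert Proxy.__name__ == 'Proxy'
    assert Proxy.__module__ == 'celery.local'

    # On an instance they still forward to the proxied object:
    p = Proxy(lambda: int)
    assert p.__name__ == 'int'
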
+ 1 - 1
celery/task/trace.py

@@ -107,7 +107,7 @@ class TraceInfo(object):
             signals.task_failure.send(sender=task, task_id=req.id,
                                       exception=exc, args=req.args,
                                       kwargs=req.kwargs,
-                                      traceback=einfo.tb,
+                                      traceback=tb,
                                       einfo=einfo)
             return einfo
         finally:

+ 11 - 0
celery/tests/app/test_beat.py

@@ -6,6 +6,7 @@ from datetime import datetime, timedelta
 from mock import Mock, call, patch
 from nose import SkipTest
 
+from celery import current_app
 from celery import beat
 from celery import task
 from celery.five import keys, string_t
@@ -221,11 +222,21 @@ class test_Scheduler(Case):
             s = mScheduler()
             s.install_default_entries({})
             self.assertNotIn('celery.backend_cleanup', s.data)
+        current_app.backend.supports_autoexpire = False
         with patch_settings(CELERY_TASK_RESULT_EXPIRES=30,
                             CELERYBEAT_SCHEDULE={}):
             s = mScheduler()
             s.install_default_entries({})
             self.assertIn('celery.backend_cleanup', s.data)
+        current_app.backend.supports_autoexpire = True
+        try:
+            with patch_settings(CELERY_TASK_RESULT_EXPIRES=31,
+                                CELERYBEAT_SCHEDULE={}):
+                s = mScheduler()
+                s.install_default_entries({})
+                self.assertNotIn('celery.backend_cleanup', s.data)
+        finally:
+            current_app.backend.supports_autoexpire = False
 
     def test_due_tick(self):
         scheduler = mScheduler()

+ 2 - 2
celery/tests/utilities/test_info.py

@@ -32,8 +32,8 @@ QUEUES = {
 }
 
 
-QUEUE_FORMAT1 = '. queue1           exchange=exchange1(type1) key=bind1'
-QUEUE_FORMAT2 = '. queue2           exchange=exchange2(type2) key=bind2'
+QUEUE_FORMAT1 = '.> queue1           exchange=exchange1(type1) key=bind1'
+QUEUE_FORMAT2 = '.> queue2           exchange=exchange2(type2) key=bind2'
 
 
 class test_Info(Case):

+ 5 - 0
celery/tests/utilities/test_local.py

@@ -17,6 +17,11 @@ class test_try_import(Case):
 
 class test_Proxy(Case):
 
+    def test_std_class_attributes(self):
+        self.assertEqual(Proxy.__name__, 'Proxy')
+        self.assertEqual(Proxy.__module__, 'celery.local')
+        self.assertIsInstance(Proxy.__doc__, str)
+
     def test_name(self):
 
         def real():

+ 12 - 2
celery/utils/log.py

@@ -18,6 +18,8 @@ from contextlib import contextmanager
 from billiard import current_process, util as mputil
 from kombu.log import get_logger as _get_logger, LOG_LEVELS
 
+from celery.five import string_t
+
 from .encoding import safe_str, str_t
 from .term import colored
 
@@ -95,11 +97,19 @@ class ColorFormatter(logging.Formatter):
         color = self.colors.get(levelname)
 
         if self.use_color and color:
+            msg = record.msg
             try:
-                record.msg = safe_str(str_t(color(record.msg)))
+                # safe_str will repr the color object
+                # and color will break on non-string objects
+                # so need to reorder calls based on type.
+                # Issue #427
+                if isinstance(msg, string_t):
+                    record.msg = str_t(color(safe_str(msg)))
+                else:
+                    record.msg = safe_str(color(msg))
             except Exception as exc:
                 record.msg = '<Unrepresentable {0!r}: {1!r}>'.format(
-                    type(record.msg), exc)
+                    type(msg), exc)
                 record.exc_info = True
 
         if not PY3 and 'processName' not in record.__dict__:

+ 1 - 0
celery/utils/threads.py

@@ -20,6 +20,7 @@ from celery.local import Proxy
 from celery.five import THREAD_TIMEOUT_MAX, items
 
 USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS')
+PY3 = sys.version_info[0] == 3
 
 
 class bgThread(threading.Thread):

+ 1 - 1
celery/worker/consumer.py

@@ -239,7 +239,7 @@ class Consumer(object):
         """
         crit("Can't decode message body: %r (type:%r encoding:%r raw:%r')",
              exc, message.content_type, message.content_encoding,
-             dump_body(message, message.body))
+             dump_body(message, message.body), exc_info=1)
         message.ack()
 
     def on_close(self):

+ 1 - 1
celery/worker/control.py

@@ -159,7 +159,7 @@ def dump_schedule(panel, safe=False, **kwargs):
 
 @Panel.register
 def dump_reserved(panel, safe=False, **kwargs):
-    reserved = state.reserved_requests
+    reserved = state.reserved_requests - state.active_requests
     if not reserved:
         logger.debug('--Empty queue--')
         return []

+ 4 - 3
celery/worker/job.py

@@ -73,9 +73,10 @@ class Request(object):
         __slots__ = (
             'app', 'name', 'id', 'args', 'kwargs', 'on_ack', 'delivery_info',
             'hostname', 'eventer', 'connection_errors', 'task', 'eta',
-            'expires', 'request_dict', 'acknowledged', 'utc', 'time_start',
-            'worker_pid', '_already_revoked', '_terminate_on_ack', '_tzlocal',
-            '__weakref__',
+            'expires', 'request_dict', 'acknowledged',
+            'utc', 'time_start', 'worker_pid', '_already_revoked',
+            '_terminate_on_ack',
+            '_tzlocal', '__weakref__',
         )
 
     #: Format string used to log task success.

+ 5 - 5
docs/conf.py

@@ -84,11 +84,11 @@ exclude_trees = ['.build']
 add_function_parentheses = True
 
 intersphinx_mapping = {
-        "http://docs.python.org/dev": None,
-        "http://kombu.readthedocs.org/en/latest/": None,
-        "http://django-celery.readthedocs.org/en/latest": None,
-        "http://cyme.readthedocs.org/en/latest": None,
-        "http://amqp.readthedocs.org/en/latest": None,
+    'python': ('http://docs.python.org/dev', None),
+    'kombu': ('http://kombu.readthedocs.org/en/latest/', None),
+    'djcelery': ('http://django-celery.readthedocs.org/en/latest', None),
+    'cyme': ('http://cyme.readthedocs.org/en/latest', None),
+    'amqp': ('http://amqp.readthedocs.org/en/latest', None),
 }
 
 # The name of the Pygments (syntax highlighting) style to use.

+ 25 - 1
docs/configuration.rst

@@ -213,6 +213,7 @@ Can be one of the following:
 .. _`MongoDB`: http://mongodb.org
 .. _`Redis`: http://redis.io
 .. _`Cassandra`: http://cassandra.apache.org/
+.. _`IronCache`: http://www.iron.io/cache
 
 .. setting:: CELERY_RESULT_SERIALIZER
 
@@ -775,6 +776,28 @@ persistent messages.
 Broker Settings
 ---------------
 
+.. setting:: CELERY_ACCEPT_CONTENT
+
+CELERY_ACCEPT_CONTENT
+~~~~~~~~~~~~~~~~~~~~~
+
+A whitelist of content-types/serializers to allow.
+
+If a message is received that is not in this list then
+the message will be discarded with an error.
+
+By default any content type is enabled (including pickle and yaml)
+so make sure untrusted parties do not have access to your broker.
+See :ref:`guide-security` for more.
+
+Example::
+
+    # using serializer name
+    CELERY_ACCEPT_CONTENT = ['json']
+
+    # or the actual content-type (MIME)
+    CELERY_ACCEPT_CONTENT = ['application/json']
+
 .. setting:: BROKER_TRANSPORT
 
 BROKER_TRANSPORT
@@ -801,7 +824,8 @@ default is ``amqp``, which uses ``librabbitmq`` by default or falls back to
 ``couchdb``.
 It can also be a fully qualified path to your own transport implementation.
 
-See the Kombu documentation for more information about broker URLs.
+See :ref:`kombu:connection-urls` in the Kombu documentation for more
+information.
 
 .. setting:: BROKER_HEARTBEAT
 

+ 49 - 0
docs/getting-started/brokers/index.rst

@@ -9,6 +9,11 @@
 
 Celery supports several message transport alternatives.
 
+.. _broker_toc:
+
+Broker Instructions
+===================
+
 .. toctree::
     :maxdepth: 1
 
@@ -21,3 +26,47 @@ Celery supports several message transport alternatives.
     couchdb
     beanstalk
     ironmq
+
+.. _broker-overview:
+
+Broker Overview
+===============
+
+This is a comparison table of the different transports supported;
+more information can be found in the documentation for each
+individual transport (see :ref:`broker_toc`).
+
++---------------+--------------+----------------+--------------------+
+| **Name**      | **Status**   | **Monitoring** | **Remote Control** |
++---------------+--------------+----------------+--------------------+
+| *RabbitMQ*    | Stable       | Yes            | Yes                |
++---------------+--------------+----------------+--------------------+
+| *Redis*       | Stable       | Yes            | Yes                |
++---------------+--------------+----------------+--------------------+
+| *Mongo DB*    | Experimental | Yes            | Yes                |
++---------------+--------------+----------------+--------------------+
+| *Beanstalk*   | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *Amazon SQS*  | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *Couch DB*    | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *Zookeeper*   | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *Django DB*   | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *SQLAlchemy*  | Experimental | No             | No                 |
++---------------+--------------+----------------+--------------------+
+| *Iron MQ*     | 3rd party    | No             | No                 |
++---------------+--------------+----------------+--------------------+
+
+Experimental brokers may be functional but they do not have
+dedicated maintainers.
+
+Missing monitor support means that the transport does not
+implement events, and as such Flower, `celery events`, `celerymon`
+and other event-based monitoring tools will not work.
+
+Remote control means the ability to inspect and manage workers
+at runtime using the `celery inspect` and `celery control` commands
+(and other tools using the remote control API).

+ 3 - 3
docs/getting-started/brokers/ironmq.rst

@@ -46,14 +46,14 @@ And in this case the broker url may only be::
     ironmq://
 
 Clouds
------
+------
 
 The default cloud/region is ``AWS us-east-1``. You can choose the IronMQ Rackspace cloud by changing the URL to::
 
     ironmq://project_id:token@mq-rackspace-dfw.iron.io
 
 Results
-======
+=======
 
 You can store results in IronCache with the same Iron.io credentials, just set the results URL with the same syntax
 as the broker URL, but changing the start to ``ironcache``::
@@ -65,6 +65,6 @@ This will default to a cache named "Celery", if you want to change that::
     ironcache:://project_id:token@/awesomecache
 
 More Information
-===============
+================
 
 You can find more information in the [iron_celery README](http://github.com/iron-io/iron_celery).

+ 4 - 9
docs/getting-started/first-steps-with-celery.rst

@@ -97,16 +97,11 @@ robust system in production.
 Other brokers
 -------------
 
-In addition to the above, there are other transport implementations
-to choose from, including
+In addition to the above, there are other experimental transport implementations
+to choose from, including :ref:`Amazon SQS <broker-sqs>`, :ref:`broker-mongodb`
+and :ref:`IronMQ <broker-ironmq>`.
 
-* :ref:`Amazon SQS <broker-sqs>`
-* :ref:`broker-mongodb`
-* :ref:`IronMQ <broker-ironmq>`
-
-See also `Transport Comparison`_.
-
-.. _`Transport Comparison`: http://kombu.readthedocs.org/en/latest/introduction.html#transport-comparison
+See :ref:`broker-overview` for a full list.
 
 .. _celerytut-installation:
 

+ 100 - 2
docs/history/changelog-3.0.rst

@@ -13,14 +13,73 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
 3.0.18
 ======
-:release-date: 2013-04-09 04:00:00 P.M BST
+:release-date: 2013-04-12 05:00:00 P.M BST
+
+- Now depends on :mod:`kombu` 2.5.10.
+
+    See the :ref:`kombu changelog <kombu:version-2.5.10>`.
 
 - Now depends on :mod:`billiard` 2.7.3.26.
 
-- Now depends on :mod:`kombu` 2.5.9.
+- Can now specify a whitelist of accepted serializers using
+  the new :setting:`CELERY_ACCEPT_CONTENT` setting.
+
+    This means that you can force the worker to discard messages
+    serialized with pickle and other untrusted serializers.
+    For example to only allow JSON serialized messages use::
+
+        CELERY_ACCEPT_CONTENT = ['json']
+
+    You can also specify MIME types in the whitelist::
+
+        CELERY_ACCEPT_CONTENT = ['application/json']
+
+- Fixed deadlock in multiprocessing's pool caused by the
+  semaphore not being released when terminated by signal.
+
+- Processes Pool: It's now possible to debug pool processes using GDB.
+
+- ``celery report`` now censors possibly secret settings, like passwords
+  and secret tokens.
+
+    You should still check the output before pasting anything
+    on the internet.
+
+- Connection URLs now ignore multiple '+' tokens.
 
 - Worker/statedb: Now uses pickle protocol 2 (Py2.5+)
 
+- Fixed Python 3 compatibility issues.
+
+- Worker: A warning is now given if a worker is started with the
+  same node name as an existing worker.
+
+- Worker: Fixed a deadlock that could occur while revoking tasks (Issue #1297).
+
+- Worker: The :sig:`HUP` handler now closes all open file descriptors
+  before restarting to ensure file descriptors do not leak (Issue #1270).
+
+- Worker: Optimized storing/loading the revoked tasks list (Issue #1289).
+
+    After this change the ``--statedb`` file will take up more disk space,
+    but loading from and storing the revoked tasks will be considerably
+    faster (what before took 5 minutes will now take less than a second).
+
+- Celery will now suggest alternatives if there's a typo in the
+  broker transport name (e.g. ``ampq`` -> ``amqp``).
+
+- Worker: The auto-reloader would cause a crash if a monitored file
+  was unlinked.
+
+    Fix contributed by Agris Ameriks.
+
+- Fixed AsyncResult pickling error.
+
+    Fix contributed by Thomas Minor.
+
+- Fixed handling of Unicode in logging output when using log colors
+  (Issue #427).
+
 - :class:`~celery.app.utils.ConfigurationView` is now a ``MutableMapping``.
 
     Contributed by Aaron Harnly.
@@ -33,9 +92,48 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 
     Fix contributed by Theo Spears.
 
+- The `inspect reserved` remote control command included active (started) tasks
+  with the reserved tasks (Issue #1030).
+
+- The :signal:`task_failure` signal received a modified traceback object
+  meant for pickling purposes; this has been fixed so that it now
+  receives the real traceback instead.
+
+- The ``@task`` decorator silently ignored positional arguments;
+  it now raises the expected :exc:`TypeError` instead (Issue #1125).
+
+- The worker will now properly handle messages with invalid
+  eta/expires fields (Issue #1232).
+
+- The ``pool_restart`` remote control command now reports
+  an error if the :setting:`CELERYD_POOL_RESTARTS` setting is not set.
+
+- ``celery.conf.add_defaults`` can now be used with non-dict objects.
+
+- Fixed compatibility problems in the Proxy class (Issue #1087).
+
+    The class attributes ``__module__``, ``__name__`` and ``__doc__``
+    are now meaningful string objects.
+
+    Thanks to Marius Gedminas.
+
+- MongoDB Backend: The :setting:`MONGODB_BACKEND_SETTINGS` setting
+  now accepts an ``option`` key that lets you forward arbitrary kwargs
+  to the underlying ``pymongo.Connection`` object (Issue #1015).
+
+- Beat: The daily backend cleanup task is no longer enabled
+  for result backends that support automatic result expiration (Issue #1031).
+
 - Canvas list operations now takes application instance from the first
   task in the list, instead of depending on the ``current_app`` (Issue #1249).
 
+- Worker: Message decoding error log message now includes traceback
+  information.
+
+- Worker: The startup banner now includes system platform.
+
+- ``celery inspect|status|control`` now gives an error if used
+  with an SQL based broker transport.
 
 .. _version-3.0.17:
 

+ 4 - 3
docs/includes/introduction.txt

@@ -2,7 +2,7 @@
 :Web: http://celeryproject.org/
 :Download: http://pypi.python.org/pypi/celery/
 :Source: http://github.com/celery/celery/
-:Keywords: task queue, job queue, asynchronous, rabbitmq, amqp, redis,
+:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis,
   python, webhooks, queue, distributed
 
 --
@@ -121,7 +121,7 @@ It supports...
         - RabbitMQ_, Redis_,
         - MongoDB_, Beanstalk_,
         - CouchDB_, SQLAlchemy_,
-        - Django ORM, Amazon SQS,
+        - Django ORM, Amazon SQS, `IronMQ`_
         - and more...
 
     - **Concurrency**
@@ -133,7 +133,7 @@ It supports...
         - AMQP, Redis
         - memcached, MongoDB
         - SQLAlchemy, Django ORM
-        - Apache Cassandra
+        - Apache Cassandra, IronCache
 
     - **Serialization**
 
@@ -150,6 +150,7 @@ It supports...
 .. _Beanstalk: http://kr.github.com/beanstalkd
 .. _CouchDB: http://couchdb.apache.org
 .. _SQLAlchemy: http://sqlalchemy.org
+.. _`IronMQ`: http://iron.io
 
 Framework Integration
 =====================

+ 1 - 4
docs/userguide/calling.rst

@@ -318,10 +318,7 @@ or for each individual task, or even per message.
 
 There's built-in support for :mod:`pickle`, `JSON`, `YAML`
 and `msgpack`, and you can also add your own custom serializers by registering
-them into the Kombu serializer registry (see `Kombu: Serialization of Data`_).
-
-.. _`Kombu: Serialization of Data`:
-    http://packages.python.org/kombu/introduction.html#serialization-of-data
+them into the Kombu serializer registry (see :ref:`kombu:guide-serialization`).
 
 Each option has its advantages and disadvantages.
 

+ 2 - 5
docs/userguide/routing.rst

@@ -7,11 +7,8 @@
 .. note::
 
     Alternate routing concepts like topic and fanout may not be
-    available for all transports, please consult the `transport comparison table`_.
-
-.. _`transport comparison table`:
-    http://kombu.readthedocs.org/en/latest/introduction.html#transport-comparison
-
+    available for all transports, please consult the
+    :ref:`transport comparison table <kombu:transport-comparison>`.
 
 .. contents::
     :local:

+ 17 - 1
docs/userguide/security.rst

@@ -100,7 +100,23 @@ unauthenticated.
 
 .. [*] http://nadiana.com/python-pickle-insecure
 
-Celery comes with a special `auth` serializer that validates
+You can disable untrusted content by specifying
+a whitelist of accepted content-types in the :setting:`CELERY_ACCEPT_CONTENT`
+setting:
+
+.. code-block:: python
+
+    CELERY_ACCEPT_CONTENT = ['json']
+
+
+This accepts a list of serializer names and content-types, so you could
+also specify the content type for json:
+
+.. code-block:: python
+
+    CELERY_ACCEPT_CONTENT = ['application/json']
+
+Celery also comes with a special `auth` serializer that validates
 communication between Celery clients and workers, making sure
 that messages originates from trusted sources.
 Using `Public-key cryptography` the `auth` serializer can verify the

+ 25 - 4
docs/userguide/tasks.rst

@@ -324,10 +324,6 @@ Here's an example using ``retry``:
         except (Twitter.FailWhaleError, Twitter.LoginError) as exc:
             raise send_twitter_status.retry(exc=exc)
 
-Here the `exc` argument was used to pass the current exception to
-:meth:`~@Task.retry`.  Both the exception and the traceback will
-be available in the task state (if a result backend is enabled).
-
 .. note::
 
     The :meth:`~@Task.retry` call will raise an exception so any code after the retry
@@ -339,6 +335,31 @@ be available in the task state (if a result backend is enabled).
     This is normal operation and always happens unless the
     ``throw`` argument to retry is set to :const:`False`.
 
+The ``exc`` argument is used to pass exception information that is
+used in logs, and when storing task results.
+Both the exception and the traceback will
+be available in the task state (if a result backend is enabled).
+
+If the task has a ``max_retries`` value the current exception
+will be re-raised if the max number of retries has been exceeded,
+but this will not happen if:
+
+- An ``exc`` argument was not given.
+
+    In this case the :exc:`celery.exceptions.MaxRetriesExceededError`
+    exception will be raised.
+
+- There is no current exception
+
+    If there's no original exception to re-raise the ``exc``
+    argument will be used instead, so:
+
+    .. code-block:: python
+
+        send_twitter_status.retry(exc=Twitter.LoginError())
+
+    will raise the ``exc`` argument given.
+
 .. _task-retry-custom-delay:
 
 Using a custom retry delay

+ 1 - 2
docs/whatsnew-2.5.rst

@@ -18,14 +18,13 @@ While this version is backward compatible with previous versions
 it is important that you read the following section.
 
 If you use Celery in combination with Django you must also
-read the `django-celery changelog`_ and upgrade to `django-celery 2.5`_.
+read the :ref:`django-celery changelog <djcelery:version-2.5.0>` and upgrade to `django-celery 2.5`_.
 
 This version is officially supported on CPython 2.5, 2.6, 2.7, 3.2 and 3.3,
 as well as PyPy and Jython.
 
 
 .. _`website`: http://celeryproject.org/
-.. _`django-celery changelog`: http://bit.ly/djcelery-25-changelog
 .. _`django-celery 2.5`: http://pypi.python.org/pypi/django-celery/
 
 .. contents::

+ 1 - 1
requirements/default.txt

@@ -1,3 +1,3 @@
 pytz
 billiard>=2.7.3.26
-kombu>=2.5.9
+kombu>=2.5.10

+ 1 - 1
setup.cfg

@@ -16,4 +16,4 @@ upload-dir = docs/.build/html
 [bdist_rpm]
 requires = pytz
            billiard >= 2.7.3.26
-           kombu >= 2.5.9
+           kombu >= 2.5.10