
Merge branch 'canvas_refactor'

Ask Solem 11 years ago
parent commit eb7dab17dd
52 changed files with 1423 additions and 1462 deletions
  1. celery/_state.py  +1 -3
  2. celery/app/amqp.py  +11 -4
  3. celery/app/base.py  +8 -16
  4. celery/app/builtins.py  +14 -181
  5. celery/app/task.py  +14 -27
  6. celery/app/trace.py  +174 -32
  7. celery/app/utils.py  +0 -1
  8. celery/backends/base.py  +4 -4
  9. celery/canvas.py  +209 -56
  10. celery/concurrency/asynpool.py  +0 -9
  11. celery/concurrency/base.py  +3 -1
  12. celery/concurrency/solo.py  +1 -0
  13. celery/contrib/batches.py  +1 -1
  14. celery/events/state.py  +2 -4
  15. celery/five.py  +9 -8
  16. celery/task/base.py  +1 -3
  17. celery/task/http.py  +1 -2
  18. celery/task/trace.py  +0 -12
  19. celery/tests/app/test_app.py  +2 -2
  20. celery/tests/app/test_builtins.py  +10 -12
  21. celery/tests/case.py  +28 -14
  22. celery/tests/compat_modules/test_compat.py  +0 -23
  23. celery/tests/compat_modules/test_compat_utils.py  +0 -4
  24. celery/tests/compat_modules/test_decorators.py  +0 -1
  25. celery/tests/tasks/test_chord.py  +4 -4
  26. celery/tests/tasks/test_tasks.py  +0 -4
  27. celery/tests/tasks/test_trace.py  +7 -4
  28. celery/tests/worker/test_control.py  +7 -22
  29. celery/tests/worker/test_loops.py  +21 -22
  30. celery/tests/worker/test_request.py  +156 -260
  31. celery/tests/worker/test_strategy.py  +8 -9
  32. celery/tests/worker/test_worker.py  +113 -56
  33. celery/utils/__init__.py  +0 -9
  34. celery/utils/objects.py  +3 -1
  35. celery/worker/autoscale.py  +1 -1
  36. celery/worker/consumer.py  +35 -23
  37. celery/worker/control.py  +4 -2
  38. celery/worker/job.py  +0 -590
  39. celery/worker/loops.py  +2 -2
  40. celery/worker/request.py  +494 -0
  41. celery/worker/state.py  +2 -2
  42. celery/worker/strategy.py  +45 -12
  43. docs/internals/app-overview.rst  +1 -1
  44. docs/internals/protov2.rst  +11 -7
  45. docs/internals/worker.rst  +1 -1
  46. docs/reference/celery.worker.request.rst  +3 -3
  47. docs/reference/index.rst  +1 -1
  48. docs/userguide/extending.rst  +1 -1
  49. docs/userguide/signals.rst  +1 -1
  50. docs/whatsnew-3.1.rst  +4 -3
  51. extra/release/doc4allmods  +0 -1
  52. funtests/stress/stress/templates.py  +5 -0

+ 1 - 3
celery/_state.py

@@ -77,10 +77,8 @@ def _get_current_app():
         #: creates the global fallback app instance.
         from celery.app import Celery
         set_default_app(Celery(
-            'default',
+            'default', fixups=[], set_as_current=False,
             loader=os.environ.get('CELERY_LOADER') or 'default',
-            fixups=[],
-            set_as_current=False, accept_magic_kwargs=True,
         ))
     return _tls.current_app or default_app
 

+ 11 - 4
celery/app/amqp.py

@@ -269,7 +269,8 @@ class AMQP(object):
                    expires=None, retries=0, chord=None,
                    callbacks=None, errbacks=None, reply_to=None,
                    time_limit=None, soft_time_limit=None,
-                   create_sent_event=False, now=None, timezone=None):
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
         args = args or ()
         kwargs = kwargs or {}
         utc = self.utc
@@ -295,7 +296,8 @@ class AMQP(object):
         return task_message(
             headers={
                 'lang': 'py',
-                'c_type': name,
+                'task': name,
+                'id': task_id,
                 'eta': eta,
                 'expires': expires,
                 'callbacks': callbacks,
@@ -304,7 +306,9 @@ class AMQP(object):
                 'group': group_id,
                 'chord': chord,
                 'retries': retries,
-                'timelimit': (time_limit, soft_time_limit),
+                'timelimit': [time_limit, soft_time_limit],
+                'root_id': root_id,
+                'parent_id': parent_id,
             },
             properties={
                 'correlation_id': task_id,
@@ -313,6 +317,8 @@ class AMQP(object):
             body=(args, kwargs),
             sent_event={
                 'uuid': task_id,
+                'root': root_id,
+                'parent': parent_id,
                 'name': name,
                 'args': safe_repr(args),
                 'kwargs': safe_repr(kwargs),
@@ -327,7 +333,8 @@ class AMQP(object):
                    expires=None, retries=0,
                    chord=None, callbacks=None, errbacks=None, reply_to=None,
                    time_limit=None, soft_time_limit=None,
-                   create_sent_event=False, now=None, timezone=None):
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
         args = args or ()
         kwargs = kwargs or {}
         utc = self.utc

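Example (not part of this commit): the hunks above add root_id/parent_id to the message, move the task name and id into the headers, and serialize the time limits as a list. The sketch below, using only plain dicts and made-up ids, shows roughly what such a message could look like for a hypothetical tasks.add call; field names follow the diff, values are invented.

    # Illustrative sketch only: an approximate shape of the message built by
    # the code above for a hypothetical ``tasks.add`` call.  Ids are made up.
    from uuid import uuid4

    task_id = str(uuid4())
    root_id = str(uuid4())      # id of the first task in the workflow
    parent_id = str(uuid4())    # id of the task that sent this one

    message = {
        'headers': {
            'lang': 'py',
            'task': 'tasks.add',        # task name now lives in the headers
            'id': task_id,
            'eta': None,
            'expires': None,
            'callbacks': None,
            'errbacks': None,
            'group': None,
            'chord': None,
            'retries': 0,
            'timelimit': [None, None],  # (hard, soft) serialized as a list
            'root_id': root_id,
            'parent_id': parent_id,
        },
        'properties': {'correlation_id': task_id},
        'body': ((2, 2), {}),           # (args, kwargs)
    }
    print(message['headers']['task'], message['headers']['root_id'])
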
+ 8 - 16
celery/app/base.py

@@ -9,12 +9,10 @@
 from __future__ import absolute_import
 
 import os
-import sys
 import threading
 import warnings
 
 from collections import defaultdict, deque
-from contextlib import contextmanager
 from copy import deepcopy
 from operator import attrgetter
 
@@ -128,11 +126,13 @@ class Celery(object):
     #: Signal sent after app has been finalized.
     on_after_finalize = None
 
+    #: ignored
+    accept_magic_kwargs = False
+
     def __init__(self, main=None, loader=None, backend=None,
                  amqp=None, events=None, log=None, control=None,
-                 set_as_current=True, accept_magic_kwargs=False,
-                 tasks=None, broker=None, include=None, changes=None,
-                 config_source=None, fixups=None, task_cls=None,
+                 set_as_current=True, tasks=None, broker=None, include=None,
+                 changes=None, config_source=None, fixups=None, task_cls=None,
                  autofinalize=True, **kwargs):
         self.clock = LamportClock()
         self.main = main
@@ -145,7 +145,6 @@ class Celery(object):
         self.task_cls = task_cls or self.task_cls
         self.set_as_current = set_as_current
         self.registry_cls = symbol_by_name(self.registry_cls)
-        self.accept_magic_kwargs = accept_magic_kwargs
         self.user_options = defaultdict(set)
         self.steps = defaultdict(set)
         self.autofinalize = autofinalize
@@ -240,12 +239,6 @@ class Celery(object):
                     cons = lambda app: app._task_from_fun(fun, **opts)
                     cons.__name__ = fun.__name__
                     connect_on_app_finalize(cons)
-                if self.accept_magic_kwargs:  # compat mode
-                    task = self._task_from_fun(fun, **opts)
-                    if filter:
-                        task = filter(task)
-                    return task
-
                 if self.finalized or opts.get('_force_evaluate'):
                     ret = self._task_from_fun(fun, **opts)
                 else:
@@ -277,7 +270,6 @@ class Celery(object):
 
         T = type(fun.__name__, (base, ), dict({
             'app': self,
-            'accept_magic_kwargs': False,
             'run': fun if bind else staticmethod(fun),
             '_decorated': True,
             '__doc__': fun.__doc__,
@@ -352,7 +344,7 @@ class Celery(object):
                   publisher=None, link=None, link_error=None,
                   add_to_parent=True, group_id=None, retries=0, chord=None,
                   reply_to=None, time_limit=None, soft_time_limit=None,
-                  **options):
+                  root_id=None, parent_id=None, **options):
         amqp = self.amqp
         task_id = task_id or uuid()
         producer = producer or publisher  # XXX compat
@@ -370,6 +362,7 @@ class Celery(object):
             maybe_list(link), maybe_list(link_error),
             reply_to or self.oid, time_limit, soft_time_limit,
             self.conf.CELERY_SEND_TASK_SENT_EVENT,
+            root_id, parent_id,
         )
 
         if connection:
@@ -574,7 +567,6 @@ class Celery(object):
             'events': self.events_cls,
             'log': self.log_cls,
             'control': self.control_cls,
-            'accept_magic_kwargs': self.accept_magic_kwargs,
             'fixups': self.fixups,
             'config_source': self._config_source,
             'task_cls': self.task_cls,
@@ -585,7 +577,7 @@ class Celery(object):
         return (self.main, self.conf.changes,
                 self.loader_cls, self.backend_cls, self.amqp_cls,
                 self.events_cls, self.log_cls, self.control_cls,
-                self.accept_magic_kwargs, self._config_source)
+                False, self._config_source)
 
     @cached_property
     def Worker(self):

+ 14 - 181
celery/app/builtins.py

@@ -9,10 +9,7 @@
 """
 from __future__ import absolute_import
 
-from collections import deque
-
 from celery._state import get_current_worker_task, connect_on_app_finalize
-from celery.utils import uuid
 from celery.utils.log import get_logger
 
 __all__ = []
@@ -44,7 +41,7 @@ def add_unlock_chord_task(app):
     It joins chords by creating a task chain polling the header for completion.
 
     """
-    from celery.canvas import signature
+    from celery.canvas import maybe_signature
     from celery.exceptions import ChordError
     from celery.result import allow_join_result, result_from_tuple
 
@@ -66,6 +63,7 @@ def add_unlock_chord_task(app):
             interval = unlock_chord.default_retry_delay
 
         # check if the task group is ready, and if so apply the callback.
+        callback = maybe_signature(callback, app)
         deps = GroupResult(
             group_id,
             [result_from_tuple(r, app=app) for r in result],
@@ -73,7 +71,7 @@ def add_unlock_chord_task(app):
         j = deps.join_native if deps.supports_native_join else deps.join
 
         if deps.ready():
-            callback = signature(callback, app=app)
+            callback = maybe_signature(callback, app=app)
             try:
                 with allow_join_result():
                     ret = j(timeout=3.0, propagate=propagate)
@@ -138,14 +136,14 @@ def add_chunk_task(app):
 
 @connect_on_app_finalize
 def add_group_task(app):
+    """No longer used, but here for backwards compatibility."""
     _app = app
-    from celery.canvas import maybe_signature, signature
+    from celery.canvas import maybe_signature
     from celery.result import result_from_tuple
 
     class Group(app.Task):
         app = _app
         name = 'celery.group'
-        accept_magic_kwargs = False
         _decorated = True
 
         def run(self, tasks, result, group_id, partial_args,
@@ -153,13 +151,8 @@ def add_group_task(app):
             app = self.app
             result = result_from_tuple(result, app)
             # any partial args are added to all tasks in the group
-            taskit = (signature(task, app=app).clone(partial_args)
+            taskit = (maybe_signature(task, app=app).clone(partial_args)
                       for i, task in enumerate(tasks))
-            if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
-                return app.GroupResult(
-                    result.id,
-                    [stask.apply(group_id=group_id) for stask in taskit],
-                )
             with app.producer_or_acquire() as pub:
                 [stask.apply_async(group_id=group_id, producer=pub,
                                    add_to_parent=False) for stask in taskit]
@@ -167,152 +160,32 @@ def add_group_task(app):
             if add_to_parent and parent:
                 parent.add_trail(result)
             return result
-
-        def prepare(self, options, tasks, args, **kwargs):
-            options['group_id'] = group_id = (
-                options.setdefault('task_id', uuid()))
-
-            def prepare_member(task):
-                task = maybe_signature(task, app=self.app)
-                task.options['group_id'] = group_id
-                return task, task.freeze()
-
-            try:
-                tasks, res = list(zip(
-                    *[prepare_member(task) for task in tasks]
-                ))
-            except ValueError:  # tasks empty
-                tasks, res = [], []
-            return (tasks, self.app.GroupResult(group_id, res), group_id, args)
-
-        def apply_async(self, partial_args=(), kwargs={}, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(partial_args, kwargs, **options)
-            tasks, result, gid, args = self.prepare(
-                options, args=partial_args, **kwargs
-            )
-            super(Group, self).apply_async((
-                list(tasks), result.as_tuple(), gid, args), **options
-            )
-            return result
-
-        def apply(self, args=(), kwargs={}, **options):
-            return super(Group, self).apply(
-                self.prepare(options, args=args, **kwargs),
-                **options).get()
     return Group
 
 
 @connect_on_app_finalize
 def add_chain_task(app):
-    from celery.canvas import (
-        Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
-    )
-
+    """No longer used, but here for backwards compatibility."""
     _app = app
 
     class Chain(app.Task):
         app = _app
         name = 'celery.chain'
-        accept_magic_kwargs = False
         _decorated = True
 
-        def prepare_steps(self, args, tasks):
-            app = self.app
-            steps = deque(tasks)
-            next_step = prev_task = prev_res = None
-            tasks, results = [], []
-            i = 0
-            while steps:
-                # First task get partial args from chain.
-                task = maybe_signature(steps.popleft(), app=app)
-                task = task.clone() if i else task.clone(args)
-                res = task.freeze()
-                i += 1
-
-                if isinstance(task, group):
-                    task = maybe_unroll_group(task)
-                if isinstance(task, chain):
-                    # splice the chain
-                    steps.extendleft(reversed(task.tasks))
-                    continue
-
-                elif isinstance(task, group) and steps and \
-                        not isinstance(steps[0], group):
-                    # automatically upgrade group(..) | s to chord(group, s)
-                    try:
-                        next_step = steps.popleft()
-                        # for chords we freeze by pretending it's a normal
-                        # task instead of a group.
-                        res = Signature.freeze(next_step)
-                        task = chord(task, body=next_step, task_id=res.task_id)
-                    except IndexError:
-                        pass  # no callback, so keep as group
-                if prev_task:
-                    # link previous task to this task.
-                    prev_task.link(task)
-                    # set the results parent attribute.
-                    if not res.parent:
-                        res.parent = prev_res
-
-                if not isinstance(prev_task, chord):
-                    results.append(res)
-                    tasks.append(task)
-                prev_task, prev_res = task, res
-
-            return tasks, results
-
-        def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
-                        task_id=None, link=None, link_error=None, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            options.pop('publisher', None)
-            tasks, results = self.prepare_steps(args, kwargs['tasks'])
-            result = results[-1]
-            if group_id:
-                tasks[-1].set(group_id=group_id)
-            if chord:
-                tasks[-1].set(chord=chord)
-            if task_id:
-                tasks[-1].set(task_id=task_id)
-                result = tasks[-1].type.AsyncResult(task_id)
-            # make sure we can do a link() and link_error() on a chain object.
-            if link:
-                tasks[-1].set(link=link)
-            # and if any task in the chain fails, call the errbacks
-            if link_error:
-                for task in tasks:
-                    task.set(link_error=link_error)
-            tasks[0].apply_async(**options)
-            return result
-
-        def apply(self, args=(), kwargs={}, signature=maybe_signature,
-                  **options):
-            app = self.app
-            last, fargs = None, args  # fargs passed to first task only
-            for task in kwargs['tasks']:
-                res = signature(task, app=app).clone(fargs).apply(
-                    last and (last.get(), ),
-                )
-                res.parent, last, fargs = last, res, None
-            return last
     return Chain
 
 
 @connect_on_app_finalize
 def add_chord_task(app):
-    """Every chord is executed in a dedicated task, so that the chord
-    can be used as a signature, and this generates the task
-    responsible for that."""
-    from celery import group
+    """No longer used, but here for backwards compatibility."""
+    from celery import group, chord as _chord
     from celery.canvas import maybe_signature
     _app = app
-    default_propagate = app.conf.CELERY_CHORD_PROPAGATES
 
     class Chord(app.Task):
         app = _app
         name = 'celery.chord'
-        accept_magic_kwargs = False
         ignore_result = False
         _decorated = True
 
@@ -320,53 +193,13 @@ def add_chord_task(app):
                 countdown=1, max_retries=None, propagate=None,
                 eager=False, **kwargs):
             app = self.app
-            propagate = default_propagate if propagate is None else propagate
-            group_id = uuid()
-
             # - convert back to group if serialized
             tasks = header.tasks if isinstance(header, group) else header
             header = group([
-                maybe_signature(s, app=app).clone() for s in tasks
+                maybe_signature(s, app=app) for s in tasks
             ], app=self.app)
-            # - eager applies the group inline
-            if eager:
-                return header.apply(args=partial_args, task_id=group_id)
-
-            body.setdefault('chord_size', len(header.tasks))
-            results = header.freeze(group_id=group_id, chord=body).results
-
-            return self.backend.apply_chord(
-                header, partial_args, group_id,
-                body, interval=interval, countdown=countdown,
-                max_retries=max_retries, propagate=propagate, result=results,
-            )
-
-        def apply_async(self, args=(), kwargs={}, task_id=None,
-                        group_id=None, chord=None, **options):
-            app = self.app
-            if app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            header = kwargs.pop('header')
-            body = kwargs.pop('body')
-            header, body = (maybe_signature(header, app=app),
-                            maybe_signature(body, app=app))
-            # forward certain options to body
-            if chord is not None:
-                body.options['chord'] = chord
-            if group_id is not None:
-                body.options['group_id'] = group_id
-            [body.link(s) for s in options.pop('link', [])]
-            [body.link_error(s) for s in options.pop('link_error', [])]
-            body_result = body.freeze(task_id)
-            parent = super(Chord, self).apply_async((header, body, args),
-                                                    kwargs, **options)
-            body_result.parent = parent
-            return body_result
-
-        def apply(self, args=(), kwargs={}, propagate=True, **options):
-            body = kwargs['body']
-            res = super(Chord, self).apply(args, dict(kwargs, eager=True),
-                                           **options)
-            return maybe_signature(body, app=self.app).apply(
-                args=(res.get(propagate=propagate).get(), ))
+            body = maybe_signature(body, app=app)
+            ch = _chord(header, body)
+            return ch.run(header, body, partial_args, app, interval,
+                          countdown, max_retries, propagate, **kwargs)
     return Chord

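Example (not part of this commit): with celery.group, celery.chain and celery.chord reduced to backwards-compatibility shims, workflows are composed from the canvas primitives directly. The minimal sketch below only constructs signatures and prints their repr; the task names tasks.add and tasks.tsum are hypothetical, nothing is sent to a broker, and it assumes a Celery installation where these canvas classes are importable.

    # Sketch only: composing canvas primitives directly instead of relying on
    # the removed built-in task implementations.
    from celery import chain, group, signature

    add = signature('tasks.add')    # hypothetical task name
    tsum = signature('tasks.tsum')  # hypothetical task name

    # group(...) followed by another signature is what prepare_steps()
    # upgrades to a chord when the chain is applied.
    workflow = chain(
        add.clone((2, 2)),
        group([add.clone((i, i)) for i in range(3)]),
        tsum.clone(),
    )
    print(repr(workflow))
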
+ 14 - 27
celery/app/task.py

@@ -20,7 +20,7 @@ from celery.exceptions import MaxRetriesExceededError, Reject, Retry
 from celery.five import class_property, items, with_metaclass
 from celery.local import Proxy
 from celery.result import EagerResult
-from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, uuid, maybe_reraise
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.mail import ErrorMail
@@ -93,6 +93,8 @@ class Context(object):
     headers = None
     delivery_info = None
     reply_to = None
+    root_id = None
+    parent_id = None
     correlation_id = None
     taskset = None   # compat alias to group
     group = None
@@ -235,10 +237,6 @@ class Task(object):
     #: If :const:`True` the task is an abstract base class.
     abstract = True
 
-    #: If disabled the worker will not forward magic keyword arguments.
-    #: Deprecated and scheduled for removal in v4.0.
-    accept_magic_kwargs = False
-
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: it will **never** stop retrying.
     max_retries = 3
@@ -343,6 +341,9 @@ class Task(object):
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )
 
+    #: ignored
+    accept_magic_kwargs = False
+
     _backend = None  # set by backend property.
 
     __bound__ = False
@@ -360,8 +361,6 @@ class Task(object):
         for attr_name, config_name in self.from_config:
             if getattr(self, attr_name, None) is None:
                 setattr(self, attr_name, conf[config_name])
-        if self.accept_magic_kwargs is None:
-            self.accept_magic_kwargs = app.accept_magic_kwargs
 
         # decorate with annotations from config.
         if not was_bound:
@@ -693,7 +692,7 @@ class Task(object):
 
         """
         # trace imports Task, so need to import inline.
-        from celery.app.trace import eager_trace_task
+        from celery.app.trace import build_tracer
 
         app = self._get_app()
         args = args or ()
@@ -718,28 +717,16 @@ class Task(object):
                    'errbacks': maybe_list(link_error),
                    'headers': options.get('headers'),
                    'delivery_info': {'is_eager': True}}
-        if self.accept_magic_kwargs:
-            default_kwargs = {'task_name': task.name,
-                              'task_id': task_id,
-                              'task_retries': retries,
-                              'task_is_eager': True,
-                              'logfile': options.get('logfile'),
-                              'loglevel': options.get('loglevel', 0),
-                              'delivery_info': {'is_eager': True}}
-            supported_keys = fun_takes_kwargs(task.run, default_kwargs)
-            extend_with = {
-                key: val for key, val in items(default_kwargs)
-                if key in supported_keys
-            }
-            kwargs.update(extend_with)
-
         tb = None
-        retval, info = eager_trace_task(task, task_id, args, kwargs,
-                                        app=self._get_app(),
-                                        request=request, propagate=throw)
+        tracer = build_tracer(
+            task.name, task, eager=True,
+            propagate=throw, app=self._get_app(),
+        )
+        ret = tracer(task_id, args, kwargs, request)
+        retval = ret.retval
         if isinstance(retval, ExceptionInfo):
             retval, tb = retval.exception, retval.traceback
-        state = states.SUCCESS if info is None else info.state
+        state = states.SUCCESS if ret.info is None else ret.info.state
         return EagerResult(task_id, retval, state, traceback=tb)
 
     def AsyncResult(self, task_id, **kwargs):

+ 174 - 32
celery/app/trace.py

@@ -15,33 +15,84 @@ from __future__ import absolute_import
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # so instead we now use one closure per task class.
 
+import logging
 import os
 import socket
 import sys
 
+from collections import namedtuple
 from warnings import warn
 
 from billiard.einfo import ExceptionInfo
 from kombu.exceptions import EncodeError
-from kombu.utils import kwdict
+from kombu.serialization import loads as loads_message, prepare_accept_content
+from kombu.utils.encoding import safe_repr, safe_str
 
 from celery import current_app, group
 from celery import states, signals
 from celery._state import _task_stack
 from celery.app import set_default_app
 from celery.app.task import Task as BaseTask, Context
-from celery.exceptions import Ignore, Reject, Retry
+from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
+from celery.five import monotonic
 from celery.utils.log import get_logger
 from celery.utils.objects import mro_lookup
 from celery.utils.serialization import (
-    get_pickleable_exception,
-    get_pickleable_etype,
+    get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
 )
+from celery.utils.text import truncate
 
-__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
+__all__ = ['TraceInfo', 'build_tracer', 'trace_task',
            'setup_worker_optimizations', 'reset_worker_optimizations']
 
-_logger = get_logger(__name__)
+logger = get_logger(__name__)
+info = logger.info
+
+#: Format string used to log task success.
+LOG_SUCCESS = """\
+Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
+"""
+
+#: Format string used to log task failure.
+LOG_FAILURE = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task internal error.
+LOG_INTERNAL_ERROR = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task ignored.
+LOG_IGNORED = """\
+Task %(name)s[%(id)s] %(description)s\
+"""
+
+#: Format string used to log task rejected.
+LOG_REJECTED = """\
+Task %(name)s[%(id)s] %(exc)s\
+"""
+
+#: Format string used to log task retry.
+LOG_RETRY = """\
+Task %(name)s[%(id)s] retry: %(exc)s\
+"""
+
+log_policy_t = namedtuple(
+    'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
+)
+
+log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
+log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
+log_policy_internal = log_policy_t(
+    LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
+)
+log_policy_expected = log_policy_t(
+    LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
+)
+log_policy_unexpected = log_policy_t(
+    LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
+)
 
 send_prerun = signals.task_prerun.send
 send_postrun = signals.task_postrun.send
@@ -56,9 +107,11 @@ EXCEPTION_STATES = states.EXCEPTION_STATES
 IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
 
 #: set by :func:`setup_worker_optimizations`
-_tasks = None
+_localized = []
 _patched = {}
 
+trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
+
 
 def task_has_custom(task, attr):
     """Return true if the task or one of its bases
@@ -67,6 +120,19 @@ def task_has_custom(task, attr):
                       monkey_patched=['celery.app.task'])
 
 
+def get_log_policy(task, einfo, exc):
+    if isinstance(exc, Reject):
+        return log_policy_reject
+    elif isinstance(exc, Ignore):
+        return log_policy_ignore
+    elif einfo.internal:
+        return log_policy_internal
+    else:
+        if task.throws and isinstance(exc, task.throws):
+            return log_policy_expected
+        return log_policy_unexpected
+
+
 class TraceInfo(object):
     __slots__ = ('state', 'retval')
 
@@ -100,6 +166,10 @@ class TraceInfo(object):
             task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
             signals.task_retry.send(sender=task, request=req,
                                     reason=reason, einfo=einfo)
+            info(LOG_RETRY, {
+                'id': req.id, 'name': task.name,
+                'exc': safe_repr(reason.exc),
+            })
             return einfo
         finally:
             del(tb)
@@ -123,14 +193,47 @@ class TraceInfo(object):
                                       kwargs=req.kwargs,
                                       traceback=tb,
                                       einfo=einfo)
+            self._log_error(task, einfo)
             return einfo
         finally:
             del(tb)
 
+    def _log_error(self, task, einfo):
+        req = task.request
+        eobj = einfo.exception = get_pickled_exception(einfo.exception)
+        exception, traceback, exc_info, sargs, skwargs = (
+            safe_repr(eobj),
+            safe_str(einfo.traceback),
+            einfo.exc_info,
+            safe_repr(req.args),
+            safe_repr(req.kwargs),
+        )
+        policy = get_log_policy(task, einfo, eobj)
+
+        context = {
+            'hostname': req.hostname,
+            'id': req.id,
+            'name': task.name,
+            'exc': exception,
+            'traceback': traceback,
+            'args': sargs,
+            'kwargs': skwargs,
+            'description': policy.description,
+            'internal': einfo.internal,
+        }
+
+        logger.log(policy.severity, policy.format.strip(), context,
+                   exc_info=exc_info if policy.traceback else None,
+                   extra={'data': context})
+
+        if policy.mail:
+            task.send_error_email(context, einfo.exception)
+
 
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
-                 IGNORE_STATES=IGNORE_STATES):
+                 monotonic=monotonic, truncate=truncate,
+                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
     """Return a function that traces task execution; catches all
     exceptions and updates result backend with the state and result
 
@@ -186,6 +289,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     push_task = _task_stack.push
     pop_task = _task_stack.pop
     on_chord_part_return = backend.on_chord_part_return
+    _does_info = logger.isEnabledFor(logging.INFO)
 
     prerun_receivers = signals.task_prerun.receivers
     postrun_receivers = signals.task_postrun.receivers
@@ -209,6 +313,8 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     def trace_task(uuid, args, kwargs, request=None):
         # R      - is the possibly prepared return value.
         # I      - is the Info object.
+        # T      - runtime
+        # Rstr   - textual representation of return value
         # retval - is the always unmodified return value.
         # state  - is the resulting task state.
 
@@ -216,9 +322,14 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         # for performance reasons, and because the function is so long
         # we want the main variables (I, and R) to stand out visually from the
         # the rest of the variables, so breaking PEP8 is worth it ;)
-        R = I = retval = state = None
-        kwargs = kwdict(kwargs)
+        R = I = T = Rstr = retval = state = None
+        time_start = monotonic()
         try:
+            try:
+                kwargs.items
+            except AttributeError:
+                raise InvalidTaskError(
+                    'Task keyword arguments is not a mapping')
             push_task(task)
             task_request = Context(request or {}, args=args,
                                    called_directly=False, kwargs=kwargs)
@@ -289,6 +400,13 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                             task_on_success(retval, uuid, args, kwargs)
                         if success_receivers:
                             send_success(sender=task, result=retval)
+                        if _does_info:
+                            T = monotonic() - time_start
+                            Rstr = truncate(safe_repr(R), 256)
+                            info(LOG_SUCCESS, {
+                                'id': uuid, 'name': name,
+                                'return_value': Rstr, 'runtime': T,
+                            })
 
                 # -* POST *-
                 if state not in IGNORE_STATES:
@@ -314,15 +432,15 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                         except (KeyboardInterrupt, SystemExit, MemoryError):
                             raise
                         except Exception as exc:
-                            _logger.error('Process cleanup failed: %r', exc,
-                                          exc_info=True)
+                            logger.error('Process cleanup failed: %r', exc,
+                                         exc_info=True)
         except MemoryError:
             raise
         except Exception as exc:
             if eager:
                 raise
             R = report_internal_error(task, exc)
-        return R, I
+        return trace_ok_t(R, I, T, Rstr)
 
     return trace_task
 
@@ -331,27 +449,49 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
     try:
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
-        return task.__trace__(uuid, args, kwargs, request)[0]
+        return task.__trace__(uuid, args, kwargs, request)
     except Exception as exc:
         return report_internal_error(task, exc)
 
 
-def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
-    return trace_task((app or current_app).tasks[name],
-                      uuid, args, kwargs, request, app=app, **opts)
+def _trace_task_ret(name, uuid, request, body, content_type,
+                    content_encoding, loads=loads_message, app=None,
+                    **extra_request):
+    app = app or current_app._get_current_object()
+    accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+    args, kwargs = loads(body, content_type, content_encoding, accept=accept)
+    request.update(args=args, kwargs=kwargs, **extra_request)
+    R, I, T, Rstr = trace_task(app.tasks[name],
+                               uuid, args, kwargs, request, app=app)
+    return (1, R, T) if I else (0, Rstr, T)
 trace_task_ret = _trace_task_ret
 
 
-def _fast_trace_task(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
     # setup_worker_optimizations will point trace_task_ret to here,
     # so this is the function used in the worker.
-    return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
-
-
-def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
-    opts.setdefault('eager', True)
-    return build_tracer(task.name, task, **opts)(
-        uuid, args, kwargs, request)
+    tasks, _ = _loc
+    R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)[0]
+    # exception instance if error, else result text
+    return (1, R, T) if I else (0, Rstr, T)
+
+
+def _fast_trace_task(task, uuid, request, body, content_type,
+                     content_encoding, loads=loads_message, _loc=_localized,
+                     hostname=None, **_):
+    tasks, accept = _loc
+    if content_type:
+        args, kwargs = loads(body, content_type, content_encoding,
+                             accept=accept)
+    else:
+        args, kwargs = body
+    request.update({
+        'args': args, 'kwargs': kwargs, 'hostname': hostname,
+    })
+    R, I, T, Rstr = tasks[task].__trace__(
+        uuid, args, kwargs, request,
+    )
+    return (1, R, T) if I else (0, Rstr, T)
 
 
 def report_internal_error(task, exc):
@@ -368,7 +508,6 @@ def report_internal_error(task, exc):
 
 
 def setup_worker_optimizations(app):
-    global _tasks
     global trace_task_ret
 
     # make sure custom Task.__call__ methods that calls super
@@ -388,12 +527,15 @@ def setup_worker_optimizations(app):
     app.finalize()
 
     # set fast shortcut to task registry
-    _tasks = app._tasks
+    _localized[:] = [
+        app._tasks,
+        prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+    ]
 
     trace_task_ret = _fast_trace_task
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _fast_trace_task
-    job_module.__optimize__()
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _fast_trace_task
+    request_module.__optimize__()
 
 
 def reset_worker_optimizations():
@@ -407,8 +549,8 @@ def reset_worker_optimizations():
         BaseTask.__call__ = _patched.pop('BaseTask.__call__')
     except KeyError:
         pass
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _trace_task_ret
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _trace_task_ret
 
 
 def _install_stack_protection():

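Example (not part of this commit): success and failure logging now happens inside the tracer, driven by per-outcome log_policy_t entries, and the tracer returns a trace_ok_t tuple instead of a bare (retval, info) pair. The following standalone, stdlib-only sketch mirrors the policy-selection idea; the Reject stand-in and the throws tuple are simplified, it is not the Celery implementation itself.

    # Standalone sketch of the log-policy idea from get_log_policy() above.
    import logging
    from collections import namedtuple

    log_policy_t = namedtuple(
        'log_policy_t',
        ('format', 'description', 'severity', 'traceback', 'mail'),
    )

    REJECTED = log_policy_t('Task %(id)s %(exc)s', 'rejected',
                            logging.WARN, 1, 1)
    EXPECTED = log_policy_t('Task %(id)s %(description)s: %(exc)s',
                            'raised expected', logging.INFO, 0, 0)
    UNEXPECTED = log_policy_t('Task %(id)s %(description)s: %(exc)s',
                              'raised unexpected', logging.ERROR, 1, 1)

    class Reject(Exception):
        """Simplified stand-in for celery.exceptions.Reject."""

    def pick_policy(exc, throws=()):
        # Simplified decision order: rejected, then exceptions the task
        # declares in ``throws`` (expected), then everything else
        # (unexpected).  Internal errors are omitted from this sketch.
        if isinstance(exc, Reject):
            return REJECTED
        if throws and isinstance(exc, throws):
            return EXPECTED
        return UNEXPECTED

    print(pick_policy(KeyError('x'), throws=(KeyError,)).description)
    print(pick_policy(ValueError('boom')).description)
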
+ 0 - 1
celery/app/utils.py

@@ -152,7 +152,6 @@ class AppPickler(object):
         return dict(main=main, loader=loader, backend=backend, amqp=amqp,
                     changes=changes, events=events, log=log, control=control,
                     set_as_current=False,
-                    accept_magic_kwargs=accept_magic_kwargs,
                     config_source=config_source)
 
     def construct(self, cls, **kwargs):

+ 4 - 4
celery/backends/base.py

@@ -165,11 +165,11 @@ class BaseBackend(object):
 
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
-        if self.serializer in EXCEPTION_ABLE_CODECS:
-            return get_pickled_exception(exc)
-        elif not isinstance(exc, BaseException):
-            return create_exception_cls(
+        if not isinstance(exc, BaseException):
+            exc = create_exception_cls(
                 from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+        if self.serializer in EXCEPTION_ABLE_CODECS:
+            exc = get_pickled_exception(exc)
         return exc
 
     def prepare_value(self, result):

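Example (not part of this commit): exception_to_python() now rebuilds a dict-serialized exception into a real exception instance first, and only then applies any pickle-specific unwrapping, instead of returning early for pickle-capable serializers. A rough standalone restatement of that ordering; the helpers here are simplified stand-ins, not celery.utils.serialization.

    def create_exception_cls(name):
        # simplified stand-in: build an Exception subclass by name
        return type(name, (Exception,), {})

    def exception_to_python(exc):
        # 1) dict payloads are always rebuilt into a real exception first...
        if not isinstance(exc, BaseException):
            exc = create_exception_cls(exc['exc_type'])(exc['exc_message'])
        # 2) ...and only then would pickle-wrapped exceptions be unwrapped
        #    (omitted here), rather than skipping step 1 as before.
        return exc

    err = exception_to_python({'exc_type': 'MyError', 'exc_message': 'boom'})
    print(type(err).__name__, err)
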
+ 209 - 56
celery/canvas.py

@@ -12,6 +12,7 @@
 """
 from __future__ import absolute_import
 
+from collections import deque
 from copy import deepcopy
 from functools import partial as _partial, reduce
 from operator import itemgetter
@@ -19,7 +20,7 @@ from itertools import chain as _chain
 
 from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
 
-from celery._state import current_app
+from celery._state import current_app, get_current_worker_task
 from celery.utils.functional import (
     maybe_list, is_list, regen,
     chunks as _chunks,
@@ -194,12 +195,13 @@ class Signature(dict):
         return s
     partial = clone
 
-    def freeze(self, _id=None, group_id=None, chord=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             tid = opts['task_id']
         except KeyError:
             tid = opts['task_id'] = _id or uuid()
+        root_id = opts.setdefault('root_id', root_id)
         if 'reply_to' not in opts:
             opts['reply_to'] = self.app.oid
         if group_id:
@@ -348,6 +350,99 @@ class chain(Signature):
         if self.tasks:
             return self.apply_async(args, kwargs)
 
+    def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+                    task_id=None, link=None, link_error=None,
+                    publisher=None, root_id=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        tasks, results = self.prepare_steps(
+            args, self.tasks, root_id, link_error,
+        )
+        if not results:
+            return
+        result = results[-1]
+        last_task = tasks[-1]
+        if group_id:
+            last_task.set(group_id=group_id)
+        if chord:
+            last_task.set(chord=chord)
+        if task_id:
+            last_task.set(task_id=task_id)
+            result = last_task.type.AsyncResult(task_id)
+        # make sure we can do a link() and link_error() on a chain object.
+        if link:
+            tasks[-1].set(link=link)
+        tasks[0].apply_async(**options)
+        return result
+
+    def prepare_steps(self, args, tasks,
+                      root_id=None, link_error=None, app=None):
+        app = app or self.app
+        steps = deque(tasks)
+        next_step = prev_task = prev_res = None
+        tasks, results = [], []
+        i = 0
+        while steps:
+            task = steps.popleft()
+            if not i:  # first task
+                # first task gets partial args from chain
+                task = task.clone(args)
+                res = task.freeze(root_id=root_id)
+                root_id = res.id if root_id is None else root_id
+            else:
+                task = task.clone()
+                res = task.freeze(root_id=root_id)
+            i += 1
+
+            if isinstance(task, group):
+                task = maybe_unroll_group(task)
+
+            if isinstance(task, chain):
+                # splice the chain
+                steps.extendleft(reversed(task.tasks))
+                continue
+            elif isinstance(task, group) and steps and \
+                    not isinstance(steps[0], group):
+                # automatically upgrade group(...) | s to chord(group, s)
+                try:
+                    next_step = steps.popleft()
+                    # for chords we freeze by pretending it's a normal
+                    # signature instead of a group.
+                    res = Signature.freeze(next_step)
+                    task = chord(
+                        task, body=next_step,
+                        task_id=res.task_id, root_id=root_id,
+                    )
+                except IndexError:
+                    pass  # no callback, so keep as group.
+
+            if prev_task:
+                # link previous task to this task.
+                prev_task.link(task)
+                # set AsyncResult.parent
+                if not res.parent:
+                    res.parent = prev_res
+
+            if link_error:
+                task.set(link_error=link_error)
+
+            if not isinstance(prev_task, chord):
+                results.append(res)
+                tasks.append(task)
+            prev_task, prev_res = task, res
+
+        return tasks, results
+
+    def apply(self, args=(), kwargs={}, **options):
+        last, fargs = None, args
+        for task in self.tasks:
+            res = task.clone(fargs).apply(
+                last and (last.get(), ), **options
+            )
+            res.parent, last, fargs = last, res, None
+        return last
+
     @classmethod
     def from_dict(self, d, app=None):
         tasks = d['kwargs']['tasks']
@@ -357,11 +452,14 @@ class chain(Signature):
         return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
 
     @property
-    def type(self):
-        try:
-            return self._type or self.tasks[0].type.app.tasks['celery.chain']
-        except KeyError:
-            return self.app.tasks['celery.chain']
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app or current_app
 
     def __repr__(self):
         return ' | '.join(repr(t) for t in self.tasks)
@@ -452,11 +550,6 @@ def _maybe_group(tasks):
     return tasks
 
 
-def _maybe_clone(tasks, app):
-    return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
-            for s in tasks]
-
-
 @Signature.register_type
 class group(Signature):
 
@@ -477,13 +570,54 @@ class group(Signature):
                 task['args'] = task._merge(d['args'])[0]
         return group(tasks, app=app, **kwdict(d['options']))
 
-    def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options):
-        tasks = _maybe_clone(self.tasks, app=self._app)
-        if not tasks:
+    def _prepared(self, tasks, partial_args, group_id, root_id):
+        for task in tasks:
+            task = task.clone(partial_args)
+            yield task, task.freeze(group_id=group_id, root_id=root_id)
+
+    def _apply_tasks(self, tasks, producer=None, app=None, **options):
+        app = app or self.app
+        with app.producer_or_acquire(producer) as producer:
+            for sig, res in tasks:
+                sig.apply_async(producer=producer, add_to_parent=False,
+                                **options)
+                yield res
+
+    def _freeze_gid(self, options):
+        # remove task_id and use that as the group_id,
+        # if we don't remove it then every task will have the same id...
+        options = dict(self.options, **options)
+        options['group_id'] = group_id = (
+            options.pop('task_id', uuid()))
+        return options, group_id, options.get('root_id')
+
+    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+                    producer=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        if not self.tasks:
             return self.freeze()
-        type = self.type
-        return type(*type.prepare(dict(self.options, **options), tasks, args),
-                    add_to_parent=add_to_parent)
+
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        result = self.app.GroupResult(
+            group_id, list(self._apply_tasks(tasks, producer, app, **options)),
+        )
+        parent_task = get_current_worker_task()
+        if add_to_parent and parent_task:
+            parent_task.add_trail(result)
+        return result
+
+    def apply(self, args=(), kwargs={}, **options):
+        app = self.app
+        if not self.tasks:
+            return self.freeze()  # empty group returns GroupResult
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        return app.GroupResult(group_id, [
+            sig.apply(**options) for sig, _ in tasks
+        ])
 
     def set_immutable(self, immutable):
         for task in self.tasks:
@@ -498,15 +632,10 @@ class group(Signature):
         sig = sig.clone().set(immutable=True)
         return self.tasks[0].link_error(sig)
 
-    def apply(self, *args, **kwargs):
-        if not self.tasks:
-            return self.freeze()  # empty group returns GroupResult
-        return Signature.apply(self, *args, **kwargs)
-
     def __call__(self, *partial_args, **options):
         return self.apply_async(partial_args, **options)
 
-    def freeze(self, _id=None, group_id=None, chord=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         try:
             gid = opts['task_id']
@@ -516,10 +645,13 @@ class group(Signature):
             opts['group_id'] = group_id
         if chord:
             opts['chord'] = group_id
+        root_id = opts.setdefault('root_id', root_id)
         new_tasks, results = [], []
         for task in self.tasks:
             task = maybe_signature(task, app=self._app).clone()
-            results.append(task.freeze(group_id=group_id, chord=chord))
+            results.append(task.freeze(
+                group_id=group_id, chord=chord, root_id=root_id,
+            ))
             new_tasks.append(task)
         self.tasks = self.kwargs['tasks'] = new_tasks
         return self.app.GroupResult(gid, results)
@@ -538,14 +670,14 @@ class group(Signature):
         return repr(self.tasks)
 
     @property
-    def type(self):
-        if self._type:
-            return self._type
-        # taking the app from the first task in the list, there may be a
-        # better solution for this, e.g. to consolidate tasks with the same
-        # app and apply them in batches.
-        app = self._app if self._app else self.tasks[0].type.app
-        return app.tasks[self['task']]
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app if app is not None else current_app
 
 
 @Signature.register_type
@@ -560,8 +692,8 @@ class chord(Signature):
         )
         self.subtask_type = 'chord'
 
-    def freeze(self, _id=None, group_id=None, chord=None):
-        return self.body.freeze(_id, group_id=group_id, chord=chord)
+    def freeze(self, *args, **kwargs):
+        return self.body.freeze(*args, **kwargs)
 
     @classmethod
     def from_dict(self, d, app=None):
@@ -574,20 +706,14 @@ class chord(Signature):
         # than manually popping things off.
         return (header, body), kwargs
 
-    @property
-    def type(self):
-        if self._type:
-            return self._type
-        # we will be able to fix this mess in 3.2 when we no longer
-        # require an actual task implementation for chord/group
-        if self._app:
-            app = self._app
-        else:
-            try:
-                app = self.tasks[0].type.app
-            except IndexError:
-                app = self.body.type.app
-        return app.tasks['celery.chord']
+    @cached_property
+    def app(self):
+        app = self._app
+        if app is None:
+            app = self.tasks[0]._app
+            if app is None:
+                app = self.body._app
+        return app if app is not None else current_app
 
     def apply_async(self, args=(), kwargs={}, task_id=None,
                     producer=None, publisher=None, connection=None,
@@ -595,14 +721,41 @@ class chord(Signature):
         body = kwargs.get('body') or self.kwargs['body']
         kwargs = dict(self.kwargs, **kwargs)
         body = body.clone(**options)
+        app = self.app
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply((), kwargs,
+                              body=body, task_id=task_id, **options)
+        return self.run(tasks, body, args, task_id=task_id, **options)
+
+    def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
+        body = self.body if body is None else body
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        return body.apply(
+            args=(tasks.apply().get(propagate=propagate), ),
+        )
 
-        _chord = self.type
-        if _chord.app.conf.CELERY_ALWAYS_EAGER:
-            return self.apply((), kwargs, task_id=task_id, **options)
-        res = body.freeze(task_id)
-        parent = _chord(self.tasks, body, args, **options)
-        res.parent = parent
-        return res
+    def run(self, header, body, partial_args, app=None, interval=None,
+            countdown=1, max_retries=None, propagate=None, eager=False,
+            task_id=None, **options):
+        app = app or self.app
+        propagate = (app.conf.CELERY_CHORD_PROPAGATES
+                     if propagate is None else propagate)
+        group_id = uuid()
+        root_id = body.options.get('root_id')
+        body.setdefault('chord_size', len(header.tasks))
+        results = header.freeze(
+            group_id=group_id, chord=body, root_id=root_id).results
+        bodyres = body.freeze(task_id, root_id=root_id)
+
+        parent = app.backend.apply_chord(
+            header, partial_args, group_id, body,
+            interval=interval, countdown=countdown,
+            max_retries=max_retries, propagate=propagate, result=results)
+        bodyres.parent = parent
+        return bodyres
 
     def __call__(self, body=None, **options):
         return self.apply_async((), {'body': body} if body else {}, **options)

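Example (not part of this commit): the new chain.prepare_steps() threads a root_id through the workflow by freezing the first task before a root exists and handing that task's id to every task frozen after it. The plain-Python sketch below mirrors only that bookkeeping with made-up task names; it is not the Celery code.

    from uuid import uuid4

    def prepare_chain(task_names):
        # Mirrors the root_id bookkeeping in chain.prepare_steps() above:
        # the first task is frozen while root_id is still None, and its id
        # then becomes the root_id given to every task that follows.
        root_id = None
        frozen = []
        for name in task_names:
            task = {'task': name, 'id': str(uuid4()), 'root_id': root_id}
            if root_id is None:
                root_id = task['id']
            frozen.append(task)
        return frozen

    for t in prepare_chain(['tasks.add', 'tasks.mul', 'tasks.tsum']):
        print(t['task'], t['root_id'])
    # tasks.add has root_id None; tasks.mul and tasks.tsum carry tasks.add's id.
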
+ 0 - 9
celery/concurrency/asynpool.py

@@ -37,7 +37,6 @@ from amqp.utils import promise
 from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard import pool as _pool
 from billiard.compat import buf_t, setblocking, isblocking
-from billiard.einfo import ExceptionInfo
 from billiard.queues import _SimpleQueue
 from kombu.async import READ, WRITE, ERR
 from kombu.serialization import pickle as _pickle
@@ -46,7 +45,6 @@ from kombu.utils.compat import get_errno
 from kombu.utils.eventio import SELECT_BAD_FD
 from celery.five import Counter, items, values
 from celery.utils.log import get_logger
-from celery.utils.text import truncate
 from celery.worker import state as worker_state
 
 try:
@@ -96,8 +94,6 @@ SCHED_STRATEGIES = {
     'fair': SCHED_STRATEGY_FAIR,
 }
 
-RESULT_MAXLEN = 128
-
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 
 
@@ -170,11 +166,6 @@ class Worker(_pool.Worker):
         # is writable.
         self.outq.put((WORKER_UP, (pid, )))
 
-    def prepare_result(self, result, RESULT_MAXLEN=RESULT_MAXLEN):
-        if not isinstance(result, ExceptionInfo):
-            return truncate(repr(result), RESULT_MAXLEN)
-        return result
-
 
 class ResultHandler(_pool.ResultHandler):
     """Handles messages from the pool processes."""

+ 3 - 1
celery/concurrency/base.py

@@ -66,11 +66,13 @@ class BasePool(object):
 
     _state = None
     _pool = None
+    _does_debug = True
 
     #: only used by multiprocessing pool
     uses_semaphore = False
 
     task_join_will_block = True
+    body_can_be_buffer = False
 
     def __init__(self, limit=None, putlocks=True,
                  forking_enable=True, callbacks_propagate=(), **options):
@@ -79,7 +81,6 @@ class BasePool(object):
         self.options = options
         self.forking_enable = forking_enable
         self.callbacks_propagate = callbacks_propagate
-        self._does_debug = logger.isEnabledFor(logging.DEBUG)
 
     def on_start(self):
         pass
@@ -128,6 +129,7 @@ class BasePool(object):
         self.on_terminate()
 
     def start(self):
+        self._does_debug = logger.isEnabledFor(logging.DEBUG)
         self.on_start()
         self._state = self.RUN
 

+ 1 - 0
celery/concurrency/solo.py

@@ -17,6 +17,7 @@ __all__ = ['TaskPool']
 
 class TaskPool(BasePool):
     """Solo task pool (blocking, inline, fast)."""
+    body_can_be_buffer = True
 
     def __init__(self, *args, **kwargs):
         super(TaskPool, self).__init__(*args, **kwargs)

+ 1 - 1
celery/contrib/batches.py

@@ -88,7 +88,7 @@ from itertools import count
 from celery.task import Task
 from celery.five import Empty, Queue
 from celery.utils.log import get_logger
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import noop
 
 __all__ = ['Batches']

+ 2 - 4
celery/events/state.py

@@ -30,7 +30,7 @@ from time import time
 from weakref import ref
 
 from kombu.clocks import timetuple
-from kombu.utils import cached_property, kwdict
+from kombu.utils import cached_property
 
 from celery import states
 from celery.five import class_property, items, values
@@ -54,8 +54,6 @@ Substantial drift from %s may mean clocks are out of sync.  Current drift is
 %s seconds.  [orig: %s recv: %s]
 """
 
-CAN_KWDICT = sys.version_info >= (2, 6, 5)
-
 logger = get_logger(__name__)
 warn = logger.warning
 
@@ -86,7 +84,7 @@ def heartbeat_expires(timestamp, freq=60,
 
 
 def _depickle_task(cls, fields):
-    return cls(**(fields if CAN_KWDICT else kwdict(fields)))
+    return cls(**fields)
 
 
 def with_unique_field(attr):

+ 9 - 8
celery/five.py

@@ -28,6 +28,14 @@ except ImportError:  # pragma: no cover
     def Counter():  # noqa
         return defaultdict(int)
 
+try:
+    buffer_t = buffer
+except NameError:  # pragma: no cover
+    # Py3 does not have buffer, but we only need isinstance.
+
+    class buffer_t(object):  # noqa
+        pass
+
 ############## py3k #########################################################
 import sys
 PY3 = sys.version_info[0] == 3
@@ -210,15 +218,8 @@ def getappattr(path):
     return current_app._rgetattr(path)
 
 
-def _compat_task_decorator(*args, **kwargs):
-    from celery import current_app
-    kwargs.setdefault('accept_magic_kwargs', True)
-    return current_app.task(*args, **kwargs)
-
-
 def _compat_periodic_task_decorator(*args, **kwargs):
     from celery.task import periodic_task
-    kwargs.setdefault('accept_magic_kwargs', True)
     return periodic_task(*args, **kwargs)
 
 
@@ -228,7 +229,7 @@ COMPAT_MODULES = {
             'send_task': 'send_task',
         },
         'decorators': {
-            'task': _compat_task_decorator,
+            'task': 'task',
             'periodic_task': _compat_periodic_task_decorator,
         },
         'log': {
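Note: the buffer_t shim added above exists only so isinstance() checks keep working on both Python 2 (where buffer is a builtin) and Python 3 (where it is not). A small sketch, assuming only the celery.five import path shown in this diff:

    from celery.five import buffer_t

    def is_buffer(obj):
        # Matches real buffer objects on Python 2; on Python 3 the
        # placeholder class matches nothing, so this degrades to False
        # instead of raising NameError.
        return isinstance(obj, buffer_t)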

+ 1 - 3
celery/task/base.py

@@ -51,7 +51,6 @@ class Task(BaseTask):
     priority = None
     type = 'regular'
     disable_error_emails = False
-    accept_magic_kwargs = False
 
     from_config = BaseTask.from_config + (
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
@@ -178,8 +177,7 @@ class PeriodicTask(Task):
 
 def task(*args, **kwargs):
     """Deprecated decorator, please use :func:`celery.task`."""
-    return current_app.task(*args, **dict({'accept_magic_kwargs': False,
-                                           'base': Task}, **kwargs))
+    return current_app.task(*args, **dict({'base': Task}, **kwargs))
 
 
 def periodic_task(*args, **options):

+ 1 - 2
celery/task/http.py

@@ -162,8 +162,7 @@ class HttpDispatch(object):
         return headers
 
 
-@shared_task(name='celery.http_dispatch', bind=True,
-             url=None, method=None, accept_magic_kwargs=False)
+@shared_task(name='celery.http_dispatch', bind=True, url=None, method=None)
 def dispatch(self, url=None, method='GET', **kwargs):
     """Task dispatching to an URL.
 

+ 0 - 12
celery/task/trace.py

@@ -1,12 +0,0 @@
-"""This module has moved to celery.app.trace."""
-from __future__ import absolute_import
-
-import sys
-
-from celery.utils import warn_deprecated
-
-warn_deprecated('celery.task.trace', removal='3.2',
-                alternative='Please use celery.app.trace instead.')
-
-from celery.app import trace
-sys.modules[__name__] = trace

+ 2 - 2
celery/tests/app/test_app.py

@@ -258,7 +258,7 @@ class test_App(AppCase):
             self.assertFalse(sh.called)
 
     def test_task_compat_with_filter(self):
-        with self.Celery(accept_magic_kwargs=True) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):
@@ -271,7 +271,7 @@ class test_App(AppCase):
             check.assert_called_with(foo)
 
     def test_task_with_filter(self):
-        with self.Celery(accept_magic_kwargs=False) as app:
+        with self.Celery() as app:
             check = Mock()
 
             def filter(task):

+ 10 - 12
celery/tests/app/test_builtins.py

@@ -136,18 +136,18 @@ class test_chain(BuiltinsCase):
 
     def test_group_to_chord(self):
         c = (
-            group(self.add.s(i, i) for i in range(5)) |
+            group([self.add.s(i, i) for i in range(5)], app=self.app) |
             self.add.s(10) |
             self.add.s(20) |
             self.add.s(30)
         )
-        tasks, _ = c.type.prepare_steps((), c.tasks)
+        tasks, _ = c.prepare_steps((), c.tasks)
         self.assertIsInstance(tasks[0], chord)
         self.assertTrue(tasks[0].body.options['link'])
         self.assertTrue(tasks[0].body.options['link'][0].options['link'])
 
         c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
-        tasks2, _ = c2.type.prepare_steps((), c2.tasks)
+        tasks2, _ = c2.prepare_steps((), c2.tasks)
         self.assertIsInstance(tasks2[1], group)
 
     def test_apply_options(self):
@@ -158,7 +158,7 @@ class test_chain(BuiltinsCase):
                 return self
 
         def s(*args, **kwargs):
-            return static(self.add, args, kwargs, type=self.add)
+            return static(self.add, args, kwargs, type=self.add, app=self.app)
 
         c = s(2, 2) | s(4, 4) | s(8, 8)
         r1 = c.apply_async(task_id='some_id')
@@ -196,18 +196,16 @@ class test_chord(BuiltinsCase):
     def test_forward_options(self):
         body = self.xsum.s()
         x = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x._type = Mock()
-        x._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x.run = Mock(name='chord.run(x)')
         x.apply_async(group_id='some_group_id')
-        self.assertTrue(x._type.called)
-        resbody = x._type.call_args[0][1]
+        self.assertTrue(x.run.called)
+        resbody = x.run.call_args[0][1]
         self.assertEqual(resbody.options['group_id'], 'some_group_id')
         x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x2._type = Mock()
-        x2._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x2.run = Mock(name='chord.run(x2)')
         x2.apply_async(chord='some_chord_id')
-        self.assertTrue(x2._type.called)
-        resbody = x2._type.call_args[0][1]
+        self.assertTrue(x2.run.called)
+        resbody = x2.run.call_args[0][1]
         self.assertEqual(resbody.options['chord'], 'some_chord_id')
 
     def test_apply_eager(self):
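Note: per test_group_to_chord above, a group chained into further tasks is now upgraded to a chord by chain.prepare_steps() itself (previously reached through c.type). A sketch of the same call outside the test suite; the add task and example app are illustrative:

    from celery import Celery, group

    app = Celery('example', set_as_current=False)

    @app.task(shared=False)
    def add(x, y):
        return x + y

    c = (group([add.s(i, i) for i in range(5)], app=app)
         | add.s(10) | add.s(20))
    tasks, _ = c.prepare_steps((), c.tasks)
    print(type(tasks[0]))  # the group step comes back as a chord, per the test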

+ 28 - 14
celery/tests/case.py

@@ -48,7 +48,7 @@ from celery.utils.functional import noop
 from celery.utils.imports import qualname
 
 __all__ = [
-    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY',
+    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'TaskMessage',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -56,7 +56,7 @@ __all__ = [
     'replace_module_value', 'sys_platform', 'reset_modules',
     'patch_modules', 'mock_context', 'mock_open', 'patch_many',
     'assert_signal_called', 'skip_if_pypy',
-    'skip_if_jython', 'body_from_sig', 'restore_logging',
+    'skip_if_jython', 'task_message_from_sig', 'restore_logging',
 ]
 patch = mock.patch
 call = mock.call
@@ -819,7 +819,7 @@ def skip_if_jython(fun):
     return _inner
 
 
-def body_from_sig(app, sig, utc=True):
+def task_message_from_sig(app, sig, utc=True):
     sig.freeze()
     callbacks = sig.options.pop('link', None)
     errbacks = sig.options.pop('link_error', None)
@@ -835,17 +835,14 @@ def body_from_sig(app, sig, utc=True):
         expires = app.now() + timedelta(seconds=expires)
     if expires and isinstance(expires, datetime):
         expires = expires.isoformat()
-    return {
-        'task': sig.task,
-        'id': sig.id,
-        'args': sig.args,
-        'kwargs': sig.kwargs,
-        'callbacks': [dict(s) for s in callbacks] if callbacks else None,
-        'errbacks': [dict(s) for s in errbacks] if errbacks else None,
-        'eta': eta,
-        'utc': utc,
-        'expires': expires,
-    }
+    return TaskMessage(
+        sig.task, id=sig.id, args=sig.args,
+        kwargs=sig.kwargs,
+        callbacks=[dict(s) for s in callbacks] if callbacks else None,
+        errbacks=[dict(s) for s in errbacks] if errbacks else None,
+        eta=eta,
+        expires=expires,
+    )
 
 
 @contextmanager
@@ -861,3 +858,20 @@ def restore_logging():
         sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
         root.level = level
         root.handlers[:] = handlers
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {
+        'id': id,
+        'task': name,
+    }
+    message.headers.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        (args, kwargs), serializer='json',
+    )
+    message.payload = (args, kwargs)
+    return message
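Note: body_from_sig is gone; tests now build a fake kombu message with the TaskMessage factory above (or task_message_from_sig for an existing signature) and feed it straight to the worker Request. A sketch of that flow using only the names visible in this diff; the example app and task are illustrative:

    from celery import Celery
    from celery.tests.case import Mock, TaskMessage
    from celery.worker.request import Request

    app = Celery('example', set_as_current=False)

    @app.task(name='example.add', shared=False)
    def add(x, y):
        return x + y

    msg = TaskMessage(add.name, args=(2, 2), kwargs={})
    req = Request(msg, app=app,
                  on_ack=Mock(name='on_ack'),
                  on_reject=Mock(name='on_reject'))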

+ 0 - 23
celery/tests/compat_modules/test_compat.py

@@ -15,29 +15,6 @@ from celery.utils.timeutils import timedelta_seconds
 from celery.tests.case import AppCase, depends_on_current_app
 
 
-class test_Task(AppCase):
-
-    def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.task import Task as OldTask
-
-        class timkX(OldTask):
-            abstract = True
-
-        with self.Celery(set_as_current=False,
-                         accept_magic_kwargs=True) as app:
-            timkX.bind(app)
-            # see #918
-            self.assertFalse(timkX.accept_magic_kwargs)
-
-            from celery import Task as NewTask
-
-            class timkY(NewTask):
-                abstract = True
-
-            timkY.bind(app)
-            self.assertFalse(timkY.accept_magic_kwargs)
-
-
 @depends_on_current_app
 class test_periodic_tasks(AppCase):
 

+ 0 - 4
celery/tests/compat_modules/test_compat_utils.py

@@ -40,11 +40,7 @@ class test_MagicModule(AppCase):
         def _test_decorators_task():
             pass
 
-        self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
     def test_decorators_periodic_task(self):
         @celery.decorators.periodic_task(run_every=3600)
         def _test_decorators_ptask():
             pass
-
-        self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)

+ 0 - 1
celery/tests/compat_modules/test_decorators.py

@@ -27,7 +27,6 @@ class test_decorators(AppCase):
     def assertCompatDecorator(self, decorator, type, **opts):
         task = decorator(**opts)(add)
         self.assertEqual(task(8, 8), 16)
-        self.assertTrue(task.accept_magic_kwargs)
         self.assertIsInstance(task, type)
 
     def test_task(self):

+ 4 - 4
celery/tests/tasks/test_chord.py

@@ -205,7 +205,7 @@ class test_chord(ChordCase):
         m = Mock()
         m.app.conf.CELERY_ALWAYS_EAGER = False
         m.AsyncResult = AsyncResult
-        prev, chord._type = chord._type, m
+        prev, chord.run = chord.run, m
         try:
             x = chord(self.add.s(i, i) for i in range(10))
             body = self.add.s(2)
@@ -214,9 +214,9 @@ class test_chord(ChordCase):
             # does not modify original signature
             with self.assertRaises(KeyError):
                 body.options['task_id']
-            self.assertTrue(chord._type.called)
+            self.assertTrue(chord.run.called)
         finally:
-            chord._type = prev
+            chord.run = prev
 
 
 class test_Chord_task(ChordCase):
@@ -227,7 +227,7 @@ class test_Chord_task(ChordCase):
         self.app.backend.cleanup.__name__ = 'cleanup'
         Chord = self.app.tasks['celery.chord']
 
-        body = dict()
+        body = self.add.signature()
         Chord(group(self.add.signature((i, i)) for i in range(5)), body)
         Chord([self.add.signature((j, j)) for j in range(5)], body)
         self.assertEqual(self.app.backend.apply_chord.call_count, 2)

+ 0 - 4
celery/tests/tasks/test_tasks.py

@@ -363,10 +363,6 @@ class test_tasks(TasksCase):
         self.mytask.app.Task._app = None
         self.assertIn('unbound', repr(self.mytask.app.Task, ))
 
-    def test_bind_no_magic_kwargs(self):
-        self.mytask.accept_magic_kwargs = None
-        self.mytask.bind(self.mytask.app)
-
     def test_annotate(self):
         with patch('celery.app.task.resolve_all_annotations') as anno:
             anno.return_value = [{'FOO': 'BAR'}]

+ 7 - 4
celery/tests/tasks/test_trace.py

@@ -6,7 +6,7 @@ from celery import states
 from celery.exceptions import Ignore, Retry
 from celery.app.trace import (
     TraceInfo,
-    eager_trace_task,
+    build_tracer,
     trace_task,
     setup_worker_optimizations,
     reset_worker_optimizations,
@@ -14,9 +14,12 @@ from celery.app.trace import (
 from celery.tests.case import AppCase, Mock, patch
 
 
-def trace(app, task, args=(), kwargs={}, propagate=False, **opts):
-    return eager_trace_task(task, 'id-1', args, kwargs,
-                            propagate=propagate, app=app, **opts)
+def trace(app, task, args=(), kwargs={},
+          propagate=False, eager=True, request=None, **opts):
+    t = build_tracer(task.name, task,
+                     eager=eager, propagate=propagate, app=app, **opts)
+    ret = t('id-1', args, kwargs, request)
+    return ret.retval, ret.info
 
 
 class TraceCase(AppCase):
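Note: eager_trace_task is no longer imported; the rewritten trace() helper above builds a tracer with build_tracer and calls it as tracer(task_id, args, kwargs, request), reading .retval and .info off the result. The same shape outside the test, as a hedged sketch (the app and task are illustrative):

    from celery import Celery
    from celery.app.trace import build_tracer

    app = Celery('example', set_as_current=False)

    @app.task(shared=False)
    def add(x, y):
        return x + y

    tracer = build_tracer(add.name, add, eager=True, propagate=False, app=app)
    info = tracer('id-1', (2, 2), {}, None)
    print(info.retval, info.info)  # 4 and, on success, no error info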

+ 7 - 22
celery/tests/worker/test_control.py

@@ -16,12 +16,12 @@ from celery.worker import WorkController as _WC
 from celery.worker import consumer
 from celery.worker import control
 from celery.worker import state as worker_state
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.worker.state import revoked
 from celery.worker.control import Panel
 from celery.worker.pidbox import Pidbox, gPidbox
 
-from celery.tests.case import AppCase, Mock, call, patch
+from celery.tests.case import AppCase, Mock, TaskMessage, call, patch
 
 hostname = socket.gethostname()
 
@@ -250,12 +250,7 @@ class test_ControlPanel(AppCase):
         self.panel.handle('report')
 
     def test_active(self):
-        r = Request({
-            'task': self.mytask.name,
-            'id': 'do re mi',
-            'args': (),
-            'kwargs': {},
-        }, app=self.app)
+        r = Request(TaskMessage(self.mytask.name, 'do re mi'), app=self.app)
         worker_state.active_requests.add(r)
         try:
             self.assertTrue(self.panel.handle('dump_active'))
@@ -347,12 +342,7 @@ class test_ControlPanel(AppCase):
         consumer = Consumer(self.app)
         panel = self.create_panel(consumer=consumer)
         self.assertFalse(panel.handle('dump_schedule'))
-        r = Request({
-            'task': self.mytask.name,
-            'id': 'CAFEBABE',
-            'args': (),
-            'kwargs': {},
-        }, app=self.app)
+        r = Request(TaskMessage(self.mytask.name, 'CAFEBABE'), app=self.app)
         consumer.timer.schedule.enter_at(
             consumer.timer.Entry(lambda x: x, (r, )),
             datetime.now() + timedelta(seconds=10))
@@ -363,19 +353,14 @@ class test_ControlPanel(AppCase):
 
     def test_dump_reserved(self):
         consumer = Consumer(self.app)
-        worker_state.reserved_requests.add(Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': (2, 2),
-            'kwargs': {},
-        }, app=self.app))
+        worker_state.reserved_requests.add(
+            Request(TaskMessage(self.mytask.name, args=(2, 2)), app=self.app),
+        )
         try:
             panel = self.create_panel(consumer=consumer)
             response = panel.handle('dump_reserved', {'safe': True})
             self.assertDictContainsSubset(
                 {'name': self.mytask.name,
-                 'args': (2, 2),
-                 'kwargs': {},
                  'hostname': socket.gethostname()},
                 response[0],
             )

+ 21 - 22
celery/tests/worker/test_loops.py

@@ -11,7 +11,7 @@ from celery.worker import state
 from celery.worker.consumer import Consumer
 from celery.worker.loops import asynloop, synloop
 
-from celery.tests.case import AppCase, Mock, body_from_sig
+from celery.tests.case import AppCase, Mock, task_message_from_sig
 
 
 class X(object):
@@ -107,7 +107,7 @@ def get_task_callback(*args, **kwargs):
     x = X(*args, **kwargs)
     x.blueprint.state = CLOSE
     asynloop(*x.args)
-    return x, x.consumer.callbacks[0]
+    return x, x.consumer.on_message
 
 
 class test_asynloop(AppCase):
@@ -132,45 +132,44 @@ class test_asynloop(AppCase):
 
     def task_context(self, sig, **kwargs):
         x, on_task = get_task_callback(self.app, **kwargs)
-        body = body_from_sig(self.app, sig)
-        message = Mock()
-        strategy = x.obj.strategies[sig.task] = Mock()
-        return x, on_task, body, message, strategy
+        message = task_message_from_sig(self.app, sig)
+        strategy = x.obj.strategies[sig.task] = Mock(name='strategy')
+        return x, on_task, message, strategy
 
     def test_on_task_received(self):
-        _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
-        on_task(body, msg)
+        _, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+        on_task(msg)
         strategy.assert_called_with(
-            msg, body, msg.ack_log_error, msg.reject_log_error, [],
+            msg, None, msg.ack_log_error, msg.reject_log_error, [],
         )
 
     def test_on_task_received_executes_on_task_message(self):
         cbs = [Mock(), Mock(), Mock()]
-        _, on_task, body, msg, strategy = self.task_context(
+        _, on_task, msg, strategy = self.task_context(
             self.add.s(2, 2), on_task_message=cbs,
         )
-        on_task(body, msg)
+        on_task(msg)
         strategy.assert_called_with(
-            msg, body, msg.ack_log_error, msg.reject_log_error, cbs,
+            msg, None, msg.ack_log_error, msg.reject_log_error, cbs,
         )
 
     def test_on_task_message_missing_name(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
-        body.pop('task')
-        on_task(body, msg)
-        x.on_unknown_message.assert_called_with(body, msg)
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+        msg.headers.pop('task')
+        on_task(msg)
+        x.on_unknown_message.assert_called_with(((2, 2), {}), msg)
 
     def test_on_task_not_registered(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
         exc = strategy.side_effect = KeyError(self.add.name)
-        on_task(body, msg)
-        x.on_unknown_task.assert_called_with(body, msg, exc)
+        on_task(msg)
+        x.on_invalid_task.assert_called_with(None, msg, exc)
 
     def test_on_task_InvalidTaskError(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
         exc = strategy.side_effect = InvalidTaskError()
-        on_task(body, msg)
-        x.on_invalid_task.assert_called_with(body, msg, exc)
+        on_task(msg)
+        x.on_invalid_task.assert_called_with(None, msg, exc)
 
     def test_should_terminate(self):
         x = X(self.app)
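Note: these tests reflect the consumer's switch from a two-argument (body, message) callback to a single on_message(message) handler, with strategies receiving None in the old body slot. A rough sketch of that handler shape; the parameter names strategies and handle_unknown are illustrative, only the call shapes come from the tests above:

    def on_task_received(message, strategies, handle_unknown):
        try:
            name = message.headers['task']
        except (KeyError, TypeError):
            # missing/invalid headers: hand the decoded payload back
            return handle_unknown(message.payload, message)
        strategies[name](
            message, None,               # body slot is now None
            message.ack_log_error,
            message.reject_log_error,
            [],                          # on_task_message callbacks
        )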

+ 156 - 260
celery/tests/worker/test_request.py

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, unicode_literals
 
-import anyjson
+import numbers
 import os
 import signal
 import socket
@@ -10,7 +10,6 @@ import sys
 from datetime import datetime, timedelta
 
 from billiard.einfo import ExceptionInfo
-from kombu.transport.base import Message
 from kombu.utils.encoding import from_utf8, default_encode
 
 from celery import states
@@ -27,16 +26,17 @@ from celery.concurrency.base import BasePool
 from celery.exceptions import (
     Ignore,
     InvalidTaskError,
+    Reject,
     Retry,
     TaskRevokedError,
     Terminated,
     WorkerLostError,
 )
-from celery.five import keys, monotonic
+from celery.five import monotonic
 from celery.signals import task_revoked
 from celery.utils import uuid
-from celery.worker import job as module
-from celery.worker.job import Request, logger as req_logger
+from celery.worker import request as module
+from celery.worker.request import Request, logger as req_logger
 from celery.worker.state import revoked
 
 from celery.tests.case import (
@@ -44,8 +44,9 @@ from celery.tests.case import (
     Case,
     Mock,
     SkipTest,
+    TaskMessage,
     assert_signal_called,
-    body_from_sig,
+    task_message_from_sig,
     patch,
 )
 
@@ -85,7 +86,7 @@ def jail(app, task_id, name, args, kwargs):
     task.__trace__ = None  # rebuild
     return trace_task(
         task, task_id, args, kwargs, request=request, eager=False, app=app,
-    )
+    ).retval
 
 
 class test_default_encode(AppCase):
@@ -138,7 +139,7 @@ class test_trace_task(AppCase):
             raise KeyError(i)
         self.mytask_raising = mytask_raising
 
-    @patch('celery.app.trace._logger')
+    @patch('celery.app.trace.logger')
     def test_process_cleanup_fails(self, _logger):
         self.mytask.backend = Mock()
         self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
@@ -227,9 +228,10 @@ class test_Request(AppCase):
 
     def get_request(self, sig, Request=Request, **kwargs):
         return Request(
-            body_from_sig(self.app, sig),
-            on_ack=Mock(),
-            eventer=Mock(),
+            task_message_from_sig(self.app, sig),
+            on_ack=Mock(name='on_ack'),
+            on_reject=Mock(name='on_reject'),
+            eventer=Mock(name='eventer'),
             app=self.app,
             connection_errors=(socket.error, ),
             task=sig.type,
@@ -245,8 +247,9 @@ class test_Request(AppCase):
             self.get_request(self.add.s(2, 2).set(expires='12345'))
 
     def test_valid_expires_with_utc_makes_aware(self):
-        with patch('celery.worker.job.maybe_make_aware') as mma:
-            self.get_request(self.add.s(2, 2).set(expires=10))
+        with patch('celery.worker.request.maybe_make_aware') as mma:
+            self.get_request(self.add.s(2, 2).set(expires=10),
+                             maybe_make_aware=mma)
             self.assertTrue(mma.called)
 
     def test_maybe_expire_when_expires_is_None(self):
@@ -273,7 +276,7 @@ class test_Request(AppCase):
             uuid=req.id, terminated=True, signum='9', expired=False,
         )
 
-    def test_log_error_propagates_MemoryError(self):
+    def test_on_failure_propagates_MemoryError(self):
         einfo = None
         try:
             raise MemoryError()
@@ -282,9 +285,9 @@ class test_Request(AppCase):
         self.assertIsNotNone(einfo)
         req = self.get_request(self.add.s(2, 2))
         with self.assertRaises(MemoryError):
-            req._log_error(einfo)
+            req.on_failure(einfo)
 
-    def test_log_error_when_Ignore(self):
+    def test_on_failure_Ignore_acknowledges(self):
         einfo = None
         try:
             raise Ignore()
@@ -292,48 +295,55 @@ class test_Request(AppCase):
             einfo = ExceptionInfo(internal=True)
         self.assertIsNotNone(einfo)
         req = self.get_request(self.add.s(2, 2))
-        req._log_error(einfo)
+        req.on_failure(einfo)
         req.on_ack.assert_called_with(req_logger, req.connection_errors)
 
+    def test_on_failure_Reject_rejects(self):
+        einfo = None
+        try:
+            raise Reject()
+        except Reject:
+            einfo = ExceptionInfo(internal=True)
+        self.assertIsNotNone(einfo)
+        req = self.get_request(self.add.s(2, 2))
+        req.on_failure(einfo)
+        req.on_reject.assert_called_with(
+            req_logger, req.connection_errors, False,
+        )
+
+    def test_on_failure_Reject_rejects_with_requeue(self):
+        einfo = None
+        try:
+            raise Reject(requeue=True)
+        except Reject:
+            einfo = ExceptionInfo(internal=True)
+        self.assertIsNotNone(einfo)
+        req = self.get_request(self.add.s(2, 2))
+        req.on_failure(einfo)
+        req.on_reject.assert_called_with(
+            req_logger, req.connection_errors, True,
+        )
+
     def test_tzlocal_is_cached(self):
         req = self.get_request(self.add.s(2, 2))
         req._tzlocal = 'foo'
         self.assertEqual(req.tzlocal, 'foo')
 
-    def test_execute_magic_kwargs(self):
-        task = self.add.s(2, 2)
-        task.freeze()
-        req = self.get_request(task)
-        self.add.accept_magic_kwargs = True
-        pool = Mock()
-        req.execute_using_pool(pool)
-        self.assertTrue(pool.apply_async.called)
-        args = pool.apply_async.call_args[1]['args']
-        self.assertEqual(args[0], task.task)
-        self.assertEqual(args[1], task.id)
-        self.assertEqual(args[2], task.args)
-        kwargs = args[3]
-        self.assertEqual(kwargs.get('task_name'), task.task)
-
-    def xRequest(self, body=None, **kwargs):
-        body = dict({'task': self.mytask.name,
-                     'id': uuid(),
-                     'args': [1],
-                     'kwargs': {'f': 'x'}}, **body or {})
-        return Request(body, app=self.app, **kwargs)
+    def xRequest(self, name=None, id=None, args=None, kwargs=None,
+                 on_ack=None, on_reject=None, **head):
+        args = [1] if args is None else args
+        kwargs = {'f': 'x'} if kwargs is None else kwargs
+        on_ack = on_ack or Mock(name='on_ack')
+        on_reject = on_reject or Mock(name='on_reject')
+        message = TaskMessage(
+            name or self.mytask.name, id, args=args, kwargs=kwargs, **head
+        )
+        return Request(message, app=self.app,
+                       on_ack=on_ack, on_reject=on_reject)
 
     def test_task_wrapper_repr(self):
         self.assertTrue(repr(self.xRequest()))
 
-    @patch('celery.worker.job.kwdict')
-    def test_kwdict(self, kwdict):
-        prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True
-        try:
-            self.xRequest()
-            self.assertTrue(kwdict.called)
-        finally:
-            module.NEEDS_KWDICT = prev
-
     def test_sets_store_errors(self):
         self.mytask.ignore_result = True
         job = self.xRequest()
@@ -350,12 +360,7 @@ class test_Request(AppCase):
         self.assertIn('task-frobulated', job.eventer.sent)
 
     def test_on_retry(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         job.eventer = MockEventDispatcher()
         try:
             raise Retry('foo', KeyError('moofoobar'))
@@ -372,12 +377,7 @@ class test_Request(AppCase):
             job.on_failure(einfo)
 
     def test_compat_properties(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.xRequest()
         self.assertEqual(job.task_id, job.id)
         self.assertEqual(job.task_name, job.name)
         job.task_id = 'ID'
@@ -388,12 +388,7 @@ class test_Request(AppCase):
     def test_terminate__task_started(self):
         pool = Mock()
         signum = signal.SIGTERM
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwrgs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         with assert_signal_called(
                 task_revoked, sender=job.task, request=job,
                 terminated=True, expired=False, signum=signum):
@@ -404,12 +399,7 @@ class test_Request(AppCase):
 
     def test_terminate__task_reserved(self):
         pool = Mock()
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         job.time_start = None
         job.terminate(pool, signal='TERM')
         self.assertFalse(pool.terminate_job.called)
@@ -417,13 +407,9 @@ class test_Request(AppCase):
         job.terminate(pool, signal='TERM')
 
     def test_revoked_expires_expired(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-            'expires': datetime.utcnow() - timedelta(days=1),
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x').set(
+            expires=datetime.utcnow() - timedelta(days=1)
+        ))
         with assert_signal_called(
                 task_revoked, sender=job.task, request=job,
                 terminated=False, expired=True, signum=None):
@@ -435,9 +421,9 @@ class test_Request(AppCase):
             )
 
     def test_revoked_expires_not_expired(self):
-        job = self.xRequest({
-            'expires': datetime.utcnow() + timedelta(days=1),
-        })
+        job = self.xRequest(
+            expires=datetime.utcnow() + timedelta(days=1),
+        )
         job.revoked()
         self.assertNotIn(job.id, revoked)
         self.assertNotEqual(
@@ -447,47 +433,15 @@ class test_Request(AppCase):
 
     def test_revoked_expires_ignore_result(self):
         self.mytask.ignore_result = True
-        job = self.xRequest({
-            'expires': datetime.utcnow() - timedelta(days=1),
-        })
+        job = self.xRequest(
+            expires=datetime.utcnow() - timedelta(days=1),
+        )
         job.revoked()
         self.assertIn(job.id, revoked)
         self.assertNotEqual(
             self.mytask.backend.get_status(job.id), states.REVOKED,
         )
 
-    def test_send_email(self):
-        app = self.app
-        mail_sent = [False]
-
-        def mock_mail_admins(*args, **kwargs):
-            mail_sent[0] = True
-
-        def get_ei():
-            try:
-                raise KeyError('moofoobar')
-            except:
-                return ExceptionInfo()
-
-        app.mail_admins = mock_mail_admins
-        self.mytask.send_error_emails = True
-        job = self.xRequest()
-        einfo = get_ei()
-        job.on_failure(einfo)
-        self.assertTrue(mail_sent[0])
-
-        einfo = get_ei()
-        mail_sent[0] = False
-        self.mytask.send_error_emails = False
-        job.on_failure(einfo)
-        self.assertFalse(mail_sent[0])
-
-        einfo = get_ei()
-        mail_sent[0] = False
-        self.mytask.send_error_emails = True
-        job.on_failure(einfo)
-        self.assertTrue(mail_sent[0])
-
     def test_already_revoked(self):
         job = self.xRequest()
         job._already_revoked = True
@@ -510,10 +464,10 @@ class test_Request(AppCase):
 
     def test_execute_acks_late(self):
         self.mytask_raising.acks_late = True
-        job = self.xRequest({
-            'task': self.mytask_raising.name,
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=self.mytask_raising.name,
+            kwargs={},
+        )
         job.execute()
         self.assertTrue(job.acknowledged)
         job.execute()
@@ -555,10 +509,10 @@ class test_Request(AppCase):
     def test_on_success_acks_early(self):
         job = self.xRequest()
         job.time_start = 1
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         prev, module._does_info = module._does_info, False
         try:
-            job.on_success(42)
+            job.on_success((0, 42, 0.001))
             self.assertFalse(job.acknowledged)
         finally:
             module._does_info = prev
@@ -570,7 +524,7 @@ class test_Request(AppCase):
             try:
                 raise SystemExit()
             except SystemExit:
-                job.on_success(ExceptionInfo())
+                job.on_success((1, ExceptionInfo(), 0.01))
             else:
                 assert False
 
@@ -579,7 +533,7 @@ class test_Request(AppCase):
         job.time_start = 1
         job.eventer = Mock()
         job.eventer.send = Mock()
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         self.assertTrue(job.eventer.send.called)
 
     def test_on_success_when_failure(self):
@@ -589,14 +543,14 @@ class test_Request(AppCase):
         try:
             raise KeyError('foo')
         except Exception:
-            job.on_success(ExceptionInfo())
+            job.on_success((1, ExceptionInfo(), 0.001))
             self.assertTrue(job.on_failure.called)
 
     def test_on_success_acks_late(self):
         job = self.xRequest()
         job.time_start = 1
         self.mytask.acks_late = True
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         self.assertTrue(job.acknowledged)
 
     def test_on_failure_WorkerLostError(self):
@@ -634,12 +588,13 @@ class test_Request(AppCase):
             self.assertTrue(job.acknowledged)
 
     def test_from_message_invalid_kwargs(self):
-        body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo')
+        m = TaskMessage(self.mytask.name, args=(), kwargs='foo')
+        req = Request(m, app=self.app)
         with self.assertRaises(InvalidTaskError):
-            Request(body, message=None, app=self.app)
+            raise req.execute().exception
 
-    @patch('celery.worker.job.error')
-    @patch('celery.worker.job.warn')
+    @patch('celery.worker.request.error')
+    @patch('celery.worker.request.warn')
     def test_on_timeout(self, warn, error):
 
         job = self.xRequest()
@@ -662,37 +617,60 @@ class test_Request(AppCase):
         from celery.app import trace
         setup_worker_optimizations(self.app)
         self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
         try:
             self.mytask.__trace__ = build_tracer(
                 self.mytask.name, self.mytask, self.app.loader, 'test',
                 app=self.app,
             )
-            res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {})
-            self.assertEqual(res, 4 ** 4)
+            failed, res, runtime = trace.trace_task_ret(
+                self.mytask.name, tid, message.headers, message.body,
+                message.content_type, message.content_encoding)
+            self.assertFalse(failed)
+            self.assertEqual(res, repr(4 ** 4))
+            self.assertTrue(runtime)
+            self.assertIsInstance(runtime, numbers.Real)
         finally:
             reset_worker_optimizations()
             self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
         delattr(self.mytask, '__trace__')
-        res = trace.trace_task_ret(
-            self.mytask.name, uuid(), [4], {}, app=self.app,
+        failed, res, runtime = trace.trace_task_ret(
+            self.mytask.name, tid, message.headers, message.body,
+            message.content_type, message.content_encoding, app=self.app,
         )
-        self.assertEqual(res, 4 ** 4)
+        self.assertFalse(failed)
+        self.assertEqual(res, repr(4 ** 4))
+        self.assertTrue(runtime)
+        self.assertIsInstance(runtime, numbers.Real)
 
     def test_trace_task_ret(self):
         self.mytask.__trace__ = build_tracer(
             self.mytask.name, self.mytask, self.app.loader, 'test',
             app=self.app,
         )
-        res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
-        self.assertEqual(res, 4 ** 4)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
+        _, R, _ = _trace_task_ret(
+            self.mytask.name, tid, message.headers,
+            message.body, message.content_type,
+            message.content_encoding, app=self.app,
+        )
+        self.assertEqual(R, repr(4 ** 4))
 
     def test_trace_task_ret__no_trace(self):
         try:
             delattr(self.mytask, '__trace__')
         except AttributeError:
             pass
-        res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
-        self.assertEqual(res, 4 ** 4)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
+        _, R, _ = _trace_task_ret(
+            self.mytask.name, tid, message.headers,
+            message.body, message.content_type,
+            message.content_encoding, app=self.app,
+        )
+        self.assertEqual(R, repr(4 ** 4))
 
     def test_trace_catches_exception(self):
 
@@ -705,7 +683,7 @@ class test_Request(AppCase):
 
         with self.assertWarnsRegex(RuntimeWarning,
                                    r'Exception raised outside'):
-            res = trace_task(raising, uuid(), [], {}, app=self.app)
+            res = trace_task(raising, uuid(), [], {}, app=self.app)[0]
             self.assertIsInstance(res, ExceptionInfo)
 
     def test_worker_task_trace_handle_retry(self):
@@ -749,71 +727,39 @@ class test_Request(AppCase):
         finally:
             self.mytask.pop_request()
 
-    def test_task_wrapper_mail_attrs(self):
-        job = self.xRequest({'args': [], 'kwargs': {}})
-        x = job.success_msg % {
-            'name': job.name,
-            'id': job.id,
-            'return_value': 10,
-            'runtime': 0.3641,
-        }
-        self.assertTrue(x)
-        x = job.error_msg % {
-            'name': job.name,
-            'id': job.id,
-            'exc': 'FOOBARBAZ',
-            'description': 'raised unexpected',
-            'traceback': 'foobarbaz',
-        }
-        self.assertTrue(x)
-
     def test_from_message(self):
         us = 'æØåveéðƒeæ'
-        body = {'task': self.mytask.name, 'id': uuid(),
-                'args': [2], 'kwargs': {us: 'bar'}}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
-        job = Request(m.decode(), message=m, app=self.app)
+        tid = uuid()
+        m = TaskMessage(self.mytask.name, tid, args=[2], kwargs={us: 'bar'})
+        job = Request(m, app=self.app)
         self.assertIsInstance(job, Request)
-        self.assertEqual(job.name, body['task'])
-        self.assertEqual(job.id, body['id'])
-        self.assertEqual(job.args, body['args'])
-        us = from_utf8(us)
-        if sys.version_info < (2, 6):
-            self.assertEqual(next(keys(job.kwargs)), us)
-            self.assertIsInstance(next(keys(job.kwargs)), str)
+        self.assertEqual(job.name, self.mytask.name)
+        self.assertEqual(job.id, tid)
+        self.assertIs(job.message, m)
 
     def test_from_message_empty_args(self):
-        body = {'task': self.mytask.name, 'id': uuid()}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
-        job = Request(m.decode(), message=m, app=self.app)
+        tid = uuid()
+        m = TaskMessage(self.mytask.name, tid, args=[], kwargs={})
+        job = Request(m, app=self.app)
         self.assertIsInstance(job, Request)
-        self.assertEqual(job.args, [])
-        self.assertEqual(job.kwargs, {})
 
     def test_from_message_missing_required_fields(self):
-        body = {}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
+        m = TaskMessage(self.mytask.name)
+        m.headers.clear()
         with self.assertRaises(KeyError):
-            Request(m.decode(), message=m, app=self.app)
+            Request(m, app=self.app)
 
     def test_from_message_nonexistant_task(self):
-        body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(),
-                'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
+        m = TaskMessage(
+            'cu.mytask.doesnotexist',
+            args=[2], kwargs={'æØåveéðƒeæ': 'bar'},
+        )
         with self.assertRaises(KeyError):
-            Request(m.decode(), message=m, app=self.app)
+            Request(m, app=self.app)
 
     def test_execute(self):
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}})
+        job = self.xRequest(id=tid, args=[4], kwargs={})
         self.assertEqual(job.execute(), 256)
         meta = self.mytask.backend.get_task_meta(tid)
         self.assertEqual(meta['status'], states.SUCCESS)
@@ -826,38 +772,17 @@ class test_Request(AppCase):
             return i ** i
 
         tid = uuid()
-        job = self.xRequest({
-            'task': mytask_no_kwargs.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=mytask_no_kwargs.name,
+            id=tid,
+            args=[4],
+            kwargs={},
+        )
         self.assertEqual(job.execute(), 256)
         meta = mytask_no_kwargs.backend.get_task_meta(tid)
         self.assertEqual(meta['result'], 256)
         self.assertEqual(meta['status'], states.SUCCESS)
 
-    def test_execute_success_some_kwargs(self):
-        scratch = {'task_id': None}
-
-        @self.app.task(shared=False, accept_magic_kwargs=True)
-        def mytask_some_kwargs(i, task_id):
-            scratch['task_id'] = task_id
-            return i ** i
-
-        tid = uuid()
-        job = self.xRequest({
-            'task': mytask_some_kwargs.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
-        self.assertEqual(job.execute(), 256)
-        meta = mytask_some_kwargs.backend.get_task_meta(tid)
-        self.assertEqual(scratch.get('task_id'), tid)
-        self.assertEqual(meta['result'], 256)
-        self.assertEqual(meta['status'], states.SUCCESS)
-
     def test_execute_ack(self):
         scratch = {'ACK': False}
 
@@ -865,7 +790,7 @@ class test_Request(AppCase):
             scratch['ACK'] = True
 
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack)
+        job = self.xRequest(id=tid, args=[4], on_ack=on_ack)
         self.assertEqual(job.execute(), 256)
         meta = self.mytask.backend.get_task_meta(tid)
         self.assertTrue(scratch['ACK'])
@@ -874,12 +799,12 @@
 
     def test_execute_fail(self):
         tid = uuid()
-        job = self.xRequest({
-            'task': self.mytask_raising.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=self.mytask_raising.name,
+            id=tid,
+            args=[4],
+            kwargs={},
+        )
         self.assertIsInstance(job.execute(), ExceptionInfo)
         meta = self.mytask_raising.backend.get_task_meta(tid)
         self.assertEqual(meta['status'], states.FAILURE)
@@ -887,7 +813,7 @@ class test_Request(AppCase):
 
     def test_execute_using_pool(self):
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
+        job = self.xRequest(id=tid, args=[4])
 
         class MockPool(BasePool):
             target = None
@@ -908,48 +834,18 @@ class test_Request(AppCase):
         self.assertTrue(p.target)
         self.assertEqual(p.args[0], self.mytask.name)
         self.assertEqual(p.args[1], tid)
-        self.assertEqual(p.args[2], [4])
-        self.assertIn('f', p.args[3])
-        self.assertIn([4], p.args)
+        self.assertEqual(p.args[3], job.message.body)
 
-        job.task.accept_magic_kwargs = False
-        job.execute_using_pool(p)
-
-    def test_default_kwargs(self):
-        self.maxDiff = 3000
-        tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
-        self.assertDictEqual(
-            job.extend_with_default_kwargs(), {
-                'f': 'x',
-                'logfile': None,
-                'loglevel': None,
-                'task_id': job.id,
-                'task_retries': 0,
-                'task_is_eager': False,
-                'delivery_info': {
-                    'exchange': None,
-                    'routing_key': None,
-                    'priority': 0,
-                    'redelivered': False,
-                },
-                'task_name': job.name})
-
-    @patch('celery.worker.job.logger')
-    def _test_on_failure(self, exception, logger):
-        app = self.app
+    def _test_on_failure(self, exception):
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
+        job = self.xRequest(id=tid, args=[4])
+        job.send_event = Mock(name='send_event')
         try:
             raise exception
         except Exception:
             exc_info = ExceptionInfo()
-            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
             job.on_failure(exc_info)
-            self.assertTrue(logger.log.called)
-            context = logger.log.call_args[0][2]
-            self.assertEqual(self.mytask.name, context['name'])
-            self.assertIn(tid, context['id'])
+            self.assertTrue(job.send_event.called)
 
     def test_on_failure(self):
         self._test_on_failure(Exception('Inside unit tests'))
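Note: as the tests above show, trace_task_ret now takes the message headers, the still-serialized body and its content type/encoding, and returns a (failed, retval_repr, runtime) triple. A sketch of calling it directly; the example app and task are illustrative, and ignore_result=True is only there to keep the sketch independent of a result backend:

    from celery import Celery, uuid
    from celery.app import trace
    from celery.tests.case import TaskMessage

    app = Celery('example', set_as_current=False)

    @app.task(name='example.pow', shared=False, ignore_result=True)
    def pow_(i):
        return i ** i

    tid = uuid()
    message = TaskMessage(pow_.name, tid, args=[4])
    failed, retval, runtime = trace.trace_task_ret(
        pow_.name, tid, message.headers, message.body,
        message.content_type, message.content_encoding, app=app,
    )
    # per the tests above: failed is False and retval == repr(4 ** 4)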

+ 8 - 9
celery/tests/worker/test_strategy.py

@@ -8,7 +8,7 @@ from kombu.utils.limits import TokenBucket
 from celery.worker import state
 from celery.utils.timeutils import rate
 
-from celery.tests.case import AppCase, Mock, patch, body_from_sig
+from celery.tests.case import AppCase, Mock, patch, task_message_from_sig
 
 
 class test_default_strategy(AppCase):
@@ -22,17 +22,16 @@ class test_default_strategy(AppCase):
 
     class Context(object):
 
-        def __init__(self, sig, s, reserved, consumer, message, body):
+        def __init__(self, sig, s, reserved, consumer, message):
             self.sig = sig
             self.s = s
             self.reserved = reserved
             self.consumer = consumer
             self.message = message
-            self.body = body
 
         def __call__(self, **kwargs):
             return self.s(
-                self.message, self.body,
+                self.message, None,
                 self.message.ack, self.message.reject, [], **kwargs
             )
 
@@ -71,15 +70,14 @@ class test_default_strategy(AppCase):
         if limit:
             bucket = TokenBucket(rate(limit), capacity=1)
             consumer.task_buckets[sig.task] = bucket
+        consumer.controller.state.revoked = set()
         consumer.disable_rate_limits = not rate_limits
         consumer.event_dispatcher.enabled = events
         s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved)
         self.assertTrue(s)
 
-        message = Mock()
-        body = body_from_sig(self.app, sig, utc=utc)
-
-        yield self.Context(sig, s, reserved, consumer, message, body)
+        message = task_message_from_sig(self.app, sig, utc=utc)
+        yield self.Context(sig, s, reserved, consumer, message)
 
     def test_when_logging_disabled(self):
         with patch('celery.worker.strategy.logger') as logger:
@@ -129,9 +127,10 @@ class test_default_strategy(AppCase):
     def test_when_revoked(self):
         task = self.add.s(2, 2)
         task.freeze()
-        state.revoked.add(task.id)
         try:
             with self._context(task) as C:
+                C.consumer.controller.state.revoked.add(task.id)
+                state.revoked.add(task.id)
                 C()
                 with self.assertRaises(ValueError):
                     C.get_request()

+ 113 - 56
celery/tests/worker/test_worker.py

@@ -17,19 +17,21 @@ from celery.bootsteps import RUN, CLOSE, StartStopStep
 from celery.concurrency.base import BasePool
 from celery.datastructures import AttributeDict
 from celery.exceptions import (
-    WorkerShutdown, WorkerTerminate, TaskRevokedError,
+    WorkerShutdown, WorkerTerminate, TaskRevokedError, InvalidTaskError,
 )
 from celery.five import Empty, range, Queue as FastQueue
 from celery.utils import uuid
 from celery.worker import components
 from celery.worker import consumer
 from celery.worker.consumer import Consumer as __Consumer
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import worker_direct
 from celery.utils.serialization import pickle
 from celery.utils.timer2 import Timer
 
-from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging
+from celery.tests.case import (
+    AppCase, Mock, SkipTest, TaskMessage, patch, restore_logging,
+)
 
 
 def MockStep(step=None):
@@ -123,6 +125,13 @@ def create_message(channel, **data):
     return m
 
 
+def create_task_message(channel, *args, **kwargs):
+    m = TaskMessage(*args, **kwargs)
+    m.channel = channel
+    m.delivery_info = {'consumer_tag': 'mock'}
+    return m
+
+
 class test_Consumer(AppCase):
 
     def setup(self):
@@ -144,7 +153,7 @@ class test_Consumer(AppCase):
         l.connection = Mock()
         l.connection.info.return_value = {'foo': 'bar'}
         l.controller = l.app.WorkController()
-        l.controller.pool = Mock()
+        l.pool = l.controller.pool = Mock()
         l.controller.pool.info.return_value = [Mock(), Mock()]
         l.controller.consumer = l
         info = l.controller.stats()
@@ -158,6 +167,8 @@ class test_Consumer(AppCase):
 
     def test_connection(self):
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
 
         l.blueprint.start(l)
         self.assertIsInstance(l.connection, Connection)
@@ -207,32 +218,35 @@ class test_Consumer(AppCase):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.steps.pop()
-        backend = Mock()
-        m = create_message(backend, unknown={'baz': '!!!'})
+        channel = Mock()
+        m = create_message(channel, unknown={'baz': '!!!'})
         l.event_dispatcher = mock_event_dispatcher()
         l.node = MockNode()
 
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         self.assertTrue(warn.call_count)
 
     @patch('celery.worker.strategy.to_timestamp')
     def test_receive_message_eta_OverflowError(self, to_timestamp):
         to_timestamp.side_effect = OverflowError()
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.steps.pop()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=('2, 2'),
-                           kwargs={},
-                           eta=datetime.now().isoformat())
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=('2, 2'), kwargs={},
+            eta=datetime.now().isoformat(),
+        )
         l.event_dispatcher = mock_event_dispatcher()
         l.node = MockNode()
         l.update_strategies()
         l.qos = Mock()
 
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         self.assertTrue(m.acknowledged)
 
     @patch('celery.worker.consumer.error')
@@ -241,13 +255,19 @@ class test_Consumer(AppCase):
         l.blueprint.state = RUN
         l.event_dispatcher = mock_event_dispatcher()
         l.steps.pop()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=(1, 2), kwargs='foobarbaz', id=1)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=(1, 2), kwargs='foobarbaz', id=1)
         l.update_strategies()
         l.event_dispatcher = mock_event_dispatcher()
+        strat = l.strategies[self.foo_task.name] = Mock(name='strategy')
+        strat.side_effect = InvalidTaskError()
 
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
+        self.assertTrue(error.called)
         self.assertIn('Received invalid task message', error.call_args[0][0])
 
     @patch('celery.worker.consumer.crit')
@@ -274,18 +294,22 @@ class test_Consumer(AppCase):
 
         with self.assertRaises(WorkerShutdown):
             l.loop(*l.loop_args())
-        self.assertTrue(l.task_consumer.register_callback.called)
-        return l.task_consumer.register_callback.call_args[0][0]
+        self.assertTrue(l.task_consumer.on_message)
+        return l.task_consumer.on_message
 
     def test_receieve_message(self):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.event_dispatcher = mock_event_dispatcher()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=[2, 4, 8], kwargs={})
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=[2, 4, 8], kwargs={},
+        )
         l.update_strategies()
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
 
         in_bucket = self.buffer.get_nowait()
         self.assertIsInstance(in_bucket, Request)
@@ -306,6 +330,8 @@ class test_Consumer(AppCase):
 
         l = MockConsumer(self.buffer.put, timer=self.timer,
                          send_events=False, pool=BasePool(), app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.channel_errors = (KeyError, )
         with self.assertRaises(KeyError):
             l.start()
@@ -324,6 +350,8 @@ class test_Consumer(AppCase):
 
         l = MockConsumer(self.buffer.put, timer=self.timer,
                          send_events=False, pool=BasePool(), app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
 
         l.connection_errors = (KeyError, )
         self.assertRaises(SyntaxError, l.start)
@@ -406,6 +434,8 @@ class test_Consumer(AppCase):
     def test_apply_eta_task(self):
         from celery.worker import state
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.qos = QoS(None, 10)
 
         task = object()
@@ -417,10 +447,12 @@ class test_Consumer(AppCase):
 
     def test_receieve_message_eta_isoformat(self):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.steps.pop()
-        m = create_message(
-            Mock(), task=self.foo_task.name,
+        m = create_task_message(
+            Mock(), self.foo_task.name,
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
             args=[2, 4, 8], kwargs={},
         )
@@ -432,7 +464,7 @@ class test_Consumer(AppCase):
         l.enabled = False
         l.update_strategies()
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         l.timer.stop()
         l.timer.join(1)
 
@@ -469,27 +501,31 @@ class test_Consumer(AppCase):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.steps.pop()
-        backend = Mock()
+        channel = Mock()
         id = uuid()
-        t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8],
-                           kwargs={}, id=id)
+        t = create_task_message(
+            channel, self.foo_task.name,
+            args=[2, 4, 8], kwargs={}, id=id,
+        )
         from celery.worker.state import revoked
         revoked.add(id)
 
         callback = self._get_on_message(l)
-        callback(t.decode(), t)
+        callback(t)
         self.assertTrue(self.buffer.empty())
 
     def test_receieve_message_not_registered(self):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.steps.pop()
-        backend = Mock()
-        m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={})
+        channel = Mock(name='channel')
+        m = create_task_message(
+            channel, 'x.X.31x', args=[2, 4, 8], kwargs={},
+        )
 
         l.event_dispatcher = mock_event_dispatcher()
         callback = self._get_on_message(l)
-        self.assertFalse(callback(m.decode(), m))
+        self.assertFalse(callback(m))
         with self.assertRaises(Empty):
             self.buffer.get_nowait()
         self.assertTrue(self.timer.empty())
@@ -498,22 +534,28 @@ class test_Consumer(AppCase):
     @patch('celery.worker.consumer.logger')
     def test_receieve_message_ack_raises(self, logger, warn):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
-        backend = Mock()
-        m = create_message(backend, args=[2, 4, 8], kwargs={})
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[2, 4, 8], kwargs={},
+        )
+        m.headers = None
 
         l.event_dispatcher = mock_event_dispatcher()
+        l.update_strategies()
         l.connection_errors = (socket.error, )
         m.reject = Mock()
         m.reject.side_effect = socket.error('foo')
         callback = self._get_on_message(l)
-        self.assertFalse(callback(m.decode(), m))
+        self.assertFalse(callback(m))
         self.assertTrue(warn.call_count)
         with self.assertRaises(Empty):
             self.buffer.get_nowait()
         self.assertTrue(self.timer.empty())
-        m.reject.assert_called_with(requeue=False)
-        self.assertTrue(logger.critical.call_count)
+        m.reject_log_error.assert_called_with(logger, l.connection_errors)
 
     def test_receive_message_eta(self):
         import sys
@@ -526,13 +568,15 @@ class test_Consumer(AppCase):
         pp('TEST RECEIVE MESSAGE ETA')
         pp('+CREATE MYKOMBUCONSUMER')
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         pp('-CREATE MYKOMBUCONSUMER')
         l.steps.pop()
         l.event_dispatcher = mock_event_dispatcher()
-        backend = Mock()
+        channel = Mock(name='channel')
         pp('+ CREATE MESSAGE')
-        m = create_message(
-            backend, task=self.foo_task.name,
+        m = create_task_message(
+            channel, self.foo_task.name,
             args=[2, 4, 8], kwargs={},
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
         )
@@ -556,7 +600,7 @@ class test_Consumer(AppCase):
             callback = self._get_on_message(l)
             pp('- GET ON MESSAGE')
             pp('+ CALLBACK')
-            callback(m.decode(), m)
+            callback(m)
             pp('- CALLBACK')
         finally:
             pp('+ STOP TIMER')
@@ -708,6 +752,8 @@ class test_Consumer(AppCase):
         init_callback = Mock()
         l = _Consumer(self.buffer.put, timer=self.timer,
                       init_callback=init_callback, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.task_consumer = Mock()
         l.broadcast_consumer = Mock()
         l.qos = _QoS()
@@ -730,6 +776,8 @@ class test_Consumer(AppCase):
         init_callback.reset_mock()
         l = _Consumer(self.buffer.put, timer=self.timer, app=self.app,
                       send_events=False, init_callback=init_callback)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.qos = _QoS()
         l.task_consumer = Mock()
         l.broadcast_consumer = Mock()
@@ -741,8 +789,9 @@ class test_Consumer(AppCase):
 
     def test_reset_connection_with_no_node(self):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.steps.pop()
-        self.assertEqual(None, l.pool)
         l.blueprint.start(l)
 
 
@@ -925,10 +974,12 @@ class test_WorkController(AppCase):
     def test_process_task(self):
         worker = self.worker
         worker.pool = Mock()
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker._process_task(task)
         self.assertEqual(worker.pool.apply_async.call_count, 1)
         worker.pool.stop()
@@ -937,10 +988,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C')
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker.steps = []
         worker.blueprint.state = RUN
         with self.assertRaises(KeyboardInterrupt):
@@ -950,10 +1003,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = WorkerTerminate()
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker.steps = []
         worker.blueprint.state = RUN
         with self.assertRaises(SystemExit):
@@ -963,10 +1018,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = KeyError('some exception')
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker._process_task(task)
         worker.pool.stop()
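
The tests above now build an undecoded task message and hand it straight to ``Request``; there is no separate ``m.decode()`` step any more. A minimal sketch of that pattern, assuming ``create_task_message`` is the helper added to ``celery.tests.case`` in this merge (its keyword arguments beyond those shown in the diff are an assumption)::

    from mock import Mock

    from celery.tests.case import create_task_message  # assumed location
    from celery.worker.request import Request

    def build_request(app, task_name):
        # A mocked channel is enough here: nothing is published, and
        # Request reads headers/body directly off the message object.
        channel = Mock(name='channel')
        message = create_task_message(
            channel, task_name, args=(2, 2), kwargs={},
        )
        return Request(message, app=app)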
 

+ 0 - 9
celery/utils/__init__.py

@@ -19,7 +19,6 @@ import datetime
 
 from collections import Callable
 from functools import partial, wraps
-from inspect import getargspec
 from pprint import pprint
 
 from kombu.entity import Exchange, Queue
@@ -189,14 +188,6 @@ def is_iterable(obj):
     return True
 
 
-def fun_takes_kwargs(fun, kwlist=[]):
-    # deprecated
-    S = getattr(fun, 'argspec', getargspec(fun))
-    if S.keywords is not None:
-        return kwlist
-    return [kw for kw in kwlist if kw in S.args]
-
-
 def isatty(fh):
     try:
         return fh.isatty()

+ 3 - 1
celery/utils/objects.py

@@ -74,7 +74,9 @@ class FallbackContext(object):
     def __enter__(self):
         if self.provided is not None:
             return self.provided
-        context = self._context = self.fallback(*self.fb_args, **self.fb_kwargs).__enter__()
+        context = self._context = self.fallback(
+            *self.fb_args, **self.fb_kwargs
+        ).__enter__()
         return context
 
     def __exit__(self, *exc_info):
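
For context, ``FallbackContext`` returns an already-provided context manager untouched and only enters the fallback factory when nothing was provided; the constructor signature ``(provided, fallback, *args, **kwargs)`` is inferred from the attributes used above. A small usage sketch with an illustrative connection/pool pairing::

    from celery.utils.objects import FallbackContext

    def connection_or_acquire(connection, pool):
        # Reuse the given connection if there is one, otherwise acquire a
        # fresh one from the pool for the duration of the ``with`` block.
        return FallbackContext(connection, pool.acquire, block=True)

    # with connection_or_acquire(existing_connection, connection_pool) as c:
    #     ...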

+ 1 - 1
celery/worker/autoscale.py

@@ -81,7 +81,7 @@ class Autoscaler(bgThread):
             self.maybe_scale()
         sleep(1.0)
 
-    def _maybe_scale(self):
+    def _maybe_scale(self, req=None):
         procs = self.processes
         cur = min(self.qty, self.max_concurrency)
         if cur > procs:

+ 35 - 23
celery/worker/consumer.py

@@ -35,7 +35,7 @@ from celery import bootsteps
 from celery.app.trace import build_tracer
 from celery.canvas import signature
 from celery.exceptions import InvalidTaskError
-from celery.five import items, values
+from celery.five import buffer_t, items, values
 from celery.utils.functional import noop
 from celery.utils.log import get_logger
 from celery.utils.text import truncate
@@ -44,14 +44,6 @@ from celery.utils.timeutils import humanize_seconds, rate
 from . import heartbeat, loops, pidbox
 from .state import task_reserved, maybe_shutdown, revoked, reserved_requests
 
-try:
-    buffer_t = buffer
-except NameError:  # pragma: no cover
-    # Py3 does not have buffer, but we only need isinstance.
-
-    class buffer_t(object):  # noqa
-        pass
-
 __all__ = [
     'Consumer', 'Connection', 'Events', 'Heart', 'Control',
     'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body',
@@ -127,6 +119,8 @@ MINGLE_GET_FIELDS = itemgetter('clock', 'revoked')
 
 
 def dump_body(m, body):
+    # v2 protocol does not deserialize body
+    body = m.body if body is None else body
     if isinstance(body, buffer_t):
         body = bytes_t(body)
     return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024),
@@ -445,21 +439,38 @@ class Consumer(object):
         on_invalid_task = self.on_invalid_task
         callbacks = self.on_task_message
 
-        def on_task_received(body, message):
-            try:
-                name = body['task']
-            except (KeyError, TypeError):
-                return on_unknown_message(body, message)
+        def on_task_received(message):
 
+            # payload will only be set for v1 protocol, since v2
+            # will defer deserializing the message body to the pool.
+            payload = None
             try:
-                strategies[name](message, body,
-                                 message.ack_log_error,
-                                 message.reject_log_error,
-                                 callbacks)
+                type_ = message.headers['task']                # protocol v2
+            except TypeError:
+                return on_unknown_message(None, message)
+            except KeyError:
+                payload = message.payload
+                try:
+                    type_, payload = payload['task'], payload  # protocol v1
+                except (TypeError, KeyError):
+                    return on_unknown_message(payload, message)
+            try:
+                strategy = strategies[type_]
             except KeyError as exc:
-                on_unknown_task(body, message, exc)
-            except InvalidTaskError as exc:
-                on_invalid_task(body, message, exc)
+                return on_unknown_task(payload, message, exc)
+            else:
+                try:
+                    strategy(
+                        message, payload, message.ack_log_error,
+                        message.reject_log_error, callbacks,
+                    )
+                except InvalidTaskError as exc:
+                    return on_invalid_task(payload, message, exc)
+                except MemoryError:
+                    raise
+                except Exception as exc:
+                    # XXX handle as internal error?
+                    return on_invalid_task(payload, message, exc)
 
         return on_task_received
 
@@ -541,8 +552,9 @@ class Heart(bootsteps.StartStopStep):
         c.heart = None
 
     def start(self, c):
-        c.heart = heartbeat.Heart(c.timer, c.event_dispatcher,
-            self.heartbeat_interval)
+        c.heart = heartbeat.Heart(
+            c.timer, c.event_dispatcher, self.heartbeat_interval,
+        )
         c.heart.start()
 
     def stop(self, c):
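
The core of this file's change is the new ``on_task_received`` closure above: a hybrid (v2) message names its task in ``message.headers`` and keeps the body serialized for the pool, while a v1 message must be decoded here via ``message.payload``. A distilled sketch of that dispatch, with the consumer's error callbacks passed in as hypothetical parameters::

    def dispatch(message, strategies, on_unknown_message, on_unknown_task):
        payload = None
        try:
            type_ = message.headers['task']    # protocol v2: task in headers
        except TypeError:
            return on_unknown_message(None, message)   # headers is None
        except KeyError:
            payload = message.payload                  # protocol v1: decode now
            try:
                type_ = payload['task']
            except (TypeError, KeyError):
                return on_unknown_message(payload, message)
        try:
            strategy = strategies[type_]
        except KeyError as exc:
            return on_unknown_task(payload, message, exc)
        # For v2 the strategy receives payload=None and defers decoding.
        return strategy(message, payload,
                        message.ack_log_error, message.reject_log_error, [])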

+ 4 - 2
celery/worker/control.py

@@ -22,8 +22,8 @@ from celery.utils.log import get_logger
 from celery.utils import jsonify
 
 from . import state as worker_state
+from .request import Request
 from .state import revoked
-from .job import Request
 
 __all__ = ['Panel']
 DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit')
@@ -364,7 +364,9 @@ def active_queues(state):
 
 
 def _wanted_config_key(key):
-    return isinstance(key, string_t) and key.isupper() and not key.startswith('__')
+    return (isinstance(key, string_t) and
+            key.isupper() and
+            not key.startswith('__'))
 
 
 @Panel.register

+ 0 - 590
celery/worker/job.py

@@ -1,590 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.worker.job
-    ~~~~~~~~~~~~~~~~~
-
-    This module defines the :class:`Request` class,
-    which specifies how tasks are executed.
-
-"""
-from __future__ import absolute_import, unicode_literals
-
-import logging
-import socket
-import sys
-
-from billiard.einfo import ExceptionInfo
-from datetime import datetime
-from weakref import ref
-
-from kombu.utils import kwdict, reprcall
-from kombu.utils.encoding import safe_repr, safe_str
-
-from celery import signals
-from celery.app.trace import trace_task, trace_task_ret
-from celery.exceptions import (
-    Ignore, TaskRevokedError, InvalidTaskError,
-    SoftTimeLimitExceeded, TimeLimitExceeded,
-    WorkerLostError, Terminated, Retry, Reject,
-)
-from celery.five import items, monotonic, string, string_t
-from celery.platforms import signals as _signals
-from celery.utils import fun_takes_kwargs
-from celery.utils.functional import noop
-from celery.utils.log import get_logger
-from celery.utils.serialization import get_pickled_exception
-from celery.utils.text import truncate
-from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware
-
-from . import state
-
-__all__ = ['Request']
-
-IS_PYPY = hasattr(sys, 'pypy_version_info')
-
-logger = get_logger(__name__)
-debug, info, warn, error = (logger.debug, logger.info,
-                            logger.warning, logger.error)
-_does_info = False
-_does_debug = False
-
-#: Max length of result representation
-RESULT_MAXLEN = 128
-
-
-def __optimize__():
-    # this is also called by celery.app.trace.setup_worker_optimizations
-    global _does_debug
-    global _does_info
-    _does_debug = logger.isEnabledFor(logging.DEBUG)
-    _does_info = logger.isEnabledFor(logging.INFO)
-__optimize__()
-
-# Localize
-tz_utc = timezone.utc
-tz_or_local = timezone.tz_or_local
-send_revoked = signals.task_revoked.send
-
-task_accepted = state.task_accepted
-task_ready = state.task_ready
-revoked_tasks = state.revoked
-
-NEEDS_KWDICT = sys.version_info <= (2, 6)
-
-#: Use when no message object passed to :class:`Request`.
-DEFAULT_FIELDS = {
-    'headers': None,
-    'reply_to': None,
-    'correlation_id': None,
-    'delivery_info': {
-        'exchange': None,
-        'routing_key': None,
-        'priority': 0,
-        'redelivered': False,
-    },
-}
-
-
-class Request(object):
-    """A request for task execution."""
-    if not IS_PYPY:  # pragma: no cover
-        __slots__ = (
-            'app', 'name', 'id', 'args', 'kwargs', 'on_ack',
-            'hostname', 'eventer', 'connection_errors', 'task', 'eta',
-            'expires', 'request_dict', 'acknowledged', 'on_reject',
-            'utc', 'time_start', 'worker_pid', '_already_revoked',
-            '_terminate_on_ack', '_apply_result',
-            '_tzlocal', '__weakref__', '__dict__',
-        )
-
-    #: Format string used to log task success.
-    success_msg = """\
-        Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s
-    """
-
-    #: Format string used to log task failure.
-    error_msg = """\
-        Task %(name)s[%(id)s] %(description)s: %(exc)s
-    """
-
-    #: Format string used to log internal error.
-    internal_error_msg = """\
-        Task %(name)s[%(id)s] %(description)s: %(exc)s
-    """
-
-    ignored_msg = """\
-        Task %(name)s[%(id)s] %(description)s
-    """
-
-    rejected_msg = """\
-        Task %(name)s[%(id)s] %(exc)s
-    """
-
-    #: Format string used to log task retry.
-    retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s"""
-
-    def __init__(self, body, on_ack=noop,
-                 hostname=None, eventer=None, app=None,
-                 connection_errors=None, request_dict=None,
-                 message=None, task=None, on_reject=noop, **opts):
-        self.app = app
-        name = self.name = body['task']
-        self.id = body['id']
-        self.args = body.get('args', [])
-        self.kwargs = body.get('kwargs', {})
-        try:
-            self.kwargs.items
-        except AttributeError:
-            raise InvalidTaskError(
-                'Task keyword arguments is not a mapping')
-        if NEEDS_KWDICT:
-            self.kwargs = kwdict(self.kwargs)
-        eta = body.get('eta')
-        expires = body.get('expires')
-        utc = self.utc = body.get('utc', False)
-        self.on_ack = on_ack
-        self.on_reject = on_reject
-        self.hostname = hostname or socket.gethostname()
-        self.eventer = eventer
-        self.connection_errors = connection_errors or ()
-        self.task = task or self.app.tasks[name]
-        self.acknowledged = self._already_revoked = False
-        self.time_start = self.worker_pid = self._terminate_on_ack = None
-        self._apply_result = None
-        self._tzlocal = None
-
-        # timezone means the message is timezone-aware, and the only timezone
-        # supported at this point is UTC.
-        if eta is not None:
-            try:
-                self.eta = maybe_iso8601(eta)
-            except (AttributeError, ValueError, TypeError) as exc:
-                raise InvalidTaskError(
-                    'invalid eta value {0!r}: {1}'.format(eta, exc))
-            if utc:
-                self.eta = maybe_make_aware(self.eta, self.tzlocal)
-        else:
-            self.eta = None
-        if expires is not None:
-            try:
-                self.expires = maybe_iso8601(expires)
-            except (AttributeError, ValueError, TypeError) as exc:
-                raise InvalidTaskError(
-                    'invalid expires value {0!r}: {1}'.format(expires, exc))
-            if utc:
-                self.expires = maybe_make_aware(self.expires, self.tzlocal)
-        else:
-            self.expires = None
-
-        if message:
-            delivery_info = message.delivery_info or {}
-            properties = message.properties or {}
-            body.update({
-                'headers': message.headers,
-                'reply_to': properties.get('reply_to'),
-                'correlation_id': properties.get('correlation_id'),
-                'delivery_info': {
-                    'exchange': delivery_info.get('exchange'),
-                    'routing_key': delivery_info.get('routing_key'),
-                    'priority': delivery_info.get('priority'),
-                    'redelivered': delivery_info.get('redelivered'),
-                }
-
-            })
-        else:
-            body.update(DEFAULT_FIELDS)
-        self.request_dict = body
-
-    @property
-    def delivery_info(self):
-        return self.request_dict['delivery_info']
-
-    def extend_with_default_kwargs(self):
-        """Extend the tasks keyword arguments with standard task arguments.
-
-        Currently these are `logfile`, `loglevel`, `task_id`,
-        `task_name`, `task_retries`, and `delivery_info`.
-
-        See :meth:`celery.task.base.Task.run` for more information.
-
-        Magic keyword arguments are deprecated and will be removed
-        in version 4.0.
-
-        """
-        kwargs = dict(self.kwargs)
-        default_kwargs = {'logfile': None,   # deprecated
-                          'loglevel': None,  # deprecated
-                          'task_id': self.id,
-                          'task_name': self.name,
-                          'task_retries': self.request_dict.get('retries', 0),
-                          'task_is_eager': False,
-                          'delivery_info': self.delivery_info}
-        fun = self.task.run
-        supported_keys = fun_takes_kwargs(fun, default_kwargs)
-        extend_with = {key: val for key, val in items(default_kwargs)
-                       if key in supported_keys}
-        kwargs.update(extend_with)
-        return kwargs
-
-    def execute_using_pool(self, pool, **kwargs):
-        """Used by the worker to send this task to the pool.
-
-        :param pool: A :class:`celery.concurrency.base.TaskPool` instance.
-
-        :raises celery.exceptions.TaskRevokedError: if the task was revoked
-            and ignored.
-
-        """
-        uuid = self.id
-        task = self.task
-        if self.revoked():
-            raise TaskRevokedError(uuid)
-
-        hostname = self.hostname
-        kwargs = self.kwargs
-        if task.accept_magic_kwargs:
-            kwargs = self.extend_with_default_kwargs()
-        request = self.request_dict
-        request.update({'hostname': hostname, 'is_eager': False,
-                        'delivery_info': self.delivery_info,
-                        'group': self.request_dict.get('taskset')})
-        timeout, soft_timeout = request.get('timelimit', (None, None))
-        timeout = timeout or task.time_limit
-        soft_timeout = soft_timeout or task.soft_time_limit
-        result = pool.apply_async(
-            trace_task_ret,
-            args=(self.name, uuid, self.args, kwargs, request),
-            accept_callback=self.on_accepted,
-            timeout_callback=self.on_timeout,
-            callback=self.on_success,
-            error_callback=self.on_failure,
-            soft_timeout=soft_timeout,
-            timeout=timeout,
-            correlation_id=uuid,
-        )
-        # cannot create weakref to None
-        self._apply_result = ref(result) if result is not None else result
-        return result
-
-    def execute(self, loglevel=None, logfile=None):
-        """Execute the task in a :func:`~celery.app.trace.trace_task`.
-
-        :keyword loglevel: The loglevel used by the task.
-        :keyword logfile: The logfile used by the task.
-
-        """
-        if self.revoked():
-            return
-
-        # acknowledge task as being processed.
-        if not self.task.acks_late:
-            self.acknowledge()
-
-        kwargs = self.kwargs
-        if self.task.accept_magic_kwargs:
-            kwargs = self.extend_with_default_kwargs()
-        request = self.request_dict
-        request.update({'loglevel': loglevel, 'logfile': logfile,
-                        'hostname': self.hostname, 'is_eager': False,
-                        'delivery_info': self.delivery_info})
-        retval = trace_task(self.task, self.id, self.args, kwargs, request,
-                            hostname=self.hostname, loader=self.app.loader,
-                            app=self.app)
-        self.acknowledge()
-        return retval
-
-    def maybe_expire(self):
-        """If expired, mark the task as revoked."""
-        if self.expires:
-            now = datetime.now(tz_or_local(self.tzlocal) if self.utc else None)
-            if now > self.expires:
-                revoked_tasks.add(self.id)
-                return True
-
-    def terminate(self, pool, signal=None):
-        signal = _signals.signum(signal or 'TERM')
-        if self.time_start:
-            pool.terminate_job(self.worker_pid, signal)
-            self._announce_revoked('terminated', True, signal, False)
-        else:
-            self._terminate_on_ack = pool, signal
-        if self._apply_result is not None:
-            obj = self._apply_result()  # is a weakref
-            if obj is not None:
-                obj.terminate(signal)
-
-    def _announce_revoked(self, reason, terminated, signum, expired):
-        task_ready(self)
-        self.send_event('task-revoked',
-                        terminated=terminated, signum=signum, expired=expired)
-        if self.store_errors:
-            self.task.backend.mark_as_revoked(self.id, reason, request=self)
-        self.acknowledge()
-        self._already_revoked = True
-        send_revoked(self.task, request=self,
-                     terminated=terminated, signum=signum, expired=expired)
-
-    def revoked(self):
-        """If revoked, skip task and mark state."""
-        expired = False
-        if self._already_revoked:
-            return True
-        if self.expires:
-            expired = self.maybe_expire()
-        if self.id in revoked_tasks:
-            info('Discarding revoked task: %s[%s]', self.name, self.id)
-            self._announce_revoked(
-                'expired' if expired else 'revoked', False, None, expired,
-            )
-            return True
-        return False
-
-    def send_event(self, type, **fields):
-        if self.eventer and self.eventer.enabled:
-            self.eventer.send(type, uuid=self.id, **fields)
-
-    def on_accepted(self, pid, time_accepted):
-        """Handler called when task is accepted by worker pool."""
-        self.worker_pid = pid
-        self.time_start = time_accepted
-        task_accepted(self)
-        if not self.task.acks_late:
-            self.acknowledge()
-        self.send_event('task-started')
-        if _does_debug:
-            debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid)
-        if self._terminate_on_ack is not None:
-            self.terminate(*self._terminate_on_ack)
-
-    def on_timeout(self, soft, timeout):
-        """Handler called if the task times out."""
-        task_ready(self)
-        if soft:
-            warn('Soft time limit (%ss) exceeded for %s[%s]',
-                 timeout, self.name, self.id)
-            exc = SoftTimeLimitExceeded(timeout)
-        else:
-            error('Hard time limit (%ss) exceeded for %s[%s]',
-                  timeout, self.name, self.id)
-            exc = TimeLimitExceeded(timeout)
-
-        if self.store_errors:
-            self.task.backend.mark_as_failure(self.id, exc, request=self)
-
-        if self.task.acks_late:
-            self.acknowledge()
-
-    def on_success(self, ret_value, now=None, nowfun=monotonic):
-        """Handler called if the task was successfully processed."""
-        if isinstance(ret_value, ExceptionInfo):
-            if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)):
-                raise ret_value.exception
-            return self.on_failure(ret_value)
-        task_ready(self)
-
-        if self.task.acks_late:
-            self.acknowledge()
-
-        if self.eventer and self.eventer.enabled:
-            now = nowfun()
-            runtime = self.time_start and (now - self.time_start) or 0
-            self.send_event('task-succeeded',
-                            result=safe_repr(ret_value), runtime=runtime)
-
-        if _does_info:
-            now = now or nowfun()
-            runtime = self.time_start and (now - self.time_start) or 0
-            info(self.success_msg.strip(), {
-                'id': self.id, 'name': self.name,
-                'return_value': self.repr_result(ret_value),
-                'runtime': runtime})
-
-    def on_retry(self, exc_info):
-        """Handler called if the task should be retried."""
-        if self.task.acks_late:
-            self.acknowledge()
-
-        self.send_event('task-retried',
-                        exception=safe_repr(exc_info.exception.exc),
-                        traceback=safe_str(exc_info.traceback))
-
-        if _does_info:
-            info(self.retry_msg.strip(),
-                 {'id': self.id, 'name': self.name,
-                  'exc': exc_info.exception})
-
-    def on_failure(self, exc_info):
-        """Handler called if the task raised an exception."""
-        task_ready(self)
-        send_failed_event = True
-
-        if not exc_info.internal:
-            exc = exc_info.exception
-
-            if isinstance(exc, Retry):
-                return self.on_retry(exc_info)
-
-            # These are special cases where the process would not have had
-            # time to write the result.
-            if self.store_errors:
-                if isinstance(exc, WorkerLostError):
-                    self.task.backend.mark_as_failure(
-                        self.id, exc, request=self,
-                    )
-                elif isinstance(exc, Terminated):
-                    self._announce_revoked(
-                        'terminated', True, string(exc), False)
-                    send_failed_event = False  # already sent revoked event
-            # (acks_late) acknowledge after result stored.
-            if self.task.acks_late:
-                self.acknowledge()
-        self._log_error(exc_info, send_failed_event=send_failed_event)
-
-    def _log_error(self, einfo, send_failed_event=True):
-        einfo.exception = get_pickled_exception(einfo.exception)
-        eobj = einfo.exception
-        exception, traceback, exc_info, internal, sargs, skwargs = (
-            safe_repr(eobj),
-            safe_str(einfo.traceback),
-            einfo.exc_info,
-            einfo.internal,
-            safe_repr(self.args),
-            safe_repr(self.kwargs),
-        )
-        task = self.task
-        if task.throws and isinstance(eobj, task.throws):
-            do_send_mail, severity, exc_info, description = (
-                False, logging.INFO, None, 'raised expected',
-            )
-        else:
-            do_send_mail, severity, description = (
-                True, logging.ERROR, 'raised unexpected',
-            )
-        format = self.error_msg
-        if send_failed_event:
-            self.send_event(
-                'task-failed', exception=exception, traceback=traceback,
-            )
-
-        if internal:
-            if isinstance(einfo.exception, MemoryError):
-                raise MemoryError('Process got: %s' % (einfo.exception, ))
-            elif isinstance(einfo.exception, Reject):
-                format = self.rejected_msg
-                description = 'rejected'
-                severity = logging.WARN
-                exc_info = einfo
-                self.reject(requeue=einfo.exception.requeue)
-            elif isinstance(einfo.exception, Ignore):
-                format = self.ignored_msg
-                description = 'ignored'
-                severity = logging.INFO
-                exc_info = None
-                self.acknowledge()
-            else:
-                format = self.internal_error_msg
-                description = 'INTERNAL ERROR'
-                severity = logging.CRITICAL
-
-        context = {
-            'hostname': self.hostname,
-            'id': self.id,
-            'name': self.name,
-            'exc': exception,
-            'traceback': traceback,
-            'args': sargs,
-            'kwargs': skwargs,
-            'description': description,
-        }
-
-        logger.log(severity, format.strip(), context,
-                   exc_info=exc_info,
-                   extra={'data': {'id': self.id,
-                                   'name': self.name,
-                                   'args': sargs,
-                                   'kwargs': skwargs,
-                                   'hostname': self.hostname,
-                                   'internal': internal}})
-
-        if do_send_mail:
-            task.send_error_email(context, einfo.exception)
-
-    def acknowledge(self):
-        """Acknowledge task."""
-        if not self.acknowledged:
-            self.on_ack(logger, self.connection_errors)
-            self.acknowledged = True
-
-    def reject(self, requeue=False):
-        if not self.acknowledged:
-            self.on_reject(logger, self.connection_errors, requeue)
-            self.acknowledged = True
-
-    def repr_result(self, result, maxlen=RESULT_MAXLEN):
-        # 46 is the length needed to fit
-        #     'the quick brown fox jumps over the lazy dog' :)
-        if not isinstance(result, string_t):
-            result = safe_repr(result)
-        return truncate(result) if len(result) > maxlen else result
-
-    def info(self, safe=False):
-        return {'id': self.id,
-                'name': self.name,
-                'args': self.args if safe else safe_repr(self.args),
-                'kwargs': self.kwargs if safe else safe_repr(self.kwargs),
-                'hostname': self.hostname,
-                'time_start': self.time_start,
-                'acknowledged': self.acknowledged,
-                'delivery_info': self.delivery_info,
-                'worker_pid': self.worker_pid}
-
-    def __str__(self):
-        return '{0.name}[{0.id}]{1}{2}'.format(self,
-               ' eta:[{0}]'.format(self.eta) if self.eta else '',
-               ' expires:[{0}]'.format(self.expires) if self.expires else '')
-    shortinfo = __str__
-
-    def __repr__(self):
-        return '<{0} {1}: {2}>'.format(
-            type(self).__name__, self.id,
-            reprcall(self.name, self.args, self.kwargs))
-
-    @property
-    def tzlocal(self):
-        if self._tzlocal is None:
-            self._tzlocal = self.app.conf.CELERY_TIMEZONE
-        return self._tzlocal
-
-    @property
-    def store_errors(self):
-        return (not self.task.ignore_result
-                or self.task.store_errors_even_if_ignored)
-
-    @property
-    def task_id(self):
-        # XXX compat
-        return self.id
-
-    @task_id.setter  # noqa
-    def task_id(self, value):
-        self.id = value
-
-    @property
-    def task_name(self):
-        # XXX compat
-        return self.name
-
-    @task_name.setter  # noqa
-    def task_name(self, value):
-        self.name = value
-
-    @property
-    def reply_to(self):
-        # used by rpc backend when failures reported by parent process
-        return self.request_dict['reply_to']
-
-    @property
-    def correlation_id(self):
-        # used similarly to reply_to
-        return self.request_dict['correlation_id']

+ 2 - 2
celery/worker/loops.py

@@ -37,7 +37,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
     if heartbeat and connection.supports_heartbeats:
         hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)
 
-    consumer.callbacks = [on_task_received]
+    consumer.on_message = on_task_received
     consumer.consume()
     obj.on_ready()
     obj.controller.register_with_event_loop(hub)
@@ -86,7 +86,7 @@ def synloop(obj, connection, consumer, blueprint, hub, qos,
     """Fallback blocking event loop for transports that don't support AIO."""
 
     on_task_received = obj.create_task_handler()
-    consumer.register_callback(on_task_received)
+    consumer.on_message = on_task_received
     consumer.consume()
 
     obj.on_ready()
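
Assigning the handler to ``consumer.on_message`` rather than the decode-then-call ``callbacks`` list means the loop receives the raw kombu message with its body still serialized. A minimal kombu sketch of that mode, using the in-memory transport and made-up queue and handler names::

    from kombu import Connection, Consumer, Queue

    def on_message(message):
        # Raw message: headers are readable, the body is still serialized.
        print(message.headers, type(message.body))
        message.ack()

    with Connection('memory://') as connection:
        channel = connection.channel()
        with Consumer(channel, [Queue('celery')], on_message=on_message):
            # connection.drain_events(timeout=1) would now call on_message()
            # once per delivery instead of a (body, message) callback.
            pass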

+ 494 - 0
celery/worker/request.py

@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+"""
+    celery.worker.request
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    This module defines the :class:`Request` class,
+    which specifies how tasks are executed.
+
+"""
+from __future__ import absolute_import, unicode_literals
+
+import logging
+import socket
+import sys
+
+from datetime import datetime
+from weakref import ref
+
+from kombu.utils.encoding import safe_repr, safe_str
+
+from celery import signals
+from celery.app.trace import trace_task, trace_task_ret
+from celery.exceptions import (
+    Ignore, TaskRevokedError, InvalidTaskError,
+    SoftTimeLimitExceeded, TimeLimitExceeded,
+    WorkerLostError, Terminated, Retry, Reject,
+)
+from celery.five import string
+from celery.platforms import signals as _signals
+from celery.utils.functional import noop
+from celery.utils.log import get_logger
+from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware
+from celery.utils.serialization import get_pickled_exception
+
+from . import state
+
+__all__ = ['Request']
+
+IS_PYPY = hasattr(sys, 'pypy_version_info')
+
+logger = get_logger(__name__)
+debug, info, warn, error = (logger.debug, logger.info,
+                            logger.warning, logger.error)
+_does_info = False
+_does_debug = False
+
+
+def __optimize__():
+    # this is also called by celery.app.trace.setup_worker_optimizations
+    global _does_debug
+    global _does_info
+    _does_debug = logger.isEnabledFor(logging.DEBUG)
+    _does_info = logger.isEnabledFor(logging.INFO)
+__optimize__()
+
+# Localize
+tz_utc = timezone.utc
+tz_or_local = timezone.tz_or_local
+send_revoked = signals.task_revoked.send
+
+task_accepted = state.task_accepted
+task_ready = state.task_ready
+revoked_tasks = state.revoked
+
+
+class Request(object):
+    """A request for task execution."""
+    acknowledged = False
+    time_start = None
+    worker_pid = None
+    timeouts = (None, None)
+    _already_revoked = False
+    _terminate_on_ack = None
+    _apply_result = None
+    _tzlocal = None
+
+    if not IS_PYPY:  # pragma: no cover
+        __slots__ = (
+            'app', 'name', 'id', 'on_ack', 'body',
+            'hostname', 'eventer', 'connection_errors', 'task', 'eta',
+            'expires', 'request_dict', 'on_reject', 'utc',
+            'content_type', 'content_encoding',
+            '__weakref__', '__dict__',
+        )
+
+    def __init__(self, message, on_ack=noop,
+                 hostname=None, eventer=None, app=None,
+                 connection_errors=None, request_dict=None,
+                 task=None, on_reject=noop, body=None,
+                 headers=None, decoded=False, utc=True,
+                 maybe_make_aware=maybe_make_aware,
+                 maybe_iso8601=maybe_iso8601, **opts):
+        if headers is None:
+            headers = message.headers
+        if body is None:
+            body = message.body
+        self.app = app
+        self.message = message
+        self.body = body
+        self.utc = utc
+        if decoded:
+            self.content_type = self.content_encoding = None
+        else:
+            self.content_type, self.content_encoding = (
+                message.content_type, message.content_encoding,
+            )
+
+        name = self.name = headers['task']
+        self.id = headers['id']
+        if 'timeouts' in headers:
+            self.timeouts = headers['timeouts']
+        self.on_ack = on_ack
+        self.on_reject = on_reject
+        self.hostname = hostname or socket.gethostname()
+        self.eventer = eventer
+        self.connection_errors = connection_errors or ()
+        self.task = task or self.app.tasks[name]
+
+        # timezone means the message is timezone-aware, and the only timezone
+        # supported at this point is UTC.
+        eta = headers.get('eta')
+        if eta is not None:
+            try:
+                eta = maybe_iso8601(eta)
+            except (AttributeError, ValueError, TypeError) as exc:
+                raise InvalidTaskError(
+                    'invalid eta value {0!r}: {1}'.format(eta, exc))
+            self.eta = maybe_make_aware(eta, self.tzlocal)
+        else:
+            self.eta = None
+
+        expires = headers.get('expires')
+        if expires is not None:
+            try:
+                expires = maybe_iso8601(expires)
+            except (AttributeError, ValueError, TypeError) as exc:
+                raise InvalidTaskError(
+                    'invalid expires value {0!r}: {1}'.format(expires, exc))
+            self.expires = maybe_make_aware(expires, self.tzlocal)
+        else:
+            self.expires = None
+
+        delivery_info = message.delivery_info or {}
+        properties = message.properties or {}
+        headers.update({
+            'reply_to': properties.get('reply_to'),
+            'correlation_id': properties.get('correlation_id'),
+            'delivery_info': {
+                'exchange': delivery_info.get('exchange'),
+                'routing_key': delivery_info.get('routing_key'),
+                'priority': delivery_info.get('priority'),
+                'redelivered': delivery_info.get('redelivered'),
+            }
+
+        })
+        self.request_dict = headers
+
+    @property
+    def delivery_info(self):
+        return self.request_dict['delivery_info']
+
+    def execute_using_pool(self, pool, **kwargs):
+        """Used by the worker to send this task to the pool.
+
+        :param pool: A :class:`celery.concurrency.base.TaskPool` instance.
+
+        :raises celery.exceptions.TaskRevokedError: if the task was revoked
+            and ignored.
+
+        """
+        task_id = self.id
+        task = self.task
+        if self.revoked():
+            raise TaskRevokedError(task_id)
+
+        timeout, soft_timeout = self.timeouts
+        timeout = timeout or task.time_limit
+        soft_timeout = soft_timeout or task.soft_time_limit
+        result = pool.apply_async(
+            trace_task_ret,
+            args=(self.name, task_id, self.request_dict, self.body,
+                  self.content_type, self.content_encoding, self.hostname),
+            accept_callback=self.on_accepted,
+            timeout_callback=self.on_timeout,
+            callback=self.on_success,
+            error_callback=self.on_failure,
+            soft_timeout=soft_timeout or task.soft_time_limit,
+            timeout=timeout or task.time_limit,
+            correlation_id=task_id,
+        )
+        # cannot create weakref to None
+        self._apply_result = ref(result) if result is not None else result
+        return result
+
+    def execute(self, loglevel=None, logfile=None):
+        """Execute the task in a :func:`~celery.app.trace.trace_task`.
+
+        :keyword loglevel: The loglevel used by the task.
+        :keyword logfile: The logfile used by the task.
+
+        """
+        if self.revoked():
+            return
+
+        # acknowledge task as being processed.
+        if not self.task.acks_late:
+            self.acknowledge()
+
+        request = self.request_dict
+        args, kwargs = self.message.payload
+        request.update({'loglevel': loglevel, 'logfile': logfile,
+                        'hostname': self.hostname, 'is_eager': False,
+                        'args': args, 'kwargs': kwargs})
+        retval = trace_task(self.task, self.id, args, kwargs, request,
+                            hostname=self.hostname, loader=self.app.loader,
+                            app=self.app)[0]
+        self.acknowledge()
+        return retval
+
+    def maybe_expire(self):
+        """If expired, mark the task as revoked."""
+        if self.expires:
+            now = datetime.now(tz_or_local(self.tzlocal) if self.utc else None)
+            if now > self.expires:
+                revoked_tasks.add(self.id)
+                return True
+
+    def terminate(self, pool, signal=None):
+        signal = _signals.signum(signal or 'TERM')
+        if self.time_start:
+            pool.terminate_job(self.worker_pid, signal)
+            self._announce_revoked('terminated', True, signal, False)
+        else:
+            self._terminate_on_ack = pool, signal
+        if self._apply_result is not None:
+            obj = self._apply_result()  # is a weakref
+            if obj is not None:
+                obj.terminate(signal)
+
+    def _announce_revoked(self, reason, terminated, signum, expired):
+        task_ready(self)
+        self.send_event('task-revoked',
+                        terminated=terminated, signum=signum, expired=expired)
+        if self.store_errors:
+            self.task.backend.mark_as_revoked(self.id, reason, request=self)
+        self.acknowledge()
+        self._already_revoked = True
+        send_revoked(self.task, request=self,
+                     terminated=terminated, signum=signum, expired=expired)
+
+    def revoked(self):
+        """If revoked, skip task and mark state."""
+        expired = False
+        if self._already_revoked:
+            return True
+        if self.expires:
+            expired = self.maybe_expire()
+        if self.id in revoked_tasks:
+            info('Discarding revoked task: %s[%s]', self.name, self.id)
+            self._announce_revoked(
+                'expired' if expired else 'revoked', False, None, expired,
+            )
+            return True
+        return False
+
+    def send_event(self, type, **fields):
+        if self.eventer and self.eventer.enabled:
+            self.eventer.send(type, uuid=self.id, **fields)
+
+    def on_accepted(self, pid, time_accepted):
+        """Handler called when task is accepted by worker pool."""
+        self.worker_pid = pid
+        self.time_start = time_accepted
+        task_accepted(self)
+        if not self.task.acks_late:
+            self.acknowledge()
+        self.send_event('task-started')
+        if _does_debug:
+            debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid)
+        if self._terminate_on_ack is not None:
+            self.terminate(*self._terminate_on_ack)
+
+    def on_timeout(self, soft, timeout):
+        """Handler called if the task times out."""
+        task_ready(self)
+        if soft:
+            warn('Soft time limit (%ss) exceeded for %s[%s]',
+                 timeout, self.name, self.id)
+            exc = SoftTimeLimitExceeded(timeout)
+        else:
+            error('Hard time limit (%ss) exceeded for %s[%s]',
+                  timeout, self.name, self.id)
+            exc = TimeLimitExceeded(timeout)
+
+        if self.store_errors:
+            self.task.backend.mark_as_failure(self.id, exc, request=self)
+
+        if self.task.acks_late:
+            self.acknowledge()
+
+    def on_success(self, failed__retval__runtime, **kwargs):
+        """Handler called if the task was successfully processed."""
+        failed, retval, runtime = failed__retval__runtime
+        if failed:
+            if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)):
+                raise retval.exception
+            return self.on_failure(retval, return_ok=True)
+        task_ready(self)
+
+        if self.task.acks_late:
+            self.acknowledge()
+
+        if self.eventer and self.eventer.enabled:
+            self.send_event(
+                'task-succeeded', result=retval, runtime=runtime,
+            )
+
+    def on_retry(self, exc_info):
+        """Handler called if the task should be retried."""
+        if self.task.acks_late:
+            self.acknowledge()
+
+        self.send_event('task-retried',
+                        exception=safe_repr(exc_info.exception.exc),
+                        traceback=safe_str(exc_info.traceback))
+
+    def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
+        """Handler called if the task raised an exception."""
+        task_ready(self)
+
+        if isinstance(exc_info.exception, MemoryError):
+            raise MemoryError('Process got: %s' % (exc_info.exception, ))
+        elif isinstance(exc_info.exception, Reject):
+            return self.reject(requeue=exc_info.exception.requeue)
+        elif isinstance(exc_info.exception, Ignore):
+            return self.acknowledge()
+
+        exc = exc_info.exception
+
+        if isinstance(exc, Retry):
+            return self.on_retry(exc_info)
+
+        # These are special cases where the process would not have had
+        # time to write the result.
+        if self.store_errors:
+            if isinstance(exc, Terminated):
+                self._announce_revoked(
+                    'terminated', True, string(exc), False)
+                send_failed_event = False  # already sent revoked event
+            elif isinstance(exc, WorkerLostError) or not return_ok:
+                self.task.backend.mark_as_failure(
+                    self.id, exc, request=self,
+                )
+        # (acks_late) acknowledge after result stored.
+        if self.task.acks_late:
+            self.acknowledge()
+
+        if send_failed_event:
+            self.send_event(
+                'task-failed',
+                exception=safe_repr(get_pickled_exception(exc_info.exception)),
+                traceback=exc_info.traceback,
+            )
+
+        if not return_ok:
+            error('Task handler raised error: %r', exc,
+                  exc_info=exc_info.exc_info)
+
+    def acknowledge(self):
+        """Acknowledge task."""
+        if not self.acknowledged:
+            self.on_ack(logger, self.connection_errors)
+            self.acknowledged = True
+
+    def reject(self, requeue=False):
+        if not self.acknowledged:
+            self.on_reject(logger, self.connection_errors, requeue)
+            self.acknowledged = True
+
+    def info(self, safe=False):
+        return {'id': self.id,
+                'name': self.name,
+                'body': self.body,
+                'hostname': self.hostname,
+                'time_start': self.time_start,
+                'acknowledged': self.acknowledged,
+                'delivery_info': self.delivery_info,
+                'worker_pid': self.worker_pid}
+
+    def __str__(self):
+        return '{0.name}[{0.id}]{1}{2}'.format(self,
+               ' eta:[{0}]'.format(self.eta) if self.eta else '',
+               ' expires:[{0}]'.format(self.expires) if self.expires else '')
+    shortinfo = __str__
+
+    def __repr__(self):
+        return '<{0} {1}: {2}>'.format(type(self).__name__, self.id, self.name)
+
+    @property
+    def tzlocal(self):
+        if self._tzlocal is None:
+            self._tzlocal = self.app.conf.CELERY_TIMEZONE
+        return self._tzlocal
+
+    @property
+    def store_errors(self):
+        return (not self.task.ignore_result
+                or self.task.store_errors_even_if_ignored)
+
+    @property
+    def task_id(self):
+        # XXX compat
+        return self.id
+
+    @task_id.setter  # noqa
+    def task_id(self, value):
+        self.id = value
+
+    @property
+    def task_name(self):
+        # XXX compat
+        return self.name
+
+    @task_name.setter  # noqa
+    def task_name(self, value):
+        self.name = value
+
+    @property
+    def reply_to(self):
+        # used by rpc backend when failures reported by parent process
+        return self.request_dict['reply_to']
+
+    @property
+    def correlation_id(self):
+        # used similarly to reply_to
+        return self.request_dict['correlation_id']
+
+
+def create_request_cls(base, task, pool, hostname, eventer,
+                       ref=ref, revoked_tasks=revoked_tasks,
+                       task_ready=task_ready):
+    from celery.app.trace import trace_task_ret as trace
+    default_time_limit = task.time_limit
+    default_soft_time_limit = task.soft_time_limit
+    apply_async = pool.apply_async
+    acks_late = task.acks_late
+    std_kwargs = {'hostname': hostname, 'is_eager': False}
+    events = eventer and eventer.enabled
+
+    class Request(base):
+
+        def execute_using_pool(self, pool, **kwargs):
+            task_id = self.id
+            if (self.expires or task_id in revoked_tasks) and self.revoked():
+                raise TaskRevokedError(task_id)
+
+            timeout, soft_timeout = self.timeouts
+            timeout = timeout or default_time_limit
+            soft_timeout = soft_timeout or default_soft_time_limit
+            result = apply_async(
+                trace,
+                args=(self.name, task_id, self.request_dict, self.body,
+                      self.content_type, self.content_encoding),
+                kwargs=std_kwargs,
+                accept_callback=self.on_accepted,
+                timeout_callback=self.on_timeout,
+                callback=self.on_success,
+                error_callback=self.on_failure,
+                soft_timeout=soft_timeout,
+                timeout=timeout,
+                correlation_id=task_id,
+            )
+            # cannot create weakref to None
+            self._apply_result = ref(result) if result is not None else result
+            return result
+
+        def on_success(self, failed__retval__runtime, **kwargs):
+            failed, retval, runtime = failed__retval__runtime
+            if failed:
+                if isinstance(retval.exception, (
+                        SystemExit, KeyboardInterrupt)):
+                    raise retval.exception
+                return self.on_failure(retval, return_ok=True)
+            task_ready(self)
+
+            if acks_late:
+                self.acknowledge()
+
+            if events:
+                self.send_event(
+                    'task-succeeded', result=retval, runtime=runtime,
+                )
+
+    return Request
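
The replacement ``Request`` is constructed from the message itself, and ``create_request_cls`` pre-binds the task's time limits and the pool's ``apply_async`` into a per-task subclass for the hot path. A rough sketch of exercising it against a mocked pool; the mocked message attributes are assumptions about what a kombu message exposes::

    from mock import Mock

    from celery import Celery
    from celery.worker.request import Request

    app = Celery(set_as_current=False)

    @app.task(name='demo.add')
    def add(x, y):
        return x + y

    message = Mock(name='message')
    message.headers = {'task': 'demo.add', 'id': 'id-1'}
    message.body = b'[[2, 2], {}]'             # still serialized
    message.content_type = 'application/json'
    message.content_encoding = 'utf-8'
    message.properties = {}
    message.delivery_info = {}

    req = Request(message, app=app, task=add)
    pool = Mock(name='pool')
    req.execute_using_pool(pool)
    # The serialized body plus its content type/encoding are forwarded
    # to trace_task_ret instead of pre-decoded args/kwargs:
    print(pool.apply_async.call_args)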

+ 2 - 2
celery/worker/state.py

@@ -42,10 +42,10 @@ REVOKES_MAX = 50000
 #: being expired when the max limit has been exceeded.
 REVOKE_EXPIRES = 10800
 
-#: set of all reserved :class:`~celery.worker.job.Request`'s.
+#: set of all reserved :class:`~celery.worker.request.Request`'s.
 reserved_requests = set()
 
-#: set of currently active :class:`~celery.worker.job.Request`'s.
+#: set of currently active :class:`~celery.worker.request.Request`'s.
 active_requests = set()
 
 #: count of tasks accepted by the worker, sorted by type.

+ 45 - 12
celery/worker/strategy.py

@@ -11,12 +11,13 @@ from __future__ import absolute_import
 import logging
 
 from kombu.async.timer import to_timestamp
-from kombu.utils.encoding import safe_repr
 
+from celery.exceptions import InvalidTaskError
+from celery.five import buffer_t
 from celery.utils.log import get_logger
 from celery.utils.timeutils import timezone
 
-from .job import Request
+from .request import Request, create_request_cls
 from .state import task_reserved
 
 __all__ = ['default']
@@ -24,12 +25,31 @@ __all__ = ['default']
 logger = get_logger(__name__)
 
 
+def proto1_to_proto2(message, body):
+    """Converts Task message protocol 1 arguments to protocol 2.
+
+    Returns tuple of ``(body, headers, already_decoded_status, utc)``
+
+    """
+    try:
+        args, kwargs = body['args'], body['kwargs']
+        kwargs.items
+    except KeyError:
+        raise InvalidTaskError('Message does not have args/kwargs')
+    except AttributeError:
+        raise InvalidTaskError(
+            'Task keyword arguments must be a mapping',
+        )
+    body['headers'] = message.headers
+    return (args, kwargs), body, True, body.get('utc', True)
+
+
 def default(task, app, consumer,
             info=logger.info, error=logger.error, task_reserved=task_reserved,
-            to_system_tz=timezone.to_system):
+            to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t,
+            proto1_to_proto2=proto1_to_proto2):
     hostname = consumer.hostname
     eventer = consumer.event_dispatcher
-    Req = Request
     connection_errors = consumer.connection_errors
     _does_info = logger.isEnabledFor(logging.INFO)
     events = eventer and eventer.enabled
@@ -40,15 +60,28 @@ def default(task, app, consumer,
     bucket = consumer.task_buckets[task.name]
     handle = consumer.on_task_request
     limit_task = consumer._limit_task
+    body_can_be_buffer = consumer.pool.body_can_be_buffer
+    Req = create_request_cls(Request, task, consumer.pool, hostname, eventer)
+
+    revoked_tasks = consumer.controller.state.revoked
 
     def task_message_handler(message, body, ack, reject, callbacks,
                              to_timestamp=to_timestamp):
-        req = Req(body, on_ack=ack, on_reject=reject,
-                  app=app, hostname=hostname,
-                  eventer=eventer, task=task,
-                  connection_errors=connection_errors,
-                  message=message)
-        if req.revoked():
+        if body is None:
+            body, headers, decoded, utc = (
+                message.body, message.headers, False, True,
+            )
+            if not body_can_be_buffer:
+                body = bytes(body) if isinstance(body, buffer_t) else body
+        else:
+            body, headers, decoded, utc = proto1_to_proto2(message, body)
+        req = Req(
+            message,
+            on_ack=ack, on_reject=reject, app=app, hostname=hostname,
+            eventer=eventer, task=task, connection_errors=connection_errors,
+            body=body, headers=headers, decoded=decoded, utc=utc,
+        )
+        if (req.expires or req.id in revoked_tasks) and req.revoked():
             return
 
         if _does_info:
@@ -58,7 +91,7 @@ def default(task, app, consumer,
             send_event(
                 'task-received',
                 uuid=req.id, name=req.name,
-                args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
+                args='', kwargs='',
                 retries=req.request_dict.get('retries', 0),
                 eta=req.eta and req.eta.isoformat(),
                 expires=req.expires and req.expires.isoformat(),
@@ -83,7 +116,7 @@ def default(task, app, consumer,
                     return limit_task(req, bucket, 1)
             task_reserved(req)
             if callbacks:
-                [callback() for callback in callbacks]
+                [callback(req) for callback in callbacks]
             handle(req)
 
     return task_message_handler
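
``proto1_to_proto2`` is the compatibility shim for old-style messages: the already-decoded v1 body is reshaped into the ``(args, kwargs)`` body, headers dict and flags that the new ``Request`` expects, and ``InvalidTaskError`` is raised when args/kwargs are missing or not a mapping. A small illustrative call with a mocked message and made-up field values::

    from mock import Mock

    from celery.worker.strategy import proto1_to_proto2

    message = Mock(name='message')
    message.headers = {}
    v1_body = {'task': 'demo.add', 'id': 'id-1',
               'args': (2, 2), 'kwargs': {}, 'utc': True}

    body, headers, decoded, utc = proto1_to_proto2(message, v1_body)
    assert body == ((2, 2), {})    # args/kwargs hoisted into the body tuple
    assert headers is v1_body      # the old body now doubles as the headers
    assert decoded and utc         # already deserialized; timestamps are UTC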

+ 1 - 1
docs/internals/app-overview.rst

@@ -226,7 +226,7 @@ App Dependency Tree
     * celery.apps.worker.Worker
         * celery.worker.WorkerController
             * celery.worker.consumer.Consumer
-                * celery.worker.job.TaskRequest
+                * celery.worker.request.Request
                 * celery.events.EventDispatcher
                 * celery.worker.control.ControlDispatch
                     * celery.worker.control.registry.Panel

+ 11 - 7
docs/internals/protov2.rst

@@ -28,9 +28,9 @@ Notes
 
     - Java/C, etc. can use a thrift/protobuf document as the body
 
-- Dispatches to actor based on ``c_type``, ``c_meth`` headers
+- Dispatches to actor based on ``task``, ``meth`` headers
 
-    ``c_meth`` is unused by python, but may be used in the future
+    ``meth`` is unused by python, but may be used in the future
     to specify class+method pairs.
 
 - Chain gains a dedicated field.
@@ -50,8 +50,9 @@ Notes
 
 - ``correlation_id`` replaces ``task_id`` field.
 
+- ``root_id`` and ``parent_id`` fields help keep track of workflows.
 
-- ``c_shadow`` lets you specify a different name for logs, monitors
+- ``shadow`` lets you specify a different name for logs, monitors
   can be used for e.g. meta tasks that calls any function::
 
     from celery.utils.imports import qualname
@@ -101,11 +102,14 @@ Definition
     }
     headers = {
         'lang': (string)'py'
-        'c_type': (string)task,
+        'task': (string)task,
+        'id': (uuid)task_id,
+        'root_id': (uuid)root_id,
+        'parent_id': (uuid)parent_id,
 
         # optional
-        'c_meth': (string)unused,
-        'c_shadow': (string)replace_name,
+        'meth': (string)unused,
+        'shadow': (string)replace_name,
         'eta': (iso8601)eta,
         'expires'; (iso8601)expires,
         'callbacks': (list)Signature,
@@ -131,7 +135,7 @@ Example
         message=json.dumps([[2, 2], {}]),
         application_headers={
             'lang': 'py',
-            'c_type': 'proj.tasks.add',
+            'task': 'proj.tasks.add',
             'chain': [
                 # reversed chain list
                 {'task': 'proj.tasks.add', 'args': (8, )},

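To illustrate the renamed headers, here is a hedged sketch of the "meta task" pattern mentioned above, where ``shadow`` replaces the generic task name in logs and monitors. Passing ``shadow=`` through ``apply_async`` is an assumption on this branch; only the header itself is documented by this diff:

    from celery import Celery
    from celery.utils.imports import qualname, symbol_by_name

    app = Celery('proj', broker='amqp://')


    @app.task(bind=True)
    def call_function(self, funname, *args, **kwargs):
        # resolve a dotted path and call the target function
        return symbol_by_name(funname)(*args, **kwargs)


    def delay_function(fun, *args, **kwargs):
        # shadow the generic task name with the real function's name so that
        # monitors show e.g. 'os.path.join' instead of 'call_function'
        return call_function.apply_async(
            (qualname(fun),) + args, kwargs, shadow=qualname(fun))
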
+ 1 - 1
docs/internals/worker.rst

@@ -35,7 +35,7 @@ Receives messages from the broker using `Kombu`_.
 .. _`Kombu`: http://pypi.python.org/pypi/kombu
 
 When a message is received it's converted into a
-:class:`celery.worker.job.TaskRequest` object.
+:class:`celery.worker.request.Request` object.
 
 Tasks with an ETA, or rate-limit are entered into the `timer`,
 messages that can be immediately processed are sent to the execution pool.

+ 3 - 3
docs/reference/celery.worker.job.rst → docs/reference/celery.worker.request.rst

@@ -1,11 +1,11 @@
 =====================================
- celery.worker.job
+ celery.worker.request
 =====================================
 
 .. contents::
     :local:
-.. currentmodule:: celery.worker.job
+.. currentmodule:: celery.worker.request
 
-.. automodule:: celery.worker.job
+.. automodule:: celery.worker.request
     :members:
     :undoc-members:

+ 1 - 1
docs/reference/index.rst

@@ -47,7 +47,7 @@
     celery.apps.beat
     celery.worker
     celery.worker.consumer
-    celery.worker.job
+    celery.worker.request
     celery.worker.state
     celery.worker.strategy
     celery.bin.base

+ 1 - 1
docs/userguide/extending.rst

@@ -463,7 +463,7 @@ Methods
 .. method:: apply_eta_task(request)
 
     Schedule eta task to execute based on the ``request.eta`` attribute.
-    (:class:`~celery.worker.job.Request`)
+    (:class:`~celery.worker.request.Request`)
 
 
 

+ 1 - 1
docs/userguide/signals.rst

@@ -271,7 +271,7 @@ Provides arguments:
 
 * request
 
-    This is a :class:`~celery.worker.job.Request` instance, and not
+    This is a :class:`~celery.worker.request.Request` instance, and not
     ``task.request``.   When using the prefork pool this signal
     is dispatched in the parent process, so ``task.request`` is not available
     and should not be used.  Use this object instead, which should have many

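The ``task_revoked`` signal covered in the what's-new entry below is one such case: the handler gets this worker-side request. A minimal handler sketch (the ``terminated``/``signum``/``expired`` arguments follow the 3.1 signal documentation and are not part of this diff):

    from celery.signals import task_revoked


    @task_revoked.connect
    def on_task_revoked(sender=None, request=None, terminated=None,
                        signum=None, expired=None, **kwargs):
        # ``request`` is the worker-side Request, not task.request
        print('revoked %s (%s): terminated=%r expired=%r' % (
            request.id, request.name, terminated, expired))
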
+ 4 - 3
docs/whatsnew-3.1.rst

@@ -1072,8 +1072,9 @@ In Other News
   (Issue #1555).
 
     The revoked signal is dispatched after the task request is removed from
-    the stack, so it must instead use the :class:`~celery.worker.job.Request`
-    object to get information about the task.
+    the stack, so it must instead use the
+    :class:`~celery.worker.request.Request` object to get information
+    about the task.
 
 - Worker: New :option:`-X` command line argument to exclude queues
   (Issue #1399).
@@ -1235,7 +1236,7 @@ Internal changes
     - Result backends (:class:`celery.backends.base.BaseBackend`)
     - :class:`celery.worker.WorkController`
     - :class:`celery.worker.Consumer`
-    - :class:`celery.worker.job.Request`
+    - :class:`celery.worker.request.Request`
 
     This means that you have to pass a specific app when instantiating
     these classes.

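In practice that means constructing these classes with an explicit app instance. A hedged sketch for ``WorkController`` (``pool_cls='solo'`` is only an illustrative option, not something this changeset prescribes):

    from celery import Celery
    from celery.worker import WorkController

    app = Celery('proj', broker='amqp://')

    # The controller is bound to an explicit app instead of a global default:
    worker = WorkController(app=app, pool_cls='solo')
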
+ 0 - 1
extra/release/doc4allmods

@@ -7,7 +7,6 @@ SKIP_FILES="celery.five.rst
             celery.task.rst
             celery.task.base.rst
             celery.task.sets.rst
-            celery.task.trace.rst
             celery.bin.rst
             celery.bin.celeryd_detach.rst
             celery.contrib.rst

+ 5 - 0
funtests/stress/stress/templates.py

@@ -70,6 +70,7 @@ class default(object):
         'interval_max': 2,
         'interval_step': 0.1,
     }
+    CELERY_TASK_PROTOCOL = 2
 
 
 @template()
@@ -124,3 +125,7 @@ class sqs(default):
     BROKER_TRANSPORT_OPTIONS = {
         'region': os.environ.get('AWS_REGION', 'us-east-1'),
     }
+
+@template()
+class proto1(default):
+    CELERY_TASK_PROTOCOL = 1
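
The stress-test templates above default to the new protocol while keeping a ``proto1`` template for comparison runs. A hedged sketch of the equivalent setting in an ordinary application config (the setting name is taken from the diff and is only available on this branch):

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    app.conf.CELERY_TASK_PROTOCOL = 2   # opt in to the new message protocol


    @app.task
    def add(x, y):
        return x + y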