浏览代码

Merge branch 'canvas_refactor'

Ask Solem 11 年之前
父节点
当前提交
eb7dab17dd
共有 52 个文件被更改,包括 1423 次插入1462 次删除
  1. 1 3
      celery/_state.py
  2. 11 4
      celery/app/amqp.py
  3. 8 16
      celery/app/base.py
  4. 14 181
      celery/app/builtins.py
  5. 14 27
      celery/app/task.py
  6. 174 32
      celery/app/trace.py
  7. 0 1
      celery/app/utils.py
  8. 4 4
      celery/backends/base.py
  9. 209 56
      celery/canvas.py
  10. 0 9
      celery/concurrency/asynpool.py
  11. 3 1
      celery/concurrency/base.py
  12. 1 0
      celery/concurrency/solo.py
  13. 1 1
      celery/contrib/batches.py
  14. 2 4
      celery/events/state.py
  15. 9 8
      celery/five.py
  16. 1 3
      celery/task/base.py
  17. 1 2
      celery/task/http.py
  18. 0 12
      celery/task/trace.py
  19. 2 2
      celery/tests/app/test_app.py
  20. 10 12
      celery/tests/app/test_builtins.py
  21. 28 14
      celery/tests/case.py
  22. 0 23
      celery/tests/compat_modules/test_compat.py
  23. 0 4
      celery/tests/compat_modules/test_compat_utils.py
  24. 0 1
      celery/tests/compat_modules/test_decorators.py
  25. 4 4
      celery/tests/tasks/test_chord.py
  26. 0 4
      celery/tests/tasks/test_tasks.py
  27. 7 4
      celery/tests/tasks/test_trace.py
  28. 7 22
      celery/tests/worker/test_control.py
  29. 21 22
      celery/tests/worker/test_loops.py
  30. 156 260
      celery/tests/worker/test_request.py
  31. 8 9
      celery/tests/worker/test_strategy.py
  32. 113 56
      celery/tests/worker/test_worker.py
  33. 0 9
      celery/utils/__init__.py
  34. 3 1
      celery/utils/objects.py
  35. 1 1
      celery/worker/autoscale.py
  36. 35 23
      celery/worker/consumer.py
  37. 4 2
      celery/worker/control.py
  38. 0 590
      celery/worker/job.py
  39. 2 2
      celery/worker/loops.py
  40. 494 0
      celery/worker/request.py
  41. 2 2
      celery/worker/state.py
  42. 45 12
      celery/worker/strategy.py
  43. 1 1
      docs/internals/app-overview.rst
  44. 11 7
      docs/internals/protov2.rst
  45. 1 1
      docs/internals/worker.rst
  46. 3 3
      docs/reference/celery.worker.request.rst
  47. 1 1
      docs/reference/index.rst
  48. 1 1
      docs/userguide/extending.rst
  49. 1 1
      docs/userguide/signals.rst
  50. 4 3
      docs/whatsnew-3.1.rst
  51. 0 1
      extra/release/doc4allmods
  52. 5 0
      funtests/stress/stress/templates.py

+ 1 - 3
celery/_state.py

@@ -77,10 +77,8 @@ def _get_current_app():
         #: creates the global fallback app instance.
         #: creates the global fallback app instance.
         from celery.app import Celery
         from celery.app import Celery
         set_default_app(Celery(
         set_default_app(Celery(
-            'default',
+            'default', fixups=[], set_as_current=False,
             loader=os.environ.get('CELERY_LOADER') or 'default',
             loader=os.environ.get('CELERY_LOADER') or 'default',
-            fixups=[],
-            set_as_current=False, accept_magic_kwargs=True,
         ))
         ))
     return _tls.current_app or default_app
     return _tls.current_app or default_app
 
 

+ 11 - 4
celery/app/amqp.py

@@ -269,7 +269,8 @@ class AMQP(object):
                    expires=None, retries=0, chord=None,
                    expires=None, retries=0, chord=None,
                    callbacks=None, errbacks=None, reply_to=None,
                    callbacks=None, errbacks=None, reply_to=None,
                    time_limit=None, soft_time_limit=None,
                    time_limit=None, soft_time_limit=None,
-                   create_sent_event=False, now=None, timezone=None):
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
         args = args or ()
         args = args or ()
         kwargs = kwargs or {}
         kwargs = kwargs or {}
         utc = self.utc
         utc = self.utc
@@ -295,7 +296,8 @@ class AMQP(object):
         return task_message(
         return task_message(
             headers={
             headers={
                 'lang': 'py',
                 'lang': 'py',
-                'c_type': name,
+                'task': name,
+                'id': task_id,
                 'eta': eta,
                 'eta': eta,
                 'expires': expires,
                 'expires': expires,
                 'callbacks': callbacks,
                 'callbacks': callbacks,
@@ -304,7 +306,9 @@ class AMQP(object):
                 'group': group_id,
                 'group': group_id,
                 'chord': chord,
                 'chord': chord,
                 'retries': retries,
                 'retries': retries,
-                'timelimit': (time_limit, soft_time_limit),
+                'timelimit': [time_limit, soft_time_limit],
+                'root_id': root_id,
+                'parent_id': parent_id,
             },
             },
             properties={
             properties={
                 'correlation_id': task_id,
                 'correlation_id': task_id,
@@ -313,6 +317,8 @@ class AMQP(object):
             body=(args, kwargs),
             body=(args, kwargs),
             sent_event={
             sent_event={
                 'uuid': task_id,
                 'uuid': task_id,
+                'root': root_id,
+                'parent': parent_id,
                 'name': name,
                 'name': name,
                 'args': safe_repr(args),
                 'args': safe_repr(args),
                 'kwargs': safe_repr(kwargs),
                 'kwargs': safe_repr(kwargs),
@@ -327,7 +333,8 @@ class AMQP(object):
                    expires=None, retries=0,
                    expires=None, retries=0,
                    chord=None, callbacks=None, errbacks=None, reply_to=None,
                    chord=None, callbacks=None, errbacks=None, reply_to=None,
                    time_limit=None, soft_time_limit=None,
                    time_limit=None, soft_time_limit=None,
-                   create_sent_event=False, now=None, timezone=None):
+                   create_sent_event=False, now=None, timezone=None,
+                   root_id=None, parent_id=None):
         args = args or ()
         args = args or ()
         kwargs = kwargs or {}
         kwargs = kwargs or {}
         utc = self.utc
         utc = self.utc

+ 8 - 16
celery/app/base.py

@@ -9,12 +9,10 @@
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
 import os
 import os
-import sys
 import threading
 import threading
 import warnings
 import warnings
 
 
 from collections import defaultdict, deque
 from collections import defaultdict, deque
-from contextlib import contextmanager
 from copy import deepcopy
 from copy import deepcopy
 from operator import attrgetter
 from operator import attrgetter
 
 
@@ -128,11 +126,13 @@ class Celery(object):
     #: Signal sent after app has been finalized.
     #: Signal sent after app has been finalized.
     on_after_finalize = None
     on_after_finalize = None
 
 
+    #: ignored
+    accept_magic_kwargs = False
+
     def __init__(self, main=None, loader=None, backend=None,
     def __init__(self, main=None, loader=None, backend=None,
                  amqp=None, events=None, log=None, control=None,
                  amqp=None, events=None, log=None, control=None,
-                 set_as_current=True, accept_magic_kwargs=False,
-                 tasks=None, broker=None, include=None, changes=None,
-                 config_source=None, fixups=None, task_cls=None,
+                 set_as_current=True, tasks=None, broker=None, include=None,
+                 changes=None, config_source=None, fixups=None, task_cls=None,
                  autofinalize=True, **kwargs):
                  autofinalize=True, **kwargs):
         self.clock = LamportClock()
         self.clock = LamportClock()
         self.main = main
         self.main = main
@@ -145,7 +145,6 @@ class Celery(object):
         self.task_cls = task_cls or self.task_cls
         self.task_cls = task_cls or self.task_cls
         self.set_as_current = set_as_current
         self.set_as_current = set_as_current
         self.registry_cls = symbol_by_name(self.registry_cls)
         self.registry_cls = symbol_by_name(self.registry_cls)
-        self.accept_magic_kwargs = accept_magic_kwargs
         self.user_options = defaultdict(set)
         self.user_options = defaultdict(set)
         self.steps = defaultdict(set)
         self.steps = defaultdict(set)
         self.autofinalize = autofinalize
         self.autofinalize = autofinalize
@@ -240,12 +239,6 @@ class Celery(object):
                     cons = lambda app: app._task_from_fun(fun, **opts)
                     cons = lambda app: app._task_from_fun(fun, **opts)
                     cons.__name__ = fun.__name__
                     cons.__name__ = fun.__name__
                     connect_on_app_finalize(cons)
                     connect_on_app_finalize(cons)
-                if self.accept_magic_kwargs:  # compat mode
-                    task = self._task_from_fun(fun, **opts)
-                    if filter:
-                        task = filter(task)
-                    return task
-
                 if self.finalized or opts.get('_force_evaluate'):
                 if self.finalized or opts.get('_force_evaluate'):
                     ret = self._task_from_fun(fun, **opts)
                     ret = self._task_from_fun(fun, **opts)
                 else:
                 else:
@@ -277,7 +270,6 @@ class Celery(object):
 
 
         T = type(fun.__name__, (base, ), dict({
         T = type(fun.__name__, (base, ), dict({
             'app': self,
             'app': self,
-            'accept_magic_kwargs': False,
             'run': fun if bind else staticmethod(fun),
             'run': fun if bind else staticmethod(fun),
             '_decorated': True,
             '_decorated': True,
             '__doc__': fun.__doc__,
             '__doc__': fun.__doc__,
@@ -352,7 +344,7 @@ class Celery(object):
                   publisher=None, link=None, link_error=None,
                   publisher=None, link=None, link_error=None,
                   add_to_parent=True, group_id=None, retries=0, chord=None,
                   add_to_parent=True, group_id=None, retries=0, chord=None,
                   reply_to=None, time_limit=None, soft_time_limit=None,
                   reply_to=None, time_limit=None, soft_time_limit=None,
-                  **options):
+                  root_id=None, parent_id=None, **options):
         amqp = self.amqp
         amqp = self.amqp
         task_id = task_id or uuid()
         task_id = task_id or uuid()
         producer = producer or publisher  # XXX compat
         producer = producer or publisher  # XXX compat
@@ -370,6 +362,7 @@ class Celery(object):
             maybe_list(link), maybe_list(link_error),
             maybe_list(link), maybe_list(link_error),
             reply_to or self.oid, time_limit, soft_time_limit,
             reply_to or self.oid, time_limit, soft_time_limit,
             self.conf.CELERY_SEND_TASK_SENT_EVENT,
             self.conf.CELERY_SEND_TASK_SENT_EVENT,
+            root_id, parent_id,
         )
         )
 
 
         if connection:
         if connection:
@@ -574,7 +567,6 @@ class Celery(object):
             'events': self.events_cls,
             'events': self.events_cls,
             'log': self.log_cls,
             'log': self.log_cls,
             'control': self.control_cls,
             'control': self.control_cls,
-            'accept_magic_kwargs': self.accept_magic_kwargs,
             'fixups': self.fixups,
             'fixups': self.fixups,
             'config_source': self._config_source,
             'config_source': self._config_source,
             'task_cls': self.task_cls,
             'task_cls': self.task_cls,
@@ -585,7 +577,7 @@ class Celery(object):
         return (self.main, self.conf.changes,
         return (self.main, self.conf.changes,
                 self.loader_cls, self.backend_cls, self.amqp_cls,
                 self.loader_cls, self.backend_cls, self.amqp_cls,
                 self.events_cls, self.log_cls, self.control_cls,
                 self.events_cls, self.log_cls, self.control_cls,
-                self.accept_magic_kwargs, self._config_source)
+                False, self._config_source)
 
 
     @cached_property
     @cached_property
     def Worker(self):
     def Worker(self):

+ 14 - 181
celery/app/builtins.py

@@ -9,10 +9,7 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
-from collections import deque
-
 from celery._state import get_current_worker_task, connect_on_app_finalize
 from celery._state import get_current_worker_task, connect_on_app_finalize
-from celery.utils import uuid
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 
 
 __all__ = []
 __all__ = []
@@ -44,7 +41,7 @@ def add_unlock_chord_task(app):
     It joins chords by creating a task chain polling the header for completion.
     It joins chords by creating a task chain polling the header for completion.
 
 
     """
     """
-    from celery.canvas import signature
+    from celery.canvas import maybe_signature
     from celery.exceptions import ChordError
     from celery.exceptions import ChordError
     from celery.result import allow_join_result, result_from_tuple
     from celery.result import allow_join_result, result_from_tuple
 
 
@@ -66,6 +63,7 @@ def add_unlock_chord_task(app):
             interval = unlock_chord.default_retry_delay
             interval = unlock_chord.default_retry_delay
 
 
         # check if the task group is ready, and if so apply the callback.
         # check if the task group is ready, and if so apply the callback.
+        callback = maybe_signature(callback, app)
         deps = GroupResult(
         deps = GroupResult(
             group_id,
             group_id,
             [result_from_tuple(r, app=app) for r in result],
             [result_from_tuple(r, app=app) for r in result],
@@ -73,7 +71,7 @@ def add_unlock_chord_task(app):
         j = deps.join_native if deps.supports_native_join else deps.join
         j = deps.join_native if deps.supports_native_join else deps.join
 
 
         if deps.ready():
         if deps.ready():
-            callback = signature(callback, app=app)
+            callback = maybe_signature(callback, app=app)
             try:
             try:
                 with allow_join_result():
                 with allow_join_result():
                     ret = j(timeout=3.0, propagate=propagate)
                     ret = j(timeout=3.0, propagate=propagate)
@@ -138,14 +136,14 @@ def add_chunk_task(app):
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_group_task(app):
 def add_group_task(app):
+    """No longer used, but here for backwards compatibility."""
     _app = app
     _app = app
-    from celery.canvas import maybe_signature, signature
+    from celery.canvas import maybe_signature
     from celery.result import result_from_tuple
     from celery.result import result_from_tuple
 
 
     class Group(app.Task):
     class Group(app.Task):
         app = _app
         app = _app
         name = 'celery.group'
         name = 'celery.group'
-        accept_magic_kwargs = False
         _decorated = True
         _decorated = True
 
 
         def run(self, tasks, result, group_id, partial_args,
         def run(self, tasks, result, group_id, partial_args,
@@ -153,13 +151,8 @@ def add_group_task(app):
             app = self.app
             app = self.app
             result = result_from_tuple(result, app)
             result = result_from_tuple(result, app)
             # any partial args are added to all tasks in the group
             # any partial args are added to all tasks in the group
-            taskit = (signature(task, app=app).clone(partial_args)
+            taskit = (maybe_signature(task, app=app).clone(partial_args)
                       for i, task in enumerate(tasks))
                       for i, task in enumerate(tasks))
-            if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
-                return app.GroupResult(
-                    result.id,
-                    [stask.apply(group_id=group_id) for stask in taskit],
-                )
             with app.producer_or_acquire() as pub:
             with app.producer_or_acquire() as pub:
                 [stask.apply_async(group_id=group_id, producer=pub,
                 [stask.apply_async(group_id=group_id, producer=pub,
                                    add_to_parent=False) for stask in taskit]
                                    add_to_parent=False) for stask in taskit]
@@ -167,152 +160,32 @@ def add_group_task(app):
             if add_to_parent and parent:
             if add_to_parent and parent:
                 parent.add_trail(result)
                 parent.add_trail(result)
             return result
             return result
-
-        def prepare(self, options, tasks, args, **kwargs):
-            options['group_id'] = group_id = (
-                options.setdefault('task_id', uuid()))
-
-            def prepare_member(task):
-                task = maybe_signature(task, app=self.app)
-                task.options['group_id'] = group_id
-                return task, task.freeze()
-
-            try:
-                tasks, res = list(zip(
-                    *[prepare_member(task) for task in tasks]
-                ))
-            except ValueError:  # tasks empty
-                tasks, res = [], []
-            return (tasks, self.app.GroupResult(group_id, res), group_id, args)
-
-        def apply_async(self, partial_args=(), kwargs={}, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(partial_args, kwargs, **options)
-            tasks, result, gid, args = self.prepare(
-                options, args=partial_args, **kwargs
-            )
-            super(Group, self).apply_async((
-                list(tasks), result.as_tuple(), gid, args), **options
-            )
-            return result
-
-        def apply(self, args=(), kwargs={}, **options):
-            return super(Group, self).apply(
-                self.prepare(options, args=args, **kwargs),
-                **options).get()
     return Group
     return Group
 
 
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_chain_task(app):
 def add_chain_task(app):
-    from celery.canvas import (
-        Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
-    )
-
+    """No longer used, but here for backwards compatibility."""
     _app = app
     _app = app
 
 
     class Chain(app.Task):
     class Chain(app.Task):
         app = _app
         app = _app
         name = 'celery.chain'
         name = 'celery.chain'
-        accept_magic_kwargs = False
         _decorated = True
         _decorated = True
 
 
-        def prepare_steps(self, args, tasks):
-            app = self.app
-            steps = deque(tasks)
-            next_step = prev_task = prev_res = None
-            tasks, results = [], []
-            i = 0
-            while steps:
-                # First task get partial args from chain.
-                task = maybe_signature(steps.popleft(), app=app)
-                task = task.clone() if i else task.clone(args)
-                res = task.freeze()
-                i += 1
-
-                if isinstance(task, group):
-                    task = maybe_unroll_group(task)
-                if isinstance(task, chain):
-                    # splice the chain
-                    steps.extendleft(reversed(task.tasks))
-                    continue
-
-                elif isinstance(task, group) and steps and \
-                        not isinstance(steps[0], group):
-                    # automatically upgrade group(..) | s to chord(group, s)
-                    try:
-                        next_step = steps.popleft()
-                        # for chords we freeze by pretending it's a normal
-                        # task instead of a group.
-                        res = Signature.freeze(next_step)
-                        task = chord(task, body=next_step, task_id=res.task_id)
-                    except IndexError:
-                        pass  # no callback, so keep as group
-                if prev_task:
-                    # link previous task to this task.
-                    prev_task.link(task)
-                    # set the results parent attribute.
-                    if not res.parent:
-                        res.parent = prev_res
-
-                if not isinstance(prev_task, chord):
-                    results.append(res)
-                    tasks.append(task)
-                prev_task, prev_res = task, res
-
-            return tasks, results
-
-        def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
-                        task_id=None, link=None, link_error=None, **options):
-            if self.app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            options.pop('publisher', None)
-            tasks, results = self.prepare_steps(args, kwargs['tasks'])
-            result = results[-1]
-            if group_id:
-                tasks[-1].set(group_id=group_id)
-            if chord:
-                tasks[-1].set(chord=chord)
-            if task_id:
-                tasks[-1].set(task_id=task_id)
-                result = tasks[-1].type.AsyncResult(task_id)
-            # make sure we can do a link() and link_error() on a chain object.
-            if link:
-                tasks[-1].set(link=link)
-            # and if any task in the chain fails, call the errbacks
-            if link_error:
-                for task in tasks:
-                    task.set(link_error=link_error)
-            tasks[0].apply_async(**options)
-            return result
-
-        def apply(self, args=(), kwargs={}, signature=maybe_signature,
-                  **options):
-            app = self.app
-            last, fargs = None, args  # fargs passed to first task only
-            for task in kwargs['tasks']:
-                res = signature(task, app=app).clone(fargs).apply(
-                    last and (last.get(), ),
-                )
-                res.parent, last, fargs = last, res, None
-            return last
     return Chain
     return Chain
 
 
 
 
 @connect_on_app_finalize
 @connect_on_app_finalize
 def add_chord_task(app):
 def add_chord_task(app):
-    """Every chord is executed in a dedicated task, so that the chord
-    can be used as a signature, and this generates the task
-    responsible for that."""
-    from celery import group
+    """No longer used, but here for backwards compatibility."""
+    from celery import group, chord as _chord
     from celery.canvas import maybe_signature
     from celery.canvas import maybe_signature
     _app = app
     _app = app
-    default_propagate = app.conf.CELERY_CHORD_PROPAGATES
 
 
     class Chord(app.Task):
     class Chord(app.Task):
         app = _app
         app = _app
         name = 'celery.chord'
         name = 'celery.chord'
-        accept_magic_kwargs = False
         ignore_result = False
         ignore_result = False
         _decorated = True
         _decorated = True
 
 
@@ -320,53 +193,13 @@ def add_chord_task(app):
                 countdown=1, max_retries=None, propagate=None,
                 countdown=1, max_retries=None, propagate=None,
                 eager=False, **kwargs):
                 eager=False, **kwargs):
             app = self.app
             app = self.app
-            propagate = default_propagate if propagate is None else propagate
-            group_id = uuid()
-
             # - convert back to group if serialized
             # - convert back to group if serialized
             tasks = header.tasks if isinstance(header, group) else header
             tasks = header.tasks if isinstance(header, group) else header
             header = group([
             header = group([
-                maybe_signature(s, app=app).clone() for s in tasks
+                maybe_signature(s, app=app) for s in tasks
             ], app=self.app)
             ], app=self.app)
-            # - eager applies the group inline
-            if eager:
-                return header.apply(args=partial_args, task_id=group_id)
-
-            body.setdefault('chord_size', len(header.tasks))
-            results = header.freeze(group_id=group_id, chord=body).results
-
-            return self.backend.apply_chord(
-                header, partial_args, group_id,
-                body, interval=interval, countdown=countdown,
-                max_retries=max_retries, propagate=propagate, result=results,
-            )
-
-        def apply_async(self, args=(), kwargs={}, task_id=None,
-                        group_id=None, chord=None, **options):
-            app = self.app
-            if app.conf.CELERY_ALWAYS_EAGER:
-                return self.apply(args, kwargs, **options)
-            header = kwargs.pop('header')
-            body = kwargs.pop('body')
-            header, body = (maybe_signature(header, app=app),
-                            maybe_signature(body, app=app))
-            # forward certain options to body
-            if chord is not None:
-                body.options['chord'] = chord
-            if group_id is not None:
-                body.options['group_id'] = group_id
-            [body.link(s) for s in options.pop('link', [])]
-            [body.link_error(s) for s in options.pop('link_error', [])]
-            body_result = body.freeze(task_id)
-            parent = super(Chord, self).apply_async((header, body, args),
-                                                    kwargs, **options)
-            body_result.parent = parent
-            return body_result
-
-        def apply(self, args=(), kwargs={}, propagate=True, **options):
-            body = kwargs['body']
-            res = super(Chord, self).apply(args, dict(kwargs, eager=True),
-                                           **options)
-            return maybe_signature(body, app=self.app).apply(
-                args=(res.get(propagate=propagate).get(), ))
+            body = maybe_signature(body, app=app)
+            ch = _chord(header, body)
+            return ch.run(header, body, partial_args, app, interval,
+                          countdown, max_retries, propagate, **kwargs)
     return Chord
     return Chord

+ 14 - 27
celery/app/task.py

@@ -20,7 +20,7 @@ from celery.exceptions import MaxRetriesExceededError, Reject, Retry
 from celery.five import class_property, items, with_metaclass
 from celery.five import class_property, items, with_metaclass
 from celery.local import Proxy
 from celery.local import Proxy
 from celery.result import EagerResult
 from celery.result import EagerResult
-from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, uuid, maybe_reraise
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.functional import mattrgetter, maybe_list
 from celery.utils.imports import instantiate
 from celery.utils.imports import instantiate
 from celery.utils.mail import ErrorMail
 from celery.utils.mail import ErrorMail
@@ -93,6 +93,8 @@ class Context(object):
     headers = None
     headers = None
     delivery_info = None
     delivery_info = None
     reply_to = None
     reply_to = None
+    root_id = None
+    parent_id = None
     correlation_id = None
     correlation_id = None
     taskset = None   # compat alias to group
     taskset = None   # compat alias to group
     group = None
     group = None
@@ -235,10 +237,6 @@ class Task(object):
     #: If :const:`True` the task is an abstract base class.
     #: If :const:`True` the task is an abstract base class.
     abstract = True
     abstract = True
 
 
-    #: If disabled the worker will not forward magic keyword arguments.
-    #: Deprecated and scheduled for removal in v4.0.
-    accept_magic_kwargs = False
-
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: Maximum number of retries before giving up.  If set to :const:`None`,
     #: it will **never** stop retrying.
     #: it will **never** stop retrying.
     max_retries = 3
     max_retries = 3
@@ -343,6 +341,9 @@ class Task(object):
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
             'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )
     )
 
 
+    #: ignored
+    accept_magic_kwargs = False
+
     _backend = None  # set by backend property.
     _backend = None  # set by backend property.
 
 
     __bound__ = False
     __bound__ = False
@@ -360,8 +361,6 @@ class Task(object):
         for attr_name, config_name in self.from_config:
         for attr_name, config_name in self.from_config:
             if getattr(self, attr_name, None) is None:
             if getattr(self, attr_name, None) is None:
                 setattr(self, attr_name, conf[config_name])
                 setattr(self, attr_name, conf[config_name])
-        if self.accept_magic_kwargs is None:
-            self.accept_magic_kwargs = app.accept_magic_kwargs
 
 
         # decorate with annotations from config.
         # decorate with annotations from config.
         if not was_bound:
         if not was_bound:
@@ -693,7 +692,7 @@ class Task(object):
 
 
         """
         """
         # trace imports Task, so need to import inline.
         # trace imports Task, so need to import inline.
-        from celery.app.trace import eager_trace_task
+        from celery.app.trace import build_tracer
 
 
         app = self._get_app()
         app = self._get_app()
         args = args or ()
         args = args or ()
@@ -718,28 +717,16 @@ class Task(object):
                    'errbacks': maybe_list(link_error),
                    'errbacks': maybe_list(link_error),
                    'headers': options.get('headers'),
                    'headers': options.get('headers'),
                    'delivery_info': {'is_eager': True}}
                    'delivery_info': {'is_eager': True}}
-        if self.accept_magic_kwargs:
-            default_kwargs = {'task_name': task.name,
-                              'task_id': task_id,
-                              'task_retries': retries,
-                              'task_is_eager': True,
-                              'logfile': options.get('logfile'),
-                              'loglevel': options.get('loglevel', 0),
-                              'delivery_info': {'is_eager': True}}
-            supported_keys = fun_takes_kwargs(task.run, default_kwargs)
-            extend_with = {
-                key: val for key, val in items(default_kwargs)
-                if key in supported_keys
-            }
-            kwargs.update(extend_with)
-
         tb = None
         tb = None
-        retval, info = eager_trace_task(task, task_id, args, kwargs,
-                                        app=self._get_app(),
-                                        request=request, propagate=throw)
+        tracer = build_tracer(
+            task.name, task, eager=True,
+            propagate=throw, app=self._get_app(),
+        )
+        ret = tracer(task_id, args, kwargs, request)
+        retval = ret.retval
         if isinstance(retval, ExceptionInfo):
         if isinstance(retval, ExceptionInfo):
             retval, tb = retval.exception, retval.traceback
             retval, tb = retval.exception, retval.traceback
-        state = states.SUCCESS if info is None else info.state
+        state = states.SUCCESS if ret.info is None else ret.info.state
         return EagerResult(task_id, retval, state, traceback=tb)
         return EagerResult(task_id, retval, state, traceback=tb)
 
 
     def AsyncResult(self, task_id, **kwargs):
     def AsyncResult(self, task_id, **kwargs):

+ 174 - 32
celery/app/trace.py

@@ -15,33 +15,84 @@ from __future__ import absolute_import
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # but in the end it only resulted in bad performance and horrible tracebacks,
 # so instead we now use one closure per task class.
 # so instead we now use one closure per task class.
 
 
+import logging
 import os
 import os
 import socket
 import socket
 import sys
 import sys
 
 
+from collections import namedtuple
 from warnings import warn
 from warnings import warn
 
 
 from billiard.einfo import ExceptionInfo
 from billiard.einfo import ExceptionInfo
 from kombu.exceptions import EncodeError
 from kombu.exceptions import EncodeError
-from kombu.utils import kwdict
+from kombu.serialization import loads as loads_message, prepare_accept_content
+from kombu.utils.encoding import safe_repr, safe_str
 
 
 from celery import current_app, group
 from celery import current_app, group
 from celery import states, signals
 from celery import states, signals
 from celery._state import _task_stack
 from celery._state import _task_stack
 from celery.app import set_default_app
 from celery.app import set_default_app
 from celery.app.task import Task as BaseTask, Context
 from celery.app.task import Task as BaseTask, Context
-from celery.exceptions import Ignore, Reject, Retry
+from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
+from celery.five import monotonic
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.utils.objects import mro_lookup
 from celery.utils.objects import mro_lookup
 from celery.utils.serialization import (
 from celery.utils.serialization import (
-    get_pickleable_exception,
-    get_pickleable_etype,
+    get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
 )
 )
+from celery.utils.text import truncate
 
 
-__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
+__all__ = ['TraceInfo', 'build_tracer', 'trace_task',
            'setup_worker_optimizations', 'reset_worker_optimizations']
            'setup_worker_optimizations', 'reset_worker_optimizations']
 
 
-_logger = get_logger(__name__)
+logger = get_logger(__name__)
+info = logger.info
+
+#: Format string used to log task success.
+LOG_SUCCESS = """\
+Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
+"""
+
+#: Format string used to log task failure.
+LOG_FAILURE = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task internal error.
+LOG_INTERNAL_ERROR = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task ignored.
+LOG_IGNORED = """\
+Task %(name)s[%(id)s] %(description)s\
+"""
+
+#: Format string used to log task rejected.
+LOG_REJECTED = """\
+Task %(name)s[%(id)s] %(exc)s\
+"""
+
+#: Format string used to log task retry.
+LOG_RETRY = """\
+Task %(name)s[%(id)s] retry: %(exc)s\
+"""
+
+log_policy_t = namedtuple(
+    'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
+)
+
+log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
+log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
+log_policy_internal = log_policy_t(
+    LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
+)
+log_policy_expected = log_policy_t(
+    LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
+)
+log_policy_unexpected = log_policy_t(
+    LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
+)
 
 
 send_prerun = signals.task_prerun.send
 send_prerun = signals.task_prerun.send
 send_postrun = signals.task_postrun.send
 send_postrun = signals.task_postrun.send
@@ -56,9 +107,11 @@ EXCEPTION_STATES = states.EXCEPTION_STATES
 IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
 IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
 
 
 #: set by :func:`setup_worker_optimizations`
 #: set by :func:`setup_worker_optimizations`
-_tasks = None
+_localized = []
 _patched = {}
 _patched = {}
 
 
+trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
+
 
 
 def task_has_custom(task, attr):
 def task_has_custom(task, attr):
     """Return true if the task or one of its bases
     """Return true if the task or one of its bases
@@ -67,6 +120,19 @@ def task_has_custom(task, attr):
                       monkey_patched=['celery.app.task'])
                       monkey_patched=['celery.app.task'])
 
 
 
 
+def get_log_policy(task, einfo, exc):
+    if isinstance(exc, Reject):
+        return log_policy_reject
+    elif isinstance(exc, Ignore):
+        return log_policy_ignore
+    elif einfo.internal:
+        return log_policy_internal
+    else:
+        if task.throws and isinstance(exc, task.throws):
+            return log_policy_expected
+        return log_policy_unexpected
+
+
 class TraceInfo(object):
 class TraceInfo(object):
     __slots__ = ('state', 'retval')
     __slots__ = ('state', 'retval')
 
 
@@ -100,6 +166,10 @@ class TraceInfo(object):
             task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
             task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
             signals.task_retry.send(sender=task, request=req,
             signals.task_retry.send(sender=task, request=req,
                                     reason=reason, einfo=einfo)
                                     reason=reason, einfo=einfo)
+            info(LOG_RETRY, {
+                'id': req.id, 'name': task.name,
+                'exc': safe_repr(reason.exc),
+            })
             return einfo
             return einfo
         finally:
         finally:
             del(tb)
             del(tb)
@@ -123,14 +193,47 @@ class TraceInfo(object):
                                       kwargs=req.kwargs,
                                       kwargs=req.kwargs,
                                       traceback=tb,
                                       traceback=tb,
                                       einfo=einfo)
                                       einfo=einfo)
+            self._log_error(task, einfo)
             return einfo
             return einfo
         finally:
         finally:
             del(tb)
             del(tb)
 
 
+    def _log_error(self, task, einfo):
+        req = task.request
+        eobj = einfo.exception = get_pickled_exception(einfo.exception)
+        exception, traceback, exc_info, sargs, skwargs = (
+            safe_repr(eobj),
+            safe_str(einfo.traceback),
+            einfo.exc_info,
+            safe_repr(req.args),
+            safe_repr(req.kwargs),
+        )
+        policy = get_log_policy(task, einfo, eobj)
+
+        context = {
+            'hostname': req.hostname,
+            'id': req.id,
+            'name': task.name,
+            'exc': exception,
+            'traceback': traceback,
+            'args': sargs,
+            'kwargs': skwargs,
+            'description': policy.description,
+            'internal': einfo.internal,
+        }
+
+        logger.log(policy.severity, policy.format.strip(), context,
+                   exc_info=exc_info if policy.traceback else None,
+                   extra={'data': context})
+
+        if policy.mail:
+            task.send_error_email(context, einfo.exception)
+
 
 
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
                  Info=TraceInfo, eager=False, propagate=False, app=None,
-                 IGNORE_STATES=IGNORE_STATES):
+                 monotonic=monotonic, truncate=truncate,
+                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
     """Return a function that traces task execution; catches all
     """Return a function that traces task execution; catches all
     exceptions and updates result backend with the state and result
     exceptions and updates result backend with the state and result
 
 
@@ -186,6 +289,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     push_task = _task_stack.push
     push_task = _task_stack.push
     pop_task = _task_stack.pop
     pop_task = _task_stack.pop
     on_chord_part_return = backend.on_chord_part_return
     on_chord_part_return = backend.on_chord_part_return
+    _does_info = logger.isEnabledFor(logging.INFO)
 
 
     prerun_receivers = signals.task_prerun.receivers
     prerun_receivers = signals.task_prerun.receivers
     postrun_receivers = signals.task_postrun.receivers
     postrun_receivers = signals.task_postrun.receivers
@@ -209,6 +313,8 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     def trace_task(uuid, args, kwargs, request=None):
     def trace_task(uuid, args, kwargs, request=None):
         # R      - is the possibly prepared return value.
         # R      - is the possibly prepared return value.
         # I      - is the Info object.
         # I      - is the Info object.
+        # T      - runtime
+        # Rstr   - textual representation of return value
         # retval - is the always unmodified return value.
         # retval - is the always unmodified return value.
         # state  - is the resulting task state.
         # state  - is the resulting task state.
 
 
@@ -216,9 +322,14 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         # for performance reasons, and because the function is so long
         # for performance reasons, and because the function is so long
         # we want the main variables (I, and R) to stand out visually from the
         # we want the main variables (I, and R) to stand out visually from the
         # the rest of the variables, so breaking PEP8 is worth it ;)
         # the rest of the variables, so breaking PEP8 is worth it ;)
-        R = I = retval = state = None
-        kwargs = kwdict(kwargs)
+        R = I = T = Rstr = retval = state = None
+        time_start = monotonic()
         try:
         try:
+            try:
+                kwargs.items
+            except AttributeError:
+                raise InvalidTaskError(
+                    'Task keyword arguments is not a mapping')
             push_task(task)
             push_task(task)
             task_request = Context(request or {}, args=args,
             task_request = Context(request or {}, args=args,
                                    called_directly=False, kwargs=kwargs)
                                    called_directly=False, kwargs=kwargs)
@@ -289,6 +400,13 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                             task_on_success(retval, uuid, args, kwargs)
                             task_on_success(retval, uuid, args, kwargs)
                         if success_receivers:
                         if success_receivers:
                             send_success(sender=task, result=retval)
                             send_success(sender=task, result=retval)
+                        if _does_info:
+                            T = monotonic() - time_start
+                            Rstr = truncate(safe_repr(R), 256)
+                            info(LOG_SUCCESS, {
+                                'id': uuid, 'name': name,
+                                'return_value': Rstr, 'runtime': T,
+                            })
 
 
                 # -* POST *-
                 # -* POST *-
                 if state not in IGNORE_STATES:
                 if state not in IGNORE_STATES:
@@ -314,15 +432,15 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                         except (KeyboardInterrupt, SystemExit, MemoryError):
                         except (KeyboardInterrupt, SystemExit, MemoryError):
                             raise
                             raise
                         except Exception as exc:
                         except Exception as exc:
-                            _logger.error('Process cleanup failed: %r', exc,
-                                          exc_info=True)
+                            logger.error('Process cleanup failed: %r', exc,
+                                         exc_info=True)
         except MemoryError:
         except MemoryError:
             raise
             raise
         except Exception as exc:
         except Exception as exc:
             if eager:
             if eager:
                 raise
                 raise
             R = report_internal_error(task, exc)
             R = report_internal_error(task, exc)
-        return R, I
+        return trace_ok_t(R, I, T, Rstr)
 
 
     return trace_task
     return trace_task
 
 
@@ -331,27 +449,49 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
     try:
     try:
         if task.__trace__ is None:
         if task.__trace__ is None:
             task.__trace__ = build_tracer(task.name, task, **opts)
             task.__trace__ = build_tracer(task.name, task, **opts)
-        return task.__trace__(uuid, args, kwargs, request)[0]
+        return task.__trace__(uuid, args, kwargs, request)
     except Exception as exc:
     except Exception as exc:
         return report_internal_error(task, exc)
         return report_internal_error(task, exc)
 
 
 
 
-def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
-    return trace_task((app or current_app).tasks[name],
-                      uuid, args, kwargs, request, app=app, **opts)
+def _trace_task_ret(name, uuid, request, body, content_type,
+                    content_encoding, loads=loads_message, app=None,
+                    **extra_request):
+    app = app or current_app._get_current_object()
+    accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+    args, kwargs = loads(body, content_type, content_encoding, accept=accept)
+    request.update(args=args, kwargs=kwargs, **extra_request)
+    R, I, T, Rstr = trace_task(app.tasks[name],
+                               uuid, args, kwargs, request, app=app)
+    return (1, R, T) if I else (0, Rstr, T)
 trace_task_ret = _trace_task_ret
 trace_task_ret = _trace_task_ret
 
 
 
 
-def _fast_trace_task(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
     # setup_worker_optimizations will point trace_task_ret to here,
     # setup_worker_optimizations will point trace_task_ret to here,
     # so this is the function used in the worker.
     # so this is the function used in the worker.
-    return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
-
-
-def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
-    opts.setdefault('eager', True)
-    return build_tracer(task.name, task, **opts)(
-        uuid, args, kwargs, request)
+    tasks, _ = _loc
+    R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)[0]
+    # exception instance if error, else result text
+    return (1, R, T) if I else (0, Rstr, T)
+
+
+def _fast_trace_task(task, uuid, request, body, content_type,
+                     content_encoding, loads=loads_message, _loc=_localized,
+                     hostname=None, **_):
+    tasks, accept = _loc
+    if content_type:
+        args, kwargs = loads(body, content_type, content_encoding,
+                             accept=accept)
+    else:
+        args, kwargs = body
+    request.update({
+        'args': args, 'kwargs': kwargs, 'hostname': hostname,
+    })
+    R, I, T, Rstr = tasks[task].__trace__(
+        uuid, args, kwargs, request,
+    )
+    return (1, R, T) if I else (0, Rstr, T)
 
 
 
 
 def report_internal_error(task, exc):
 def report_internal_error(task, exc):
@@ -368,7 +508,6 @@ def report_internal_error(task, exc):
 
 
 
 
 def setup_worker_optimizations(app):
 def setup_worker_optimizations(app):
-    global _tasks
     global trace_task_ret
     global trace_task_ret
 
 
     # make sure custom Task.__call__ methods that calls super
     # make sure custom Task.__call__ methods that calls super
@@ -388,12 +527,15 @@ def setup_worker_optimizations(app):
     app.finalize()
     app.finalize()
 
 
     # set fast shortcut to task registry
     # set fast shortcut to task registry
-    _tasks = app._tasks
+    _localized[:] = [
+        app._tasks,
+        prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+    ]
 
 
     trace_task_ret = _fast_trace_task
     trace_task_ret = _fast_trace_task
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _fast_trace_task
-    job_module.__optimize__()
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _fast_trace_task
+    request_module.__optimize__()
 
 
 
 
 def reset_worker_optimizations():
 def reset_worker_optimizations():
@@ -407,8 +549,8 @@ def reset_worker_optimizations():
         BaseTask.__call__ = _patched.pop('BaseTask.__call__')
         BaseTask.__call__ = _patched.pop('BaseTask.__call__')
     except KeyError:
     except KeyError:
         pass
         pass
-    from celery.worker import job as job_module
-    job_module.trace_task_ret = _trace_task_ret
+    from celery.worker import request as request_module
+    request_module.trace_task_ret = _trace_task_ret
 
 
 
 
 def _install_stack_protection():
 def _install_stack_protection():

+ 0 - 1
celery/app/utils.py

@@ -152,7 +152,6 @@ class AppPickler(object):
         return dict(main=main, loader=loader, backend=backend, amqp=amqp,
         return dict(main=main, loader=loader, backend=backend, amqp=amqp,
                     changes=changes, events=events, log=log, control=control,
                     changes=changes, events=events, log=log, control=control,
                     set_as_current=False,
                     set_as_current=False,
-                    accept_magic_kwargs=accept_magic_kwargs,
                     config_source=config_source)
                     config_source=config_source)
 
 
     def construct(self, cls, **kwargs):
     def construct(self, cls, **kwargs):

+ 4 - 4
celery/backends/base.py

@@ -165,11 +165,11 @@ class BaseBackend(object):
 
 
     def exception_to_python(self, exc):
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
         """Convert serialized exception to Python exception."""
-        if self.serializer in EXCEPTION_ABLE_CODECS:
-            return get_pickled_exception(exc)
-        elif not isinstance(exc, BaseException):
-            return create_exception_cls(
+        if not isinstance(exc, BaseException):
+            exc = create_exception_cls(
                 from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
                 from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+        if self.serializer in EXCEPTION_ABLE_CODECS:
+            exc = get_pickled_exception(exc)
         return exc
         return exc
 
 
     def prepare_value(self, result):
     def prepare_value(self, result):

+ 209 - 56
celery/canvas.py

@@ -12,6 +12,7 @@
 """
 """
 from __future__ import absolute_import
 from __future__ import absolute_import
 
 
+from collections import deque
 from copy import deepcopy
 from copy import deepcopy
 from functools import partial as _partial, reduce
 from functools import partial as _partial, reduce
 from operator import itemgetter
 from operator import itemgetter
@@ -19,7 +20,7 @@ from itertools import chain as _chain
 
 
 from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
 from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
 
 
-from celery._state import current_app
+from celery._state import current_app, get_current_worker_task
 from celery.utils.functional import (
 from celery.utils.functional import (
     maybe_list, is_list, regen,
     maybe_list, is_list, regen,
     chunks as _chunks,
     chunks as _chunks,
@@ -194,12 +195,13 @@ class Signature(dict):
         return s
         return s
     partial = clone
     partial = clone
 
 
-    def freeze(self, _id=None, group_id=None, chord=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         opts = self.options
         try:
         try:
             tid = opts['task_id']
             tid = opts['task_id']
         except KeyError:
         except KeyError:
             tid = opts['task_id'] = _id or uuid()
             tid = opts['task_id'] = _id or uuid()
+        root_id = opts.setdefault('root_id', root_id)
         if 'reply_to' not in opts:
         if 'reply_to' not in opts:
             opts['reply_to'] = self.app.oid
             opts['reply_to'] = self.app.oid
         if group_id:
         if group_id:
@@ -348,6 +350,99 @@ class chain(Signature):
         if self.tasks:
         if self.tasks:
             return self.apply_async(args, kwargs)
             return self.apply_async(args, kwargs)
 
 
+    def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+                    task_id=None, link=None, link_error=None,
+                    publisher=None, root_id=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        tasks, results = self.prepare_steps(
+            args, self.tasks, root_id, link_error,
+        )
+        if not results:
+            return
+        result = results[-1]
+        last_task = tasks[-1]
+        if group_id:
+            last_task.set(group_id=group_id)
+        if chord:
+            last_task.set(chord=chord)
+        if task_id:
+            last_task.set(task_id=task_id)
+            result = last_task.type.AsyncResult(task_id)
+        # make sure we can do a link() and link_error() on a chain object.
+        if link:
+            tasks[-1].set(link=link)
+        tasks[0].apply_async(**options)
+        return result
+
+    def prepare_steps(self, args, tasks,
+                      root_id=None, link_error=None, app=None):
+        app = app or self.app
+        steps = deque(tasks)
+        next_step = prev_task = prev_res = None
+        tasks, results = [], []
+        i = 0
+        while steps:
+            task = steps.popleft()
+            if not i:  # first task
+                # first task gets partial args from chain
+                task = task.clone(args)
+                res = task.freeze(root_id=root_id)
+                root_id = res.id if root_id is None else root_id
+            else:
+                task = task.clone()
+                res = task.freeze(root_id=root_id)
+            i += 1
+
+            if isinstance(task, group):
+                task = maybe_unroll_group(task)
+
+            if isinstance(task, chain):
+                # splice the chain
+                steps.extendleft(reversed(task.tasks))
+                continue
+            elif isinstance(task, group) and steps and \
+                    not isinstance(steps[0], group):
+                # automatically upgrade group(...) | s to chord(group, s)
+                try:
+                    next_step = steps.popleft()
+                    # for chords we freeze by pretending it's a normal
+                    # signature instead of a group.
+                    res = Signature.freeze(next_step)
+                    task = chord(
+                        task, body=next_step,
+                        task_id=res.task_id, root_id=root_id,
+                    )
+                except IndexError:
+                    pass  # no callback, so keep as group.
+
+            if prev_task:
+                # link previous task to this task.
+                prev_task.link(task)
+                # set AsyncResult.parent
+                if not res.parent:
+                    res.parent = prev_res
+
+            if link_error:
+                task.set(link_error=link_error)
+
+            if not isinstance(prev_task, chord):
+                results.append(res)
+                tasks.append(task)
+            prev_task, prev_res = task, res
+
+        return tasks, results
+
+    def apply(self, args=(), kwargs={}, **options):
+        last, fargs = None, args
+        for task in self.tasks:
+            res = task.clone(fargs).apply(
+                last and (last.get(), ), **options
+            )
+            res.parent, last, fargs = last, res, None
+        return last
+
     @classmethod
     @classmethod
     def from_dict(self, d, app=None):
     def from_dict(self, d, app=None):
         tasks = d['kwargs']['tasks']
         tasks = d['kwargs']['tasks']
@@ -357,11 +452,14 @@ class chain(Signature):
         return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
         return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
 
 
     @property
     @property
-    def type(self):
-        try:
-            return self._type or self.tasks[0].type.app.tasks['celery.chain']
-        except KeyError:
-            return self.app.tasks['celery.chain']
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app or current_app
 
 
     def __repr__(self):
     def __repr__(self):
         return ' | '.join(repr(t) for t in self.tasks)
         return ' | '.join(repr(t) for t in self.tasks)
@@ -452,11 +550,6 @@ def _maybe_group(tasks):
     return tasks
     return tasks
 
 
 
 
-def _maybe_clone(tasks, app):
-    return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
-            for s in tasks]
-
-
 @Signature.register_type
 @Signature.register_type
 class group(Signature):
 class group(Signature):
 
 
@@ -477,13 +570,54 @@ class group(Signature):
                 task['args'] = task._merge(d['args'])[0]
                 task['args'] = task._merge(d['args'])[0]
         return group(tasks, app=app, **kwdict(d['options']))
         return group(tasks, app=app, **kwdict(d['options']))
 
 
-    def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options):
-        tasks = _maybe_clone(self.tasks, app=self._app)
-        if not tasks:
+    def _prepared(self, tasks, partial_args, group_id, root_id):
+        for task in tasks:
+            task = task.clone(partial_args)
+            yield task, task.freeze(group_id=group_id, root_id=root_id)
+
+    def _apply_tasks(self, tasks, producer=None, app=None, **options):
+        app = app or self.app
+        with app.producer_or_acquire(producer) as producer:
+            for sig, res in tasks:
+                sig.apply_async(producer=producer, add_to_parent=False,
+                                **options)
+                yield res
+
+    def _freeze_gid(self, options):
+        # remove task_id and use that as the group_id,
+        # if we don't remove it then every task will have the same id...
+        options = dict(self.options, **options)
+        options['group_id'] = group_id = (
+            options.pop('task_id', uuid()))
+        return options, group_id, options.get('root_id')
+
+    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+                    producer=None, **options):
+        app = self.app
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply(args, kwargs, **options)
+        if not self.tasks:
             return self.freeze()
             return self.freeze()
-        type = self.type
-        return type(*type.prepare(dict(self.options, **options), tasks, args),
-                    add_to_parent=add_to_parent)
+
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        result = self.app.GroupResult(
+            group_id, list(self._apply_tasks(tasks, producer, app, **options)),
+        )
+        parent_task = get_current_worker_task()
+        if add_to_parent and parent_task:
+            parent_task.add_trail(result)
+        return result
+
+    def apply(self, args=(), kwargs={}, **options):
+        app = self.app
+        if not self.tasks:
+            return self.freeze()  # empty group returns GroupResult
+        options, group_id, root_id = self._freeze_gid(options)
+        tasks = self._prepared(self.tasks, args, group_id, root_id)
+        return app.GroupResult(group_id, [
+            sig.apply(**options) for sig, _ in tasks
+        ])
 
 
     def set_immutable(self, immutable):
     def set_immutable(self, immutable):
         for task in self.tasks:
         for task in self.tasks:
@@ -498,15 +632,10 @@ class group(Signature):
         sig = sig.clone().set(immutable=True)
         sig = sig.clone().set(immutable=True)
         return self.tasks[0].link_error(sig)
         return self.tasks[0].link_error(sig)
 
 
-    def apply(self, *args, **kwargs):
-        if not self.tasks:
-            return self.freeze()  # empty group returns GroupResult
-        return Signature.apply(self, *args, **kwargs)
-
     def __call__(self, *partial_args, **options):
     def __call__(self, *partial_args, **options):
         return self.apply_async(partial_args, **options)
         return self.apply_async(partial_args, **options)
 
 
-    def freeze(self, _id=None, group_id=None, chord=None):
+    def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
         opts = self.options
         opts = self.options
         try:
         try:
             gid = opts['task_id']
             gid = opts['task_id']
@@ -516,10 +645,13 @@ class group(Signature):
             opts['group_id'] = group_id
             opts['group_id'] = group_id
         if chord:
         if chord:
             opts['chord'] = group_id
             opts['chord'] = group_id
+        root_id = opts.setdefault('root_id', root_id)
         new_tasks, results = [], []
         new_tasks, results = [], []
         for task in self.tasks:
         for task in self.tasks:
             task = maybe_signature(task, app=self._app).clone()
             task = maybe_signature(task, app=self._app).clone()
-            results.append(task.freeze(group_id=group_id, chord=chord))
+            results.append(task.freeze(
+                group_id=group_id, chord=chord, root_id=root_id,
+            ))
             new_tasks.append(task)
             new_tasks.append(task)
         self.tasks = self.kwargs['tasks'] = new_tasks
         self.tasks = self.kwargs['tasks'] = new_tasks
         return self.app.GroupResult(gid, results)
         return self.app.GroupResult(gid, results)
@@ -538,14 +670,14 @@ class group(Signature):
         return repr(self.tasks)
         return repr(self.tasks)
 
 
     @property
     @property
-    def type(self):
-        if self._type:
-            return self._type
-        # taking the app from the first task in the list, there may be a
-        # better solution for this, e.g. to consolidate tasks with the same
-        # app and apply them in batches.
-        app = self._app if self._app else self.tasks[0].type.app
-        return app.tasks[self['task']]
+    def app(self):
+        app = self._app
+        if app is None:
+            try:
+                app = self.tasks[0]._app
+            except (KeyError, IndexError):
+                pass
+        return app if app is not None else current_app
 
 
 
 
 @Signature.register_type
 @Signature.register_type
@@ -560,8 +692,8 @@ class chord(Signature):
         )
         )
         self.subtask_type = 'chord'
         self.subtask_type = 'chord'
 
 
-    def freeze(self, _id=None, group_id=None, chord=None):
-        return self.body.freeze(_id, group_id=group_id, chord=chord)
+    def freeze(self, *args, **kwargs):
+        return self.body.freeze(*args, **kwargs)
 
 
     @classmethod
     @classmethod
     def from_dict(self, d, app=None):
     def from_dict(self, d, app=None):
@@ -574,20 +706,14 @@ class chord(Signature):
         # than manually popping things off.
         # than manually popping things off.
         return (header, body), kwargs
         return (header, body), kwargs
 
 
-    @property
-    def type(self):
-        if self._type:
-            return self._type
-        # we will be able to fix this mess in 3.2 when we no longer
-        # require an actual task implementation for chord/group
-        if self._app:
-            app = self._app
-        else:
-            try:
-                app = self.tasks[0].type.app
-            except IndexError:
-                app = self.body.type.app
-        return app.tasks['celery.chord']
+    @cached_property
+    def app(self):
+        app = self._app
+        if app is None:
+            app = self.tasks[0]._app
+            if app is None:
+                app = self.body._app
+        return app if app is not None else current_app
 
 
     def apply_async(self, args=(), kwargs={}, task_id=None,
     def apply_async(self, args=(), kwargs={}, task_id=None,
                     producer=None, publisher=None, connection=None,
                     producer=None, publisher=None, connection=None,
@@ -595,14 +721,41 @@ class chord(Signature):
         body = kwargs.get('body') or self.kwargs['body']
         body = kwargs.get('body') or self.kwargs['body']
         kwargs = dict(self.kwargs, **kwargs)
         kwargs = dict(self.kwargs, **kwargs)
         body = body.clone(**options)
         body = body.clone(**options)
+        app = self.app
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        if app.conf.CELERY_ALWAYS_EAGER:
+            return self.apply((), kwargs,
+                              body=body, task_id=task_id, **options)
+        return self.run(tasks, body, args, task_id=task_id, **options)
+
+    def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
+        body = self.body if body is None else body
+        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+                 else group(self.tasks))
+        return body.apply(
+            args=(tasks.apply().get(propagate=propagate), ),
+        )
 
 
-        _chord = self.type
-        if _chord.app.conf.CELERY_ALWAYS_EAGER:
-            return self.apply((), kwargs, task_id=task_id, **options)
-        res = body.freeze(task_id)
-        parent = _chord(self.tasks, body, args, **options)
-        res.parent = parent
-        return res
+    def run(self, header, body, partial_args, app=None, interval=None,
+            countdown=1, max_retries=None, propagate=None, eager=False,
+            task_id=None, **options):
+        app = app or self.app
+        propagate = (app.conf.CELERY_CHORD_PROPAGATES
+                     if propagate is None else propagate)
+        group_id = uuid()
+        root_id = body.options.get('root_id')
+        body.setdefault('chord_size', len(header.tasks))
+        results = header.freeze(
+            group_id=group_id, chord=body, root_id=root_id).results
+        bodyres = body.freeze(task_id, root_id=root_id)
+
+        parent = app.backend.apply_chord(
+            header, partial_args, group_id, body,
+            interval=interval, countdown=countdown,
+            max_retries=max_retries, propagate=propagate, result=results)
+        bodyres.parent = parent
+        return bodyres
 
 
     def __call__(self, body=None, **options):
     def __call__(self, body=None, **options):
         return self.apply_async((), {'body': body} if body else {}, **options)
         return self.apply_async((), {'body': body} if body else {}, **options)

+ 0 - 9
celery/concurrency/asynpool.py

@@ -37,7 +37,6 @@ from amqp.utils import promise
 from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
 from billiard import pool as _pool
 from billiard import pool as _pool
 from billiard.compat import buf_t, setblocking, isblocking
 from billiard.compat import buf_t, setblocking, isblocking
-from billiard.einfo import ExceptionInfo
 from billiard.queues import _SimpleQueue
 from billiard.queues import _SimpleQueue
 from kombu.async import READ, WRITE, ERR
 from kombu.async import READ, WRITE, ERR
 from kombu.serialization import pickle as _pickle
 from kombu.serialization import pickle as _pickle
@@ -46,7 +45,6 @@ from kombu.utils.compat import get_errno
 from kombu.utils.eventio import SELECT_BAD_FD
 from kombu.utils.eventio import SELECT_BAD_FD
 from celery.five import Counter, items, values
 from celery.five import Counter, items, values
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
-from celery.utils.text import truncate
 from celery.worker import state as worker_state
 from celery.worker import state as worker_state
 
 
 try:
 try:
@@ -96,8 +94,6 @@ SCHED_STRATEGIES = {
     'fair': SCHED_STRATEGY_FAIR,
     'fair': SCHED_STRATEGY_FAIR,
 }
 }
 
 
-RESULT_MAXLEN = 128
-
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
 
 
 
 
@@ -170,11 +166,6 @@ class Worker(_pool.Worker):
         # is writable.
         # is writable.
         self.outq.put((WORKER_UP, (pid, )))
         self.outq.put((WORKER_UP, (pid, )))
 
 
-    def prepare_result(self, result, RESULT_MAXLEN=RESULT_MAXLEN):
-        if not isinstance(result, ExceptionInfo):
-            return truncate(repr(result), RESULT_MAXLEN)
-        return result
-
 
 
 class ResultHandler(_pool.ResultHandler):
 class ResultHandler(_pool.ResultHandler):
     """Handles messages from the pool processes."""
     """Handles messages from the pool processes."""

+ 3 - 1
celery/concurrency/base.py

@@ -66,11 +66,13 @@ class BasePool(object):
 
 
     _state = None
     _state = None
     _pool = None
     _pool = None
+    _does_debug = True
 
 
     #: only used by multiprocessing pool
     #: only used by multiprocessing pool
     uses_semaphore = False
     uses_semaphore = False
 
 
     task_join_will_block = True
     task_join_will_block = True
+    body_can_be_buffer = False
 
 
     def __init__(self, limit=None, putlocks=True,
     def __init__(self, limit=None, putlocks=True,
                  forking_enable=True, callbacks_propagate=(), **options):
                  forking_enable=True, callbacks_propagate=(), **options):
@@ -79,7 +81,6 @@ class BasePool(object):
         self.options = options
         self.options = options
         self.forking_enable = forking_enable
         self.forking_enable = forking_enable
         self.callbacks_propagate = callbacks_propagate
         self.callbacks_propagate = callbacks_propagate
-        self._does_debug = logger.isEnabledFor(logging.DEBUG)
 
 
     def on_start(self):
     def on_start(self):
         pass
         pass
@@ -128,6 +129,7 @@ class BasePool(object):
         self.on_terminate()
         self.on_terminate()
 
 
     def start(self):
     def start(self):
+        self._does_debug = logger.isEnabledFor(logging.DEBUG)
         self.on_start()
         self.on_start()
         self._state = self.RUN
         self._state = self.RUN
 
 

+ 1 - 0
celery/concurrency/solo.py

@@ -17,6 +17,7 @@ __all__ = ['TaskPool']
 
 
 class TaskPool(BasePool):
 class TaskPool(BasePool):
     """Solo task pool (blocking, inline, fast)."""
     """Solo task pool (blocking, inline, fast)."""
+    body_can_be_buffer = True
 
 
     def __init__(self, *args, **kwargs):
     def __init__(self, *args, **kwargs):
         super(TaskPool, self).__init__(*args, **kwargs)
         super(TaskPool, self).__init__(*args, **kwargs)

+ 1 - 1
celery/contrib/batches.py

@@ -88,7 +88,7 @@ from itertools import count
 from celery.task import Task
 from celery.task import Task
 from celery.five import Empty, Queue
 from celery.five import Empty, Queue
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import noop
 from celery.utils import noop
 
 
 __all__ = ['Batches']
 __all__ = ['Batches']

+ 2 - 4
celery/events/state.py

@@ -30,7 +30,7 @@ from time import time
 from weakref import ref
 from weakref import ref
 
 
 from kombu.clocks import timetuple
 from kombu.clocks import timetuple
-from kombu.utils import cached_property, kwdict
+from kombu.utils import cached_property
 
 
 from celery import states
 from celery import states
 from celery.five import class_property, items, values
 from celery.five import class_property, items, values
@@ -54,8 +54,6 @@ Substantial drift from %s may mean clocks are out of sync.  Current drift is
 %s seconds.  [orig: %s recv: %s]
 %s seconds.  [orig: %s recv: %s]
 """
 """
 
 
-CAN_KWDICT = sys.version_info >= (2, 6, 5)
-
 logger = get_logger(__name__)
 logger = get_logger(__name__)
 warn = logger.warning
 warn = logger.warning
 
 
@@ -86,7 +84,7 @@ def heartbeat_expires(timestamp, freq=60,
 
 
 
 
 def _depickle_task(cls, fields):
 def _depickle_task(cls, fields):
-    return cls(**(fields if CAN_KWDICT else kwdict(fields)))
+    return cls(**fields)
 
 
 
 
 def with_unique_field(attr):
 def with_unique_field(attr):

+ 9 - 8
celery/five.py

@@ -28,6 +28,14 @@ except ImportError:  # pragma: no cover
     def Counter():  # noqa
     def Counter():  # noqa
         return defaultdict(int)
         return defaultdict(int)
 
 
+try:
+    buffer_t = buffer
+except NameError:  # pragma: no cover
+    # Py3 does not have buffer, but we only need isinstance.
+
+    class buffer_t(object):  # noqa
+        pass
+
 ############## py3k #########################################################
 ############## py3k #########################################################
 import sys
 import sys
 PY3 = sys.version_info[0] == 3
 PY3 = sys.version_info[0] == 3
@@ -210,15 +218,8 @@ def getappattr(path):
     return current_app._rgetattr(path)
     return current_app._rgetattr(path)
 
 
 
 
-def _compat_task_decorator(*args, **kwargs):
-    from celery import current_app
-    kwargs.setdefault('accept_magic_kwargs', True)
-    return current_app.task(*args, **kwargs)
-
-
 def _compat_periodic_task_decorator(*args, **kwargs):
 def _compat_periodic_task_decorator(*args, **kwargs):
     from celery.task import periodic_task
     from celery.task import periodic_task
-    kwargs.setdefault('accept_magic_kwargs', True)
     return periodic_task(*args, **kwargs)
     return periodic_task(*args, **kwargs)
 
 
 
 
@@ -228,7 +229,7 @@ COMPAT_MODULES = {
             'send_task': 'send_task',
             'send_task': 'send_task',
         },
         },
         'decorators': {
         'decorators': {
-            'task': _compat_task_decorator,
+            'task': 'task',
             'periodic_task': _compat_periodic_task_decorator,
             'periodic_task': _compat_periodic_task_decorator,
         },
         },
         'log': {
         'log': {

+ 1 - 3
celery/task/base.py

@@ -51,7 +51,6 @@ class Task(BaseTask):
     priority = None
     priority = None
     type = 'regular'
     type = 'regular'
     disable_error_emails = False
     disable_error_emails = False
-    accept_magic_kwargs = False
 
 
     from_config = BaseTask.from_config + (
     from_config = BaseTask.from_config + (
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
         ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
@@ -178,8 +177,7 @@ class PeriodicTask(Task):
 
 
 def task(*args, **kwargs):
 def task(*args, **kwargs):
     """Deprecated decorator, please use :func:`celery.task`."""
     """Deprecated decorator, please use :func:`celery.task`."""
-    return current_app.task(*args, **dict({'accept_magic_kwargs': False,
-                                           'base': Task}, **kwargs))
+    return current_app.task(*args, **dict({'base': Task}, **kwargs))
 
 
 
 
 def periodic_task(*args, **options):
 def periodic_task(*args, **options):

+ 1 - 2
celery/task/http.py

@@ -162,8 +162,7 @@ class HttpDispatch(object):
         return headers
         return headers
 
 
 
 
-@shared_task(name='celery.http_dispatch', bind=True,
-             url=None, method=None, accept_magic_kwargs=False)
+@shared_task(name='celery.http_dispatch', bind=True, url=None, method=None)
 def dispatch(self, url=None, method='GET', **kwargs):
 def dispatch(self, url=None, method='GET', **kwargs):
     """Task dispatching to an URL.
     """Task dispatching to an URL.
 
 

+ 0 - 12
celery/task/trace.py

@@ -1,12 +0,0 @@
-"""This module has moved to celery.app.trace."""
-from __future__ import absolute_import
-
-import sys
-
-from celery.utils import warn_deprecated
-
-warn_deprecated('celery.task.trace', removal='3.2',
-                alternative='Please use celery.app.trace instead.')
-
-from celery.app import trace
-sys.modules[__name__] = trace

+ 2 - 2
celery/tests/app/test_app.py

@@ -258,7 +258,7 @@ class test_App(AppCase):
             self.assertFalse(sh.called)
             self.assertFalse(sh.called)
 
 
     def test_task_compat_with_filter(self):
     def test_task_compat_with_filter(self):
-        with self.Celery(accept_magic_kwargs=True) as app:
+        with self.Celery() as app:
             check = Mock()
             check = Mock()
 
 
             def filter(task):
             def filter(task):
@@ -271,7 +271,7 @@ class test_App(AppCase):
             check.assert_called_with(foo)
             check.assert_called_with(foo)
 
 
     def test_task_with_filter(self):
     def test_task_with_filter(self):
-        with self.Celery(accept_magic_kwargs=False) as app:
+        with self.Celery() as app:
             check = Mock()
             check = Mock()
 
 
             def filter(task):
             def filter(task):

+ 10 - 12
celery/tests/app/test_builtins.py

@@ -136,18 +136,18 @@ class test_chain(BuiltinsCase):
 
 
     def test_group_to_chord(self):
     def test_group_to_chord(self):
         c = (
         c = (
-            group(self.add.s(i, i) for i in range(5)) |
+            group([self.add.s(i, i) for i in range(5)], app=self.app) |
             self.add.s(10) |
             self.add.s(10) |
             self.add.s(20) |
             self.add.s(20) |
             self.add.s(30)
             self.add.s(30)
         )
         )
-        tasks, _ = c.type.prepare_steps((), c.tasks)
+        tasks, _ = c.prepare_steps((), c.tasks)
         self.assertIsInstance(tasks[0], chord)
         self.assertIsInstance(tasks[0], chord)
         self.assertTrue(tasks[0].body.options['link'])
         self.assertTrue(tasks[0].body.options['link'])
         self.assertTrue(tasks[0].body.options['link'][0].options['link'])
         self.assertTrue(tasks[0].body.options['link'][0].options['link'])
 
 
         c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
         c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
-        tasks2, _ = c2.type.prepare_steps((), c2.tasks)
+        tasks2, _ = c2.prepare_steps((), c2.tasks)
         self.assertIsInstance(tasks2[1], group)
         self.assertIsInstance(tasks2[1], group)
 
 
     def test_apply_options(self):
     def test_apply_options(self):
@@ -158,7 +158,7 @@ class test_chain(BuiltinsCase):
                 return self
                 return self
 
 
         def s(*args, **kwargs):
         def s(*args, **kwargs):
-            return static(self.add, args, kwargs, type=self.add)
+            return static(self.add, args, kwargs, type=self.add, app=self.app)
 
 
         c = s(2, 2) | s(4, 4) | s(8, 8)
         c = s(2, 2) | s(4, 4) | s(8, 8)
         r1 = c.apply_async(task_id='some_id')
         r1 = c.apply_async(task_id='some_id')
@@ -196,18 +196,16 @@ class test_chord(BuiltinsCase):
     def test_forward_options(self):
     def test_forward_options(self):
         body = self.xsum.s()
         body = self.xsum.s()
         x = chord([self.add.s(i, i) for i in range(10)], body=body)
         x = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x._type = Mock()
-        x._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x.run = Mock(name='chord.run(x)')
         x.apply_async(group_id='some_group_id')
         x.apply_async(group_id='some_group_id')
-        self.assertTrue(x._type.called)
-        resbody = x._type.call_args[0][1]
+        self.assertTrue(x.run.called)
+        resbody = x.run.call_args[0][1]
         self.assertEqual(resbody.options['group_id'], 'some_group_id')
         self.assertEqual(resbody.options['group_id'], 'some_group_id')
         x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
         x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
-        x2._type = Mock()
-        x2._type.app.conf.CELERY_ALWAYS_EAGER = False
+        x2.run = Mock(name='chord.run(x2)')
         x2.apply_async(chord='some_chord_id')
         x2.apply_async(chord='some_chord_id')
-        self.assertTrue(x2._type.called)
-        resbody = x2._type.call_args[0][1]
+        self.assertTrue(x2.run.called)
+        resbody = x2.run.call_args[0][1]
         self.assertEqual(resbody.options['chord'], 'some_chord_id')
         self.assertEqual(resbody.options['chord'], 'some_chord_id')
 
 
     def test_apply_eager(self):
     def test_apply_eager(self):

+ 28 - 14
celery/tests/case.py

@@ -48,7 +48,7 @@ from celery.utils.functional import noop
 from celery.utils.imports import qualname
 from celery.utils.imports import qualname
 
 
 __all__ = [
 __all__ = [
-    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY',
+    'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'TaskMessage',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'patch', 'call', 'sentinel', 'skip_unless_module',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'wrap_logger', 'with_environ', 'sleepdeprived',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
     'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -56,7 +56,7 @@ __all__ = [
     'replace_module_value', 'sys_platform', 'reset_modules',
     'replace_module_value', 'sys_platform', 'reset_modules',
     'patch_modules', 'mock_context', 'mock_open', 'patch_many',
     'patch_modules', 'mock_context', 'mock_open', 'patch_many',
     'assert_signal_called', 'skip_if_pypy',
     'assert_signal_called', 'skip_if_pypy',
-    'skip_if_jython', 'body_from_sig', 'restore_logging',
+    'skip_if_jython', 'task_message_from_sig', 'restore_logging',
 ]
 ]
 patch = mock.patch
 patch = mock.patch
 call = mock.call
 call = mock.call
@@ -819,7 +819,7 @@ def skip_if_jython(fun):
     return _inner
     return _inner
 
 
 
 
-def body_from_sig(app, sig, utc=True):
+def task_message_from_sig(app, sig, utc=True):
     sig.freeze()
     sig.freeze()
     callbacks = sig.options.pop('link', None)
     callbacks = sig.options.pop('link', None)
     errbacks = sig.options.pop('link_error', None)
     errbacks = sig.options.pop('link_error', None)
@@ -835,17 +835,14 @@ def body_from_sig(app, sig, utc=True):
         expires = app.now() + timedelta(seconds=expires)
         expires = app.now() + timedelta(seconds=expires)
     if expires and isinstance(expires, datetime):
     if expires and isinstance(expires, datetime):
         expires = expires.isoformat()
         expires = expires.isoformat()
-    return {
-        'task': sig.task,
-        'id': sig.id,
-        'args': sig.args,
-        'kwargs': sig.kwargs,
-        'callbacks': [dict(s) for s in callbacks] if callbacks else None,
-        'errbacks': [dict(s) for s in errbacks] if errbacks else None,
-        'eta': eta,
-        'utc': utc,
-        'expires': expires,
-    }
+    return TaskMessage(
+        sig.task, id=sig.id, args=sig.args,
+        kwargs=sig.kwargs,
+        callbacks=[dict(s) for s in callbacks] if callbacks else None,
+        errbacks=[dict(s) for s in errbacks] if errbacks else None,
+        eta=eta,
+        expires=expires,
+    )
 
 
 
 
 @contextmanager
 @contextmanager
@@ -861,3 +858,20 @@ def restore_logging():
         sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
         sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
         root.level = level
         root.level = level
         root.handlers[:] = handlers
         root.handlers[:] = handlers
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+    from celery import uuid
+    from kombu.serialization import dumps
+    id = id or uuid()
+    message = Mock(name='TaskMessage-{0}'.format(id))
+    message.headers = {
+        'id': id,
+        'task': name,
+    }
+    message.headers.update(options)
+    message.content_type, message.content_encoding, message.body = dumps(
+        (args, kwargs), serializer='json',
+    )
+    message.payload = (args, kwargs)
+    return message

+ 0 - 23
celery/tests/compat_modules/test_compat.py

@@ -15,29 +15,6 @@ from celery.utils.timeutils import timedelta_seconds
 from celery.tests.case import AppCase, depends_on_current_app
 from celery.tests.case import AppCase, depends_on_current_app
 
 
 
 
-class test_Task(AppCase):
-
-    def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.task import Task as OldTask
-
-        class timkX(OldTask):
-            abstract = True
-
-        with self.Celery(set_as_current=False,
-                         accept_magic_kwargs=True) as app:
-            timkX.bind(app)
-            # see #918
-            self.assertFalse(timkX.accept_magic_kwargs)
-
-            from celery import Task as NewTask
-
-            class timkY(NewTask):
-                abstract = True
-
-            timkY.bind(app)
-            self.assertFalse(timkY.accept_magic_kwargs)
-
-
 @depends_on_current_app
 @depends_on_current_app
 class test_periodic_tasks(AppCase):
 class test_periodic_tasks(AppCase):
 
 

+ 0 - 4
celery/tests/compat_modules/test_compat_utils.py

@@ -40,11 +40,7 @@ class test_MagicModule(AppCase):
         def _test_decorators_task():
         def _test_decorators_task():
             pass
             pass
 
 
-        self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
     def test_decorators_periodic_task(self):
     def test_decorators_periodic_task(self):
         @celery.decorators.periodic_task(run_every=3600)
         @celery.decorators.periodic_task(run_every=3600)
         def _test_decorators_ptask():
         def _test_decorators_ptask():
             pass
             pass
-
-        self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)

+ 0 - 1
celery/tests/compat_modules/test_decorators.py

@@ -27,7 +27,6 @@ class test_decorators(AppCase):
     def assertCompatDecorator(self, decorator, type, **opts):
     def assertCompatDecorator(self, decorator, type, **opts):
         task = decorator(**opts)(add)
         task = decorator(**opts)(add)
         self.assertEqual(task(8, 8), 16)
         self.assertEqual(task(8, 8), 16)
-        self.assertTrue(task.accept_magic_kwargs)
         self.assertIsInstance(task, type)
         self.assertIsInstance(task, type)
 
 
     def test_task(self):
     def test_task(self):

+ 4 - 4
celery/tests/tasks/test_chord.py

@@ -205,7 +205,7 @@ class test_chord(ChordCase):
         m = Mock()
         m = Mock()
         m.app.conf.CELERY_ALWAYS_EAGER = False
         m.app.conf.CELERY_ALWAYS_EAGER = False
         m.AsyncResult = AsyncResult
         m.AsyncResult = AsyncResult
-        prev, chord._type = chord._type, m
+        prev, chord.run = chord.run, m
         try:
         try:
             x = chord(self.add.s(i, i) for i in range(10))
             x = chord(self.add.s(i, i) for i in range(10))
             body = self.add.s(2)
             body = self.add.s(2)
@@ -214,9 +214,9 @@ class test_chord(ChordCase):
             # does not modify original signature
             # does not modify original signature
             with self.assertRaises(KeyError):
             with self.assertRaises(KeyError):
                 body.options['task_id']
                 body.options['task_id']
-            self.assertTrue(chord._type.called)
+            self.assertTrue(chord.run.called)
         finally:
         finally:
-            chord._type = prev
+            chord.run = prev
 
 
 
 
 class test_Chord_task(ChordCase):
 class test_Chord_task(ChordCase):
@@ -227,7 +227,7 @@ class test_Chord_task(ChordCase):
         self.app.backend.cleanup.__name__ = 'cleanup'
         self.app.backend.cleanup.__name__ = 'cleanup'
         Chord = self.app.tasks['celery.chord']
         Chord = self.app.tasks['celery.chord']
 
 
-        body = dict()
+        body = self.add.signature()
         Chord(group(self.add.signature((i, i)) for i in range(5)), body)
         Chord(group(self.add.signature((i, i)) for i in range(5)), body)
         Chord([self.add.signature((j, j)) for j in range(5)], body)
         Chord([self.add.signature((j, j)) for j in range(5)], body)
         self.assertEqual(self.app.backend.apply_chord.call_count, 2)
         self.assertEqual(self.app.backend.apply_chord.call_count, 2)

+ 0 - 4
celery/tests/tasks/test_tasks.py

@@ -363,10 +363,6 @@ class test_tasks(TasksCase):
         self.mytask.app.Task._app = None
         self.mytask.app.Task._app = None
         self.assertIn('unbound', repr(self.mytask.app.Task, ))
         self.assertIn('unbound', repr(self.mytask.app.Task, ))
 
 
-    def test_bind_no_magic_kwargs(self):
-        self.mytask.accept_magic_kwargs = None
-        self.mytask.bind(self.mytask.app)
-
     def test_annotate(self):
     def test_annotate(self):
         with patch('celery.app.task.resolve_all_annotations') as anno:
         with patch('celery.app.task.resolve_all_annotations') as anno:
             anno.return_value = [{'FOO': 'BAR'}]
             anno.return_value = [{'FOO': 'BAR'}]

+ 7 - 4
celery/tests/tasks/test_trace.py

@@ -6,7 +6,7 @@ from celery import states
 from celery.exceptions import Ignore, Retry
 from celery.exceptions import Ignore, Retry
 from celery.app.trace import (
 from celery.app.trace import (
     TraceInfo,
     TraceInfo,
-    eager_trace_task,
+    build_tracer,
     trace_task,
     trace_task,
     setup_worker_optimizations,
     setup_worker_optimizations,
     reset_worker_optimizations,
     reset_worker_optimizations,
@@ -14,9 +14,12 @@ from celery.app.trace import (
 from celery.tests.case import AppCase, Mock, patch
 from celery.tests.case import AppCase, Mock, patch
 
 
 
 
-def trace(app, task, args=(), kwargs={}, propagate=False, **opts):
-    return eager_trace_task(task, 'id-1', args, kwargs,
-                            propagate=propagate, app=app, **opts)
+def trace(app, task, args=(), kwargs={},
+          propagate=False, eager=True, request=None, **opts):
+    t = build_tracer(task.name, task,
+                     eager=eager, propagate=propagate, app=app, **opts)
+    ret = t('id-1', args, kwargs, request)
+    return ret.retval, ret.info
 
 
 
 
 class TraceCase(AppCase):
 class TraceCase(AppCase):

+ 7 - 22
celery/tests/worker/test_control.py

@@ -16,12 +16,12 @@ from celery.worker import WorkController as _WC
 from celery.worker import consumer
 from celery.worker import consumer
 from celery.worker import control
 from celery.worker import control
 from celery.worker import state as worker_state
 from celery.worker import state as worker_state
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.worker.state import revoked
 from celery.worker.state import revoked
 from celery.worker.control import Panel
 from celery.worker.control import Panel
 from celery.worker.pidbox import Pidbox, gPidbox
 from celery.worker.pidbox import Pidbox, gPidbox
 
 
-from celery.tests.case import AppCase, Mock, call, patch
+from celery.tests.case import AppCase, Mock, TaskMessage, call, patch
 
 
 hostname = socket.gethostname()
 hostname = socket.gethostname()
 
 
@@ -250,12 +250,7 @@ class test_ControlPanel(AppCase):
         self.panel.handle('report')
         self.panel.handle('report')
 
 
     def test_active(self):
     def test_active(self):
-        r = Request({
-            'task': self.mytask.name,
-            'id': 'do re mi',
-            'args': (),
-            'kwargs': {},
-        }, app=self.app)
+        r = Request(TaskMessage(self.mytask.name, 'do re mi'), app=self.app)
         worker_state.active_requests.add(r)
         worker_state.active_requests.add(r)
         try:
         try:
             self.assertTrue(self.panel.handle('dump_active'))
             self.assertTrue(self.panel.handle('dump_active'))
@@ -347,12 +342,7 @@ class test_ControlPanel(AppCase):
         consumer = Consumer(self.app)
         consumer = Consumer(self.app)
         panel = self.create_panel(consumer=consumer)
         panel = self.create_panel(consumer=consumer)
         self.assertFalse(panel.handle('dump_schedule'))
         self.assertFalse(panel.handle('dump_schedule'))
-        r = Request({
-            'task': self.mytask.name,
-            'id': 'CAFEBABE',
-            'args': (),
-            'kwargs': {},
-        }, app=self.app)
+        r = Request(TaskMessage(self.mytask.name, 'CAFEBABE'), app=self.app)
         consumer.timer.schedule.enter_at(
         consumer.timer.schedule.enter_at(
             consumer.timer.Entry(lambda x: x, (r, )),
             consumer.timer.Entry(lambda x: x, (r, )),
             datetime.now() + timedelta(seconds=10))
             datetime.now() + timedelta(seconds=10))
@@ -363,19 +353,14 @@ class test_ControlPanel(AppCase):
 
 
     def test_dump_reserved(self):
     def test_dump_reserved(self):
         consumer = Consumer(self.app)
         consumer = Consumer(self.app)
-        worker_state.reserved_requests.add(Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': (2, 2),
-            'kwargs': {},
-        }, app=self.app))
+        worker_state.reserved_requests.add(
+            Request(TaskMessage(self.mytask.name, args=(2, 2)), app=self.app),
+        )
         try:
         try:
             panel = self.create_panel(consumer=consumer)
             panel = self.create_panel(consumer=consumer)
             response = panel.handle('dump_reserved', {'safe': True})
             response = panel.handle('dump_reserved', {'safe': True})
             self.assertDictContainsSubset(
             self.assertDictContainsSubset(
                 {'name': self.mytask.name,
                 {'name': self.mytask.name,
-                 'args': (2, 2),
-                 'kwargs': {},
                  'hostname': socket.gethostname()},
                  'hostname': socket.gethostname()},
                 response[0],
                 response[0],
             )
             )

+ 21 - 22
celery/tests/worker/test_loops.py

@@ -11,7 +11,7 @@ from celery.worker import state
 from celery.worker.consumer import Consumer
 from celery.worker.consumer import Consumer
 from celery.worker.loops import asynloop, synloop
 from celery.worker.loops import asynloop, synloop
 
 
-from celery.tests.case import AppCase, Mock, body_from_sig
+from celery.tests.case import AppCase, Mock, task_message_from_sig
 
 
 
 
 class X(object):
 class X(object):
@@ -107,7 +107,7 @@ def get_task_callback(*args, **kwargs):
     x = X(*args, **kwargs)
     x = X(*args, **kwargs)
     x.blueprint.state = CLOSE
     x.blueprint.state = CLOSE
     asynloop(*x.args)
     asynloop(*x.args)
-    return x, x.consumer.callbacks[0]
+    return x, x.consumer.on_message
 
 
 
 
 class test_asynloop(AppCase):
 class test_asynloop(AppCase):
@@ -132,45 +132,44 @@ class test_asynloop(AppCase):
 
 
     def task_context(self, sig, **kwargs):
     def task_context(self, sig, **kwargs):
         x, on_task = get_task_callback(self.app, **kwargs)
         x, on_task = get_task_callback(self.app, **kwargs)
-        body = body_from_sig(self.app, sig)
-        message = Mock()
-        strategy = x.obj.strategies[sig.task] = Mock()
-        return x, on_task, body, message, strategy
+        message = task_message_from_sig(self.app, sig)
+        strategy = x.obj.strategies[sig.task] = Mock(name='strategy')
+        return x, on_task, message, strategy
 
 
     def test_on_task_received(self):
     def test_on_task_received(self):
-        _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
-        on_task(body, msg)
+        _, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+        on_task(msg)
         strategy.assert_called_with(
         strategy.assert_called_with(
-            msg, body, msg.ack_log_error, msg.reject_log_error, [],
+            msg, None, msg.ack_log_error, msg.reject_log_error, [],
         )
         )
 
 
     def test_on_task_received_executes_on_task_message(self):
     def test_on_task_received_executes_on_task_message(self):
         cbs = [Mock(), Mock(), Mock()]
         cbs = [Mock(), Mock(), Mock()]
-        _, on_task, body, msg, strategy = self.task_context(
+        _, on_task, msg, strategy = self.task_context(
             self.add.s(2, 2), on_task_message=cbs,
             self.add.s(2, 2), on_task_message=cbs,
         )
         )
-        on_task(body, msg)
+        on_task(msg)
         strategy.assert_called_with(
         strategy.assert_called_with(
-            msg, body, msg.ack_log_error, msg.reject_log_error, cbs,
+            msg, None, msg.ack_log_error, msg.reject_log_error, cbs,
         )
         )
 
 
     def test_on_task_message_missing_name(self):
     def test_on_task_message_missing_name(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
-        body.pop('task')
-        on_task(body, msg)
-        x.on_unknown_message.assert_called_with(body, msg)
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+        msg.headers.pop('task')
+        on_task(msg)
+        x.on_unknown_message.assert_called_with(((2, 2), {}), msg)
 
 
     def test_on_task_not_registered(self):
     def test_on_task_not_registered(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
         exc = strategy.side_effect = KeyError(self.add.name)
         exc = strategy.side_effect = KeyError(self.add.name)
-        on_task(body, msg)
-        x.on_unknown_task.assert_called_with(body, msg, exc)
+        on_task(msg)
+        x.on_invalid_task.assert_called_with(None, msg, exc)
 
 
     def test_on_task_InvalidTaskError(self):
     def test_on_task_InvalidTaskError(self):
-        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+        x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
         exc = strategy.side_effect = InvalidTaskError()
         exc = strategy.side_effect = InvalidTaskError()
-        on_task(body, msg)
-        x.on_invalid_task.assert_called_with(body, msg, exc)
+        on_task(msg)
+        x.on_invalid_task.assert_called_with(None, msg, exc)
 
 
     def test_should_terminate(self):
     def test_should_terminate(self):
         x = X(self.app)
         x = X(self.app)

+ 156 - 260
celery/tests/worker/test_request.py

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, unicode_literals
 from __future__ import absolute_import, unicode_literals
 
 
-import anyjson
+import numbers
 import os
 import os
 import signal
 import signal
 import socket
 import socket
@@ -10,7 +10,6 @@ import sys
 from datetime import datetime, timedelta
 from datetime import datetime, timedelta
 
 
 from billiard.einfo import ExceptionInfo
 from billiard.einfo import ExceptionInfo
-from kombu.transport.base import Message
 from kombu.utils.encoding import from_utf8, default_encode
 from kombu.utils.encoding import from_utf8, default_encode
 
 
 from celery import states
 from celery import states
@@ -27,16 +26,17 @@ from celery.concurrency.base import BasePool
 from celery.exceptions import (
 from celery.exceptions import (
     Ignore,
     Ignore,
     InvalidTaskError,
     InvalidTaskError,
+    Reject,
     Retry,
     Retry,
     TaskRevokedError,
     TaskRevokedError,
     Terminated,
     Terminated,
     WorkerLostError,
     WorkerLostError,
 )
 )
-from celery.five import keys, monotonic
+from celery.five import monotonic
 from celery.signals import task_revoked
 from celery.signals import task_revoked
 from celery.utils import uuid
 from celery.utils import uuid
-from celery.worker import job as module
-from celery.worker.job import Request, logger as req_logger
+from celery.worker import request as module
+from celery.worker.request import Request, logger as req_logger
 from celery.worker.state import revoked
 from celery.worker.state import revoked
 
 
 from celery.tests.case import (
 from celery.tests.case import (
@@ -44,8 +44,9 @@ from celery.tests.case import (
     Case,
     Case,
     Mock,
     Mock,
     SkipTest,
     SkipTest,
+    TaskMessage,
     assert_signal_called,
     assert_signal_called,
-    body_from_sig,
+    task_message_from_sig,
     patch,
     patch,
 )
 )
 
 
@@ -85,7 +86,7 @@ def jail(app, task_id, name, args, kwargs):
     task.__trace__ = None  # rebuild
     task.__trace__ = None  # rebuild
     return trace_task(
     return trace_task(
         task, task_id, args, kwargs, request=request, eager=False, app=app,
         task, task_id, args, kwargs, request=request, eager=False, app=app,
-    )
+    ).retval
 
 
 
 
 class test_default_encode(AppCase):
 class test_default_encode(AppCase):
@@ -138,7 +139,7 @@ class test_trace_task(AppCase):
             raise KeyError(i)
             raise KeyError(i)
         self.mytask_raising = mytask_raising
         self.mytask_raising = mytask_raising
 
 
-    @patch('celery.app.trace._logger')
+    @patch('celery.app.trace.logger')
     def test_process_cleanup_fails(self, _logger):
     def test_process_cleanup_fails(self, _logger):
         self.mytask.backend = Mock()
         self.mytask.backend = Mock()
         self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
         self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
@@ -227,9 +228,10 @@ class test_Request(AppCase):
 
 
     def get_request(self, sig, Request=Request, **kwargs):
     def get_request(self, sig, Request=Request, **kwargs):
         return Request(
         return Request(
-            body_from_sig(self.app, sig),
-            on_ack=Mock(),
-            eventer=Mock(),
+            task_message_from_sig(self.app, sig),
+            on_ack=Mock(name='on_ack'),
+            on_reject=Mock(name='on_reject'),
+            eventer=Mock(name='eventer'),
             app=self.app,
             app=self.app,
             connection_errors=(socket.error, ),
             connection_errors=(socket.error, ),
             task=sig.type,
             task=sig.type,
@@ -245,8 +247,9 @@ class test_Request(AppCase):
             self.get_request(self.add.s(2, 2).set(expires='12345'))
             self.get_request(self.add.s(2, 2).set(expires='12345'))
 
 
     def test_valid_expires_with_utc_makes_aware(self):
     def test_valid_expires_with_utc_makes_aware(self):
-        with patch('celery.worker.job.maybe_make_aware') as mma:
-            self.get_request(self.add.s(2, 2).set(expires=10))
+        with patch('celery.worker.request.maybe_make_aware') as mma:
+            self.get_request(self.add.s(2, 2).set(expires=10),
+                             maybe_make_aware=mma)
             self.assertTrue(mma.called)
             self.assertTrue(mma.called)
 
 
     def test_maybe_expire_when_expires_is_None(self):
     def test_maybe_expire_when_expires_is_None(self):
@@ -273,7 +276,7 @@ class test_Request(AppCase):
             uuid=req.id, terminated=True, signum='9', expired=False,
             uuid=req.id, terminated=True, signum='9', expired=False,
         )
         )
 
 
-    def test_log_error_propagates_MemoryError(self):
+    def test_on_failure_propagates_MemoryError(self):
         einfo = None
         einfo = None
         try:
         try:
             raise MemoryError()
             raise MemoryError()
@@ -282,9 +285,9 @@ class test_Request(AppCase):
         self.assertIsNotNone(einfo)
         self.assertIsNotNone(einfo)
         req = self.get_request(self.add.s(2, 2))
         req = self.get_request(self.add.s(2, 2))
         with self.assertRaises(MemoryError):
         with self.assertRaises(MemoryError):
-            req._log_error(einfo)
+            req.on_failure(einfo)
 
 
-    def test_log_error_when_Ignore(self):
+    def test_on_failure_Ignore_acknowledges(self):
         einfo = None
         einfo = None
         try:
         try:
             raise Ignore()
             raise Ignore()
@@ -292,48 +295,55 @@ class test_Request(AppCase):
             einfo = ExceptionInfo(internal=True)
             einfo = ExceptionInfo(internal=True)
         self.assertIsNotNone(einfo)
         self.assertIsNotNone(einfo)
         req = self.get_request(self.add.s(2, 2))
         req = self.get_request(self.add.s(2, 2))
-        req._log_error(einfo)
+        req.on_failure(einfo)
         req.on_ack.assert_called_with(req_logger, req.connection_errors)
         req.on_ack.assert_called_with(req_logger, req.connection_errors)
 
 
+    def test_on_failure_Reject_rejects(self):
+        einfo = None
+        try:
+            raise Reject()
+        except Reject:
+            einfo = ExceptionInfo(internal=True)
+        self.assertIsNotNone(einfo)
+        req = self.get_request(self.add.s(2, 2))
+        req.on_failure(einfo)
+        req.on_reject.assert_called_with(
+            req_logger, req.connection_errors, False,
+        )
+
+    def test_on_failure_Reject_rejects_with_requeue(self):
+        einfo = None
+        try:
+            raise Reject(requeue=True)
+        except Reject:
+            einfo = ExceptionInfo(internal=True)
+        self.assertIsNotNone(einfo)
+        req = self.get_request(self.add.s(2, 2))
+        req.on_failure(einfo)
+        req.on_reject.assert_called_with(
+            req_logger, req.connection_errors, True,
+        )
+
     def test_tzlocal_is_cached(self):
     def test_tzlocal_is_cached(self):
         req = self.get_request(self.add.s(2, 2))
         req = self.get_request(self.add.s(2, 2))
         req._tzlocal = 'foo'
         req._tzlocal = 'foo'
         self.assertEqual(req.tzlocal, 'foo')
         self.assertEqual(req.tzlocal, 'foo')
 
 
-    def test_execute_magic_kwargs(self):
-        task = self.add.s(2, 2)
-        task.freeze()
-        req = self.get_request(task)
-        self.add.accept_magic_kwargs = True
-        pool = Mock()
-        req.execute_using_pool(pool)
-        self.assertTrue(pool.apply_async.called)
-        args = pool.apply_async.call_args[1]['args']
-        self.assertEqual(args[0], task.task)
-        self.assertEqual(args[1], task.id)
-        self.assertEqual(args[2], task.args)
-        kwargs = args[3]
-        self.assertEqual(kwargs.get('task_name'), task.task)
-
-    def xRequest(self, body=None, **kwargs):
-        body = dict({'task': self.mytask.name,
-                     'id': uuid(),
-                     'args': [1],
-                     'kwargs': {'f': 'x'}}, **body or {})
-        return Request(body, app=self.app, **kwargs)
+    def xRequest(self, name=None, id=None, args=None, kwargs=None,
+                 on_ack=None, on_reject=None, **head):
+        args = [1] if args is None else args
+        kwargs = {'f': 'x'} if kwargs is None else kwargs
+        on_ack = on_ack or Mock(name='on_ack')
+        on_reject = on_reject or Mock(name='on_reject')
+        message = TaskMessage(
+            name or self.mytask.name, id, args=args, kwargs=kwargs, **head
+        )
+        return Request(message, app=self.app,
+                       on_ack=on_ack, on_reject=on_reject)
 
 
     def test_task_wrapper_repr(self):
     def test_task_wrapper_repr(self):
         self.assertTrue(repr(self.xRequest()))
         self.assertTrue(repr(self.xRequest()))
 
 
-    @patch('celery.worker.job.kwdict')
-    def test_kwdict(self, kwdict):
-        prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True
-        try:
-            self.xRequest()
-            self.assertTrue(kwdict.called)
-        finally:
-            module.NEEDS_KWDICT = prev
-
     def test_sets_store_errors(self):
     def test_sets_store_errors(self):
         self.mytask.ignore_result = True
         self.mytask.ignore_result = True
         job = self.xRequest()
         job = self.xRequest()
@@ -350,12 +360,7 @@ class test_Request(AppCase):
         self.assertIn('task-frobulated', job.eventer.sent)
         self.assertIn('task-frobulated', job.eventer.sent)
 
 
     def test_on_retry(self):
     def test_on_retry(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         job.eventer = MockEventDispatcher()
         job.eventer = MockEventDispatcher()
         try:
         try:
             raise Retry('foo', KeyError('moofoobar'))
             raise Retry('foo', KeyError('moofoobar'))
@@ -372,12 +377,7 @@ class test_Request(AppCase):
             job.on_failure(einfo)
             job.on_failure(einfo)
 
 
     def test_compat_properties(self):
     def test_compat_properties(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.xRequest()
         self.assertEqual(job.task_id, job.id)
         self.assertEqual(job.task_id, job.id)
         self.assertEqual(job.task_name, job.name)
         self.assertEqual(job.task_name, job.name)
         job.task_id = 'ID'
         job.task_id = 'ID'
@@ -388,12 +388,7 @@ class test_Request(AppCase):
     def test_terminate__task_started(self):
     def test_terminate__task_started(self):
         pool = Mock()
         pool = Mock()
         signum = signal.SIGTERM
         signum = signal.SIGTERM
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwrgs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         with assert_signal_called(
         with assert_signal_called(
                 task_revoked, sender=job.task, request=job,
                 task_revoked, sender=job.task, request=job,
                 terminated=True, expired=False, signum=signum):
                 terminated=True, expired=False, signum=signum):
@@ -404,12 +399,7 @@ class test_Request(AppCase):
 
 
     def test_terminate__task_reserved(self):
     def test_terminate__task_reserved(self):
         pool = Mock()
         pool = Mock()
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x'))
         job.time_start = None
         job.time_start = None
         job.terminate(pool, signal='TERM')
         job.terminate(pool, signal='TERM')
         self.assertFalse(pool.terminate_job.called)
         self.assertFalse(pool.terminate_job.called)
@@ -417,13 +407,9 @@ class test_Request(AppCase):
         job.terminate(pool, signal='TERM')
         job.terminate(pool, signal='TERM')
 
 
     def test_revoked_expires_expired(self):
     def test_revoked_expires_expired(self):
-        job = Request({
-            'task': self.mytask.name,
-            'id': uuid(),
-            'args': [1],
-            'kwargs': {'f': 'x'},
-            'expires': datetime.utcnow() - timedelta(days=1),
-        }, app=self.app)
+        job = self.get_request(self.mytask.s(1, f='x').set(
+            expires=datetime.utcnow() - timedelta(days=1)
+        ))
         with assert_signal_called(
         with assert_signal_called(
                 task_revoked, sender=job.task, request=job,
                 task_revoked, sender=job.task, request=job,
                 terminated=False, expired=True, signum=None):
                 terminated=False, expired=True, signum=None):
@@ -435,9 +421,9 @@ class test_Request(AppCase):
             )
             )
 
 
     def test_revoked_expires_not_expired(self):
     def test_revoked_expires_not_expired(self):
-        job = self.xRequest({
-            'expires': datetime.utcnow() + timedelta(days=1),
-        })
+        job = self.xRequest(
+            expires=datetime.utcnow() + timedelta(days=1),
+        )
         job.revoked()
         job.revoked()
         self.assertNotIn(job.id, revoked)
         self.assertNotIn(job.id, revoked)
         self.assertNotEqual(
         self.assertNotEqual(
@@ -447,47 +433,15 @@ class test_Request(AppCase):
 
 
     def test_revoked_expires_ignore_result(self):
     def test_revoked_expires_ignore_result(self):
         self.mytask.ignore_result = True
         self.mytask.ignore_result = True
-        job = self.xRequest({
-            'expires': datetime.utcnow() - timedelta(days=1),
-        })
+        job = self.xRequest(
+            expires=datetime.utcnow() - timedelta(days=1),
+        )
         job.revoked()
         job.revoked()
         self.assertIn(job.id, revoked)
         self.assertIn(job.id, revoked)
         self.assertNotEqual(
         self.assertNotEqual(
             self.mytask.backend.get_status(job.id), states.REVOKED,
             self.mytask.backend.get_status(job.id), states.REVOKED,
         )
         )
 
 
-    def test_send_email(self):
-        app = self.app
-        mail_sent = [False]
-
-        def mock_mail_admins(*args, **kwargs):
-            mail_sent[0] = True
-
-        def get_ei():
-            try:
-                raise KeyError('moofoobar')
-            except:
-                return ExceptionInfo()
-
-        app.mail_admins = mock_mail_admins
-        self.mytask.send_error_emails = True
-        job = self.xRequest()
-        einfo = get_ei()
-        job.on_failure(einfo)
-        self.assertTrue(mail_sent[0])
-
-        einfo = get_ei()
-        mail_sent[0] = False
-        self.mytask.send_error_emails = False
-        job.on_failure(einfo)
-        self.assertFalse(mail_sent[0])
-
-        einfo = get_ei()
-        mail_sent[0] = False
-        self.mytask.send_error_emails = True
-        job.on_failure(einfo)
-        self.assertTrue(mail_sent[0])
-
     def test_already_revoked(self):
     def test_already_revoked(self):
         job = self.xRequest()
         job = self.xRequest()
         job._already_revoked = True
         job._already_revoked = True
@@ -510,10 +464,10 @@ class test_Request(AppCase):
 
 
     def test_execute_acks_late(self):
     def test_execute_acks_late(self):
         self.mytask_raising.acks_late = True
         self.mytask_raising.acks_late = True
-        job = self.xRequest({
-            'task': self.mytask_raising.name,
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=self.mytask_raising.name,
+            kwargs={},
+        )
         job.execute()
         job.execute()
         self.assertTrue(job.acknowledged)
         self.assertTrue(job.acknowledged)
         job.execute()
         job.execute()
@@ -555,10 +509,10 @@ class test_Request(AppCase):
     def test_on_success_acks_early(self):
     def test_on_success_acks_early(self):
         job = self.xRequest()
         job = self.xRequest()
         job.time_start = 1
         job.time_start = 1
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         prev, module._does_info = module._does_info, False
         prev, module._does_info = module._does_info, False
         try:
         try:
-            job.on_success(42)
+            job.on_success((0, 42, 0.001))
             self.assertFalse(job.acknowledged)
             self.assertFalse(job.acknowledged)
         finally:
         finally:
             module._does_info = prev
             module._does_info = prev
@@ -570,7 +524,7 @@ class test_Request(AppCase):
             try:
             try:
                 raise SystemExit()
                 raise SystemExit()
             except SystemExit:
             except SystemExit:
-                job.on_success(ExceptionInfo())
+                job.on_success((1, ExceptionInfo(), 0.01))
             else:
             else:
                 assert False
                 assert False
 
 
@@ -579,7 +533,7 @@ class test_Request(AppCase):
         job.time_start = 1
         job.time_start = 1
         job.eventer = Mock()
         job.eventer = Mock()
         job.eventer.send = Mock()
         job.eventer.send = Mock()
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         self.assertTrue(job.eventer.send.called)
         self.assertTrue(job.eventer.send.called)
 
 
     def test_on_success_when_failure(self):
     def test_on_success_when_failure(self):
@@ -589,14 +543,14 @@ class test_Request(AppCase):
         try:
         try:
             raise KeyError('foo')
             raise KeyError('foo')
         except Exception:
         except Exception:
-            job.on_success(ExceptionInfo())
+            job.on_success((1, ExceptionInfo(), 0.001))
             self.assertTrue(job.on_failure.called)
             self.assertTrue(job.on_failure.called)
 
 
     def test_on_success_acks_late(self):
     def test_on_success_acks_late(self):
         job = self.xRequest()
         job = self.xRequest()
         job.time_start = 1
         job.time_start = 1
         self.mytask.acks_late = True
         self.mytask.acks_late = True
-        job.on_success(42)
+        job.on_success((0, 42, 0.001))
         self.assertTrue(job.acknowledged)
         self.assertTrue(job.acknowledged)
 
 
     def test_on_failure_WorkerLostError(self):
     def test_on_failure_WorkerLostError(self):
@@ -634,12 +588,13 @@ class test_Request(AppCase):
             self.assertTrue(job.acknowledged)
             self.assertTrue(job.acknowledged)
 
 
     def test_from_message_invalid_kwargs(self):
     def test_from_message_invalid_kwargs(self):
-        body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo')
+        m = TaskMessage(self.mytask.name, args=(), kwargs='foo')
+        req = Request(m, app=self.app)
         with self.assertRaises(InvalidTaskError):
         with self.assertRaises(InvalidTaskError):
-            Request(body, message=None, app=self.app)
+            raise req.execute().exception
 
 
-    @patch('celery.worker.job.error')
-    @patch('celery.worker.job.warn')
+    @patch('celery.worker.request.error')
+    @patch('celery.worker.request.warn')
     def test_on_timeout(self, warn, error):
     def test_on_timeout(self, warn, error):
 
 
         job = self.xRequest()
         job = self.xRequest()
@@ -662,37 +617,60 @@ class test_Request(AppCase):
         from celery.app import trace
         from celery.app import trace
         setup_worker_optimizations(self.app)
         setup_worker_optimizations(self.app)
         self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
         self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
         try:
         try:
             self.mytask.__trace__ = build_tracer(
             self.mytask.__trace__ = build_tracer(
                 self.mytask.name, self.mytask, self.app.loader, 'test',
                 self.mytask.name, self.mytask, self.app.loader, 'test',
                 app=self.app,
                 app=self.app,
             )
             )
-            res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {})
-            self.assertEqual(res, 4 ** 4)
+            failed, res, runtime = trace.trace_task_ret(
+                self.mytask.name, tid, message.headers, message.body,
+                message.content_type, message.content_encoding)
+            self.assertFalse(failed)
+            self.assertEqual(res, repr(4 ** 4))
+            self.assertTrue(runtime)
+            self.assertIsInstance(runtime, numbers.Real)
         finally:
         finally:
             reset_worker_optimizations()
             reset_worker_optimizations()
             self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
             self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
         delattr(self.mytask, '__trace__')
         delattr(self.mytask, '__trace__')
-        res = trace.trace_task_ret(
-            self.mytask.name, uuid(), [4], {}, app=self.app,
+        failed, res, runtime = trace.trace_task_ret(
+            self.mytask.name, tid, message.headers, message.body,
+            message.content_type, message.content_encoding, app=self.app,
         )
         )
-        self.assertEqual(res, 4 ** 4)
+        self.assertFalse(failed)
+        self.assertEqual(res, repr(4 ** 4))
+        self.assertTrue(runtime)
+        self.assertIsInstance(runtime, numbers.Real)
 
 
     def test_trace_task_ret(self):
     def test_trace_task_ret(self):
         self.mytask.__trace__ = build_tracer(
         self.mytask.__trace__ = build_tracer(
             self.mytask.name, self.mytask, self.app.loader, 'test',
             self.mytask.name, self.mytask, self.app.loader, 'test',
             app=self.app,
             app=self.app,
         )
         )
-        res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
-        self.assertEqual(res, 4 ** 4)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
+        _, R, _ = _trace_task_ret(
+            self.mytask.name, tid, message.headers,
+            message.body, message.content_type,
+            message.content_encoding, app=self.app,
+        )
+        self.assertEqual(R, repr(4 ** 4))
 
 
     def test_trace_task_ret__no_trace(self):
     def test_trace_task_ret__no_trace(self):
         try:
         try:
             delattr(self.mytask, '__trace__')
             delattr(self.mytask, '__trace__')
         except AttributeError:
         except AttributeError:
             pass
             pass
-        res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
-        self.assertEqual(res, 4 ** 4)
+        tid = uuid()
+        message = TaskMessage(self.mytask.name, tid, args=[4])
+        _, R, _ = _trace_task_ret(
+            self.mytask.name, tid, message.headers,
+            message.body, message.content_type,
+            message.content_encoding, app=self.app,
+        )
+        self.assertEqual(R, repr(4 ** 4))
 
 
     def test_trace_catches_exception(self):
     def test_trace_catches_exception(self):
 
 
@@ -705,7 +683,7 @@ class test_Request(AppCase):
 
 
         with self.assertWarnsRegex(RuntimeWarning,
         with self.assertWarnsRegex(RuntimeWarning,
                                    r'Exception raised outside'):
                                    r'Exception raised outside'):
-            res = trace_task(raising, uuid(), [], {}, app=self.app)
+            res = trace_task(raising, uuid(), [], {}, app=self.app)[0]
             self.assertIsInstance(res, ExceptionInfo)
             self.assertIsInstance(res, ExceptionInfo)
 
 
     def test_worker_task_trace_handle_retry(self):
     def test_worker_task_trace_handle_retry(self):
@@ -749,71 +727,39 @@ class test_Request(AppCase):
         finally:
         finally:
             self.mytask.pop_request()
             self.mytask.pop_request()
 
 
-    def test_task_wrapper_mail_attrs(self):
-        job = self.xRequest({'args': [], 'kwargs': {}})
-        x = job.success_msg % {
-            'name': job.name,
-            'id': job.id,
-            'return_value': 10,
-            'runtime': 0.3641,
-        }
-        self.assertTrue(x)
-        x = job.error_msg % {
-            'name': job.name,
-            'id': job.id,
-            'exc': 'FOOBARBAZ',
-            'description': 'raised unexpected',
-            'traceback': 'foobarbaz',
-        }
-        self.assertTrue(x)
-
     def test_from_message(self):
     def test_from_message(self):
         us = 'æØåveéðƒeæ'
         us = 'æØåveéðƒeæ'
-        body = {'task': self.mytask.name, 'id': uuid(),
-                'args': [2], 'kwargs': {us: 'bar'}}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
-        job = Request(m.decode(), message=m, app=self.app)
+        tid = uuid()
+        m = TaskMessage(self.mytask.name, tid, args=[2], kwargs={us: 'bar'})
+        job = Request(m, app=self.app)
         self.assertIsInstance(job, Request)
         self.assertIsInstance(job, Request)
-        self.assertEqual(job.name, body['task'])
-        self.assertEqual(job.id, body['id'])
-        self.assertEqual(job.args, body['args'])
-        us = from_utf8(us)
-        if sys.version_info < (2, 6):
-            self.assertEqual(next(keys(job.kwargs)), us)
-            self.assertIsInstance(next(keys(job.kwargs)), str)
+        self.assertEqual(job.name, self.mytask.name)
+        self.assertEqual(job.id, tid)
+        self.assertIs(job.message, m)
 
 
     def test_from_message_empty_args(self):
     def test_from_message_empty_args(self):
-        body = {'task': self.mytask.name, 'id': uuid()}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
-        job = Request(m.decode(), message=m, app=self.app)
+        tid = uuid()
+        m = TaskMessage(self.mytask.name, tid, args=[], kwargs={})
+        job = Request(m, app=self.app)
         self.assertIsInstance(job, Request)
         self.assertIsInstance(job, Request)
-        self.assertEqual(job.args, [])
-        self.assertEqual(job.kwargs, {})
 
 
     def test_from_message_missing_required_fields(self):
     def test_from_message_missing_required_fields(self):
-        body = {}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
+        m = TaskMessage(self.mytask.name)
+        m.headers.clear()
         with self.assertRaises(KeyError):
         with self.assertRaises(KeyError):
-            Request(m.decode(), message=m, app=self.app)
+            Request(m, app=self.app)
 
 
     def test_from_message_nonexistant_task(self):
     def test_from_message_nonexistant_task(self):
-        body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(),
-                'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}}
-        m = Message(None, body=anyjson.dumps(body), backend='foo',
-                    content_type='application/json',
-                    content_encoding='utf-8')
+        m = TaskMessage(
+            'cu.mytask.doesnotexist',
+            args=[2], kwargs={'æØåveéðƒeæ': 'bar'},
+        )
         with self.assertRaises(KeyError):
         with self.assertRaises(KeyError):
-            Request(m.decode(), message=m, app=self.app)
+            Request(m, app=self.app)
 
 
     def test_execute(self):
     def test_execute(self):
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}})
+        job = self.xRequest(id=tid, args=[4], kwargs={})
         self.assertEqual(job.execute(), 256)
         self.assertEqual(job.execute(), 256)
         meta = self.mytask.backend.get_task_meta(tid)
         meta = self.mytask.backend.get_task_meta(tid)
         self.assertEqual(meta['status'], states.SUCCESS)
         self.assertEqual(meta['status'], states.SUCCESS)
@@ -826,38 +772,17 @@ class test_Request(AppCase):
             return i ** i
             return i ** i
 
 
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({
-            'task': mytask_no_kwargs.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=mytask_no_kwargs.name,
+            id=tid,
+            args=[4],
+            kwargs={},
+        )
         self.assertEqual(job.execute(), 256)
         self.assertEqual(job.execute(), 256)
         meta = mytask_no_kwargs.backend.get_task_meta(tid)
         meta = mytask_no_kwargs.backend.get_task_meta(tid)
         self.assertEqual(meta['result'], 256)
         self.assertEqual(meta['result'], 256)
         self.assertEqual(meta['status'], states.SUCCESS)
         self.assertEqual(meta['status'], states.SUCCESS)
 
 
-    def test_execute_success_some_kwargs(self):
-        scratch = {'task_id': None}
-
-        @self.app.task(shared=False, accept_magic_kwargs=True)
-        def mytask_some_kwargs(i, task_id):
-            scratch['task_id'] = task_id
-            return i ** i
-
-        tid = uuid()
-        job = self.xRequest({
-            'task': mytask_some_kwargs.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
-        self.assertEqual(job.execute(), 256)
-        meta = mytask_some_kwargs.backend.get_task_meta(tid)
-        self.assertEqual(scratch.get('task_id'), tid)
-        self.assertEqual(meta['result'], 256)
-        self.assertEqual(meta['status'], states.SUCCESS)
-
     def test_execute_ack(self):
     def test_execute_ack(self):
         scratch = {'ACK': False}
         scratch = {'ACK': False}
 
 
@@ -865,7 +790,7 @@ class test_Request(AppCase):
             scratch['ACK'] = True
             scratch['ACK'] = True
 
 
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack)
+        job = self.xRequest(id=tid, args=[4], on_ack=on_ack)
         self.assertEqual(job.execute(), 256)
         self.assertEqual(job.execute(), 256)
         meta = self.mytask.backend.get_task_meta(tid)
         meta = self.mytask.backend.get_task_meta(tid)
         self.assertTrue(scratch['ACK'])
         self.assertTrue(scratch['ACK'])
@@ -874,12 +799,13 @@ class test_Request(AppCase):
 
 
     def test_execute_fail(self):
     def test_execute_fail(self):
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({
-            'task': self.mytask_raising.name,
-            'id': tid,
-            'args': [4],
-            'kwargs': {},
-        })
+        job = self.xRequest(
+            name=self.mytask_raising.name,
+            id=tid,
+            args=[4],
+            kwargs={},
+        )
+        print(job.execute())
         self.assertIsInstance(job.execute(), ExceptionInfo)
         self.assertIsInstance(job.execute(), ExceptionInfo)
         meta = self.mytask_raising.backend.get_task_meta(tid)
         meta = self.mytask_raising.backend.get_task_meta(tid)
         self.assertEqual(meta['status'], states.FAILURE)
         self.assertEqual(meta['status'], states.FAILURE)
@@ -887,7 +813,7 @@ class test_Request(AppCase):
 
 
     def test_execute_using_pool(self):
     def test_execute_using_pool(self):
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
+        job = self.xRequest(id=tid, args=[4])
 
 
         class MockPool(BasePool):
         class MockPool(BasePool):
             target = None
             target = None
@@ -908,48 +834,18 @@ class test_Request(AppCase):
         self.assertTrue(p.target)
         self.assertTrue(p.target)
         self.assertEqual(p.args[0], self.mytask.name)
         self.assertEqual(p.args[0], self.mytask.name)
         self.assertEqual(p.args[1], tid)
         self.assertEqual(p.args[1], tid)
-        self.assertEqual(p.args[2], [4])
-        self.assertIn('f', p.args[3])
-        self.assertIn([4], p.args)
+        self.assertEqual(p.args[3], job.message.body)
 
 
-        job.task.accept_magic_kwargs = False
-        job.execute_using_pool(p)
-
-    def test_default_kwargs(self):
-        self.maxDiff = 3000
-        tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
-        self.assertDictEqual(
-            job.extend_with_default_kwargs(), {
-                'f': 'x',
-                'logfile': None,
-                'loglevel': None,
-                'task_id': job.id,
-                'task_retries': 0,
-                'task_is_eager': False,
-                'delivery_info': {
-                    'exchange': None,
-                    'routing_key': None,
-                    'priority': 0,
-                    'redelivered': False,
-                },
-                'task_name': job.name})
-
-    @patch('celery.worker.job.logger')
-    def _test_on_failure(self, exception, logger):
-        app = self.app
+    def _test_on_failure(self, exception):
         tid = uuid()
         tid = uuid()
-        job = self.xRequest({'id': tid, 'args': [4]})
+        job = self.xRequest(id=tid, args=[4])
+        job.send_event = Mock(name='send_event')
         try:
         try:
             raise exception
             raise exception
         except Exception:
         except Exception:
             exc_info = ExceptionInfo()
             exc_info = ExceptionInfo()
-            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
             job.on_failure(exc_info)
             job.on_failure(exc_info)
-            self.assertTrue(logger.log.called)
-            context = logger.log.call_args[0][2]
-            self.assertEqual(self.mytask.name, context['name'])
-            self.assertIn(tid, context['id'])
+            self.assertTrue(job.send_event.called)
 
 
     def test_on_failure(self):
     def test_on_failure(self):
         self._test_on_failure(Exception('Inside unit tests'))
         self._test_on_failure(Exception('Inside unit tests'))

+ 8 - 9
celery/tests/worker/test_strategy.py

@@ -8,7 +8,7 @@ from kombu.utils.limits import TokenBucket
 from celery.worker import state
 from celery.worker import state
 from celery.utils.timeutils import rate
 from celery.utils.timeutils import rate
 
 
-from celery.tests.case import AppCase, Mock, patch, body_from_sig
+from celery.tests.case import AppCase, Mock, patch, task_message_from_sig
 
 
 
 
 class test_default_strategy(AppCase):
 class test_default_strategy(AppCase):
@@ -22,17 +22,16 @@ class test_default_strategy(AppCase):
 
 
     class Context(object):
     class Context(object):
 
 
-        def __init__(self, sig, s, reserved, consumer, message, body):
+        def __init__(self, sig, s, reserved, consumer, message):
             self.sig = sig
             self.sig = sig
             self.s = s
             self.s = s
             self.reserved = reserved
             self.reserved = reserved
             self.consumer = consumer
             self.consumer = consumer
             self.message = message
             self.message = message
-            self.body = body
 
 
         def __call__(self, **kwargs):
         def __call__(self, **kwargs):
             return self.s(
             return self.s(
-                self.message, self.body,
+                self.message, None,
                 self.message.ack, self.message.reject, [], **kwargs
                 self.message.ack, self.message.reject, [], **kwargs
             )
             )
 
 
@@ -71,15 +70,14 @@ class test_default_strategy(AppCase):
         if limit:
         if limit:
             bucket = TokenBucket(rate(limit), capacity=1)
             bucket = TokenBucket(rate(limit), capacity=1)
             consumer.task_buckets[sig.task] = bucket
             consumer.task_buckets[sig.task] = bucket
+        consumer.controller.state.revoked = set()
         consumer.disable_rate_limits = not rate_limits
         consumer.disable_rate_limits = not rate_limits
         consumer.event_dispatcher.enabled = events
         consumer.event_dispatcher.enabled = events
         s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved)
         s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved)
         self.assertTrue(s)
         self.assertTrue(s)
 
 
-        message = Mock()
-        body = body_from_sig(self.app, sig, utc=utc)
-
-        yield self.Context(sig, s, reserved, consumer, message, body)
+        message = task_message_from_sig(self.app, sig, utc=utc)
+        yield self.Context(sig, s, reserved, consumer, message)
 
 
     def test_when_logging_disabled(self):
     def test_when_logging_disabled(self):
         with patch('celery.worker.strategy.logger') as logger:
         with patch('celery.worker.strategy.logger') as logger:
@@ -129,9 +127,10 @@ class test_default_strategy(AppCase):
     def test_when_revoked(self):
     def test_when_revoked(self):
         task = self.add.s(2, 2)
         task = self.add.s(2, 2)
         task.freeze()
         task.freeze()
-        state.revoked.add(task.id)
         try:
         try:
             with self._context(task) as C:
             with self._context(task) as C:
+                C.consumer.controller.state.revoked.add(task.id)
+                state.revoked.add(task.id)
                 C()
                 C()
                 with self.assertRaises(ValueError):
                 with self.assertRaises(ValueError):
                     C.get_request()
                     C.get_request()

+ 113 - 56
celery/tests/worker/test_worker.py

@@ -17,19 +17,21 @@ from celery.bootsteps import RUN, CLOSE, StartStopStep
 from celery.concurrency.base import BasePool
 from celery.concurrency.base import BasePool
 from celery.datastructures import AttributeDict
 from celery.datastructures import AttributeDict
 from celery.exceptions import (
 from celery.exceptions import (
-    WorkerShutdown, WorkerTerminate, TaskRevokedError,
+    WorkerShutdown, WorkerTerminate, TaskRevokedError, InvalidTaskError,
 )
 )
 from celery.five import Empty, range, Queue as FastQueue
 from celery.five import Empty, range, Queue as FastQueue
 from celery.utils import uuid
 from celery.utils import uuid
 from celery.worker import components
 from celery.worker import components
 from celery.worker import consumer
 from celery.worker import consumer
 from celery.worker.consumer import Consumer as __Consumer
 from celery.worker.consumer import Consumer as __Consumer
-from celery.worker.job import Request
+from celery.worker.request import Request
 from celery.utils import worker_direct
 from celery.utils import worker_direct
 from celery.utils.serialization import pickle
 from celery.utils.serialization import pickle
 from celery.utils.timer2 import Timer
 from celery.utils.timer2 import Timer
 
 
-from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging
+from celery.tests.case import (
+    AppCase, Mock, SkipTest, TaskMessage, patch, restore_logging,
+)
 
 
 
 
 def MockStep(step=None):
 def MockStep(step=None):
@@ -123,6 +125,13 @@ def create_message(channel, **data):
     return m
     return m
 
 
 
 
+def create_task_message(channel, *args, **kwargs):
+    m = TaskMessage(*args, **kwargs)
+    m.channel = channel
+    m.delivery_info = {'consumer_tag': 'mock'}
+    return m
+
+
 class test_Consumer(AppCase):
 class test_Consumer(AppCase):
 
 
     def setup(self):
     def setup(self):
@@ -144,7 +153,7 @@ class test_Consumer(AppCase):
         l.connection = Mock()
         l.connection = Mock()
         l.connection.info.return_value = {'foo': 'bar'}
         l.connection.info.return_value = {'foo': 'bar'}
         l.controller = l.app.WorkController()
         l.controller = l.app.WorkController()
-        l.controller.pool = Mock()
+        l.pool = l.controller.pool = Mock()
         l.controller.pool.info.return_value = [Mock(), Mock()]
         l.controller.pool.info.return_value = [Mock(), Mock()]
         l.controller.consumer = l
         l.controller.consumer = l
         info = l.controller.stats()
         info = l.controller.stats()
@@ -158,6 +167,8 @@ class test_Consumer(AppCase):
 
 
     def test_connection(self):
     def test_connection(self):
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
 
 
         l.blueprint.start(l)
         l.blueprint.start(l)
         self.assertIsInstance(l.connection, Connection)
         self.assertIsInstance(l.connection, Connection)
@@ -207,32 +218,35 @@ class test_Consumer(AppCase):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.steps.pop()
         l.steps.pop()
-        backend = Mock()
-        m = create_message(backend, unknown={'baz': '!!!'})
+        channel = Mock()
+        m = create_message(channel, unknown={'baz': '!!!'})
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
         l.node = MockNode()
         l.node = MockNode()
 
 
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         self.assertTrue(warn.call_count)
         self.assertTrue(warn.call_count)
 
 
     @patch('celery.worker.strategy.to_timestamp')
     @patch('celery.worker.strategy.to_timestamp')
     def test_receive_message_eta_OverflowError(self, to_timestamp):
     def test_receive_message_eta_OverflowError(self, to_timestamp):
         to_timestamp.side_effect = OverflowError()
         to_timestamp.side_effect = OverflowError()
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.steps.pop()
         l.steps.pop()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=('2, 2'),
-                           kwargs={},
-                           eta=datetime.now().isoformat())
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=('2, 2'), kwargs={},
+            eta=datetime.now().isoformat(),
+        )
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
         l.node = MockNode()
         l.node = MockNode()
         l.update_strategies()
         l.update_strategies()
         l.qos = Mock()
         l.qos = Mock()
 
 
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         self.assertTrue(m.acknowledged)
         self.assertTrue(m.acknowledged)
 
 
     @patch('celery.worker.consumer.error')
     @patch('celery.worker.consumer.error')
@@ -241,13 +255,19 @@ class test_Consumer(AppCase):
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
         l.steps.pop()
         l.steps.pop()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=(1, 2), kwargs='foobarbaz', id=1)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=(1, 2), kwargs='foobarbaz', id=1)
         l.update_strategies()
         l.update_strategies()
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
+        strat = l.strategies[self.foo_task.name] = Mock(name='strategy')
+        strat.side_effect = InvalidTaskError()
 
 
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
+        self.assertTrue(error.called)
         self.assertIn('Received invalid task message', error.call_args[0][0])
         self.assertIn('Received invalid task message', error.call_args[0][0])
 
 
     @patch('celery.worker.consumer.crit')
     @patch('celery.worker.consumer.crit')
@@ -274,18 +294,22 @@ class test_Consumer(AppCase):
 
 
         with self.assertRaises(WorkerShutdown):
         with self.assertRaises(WorkerShutdown):
             l.loop(*l.loop_args())
             l.loop(*l.loop_args())
-        self.assertTrue(l.task_consumer.register_callback.called)
-        return l.task_consumer.register_callback.call_args[0][0]
+        self.assertTrue(l.task_consumer.on_message)
+        return l.task_consumer.on_message
 
 
     def test_receieve_message(self):
     def test_receieve_message(self):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
-        m = create_message(Mock(), task=self.foo_task.name,
-                           args=[2, 4, 8], kwargs={})
+        m = create_task_message(
+            Mock(), self.foo_task.name,
+            args=[2, 4, 8], kwargs={},
+        )
         l.update_strategies()
         l.update_strategies()
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
 
 
         in_bucket = self.buffer.get_nowait()
         in_bucket = self.buffer.get_nowait()
         self.assertIsInstance(in_bucket, Request)
         self.assertIsInstance(in_bucket, Request)
@@ -306,6 +330,8 @@ class test_Consumer(AppCase):
 
 
         l = MockConsumer(self.buffer.put, timer=self.timer,
         l = MockConsumer(self.buffer.put, timer=self.timer,
                          send_events=False, pool=BasePool(), app=self.app)
                          send_events=False, pool=BasePool(), app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.channel_errors = (KeyError, )
         l.channel_errors = (KeyError, )
         with self.assertRaises(KeyError):
         with self.assertRaises(KeyError):
             l.start()
             l.start()
@@ -324,6 +350,8 @@ class test_Consumer(AppCase):
 
 
         l = MockConsumer(self.buffer.put, timer=self.timer,
         l = MockConsumer(self.buffer.put, timer=self.timer,
                          send_events=False, pool=BasePool(), app=self.app)
                          send_events=False, pool=BasePool(), app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
 
 
         l.connection_errors = (KeyError, )
         l.connection_errors = (KeyError, )
         self.assertRaises(SyntaxError, l.start)
         self.assertRaises(SyntaxError, l.start)
@@ -406,6 +434,8 @@ class test_Consumer(AppCase):
     def test_apply_eta_task(self):
     def test_apply_eta_task(self):
         from celery.worker import state
         from celery.worker import state
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.qos = QoS(None, 10)
         l.qos = QoS(None, 10)
 
 
         task = object()
         task = object()
@@ -417,10 +447,12 @@ class test_Consumer(AppCase):
 
 
     def test_receieve_message_eta_isoformat(self):
     def test_receieve_message_eta_isoformat(self):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.steps.pop()
         l.steps.pop()
-        m = create_message(
-            Mock(), task=self.foo_task.name,
+        m = create_task_message(
+            Mock(), self.foo_task.name,
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
             args=[2, 4, 8], kwargs={},
             args=[2, 4, 8], kwargs={},
         )
         )
@@ -432,7 +464,7 @@ class test_Consumer(AppCase):
         l.enabled = False
         l.enabled = False
         l.update_strategies()
         l.update_strategies()
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(m.decode(), m)
+        callback(m)
         l.timer.stop()
         l.timer.stop()
         l.timer.join(1)
         l.timer.join(1)
 
 
@@ -469,27 +501,31 @@ class test_Consumer(AppCase):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.steps.pop()
         l.steps.pop()
-        backend = Mock()
+        channel = Mock()
         id = uuid()
         id = uuid()
-        t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8],
-                           kwargs={}, id=id)
+        t = create_task_message(
+            channel, self.foo_task.name,
+            args=[2, 4, 8], kwargs={}, id=id,
+        )
         from celery.worker.state import revoked
         from celery.worker.state import revoked
         revoked.add(id)
         revoked.add(id)
 
 
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        callback(t.decode(), t)
+        callback(t)
         self.assertTrue(self.buffer.empty())
         self.assertTrue(self.buffer.empty())
 
 
     def test_receieve_message_not_registered(self):
     def test_receieve_message_not_registered(self):
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l.blueprint.state = RUN
         l.blueprint.state = RUN
         l.steps.pop()
         l.steps.pop()
-        backend = Mock()
-        m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={})
+        channel = Mock(name='channel')
+        m = create_task_message(
+            channel, 'x.X.31x', args=[2, 4, 8], kwargs={},
+        )
 
 
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        self.assertFalse(callback(m.decode(), m))
+        self.assertFalse(callback(m))
         with self.assertRaises(Empty):
         with self.assertRaises(Empty):
             self.buffer.get_nowait()
             self.buffer.get_nowait()
         self.assertTrue(self.timer.empty())
         self.assertTrue(self.timer.empty())
@@ -498,22 +534,28 @@ class test_Consumer(AppCase):
     @patch('celery.worker.consumer.logger')
     @patch('celery.worker.consumer.logger')
     def test_receieve_message_ack_raises(self, logger, warn):
     def test_receieve_message_ack_raises(self, logger, warn):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.blueprint.state = RUN
         l.blueprint.state = RUN
-        backend = Mock()
-        m = create_message(backend, args=[2, 4, 8], kwargs={})
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[2, 4, 8], kwargs={},
+        )
+        m.headers = None
 
 
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
+        l.update_strategies()
         l.connection_errors = (socket.error, )
         l.connection_errors = (socket.error, )
         m.reject = Mock()
         m.reject = Mock()
         m.reject.side_effect = socket.error('foo')
         m.reject.side_effect = socket.error('foo')
         callback = self._get_on_message(l)
         callback = self._get_on_message(l)
-        self.assertFalse(callback(m.decode(), m))
+        self.assertFalse(callback(m))
         self.assertTrue(warn.call_count)
         self.assertTrue(warn.call_count)
         with self.assertRaises(Empty):
         with self.assertRaises(Empty):
             self.buffer.get_nowait()
             self.buffer.get_nowait()
         self.assertTrue(self.timer.empty())
         self.assertTrue(self.timer.empty())
-        m.reject.assert_called_with(requeue=False)
-        self.assertTrue(logger.critical.call_count)
+        m.reject_log_error.assert_called_with(logger, l.connection_errors)
 
 
     def test_receive_message_eta(self):
     def test_receive_message_eta(self):
         import sys
         import sys
@@ -526,13 +568,15 @@ class test_Consumer(AppCase):
         pp('TEST RECEIVE MESSAGE ETA')
         pp('TEST RECEIVE MESSAGE ETA')
         pp('+CREATE MYKOMBUCONSUMER')
         pp('+CREATE MYKOMBUCONSUMER')
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
         l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         pp('-CREATE MYKOMBUCONSUMER')
         pp('-CREATE MYKOMBUCONSUMER')
         l.steps.pop()
         l.steps.pop()
         l.event_dispatcher = mock_event_dispatcher()
         l.event_dispatcher = mock_event_dispatcher()
-        backend = Mock()
+        channel = Mock(name='channel')
         pp('+ CREATE MESSAGE')
         pp('+ CREATE MESSAGE')
-        m = create_message(
-            backend, task=self.foo_task.name,
+        m = create_task_message(
+            channel, self.foo_task.name,
             args=[2, 4, 8], kwargs={},
             args=[2, 4, 8], kwargs={},
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
             eta=(datetime.now() + timedelta(days=1)).isoformat(),
         )
         )
@@ -556,7 +600,7 @@ class test_Consumer(AppCase):
             callback = self._get_on_message(l)
             callback = self._get_on_message(l)
             pp('- GET ON MESSAGE')
             pp('- GET ON MESSAGE')
             pp('+ CALLBACK')
             pp('+ CALLBACK')
-            callback(m.decode(), m)
+            callback(m)
             pp('- CALLBACK')
             pp('- CALLBACK')
         finally:
         finally:
             pp('+ STOP TIMER')
             pp('+ STOP TIMER')
@@ -708,6 +752,8 @@ class test_Consumer(AppCase):
         init_callback = Mock()
         init_callback = Mock()
         l = _Consumer(self.buffer.put, timer=self.timer,
         l = _Consumer(self.buffer.put, timer=self.timer,
                       init_callback=init_callback, app=self.app)
                       init_callback=init_callback, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.task_consumer = Mock()
         l.task_consumer = Mock()
         l.broadcast_consumer = Mock()
         l.broadcast_consumer = Mock()
         l.qos = _QoS()
         l.qos = _QoS()
@@ -730,6 +776,8 @@ class test_Consumer(AppCase):
         init_callback.reset_mock()
         init_callback.reset_mock()
         l = _Consumer(self.buffer.put, timer=self.timer, app=self.app,
         l = _Consumer(self.buffer.put, timer=self.timer, app=self.app,
                       send_events=False, init_callback=init_callback)
                       send_events=False, init_callback=init_callback)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.qos = _QoS()
         l.qos = _QoS()
         l.task_consumer = Mock()
         l.task_consumer = Mock()
         l.broadcast_consumer = Mock()
         l.broadcast_consumer = Mock()
@@ -741,8 +789,9 @@ class test_Consumer(AppCase):
 
 
     def test_reset_connection_with_no_node(self):
     def test_reset_connection_with_no_node(self):
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
         l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+        l.controller = l.app.WorkController()
+        l.pool = l.controller.pool = Mock()
         l.steps.pop()
         l.steps.pop()
-        self.assertEqual(None, l.pool)
         l.blueprint.start(l)
         l.blueprint.start(l)
 
 
 
 
@@ -925,10 +974,12 @@ class test_WorkController(AppCase):
     def test_process_task(self):
     def test_process_task(self):
         worker = self.worker
         worker = self.worker
         worker.pool = Mock()
         worker.pool = Mock()
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker._process_task(task)
         worker._process_task(task)
         self.assertEqual(worker.pool.apply_async.call_count, 1)
         self.assertEqual(worker.pool.apply_async.call_count, 1)
         worker.pool.stop()
         worker.pool.stop()
@@ -937,10 +988,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker = self.worker
         worker.pool = Mock()
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C')
         worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C')
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker.steps = []
         worker.steps = []
         worker.blueprint.state = RUN
         worker.blueprint.state = RUN
         with self.assertRaises(KeyboardInterrupt):
         with self.assertRaises(KeyboardInterrupt):
@@ -950,10 +1003,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker = self.worker
         worker.pool = Mock()
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = WorkerTerminate()
         worker.pool.apply_async.side_effect = WorkerTerminate()
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker.steps = []
         worker.steps = []
         worker.blueprint.state = RUN
         worker.blueprint.state = RUN
         with self.assertRaises(SystemExit):
         with self.assertRaises(SystemExit):
@@ -963,10 +1018,12 @@ class test_WorkController(AppCase):
         worker = self.worker
         worker = self.worker
         worker.pool = Mock()
         worker.pool = Mock()
         worker.pool.apply_async.side_effect = KeyError('some exception')
         worker.pool.apply_async.side_effect = KeyError('some exception')
-        backend = Mock()
-        m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
-                           kwargs={})
-        task = Request(m.decode(), message=m, app=self.app)
+        channel = Mock()
+        m = create_task_message(
+            channel, self.foo_task.name,
+            args=[4, 8, 10], kwargs={},
+        )
+        task = Request(m, app=self.app)
         worker._process_task(task)
         worker._process_task(task)
         worker.pool.stop()
         worker.pool.stop()
 
 

+ 0 - 9
celery/utils/__init__.py

@@ -19,7 +19,6 @@ import datetime
 
 
 from collections import Callable
 from collections import Callable
 from functools import partial, wraps
 from functools import partial, wraps
-from inspect import getargspec
 from pprint import pprint
 from pprint import pprint
 
 
 from kombu.entity import Exchange, Queue
 from kombu.entity import Exchange, Queue
@@ -189,14 +188,6 @@ def is_iterable(obj):
     return True
     return True
 
 
 
 
-def fun_takes_kwargs(fun, kwlist=[]):
-    # deprecated
-    S = getattr(fun, 'argspec', getargspec(fun))
-    if S.keywords is not None:
-        return kwlist
-    return [kw for kw in kwlist if kw in S.args]
-
-
 def isatty(fh):
 def isatty(fh):
     try:
     try:
         return fh.isatty()
         return fh.isatty()

+ 3 - 1
celery/utils/objects.py

@@ -74,7 +74,9 @@ class FallbackContext(object):
     def __enter__(self):
     def __enter__(self):
         if self.provided is not None:
         if self.provided is not None:
             return self.provided
             return self.provided
-        context = self._context = self.fallback(*self.fb_args, **self.fb_kwargs).__enter__()
+        context = self._context = self.fallback(
+            *self.fb_args, **self.fb_kwargs
+        ).__enter__()
         return context
         return context
 
 
     def __exit__(self, *exc_info):
     def __exit__(self, *exc_info):

+ 1 - 1
celery/worker/autoscale.py

@@ -81,7 +81,7 @@ class Autoscaler(bgThread):
             self.maybe_scale()
             self.maybe_scale()
         sleep(1.0)
         sleep(1.0)
 
 
-    def _maybe_scale(self):
+    def _maybe_scale(self, req=None):
         procs = self.processes
         procs = self.processes
         cur = min(self.qty, self.max_concurrency)
         cur = min(self.qty, self.max_concurrency)
         if cur > procs:
         if cur > procs:

+ 35 - 23
celery/worker/consumer.py

@@ -35,7 +35,7 @@ from celery import bootsteps
 from celery.app.trace import build_tracer
 from celery.app.trace import build_tracer
 from celery.canvas import signature
 from celery.canvas import signature
 from celery.exceptions import InvalidTaskError
 from celery.exceptions import InvalidTaskError
-from celery.five import items, values
+from celery.five import buffer_t, items, values
 from celery.utils.functional import noop
 from celery.utils.functional import noop
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.utils.text import truncate
 from celery.utils.text import truncate
@@ -44,14 +44,6 @@ from celery.utils.timeutils import humanize_seconds, rate
 from . import heartbeat, loops, pidbox
 from . import heartbeat, loops, pidbox
 from .state import task_reserved, maybe_shutdown, revoked, reserved_requests
 from .state import task_reserved, maybe_shutdown, revoked, reserved_requests
 
 
-try:
-    buffer_t = buffer
-except NameError:  # pragma: no cover
-    # Py3 does not have buffer, but we only need isinstance.
-
-    class buffer_t(object):  # noqa
-        pass
-
 __all__ = [
 __all__ = [
     'Consumer', 'Connection', 'Events', 'Heart', 'Control',
     'Consumer', 'Connection', 'Events', 'Heart', 'Control',
     'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body',
     'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body',
@@ -127,6 +119,8 @@ MINGLE_GET_FIELDS = itemgetter('clock', 'revoked')
 
 
 
 
 def dump_body(m, body):
 def dump_body(m, body):
+    # v2 protocol does not deserialize body
+    body = m.body if body is None else body
     if isinstance(body, buffer_t):
     if isinstance(body, buffer_t):
         body = bytes_t(body)
         body = bytes_t(body)
     return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024),
     return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024),
@@ -445,21 +439,38 @@ class Consumer(object):
         on_invalid_task = self.on_invalid_task
         on_invalid_task = self.on_invalid_task
         callbacks = self.on_task_message
         callbacks = self.on_task_message
 
 
-        def on_task_received(body, message):
-            try:
-                name = body['task']
-            except (KeyError, TypeError):
-                return on_unknown_message(body, message)
+        def on_task_received(message):
 
 
+            # payload will only be set for v1 protocol, since v2
+            # will defer deserializing the message body to the pool.
+            payload = None
             try:
             try:
-                strategies[name](message, body,
-                                 message.ack_log_error,
-                                 message.reject_log_error,
-                                 callbacks)
+                type_ = message.headers['task']                # protocol v2
+            except TypeError:
+                return on_unknown_message(None, message)
+            except KeyError:
+                payload = message.payload
+                try:
+                    type_, payload = payload['task'], payload  # protocol v1
+                except (TypeError, KeyError):
+                    return on_unknown_message(payload, message)
+            try:
+                strategy = strategies[type_]
             except KeyError as exc:
             except KeyError as exc:
-                on_unknown_task(body, message, exc)
-            except InvalidTaskError as exc:
-                on_invalid_task(body, message, exc)
+                return on_unknown_task(payload, message, exc)
+            else:
+                try:
+                    strategy(
+                        message, payload, message.ack_log_error,
+                        message.reject_log_error, callbacks,
+                    )
+                except InvalidTaskError as exc:
+                    return on_invalid_task(payload, message, exc)
+                except MemoryError:
+                    raise
+                except Exception as exc:
+                    # XXX handle as internal error?
+                    return on_invalid_task(payload, message, exc)
 
 
         return on_task_received
         return on_task_received
 
 
@@ -541,8 +552,9 @@ class Heart(bootsteps.StartStopStep):
         c.heart = None
         c.heart = None
 
 
     def start(self, c):
     def start(self, c):
-        c.heart = heartbeat.Heart(c.timer, c.event_dispatcher,
-            self.heartbeat_interval)
+        c.heart = heartbeat.Heart(
+            c.timer, c.event_dispatcher, self.heartbeat_interval,
+        )
         c.heart.start()
         c.heart.start()
 
 
     def stop(self, c):
     def stop(self, c):

+ 4 - 2
celery/worker/control.py

@@ -22,8 +22,8 @@ from celery.utils.log import get_logger
 from celery.utils import jsonify
 from celery.utils import jsonify
 
 
 from . import state as worker_state
 from . import state as worker_state
+from .request import Request
 from .state import revoked
 from .state import revoked
-from .job import Request
 
 
 __all__ = ['Panel']
 __all__ = ['Panel']
 DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit')
 DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit')
@@ -364,7 +364,9 @@ def active_queues(state):
 
 
 
 
 def _wanted_config_key(key):
 def _wanted_config_key(key):
-    return isinstance(key, string_t) and key.isupper() and not key.startswith('__')
+    return (isinstance(key, string_t) and
+            key.isupper() and
+            not key.startswith('__'))
 
 
 
 
 @Panel.register
 @Panel.register

+ 0 - 590
celery/worker/job.py

@@ -1,590 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.worker.job
-    ~~~~~~~~~~~~~~~~~
-
-    This module defines the :class:`Request` class,
-    which specifies how tasks are executed.
-
-"""
-from __future__ import absolute_import, unicode_literals
-
-import logging
-import socket
-import sys
-
-from billiard.einfo import ExceptionInfo
-from datetime import datetime
-from weakref import ref
-
-from kombu.utils import kwdict, reprcall
-from kombu.utils.encoding import safe_repr, safe_str
-
-from celery import signals
-from celery.app.trace import trace_task, trace_task_ret
-from celery.exceptions import (
-    Ignore, TaskRevokedError, InvalidTaskError,
-    SoftTimeLimitExceeded, TimeLimitExceeded,
-    WorkerLostError, Terminated, Retry, Reject,
-)
-from celery.five import items, monotonic, string, string_t
-from celery.platforms import signals as _signals
-from celery.utils import fun_takes_kwargs
-from celery.utils.functional import noop
-from celery.utils.log import get_logger
-from celery.utils.serialization import get_pickled_exception
-from celery.utils.text import truncate
-from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware
-
-from . import state
-
-__all__ = ['Request']
-
-IS_PYPY = hasattr(sys, 'pypy_version_info')
-
-logger = get_logger(__name__)
-debug, info, warn, error = (logger.debug, logger.info,
-                            logger.warning, logger.error)
-_does_info = False
-_does_debug = False
-
-#: Max length of result representation
-RESULT_MAXLEN = 128
-
-
-def __optimize__():
-    # this is also called by celery.app.trace.setup_worker_optimizations
-    global _does_debug
-    global _does_info
-    _does_debug = logger.isEnabledFor(logging.DEBUG)
-    _does_info = logger.isEnabledFor(logging.INFO)
-__optimize__()
-
-# Localize
-tz_utc = timezone.utc
-tz_or_local = timezone.tz_or_local
-send_revoked = signals.task_revoked.send
-
-task_accepted = state.task_accepted
-task_ready = state.task_ready
-revoked_tasks = state.revoked
-
-NEEDS_KWDICT = sys.version_info <= (2, 6)
-
-#: Use when no message object passed to :class:`Request`.
-DEFAULT_FIELDS = {
-    'headers': None,
-    'reply_to': None,
-    'correlation_id': None,
-    'delivery_info': {
-        'exchange': None,
-        'routing_key': None,
-        'priority': 0,
-        'redelivered': False,
-    },
-}
-
-
-class Request(object):
-    """A request for task execution."""
-    if not IS_PYPY:  # pragma: no cover
-        __slots__ = (
-            'app', 'name', 'id', 'args', 'kwargs', 'on_ack',
-            'hostname', 'eventer', 'connection_errors', 'task', 'eta',
-            'expires', 'request_dict', 'acknowledged', 'on_reject',
-            'utc', 'time_start', 'worker_pid', '_already_revoked',
-            '_terminate_on_ack', '_apply_result',
-            '_tzlocal', '__weakref__', '__dict__',
-        )
-
-    #: Format string used to log task success.
-    success_msg = """\
-        Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s
-    """
-
-    #: Format string used to log task failure.
-    error_msg = """\
-        Task %(name)s[%(id)s] %(description)s: %(exc)s
-    """
-
-    #: Format string used to log internal error.
-    internal_error_msg = """\
-        Task %(name)s[%(id)s] %(description)s: %(exc)s
-    """
-
-    ignored_msg = """\
-        Task %(name)s[%(id)s] %(description)s
-    """
-
-    rejected_msg = """\
-        Task %(name)s[%(id)s] %(exc)s
-    """
-
-    #: Format string used to log task retry.
-    retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s"""
-
-    def __init__(self, body, on_ack=noop,
-                 hostname=None, eventer=None, app=None,
-                 connection_errors=None, request_dict=None,
-                 message=None, task=None, on_reject=noop, **opts):
-        self.app = app
-        name = self.name = body['task']
-        self.id = body['id']
-        self.args = body.get('args', [])
-        self.kwargs = body.get('kwargs', {})
-        try:
-            self.kwargs.items
-        except AttributeError:
-            raise InvalidTaskError(
-                'Task keyword arguments is not a mapping')
-        if NEEDS_KWDICT:
-            self.kwargs = kwdict(self.kwargs)
-        eta = body.get('eta')
-        expires = body.get('expires')
-        utc = self.utc = body.get('utc', False)
-        self.on_ack = on_ack
-        self.on_reject = on_reject
-        self.hostname = hostname or socket.gethostname()
-        self.eventer = eventer
-        self.connection_errors = connection_errors or ()
-        self.task = task or self.app.tasks[name]
-        self.acknowledged = self._already_revoked = False
-        self.time_start = self.worker_pid = self._terminate_on_ack = None
-        self._apply_result = None
-        self._tzlocal = None
-
-        # timezone means the message is timezone-aware, and the only timezone
-        # supported at this point is UTC.
-        if eta is not None:
-            try:
-                self.eta = maybe_iso8601(eta)
-            except (AttributeError, ValueError, TypeError) as exc:
-                raise InvalidTaskError(
-                    'invalid eta value {0!r}: {1}'.format(eta, exc))
-            if utc:
-                self.eta = maybe_make_aware(self.eta, self.tzlocal)
-        else:
-            self.eta = None
-        if expires is not None:
-            try:
-                self.expires = maybe_iso8601(expires)
-            except (AttributeError, ValueError, TypeError) as exc:
-                raise InvalidTaskError(
-                    'invalid expires value {0!r}: {1}'.format(expires, exc))
-            if utc:
-                self.expires = maybe_make_aware(self.expires, self.tzlocal)
-        else:
-            self.expires = None
-
-        if message:
-            delivery_info = message.delivery_info or {}
-            properties = message.properties or {}
-            body.update({
-                'headers': message.headers,
-                'reply_to': properties.get('reply_to'),
-                'correlation_id': properties.get('correlation_id'),
-                'delivery_info': {
-                    'exchange': delivery_info.get('exchange'),
-                    'routing_key': delivery_info.get('routing_key'),
-                    'priority': delivery_info.get('priority'),
-                    'redelivered': delivery_info.get('redelivered'),
-                }
-
-            })
-        else:
-            body.update(DEFAULT_FIELDS)
-        self.request_dict = body
-
-    @property
-    def delivery_info(self):
-        return self.request_dict['delivery_info']
-
-    def extend_with_default_kwargs(self):
-        """Extend the tasks keyword arguments with standard task arguments.
-
-        Currently these are `logfile`, `loglevel`, `task_id`,
-        `task_name`, `task_retries`, and `delivery_info`.
-
-        See :meth:`celery.task.base.Task.run` for more information.
-
-        Magic keyword arguments are deprecated and will be removed
-        in version 4.0.
-
-        """
-        kwargs = dict(self.kwargs)
-        default_kwargs = {'logfile': None,   # deprecated
-                          'loglevel': None,  # deprecated
-                          'task_id': self.id,
-                          'task_name': self.name,
-                          'task_retries': self.request_dict.get('retries', 0),
-                          'task_is_eager': False,
-                          'delivery_info': self.delivery_info}
-        fun = self.task.run
-        supported_keys = fun_takes_kwargs(fun, default_kwargs)
-        extend_with = {key: val for key, val in items(default_kwargs)
-                       if key in supported_keys}
-        kwargs.update(extend_with)
-        return kwargs
-
-    def execute_using_pool(self, pool, **kwargs):
-        """Used by the worker to send this task to the pool.
-
-        :param pool: A :class:`celery.concurrency.base.TaskPool` instance.
-
-        :raises celery.exceptions.TaskRevokedError: if the task was revoked
-            and ignored.
-
-        """
-        uuid = self.id
-        task = self.task
-        if self.revoked():
-            raise TaskRevokedError(uuid)
-
-        hostname = self.hostname
-        kwargs = self.kwargs
-        if task.accept_magic_kwargs:
-            kwargs = self.extend_with_default_kwargs()
-        request = self.request_dict
-        request.update({'hostname': hostname, 'is_eager': False,
-                        'delivery_info': self.delivery_info,
-                        'group': self.request_dict.get('taskset')})
-        timeout, soft_timeout = request.get('timelimit', (None, None))
-        timeout = timeout or task.time_limit
-        soft_timeout = soft_timeout or task.soft_time_limit
-        result = pool.apply_async(
-            trace_task_ret,
-            args=(self.name, uuid, self.args, kwargs, request),
-            accept_callback=self.on_accepted,
-            timeout_callback=self.on_timeout,
-            callback=self.on_success,
-            error_callback=self.on_failure,
-            soft_timeout=soft_timeout,
-            timeout=timeout,
-            correlation_id=uuid,
-        )
-        # cannot create weakref to None
-        self._apply_result = ref(result) if result is not None else result
-        return result
-
-    def execute(self, loglevel=None, logfile=None):
-        """Execute the task in a :func:`~celery.app.trace.trace_task`.
-
-        :keyword loglevel: The loglevel used by the task.
-        :keyword logfile: The logfile used by the task.
-
-        """
-        if self.revoked():
-            return
-
-        # acknowledge task as being processed.
-        if not self.task.acks_late:
-            self.acknowledge()
-
-        kwargs = self.kwargs
-        if self.task.accept_magic_kwargs:
-            kwargs = self.extend_with_default_kwargs()
-        request = self.request_dict
-        request.update({'loglevel': loglevel, 'logfile': logfile,
-                        'hostname': self.hostname, 'is_eager': False,
-                        'delivery_info': self.delivery_info})
-        retval = trace_task(self.task, self.id, self.args, kwargs, request,
-                            hostname=self.hostname, loader=self.app.loader,
-                            app=self.app)
-        self.acknowledge()
-        return retval
-
-    def maybe_expire(self):
-        """If expired, mark the task as revoked."""
-        if self.expires:
-            now = datetime.now(tz_or_local(self.tzlocal) if self.utc else None)
-            if now > self.expires:
-                revoked_tasks.add(self.id)
-                return True
-
-    def terminate(self, pool, signal=None):
-        signal = _signals.signum(signal or 'TERM')
-        if self.time_start:
-            pool.terminate_job(self.worker_pid, signal)
-            self._announce_revoked('terminated', True, signal, False)
-        else:
-            self._terminate_on_ack = pool, signal
-        if self._apply_result is not None:
-            obj = self._apply_result()  # is a weakref
-            if obj is not None:
-                obj.terminate(signal)
-
-    def _announce_revoked(self, reason, terminated, signum, expired):
-        task_ready(self)
-        self.send_event('task-revoked',
-                        terminated=terminated, signum=signum, expired=expired)
-        if self.store_errors:
-            self.task.backend.mark_as_revoked(self.id, reason, request=self)
-        self.acknowledge()
-        self._already_revoked = True
-        send_revoked(self.task, request=self,
-                     terminated=terminated, signum=signum, expired=expired)
-
-    def revoked(self):
-        """If revoked, skip task and mark state."""
-        expired = False
-        if self._already_revoked:
-            return True
-        if self.expires:
-            expired = self.maybe_expire()
-        if self.id in revoked_tasks:
-            info('Discarding revoked task: %s[%s]', self.name, self.id)
-            self._announce_revoked(
-                'expired' if expired else 'revoked', False, None, expired,
-            )
-            return True
-        return False
-
-    def send_event(self, type, **fields):
-        if self.eventer and self.eventer.enabled:
-            self.eventer.send(type, uuid=self.id, **fields)
-
-    def on_accepted(self, pid, time_accepted):
-        """Handler called when task is accepted by worker pool."""
-        self.worker_pid = pid
-        self.time_start = time_accepted
-        task_accepted(self)
-        if not self.task.acks_late:
-            self.acknowledge()
-        self.send_event('task-started')
-        if _does_debug:
-            debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid)
-        if self._terminate_on_ack is not None:
-            self.terminate(*self._terminate_on_ack)
-
-    def on_timeout(self, soft, timeout):
-        """Handler called if the task times out."""
-        task_ready(self)
-        if soft:
-            warn('Soft time limit (%ss) exceeded for %s[%s]',
-                 timeout, self.name, self.id)
-            exc = SoftTimeLimitExceeded(timeout)
-        else:
-            error('Hard time limit (%ss) exceeded for %s[%s]',
-                  timeout, self.name, self.id)
-            exc = TimeLimitExceeded(timeout)
-
-        if self.store_errors:
-            self.task.backend.mark_as_failure(self.id, exc, request=self)
-
-        if self.task.acks_late:
-            self.acknowledge()
-
-    def on_success(self, ret_value, now=None, nowfun=monotonic):
-        """Handler called if the task was successfully processed."""
-        if isinstance(ret_value, ExceptionInfo):
-            if isinstance(ret_value.exception, (
-                    SystemExit, KeyboardInterrupt)):
-                raise ret_value.exception
-            return self.on_failure(ret_value)
-        task_ready(self)
-
-        if self.task.acks_late:
-            self.acknowledge()
-
-        if self.eventer and self.eventer.enabled:
-            now = nowfun()
-            runtime = self.time_start and (now - self.time_start) or 0
-            self.send_event('task-succeeded',
-                            result=safe_repr(ret_value), runtime=runtime)
-
-        if _does_info:
-            now = now or nowfun()
-            runtime = self.time_start and (now - self.time_start) or 0
-            info(self.success_msg.strip(), {
-                'id': self.id, 'name': self.name,
-                'return_value': self.repr_result(ret_value),
-                'runtime': runtime})
-
-    def on_retry(self, exc_info):
-        """Handler called if the task should be retried."""
-        if self.task.acks_late:
-            self.acknowledge()
-
-        self.send_event('task-retried',
-                        exception=safe_repr(exc_info.exception.exc),
-                        traceback=safe_str(exc_info.traceback))
-
-        if _does_info:
-            info(self.retry_msg.strip(),
-                 {'id': self.id, 'name': self.name,
-                  'exc': exc_info.exception})
-
-    def on_failure(self, exc_info):
-        """Handler called if the task raised an exception."""
-        task_ready(self)
-        send_failed_event = True
-
-        if not exc_info.internal:
-            exc = exc_info.exception
-
-            if isinstance(exc, Retry):
-                return self.on_retry(exc_info)
-
-            # These are special cases where the process would not have had
-            # time to write the result.
-            if self.store_errors:
-                if isinstance(exc, WorkerLostError):
-                    self.task.backend.mark_as_failure(
-                        self.id, exc, request=self,
-                    )
-                elif isinstance(exc, Terminated):
-                    self._announce_revoked(
-                        'terminated', True, string(exc), False)
-                    send_failed_event = False  # already sent revoked event
-            # (acks_late) acknowledge after result stored.
-            if self.task.acks_late:
-                self.acknowledge()
-        self._log_error(exc_info, send_failed_event=send_failed_event)
-
-    def _log_error(self, einfo, send_failed_event=True):
-        einfo.exception = get_pickled_exception(einfo.exception)
-        eobj = einfo.exception
-        exception, traceback, exc_info, internal, sargs, skwargs = (
-            safe_repr(eobj),
-            safe_str(einfo.traceback),
-            einfo.exc_info,
-            einfo.internal,
-            safe_repr(self.args),
-            safe_repr(self.kwargs),
-        )
-        task = self.task
-        if task.throws and isinstance(eobj, task.throws):
-            do_send_mail, severity, exc_info, description = (
-                False, logging.INFO, None, 'raised expected',
-            )
-        else:
-            do_send_mail, severity, description = (
-                True, logging.ERROR, 'raised unexpected',
-            )
-        format = self.error_msg
-        if send_failed_event:
-            self.send_event(
-                'task-failed', exception=exception, traceback=traceback,
-            )
-
-        if internal:
-            if isinstance(einfo.exception, MemoryError):
-                raise MemoryError('Process got: %s' % (einfo.exception, ))
-            elif isinstance(einfo.exception, Reject):
-                format = self.rejected_msg
-                description = 'rejected'
-                severity = logging.WARN
-                exc_info = einfo
-                self.reject(requeue=einfo.exception.requeue)
-            elif isinstance(einfo.exception, Ignore):
-                format = self.ignored_msg
-                description = 'ignored'
-                severity = logging.INFO
-                exc_info = None
-                self.acknowledge()
-            else:
-                format = self.internal_error_msg
-                description = 'INTERNAL ERROR'
-                severity = logging.CRITICAL
-
-        context = {
-            'hostname': self.hostname,
-            'id': self.id,
-            'name': self.name,
-            'exc': exception,
-            'traceback': traceback,
-            'args': sargs,
-            'kwargs': skwargs,
-            'description': description,
-        }
-
-        logger.log(severity, format.strip(), context,
-                   exc_info=exc_info,
-                   extra={'data': {'id': self.id,
-                                   'name': self.name,
-                                   'args': sargs,
-                                   'kwargs': skwargs,
-                                   'hostname': self.hostname,
-                                   'internal': internal}})
-
-        if do_send_mail:
-            task.send_error_email(context, einfo.exception)
-
-    def acknowledge(self):
-        """Acknowledge task."""
-        if not self.acknowledged:
-            self.on_ack(logger, self.connection_errors)
-            self.acknowledged = True
-
-    def reject(self, requeue=False):
-        if not self.acknowledged:
-            self.on_reject(logger, self.connection_errors, requeue)
-            self.acknowledged = True
-
-    def repr_result(self, result, maxlen=RESULT_MAXLEN):
-        # 46 is the length needed to fit
-        #     'the quick brown fox jumps over the lazy dog' :)
-        if not isinstance(result, string_t):
-            result = safe_repr(result)
-        return truncate(result) if len(result) > maxlen else result
-
-    def info(self, safe=False):
-        return {'id': self.id,
-                'name': self.name,
-                'args': self.args if safe else safe_repr(self.args),
-                'kwargs': self.kwargs if safe else safe_repr(self.kwargs),
-                'hostname': self.hostname,
-                'time_start': self.time_start,
-                'acknowledged': self.acknowledged,
-                'delivery_info': self.delivery_info,
-                'worker_pid': self.worker_pid}
-
-    def __str__(self):
-        return '{0.name}[{0.id}]{1}{2}'.format(self,
-               ' eta:[{0}]'.format(self.eta) if self.eta else '',
-               ' expires:[{0}]'.format(self.expires) if self.expires else '')
-    shortinfo = __str__
-
-    def __repr__(self):
-        return '<{0} {1}: {2}>'.format(
-            type(self).__name__, self.id,
-            reprcall(self.name, self.args, self.kwargs))
-
-    @property
-    def tzlocal(self):
-        if self._tzlocal is None:
-            self._tzlocal = self.app.conf.CELERY_TIMEZONE
-        return self._tzlocal
-
-    @property
-    def store_errors(self):
-        return (not self.task.ignore_result
-                or self.task.store_errors_even_if_ignored)
-
-    @property
-    def task_id(self):
-        # XXX compat
-        return self.id
-
-    @task_id.setter  # noqa
-    def task_id(self, value):
-        self.id = value
-
-    @property
-    def task_name(self):
-        # XXX compat
-        return self.name
-
-    @task_name.setter  # noqa
-    def task_name(self, value):
-        self.name = value
-
-    @property
-    def reply_to(self):
-        # used by rpc backend when failures reported by parent process
-        return self.request_dict['reply_to']
-
-    @property
-    def correlation_id(self):
-        # used similarly to reply_to
-        return self.request_dict['correlation_id']

+ 2 - 2
celery/worker/loops.py

@@ -37,7 +37,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
     if heartbeat and connection.supports_heartbeats:
     if heartbeat and connection.supports_heartbeats:
         hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)
         hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)
 
 
-    consumer.callbacks = [on_task_received]
+    consumer.on_message = on_task_received
     consumer.consume()
     consumer.consume()
     obj.on_ready()
     obj.on_ready()
     obj.controller.register_with_event_loop(hub)
     obj.controller.register_with_event_loop(hub)
@@ -86,7 +86,7 @@ def synloop(obj, connection, consumer, blueprint, hub, qos,
     """Fallback blocking event loop for transports that doesn't support AIO."""
     """Fallback blocking event loop for transports that doesn't support AIO."""
 
 
     on_task_received = obj.create_task_handler()
     on_task_received = obj.create_task_handler()
-    consumer.register_callback(on_task_received)
+    consumer.on_message = on_task_received
     consumer.consume()
     consumer.consume()
 
 
     obj.on_ready()
     obj.on_ready()

+ 494 - 0
celery/worker/request.py

@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+"""
+    celery.worker.request
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    This module defines the :class:`Request` class,
+    which specifies how tasks are executed.
+
+"""
+from __future__ import absolute_import, unicode_literals
+
+import logging
+import socket
+import sys
+
+from datetime import datetime
+from weakref import ref
+
+from kombu.utils.encoding import safe_repr, safe_str
+
+from celery import signals
+from celery.app.trace import trace_task, trace_task_ret
+from celery.exceptions import (
+    Ignore, TaskRevokedError, InvalidTaskError,
+    SoftTimeLimitExceeded, TimeLimitExceeded,
+    WorkerLostError, Terminated, Retry, Reject,
+)
+from celery.five import string
+from celery.platforms import signals as _signals
+from celery.utils.functional import noop
+from celery.utils.log import get_logger
+from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware
+from celery.utils.serialization import get_pickled_exception
+
+from . import state
+
+__all__ = ['Request']
+
+IS_PYPY = hasattr(sys, 'pypy_version_info')
+
+logger = get_logger(__name__)
+debug, info, warn, error = (logger.debug, logger.info,
+                            logger.warning, logger.error)
+_does_info = False
+_does_debug = False
+
+
def __optimize__():
    """Refresh the cached logger-level flags.

    This is also called by ``celery.app.trace.setup_worker_optimizations``.
    """
    global _does_debug, _does_info
    enabled_for = logger.isEnabledFor
    _does_debug = enabled_for(logging.DEBUG)
    _does_info = enabled_for(logging.INFO)
__optimize__()
+
+# Localize
+tz_utc = timezone.utc
+tz_or_local = timezone.tz_or_local
+send_revoked = signals.task_revoked.send
+
+task_accepted = state.task_accepted
+task_ready = state.task_ready
+revoked_tasks = state.revoked
+
+
class Request(object):
    """A request for task execution.

    Wraps an incoming task message together with the resolved task type,
    the parsed ``eta``/``expires`` values, and the callbacks used to
    acknowledge or reject the underlying message.

    """
    acknowledged = False       # set once the message has been acked/rejected
    time_start = None          # set by on_accepted()
    worker_pid = None          # pid of the pool process executing this task
    timeouts = (None, None)    # (hard, soft) time limits from the headers
    _already_revoked = False
    _terminate_on_ack = None   # (pool, signal) saved if terminated before start
    _apply_result = None       # weakref to the pool result, for terminate()
    _tzlocal = None

    if not IS_PYPY:  # pragma: no cover
        __slots__ = (
            'app', 'name', 'id', 'on_ack', 'body',
            'hostname', 'eventer', 'connection_errors', 'task', 'eta',
            'expires', 'request_dict', 'on_reject', 'utc',
            'content_type', 'content_encoding',
            '__weakref__', '__dict__',
        )

    def __init__(self, message, on_ack=noop,
                 hostname=None, eventer=None, app=None,
                 connection_errors=None, request_dict=None,
                 task=None, on_reject=noop, body=None,
                 headers=None, decoded=False, utc=True,
                 maybe_make_aware=maybe_make_aware,
                 maybe_iso8601=maybe_iso8601, **opts):
        # NOTE: ``request_dict`` is accepted for backward compatibility but
        # unused; the (augmented) message headers become the request dict.
        if headers is None:
            headers = message.headers
        if body is None:
            body = message.body
        self.app = app
        self.message = message
        self.body = body
        self.utc = utc
        if decoded:
            # payload already deserialized; no further decoding required.
            self.content_type = self.content_encoding = None
        else:
            self.content_type, self.content_encoding = (
                message.content_type, message.content_encoding,
            )

        name = self.name = headers['task']
        self.id = headers['id']
        if 'timeouts' in headers:
            self.timeouts = headers['timeouts']
        self.on_ack = on_ack
        self.on_reject = on_reject
        self.hostname = hostname or socket.gethostname()
        self.eventer = eventer
        self.connection_errors = connection_errors or ()
        self.task = task or self.app.tasks[name]

        # timezone means the message is timezone-aware, and the only timezone
        # supported at this point is UTC.
        eta = headers.get('eta')
        if eta is not None:
            try:
                eta = maybe_iso8601(eta)
            except (AttributeError, ValueError, TypeError) as exc:
                raise InvalidTaskError(
                    'invalid eta value {0!r}: {1}'.format(eta, exc))
            self.eta = maybe_make_aware(eta, self.tzlocal)
        else:
            self.eta = None

        expires = headers.get('expires')
        if expires is not None:
            try:
                expires = maybe_iso8601(expires)
            except (AttributeError, ValueError, TypeError) as exc:
                raise InvalidTaskError(
                    'invalid expires value {0!r}: {1}'.format(expires, exc))
            self.expires = maybe_make_aware(expires, self.tzlocal)
        else:
            self.expires = None

        # fold reply/correlation properties and routing details into the
        # headers mapping, which then doubles as the request dict.
        delivery_info = message.delivery_info or {}
        properties = message.properties or {}
        headers.update({
            'reply_to': properties.get('reply_to'),
            'correlation_id': properties.get('correlation_id'),
            'delivery_info': {
                'exchange': delivery_info.get('exchange'),
                'routing_key': delivery_info.get('routing_key'),
                'priority': delivery_info.get('priority'),
                'redelivered': delivery_info.get('redelivered'),
            }
        })
        self.request_dict = headers

    @property
    def delivery_info(self):
        return self.request_dict['delivery_info']

    def execute_using_pool(self, pool, **kwargs):
        """Used by the worker to send this task to the pool.

        :param pool: A :class:`celery.concurrency.base.TaskPool` instance.

        :raises celery.exceptions.TaskRevokedError: if the task was revoked
            and ignored.

        """
        task_id = self.id
        task = self.task
        if self.revoked():
            raise TaskRevokedError(task_id)

        # header-provided limits take precedence over the task defaults.
        timeout, soft_timeout = self.timeouts
        timeout = timeout or task.time_limit
        soft_timeout = soft_timeout or task.soft_time_limit
        result = pool.apply_async(
            trace_task_ret,
            args=(self.name, task_id, self.request_dict, self.body,
                  self.content_type, self.content_encoding, self.hostname),
            accept_callback=self.on_accepted,
            timeout_callback=self.on_timeout,
            callback=self.on_success,
            error_callback=self.on_failure,
            soft_timeout=soft_timeout,
            timeout=timeout,
            correlation_id=task_id,
        )
        # cannot create weakref to None
        self._apply_result = ref(result) if result is not None else result
        return result

    def execute(self, loglevel=None, logfile=None):
        """Execute the task in a :func:`~celery.app.trace.trace_task`.

        :keyword loglevel: The loglevel used by the task.
        :keyword logfile: The logfile used by the task.

        """
        if self.revoked():
            return

        # acknowledge task as being processed.
        if not self.task.acks_late:
            self.acknowledge()

        request = self.request_dict
        args, kwargs = self.message.payload
        request.update({'loglevel': loglevel, 'logfile': logfile,
                        'hostname': self.hostname, 'is_eager': False,
                        'args': args, 'kwargs': kwargs})
        retval = trace_task(self.task, self.id, args, kwargs, request,
                            hostname=self.hostname, loader=self.app.loader,
                            app=self.app)[0]
        self.acknowledge()
        return retval

    def maybe_expire(self):
        """If expired, mark the task as revoked."""
        if self.expires:
            now = datetime.now(tz_or_local(self.tzlocal) if self.utc else None)
            if now > self.expires:
                revoked_tasks.add(self.id)
                return True

    def terminate(self, pool, signal=None):
        """Terminate the task, killing the pool process if already started."""
        signal = _signals.signum(signal or 'TERM')
        if self.time_start:
            pool.terminate_job(self.worker_pid, signal)
            self._announce_revoked('terminated', True, signal, False)
        else:
            # not started yet: defer termination until on_accepted().
            self._terminate_on_ack = pool, signal
        if self._apply_result is not None:
            obj = self._apply_result()  # is a weakref
            if obj is not None:
                obj.terminate(signal)

    def _announce_revoked(self, reason, terminated, signum, expired):
        # mark ready, emit event/signal, store state and ack exactly once.
        task_ready(self)
        self.send_event('task-revoked',
                        terminated=terminated, signum=signum, expired=expired)
        if self.store_errors:
            self.task.backend.mark_as_revoked(self.id, reason, request=self)
        self.acknowledge()
        self._already_revoked = True
        send_revoked(self.task, request=self,
                     terminated=terminated, signum=signum, expired=expired)

    def revoked(self):
        """If revoked, skip task and mark state."""
        expired = False
        if self._already_revoked:
            return True
        if self.expires:
            expired = self.maybe_expire()
        if self.id in revoked_tasks:
            info('Discarding revoked task: %s[%s]', self.name, self.id)
            self._announce_revoked(
                'expired' if expired else 'revoked', False, None, expired,
            )
            return True
        return False

    def send_event(self, type, **fields):
        if self.eventer and self.eventer.enabled:
            self.eventer.send(type, uuid=self.id, **fields)

    def on_accepted(self, pid, time_accepted):
        """Handler called when task is accepted by worker pool."""
        self.worker_pid = pid
        self.time_start = time_accepted
        task_accepted(self)
        if not self.task.acks_late:
            self.acknowledge()
        self.send_event('task-started')
        if _does_debug:
            debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid)
        if self._terminate_on_ack is not None:
            # terminate() was called before the task started: do it now.
            self.terminate(*self._terminate_on_ack)

    def on_timeout(self, soft, timeout):
        """Handler called if the task times out."""
        task_ready(self)
        if soft:
            warn('Soft time limit (%ss) exceeded for %s[%s]',
                 timeout, self.name, self.id)
            exc = SoftTimeLimitExceeded(timeout)
        else:
            error('Hard time limit (%ss) exceeded for %s[%s]',
                  timeout, self.name, self.id)
            exc = TimeLimitExceeded(timeout)

        if self.store_errors:
            self.task.backend.mark_as_failure(self.id, exc, request=self)

        if self.task.acks_late:
            self.acknowledge()

    def on_success(self, failed__retval__runtime, **kwargs):
        """Handler called if the task was successfully processed."""
        failed, retval, runtime = failed__retval__runtime
        if failed:
            # worker shutdown exceptions must propagate to the consumer.
            if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)):
                raise retval.exception
            return self.on_failure(retval, return_ok=True)
        task_ready(self)

        if self.task.acks_late:
            self.acknowledge()

        if self.eventer and self.eventer.enabled:
            self.send_event(
                'task-succeeded', result=retval, runtime=runtime,
            )

    def on_retry(self, exc_info):
        """Handler called if the task should be retried."""
        if self.task.acks_late:
            self.acknowledge()

        self.send_event('task-retried',
                        exception=safe_repr(exc_info.exception.exc),
                        traceback=safe_str(exc_info.traceback))

    def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
        """Handler called if the task raised an exception."""
        task_ready(self)

        if isinstance(exc_info.exception, MemoryError):
            raise MemoryError('Process got: %s' % (exc_info.exception, ))
        elif isinstance(exc_info.exception, Reject):
            return self.reject(requeue=exc_info.exception.requeue)
        elif isinstance(exc_info.exception, Ignore):
            return self.acknowledge()

        exc = exc_info.exception

        if isinstance(exc, Retry):
            return self.on_retry(exc_info)

        # These are special cases where the process would not have had
        # time to write the result.
        if self.store_errors:
            if isinstance(exc, Terminated):
                self._announce_revoked(
                    'terminated', True, string(exc), False)
                send_failed_event = False  # already sent revoked event
            elif isinstance(exc, WorkerLostError) or not return_ok:
                self.task.backend.mark_as_failure(
                    self.id, exc, request=self,
                )
        # (acks_late) acknowledge after result stored.
        if self.task.acks_late:
            self.acknowledge()

        if send_failed_event:
            self.send_event(
                'task-failed',
                exception=safe_repr(get_pickled_exception(exc_info.exception)),
                traceback=exc_info.traceback,
            )

        if not return_ok:
            error('Task handler raised error: %r', exc,
                  exc_info=exc_info.exc_info)

    def acknowledge(self):
        """Acknowledge task (idempotent)."""
        if not self.acknowledged:
            self.on_ack(logger, self.connection_errors)
            self.acknowledged = True

    def reject(self, requeue=False):
        """Reject the task message, optionally requeueing it (idempotent)."""
        if not self.acknowledged:
            self.on_reject(logger, self.connection_errors, requeue)
            self.acknowledged = True

    def info(self, safe=False):
        """Return a dict of request details, used by e.g. remote inspect."""
        return {'id': self.id,
                'name': self.name,
                'body': self.body,
                'hostname': self.hostname,
                'time_start': self.time_start,
                'acknowledged': self.acknowledged,
                'delivery_info': self.delivery_info,
                'worker_pid': self.worker_pid}

    def __str__(self):
        return '{0.name}[{0.id}]{1}{2}'.format(self,
               ' eta:[{0}]'.format(self.eta) if self.eta else '',
               ' expires:[{0}]'.format(self.expires) if self.expires else '')
    shortinfo = __str__

    def __repr__(self):
        return '<{0} {1}: {2}>'.format(type(self).__name__, self.id, self.name)

    @property
    def tzlocal(self):
        # lazily resolved from app configuration.
        if self._tzlocal is None:
            self._tzlocal = self.app.conf.CELERY_TIMEZONE
        return self._tzlocal

    @property
    def store_errors(self):
        return (not self.task.ignore_result
                or self.task.store_errors_even_if_ignored)

    @property
    def task_id(self):
        # XXX compat
        return self.id

    @task_id.setter  # noqa
    def task_id(self, value):
        self.id = value

    @property
    def task_name(self):
        # XXX compat
        return self.name

    @task_name.setter  # noqa
    def task_name(self, value):
        self.name = value

    @property
    def reply_to(self):
        # used by rpc backend when failures reported by parent process
        return self.request_dict['reply_to']

    @property
    def correlation_id(self):
        # used similarly to reply_to
        return self.request_dict['correlation_id']
+
+
def create_request_cls(base, task, pool, hostname, eventer,
                       ref=ref, revoked_tasks=revoked_tasks,
                       task_ready=task_ready):
    """Return a ``base`` subclass specialized for one task type and pool.

    Per-task attributes and pool methods are hoisted into the closure so
    the hot message-handling path avoids repeated attribute lookups.
    """
    from celery.app.trace import trace_task_ret as trace
    hard_limit_default = task.time_limit
    soft_limit_default = task.soft_time_limit
    acks_late = task.acks_late
    apply_async = pool.apply_async
    std_kwargs = {'hostname': hostname, 'is_eager': False}
    events = eventer and eventer.enabled

    class Request(base):

        def execute_using_pool(self, pool, **kwargs):
            task_id = self.id
            if (self.expires or task_id in revoked_tasks) and self.revoked():
                raise TaskRevokedError(task_id)

            hard_limit, soft_limit = self.timeouts
            result = apply_async(
                trace,
                args=(self.name, task_id, self.request_dict, self.body,
                      self.content_type, self.content_encoding),
                kwargs=std_kwargs,
                accept_callback=self.on_accepted,
                timeout_callback=self.on_timeout,
                callback=self.on_success,
                error_callback=self.on_failure,
                soft_timeout=soft_limit or soft_limit_default,
                timeout=hard_limit or hard_limit_default,
                correlation_id=task_id,
            )
            # cannot create weakref to None
            self._apply_result = ref(result) if result is not None else result
            return result

        def on_success(self, failed__retval__runtime, **kwargs):
            failed, retval, runtime = failed__retval__runtime
            if failed:
                if isinstance(retval.exception,
                              (SystemExit, KeyboardInterrupt)):
                    raise retval.exception
                return self.on_failure(retval, return_ok=True)
            task_ready(self)

            if acks_late:
                self.acknowledge()

            if events:
                self.send_event(
                    'task-succeeded', result=retval, runtime=runtime,
                )

    return Request

+ 2 - 2
celery/worker/state.py

@@ -42,10 +42,10 @@ REVOKES_MAX = 50000
 #: being expired when the max limit has been exceeded.
 #: being expired when the max limit has been exceeded.
 REVOKE_EXPIRES = 10800
 REVOKE_EXPIRES = 10800
 
 
-#: set of all reserved :class:`~celery.worker.job.Request`'s.
+#: set of all reserved :class:`~celery.worker.request.Request`'s.
 reserved_requests = set()
 reserved_requests = set()
 
 
-#: set of currently active :class:`~celery.worker.job.Request`'s.
+#: set of currently active :class:`~celery.worker.request.Request`'s.
 active_requests = set()
 active_requests = set()
 
 
 #: count of tasks accepted by the worker, sorted by type.
 #: count of tasks accepted by the worker, sorted by type.

+ 45 - 12
celery/worker/strategy.py

@@ -11,12 +11,13 @@ from __future__ import absolute_import
 import logging
 import logging
 
 
 from kombu.async.timer import to_timestamp
 from kombu.async.timer import to_timestamp
-from kombu.utils.encoding import safe_repr
 
 
+from celery.exceptions import InvalidTaskError
+from celery.five import buffer_t
 from celery.utils.log import get_logger
 from celery.utils.log import get_logger
 from celery.utils.timeutils import timezone
 from celery.utils.timeutils import timezone
 
 
-from .job import Request
+from .request import Request, create_request_cls
 from .state import task_reserved
 from .state import task_reserved
 
 
 __all__ = ['default']
 __all__ = ['default']
@@ -24,12 +25,31 @@ __all__ = ['default']
 logger = get_logger(__name__)
 logger = get_logger(__name__)
 
 
 
 
def proto1_to_proto2(message, body):
    """Converts Task message protocol 1 arguments to protocol 2.

    Returns tuple of ``(body, headers, already_decoded_status, utc)``

    """
    utc = body.get('utc', True)
    try:
        args = body['args']
        kwargs = body['kwargs']
        kwargs.items  # a mapping must expose .items
    except KeyError:
        raise InvalidTaskError('Message does not have args/kwargs')
    except AttributeError:
        raise InvalidTaskError(
            'Task keyword arguments must be a mapping',
        )
    body['headers'] = message.headers
    return (args, kwargs), body, True, utc
+
+
 def default(task, app, consumer,
 def default(task, app, consumer,
             info=logger.info, error=logger.error, task_reserved=task_reserved,
             info=logger.info, error=logger.error, task_reserved=task_reserved,
-            to_system_tz=timezone.to_system):
+            to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t,
+            proto1_to_proto2=proto1_to_proto2):
     hostname = consumer.hostname
     hostname = consumer.hostname
     eventer = consumer.event_dispatcher
     eventer = consumer.event_dispatcher
-    Req = Request
     connection_errors = consumer.connection_errors
     connection_errors = consumer.connection_errors
     _does_info = logger.isEnabledFor(logging.INFO)
     _does_info = logger.isEnabledFor(logging.INFO)
     events = eventer and eventer.enabled
     events = eventer and eventer.enabled
@@ -40,15 +60,28 @@ def default(task, app, consumer,
     bucket = consumer.task_buckets[task.name]
     bucket = consumer.task_buckets[task.name]
     handle = consumer.on_task_request
     handle = consumer.on_task_request
     limit_task = consumer._limit_task
     limit_task = consumer._limit_task
+    body_can_be_buffer = consumer.pool.body_can_be_buffer
+    Req = create_request_cls(Request, task, consumer.pool, hostname, eventer)
+
+    revoked_tasks = consumer.controller.state.revoked
 
 
     def task_message_handler(message, body, ack, reject, callbacks,
     def task_message_handler(message, body, ack, reject, callbacks,
                              to_timestamp=to_timestamp):
                              to_timestamp=to_timestamp):
-        req = Req(body, on_ack=ack, on_reject=reject,
-                  app=app, hostname=hostname,
-                  eventer=eventer, task=task,
-                  connection_errors=connection_errors,
-                  message=message)
-        if req.revoked():
+        if body is None:
+            body, headers, decoded, utc = (
+                message.body, message.headers, False, True,
+            )
+            if not body_can_be_buffer:
+                body = bytes(body) if isinstance(body, buffer_t) else body
+        else:
+            body, headers, decoded, utc = proto1_to_proto2(message, body)
+        req = Req(
+            message,
+            on_ack=ack, on_reject=reject, app=app, hostname=hostname,
+            eventer=eventer, task=task, connection_errors=connection_errors,
+            body=body, headers=headers, decoded=decoded, utc=utc,
+        )
+        if (req.expires or req.id in revoked_tasks) and req.revoked():
             return
             return
 
 
         if _does_info:
         if _does_info:
@@ -58,7 +91,7 @@ def default(task, app, consumer,
             send_event(
             send_event(
                 'task-received',
                 'task-received',
                 uuid=req.id, name=req.name,
                 uuid=req.id, name=req.name,
-                args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
+                args='', kwargs='',
                 retries=req.request_dict.get('retries', 0),
                 retries=req.request_dict.get('retries', 0),
                 eta=req.eta and req.eta.isoformat(),
                 eta=req.eta and req.eta.isoformat(),
                 expires=req.expires and req.expires.isoformat(),
                 expires=req.expires and req.expires.isoformat(),
@@ -83,7 +116,7 @@ def default(task, app, consumer,
                     return limit_task(req, bucket, 1)
                     return limit_task(req, bucket, 1)
             task_reserved(req)
             task_reserved(req)
             if callbacks:
             if callbacks:
-                [callback() for callback in callbacks]
+                [callback(req) for callback in callbacks]
             handle(req)
             handle(req)
 
 
     return task_message_handler
     return task_message_handler

+ 1 - 1
docs/internals/app-overview.rst

@@ -226,7 +226,7 @@ App Dependency Tree
     * celery.apps.worker.Worker
     * celery.apps.worker.Worker
         * celery.worker.WorkerController
         * celery.worker.WorkerController
             * celery.worker.consumer.Consumer
             * celery.worker.consumer.Consumer
-                * celery.worker.job.TaskRequest
+                * celery.worker.request.Request
                 * celery.events.EventDispatcher
                 * celery.events.EventDispatcher
                 * celery.worker.control.ControlDispatch
                 * celery.worker.control.ControlDispatch
                    * celery.worker.control.registry.Panel
                     * celery.woker.control.registry.Panel

+ 11 - 7
docs/internals/protov2.rst

@@ -28,9 +28,9 @@ Notes
 
 
     - Java/C, etc. can use a thrift/protobuf document as the body
     - Java/C, etc. can use a thrift/protobuf document as the body
 
 
-- Dispatches to actor based on ``c_type``, ``c_meth`` headers
+- Dispatches to actor based on ``task``, ``meth`` headers
 
 
-    ``c_meth`` is unused by python, but may be used in the future
+    ``meth`` is unused by python, but may be used in the future
     to specify class+method pairs.
     to specify class+method pairs.
 
 
 - Chain gains a dedicated field.
 - Chain gains a dedicated field.
@@ -50,8 +50,9 @@ Notes
 
 
 - ``correlation_id`` replaces ``task_id`` field.
 - ``correlation_id`` replaces ``task_id`` field.
 
 
+- ``root_id`` and ``parent_id`` fields helps keep track of workflows.
 
 
-- ``c_shadow`` lets you specify a different name for logs, monitors
+- ``shadow`` lets you specify a different name for logs, monitors
   can be used for e.g. meta tasks that calls any function::
   can be used for e.g. meta tasks that calls any function::
 
 
     from celery.utils.imports import qualname
     from celery.utils.imports import qualname
@@ -101,11 +102,14 @@ Definition
     }
     }
     headers = {
     headers = {
         'lang': (string)'py'
         'lang': (string)'py'
-        'c_type': (string)task,
+        'task': (string)task,
+        'id': (uuid)task_id,
+        'root_id': (uuid)root_id,
+        'parent_id': (uuid)parent_id,
 
 
         # optional
         # optional
-        'c_meth': (string)unused,
-        'c_shadow': (string)replace_name,
+        'meth': (string)unused,
+        'shadow': (string)replace_name,
         'eta': (iso8601)eta,
         'eta': (iso8601)eta,
        'expires': (iso8601)expires,
        'expires': (iso8601)expires,
         'callbacks': (list)Signature,
         'callbacks': (list)Signature,
@@ -131,7 +135,7 @@ Example
         message=json.dumps([[2, 2], {}]),
         message=json.dumps([[2, 2], {}]),
         application_headers={
         application_headers={
             'lang': 'py',
             'lang': 'py',
-            'c_type': 'proj.tasks.add',
+            'task': 'proj.tasks.add',
             'chain': [
             'chain': [
                 # reversed chain list
                 # reversed chain list
                 {'task': 'proj.tasks.add', 'args': (8, )},
                 {'task': 'proj.tasks.add', 'args': (8, )},

+ 1 - 1
docs/internals/worker.rst

@@ -35,7 +35,7 @@ Receives messages from the broker using `Kombu`_.
 .. _`Kombu`: http://pypi.python.org/pypi/kombu
 .. _`Kombu`: http://pypi.python.org/pypi/kombu
 
 
 When a message is received it's converted into a
 When a message is received it's converted into a
-:class:`celery.worker.job.TaskRequest` object.
+:class:`celery.worker.request.Request` object.
 
 
 Tasks with an ETA, or rate-limit are entered into the `timer`,
 Tasks with an ETA, or rate-limit are entered into the `timer`,
 messages that can be immediately processed are sent to the execution pool.
 messages that can be immediately processed are sent to the execution pool.

+ 3 - 3
docs/reference/celery.worker.job.rst → docs/reference/celery.worker.request.rst

@@ -1,11 +1,11 @@
 =====================================
 =====================================
- celery.worker.job
+ celery.worker.request
 =====================================
 =====================================
 
 
 .. contents::
 .. contents::
     :local:
     :local:
-.. currentmodule:: celery.worker.job
+.. currentmodule:: celery.worker.request
 
 
-.. automodule:: celery.worker.job
+.. automodule:: celery.worker.request
     :members:
     :members:
     :undoc-members:
     :undoc-members:

+ 1 - 1
docs/reference/index.rst

@@ -47,7 +47,7 @@
     celery.apps.beat
     celery.apps.beat
     celery.worker
     celery.worker
     celery.worker.consumer
     celery.worker.consumer
-    celery.worker.job
+    celery.worker.request
     celery.worker.state
     celery.worker.state
     celery.worker.strategy
     celery.worker.strategy
     celery.bin.base
     celery.bin.base

+ 1 - 1
docs/userguide/extending.rst

@@ -463,7 +463,7 @@ Methods
 .. method:: apply_eta_task(request)
 .. method:: apply_eta_task(request)
 
 
     Schedule eta task to execute based on the ``request.eta`` attribute.
     Schedule eta task to execute based on the ``request.eta`` attribute.
-    (:class:`~celery.worker.job.Request`)
+    (:class:`~celery.worker.request.Request`)
 
 
 
 
 
 

+ 1 - 1
docs/userguide/signals.rst

@@ -271,7 +271,7 @@ Provides arguments:
 
 
 * request
 * request
 
 
-    This is a :class:`~celery.worker.job.Request` instance, and not
+    This is a :class:`~celery.worker.request.Request` instance, and not
     ``task.request``.   When using the prefork pool this signal
     ``task.request``.   When using the prefork pool this signal
     is dispatched in the parent process, so ``task.request`` is not available
     is dispatched in the parent process, so ``task.request`` is not available
     and should not be used.  Use this object instead, which should have many
     and should not be used.  Use this object instead, which should have many

+ 4 - 3
docs/whatsnew-3.1.rst

@@ -1072,8 +1072,9 @@ In Other News
   (Issue #1555).
   (Issue #1555).
 
 
     The revoked signal is dispatched after the task request is removed from
     The revoked signal is dispatched after the task request is removed from
-    the stack, so it must instead use the :class:`~celery.worker.job.Request`
-    object to get information about the task.
+    the stack, so it must instead use the
+    :class:`~celery.worker.request.Request` object to get information
+    about the task.
 
 
 - Worker: New :option:`-X` command line argument to exclude queues
 - Worker: New :option:`-X` command line argument to exclude queues
   (Issue #1399).
   (Issue #1399).
@@ -1235,7 +1236,7 @@ Internal changes
     - Result backends (:class:`celery.backends.base.BaseBackend`)
     - Result backends (:class:`celery.backends.base.BaseBackend`)
     - :class:`celery.worker.WorkController`
     - :class:`celery.worker.WorkController`
     - :class:`celery.worker.Consumer`
     - :class:`celery.worker.Consumer`
-    - :class:`celery.worker.job.Request`
+    - :class:`celery.worker.request.Request`
 
 
     This means that you have to pass a specific app when instantiating
     This means that you have to pass a specific app when instantiating
     these classes.
     these classes.

+ 0 - 1
extra/release/doc4allmods

@@ -7,7 +7,6 @@ SKIP_FILES="celery.five.rst
             celery.task.rst
             celery.task.rst
             celery.task.base.rst
             celery.task.base.rst
             celery.task.sets.rst
             celery.task.sets.rst
-            celery.task.trace.rst
             celery.bin.rst
             celery.bin.rst
             celery.bin.celeryd_detach.rst
             celery.bin.celeryd_detach.rst
             celery.contrib.rst
             celery.contrib.rst

+ 5 - 0
funtests/stress/stress/templates.py

@@ -70,6 +70,7 @@ class default(object):
         'interval_max': 2,
         'interval_max': 2,
         'interval_step': 0.1,
         'interval_step': 0.1,
     }
     }
+    CELERY_TASK_PROTOCOL = 2
 
 
 
 
 @template()
 @template()
@@ -124,3 +125,7 @@ class sqs(default):
     BROKER_TRANSPORT_OPTIONS = {
     BROKER_TRANSPORT_OPTIONS = {
         'region': os.environ.get('AWS_REGION', 'us-east-1'),
         'region': os.environ.get('AWS_REGION', 'us-east-1'),
     }
     }
+
+@template()
+class proto1(default):
+    CELERY_TASK_PROTOCOL = 1