Merge branch 'hub_semaphore'

Ask Solem 13 years ago
parent
commit
9ac95c7020

+ 8 - 2
celery/app/amqp.py

@@ -29,6 +29,9 @@ QUEUE_FORMAT = """
 . %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s
 """
 
+TASK_BARE = 0x004
+TASK_DEFAULT = 0
+
 
 class Queues(dict):
     """Queue name⇒ declaration mapping.
@@ -155,7 +158,7 @@ class TaskProducer(Producer):
             queue=None, now=None, retries=0, chord=None, callbacks=None,
             errbacks=None, mandatory=None, priority=None, immediate=None,
             routing_key=None, serializer=None, delivery_mode=None,
-            compression=None, **kwargs):
+            compression=None, bare=False, **kwargs):
         """Send task message."""
         # merge default and custom policy
         _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
@@ -175,6 +178,8 @@ class TaskProducer(Producer):
             expires = now + timedelta(seconds=expires)
         eta = eta and eta.isoformat()
         expires = expires and expires.isoformat()
+        flags = TASK_DEFAULT
+        flags |= TASK_BARE if bare else 0
 
         body = {"task": task_name,
                 "id": task_id,
@@ -185,7 +190,8 @@ class TaskProducer(Producer):
                 "expires": expires,
                 "utc": self.utc,
                 "callbacks": callbacks,
-                "errbacks": errbacks}
+                "errbacks": errbacks,
+                "flags": flags}
         if taskset_id:
             body["taskset"] = taskset_id
         if chord:
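
The new "flags" field is a plain integer bitmask carried in the message
body, so older workers that ignore unknown keys remain compatible. A
minimal sketch of how the bit composes, using only the two constants
introduced above:

    TASK_BARE = 0x004
    TASK_DEFAULT = 0

    def make_flags(bare=False):
        # start from the default and OR in each optional bit,
        # exactly as publish_task does above
        flags = TASK_DEFAULT
        flags |= TASK_BARE if bare else 0
        return flags

    assert make_flags() == TASK_DEFAULT
    assert make_flags(bare=True) & TASK_BARE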

+ 1 - 34
celery/app/task.py

@@ -40,7 +40,7 @@ extract_exec_options = mattrgetter("queue", "routing_key",
                                    "exchange", "immediate",
                                    "mandatory", "priority",
                                    "serializer", "delivery_mode",
-                                   "compression", "expires")
+                                   "compression", "expires", "bare")
 
 
 class Context(object):
@@ -796,24 +796,6 @@ class BaseTask(object):
         """
         pass
 
-    def after_return(self, status, retval, task_id, args, kwargs, einfo):
-        """Handler called after the task returns.
-
-        :param status: Current task state.
-        :param retval: Task return value/exception.
-        :param task_id: Unique id of the task.
-        :param args: Original arguments for the task that failed.
-        :param kwargs: Original keyword arguments for the task
-                       that failed.
-
-        :keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
-                        instance, containing the traceback (if any).
-
-        The return value of this handler is ignored.
-
-        """
-        pass
-
     def on_failure(self, exc, task_id, args, kwargs, einfo):
         """Error handler.
 
@@ -837,21 +819,6 @@ class BaseTask(object):
         if self.send_error_emails and not self.disable_error_emails:
             self.ErrorMail(self, **kwargs).send(context, exc)
 
-    def on_success(self, retval, task_id, args, kwargs):
-        """Success handler.
-
-        Run by the worker if the task executes successfully.
-
-        :param retval: The return value of the task.
-        :param task_id: Unique id of the executed task.
-        :param args: Original arguments for the executed task.
-        :param kwargs: Original keyword arguments for the executed task.
-
-        The return value of this handler is ignored.
-
-        """
-        pass
-
     def execute(self, request, pool, loglevel, logfile, **kwargs):
         """The method the worker calls to execute the task.
 

+ 6 - 4
celery/apps/worker.py

@@ -19,6 +19,7 @@ from celery.utils import cry, isatty
 from celery.utils.imports import qualname
 from celery.utils.log import LOG_LEVELS, get_logger, mlevel, set_in_sighandler
 from celery.utils.text import pluralize
+from celery.utils.threads import active_count as active_thread_count
 from celery.worker import WorkController
 
 try:
@@ -241,10 +242,11 @@ def _shutdown_handler(worker, sig="TERM", how="Warm", exc=SystemExit,
                 if callback:
                     callback(worker)
                 safe_say("celeryd: %s shutdown (MainProcess)" % how)
-            if how == "Warm":
-                state.should_stop = True
-            elif how == "Cold":
-                state.should_terminate = True
+            if active_thread_count() > 1:
+                setattr(state, {"Warm": "should_stop",
+                                "Cold": "should_terminate"}[how], True)
+            else:
+                raise exc()
         finally:
             set_in_sighandler(False)
     _handle_request.__name__ = "worker_" + how
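
The handler now distinguishes a lone main thread from a worker that still
has helper threads running: with extra threads alive it can only set a
flag for the loop to act on, while a single-threaded worker may raise
SystemExit directly from the signal handler. The decision in isolation
(a sketch; `state` is any object with the two boolean attributes):

    import threading

    def shutdown(state, how="Warm", exc=SystemExit):
        if threading.active_count() > 1:
            # helper threads alive: flag the event loop and let it unwind
            setattr(state, {"Warm": "should_stop",
                            "Cold": "should_terminate"}[how], True)
        else:
            # only the main thread left, so raising here is safe
            raise exc()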

+ 11 - 1
celery/concurrency/base.py

@@ -52,6 +52,9 @@ class BasePool(object):
     #: only used by multiprocessing pool
     on_process_down = None
 
+    #: only used by multiprocessing pool
+    uses_semaphore = False
+
     def __init__(self, limit=None, putlocks=True, **options):
         self.limit = limit
         self.putlocks = putlocks
@@ -61,6 +64,9 @@ class BasePool(object):
     def on_start(self):
         pass
 
+    def did_start_ok(self):
+        return True
+
     def on_stop(self):
         pass
 
@@ -128,7 +134,11 @@ class BasePool(object):
         return self.limit
 
     @property
-    def eventmap(self):
+    def readers(self):
+        return {}
+
+    @property
+    def writers(self):
         return {}
 
     @property

+ 11 - 3
celery/concurrency/processes/__init__.py

@@ -56,6 +56,7 @@ class TaskPool(BasePool):
     Pool = Pool
 
     requires_mediator = True
+    uses_semaphore = True
 
     def on_start(self):
         """Run the task pool.
@@ -68,6 +69,9 @@ class TaskPool(BasePool):
                                **self.options)
         self.on_apply = self._pool.apply_async
 
+    def did_start_ok(self):
+        return self._pool.did_start_ok()
+
     def on_stop(self):
         """Gracefully stop the pool."""
         if self._pool is not None and self._pool._state in (RUN, CLOSE):
@@ -128,9 +132,13 @@ class TaskPool(BasePool):
         return self._pool._processes
 
     @property
-    def eventmap(self):
-        return self._pool.eventmap
+    def readers(self):
+        return self._pool.readers
+
+    @property
+    def writers(self):
+        return self._pool.writers
 
     @property
     def timers(self):
-        return self._pool.timers
+        return {self._pool.maintain_pool: 30}
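
The old single `eventmap` is split into `readers` and `writers`, and
`timers` now maps a handler to an interval in seconds (the consumer
converts to milliseconds when scheduling). A sketch of the wiring on the
hub side, assuming objects with the properties defined here (the
`wire_pool` helper itself is hypothetical):

    def wire_pool(hub, timer, pool):
        # file descriptors feed the poller; timers become periodic calls
        hub.update_readers(pool.readers)
        hub.update_writers(pool.writers)
        for handler, interval in pool.timers.iteritems():
            timer.apply_interval(interval * 1000.0, handler)  # ms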

+ 6 - 2
celery/concurrency/processes/_win.py

@@ -8,8 +8,12 @@ import os
 
 # Code based on the winappdbg project http://winappdbg.sourceforge.net/
 # (BSD License)
-from ctypes import byref, sizeof, windll, Structure, WinError, POINTER
-from ctypes.wintypes import DWORD, c_size_t, LONG, c_char, c_void_p
+from ctypes import (
+    byref, sizeof, windll,
+    Structure, WinError, POINTER,
+    c_size_t, c_char, c_void_p,
+)
+from ctypes.wintypes import DWORD, LONG
 
 ERROR_NO_MORE_FILES = 18
 INVALID_HANDLE_VALUE = c_void_p(-1).value

+ 30 - 4
celery/task/trace.py

@@ -128,6 +128,30 @@ class TraceInfo(object):
             del(tb)
 
 
+def execute_bare(task, uuid, args, kwargs, request=None):
+    R = I = None
+    kwargs = kwdict(kwargs)
+    try:
+        try:
+            R = retval = task(*args, **kwargs)
+            state = SUCCESS
+        except Exception, exc:
+            I = TraceInfo(FAILURE, exc)
+            state, retval = I.state, I.retval
+            R = I.handle_error_state(task)
+        except BaseException, exc:
+            raise
+        except:  # pragma: no cover
+            # For Python2.5 where raising strings are still allowed
+            # (but deprecated)
+            I = TraceInfo(FAILURE, None)
+            state, retval = I.state, I.retval
+            R = I.handle_error_state(task)
+    except Exception, exc:
+        R = report_internal_error(task, exc)
+    return R
+
+
 def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
         Info=TraceInfo, eager=False, propagate=False):
     # If the task doesn't define a custom __call__ method
@@ -146,8 +170,8 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     loader_task_init = loader.on_task_init
     loader_cleanup = loader.on_process_cleanup
 
-    task_on_success = task.on_success
-    task_after_return = task.after_return
+    task_on_success = getattr(task, "on_success", None)
+    task_after_return = getattr(task, "after_return", None)
 
     store_result = backend.store_result
     backend_cleanup = backend.process_cleanup
@@ -215,14 +239,16 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     # stored, so that result.children is populated.
                     [subtask(callback).apply_async((retval, ))
                         for callback in task_request.callbacks or []]
-                    task_on_success(retval, uuid, args, kwargs)
+                    if task_on_success:
+                        task_on_success(retval, uuid, args, kwargs)
                     if success_receivers:
                         send_success(sender=task, result=retval)
 
                 # -* POST *-
                 if task_request.chord:
                     on_chord_part_return(task)
-                task_after_return(state, retval, uuid, args, kwargs, None)
+                if task_after_return:
+                    task_after_return(state, retval, uuid, args, kwargs, None)
                 if postrun_receivers:
                     send_postrun(sender=task, task_id=uuid, task=task,
                                  args=args, kwargs=kwargs,
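
Together with the stub removals in celery/app/task.py above, the tracer
now treats `on_success` and `after_return` as optional: `getattr` returns
None for tasks that never override them, and the call is skipped outright
instead of dispatching into an empty method on every task. The pattern in
isolation:

    class MyTask(object):
        pass  # neither on_success nor after_return defined

    task = MyTask()
    task_on_success = getattr(task, "on_success", None)
    if task_on_success:
        task_on_success("retval", "uuid", (), {})  # skipped for MyTask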

+ 3 - 0
celery/utils/threads.py

@@ -8,6 +8,9 @@ import traceback
 _Thread = threading.Thread
 _Event = threading._Event
 
+active_count = (getattr(threading, "active_count", None) or
+                threading.activeCount)
+
 
 class Event(_Event):
 

+ 20 - 5
celery/worker/__init__.py

@@ -38,6 +38,7 @@ from celery.utils.timer2 import Schedule
 from . import abstract
 from . import state
 from .buckets import TaskBucket, FastQueue
+from .hub import BoundedSemaphore
 
 RUN = 0x1
 CLOSE = 0x2
@@ -90,19 +91,25 @@ class Pool(abstract.StartStopComponent):
         if w.autoscale:
             w.max_concurrency, w.min_concurrency = w.autoscale
 
-    def create(self, w):
+    def create(self, w, semaphore=None, max_restarts=None):
         threaded = not w.use_eventloop
-        forking_enable(w.no_execv or not w.force_execv)
+        forking_enable(not threaded or (w.no_execv or not w.force_execv))
+        procs = w.min_concurrency
+        if not threaded:
+            semaphore = w.semaphore = BoundedSemaphore(procs)
+            max_restarts = 100
         pool = w.pool = self.instantiate(w.pool_cls, w.min_concurrency,
                             initargs=(w.app, w.hostname),
                             maxtasksperchild=w.max_tasks_per_child,
                             timeout=w.task_time_limit,
                             soft_timeout=w.task_soft_time_limit,
-                            putlocks=w.pool_putlocks,
+                            putlocks=w.pool_putlocks and threaded,
                             lost_worker_timeout=w.worker_lost_wait,
                             with_task_thread=threaded,
                             with_result_thread=threaded,
-                            with_supervisor_thread=threaded)
+                            with_supervisor_thread=threaded,
+                            max_restarts=max_restarts,
+                            semaphore=semaphore)
         return pool
 
 
@@ -137,7 +144,12 @@ class Queues(abstract.Component):
             w.disable_rate_limits = True
         if w.disable_rate_limits:
             w.ready_queue = FastQueue()
-            if not w.pool_cls.requires_mediator:
+            if w.use_eventloop:
+                if w.pool_putlocks and w.pool_cls.uses_semaphore:
+                    w.ready_queue.put = w.process_task_sem
+                else:
+                    w.ready_queue.put = w.process_task
+            elif not w.pool_cls.requires_mediator:
                 # just send task directly to pool, skip the mediator.
                 w.ready_queue.put = w.process_task
         else:
@@ -273,6 +285,9 @@ class WorkController(configurated):
         # makes sure all greenthreads have exited.
         self._shutdown_complete.wait()
 
+    def process_task_sem(self, req):
+        return self.semaphore.acquire(self.process_task, req)
+
     def process_task(self, req):
         """Process task by sending it to the pool of workers."""
         try:
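
`process_task_sem` is the semaphore-gated entry point that the Queues
component installs as `ready_queue.put` when the event loop and putlocks
are enabled: each request claims a pool slot via
`BoundedSemaphore.acquire` before `process_task` runs. A toy model of the
flow, assuming the pool releases the semaphore as processes free up (that
side is outside this diff):

    from celery.worker.hub import BoundedSemaphore

    class MiniWorker(object):
        def __init__(self, procs=2):
            self.semaphore = BoundedSemaphore(procs)

        def process_task_sem(self, req):
            # runs process_task now if a slot is free, else queues req
            return self.semaphore.acquire(self.process_task, req)

        def process_task(self, req):
            print("-> pool: %r" % (req, ))

        def on_pool_process_free(self):
            # assumed callback: submits the next waiting request, if any
            self.semaphore.release()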

+ 74 - 31
celery/worker/consumer.py

@@ -83,6 +83,7 @@ import threading
 from time import sleep
 from Queue import Empty
 
+from billiard.exceptions import WorkerLostError
 from kombu.utils.encoding import safe_repr
 
 from celery.app import app_or_default
@@ -172,7 +173,8 @@ class Component(StartStopComponent):
                 pool=w.pool,
                 timer=w.timer,
                 app=w.app,
-                controller=w)
+                controller=w,
+                use_eventloop=w.use_eventloop)
         return c
 
 
@@ -303,7 +305,7 @@ class Consumer(object):
     def __init__(self, ready_queue,
             init_callback=noop, send_events=False, hostname=None,
             initial_prefetch_count=2, pool=None, app=None,
-            timer=None, controller=None, **kwargs):
+            timer=None, controller=None, use_eventloop=False, **kwargs):
         self.app = app_or_default(app)
         self.connection = None
         self.task_consumer = None
@@ -318,6 +320,7 @@ class Consumer(object):
         self.heart = None
         self.pool = pool
         self.timer = timer or timer2.default_timer
+        self.use_eventloop = use_eventloop
         pidbox_state = AttributeDict(app=self.app,
                                      hostname=self.hostname,
                                      listener=self,     # pre 2.2
@@ -331,6 +334,8 @@ class Consumer(object):
 
         self._does_info = logger.isEnabledFor(logging.INFO)
         self.strategies = {}
+        if self.use_eventloop:
+            self.hub = Hub(self.timer)
 
     def update_strategies(self):
         S = self.strategies
@@ -358,23 +363,47 @@ class Consumer(object):
 
     def consume_messages(self, sleep=sleep, min=min, Empty=Empty):
         """Consume messages forever (or until an exception is raised)."""
-        self.task_consumer.consume()
-        debug("Ready to accept tasks!")
 
-        with Hub(self.timer) as hub:
+        with self.hub as hub:
             qos = self.qos
+            concurrency = self.pool.num_processes
             update_qos = qos.update
-            update_fds = hub.update
+            update_readers = hub.update_readers
             fdmap = hub.fdmap
             poll = hub.poller.poll
             fire_timers = hub.fire_timers
             scheduled = hub.timer._queue
             transport = self.connection.transport
             on_poll_start = transport.on_poll_start
-
-            self.task_consumer.callbacks.append(fire_timers)
-
-            update_fds(self.connection.eventmap, self.pool.eventmap)
+            strategies = self.strategies
+            buffer = []
+
+            def flush_buffer():
+                for name, body, message in buffer:
+                    try:
+                        strategies[name](message, body, message.ack_log_error)
+                    except KeyError, exc:
+                        self.handle_unknown_task(body, message, exc)
+                    except InvalidTaskError, exc:
+                        self.handle_invalid_task(body, message, exc)
+                buffer[:] = []
+
+            def on_task_received(body, message):
+                try:
+                    name = body["task"]
+                except (KeyError, TypeError):
+                    return self.handle_unknown_message(body, message)
+                bufferlen = len(buffer)
+                buffer.append((name, body, message))
+                if bufferlen + 1 >= 4:
+                    flush_buffer()
+                if bufferlen:
+                    fire_timers()
+
+            if not self.pool.did_start_ok():
+                raise WorkerLostError("Could not start worker processes")
+
+            update_readers(self.connection.eventmap, self.pool.readers)
             for handler, interval in self.pool.timers.iteritems():
                 self.timer.apply_interval(interval * 1000.0, handler)
 
@@ -387,6 +416,10 @@ class Consumer(object):
             self.pool.on_process_down = on_process_down
 
             transport.on_poll_init(hub.poller)
+            self.task_consumer.callbacks = [on_task_received]
+            self.task_consumer.consume()
+
+            debug("Ready to accept tasks!")
 
             while self._state != CLOSE and self.connection:
                 # shutdown if signal handlers told us to.
@@ -402,18 +435,21 @@ class Consumer(object):
                 if qos.prev != qos.value:
                     update_qos()
 
-                update_fds(on_poll_start())
+                update_readers(on_poll_start())
                 if fdmap:
-                    for fileno, event in poll(time_to_sleep) or ():
-                        try:
-                            fdmap[fileno](fileno, event)
-                        except Empty:
-                            pass
-                        except socket.error:
-                            if self._state != CLOSE:        # pragma: no cover
-                                raise
+                    for timeout in (time_to_sleep, 0.001):
+                        for fileno, event in poll(timeout) or ():
+                            try:
+                                fdmap[fileno](fileno, event)
+                            except Empty:
+                                break
+                            except socket.error:
+                                if self._state != CLOSE:        # pragma: no cover
+                                    raise
+                        if buffer:
+                            flush_buffer()
                 else:
-                    sleep(min(time_to_sleep, 1))
+                    sleep(min(time_to_sleep, 0.1))
 
     def on_task(self, task):
         """Handle received task.
@@ -422,7 +458,6 @@ class Consumer(object):
         otherwise we move it the ready queue for immediate processing.
 
         """
-
         if task.revoked():
             return
 
@@ -475,6 +510,18 @@ class Consumer(object):
                                      safe_repr(message.content_encoding),
                                      safe_repr(message.delivery_info))
 
+    def handle_unknown_message(self, body, message):
+        warn(UNKNOWN_FORMAT, self._message_report(body, message))
+        message.reject_log_error(logger, self.connection_errors)
+
+    def handle_unknown_task(self, body, message, exc):
+        error(UNKNOWN_TASK_ERROR, exc, safe_repr(body), exc_info=True)
+        message.reject_log_error(logger, self.connection_errors)
+
+    def handle_invalid_task(self, body, message, exc):
+        error(INVALID_TASK_ERROR, str(exc), safe_repr(body), exc_info=True)
+        message.reject_log_error(logger, self.connection_errors)
+
     def receive_message(self, body, message):
         """Handles incoming messages.
 
@@ -485,18 +532,14 @@ class Consumer(object):
         try:
             name = body["task"]
         except (KeyError, TypeError):
-            warn(UNKNOWN_FORMAT, self._message_report(body, message))
-            message.reject_log_error(logger, self.connection_errors)
-            return
+            return self.handle_unknown_message(body, message)
 
         try:
             self.strategies[name](message, body, message.ack_log_error)
         except KeyError, exc:
-            error(UNKNOWN_TASK_ERROR, exc, safe_repr(body), exc_info=True)
-            message.reject_log_error(logger, self.connection_errors)
+            self.handle_unknown_task(body, message, exc)
         except InvalidTaskError, exc:
-            error(INVALID_TASK_ERROR, str(exc), safe_repr(body), exc_info=True)
-            message.reject_log_error(logger, self.connection_errors)
+            self.handle_invalid_task(body, message, exc)
 
     def maybe_conn_error(self, fun):
         """Applies function but ignores any connection or channel
@@ -644,9 +687,6 @@ class Consumer(object):
         self.qos = QoS(self.task_consumer, self.initial_prefetch_count)
         self.qos.update()
 
-        # receive_message handles incoming messages.
-        self.task_consumer.register_callback(self.receive_message)
-
         # Setup the process mailbox.
         self.reset_pidbox_node()
 
@@ -745,7 +785,10 @@ class Consumer(object):
 class BlockingConsumer(Consumer):
 
     def consume_messages(self):
+        # receive_message handles incoming messages.
+        self.task_consumer.register_callback(self.receive_message)
         self.task_consumer.consume()
+
         debug("Ready to accept tasks!")
 
         while self._state != CLOSE and self.connection:
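
The event-loop consumer now batches incoming messages: `on_task_received`
buffers up to four before the strategies run, and timer callbacks fire
between batches so long bursts cannot starve the timer. A sketch of the
buffering discipline on its own:

    buffer = []

    def flush(handle):
        for item in buffer:
            handle(item)
        buffer[:] = []  # clear in place

    def on_message(item, handle, fire_timers, limit=4):
        pending = len(buffer)
        buffer.append(item)
        if pending + 1 >= limit:
            flush(handle)
        if pending:
            fire_timers()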

+ 46 - 3
celery/worker/hub.py

@@ -1,11 +1,44 @@
 from __future__ import absolute_import
 
+from collections import deque
+
 from kombu.utils import cached_property
-from kombu.utils.eventio import poll, POLL_READ, POLL_ERR
+from kombu.utils.eventio import poll, POLL_READ, POLL_ERR, POLL_WRITE
 
 from celery.utils.timer2 import Schedule
 
 
+class BoundedSemaphore(object):
+
+    def __init__(self, value=1):
+        self.initial_value = self.value = value
+        self._waiting = []
+
+    def grow(self):
+        self.initial_value += 1
+
+    def shrink(self):
+        self.initial_value -= 1
+
+    def acquire(self, callback, *partial_args):
+        if self.value <= 0:
+            self._waiting.append((callback, partial_args))
+            return False
+        else:
+            self.value = max(self.value - 1, 0)
+            callback(*partial_args)
+            return True
+
+    def release(self):
+        self.value = min(self.value + 1, self.initial_value)
+        if self._waiting:
+            waiter, args = self._waiting.pop()
+            waiter(*args)
+
+    def clear(self):
+        pass
+
+
 class Hub(object):
     eventflags = POLL_READ | POLL_ERR
 
@@ -39,8 +72,17 @@ class Hub(object):
             fileno = fd
         self.fdmap[fileno] = callback
 
-    def update(self, *maps):
-        [self.add(*x) for row in maps for x in row.iteritems()]
+    def add_reader(self, fd, callback):
+        return self.add(fd, callback, POLL_READ|POLL_ERR)
+
+    def add_writer(self, fd, callback):
+        return self.add(fd, callback, POLL_WRITE)
+
+    def update_readers(self, *maps):
+        [self.add_reader(*x) for row in maps for x in row.iteritems()]
+
+    def update_writers(self, *maps):
+        [self.add_writer(*x) for row in maps for x in row.iteritems()]
 
     def remove(self, fd):
         try:
@@ -50,6 +92,7 @@ class Hub(object):
 
     def close(self):
         [self.remove(fd) for fd in self.fdmap.keys()]
+        self.poller.close()
 
     @cached_property
     def scheduler(self):
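
`BoundedSemaphore` here is callback-based rather than blocking, which is
what lets the worker's event loop gate task submission without ever
sleeping: `acquire` either invokes the callback at once or parks it until
`release`. Usage, given the class above:

    def start(job):
        print("starting %r" % (job, ))

    sem = BoundedSemaphore(2)
    sem.acquire(start, 1)   # runs start(1) immediately, value -> 1
    sem.acquire(start, 2)   # runs start(2) immediately, value -> 0
    sem.acquire(start, 3)   # over capacity: queued, returns False
    sem.release()           # frees a slot and runs the queued start(3)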

+ 18 - 3
celery/worker/job.py

@@ -26,7 +26,12 @@ from celery import current_app
 from celery import exceptions
 from celery.app import app_or_default
 from celery.datastructures import ExceptionInfo
-from celery.task.trace import build_tracer, trace_task, report_internal_error
+from celery.task.trace import (
+    build_tracer,
+    trace_task,
+    report_internal_error,
+    execute_bare,
+)
 from celery.platforms import set_mp_process_title as setps
 from celery.utils import fun_takes_kwargs
 from celery.utils.functional import noop
@@ -78,7 +83,7 @@ class Request(object):
                  "on_ack", "delivery_info", "hostname",
                  "callbacks", "errbacks",
                  "eventer", "connection_errors",
-                 "task", "eta", "expires",
+                 "task", "eta", "expires", "bare",
                  "request_dict", "acknowledged", "success_msg",
                  "error_msg", "retry_msg", "time_start", "worker_pid",
                  "_already_revoked", "_terminate_on_ack", "_tzlocal")
@@ -120,6 +125,7 @@ class Request(object):
         eta = body.get("eta")
         expires = body.get("expires")
         utc = body.get("utc", False)
+        self.flags = body.get("flags", 0)
         self.on_ack = on_ack
         self.hostname = hostname or socket.gethostname()
         self.eventer = eventer
@@ -194,10 +200,19 @@ class Request(object):
         :keyword logfile: The logfile used by the task.
 
         """
+        task = self.task
+        if self.flags & 0x004:
+            return pool.apply_async(execute_bare,
+                    args=(self.task, self.id, self.args, self.kwargs),
+                    accept_callback=self.on_accepted,
+                    timeout_callback=self.on_timeout,
+                    callback=self.on_success,
+                    error_callback=self.on_failure,
+                    soft_timeout=task.soft_time_limit,
+                    timeout=task.time_limit)
         if self.revoked():
             return
 
-        task = self.task
         hostname = self.hostname
         kwargs = self.kwargs
         if self.task.accept_magic_kwargs:
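
The literal `0x004` tested above is the `TASK_BARE` bit defined in
celery/app/amqp.py; bare requests bypass the revoked-check and the full
tracer and go straight to `execute_bare`. The same test with the constant
named, as a sketch:

    TASK_BARE = 0x004  # mirrors celery.app.amqp

    def wants_bare_execution(flags):
        return bool(flags & TASK_BARE)

    assert wants_bare_execution(TASK_BARE)
    assert not wants_bare_execution(0)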

+ 3 - 1
celery/worker/mediator.py

@@ -39,7 +39,9 @@ class WorkerComponent(StartStopComponent):
         w.mediator = None
 
     def include_if(self, w):
-        return not w.disable_rate_limits or w.pool_cls.requires_mediator
+        return ((not w.disable_rate_limits or
+                w.pool_cls.requires_mediator) and
+                    not w.use_eventloop)
 
     def create(self, w):
         m = w.mediator = self.instantiate(w.mediator_cls, w.ready_queue,

+ 29 - 6
celery/worker/state.py

@@ -68,28 +68,51 @@ def task_ready(request):
 
 
 if os.environ.get("CELERY_BENCH"):  # pragma: no cover
+    import atexit
+
     from time import time
+    from billiard import current_process
 
     all_count = 0
+    bench_first = None
     bench_start = None
+    bench_last = None
     bench_every = int(os.environ.get("CELERY_BENCH_EVERY", 1000))
+    bench_sample = []
     __reserved = task_reserved
     __ready = task_ready
 
+    if current_process()._name == 'MainProcess':
+        @atexit.register
+        def on_shutdown():
+            if bench_first is not None and bench_last is not None:
+                print("\n- Time spent in benchmark: %r" % (
+                    bench_last - bench_first))
+                print("- Avg: %s" % (sum(bench_sample) / len(bench_sample)))
+
     def task_reserved(request):  # noqa
         global bench_start
+        global bench_first
+        now = None
         if bench_start is None:
-            bench_start = time()
+            bench_start = now = time()
+        if bench_first is None:
+            bench_first = now
+
         return __reserved(request)
 
     def task_ready(request):  # noqa
-        global all_count, bench_start
+        global all_count
+        global bench_start
+        global bench_last
         all_count += 1
         if not all_count % bench_every:
-            print("* Time spent processing %s tasks (since first "
-                    "task received): ~%.4fs\n" % (
-                bench_every, time() - bench_start))
-            bench_start = None
+            now = time()
+            diff = now - bench_start
+            print("- Time spent processing %s tasks (since first "
+                    "task received): ~%.4fs\n" % (bench_every, diff))
+            bench_start, bench_last = None, now
+            bench_sample.append(diff)
 
         return __ready(request)
 

+ 1 - 1
funtests/benchmarks/bench_worker.py

@@ -46,7 +46,7 @@ def tdiff(then):
     return time.time() - then
 
 
-@celery.task(cur=0, time_start=None, queue="bench.worker")
+@celery.task(cur=0, time_start=None, queue="bench.worker", bare=True)
 def it(_, n):
     i = it.cur  # use internal counter, as ordering can be skewed
                 # by previous runs, or the broker.