Merge branch 'acdha/sentry-friendly-logging'

Ask Solem 13 years ago
parent
commit
b863abc518
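
The pattern repeated throughout the diff below: logging calls that eagerly interpolated their arguments with % are rewritten to pass the arguments to the logger, so the unformatted template is preserved on the LogRecord and aggregating handlers (such as Sentry's, presumably the motivation behind the branch name) can group occurrences by template rather than by each fully formatted string. A minimal illustration of the difference, not part of the commit; the logger name and task id are made up:

    import logging

    logger = logging.getLogger("celery.example")   # hypothetical logger name
    task_id = "9c1f4f2e"

    # Eager: the record's msg is already "Task 9c1f4f2e failed", so every task
    # id produces a distinct message and grouping handlers see unrelated events.
    logger.error("Task %s failed" % task_id)

    # Lazy: msg stays "Task %s failed"; task_id is carried in record.args and
    # interpolated only when a handler actually emits the record.
    logger.error("Task %s failed", task_id)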

+ 4 - 4
celery/apps/beat.py

@@ -80,15 +80,15 @@ class Beat(object):
                   c.blue("        _\n"),
                   c.reset(self.startup_info(beat)))))
         if self.socket_timeout:
-            logger.debug("Setting default socket timeout to %r" % (
-                self.socket_timeout))
+            logger.debug("Setting default socket timeout to %r",
+                         self.socket_timeout)
             socket.setdefaulttimeout(self.socket_timeout)
         try:
             self.install_sync_handler(beat)
             beat.start()
         except Exception, exc:
-            logger.critical("celerybeat raised exception %s: %r\n%s" % (
-                            exc.__class__, exc, traceback.format_exc()),
+            logger.critical("celerybeat raised exception %s: %r\n%s",
+                            exc.__class__, exc, traceback.format_exc(),
                             exc_info=sys.exc_info())
 
     def init_loader(self):

+ 1 - 1
celery/backends/cassandra.py

@@ -100,7 +100,7 @@ class CassandraBackend(BaseDictBackend):
                     Thrift.TException), exc:
                 if time.time() > ts:
                     raise
-                self.logger.warn('Cassandra error: %r. Retrying...' % (exc, ))
+                self.logger.warn('Cassandra error: %r. Retrying...', exc)
                 time.sleep(self._retry_wait)
 
     def _get_column_family(self):

+ 11 - 11
celery/beat.py

@@ -162,15 +162,15 @@ class Scheduler(object):
         is_due, next_time_to_run = entry.is_due()
 
         if is_due:
-            self.logger.debug("Scheduler: Sending due task %s" % entry.task)
+            self.logger.debug("Scheduler: Sending due task %s", entry.task)
             try:
                 result = self.apply_async(entry, publisher=publisher)
             except Exception, exc:
-                self.logger.error("Message Error: %s\n%s" % (exc,
-                    traceback.format_stack()), exc_info=sys.exc_info())
+                self.logger.error("Message Error: %s\n%s", exc,
+                                  traceback.format_stack(),
+                                  exc_info=sys.exc_info())
             else:
-                self.logger.debug("%s sent. id->%s" % (entry.task,
-                                                       result.task_id))
+                self.logger.debug("%s sent. id->%s", entry.task, result.task_id)
         return next_time_to_run
 
     def tick(self):
@@ -281,8 +281,8 @@ class Scheduler(object):
         # callback called for each retry while the connection
         # can't be established.
         def _error_handler(exc, interval):
-            self.logger.error("Celerybeat: Connection error: %s. " % exc
-                            + "Trying again in %s seconds..." % interval)
+            self.logger.error("Celerybeat: Connection error: %s. "
+                              "Trying again in %s seconds...", exc, interval)
 
         return self.connection.ensure_connection(_error_handler,
                     self.app.conf.BROKER_CONNECTION_MAX_RETRIES)
@@ -327,8 +327,8 @@ class PersistentScheduler(Scheduler):
                                                 writeback=True)
             entries = self._store.setdefault("entries", {})
         except Exception, exc:
-            self.logger.error("Removing corrupted schedule file %r: %r" % (
-                self.schedule_filename, exc))
+            self.logger.error("Removing corrupted schedule file %r: %r",
+                              self.schedule_filename, exc, exc_info=True)
             self._remove_db()
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
@@ -380,8 +380,8 @@ class Service(object):
 
     def start(self, embedded_process=False):
         self.logger.info("Celerybeat: Starting...")
-        self.logger.debug("Celerybeat: Ticking with max interval->%s" % (
-                    humanize_seconds(self.scheduler.max_interval)))
+        self.logger.debug("Celerybeat: Ticking with max interval->%s",
+                          humanize_seconds(self.scheduler.max_interval))
 
         signals.beat_init.send(sender=self)
         if embedded_process:
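
A second recurring change, visible in the beat.py hunks above and in several files below, is attaching the exception via exc_info instead of interpolating traceback.format_exc() into the message text. A short sketch of the effect, under assumed names (not code from the commit):

    import logging
    import sys

    logger = logging.getLogger("celery.beat.example")   # hypothetical name

    try:
        raise RuntimeError("boom")
    except Exception as exc:
        # Passing exc_info=True (or the sys.exc_info() tuple) hands the
        # traceback to the handler as structured data; the message template
        # itself stays constant.
        logger.error("Celerybeat raised exception: %r", exc, exc_info=True)
        logger.error("Celerybeat raised exception: %r", exc,
                     exc_info=sys.exc_info())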

+ 2 - 3
celery/bin/celeryd_detach.py

@@ -22,9 +22,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
             import logging
             from celery.log import setup_logger
             logger = setup_logger(logfile=logfile, loglevel=logging.ERROR)
-            logger.critical("Can't exec %r" % (
-                    " ".join([path] + argv), ),
-                    exc_info=sys.exc_info())
+            logger.critical("Can't exec %r", " ".join([path] + argv),
+                            exc_info=sys.exc_info())
 
 
 class PartialOptionParser(OptionParser):

+ 5 - 4
celery/concurrency/base.py

@@ -80,8 +80,8 @@ class BasePool(object):
         on_ready = partial(self.on_ready, callback, errback)
         on_worker_error = partial(self.on_worker_error, errback)
 
-        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)" % (
-            target, args, kwargs))
+        self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)",
+                          target, args, kwargs)
 
         return self.on_apply(target, args, kwargs,
                              callback=on_ready,
@@ -112,8 +112,9 @@ class BasePool(object):
             try:
                 fun(*args)
             except BaseException:
-                self.logger.error("Pool callback raised exception: %s" % (
-                    traceback.format_exc(), ), exc_info=sys.exc_info())
+                self.logger.error("Pool callback raised exception: %s",
+                                  traceback.format_exc(),
+                                  exc_info=sys.exc_info())
 
     def _get_info(self):
         return {}

+ 2 - 2
celery/contrib/batches.py

@@ -56,7 +56,7 @@ def apply_batches_task(task, args, loglevel, logfile):
         result = task(*args)
     except Exception, exp:
         result = None
-        task.logger.error("There was an Exception: %s" % exp)
+        task.logger.error("There was an Exception: %s", exp, exc_info=True)
     finally:
         task.request.clear()
     return result
@@ -167,7 +167,7 @@ class Batches(Task):
                     callback=acks_late[True] and on_return or None)
 
     def debug(self, msg):
-        self.logger.debug("%s: %s" % (self.name, msg))
+        self.logger.debug("%s: %s", self.name, msg)
 
     @cached_property
     def logger(self):

+ 1 - 1
celery/events/snapshot.py

@@ -48,7 +48,7 @@ class Polaroid(object):
 
     def shutter(self):
         if self.maxrate is None or self.maxrate.can_consume():
-            self.logger.debug("Shutter: %s" % (self.state, ))
+            self.logger.debug("Shutter: %s", self.state)
             self.shutter_signal.send(self.state)
             self.on_shutter(self.state)
 

+ 3 - 3
celery/tests/test_app/test_beat.py

@@ -104,7 +104,7 @@ class MockLogger(logging.Logger):
         logging.Logger.__init__(self, *args, **kwargs)
 
     def _log(self, level, msg, args, **kwargs):
-        self.logged.append((level, msg))
+        self.logged.append((level, msg, args, kwargs))
 
 
 class mScheduler(beat.Scheduler):
@@ -189,9 +189,9 @@ class test_Scheduler(unittest.TestCase):
                       schedule=always_due)
         self.assertEqual(scheduler.tick(), 1)
         self.assertTrue(scheduler.logger.logged[0])
-        level, msg = scheduler.logger.logged[0]
+        level, msg, args, kwargs = scheduler.logger.logged[0]
         self.assertEqual(level, logging.ERROR)
-        self.assertIn("Couldn't apply scheduled task", msg)
+        self.assertIn("Couldn't apply scheduled task", args[0].message)
 
     def test_due_tick_RuntimeError(self):
         scheduler = mSchedulerRuntimeError()

+ 2 - 2
celery/tests/test_worker/test_worker.py

@@ -766,8 +766,8 @@ class test_WorkController(AppCase):
             exc_info = sys.exc_info()
 
         worker.on_timer_error(exc_info)
-        logged = worker.logger.error.call_args[0][0]
-        self.assertIn("KeyError", logged)
+        msg, args = worker.logger.error.call_args[0]
+        self.assertIn("KeyError", msg % args)
 
     def test_on_timer_tick(self):
         worker = WorkController(concurrency=1, loglevel=10)
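
Because the message is no longer pre-formatted, the tests above capture the template and its arguments from the mocked logger and re-apply the formatting before asserting. A self-contained sketch of the same idea using unittest.mock (the external mock package in the Python 2 era); the logger name is illustrative:

    import logging
    from unittest import mock

    logger = logging.getLogger("example.worker")

    with mock.patch.object(logger, "error") as error:
        logger.error("Timer error: %r", KeyError("foo"))

        # call_args[0] is the positional-argument tuple: (template, arg1, ...)
        msg, args = error.call_args[0][0], error.call_args[0][1:]
        assert "KeyError" in msg % args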

+ 2 - 2
celery/tests/test_worker/test_worker_job.py

@@ -423,10 +423,10 @@ class test_TaskRequest(unittest.TestCase):
                 self.errors = []
 
             def warning(self, msg, *args, **kwargs):
-                self.warnings.append(msg)
+                self.warnings.append(msg % args)
 
             def error(self, msg, *args, **kwargs):
-                self.errors.append(msg)
+                self.errors.append(msg % args)
 
         tw = TaskRequest(mytask.name, gen_unique_id(), [1], {"f": "x"})
         tw.logger = MockLogger()

+ 2 - 2
celery/utils/timer2.py

@@ -195,8 +195,8 @@ class Timer(Thread):
                 # so gc collected built-in modules.
                 pass
         except Exception, exc:
-            self.logger.error("Thread Timer crashed: %r" % (exc, ),
-                  exc_info=sys.exc_info())
+            self.logger.error("Thread Timer crashed: %r", exc,
+                              exc_info=sys.exc_info())
             os._exit(1)
 
     def stop(self):

+ 8 - 7
celery/worker/__init__.py

@@ -246,8 +246,8 @@ class WorkController(object):
 
         try:
             for i, component in enumerate(self.components):
-                self.logger.debug("Starting thread %s..." % (
-                                        component.__class__.__name__))
+                self.logger.debug("Starting thread %s...",
+                                  component.__class__.__name__)
                 self._running = i + 1
                 blocking(component.start)
         except SystemTerminate:
@@ -267,8 +267,9 @@ class WorkController(object):
             request.task.execute(request, self.pool,
                                  self.loglevel, self.logfile)
         except Exception, exc:
-            self.logger.critical("Internal error %s: %s\n%s" % (
-                            exc.__class__, exc, traceback.format_exc()))
+            self.logger.critical("Internal error %s: %s\n%s",
+                                 exc.__class__, exc, traceback.format_exc(),
+                                 exc_info=True)
         except SystemTerminate:
             self.terminate()
             raise SystemExit()
@@ -298,8 +299,8 @@ class WorkController(object):
         signals.worker_shutdown.send(sender=self)
 
         for component in reversed(self.components):
-            self.logger.debug("%s thread %s..." % (
-                    what, component.__class__.__name__))
+            self.logger.debug("%s thread %s...", what,
+                              component.__class__.__name__)
             stop = component.stop
             if not warm:
                 stop = getattr(component, "terminate", None) or stop
@@ -311,7 +312,7 @@ class WorkController(object):
 
     def on_timer_error(self, exc_info):
         _, exc, _ = exc_info
-        self.logger.error("Timer error: %r" % (exc, ))
+        self.logger.error("Timer error: %r", exc, exc_info=exc_info)
 
     def on_timer_tick(self, delay):
         self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay)

+ 5 - 5
celery/worker/autoscale.py

@@ -71,19 +71,19 @@ class Autoscaler(threading.Thread):
         return self._grow(n)
 
     def _grow(self, n):
-        self.logger.info("Scaling up %s processes." % (n, ))
+        self.logger.info("Scaling up %s processes.", n)
         self.pool.grow(n)
 
     def _shrink(self, n):
-        self.logger.info("Scaling down %s processes." % (n, ))
+        self.logger.info("Scaling down %s processes.", n)
         try:
             self.pool.shrink(n)
         except ValueError:
             self.logger.debug(
                 "Autoscaler won't scale down: all processes busy.")
         except Exception, exc:
-            self.logger.error("Autoscaler: scale_down: %r\n%r" % (
-                                exc, traceback.format_stack()),
+            self.logger.error("Autoscaler: scale_down: %r\n%r",
+                                exc, traceback.format_stack(),
                                 exc_info=sys.exc_info())
 
     def scale_down(self, n):
@@ -99,7 +99,7 @@ class Autoscaler(threading.Thread):
                 self.scale()
                 sleep(1.0)
             except Exception, exc:
-                self.logger.error("Thread Autoscaler crashed: %r" % (exc, ),
+                self.logger.error("Thread Autoscaler crashed: %r", exc,
                                   exc_info=sys.exc_info())
                 os._exit(1)
         self._stopped.set()

+ 23 - 24
celery/worker/consumer.py

@@ -177,11 +177,10 @@ class QoS(object):
         if pcount != self.prev:
             new_value = pcount
             if pcount > PREFETCH_COUNT_MAX:
-                self.logger.warning(
-                    "QoS: Disabled: prefetch_count exceeds %r" % (
-                        PREFETCH_COUNT_MAX, ))
+                self.logger.warning("QoS: Disabled: prefetch_count exceeds %r",
+                                    PREFETCH_COUNT_MAX)
                 new_value = 0
-            self.logger.debug("basic.qos: prefetch_count->%s" % new_value)
+            self.logger.debug("basic.qos: prefetch_count->%s", new_value)
             self.consumer.qos(prefetch_count=new_value)
             self.prev = pcount
         return pcount
@@ -226,7 +225,7 @@ class Consumer(object):
 
     #: The thread that sends event heartbeats at regular intervals.
     #: The heartbeats are used by monitors to detect that a worker
-    #: went off-line/disappeared.
+    #: went offline/disappeared.
     heart = None
 
     #: The logger instance to use.  Defaults to the default Celery logger.
@@ -334,7 +333,7 @@ class Consumer(object):
         if task.revoked():
             return
 
-        self.logger.info("Got task from broker: %s" % (task.shortinfo(), ))
+        self.logger.info("Got task from broker: %s", task.shortinfo())
 
         if self.event_dispatcher.enabled:
             self.event_dispatcher.send("task-received", uuid=task.task_id,
@@ -348,8 +347,8 @@ class Consumer(object):
                 eta = timer2.to_timestamp(task.eta)
             except OverflowError, exc:
                 self.logger.error(
-                    "Couldn't convert eta %s to time stamp: %r. Task: %r" % (
-                        task.eta, exc, task.info(safe=True)),
+                    "Couldn't convert eta %s to timestamp: %r. Task: %r",
+                    task.eta, exc, task.info(safe=True),
                     exc_info=sys.exc_info())
                 task.acknowledge()
             else:
@@ -365,11 +364,11 @@ class Consumer(object):
         try:
             self.pidbox_node.handle_message(body, message)
         except KeyError, exc:
-            self.logger.error("No such control command: %s" % exc)
+            self.logger.error("No such control command: %s", exc)
         except Exception, exc:
             self.logger.error(
-                "Error occurred while handling control command: %r\n%r" % (
-                    exc, traceback.format_exc()), exc_info=sys.exc_info())
+                "Error occurred while handling control command: %r\n%r",
+                    exc, traceback.format_exc(), exc_info=sys.exc_info())
             self.reset_pidbox_node()
 
     def apply_eta_task(self, task):
@@ -392,15 +391,15 @@ class Consumer(object):
         :param message: The kombu message object.
 
         """
-        # need to guard against errors occuring while acking the message.
+        # need to guard against errors occurring while acking the message.
         def ack():
             try:
                 message.ack()
             except self.connection_errors + (AttributeError, ), exc:
                 self.logger.critical(
-                    "Couldn't ack %r: %s reason:%r" % (
+                    "Couldn't ack %r: %s reason:%r",
                         message.delivery_tag,
-                        self._message_report(body, message), exc))
+                        self._message_report(body, message), exc)
 
         try:
             body["task"]
@@ -420,12 +419,12 @@ class Consumer(object):
                                             eventer=self.event_dispatcher)
 
         except NotRegistered, exc:
-            self.logger.error(UNKNOWN_TASK_ERROR % (
-                    exc, safe_repr(body)), exc_info=sys.exc_info())
+            self.logger.error(UNKNOWN_TASK_ERROR, exc, safe_repr(body),
+                              exc_info=sys.exc_info())
             ack()
         except InvalidTaskError, exc:
-            self.logger.error(INVALID_TASK_ERROR % (
-                    str(exc), safe_repr(body)), exc_info=sys.exc_info())
+            self.logger.error(INVALID_TASK_ERROR, str(exc), safe_repr(body),
+                              exc_info=sys.exc_info())
             ack()
         else:
             self.on_task(task)
@@ -500,9 +499,9 @@ class Consumer(object):
 
         """
         self.logger.critical(
-            "Can't decode message body: %r (type:%r encoding:%r raw:%r')" % (
+            "Can't decode message body: %r (type:%r encoding:%r raw:%r')",
                     exc, message.content_type, message.content_encoding,
-                    safe_repr(message.body)))
+                    safe_repr(message.body))
         message.ack()
 
     def reset_pidbox_node(self):
@@ -558,7 +557,7 @@ class Consumer(object):
                        self.initial_prefetch_count, self.logger)
         self.qos.update()
 
-        # receive_message handles incoming messages.
+        # receive_message handles incoming messages.
         self.task_consumer.register_callback(self.receive_message)
 
         # Setup the process mailbox.
@@ -600,8 +599,8 @@ class Consumer(object):
         # Callback called for each retry while the connection
         # can't be established.
         def _error_handler(exc, interval):
-            self.logger.error("Consumer: Connection Error: %s. " % exc
-                            + "Trying again in %d seconds..." % interval)
+            self.logger.error("Consumer: Connection Error: %s. "
+                              "Trying again in %d seconds...", exc, interval)
 
         # remember that the connection is lazy, it won't establish
         # until it's needed.
@@ -643,4 +642,4 @@ class Consumer(object):
         return {"broker": conninfo, "prefetch_count": self.qos.value}
 
     def _debug(self, msg, **kwargs):
-        self.logger.debug("Consumer: %s" % (msg, ), **kwargs)
+        self.logger.debug("Consumer: %s", msg, **kwargs)

+ 17 - 19
celery/worker/control/builtins.py

@@ -26,7 +26,7 @@ def revoke(panel, task_id, terminate=False, signal=None, **kwargs):
                 request.terminate(panel.consumer.pool, signal=signum)
                 break
 
-    panel.logger.info("Task %s %s." % (task_id, action))
+    panel.logger.info("Task %s %s.", task_id, action)
     return {"ok": "task %s %s" % (task_id, action)}
 
 
@@ -78,8 +78,8 @@ def rate_limit(panel, task_name, rate_limit, **kwargs):
     try:
         tasks[task_name].rate_limit = rate_limit
     except KeyError:
-        panel.logger.error("Rate limit attempt for unknown task %s" % (
-            task_name, ), exc_info=sys.exc_info())
+        panel.logger.error("Rate limit attempt for unknown task %s",
+                           task_name, exc_info=sys.exc_info())
         return {"error": "unknown task"}
 
     if not hasattr(panel.consumer.ready_queue, "refresh"):
@@ -89,12 +89,12 @@ def rate_limit(panel, task_name, rate_limit, **kwargs):
     panel.consumer.ready_queue.refresh()
 
     if not rate_limit:
-        panel.logger.info("Disabled rate limits for tasks of type %s" % (
-                            task_name, ))
+        panel.logger.info("Disabled rate limits for tasks of type %s",
+                          task_name)
         return {"ok": "rate limit disabled successfully"}
 
-    panel.logger.info("New rate limit for tasks of type %s: %s." % (
-                task_name, rate_limit))
+    panel.logger.info("New rate limit for tasks of type %s: %s.",
+                      task_name, rate_limit)
     return {"ok": "new rate limit set successfully"}
 
 
@@ -103,16 +103,15 @@ def time_limit(panel, task_name=None, hard=None, soft=None, **kwargs):
     try:
         task = tasks[task_name]
     except KeyError:
-        panel.logger.error(
-            "Change time limit attempt for unknown task %s" % (task_name, ))
+        panel.logger.error("Change time limit attempt for unknown task %s",
+                           task_name, exc_info=True)
         return {"error": "unknown task"}
 
     task.soft_time_limit = soft
     task.time_limit = hard
 
-    panel.logger.info(
-        "New time limits for tasks of type %s: soft=%s hard=%s" % (
-            task_name, soft, hard))
+    panel.logger.info("New time limits for tasks of type %s: soft=%s hard=%s",
+                      task_name, soft, hard)
     return {"ok": "time limits set successfully"}
 
 
@@ -128,8 +127,7 @@ def dump_schedule(panel, safe=False, **kwargs):
             item["priority"],
             item["item"])
     info = map(formatitem, enumerate(schedule.info()))
-    panel.logger.debug("* Dump of current schedule:\n%s" % (
-                            "\n".join(info, )))
+    panel.logger.debug("* Dump of current schedule:\n%s", "\n".join(info))
     scheduled_tasks = []
     for item in schedule.info():
         scheduled_tasks.append({"eta": item["eta"],
@@ -146,8 +144,8 @@ def dump_reserved(panel, safe=False, **kwargs):
     if not reserved:
         panel.logger.info("--Empty queue--")
         return []
-    panel.logger.debug("* Dump of currently reserved tasks:\n%s" % (
-                            "\n".join(map(safe_repr, reserved), )))
+    panel.logger.debug("* Dump of currently reserved tasks:\n%s",
+                       "\n".join(map(safe_repr, reserved)))
     return [request.info(safe=safe)
             for request in reserved]
 
@@ -188,8 +186,8 @@ def dump_tasks(panel, **kwargs):
 
     info = map(_extract_info, (tasks[task]
                                         for task in sorted(tasks.keys())))
-    panel.logger.debug("* Dump of currently registered tasks:\n%s" % (
-                    "\n".join(info)))
+    panel.logger.debug("* Dump of currently registered tasks:\n%s",
+                       "\n".join(info))
 
     return info
 
@@ -244,7 +242,7 @@ def add_consumer(panel, queue=None, exchange=None, exchange_type="direct",
                            **options)
         cset.add_consumer_from_dict(**declaration)
         cset.consume()
-        panel.logger.info("Started consuming from %r" % (declaration, ))
+        panel.logger.info("Started consuming from %r", declaration)
         return {"ok": "started consuming from %s" % (queue, )}
     else:
         return {"ok": "already consuming from %s" % (queue, )}

+ 20 - 19
celery/worker/job.py

@@ -131,7 +131,7 @@ class WorkerTaskTrace(TaskTrace):
                 raise
             except Exception, exc:
                 logger = current_app.log.get_default_logger()
-                logger.error("Process cleanup failed: %r" % (exc, ),
+                logger.error("Process cleanup failed: %r", exc,
                              exc_info=sys.exc_info())
 
     def handle_success(self, retval, *args):
@@ -410,8 +410,8 @@ class TaskRequest(object):
         if self.expires:
             self.maybe_expire()
         if self.task_id in state.revoked:
-            self.logger.warn("Skipping revoked task: %s[%s]" % (
-                self.task_name, self.task_id))
+            self.logger.warn("Skipping revoked task: %s[%s]",
+                             self.task_name, self.task_id)
             self.send_event("task-revoked", uuid=self.task_id)
             self.acknowledge()
             self._already_revoked = True
@@ -430,8 +430,8 @@ class TaskRequest(object):
         if not self.task.acks_late:
             self.acknowledge()
         self.send_event("task-started", uuid=self.task_id, pid=pid)
-        self.logger.debug("Task accepted: %s[%s] pid:%r" % (
-            self.task_name, self.task_id, pid))
+        self.logger.debug("Task accepted: %s[%s] pid:%r",
+                          self.task_name, self.task_id, pid)
         if self._terminate_on_ack is not None:
             _, pool, signal = self._terminate_on_ack
             self.terminate(pool, signal)
@@ -440,12 +440,12 @@ class TaskRequest(object):
         """Handler called if the task times out."""
         state.task_ready(self)
         if soft:
-            self.logger.warning("Soft time limit (%ss) exceeded for %s[%s]" % (
-                timeout, self.task_name, self.task_id))
+            self.logger.warning("Soft time limit (%ss) exceeded for %s[%s]",
+                                timeout, self.task_name, self.task_id)
             exc = exceptions.SoftTimeLimitExceeded(timeout)
         else:
-            self.logger.error("Hard time limit (%ss) exceeded for %s[%s]" % (
-                timeout, self.task_name, self.task_id))
+            self.logger.error("Hard time limit (%ss) exceeded for %s[%s]",
+                              timeout, self.task_name, self.task_id)
             exc = exceptions.TimeLimitExceeded(timeout)
 
         if self._store_errors:
@@ -462,11 +462,11 @@ class TaskRequest(object):
         self.send_event("task-succeeded", uuid=self.task_id,
                         result=safe_repr(ret_value), runtime=runtime)
 
-        self.logger.info(self.success_msg.strip() % {
-                            "id": self.task_id,
-                            "name": self.task_name,
-                            "return_value": self.repr_result(ret_value),
-                            "runtime": runtime})
+        self.logger.info(self.success_msg.strip(),
+                         {"id": self.task_id,
+                          "name": self.task_name,
+                          "return_value": self.repr_result(ret_value),
+                          "runtime": runtime})
 
     def on_retry(self, exc_info):
         """Handler called if the task should be retried."""
@@ -474,10 +474,11 @@ class TaskRequest(object):
                          exception=safe_repr(exc_info.exception.exc),
                          traceback=safe_str(exc_info.traceback))
 
-        self.logger.info(self.retry_msg.strip() % {
-                            "id": self.task_id,
-                            "name": self.task_name,
-                            "exc": safe_repr(exc_info.exception.exc)})
+        self.logger.info(self.retry_msg.strip(),
+                         {"id": self.task_id,
+                         "name": self.task_name,
+                         "exc": safe_repr(exc_info.exception.exc)},
+                         exc_info=exc_info)
 
     def on_failure(self, exc_info):
         """Handler called if the task raised an exception."""
@@ -507,7 +508,7 @@ class TaskRequest(object):
                    "args": self.args,
                    "kwargs": self.kwargs}
 
-        self.logger.error(self.error_msg.strip() % context,
+        self.logger.error(self.error_msg.strip(), context,
                           exc_info=exc_info,
                           extra={"data": {"id": self.task_id,
                                           "name": self.task_name,

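The success/retry/failure hunks above switch to passing a single mapping as the logging argument; the standard library substitutes %(name)s-style placeholders from that mapping only when the record is formatted, so the template stays constant while the per-task values travel in record.args. A standalone sketch with made-up values, not celery's actual messages:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("example.worker")

    success_msg = "Task %(name)s[%(id)s] succeeded in %(runtime)ss"

    # A single dict argument is used by LogRecord for named %-substitution.
    logger.info(success_msg, {"name": "tasks.add",
                              "id": "9f2c4a10",
                              "runtime": 0.02})
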
+ 3 - 4
celery/worker/mediator.py

@@ -44,8 +44,8 @@ class Mediator(threading.Thread):
         try:
             self.callback(task)
         except Exception, exc:
-            self.logger.error("Mediator callback raised exception %r\n%s" % (
-                                exc, traceback.format_exc()),
+            self.logger.error("Mediator callback raised exception %r\n%s",
+                              exc, traceback.format_exc(),
                               exc_info=sys.exc_info(),
                               extra={"data": {"id": task.task_id,
                                               "name": task.task_name,
@@ -57,8 +57,7 @@ class Mediator(threading.Thread):
             try:
                 self.move()
             except Exception, exc:
-                self.logger.error("Mediator crash: %r" % (exc, ),
-                    exc_info=sys.exc_info())
+                self.logger.error("Mediator crash: %r", exc, exc_info=True)
                 # exiting by normal means does not work here, so force exit.
                 os._exit(1)
         self._stopped.set()