
Improve how exc_info is passed into loggers.

This adjusts all loggers to pass exc_info=True rather than
sys.exc_info, with the goal of standardizing how logging happens
within Celery. The logging framework itself (as well as things like
Sentry) understands that this means "capture the exc info if you're
going to use it". So in certain cases (maybe logging is disabled for
whatever crazy reason), this could be an optimization.

It also has the benefit that the loggers take care of grabbing the
exc_info, so Celery doesn't need to deal with sys.exc_info/cleanup
unless it explicitly wants to.
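
For illustration only (not part of this commit): a minimal sketch of how the
stdlib logging API treats exc_info=True inside an except block. The risky()
helper and the message text are made up for the example.

    import logging

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger(__name__)

    def risky():
        # hypothetical helper that fails, just to have an active exception
        raise ValueError("boom")

    try:
        risky()
    except Exception as exc:
        # exc_info=True tells the logging framework to call sys.exc_info()
        # itself; if the ERROR level is disabled, the traceback is never
        # captured or formatted, and the caller never has to clean it up.
        logger.error("risky() failed: %r", exc, exc_info=True)

Handlers that know about exc_info (including Sentry's) can then decide for
themselves whether and how to render the traceback.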
David Cramer 13 years ago
commit 2d09e5c1fd

+ 3 - 4
celery/apps/beat.py

@@ -4,7 +4,6 @@ from __future__ import absolute_import
 import atexit
 import socket
 import sys
-import traceback
 
 from .. import __version__, platforms
 from .. import beat
@@ -88,9 +87,9 @@ class Beat(configurated):
             self.install_sync_handler(beat)
             beat.start()
         except Exception, exc:
-            logger.critical("celerybeat raised exception %s: %r\n%s",
-                            exc.__class__, exc, traceback.format_exc(),
-                            exc_info=sys.exc_info())
+            logger.critical("celerybeat raised exception %s: %r",
+                            exc.__class__, exc,
+                            exc_info=True)
 
     def init_loader(self):
         # Run the worker init handler.

+ 1 - 1
celery/beat.py

@@ -179,7 +179,7 @@ class Scheduler(object):
             except Exception, exc:
                 self.logger.error("Message Error: %s\n%s", exc,
                                   traceback.format_stack(),
-                                  exc_info=sys.exc_info())
+                                  exc_info=True)
             else:
                 self.logger.debug("%s sent. id->%s", entry.task,
                                                      result.task_id)

+ 1 - 1
celery/bin/celeryd_detach.py

@@ -28,7 +28,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
             from ..log import setup_logger
             logger = setup_logger(logfile=logfile, loglevel=logging.ERROR)
             logger.critical("Can't exec %r", " ".join([path] + argv),
-                            exc_info=sys.exc_info())
+                            exc_info=True)
 
 
 class PartialOptionParser(OptionParser):

+ 2 - 2
celery/concurrency/processes/pool.py

@@ -76,7 +76,7 @@ def safe_apply_callback(fun, *args):
             fun(*args)
         except BaseException, exc:
             error("Pool callback raised exception: %r", exc,
-                  exc_info=sys.exc_info())
+                  exc_info=True)
 
 
 class LaxBoundedSemaphore(threading._Semaphore):
@@ -239,7 +239,7 @@ class PoolThread(threading.Thread):
             return self.body()
         except Exception, exc:
             error("Thread %r crashed: %r" % (self.__class__.__name__, exc, ),
-                  exc_info=sys.exc_info())
+                  exc_info=True)
             os._exit(1)
 
     def terminate(self):

+ 1 - 1
celery/execute/trace.py

@@ -195,7 +195,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     except Exception, exc:
                         logger = current_app.log.get_default_logger()
                         logger.error("Process cleanup failed: %r", exc,
-                                     exc_info=sys.exc_info())
+                                     exc_info=True)
         except Exception, exc:
             if eager:
                 raise

+ 1 - 1
celery/utils/timer2.py

@@ -229,7 +229,7 @@ class Timer(Thread):
                 pass
         except Exception, exc:
             self.logger.error("Thread Timer crashed: %r", exc,
-                              exc_info=sys.exc_info())
+                              exc_info=True)
             os._exit(1)
 
     def stop(self):

+ 1 - 1
celery/worker/__init__.py

@@ -233,7 +233,7 @@ class WorkController(configurated):
             self.terminate()
         except Exception, exc:
             self.logger.error("Unrecoverable error: %r", exc,
-                              exc_info=sys.exc_info())
+                              exc_info=True)
             self.stop()
         except (KeyboardInterrupt, SystemExit):
             self.stop()

+ 1 - 5
celery/worker/autoscale.py

@@ -17,9 +17,7 @@
 from __future__ import absolute_import
 from __future__ import with_statement
 
-import sys
 import threading
-import traceback
 
 from time import sleep, time
 
@@ -113,9 +111,7 @@ class Autoscaler(bgThread):
             self.logger.debug(
                 "Autoscaler won't scale down: all processes busy.")
         except Exception, exc:
-            self.logger.error("Autoscaler: scale_down: %r\n%r",
-                                exc, traceback.format_stack(),
-                                exc_info=sys.exc_info())
+            self.logger.error("Autoscaler: scale_down: %r", exc, exc_info=True)
 
     def scale_down(self, n):
         if not self._last_action or not n:

+ 6 - 8
celery/worker/consumer.py

@@ -78,9 +78,7 @@ from __future__ import with_statement
 
 import logging
 import socket
-import sys
 import threading
-import traceback
 import warnings
 
 from ..abstract import StartStopComponent
@@ -340,7 +338,7 @@ class Consumer(object):
             except self.connection_errors + self.channel_errors:
                 self.logger.error("Consumer: Connection to broker lost."
                                 + " Trying to re-establish the connection...",
-                                exc_info=sys.exc_info())
+                                exc_info=True)
 
     def consume_messages(self):
         """Consume messages forever (or until an exception is raised)."""
@@ -388,7 +386,7 @@ class Consumer(object):
                 self.logger.error(
                     "Couldn't convert eta %s to timestamp: %r. Task: %r",
                     task.eta, exc, task.info(safe=True),
-                    exc_info=sys.exc_info())
+                    exc_info=True)
                 task.acknowledge()
             else:
                 self.qos.increment()
@@ -406,8 +404,8 @@ class Consumer(object):
             self.logger.error("No such control command: %s", exc)
         except Exception, exc:
             self.logger.error(
-                "Error occurred while handling control command: %r\n%r",
-                    exc, traceback.format_exc(), exc_info=sys.exc_info())
+                "Error occurred while handling control command: %r",
+                    exc, exc_info=True)
             self.reset_pidbox_node()
 
     def apply_eta_task(self, task):
@@ -444,11 +442,11 @@ class Consumer(object):
             self.strategies[name](message, body, message.ack_log_error)
         except KeyError, exc:
             self.logger.error(UNKNOWN_TASK_ERROR, exc, safe_repr(body),
-                              exc_info=sys.exc_info())
+                              exc_info=True)
             message.ack_log_error(self.logger, self.connection_errors)
         except InvalidTaskError, exc:
             self.logger.error(INVALID_TASK_ERROR, str(exc), safe_repr(body),
-                              exc_info=sys.exc_info())
+                              exc_info=True)
             message.ack_log_error(self.logger, self.connection_errors)
 
     def maybe_conn_error(self, fun):

+ 1 - 3
celery/worker/control.py

@@ -11,8 +11,6 @@
 """
 from __future__ import absolute_import
 
-import sys
-
 from datetime import datetime
 
 from ..platforms import signals as _signals
@@ -102,7 +100,7 @@ def rate_limit(panel, task_name, rate_limit, **kwargs):
         tasks[task_name].rate_limit = rate_limit
     except KeyError:
         panel.logger.error("Rate limit attempt for unknown task %s",
-                           task_name, exc_info=sys.exc_info())
+                           task_name, exc_info=True)
         return {"error": "unknown task"}
 
     if not hasattr(panel.consumer.ready_queue, "refresh"):

+ 2 - 5
celery/worker/mediator.py

@@ -19,8 +19,6 @@
 from __future__ import absolute_import
 
 import logging
-import sys
-import traceback
 
 from Queue import Empty
 
@@ -79,9 +77,8 @@ class Mediator(bgThread):
         try:
             self.callback(task)
         except Exception, exc:
-            self.logger.error("Mediator callback raised exception %r\n%s",
-                              exc, traceback.format_exc(),
-                              exc_info=sys.exc_info(),
+            self.logger.error("Mediator callback raised exception %r",
+                              exc, exc_info=True,
                               extra={"data": {"id": task.task_id,
                                               "name": task.task_name,
                                               "hostname": task.hostname}})