  1. """celery.log"""
  2. from __future__ import absolute_import
  3. import logging
  4. import threading
  5. import sys
  6. import traceback
  7. try:
  8. from multiprocessing import current_process
  9. from multiprocessing import util as mputil
  10. except ImportError:
  11. current_process = mputil = None # noqa
  12. from celery import signals
  13. from celery import current_app
  14. from celery.utils import LOG_LEVELS, isatty
  15. from celery.utils.compat import LoggerAdapter
  16. from celery.utils.compat import WatchedFileHandler
  17. from celery.utils.encoding import safe_str
  18. from celery.utils.patch import ensure_process_aware_logger
  19. from celery.utils.term import colored


class ColorFormatter(logging.Formatter):
    #: Loglevel -> Color mapping.
    COLORS = colored().names
    colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"],
              "ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]}

    def __init__(self, msg, use_color=True):
        logging.Formatter.__init__(self, msg)
        self.use_color = use_color

    def formatException(self, ei):
        r = logging.Formatter.formatException(self, ei)
        if isinstance(r, str):
            return r.decode("utf-8", "replace")  # Convert to unicode
        return r

    def format(self, record):
        levelname = record.levelname
        color = self.colors.get(levelname)

        if self.use_color and color:
            try:
                record.msg = color(safe_str(record.msg))
            except Exception:
                record.msg = "<Unrepresentable %r: %r>" % (
                        type(record.msg), traceback.format_stack())

        # Very ugly, but have to make sure processName is supported
        # by foreign logger instances.
        # (processName is always supported by Python 2.7)
        if "processName" not in record.__dict__:
            process_name = current_process and current_process()._name or ""
            record.__dict__["processName"] = process_name
        t = logging.Formatter.format(self, record)
        if isinstance(t, unicode):
            return t.encode("utf-8", "replace")
        return t
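

# --- Illustrative sketch (not part of the original module) ---
# A minimal example of wiring ColorFormatter onto a plain stream handler.
# The logger name "demo.color" and the format string are assumptions made
# only for this sketch; they do not come from the Celery configuration.
def _example_colored_console_logger():
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(ColorFormatter("[%(levelname)s] %(message)s",
                                        use_color=isatty(sys.stderr)))
    logger = logging.getLogger("demo.color")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger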


class Logging(object):
    #: The logging subsystem is only configured once per process.
    #: setup_logging_subsystem sets this flag, and subsequent calls
    #: will do nothing.
    _setup = False

    def __init__(self, app):
        self.app = app
        self.loglevel = self.app.conf.CELERYD_LOG_LEVEL
        self.format = self.app.conf.CELERYD_LOG_FORMAT
        self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
        self.colorize = self.app.conf.CELERYD_LOG_COLOR

    def supports_color(self, logfile=None):
        if self.app.IS_WINDOWS:
            # Windows does not support ANSI color codes.
            return False
        if self.colorize is None:
            # Only use color if there is no active log file
            # and stderr is an actual terminal.
            return logfile is None and isatty(sys.stderr)
        return self.colorize

    def colored(self, logfile=None):
        return colored(enabled=self.supports_color(logfile))

    def get_task_logger(self, loglevel=None, name=None):
        logger = logging.getLogger(name or "celery.task.default")
        if loglevel is not None:
            logger.setLevel(loglevel)
        return logger

    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
                signals.after_setup_logger.send(sender=None, logger=logger,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        Logging._setup = True
        return receivers

    def _detect_handler(self, logfile=None):
        """Create log handler with either a filename, an open stream
        or :const:`None` (stderr)."""
        logfile = sys.__stderr__ if logfile is None else logfile
        if hasattr(logfile, "write"):
            return logging.StreamHandler(logfile)
        return WatchedFileHandler(logfile)

    def get_default_logger(self, loglevel=None, name="celery"):
        """Get default logger instance.

        :keyword loglevel: Initial log level.

        """
        logger = logging.getLogger(name)
        if loglevel is not None:
            logger.setLevel(loglevel)
        return logger

    def setup_logger(self, loglevel=None, logfile=None,
            format=None, colorize=None, name="celery", root=True,
            app=None, **kwargs):
        """Setup the :mod:`multiprocessing` logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if not root or self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            return self._setup_logger(self.get_default_logger(loglevel, name),
                                      logfile, format, colorize, **kwargs)
        self.setup_logging_subsystem(loglevel, logfile,
                                     format, colorize, **kwargs)
        return self.get_default_logger(name=name)

    def setup_task_logger(self, loglevel=None, logfile=None, format=None,
            colorize=None, task_name=None, task_id=None, propagate=False,
            app=None, **kwargs):
        """Setup the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = loglevel or self.loglevel
        format = format or self.task_format
        if colorize is None:
            colorize = self.supports_color(logfile)

        logger = self._setup_logger(self.get_task_logger(loglevel, task_name),
                                    logfile, format, colorize, **kwargs)
        logger.propagate = int(propagate)  # this is an int for some reason.
                                           # better to not question why.
        signals.after_setup_task_logger.send(sender=None, logger=logger,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        return LoggerAdapter(logger, {"task_id": task_id,
                                      "task_name": task_name})

    def redirect_stdouts_to_logger(self, logger, loglevel=None):
        """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
        logging instance.

        :param logger: The :class:`logging.Logger` instance to redirect to.
        :param loglevel: The loglevel redirected messages will be logged as.

        """
        proxy = LoggingProxy(logger, loglevel)
        sys.stdout = sys.stderr = proxy
        return proxy

    def _setup_logger(self, logger, logfile, format, colorize,
            formatter=ColorFormatter, **kwargs):
        if logger.handlers:  # Logger already configured
            return logger

        handler = self._detect_handler(logfile)
        handler.setFormatter(formatter(format, use_color=colorize))
        logger.addHandler(handler)
        return logger


setup_logging_subsystem = current_app.log.setup_logging_subsystem
get_default_logger = current_app.log.get_default_logger
setup_logger = current_app.log.setup_logger
setup_task_logger = current_app.log.setup_task_logger
get_task_logger = current_app.log.get_task_logger
redirect_stdouts_to_logger = current_app.log.redirect_stdouts_to_logger
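

# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the module-level shortcuts above: configure the
# logging subsystem once, then fetch the default "celery" logger.  The
# loglevel and format string are assumptions made only for this sketch.
def _example_setup_default_logging():
    setup_logging_subsystem(loglevel=logging.INFO,
                            format="[%(asctime)s: %(levelname)s] %(message)s")
    logger = get_default_logger(name="celery")
    logger.info("logging subsystem configured")
    return logger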


class LoggingProxy(object):
    """Forward file object to :class:`logging.Logger` instance.

    :param logger: The :class:`logging.Logger` instance to forward to.
    :param loglevel: Loglevel to use when writing messages.

    """
    mode = "w"
    name = None
    closed = False
    loglevel = logging.ERROR
    _thread = threading.local()

    def __init__(self, logger, loglevel=None):
        self.logger = logger
        self.loglevel = loglevel or self.logger.level or self.loglevel
        if not isinstance(self.loglevel, int):
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        """Make the logger handlers dump internal errors to
        `sys.__stderr__` instead of `sys.stderr` to circumvent
        infinite loops."""

        def wrap_handler(handler):  # pragma: no cover

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    exc_info = sys.exc_info()
                    try:
                        try:
                            traceback.print_exception(exc_info[0],
                                                      exc_info[1],
                                                      exc_info[2],
                                                      None, sys.__stderr__)
                        except IOError:
                            pass    # see python issue 5971
                    finally:
                        del(exc_info)

            handler.handleError = WithSafeHandleError().handleError

        return map(wrap_handler, self.logger.handlers)

    def write(self, data):
        """Write message to logging object."""
        if getattr(self._thread, "recurse_protection", False):
            # Logger is logging back to this file, so stop recursing.
            return
        data = data.strip()
        if data and not self.closed:
            self._thread.recurse_protection = True
            try:
                self.logger.log(self.loglevel, data)
            finally:
                self._thread.recurse_protection = False

    def writelines(self, sequence):
        """`writelines(sequence_of_strings) -> None`.

        Write the strings to the file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.

        """
        for part in sequence:
            self.write(part)

    def flush(self):
        """This object is not buffered so any :meth:`flush` requests
        are ignored."""
        pass

    def close(self):
        """When the object is closed, no write requests are forwarded to
        the logging object anymore."""
        self.closed = True

    def isatty(self):
        """Always returns :const:`False`. Just here for file support."""
        return False

    def fileno(self):
        return None
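

# --- Illustrative sketch (not part of the original module) ---
# A minimal example of routing stray ``print`` output through LoggingProxy.
# The logger name "demo.stdout" and the WARNING level are assumptions made
# only for this sketch.
def _example_redirect_stdout():
    logger = logging.getLogger("demo.stdout")
    logger.addHandler(logging.StreamHandler(sys.__stderr__))
    proxy = LoggingProxy(logger, loglevel=logging.WARNING)
    sys.stdout = proxy
    print("this line is forwarded to the logger")
    sys.stdout = sys.__stdout__    # restore the real stdout afterwards
    return proxy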


class SilenceRepeated(object):
    """Only log action every n iterations."""

    def __init__(self, action, max_iterations=10):
        self.action = action
        self.max_iterations = max_iterations
        self._iterations = 0

    def __call__(self, *msgs):
        if not self._iterations or self._iterations >= self.max_iterations:
            for msg in msgs:
                self.action(msg)
            # Start a new silence window.  Resetting to 0 here would make
            # ``not self._iterations`` true again on the very next call,
            # so every call would be logged, contradicting the docstring.
            self._iterations = 1
        else:
            self._iterations += 1
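

# --- Illustrative sketch (not part of the original module) ---
# A minimal example of SilenceRepeated: wrap a logging callable so that a
# message emitted on every loop iteration is only logged on roughly every
# tenth call.  The logger name "demo.loop" is an assumption made only for
# this sketch.
def _example_silence_repeated():
    logger = logging.getLogger("demo.loop")
    throttled_warn = SilenceRepeated(logger.warning, max_iterations=10)
    for _ in range(100):
        throttled_warn("still waiting for the broker")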