# log.py
  1. # -*- coding: utf-8 -*-
  2. from __future__ import absolute_import
  3. import logging
  4. import os
  5. import sys
  6. import threading
  7. import traceback
  8. from billiard import current_process, util as mputil
  9. from kombu.log import get_logger as _get_logger, LOG_LEVELS
  10. from .encoding import safe_str, str_t
  11. from .term import colored
# True once ensure_process_aware_logger() has installed its logger class.
_process_aware = False
is_py3k = sys.version_info[0] == 3
# Environment toggle: enables the multiprocessing (billiard) logger
# in get_multiprocessing_logger().
MP_LOG = os.environ.get("MP_LOG", False)

# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger("celery")
mp_logger = _get_logger("multiprocessing")

# Flag flipped by set_in_sighandler(); when true, logging is suppressed
# (see _patch_logger_class below) and writes go to sys.__stderr__.
in_sighandler = False
  23. def set_in_sighandler(value):
  24. global in_sighandler
  25. in_sighandler = value
  26. def get_logger(name):
  27. l = _get_logger(name)
  28. if logging.root not in (l, l.parent) and l is not base_logger:
  29. l.parent = base_logger
  30. return l
  31. task_logger = get_logger("celery.task")
  32. def mlevel(level):
  33. if level and not isinstance(level, int):
  34. return LOG_LEVELS[level.upper()]
  35. return level
  36. class ColorFormatter(logging.Formatter):
  37. #: Loglevel -> Color mapping.
  38. COLORS = colored().names
  39. colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"],
  40. "ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]}
  41. def __init__(self, fmt=None, use_color=True):
  42. logging.Formatter.__init__(self, fmt)
  43. self.use_color = use_color
  44. def formatException(self, ei):
  45. r = logging.Formatter.formatException(self, ei)
  46. if isinstance(r, str) and not is_py3k:
  47. return safe_str(r)
  48. return r
  49. def format(self, record):
  50. levelname = record.levelname
  51. color = self.colors.get(levelname)
  52. if self.use_color and color:
  53. try:
  54. record.msg = safe_str(str_t(color(record.msg)))
  55. except Exception, exc:
  56. record.msg = "<Unrepresentable %r: %r>" % (
  57. type(record.msg), exc)
  58. record.exc_info = True
  59. if not is_py3k and "processName" not in record.__dict__:
  60. # Very ugly, but have to make sure processName is supported
  61. # by foreign logger instances.
  62. # (processName is always supported by Python 2.7)
  63. process_name = current_process and current_process()._name or ""
  64. record.__dict__["processName"] = process_name
  65. return safe_str(logging.Formatter.format(self, record))
  66. class LoggingProxy(object):
  67. """Forward file object to :class:`logging.Logger` instance.
  68. :param logger: The :class:`logging.Logger` instance to forward to.
  69. :param loglevel: Loglevel to use when writing messages.
  70. """
  71. mode = "w"
  72. name = None
  73. closed = False
  74. loglevel = logging.ERROR
  75. _thread = threading.local()
  76. def __init__(self, logger, loglevel=None):
  77. self.logger = logger
  78. self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
  79. self._safewrap_handlers()
  80. def _safewrap_handlers(self):
  81. """Make the logger handlers dump internal errors to
  82. `sys.__stderr__` instead of `sys.stderr` to circumvent
  83. infinite loops."""
  84. def wrap_handler(handler): # pragma: no cover
  85. class WithSafeHandleError(logging.Handler):
  86. def handleError(self, record):
  87. exc_info = sys.exc_info()
  88. try:
  89. try:
  90. traceback.print_exception(exc_info[0],
  91. exc_info[1],
  92. exc_info[2],
  93. None, sys.__stderr__)
  94. except IOError:
  95. pass # see python issue 5971
  96. finally:
  97. del(exc_info)
  98. handler.handleError = WithSafeHandleError().handleError
  99. return map(wrap_handler, self.logger.handlers)
  100. def write(self, data):
  101. """Write message to logging object."""
  102. if in_sighandler:
  103. return sys.__stderr__.write(safe_str(data))
  104. if getattr(self._thread, "recurse_protection", False):
  105. # Logger is logging back to this file, so stop recursing.
  106. return
  107. data = data.strip()
  108. if data and not self.closed:
  109. self._thread.recurse_protection = True
  110. try:
  111. self.logger.log(self.loglevel, safe_str(data))
  112. finally:
  113. self._thread.recurse_protection = False
  114. def writelines(self, sequence):
  115. """`writelines(sequence_of_strings) -> None`.
  116. Write the strings to the file.
  117. The sequence can be any iterable object producing strings.
  118. This is equivalent to calling :meth:`write` for each string.
  119. """
  120. for part in sequence:
  121. self.write(part)
  122. def flush(self):
  123. """This object is not buffered so any :meth:`flush` requests
  124. are ignored."""
  125. pass
  126. def close(self):
  127. """When the object is closed, no write requests are forwarded to
  128. the logging object anymore."""
  129. self.closed = True
  130. def isatty(self):
  131. """Always returns :const:`False`. Just here for file support."""
  132. return False
  133. def fileno(self):
  134. return None
def ensure_process_aware_logger():
    """Make sure process name is recorded when loggers are used."""
    global _process_aware
    if not _process_aware:
        # Same lock the logging module uses internally when changing
        # the global logger class.
        logging._acquireLock()
        try:
            # Set the flag first so re-entrant calls short-circuit at
            # the `if not _process_aware` check above.
            _process_aware = True
            Logger = logging.getLoggerClass()
            if getattr(Logger, '_process_aware', False):  # pragma: no cover
                return
            class ProcessAwareLogger(Logger):
                _process_aware = True
                def makeRecord(self, *args, **kwds):
                    record = Logger.makeRecord(self, *args, **kwds)
                    # Attach the billiard process name to every record.
                    record.processName = current_process()._name
                    return record
            logging.setLoggerClass(ProcessAwareLogger)
        finally:
            logging._releaseLock()
  154. def get_multiprocessing_logger():
  155. return mputil.get_logger() if mputil and MP_LOG else None
  156. def reset_multiprocessing_logger():
  157. if mputil and hasattr(mputil, "_logger"):
  158. mputil._logger = None
  159. def _patch_logger_class():
  160. """Make sure loggers don't log while in a signal handler."""
  161. logging._acquireLock()
  162. try:
  163. OldLoggerClass = logging.getLoggerClass()
  164. if not getattr(OldLoggerClass, '_signal_safe', False):
  165. class SigSafeLogger(OldLoggerClass):
  166. _signal_safe = True
  167. def log(self, *args, **kwargs):
  168. if in_sighandler:
  169. sys.__stderr__.write("IN SIGHANDLER WON'T LOG")
  170. return
  171. return OldLoggerClass.log(self, *args, **kwargs)
  172. logging.setLoggerClass(SigSafeLogger)
  173. finally:
  174. logging._releaseLock()
  175. _patch_logger_class()