# celery/utils/log.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. celery.utils.log
  4. ~~~~~~~~~~~~~~~~
  5. Logging utilities.
  6. """
  7. from __future__ import absolute_import, print_function
  8. import logging
  9. import os
  10. import sys
  11. import threading
  12. import traceback
  13. from contextlib import contextmanager
  14. from billiard import current_process, util as mputil
  15. from kombu.log import get_logger as _get_logger, LOG_LEVELS
  16. from celery.five import string_t
  17. from .encoding import safe_str, str_t
  18. from .term import colored
  19. __all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
  20. 'set_in_sighandler', 'in_sighandler', 'get_logger',
  21. 'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
  22. 'get_multiprocessing_logger', 'reset_multiprocessing_logger']
  23. _process_aware = False
  24. PY3 = sys.version_info[0] == 3
  25. MP_LOG = os.environ.get('MP_LOG', False)
  26. # Sets up our logging hierarchy.
  27. #
  28. # Every logger in the celery package inherits from the "celery"
  29. # logger, and every task logger inherits from the "celery.task"
  30. # logger.
  31. base_logger = logger = _get_logger('celery')
  32. mp_logger = _get_logger('multiprocessing')
  33. _in_sighandler = False
  34. def set_in_sighandler(value):
  35. global _in_sighandler
  36. _in_sighandler = value
  37. @contextmanager
  38. def in_sighandler():
  39. set_in_sighandler(True)
  40. try:
  41. yield
  42. finally:
  43. set_in_sighandler(False)
  44. def get_logger(name):
  45. l = _get_logger(name)
  46. if logging.root not in (l, l.parent) and l is not base_logger:
  47. l.parent = base_logger
  48. return l
  49. task_logger = get_logger('celery.task')
  50. worker_logger = get_logger('celery.worker')
  51. def get_task_logger(name):
  52. logger = get_logger(name)
  53. if logger.parent is logging.root:
  54. logger.parent = task_logger
  55. return logger
  56. def mlevel(level):
  57. if level and not isinstance(level, int):
  58. return LOG_LEVELS[level.upper()]
  59. return level
  60. class ColorFormatter(logging.Formatter):
  61. #: Loglevel -> Color mapping.
  62. COLORS = colored().names
  63. colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
  64. 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
  65. def __init__(self, fmt=None, use_color=True):
  66. logging.Formatter.__init__(self, fmt)
  67. self.use_color = use_color
  68. def formatException(self, ei):
  69. if ei and not isinstance(ei, tuple):
  70. ei = sys.exc_info()
  71. r = logging.Formatter.formatException(self, ei)
  72. if isinstance(r, str) and not PY3:
  73. return safe_str(r)
  74. return r
  75. def format(self, record):
  76. color = self.colors.get(record.levelname)
  77. if color and self.use_color:
  78. msg = record.msg
  79. try:
  80. # safe_str will repr the color object
  81. # and color will break on non-string objects
  82. # so need to reorder calls based on type.
  83. # Issue #427
  84. if isinstance(msg, string_t):
  85. record.msg = str_t(color(safe_str(msg)))
  86. else:
  87. record.msg = safe_str(color(msg))
  88. except Exception as exc:
  89. record.msg = '<Unrepresentable {0!r}: {1!r}>'.format(
  90. type(msg), exc)
  91. record.exc_info = True
  92. return safe_str(logging.Formatter.format(self, record))
  93. class LoggingProxy(object):
  94. """Forward file object to :class:`logging.Logger` instance.
  95. :param logger: The :class:`logging.Logger` instance to forward to.
  96. :param loglevel: Loglevel to use when writing messages.
  97. """
  98. mode = 'w'
  99. name = None
  100. closed = False
  101. loglevel = logging.ERROR
  102. _thread = threading.local()
  103. def __init__(self, logger, loglevel=None):
  104. self.logger = logger
  105. self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
  106. self._safewrap_handlers()
  107. def _safewrap_handlers(self):
  108. """Make the logger handlers dump internal errors to
  109. `sys.__stderr__` instead of `sys.stderr` to circumvent
  110. infinite loops."""
  111. def wrap_handler(handler): # pragma: no cover
  112. class WithSafeHandleError(logging.Handler):
  113. def handleError(self, record):
  114. exc_info = sys.exc_info()
  115. try:
  116. try:
  117. traceback.print_exception(exc_info[0],
  118. exc_info[1],
  119. exc_info[2],
  120. None, sys.__stderr__)
  121. except IOError:
  122. pass # see python issue 5971
  123. finally:
  124. del(exc_info)
  125. handler.handleError = WithSafeHandleError().handleError
  126. return [wrap_handler(h) for h in self.logger.handlers]
  127. def write(self, data):
  128. """Write message to logging object."""
  129. if _in_sighandler:
  130. return print(safe_str(data), file=sys.__stderr__)
  131. if getattr(self._thread, 'recurse_protection', False):
  132. # Logger is logging back to this file, so stop recursing.
  133. return
  134. data = data.strip()
  135. if data and not self.closed:
  136. self._thread.recurse_protection = True
  137. try:
  138. self.logger.log(self.loglevel, safe_str(data))
  139. finally:
  140. self._thread.recurse_protection = False
  141. def writelines(self, sequence):
  142. """`writelines(sequence_of_strings) -> None`.
  143. Write the strings to the file.
  144. The sequence can be any iterable object producing strings.
  145. This is equivalent to calling :meth:`write` for each string.
  146. """
  147. for part in sequence:
  148. self.write(part)
  149. def flush(self):
  150. """This object is not buffered so any :meth:`flush` requests
  151. are ignored."""
  152. pass
  153. def close(self):
  154. """When the object is closed, no write requests are forwarded to
  155. the logging object anymore."""
  156. self.closed = True
  157. def isatty(self):
  158. """Always return :const:`False`. Just here for file support."""
  159. return False
  160. def ensure_process_aware_logger():
  161. """Make sure process name is recorded when loggers are used."""
  162. global _process_aware
  163. if not _process_aware:
  164. logging._acquireLock()
  165. try:
  166. _process_aware = True
  167. Logger = logging.getLoggerClass()
  168. if getattr(Logger, '_process_aware', False): # pragma: no cover
  169. return
  170. class ProcessAwareLogger(Logger):
  171. _process_aware = True
  172. def makeRecord(self, *args, **kwds):
  173. record = Logger.makeRecord(self, *args, **kwds)
  174. record.processName = current_process()._name
  175. return record
  176. logging.setLoggerClass(ProcessAwareLogger)
  177. finally:
  178. logging._releaseLock()
  179. def get_multiprocessing_logger():
  180. return mputil.get_logger() if mputil else None
  181. def reset_multiprocessing_logger():
  182. if mputil and hasattr(mputil, '_logger'):
  183. mputil._logger = None
  184. def _patch_logger_class():
  185. """Make sure loggers don't log while in a signal handler."""
  186. logging._acquireLock()
  187. try:
  188. OldLoggerClass = logging.getLoggerClass()
  189. if not getattr(OldLoggerClass, '_signal_safe', False):
  190. class SigSafeLogger(OldLoggerClass):
  191. _signal_safe = True
  192. def log(self, *args, **kwargs):
  193. if _in_sighandler:
  194. return
  195. return OldLoggerClass.log(self, *args, **kwargs)
  196. logging.setLoggerClass(SigSafeLogger)
  197. finally:
  198. logging._releaseLock()
  199. _patch_logger_class()