  1. # -*- coding: utf-8 -*-
  2. """
  3. celery.utils.log
  4. ~~~~~~~~~~~~~~~~
  5. Logging utilities.
  6. """
  7. from __future__ import absolute_import
  8. import logging
  9. import os
  10. import sys
  11. import threading
  12. import traceback
  13. from billiard import current_process, util as mputil
  14. from kombu.log import get_logger as _get_logger, LOG_LEVELS
  15. from .encoding import safe_str, str_t
  16. from .term import colored
#: Set to True once the process-aware logger class has been installed
#: (see :func:`ensure_process_aware_logger`).
_process_aware = False

#: True when running on Python 3.
is_py3k = sys.version_info[0] == 3

#: Multiprocessing debug logging is enabled when the :envvar:`MP_LOG`
#: environment variable is set.
MP_LOG = os.environ.get('MP_LOG', False)

# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger('celery')
mp_logger = _get_logger('multiprocessing')

#: Flag set while a signal handler is executing; loggers must not emit
#: records while this is true (see :func:`set_in_sighandler`).
in_sighandler = False
  28. def set_in_sighandler(value):
  29. global in_sighandler
  30. in_sighandler = value
  31. def get_logger(name):
  32. l = _get_logger(name)
  33. if logging.root not in (l, l.parent) and l is not base_logger:
  34. l.parent = base_logger
  35. return l
#: Logger that all task loggers inherit from (itself a child of the
#: base ``celery`` logger).
task_logger = get_logger('celery.task')
  37. def get_task_logger(name):
  38. logger = get_logger(name)
  39. if logger.parent is logging.root:
  40. logger.parent = task_logger
  41. return logger
  42. def mlevel(level):
  43. if level and not isinstance(level, int):
  44. return LOG_LEVELS[level.upper()]
  45. return level
  46. class ColorFormatter(logging.Formatter):
  47. #: Loglevel -> Color mapping.
  48. COLORS = colored().names
  49. colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
  50. 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
  51. def __init__(self, fmt=None, use_color=True):
  52. logging.Formatter.__init__(self, fmt)
  53. self.use_color = use_color
  54. def formatException(self, ei):
  55. r = logging.Formatter.formatException(self, ei)
  56. if isinstance(r, str) and not is_py3k:
  57. return safe_str(r)
  58. return r
  59. def format(self, record):
  60. levelname = record.levelname
  61. color = self.colors.get(levelname)
  62. if self.use_color and color:
  63. try:
  64. record.msg = safe_str(str_t(color(record.msg)))
  65. except Exception, exc:
  66. record.msg = '<Unrepresentable %r: %r>' % (
  67. type(record.msg), exc)
  68. record.exc_info = True
  69. if not is_py3k and 'processName' not in record.__dict__:
  70. # Very ugly, but have to make sure processName is supported
  71. # by foreign logger instances.
  72. # (processName is always supported by Python 2.7)
  73. process_name = current_process and current_process()._name or ''
  74. record.__dict__['processName'] = process_name
  75. return safe_str(logging.Formatter.format(self, record))
  76. class LoggingProxy(object):
  77. """Forward file object to :class:`logging.Logger` instance.
  78. :param logger: The :class:`logging.Logger` instance to forward to.
  79. :param loglevel: Loglevel to use when writing messages.
  80. """
  81. mode = 'w'
  82. name = None
  83. closed = False
  84. loglevel = logging.ERROR
  85. _thread = threading.local()
  86. def __init__(self, logger, loglevel=None):
  87. self.logger = logger
  88. self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
  89. self._safewrap_handlers()
  90. def _safewrap_handlers(self):
  91. """Make the logger handlers dump internal errors to
  92. `sys.__stderr__` instead of `sys.stderr` to circumvent
  93. infinite loops."""
  94. def wrap_handler(handler): # pragma: no cover
  95. class WithSafeHandleError(logging.Handler):
  96. def handleError(self, record):
  97. exc_info = sys.exc_info()
  98. try:
  99. try:
  100. traceback.print_exception(exc_info[0],
  101. exc_info[1],
  102. exc_info[2],
  103. None, sys.__stderr__)
  104. except IOError:
  105. pass # see python issue 5971
  106. finally:
  107. del(exc_info)
  108. handler.handleError = WithSafeHandleError().handleError
  109. return map(wrap_handler, self.logger.handlers)
  110. def write(self, data):
  111. """Write message to logging object."""
  112. if in_sighandler:
  113. return sys.__stderr__.write(safe_str(data))
  114. if getattr(self._thread, 'recurse_protection', False):
  115. # Logger is logging back to this file, so stop recursing.
  116. return
  117. data = data.strip()
  118. if data and not self.closed:
  119. self._thread.recurse_protection = True
  120. try:
  121. self.logger.log(self.loglevel, safe_str(data))
  122. finally:
  123. self._thread.recurse_protection = False
  124. def writelines(self, sequence):
  125. """`writelines(sequence_of_strings) -> None`.
  126. Write the strings to the file.
  127. The sequence can be any iterable object producing strings.
  128. This is equivalent to calling :meth:`write` for each string.
  129. """
  130. for part in sequence:
  131. self.write(part)
  132. def flush(self):
  133. """This object is not buffered so any :meth:`flush` requests
  134. are ignored."""
  135. pass
  136. def close(self):
  137. """When the object is closed, no write requests are forwarded to
  138. the logging object anymore."""
  139. self.closed = True
  140. def isatty(self):
  141. """Always returns :const:`False`. Just here for file support."""
  142. return False
  143. def fileno(self):
  144. return None
  145. def ensure_process_aware_logger():
  146. """Make sure process name is recorded when loggers are used."""
  147. global _process_aware
  148. if not _process_aware:
  149. logging._acquireLock()
  150. try:
  151. _process_aware = True
  152. Logger = logging.getLoggerClass()
  153. if getattr(Logger, '_process_aware', False): # pragma: no cover
  154. return
  155. class ProcessAwareLogger(Logger):
  156. _process_aware = True
  157. def makeRecord(self, *args, **kwds):
  158. record = Logger.makeRecord(self, *args, **kwds)
  159. record.processName = current_process()._name
  160. return record
  161. logging.setLoggerClass(ProcessAwareLogger)
  162. finally:
  163. logging._releaseLock()
  164. def get_multiprocessing_logger():
  165. return mputil.get_logger() if mputil and MP_LOG else None
  166. def reset_multiprocessing_logger():
  167. if mputil and hasattr(mputil, '_logger'):
  168. mputil._logger = None
  169. def _patch_logger_class():
  170. """Make sure loggers don't log while in a signal handler."""
  171. logging._acquireLock()
  172. try:
  173. OldLoggerClass = logging.getLoggerClass()
  174. if not getattr(OldLoggerClass, '_signal_safe', False):
  175. class SigSafeLogger(OldLoggerClass):
  176. _signal_safe = True
  177. def log(self, *args, **kwargs):
  178. if in_sighandler:
  179. sys.__stderr__.write('CANNOT LOG IN SIGHANDLER')
  180. return
  181. return OldLoggerClass.log(self, *args, **kwargs)
  182. logging.setLoggerClass(SigSafeLogger)
  183. finally:
  184. logging._releaseLock()
# Install the signal-safe logger class as a module import side-effect,
# so every logger created afterwards honours :data:`in_sighandler`.
_patch_logger_class()