# log.py
  1. # -*- coding: utf-8 -*-
  2. """Logging utilities."""
  3. from __future__ import absolute_import, print_function, unicode_literals
  4. import logging
  5. import numbers
  6. import os
  7. import sys
  8. import threading
  9. import traceback
  10. from contextlib import contextmanager
  11. from kombu.five import values
  12. from kombu.log import get_logger as _get_logger, LOG_LEVELS
  13. from kombu.utils.encoding import safe_str
  14. from celery.five import string_t, text_t
  15. from .term import colored
__all__ = (
    'ColorFormatter', 'LoggingProxy', 'base_logger',
    'set_in_sighandler', 'in_sighandler', 'get_logger',
    'get_task_logger', 'mlevel',
    'get_multiprocessing_logger', 'reset_multiprocessing_logger',
)

# NOTE(review): _process_aware is not read anywhere in this chunk —
# presumably toggled/consulted elsewhere in the package; confirm before removal.
_process_aware = False
#: True while executing inside a signal handler (see set_in_sighandler();
#: consulted by LoggingProxy.write() to avoid unsafe logging).
_in_sighandler = False

PY3 = sys.version_info[0] == 3

#: Truthy when the MP_LOG environment variable is set.
MP_LOG = os.environ.get('MP_LOG', False)

#: Names that get_task_logger() refuses, as they are the hierarchy roots.
RESERVED_LOGGER_NAMES = {'celery', 'celery.task'}

# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger('celery')
  33. def set_in_sighandler(value):
  34. """Set flag signifiying that we're inside a signal handler."""
  35. global _in_sighandler
  36. _in_sighandler = value
  37. def iter_open_logger_fds():
  38. seen = set()
  39. loggers = (list(values(logging.Logger.manager.loggerDict)) +
  40. [logging.getLogger(None)])
  41. for l in loggers:
  42. try:
  43. for handler in l.handlers:
  44. try:
  45. if handler not in seen: # pragma: no cover
  46. yield handler.stream
  47. seen.add(handler)
  48. except AttributeError:
  49. pass
  50. except AttributeError: # PlaceHolder does not have handlers
  51. pass
@contextmanager
def in_sighandler():
    """Context that records that we are in a signal handler."""
    # Set the module-level flag on entry and always clear it on exit,
    # even when the wrapped code raises.
    set_in_sighandler(True)
    try:
        yield
    finally:
        set_in_sighandler(False)
  60. def logger_isa(l, p, max=1000):
  61. this, seen = l, set()
  62. for _ in range(max):
  63. if this == p:
  64. return True
  65. else:
  66. if this in seen:
  67. raise RuntimeError(
  68. 'Logger {0!r} parents recursive'.format(l.name),
  69. )
  70. seen.add(this)
  71. this = this.parent
  72. if not this:
  73. break
  74. else: # pragma: no cover
  75. raise RuntimeError('Logger hierarchy exceeds {0}'.format(max))
  76. return False
def _using_logger_parent(parent_logger, logger_):
    # Reparent *logger_* under *parent_logger* unless it is already a
    # descendant of it; returns *logger_* either way.
    if not logger_isa(logger_, parent_logger):
        logger_.parent = parent_logger
    return logger_
  81. def get_logger(name):
  82. """Get logger by name."""
  83. l = _get_logger(name)
  84. if logging.root not in (l, l.parent) and l is not base_logger:
  85. l = _using_logger_parent(base_logger, l)
  86. return l
#: Parent of all task loggers (see get_task_logger()).
task_logger = get_logger('celery.task')
#: Logger for the 'celery.worker' hierarchy.
worker_logger = get_logger('celery.worker')
  89. def get_task_logger(name):
  90. """Get logger for task module by name."""
  91. if name in RESERVED_LOGGER_NAMES:
  92. raise RuntimeError('Logger name {0!r} is reserved!'.format(name))
  93. return _using_logger_parent(task_logger, get_logger(name))
  94. def mlevel(level):
  95. """Convert level name/int to log level."""
  96. if level and not isinstance(level, numbers.Integral):
  97. return LOG_LEVELS[level.upper()]
  98. return level
class ColorFormatter(logging.Formatter):
    """Logging formatter that adds colors based on severity."""

    #: Loglevel -> Color mapping.
    COLORS = colored().names
    #: Level-name -> color callable used when colorizing messages.
    colors = {
        'DEBUG': COLORS['blue'],
        'WARNING': COLORS['yellow'],
        'ERROR': COLORS['red'],
        'CRITICAL': COLORS['magenta'],
    }

    def __init__(self, fmt=None, use_color=True):
        # use_color=False disables colorizing; format() then returns the
        # plain safe_str() of the message.
        logging.Formatter.__init__(self, fmt)
        self.use_color = use_color

    def formatException(self, ei):
        # A truthy non-tuple exc_info (e.g. the `1` sentinel set in
        # format() below) means "use the currently handled exception".
        if ei and not isinstance(ei, tuple):
            ei = sys.exc_info()
        r = logging.Formatter.formatException(self, ei)
        if isinstance(r, str) and not PY3:
            # Python 2: coerce the traceback text into a safely
            # encodable string.
            return safe_str(r)
        return r

    def format(self, record):
        msg = logging.Formatter.format(self, record)
        color = self.colors.get(record.levelname)

        # reset exception info later for other handlers...
        einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info

        if color and self.use_color:
            try:
                # safe_str will repr the color object
                # and color will break on non-string objects
                # so need to reorder calls based on type.
                # Issue #427
                try:
                    if isinstance(msg, string_t):
                        return text_t(color(safe_str(msg)))
                    return safe_str(color(msg))
                except UnicodeDecodeError:  # pragma: no cover
                    return safe_str(msg)  # skip colors
            except Exception as exc:  # pylint: disable=broad-except
                # Colorizing failed entirely: temporarily swap in a
                # placeholder message plus the current exception, format
                # that, then restore the record so later handlers see the
                # original msg/exc_info.
                prev_msg, record.exc_info, record.msg = (
                    record.msg, 1, '<Unrepresentable {0!r}: {1!r}>'.format(
                        type(msg), exc
                    ),
                )
                try:
                    return logging.Formatter.format(self, record)
                finally:
                    record.msg, record.exc_info = prev_msg, einfo
        else:
            return safe_str(msg)
class LoggingProxy(object):
    """Forward file object to :class:`logging.Logger` instance.

    Arguments:
        logger (~logging.Logger): Logger instance to forward to.
        loglevel (int, str): Log level to use when logging messages.
    """

    mode = 'w'
    name = None
    closed = False
    loglevel = logging.ERROR
    # Thread-local storage for the recursion-protection flag used by
    # write(); each thread gets an independent flag.
    _thread = threading.local()

    def __init__(self, logger, loglevel=None):
        # pylint: disable=redefined-outer-name
        # Note that the logger global is redefined here, be careful changing.
        self.logger = logger
        # Precedence: explicit loglevel arg, then the logger's own level,
        # then the class default (ERROR).
        self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        # Make the logger handlers dump internal errors to
        # :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent
        # infinite loops.

        def wrap_handler(handler):  # pragma: no cover

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    try:
                        traceback.print_exc(None, sys.__stderr__)
                    except IOError:
                        pass  # see python issue 5971

            # Replace the bound handleError on each existing handler.
            handler.handleError = WithSafeHandleError().handleError
        return [wrap_handler(h) for h in self.logger.handlers]

    def write(self, data):
        """Write message to logging object."""
        if _in_sighandler:
            # Logging from inside a signal handler is unsafe; write the
            # raw text straight to the real stderr instead.
            return print(safe_str(data), file=sys.__stderr__)
        if getattr(self._thread, 'recurse_protection', False):
            # Logger is logging back to this file, so stop recursing.
            return
        data = data.strip()
        if data and not self.closed:
            self._thread.recurse_protection = True
            try:
                self.logger.log(self.loglevel, safe_str(data))
            finally:
                self._thread.recurse_protection = False

    def writelines(self, sequence):
        # type: (Sequence[str]) -> None
        """Write list of strings to file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.
        """
        for part in sequence:
            self.write(part)

    def flush(self):
        # This object is not buffered so any :meth:`flush`
        # requests are ignored.
        pass

    def close(self):
        # when the object is closed, no write requests are
        # forwarded to the logging object anymore.
        self.closed = True

    def isatty(self):
        """Here for file support."""
        return False
  211. def get_multiprocessing_logger():
  212. """Return the multiprocessing logger."""
  213. try:
  214. from billiard import util
  215. except ImportError: # pragma: no cover
  216. pass
  217. else:
  218. return util.get_logger()
  219. def reset_multiprocessing_logger():
  220. """Reset multiprocessing logging setup."""
  221. try:
  222. from billiard import util
  223. except ImportError: # pragma: no cover
  224. pass
  225. else:
  226. if hasattr(util, '_logger'): # pragma: no cover
  227. util._logger = None
  228. def current_process():
  229. try:
  230. from billiard import process
  231. except ImportError: # pragma: no cover
  232. pass
  233. else:
  234. return process.current_process()
  235. def current_process_index(base=1):
  236. index = getattr(current_process(), 'index', None)
  237. return index + base if index is not None else index