log.py

"""celery.log"""
import os
import sys
import time
import logging
import traceback

from celery import conf
from celery.patch import monkeypatch


def get_default_logger(loglevel=None):
    """Get the ``multiprocessing`` util logger, optionally setting ``loglevel``."""
    from multiprocessing.util import get_logger
    logger = get_logger()
    if loglevel is not None:
        logger.setLevel(loglevel)
    return logger

# Module-level flag so the monkeypatch is only applied once per process.
_monkeypatched = [False]


def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
                 format=conf.LOG_FORMAT, **kwargs):
    """Set up the ``multiprocessing`` logger. If ``logfile`` is not
    specified, ``stderr`` is used.

    Returns the logger object.

    """
    if not _monkeypatched[0]:
        monkeypatch()
        _monkeypatched[0] = True

    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger is already configured.
        return logger

    if logfile:
        if hasattr(logfile, "write"):
            # Already an open file-like object.
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger
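

# Usage sketch (illustrative, kept in comments so the module is unchanged at
# import time): how a worker process might call setup_logger(). The loglevel
# and logfile shown here are hypothetical; the real defaults come from
# celery.conf.
#
#     import logging
#     from celery.log import setup_logger
#
#     logger = setup_logger(loglevel=logging.INFO, logfile="celeryd.log")
#     logger.info("Worker booted.")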


def emergency_error(logfile, message):
    """Emergency error logging, for when there are no standard file
    descriptors open because the process has been daemonized or for
    some other reason."""
    logfh_needs_to_close = False
    if not logfile:
        logfile = sys.__stderr__
    if hasattr(logfile, "write"):
        # Already an open file-like object.
        logfh = logfile
    else:
        logfh = open(logfile, "a")
        logfh_needs_to_close = True
    logfh.write("[%(asctime)s: FATAL/%(pid)d]: %(message)s\n" % {
                    "asctime": time.asctime(),
                    "pid": os.getpid(),
                    "message": message})
    if logfh_needs_to_close:
        logfh.close()
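

# Usage sketch (illustrative): emergency_error() is meant for the case where
# the process has been daemonized and the normal handlers are unusable. The
# path and message below are hypothetical examples.
#
#     from celery.log import emergency_error
#
#     emergency_error("/var/log/celeryd.fatal.log",
#                     "Broker connection lost, cannot recover.")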


def redirect_stdouts_to_logger(logger, loglevel=None):
    """Redirect :data:`sys.stdout` and :data:`sys.stderr` to a
    logging instance.

    :param logger: The :class:`logging.Logger` instance to redirect to.
    :param loglevel: The loglevel redirected messages will be logged as.

    """
    proxy = LoggingProxy(logger, loglevel)
    sys.stdout = proxy
    sys.stderr = proxy
    return proxy
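

# Usage sketch (illustrative): after redirection, anything written to
# stdout/stderr (e.g. by print) is forwarded to the logger at the chosen
# level. The names and levels below are hypothetical examples.
#
#     import logging
#     from celery.log import setup_logger, redirect_stdouts_to_logger
#
#     logger = setup_logger(loglevel=logging.INFO, logfile="celeryd.log")
#     proxy = redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
#     print("this line is logged at WARNING level instead of being printed")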


class LoggingProxy(object):
    """Forward file object to :class:`logging.Logger` instance.

    :param logger: The :class:`logging.Logger` instance to forward to.
    :param loglevel: Loglevel to use when writing messages.

    """
    mode = "w"
    name = None
    closed = False
    loglevel = logging.INFO

    def __init__(self, logger, loglevel=None):
        self.logger = logger
        self.loglevel = loglevel or self.logger.level or self.loglevel
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        """Make the logger handlers dump internal errors to
        ``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
        infinite loops."""

        def wrap_handler(handler):

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    exc_info = sys.exc_info()
                    try:
                        traceback.print_exception(exc_info[0], exc_info[1],
                                                  exc_info[2], None,
                                                  sys.__stderr__)
                    except IOError:
                        pass    # see Python issue 5971
                    finally:
                        del exc_info

            handler.handleError = WithSafeHandleError().handleError

        for handler in self.logger.handlers:
            wrap_handler(handler)

    def write(self, data):
        """Write message to logging object."""
        if not self.closed:
            self.logger.log(self.loglevel, data)

    def writelines(self, sequence):
        """``writelines(sequence_of_strings) -> None``.

        Write the strings to the file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.

        """
        for line in sequence:
            self.write(line)

    def flush(self):
        """This object is not buffered, so any :meth:`flush` requests
        are ignored."""
        pass

    def close(self):
        """When the object is closed, no write requests are forwarded to
        the logging object anymore."""
        self.closed = True

    def isatty(self):
        """Always returns ``False``. Just here for file support."""
        return False

    def fileno(self):
        """This proxy is not backed by a real file descriptor."""
        return None
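

# Usage sketch (illustrative): LoggingProxy can also be used directly as a
# file-like object, e.g. handed to code that only needs a write() method.
# The logger name and handler below are hypothetical examples.
#
#     import logging
#     from celery.log import LoggingProxy
#
#     logger = logging.getLogger("demo")
#     logger.addHandler(logging.StreamHandler())
#     proxy = LoggingProxy(logger, loglevel=logging.ERROR)
#     proxy.write("forwarded to the logger at ERROR level")
#     proxy.close()    # further writes are silently dropped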