log.py 4.4 KB

  1. """celery.log"""
  2. import os
  3. import sys
  4. import time
  5. import logging
  6. import traceback
  7. from celery.conf import LOG_FORMAT, DAEMON_LOG_LEVEL
  8. def get_default_logger(loglevel=None):
  9. import multiprocessing
  10. logger = multiprocessing.get_logger()
  11. loglevel is not None and logger.setLevel(loglevel)
  12. return logger
def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
                 **kwargs):
    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
    ``stderr`` is used.

    Returns logger object.

    """
    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger already configured
        return logger
    if logfile:
        if hasattr(logfile, "write"):
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger

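# Illustrative use of ``setup_logger`` (the log file name below is made up for
# the example, not taken from celery itself):
#
#     >>> logger = setup_logger(loglevel=logging.DEBUG, logfile="celeryd.log")
#     >>> logger.info("Connected to broker")
#
# Any object with a ``write`` method (e.g. an already open stream) can also be
# passed as ``logfile``; it is then wrapped in a ``StreamHandler``.
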
def emergency_error(logfile, message):
    """Emergency error logging, for when there are no standard file
    descriptors open, because the process has been daemonized or for
    some other reason."""
    logfh_needs_to_close = False
    if not logfile:
        logfile = sys.__stderr__
    if hasattr(logfile, "write"):
        logfh = logfile
    else:
        logfh = open(logfile, "a")
        logfh_needs_to_close = True
    logfh.write("[%(asctime)s: FATAL/%(pid)d]: %(message)s\n" % {
                    "asctime": time.asctime(),
                    "pid": os.getpid(),
                    "message": message})
    if logfh_needs_to_close:
        logfh.close()

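# Illustrative use of ``emergency_error`` (path and message are hypothetical):
#
#     >>> emergency_error("/var/log/celeryd.log", "Can't connect to the broker")
#
# Passing a false value (e.g. ``None``) as ``logfile`` writes the message to
# ``sys.__stderr__`` instead of opening a file.
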
def redirect_stdouts_to_logger(logger, loglevel=None):
    """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
    logging instance.

    :param logger: The :class:`logging.Logger` instance to redirect to.
    :param loglevel: The loglevel redirected messages will be logged as.

    """
    proxy = LoggingProxy(logger, loglevel)
    sys.stdout = proxy
    sys.stderr = proxy
    return proxy

class LoggingProxy(object):
    """Forward file object to :class:`logging.Logger` instance.

    :param logger: The :class:`logging.Logger` instance to forward to.
    :param loglevel: Loglevel to use when writing messages.

    """
    mode = "w"
    name = None
    closed = False
    loglevel = logging.INFO

    def __init__(self, logger, loglevel=None):
        self.logger = logger
        self.loglevel = loglevel or self.logger.level or self.loglevel
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        """Make the logger handlers dump internal errors to
        ``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
        infinite loops."""

        def wrap_handler(handler):

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    exc_info = sys.exc_info()
                    try:
                        traceback.print_exception(exc_info[0], exc_info[1],
                                                  exc_info[2], None,
                                                  sys.__stderr__)
                    except IOError:
                        pass    # see python issue 5971
                    finally:
                        del exc_info

            handler.handleError = WithSafeHandleError().handleError

        # Evaluate eagerly so every handler is wrapped even where ``map``
        # would be lazy (Python 3).
        return [wrap_handler(handler) for handler in self.logger.handlers]

    def write(self, data):
        """Write message to logging object."""
        if not self.closed:
            self.logger.log(self.loglevel, data)

    def writelines(self, sequence):
        """``writelines(sequence_of_strings) -> None``.

        Write the strings to the file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.

        """
        for part in sequence:
            self.write(part)

    def flush(self):
        """This object is not buffered so any :meth:`flush` requests
        are ignored."""
        pass

    def close(self):
        """When the object is closed, no write requests are forwarded to
        the logging object anymore."""
        self.closed = True

    def isatty(self):
        """Always returns ``False``. Just here for file support."""
        return False

    def fileno(self):
        """Always returns ``None``; there is no real file descriptor."""
        return None

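# Minimal end-to-end sketch (not part of the original module; guarded so it
# only runs when the file is executed directly and ``celery.conf`` is
# importable).  It wires stdout/stderr through a LoggingProxy so stray
# ``print`` output is routed through the logging system.
if __name__ == "__main__":
    logger = setup_logger(loglevel=logging.DEBUG)
    redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
    print("this line is logged at WARNING level via the LoggingProxy")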