فهرست منبع

Fix logging configuration/setup: we need to monkeypatch the logger to be
process-aware even on Python 2.6.

Ask Solem 15 سال پیش
والد
کامیت
30865890b1
3 فایل‌های تغییر یافته به همراه 39 افزوده شده و 19 حذف شده
  1. +36 -11
      celery/log.py
  2. +1 -3
      celery/utils/patch.py
  3. +2 -5
      celery/worker/__init__.py

+ 36 - 11
celery/log.py

@@ -7,7 +7,38 @@ import traceback
 
 from celery import conf
 from celery.utils import noop
-from celery.utils.patch import monkeypatch
+
+_hijacked = False
+_monkeypatched = False
+
+def _ensure_process_aware_logger():
+    global _monkeypatched
+
+    if not _monkeypatched:
+        from celery.utils.patch import monkeypatch
+        monkeypatch()
+        _monkeypatched = True
+
+
+def _hijack_multiprocessing_logger():
+    from multiprocessing import util as mputil
+    global _hijacked
+
+    if _hijacked:
+        return mputil.get_logger()
+
+    _ensure_process_aware_logger()
+
+    logging.Logger.manager.loggerDict.clear()
+
+    try:
+        if mputil._logger is not None:
+            mputil.logger = None
+    except AttributeError:
+        pass
+
+    _hijacked = True
+    return mputil.get_logger()
 
 
 def get_default_logger(loglevel=None):
@@ -16,13 +47,12 @@ def get_default_logger(loglevel=None):
     :keyword loglevel: Initial log level.
 
     """
-    from multiprocessing.util import get_logger
-    logger = get_logger()
-    loglevel is not None and logger.setLevel(loglevel)
+    logger = _hijack_multiprocessing_logger()
+    if loglevel is not None:
+        logger.setLevel(loglevel)
     return logger
 
 
-_monkeypatched = [False]
 def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
         format=conf.CELERYD_LOG_FORMAT, **kwargs):
     """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
@@ -30,15 +60,11 @@ def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
 
     Returns logger object.
     """
-    if not _monkeypatched[0]:
-        monkeypatch()
-        _monkeypatched[0] = True
 
     logger = get_default_logger(loglevel=loglevel)
-    if getattr(logger, "_configured", False):
+    if logger.handlers:
         # Logger already configured
         return logger
-    logger.handlers = []
     if logfile:
         handler = logging.FileHandler
         if hasattr(logfile, "write"):
@@ -50,7 +76,6 @@ def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
     else:
         from multiprocessing.util import log_to_stderr
         log_to_stderr()
-    logger._configured = True
     return logger
 
 

+ 1 - 3
celery/utils/patch.py

@@ -24,6 +24,4 @@ def _check_logger_class():
 
 
 def monkeypatch():
-    major, minor = sys.version_info[:2]
-    if major == 2 and minor < 6: # python < 2.6
-        _check_logger_class()
+    _check_logger_class()

+ 2 - 5
celery/worker/__init__.py

@@ -11,7 +11,7 @@ from celery import conf
 from celery import registry
 from celery import platform
 from celery import signals
-from celery.log import setup_logger
+from celery.log import setup_logger, _hijack_multiprocessing_logger
 from celery.beat import ClockServiceThread
 
 from celery.worker.pool import TaskPool
@@ -25,10 +25,7 @@ def process_initializer():
     # There seems to a bug in multiprocessing (backport?)
     # when detached, where the worker gets EOFErrors from time to time
     # and the logger is left from the parent process causing a crash.
-    from logging import Logger
-    from multiprocessing import util as mputil
-    Logger.manager.loggerDict.clear()
-    mputil._logger = None
+    _hijack_multiprocessing_logger()
     platform.set_mp_process_title("celeryd")