Browse Source

Abstract away the use of multiprocessing.get_logger()

Ask Solem cách đây 15 năm
mục cha
commit
c1c01f8bb0
4 tập tin đã thay đổi với 17 bổ sung và 10 xóa
  1. 0 1
      celery/execute.py
  2. 10 3
      celery/log.py
  3. 5 5
      celery/worker/controllers.py
  4. 2 1
      celery/worker/job.py

+ 0 - 1
celery/execute.py

@@ -6,7 +6,6 @@ from celery.registry import tasks
 from celery.utils import gen_unique_id, noop, fun_takes_kwargs
 from functools import partial as curry
 from datetime import datetime, timedelta
-from multiprocessing import get_logger
 from celery.exceptions import RetryTaskError
 from celery.datastructures import ExceptionInfo
 from celery.backends import default_backend

+ 10 - 3
celery/log.py

@@ -7,6 +7,13 @@ import traceback
 from celery.conf import LOG_FORMAT, DAEMON_LOG_LEVEL
 
 
+def get_default_logger(loglevel=None):
+    import multiprocessing
+    logger = multiprocessing.get_logger()
+    loglevel is not None and logger.setLevel(loglevel)
+    return logger
+
+
 def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
         **kwargs):
     """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
@@ -14,10 +21,9 @@ def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
 
     Returns logger object.
     """
-    import multiprocessing
-    logger = multiprocessing.get_logger()
-    logger.setLevel(loglevel)
+    logger = get_default_logger(loglevel=loglevel)
     if logger.handlers:
+        # Logger already configured
         return logger
     if logfile:
         if hasattr(logfile, "write"):
@@ -28,6 +34,7 @@ def setup_logger(loglevel=DAEMON_LOG_LEVEL, logfile=None, format=LOG_FORMAT,
         log_file_handler.setFormatter(formatter)
         logger.addHandler(log_file_handler)
     else:
+        import multiprocessing
         multiprocessing.log_to_stderr()
     return logger
 

+ 5 - 5
celery/worker/controllers.py

@@ -6,7 +6,7 @@ Worker Controller Threads
 from celery.backends import default_periodic_status_backend
 from Queue import Empty as QueueEmpty
 from datetime import datetime
-from multiprocessing import get_logger
+from celery.log import get_default_logger
 import traceback
 import threading
 import time
@@ -84,7 +84,7 @@ class Mediator(BackgroundThread):
         self.callback = callback
 
     def on_iteration(self):
-        logger = get_logger()
+        logger = get_default_logger()
         try:
             logger.debug("Mediator: Trying to get message from bucket_queue")
             # This blocks until there's a message in the queue.
@@ -119,7 +119,7 @@ class PeriodicWorkController(BackgroundThread):
         default_periodic_status_backend.init_periodic_tasks()
 
     def on_iteration(self):
-        logger = get_logger()
+        logger = get_default_logger()
         logger.debug("PeriodicWorkController: Running periodic tasks...")
         try:
             self.run_periodic_tasks()
@@ -133,7 +133,7 @@ class PeriodicWorkController(BackgroundThread):
         time.sleep(1)
 
     def run_periodic_tasks(self):
-        logger = get_logger()
+        logger = get_default_logger()
         applied = default_periodic_status_backend.run_periodic_tasks()
         for task, task_id in applied:
             logger.debug(
@@ -143,7 +143,7 @@ class PeriodicWorkController(BackgroundThread):
     def process_hold_queue(self):
         """Finds paused tasks that are ready for execution and move
         them to the :attr:`bucket_queue`."""
-        logger = get_logger()
+        logger = get_default_logger()
         try:
             logger.debug(
                 "PeriodicWorkController: Getting next task from hold queue..")

+ 2 - 1
celery/worker/job.py

@@ -7,6 +7,7 @@ from celery.registry import tasks
 from celery.exceptions import NotRegistered
 from celery.execute import ExecuteWrapper
 from celery.utils import noop, fun_takes_kwargs
+from celery.log import get_default_logger
 from django.core.mail import mail_admins
 import multiprocessing
 import socket
@@ -105,7 +106,7 @@ class TaskWrapper(object):
                 "fail_email_body"):
             setattr(self, opt, opts.get(opt, getattr(self, opt, None)))
         if not self.logger:
-            self.logger = multiprocessing.get_logger()
+            self.logger = get_default_logger()
 
     def __repr__(self):
         return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (