ソースを参照

Merge branch 'master' of http://github.com/ask/celery

David Cramer 14 年前
コミット
935f6ce4c2
6 ファイル変更、24 行追加、20 行削除
  1. 2 0
      AUTHORS
  2. 4 7
      FAQ
  3. 4 5
      celery/apps/beat.py
  4. 1 1
      celery/bin/celeryev.py
  5. 9 6
      celery/worker/job.py
  6. 4 1
      docs/community.rst

+ 2 - 0
AUTHORS

@@ -44,3 +44,5 @@ Ordered by date of first contribution:
   Noah Kantrowitz <noah@coderanger.net>
   Gert Van Gool <gertvangool@gmail.com>
   sdcooke
+  David Cramer <dcramer@gmail.com>
+  Bryan Berg <bryan@mixedmedialabs.com>

+ 4 - 7
FAQ

@@ -461,16 +461,13 @@ that has an AMQP client.
 How can I get the task id of the current task?
 ----------------------------------------------
 
-**Answer**: Celery does set some default keyword arguments if the task
-accepts them (you can accept them by either using ``**kwargs``, or list them
-specifically)::
+**Answer**: The current id and more is available in the task request::
 
     @task
-    def mytask(task_id=None):
-        cache.set(task_id, "Running")
+    def mytask():
+        cache.set(mytask.request.id, "Running")
 
-The default keyword arguments are documented here:
-http://celeryq.org/docs/userguide/tasks.html#default-keyword-arguments
+For more information see :ref:`task-request-info`.
 
 .. _faq-custom-task-ids:
 

+ 4 - 5
celery/apps/beat.py

@@ -57,11 +57,10 @@ class Beat(object):
     def setup_logging(self):
         handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel,
                                                        logfile=self.logfile)
-        if not handled:
-            logger = self.app.log.get_default_logger(name="celery.beat")
-            if self.redirect_stdouts:
-                self.app.log.redirect_stdouts_to_logger(logger,
-                        loglevel=self.redirect_stdouts_level)
+        logger = self.app.log.get_default_logger(name="celery.beat")
+        if self.redirect_stdouts and not handled:
+            self.app.log.redirect_stdouts_to_logger(logger,
+                    loglevel=self.redirect_stdouts_level)
         return logger
 
     def start_scheduler(self, logger=None):

+ 1 - 1
celery/bin/celeryev.py

@@ -37,7 +37,7 @@ class EvCommand(Command):
     def set_process_status(self, prog, info=""):
         prog = "%s:%s" % (self.prog_name, prog)
         info = "%s %s" % (info, platforms.strargv(sys.argv))
-        return platform.set_process_title(prog, info=info)
+        return platforms.set_process_title(prog, info=info)
 
     def get_options(self):
         return (

+ 9 - 6
celery/worker/job.py

@@ -100,9 +100,10 @@ class WorkerTaskTrace(TaskTrace):
         """Execute, trace and store the result of the task."""
         self.loader.on_task_init(self.task_id, self.task)
         if self.task.track_started:
-            self.task.backend.mark_as_started(self.task_id,
-                                              pid=os.getpid(),
-                                              hostname=self.hostname)
+            if not self.task.ignore_result:
+                self.task.backend.mark_as_started(self.task_id,
+                                                  pid=os.getpid(),
+                                                  hostname=self.hostname)
         try:
             return super(WorkerTaskTrace, self).execute()
         finally:
@@ -415,7 +416,8 @@ class TaskRequest(object):
                 self.task_name, self.task_id))
             exc = TimeLimitExceeded()
 
-        self.task.backend.mark_as_failure(self.task_id, exc)
+        if self._store_errors:
+            self.task.backend.mark_as_failure(self.task_id, exc)
 
     def acknowledge(self):
         if not self.acknowledged:
@@ -459,8 +461,9 @@ class TaskRequest(object):
         # This is a special case as the process would not have had
         # time to write the result.
         if isinstance(exc_info.exception, WorkerLostError):
-            self.task.backend.mark_as_failure(self.task_id,
-                                              exc_info.exception)
+            if self._store_errors:
+                self.task.backend.mark_as_failure(self.task_id,
+                                                  exc_info.exception)
 
         context = {"hostname": self.hostname,
                    "id": self.task_id,

+ 4 - 1
docs/community.rst

@@ -56,6 +56,10 @@ http://botland.oebfare.com/logger/celery/
 News
 ====
 
+Celery, RabbitMQ and sending messages directly.
+-----------------------------------------------
+http://blog.timc3.com/2010/10/17/celery-rabbitmq-and-sending-messages-directly/
+
 Asynchronous Processing Using Celery (historio.us)
 --------------------------------------------------
 http://blog.historio.us/asynchronous-processing-using-celery
@@ -68,7 +72,6 @@ http://www.slideshare.net/shawnrider/massaging-the-pony-message-queues-and-you
 ------------------------------------------------
 http://www.slideshare.net/ericholscher/large-problems
 
-
 Django and asynchronous jobs
 ----------------------------
 http://www.davidfischer.name/2010/09/django-and-asynchronous-jobs/